Diffstat (limited to 'drivers/acpi')
344 files changed, 32694 insertions, 14290 deletions
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig index 90ff0a47c12e..ca00a5dbcf75 100644 --- a/drivers/acpi/Kconfig +++ b/drivers/acpi/Kconfig @@ -11,6 +11,8 @@ menuconfig ACPI depends on ARCH_SUPPORTS_ACPI select PNP select NLS + select CRC32 + select FIRMWARE_TABLE default y if X86 help Advanced Configuration and Power Interface (ACPI) support for @@ -26,13 +28,10 @@ menuconfig ACPI Management (APM) specification. If both ACPI and APM support are configured, ACPI is used. - The project home page for the Linux ACPI subsystem is here: - <https://01.org/linux-acpi> - Linux support for ACPI is based on Intel Corporation's ACPI Component Architecture (ACPI CA). For more information on the ACPI CA, see: - <http://acpica.org/> + <https://acpica.org/> ACPI is an open industry specification originally co-developed by Hewlett-Packard, Intel, Microsoft, Phoenix, and Toshiba. Currently, @@ -40,8 +39,7 @@ menuconfig ACPI the UEFI Forum and any UEFI member can join the ASWG and contribute to the ACPI specification. The specification is available at: - <http://www.acpi.info> - <http://www.uefi.org/acpi/specs> + <https://uefi.org/specifications> if ACPI @@ -60,6 +58,13 @@ config ACPI_SYSTEM_POWER_STATES_SUPPORT config ACPI_CCA_REQUIRED bool +config ACPI_TABLE_LIB + bool + +config ACPI_THERMAL_LIB + depends on THERMAL + bool + config ACPI_DEBUGGER bool "AML debugger interface" select ACPI_DEBUG @@ -72,7 +77,7 @@ config ACPI_DEBUGGER if ACPI_DEBUGGER config ACPI_DEBUGGER_USER - tristate "Userspace debugger accessiblity" + tristate "Userspace debugger accessibility" depends on DEBUG_FS help Export /sys/kernel/debug/acpi/acpidbg for userspace utilities @@ -88,6 +93,14 @@ config ACPI_SPCR_TABLE This table provides information about the configuration of the earlycon console. +config ACPI_FPDT + bool "ACPI Firmware Performance Data Table (FPDT) support" + depends on X86_64 || ARM64 + help + Enable support for the Firmware Performance Data Table (FPDT). + This table provides information on the timing of the system + boot, S3 suspend and S3 resume firmware code paths. + config ACPI_LPIT bool depends on X86_64 @@ -99,23 +112,6 @@ config ACPI_SLEEP depends on ACPI_SYSTEM_POWER_STATES_SUPPORT default y -config ACPI_PROCFS_POWER - bool "Deprecated power /proc/acpi directories" - depends on X86 && PROC_FS - help - For backwards compatibility, this option allows - deprecated power /proc/acpi/ directories to exist, even when - they have been replaced by functions in /sys. - The deprecated directories (and their replacements) include: - /proc/acpi/battery/* (/sys/class/power_supply/*) and - /proc/acpi/ac_adapter/* (sys/class/power_supply/*). - This option has no effect on /proc/acpi/ directories - and functions which do not yet exist in /sys. - This option, together with the proc directories, will be - deleted in the future. - - Say N to delete power /proc/acpi/ directories that have moved to /sys. - config ACPI_REV_OVERRIDE_POSSIBLE bool "Allow supported ACPI revision to be overridden" depends on X86 @@ -136,8 +132,17 @@ config ACPI_REV_OVERRIDE_POSSIBLE makes it possible to force the kernel to return "5" as the supported ACPI revision via the "acpi_rev_override" command line switch. +config ACPI_EC + bool "Embedded Controller" + depends on HAS_IOPORT + default X86 || LOONGARCH + help + This driver handles communication with the microcontroller + on many x86/LoongArch laptops and other machines. 
+ config ACPI_EC_DEBUGFS tristate "EC read/write access through /sys/kernel/debug/ec" + depends on ACPI_EC help Say N to disable Embedded Controller /sys/kernel/debug interface @@ -155,7 +160,6 @@ config ACPI_EC_DEBUGFS config ACPI_AC tristate "AC Adapter" - depends on X86 select POWER_SUPPLY default y help @@ -168,7 +172,6 @@ config ACPI_AC config ACPI_BATTERY tristate "Battery" - depends on X86 select POWER_SUPPLY default y help @@ -192,10 +195,35 @@ config ACPI_BUTTON To compile this driver as a module, choose M here: the module will be called button. +config ACPI_TINY_POWER_BUTTON + tristate "Tiny Power Button Driver" + depends on !ACPI_BUTTON + help + This driver provides a tiny alternative to the ACPI Button driver. + The tiny power button driver only handles the power button. Rather + than notifying userspace via the input layer or a netlink event, this + driver directly signals the init process to shut down. + + This driver is particularly suitable for cloud and VM environments, + which use a simulated power button to initiate a controlled poweroff, + but which may not want to run a separate userspace daemon to process + input events. + +config ACPI_TINY_POWER_BUTTON_SIGNAL + int "Tiny Power Button Signal" + depends on ACPI_TINY_POWER_BUTTON + default 38 + help + Default signal to send to init in response to the power button. + + Likely values here include 38 (SIGRTMIN+4) to power off, or 2 + (SIGINT) to simulate Ctrl+Alt+Del. + config ACPI_VIDEO tristate "Video" - depends on X86 && BACKLIGHT_CLASS_DEVICE + depends on BACKLIGHT_CLASS_DEVICE depends on INPUT + depends on ACPI_WMI || !X86 select THERMAL help This driver implements the ACPI Extensions For Display Adapters @@ -239,11 +267,11 @@ config ACPI_DOCK config ACPI_CPU_FREQ_PSS bool - select THERMAL config ACPI_PROCESSOR_CSTATE def_bool y - depends on IA64 || X86 + depends on ACPI_PROCESSOR + depends on X86 config ACPI_PROCESSOR_IDLE bool @@ -267,9 +295,10 @@ config ACPI_CPPC_LIB config ACPI_PROCESSOR tristate "Processor" - depends on X86 || IA64 || ARM64 + depends on X86 || ARM64 || LOONGARCH || RISCV select ACPI_PROCESSOR_IDLE - select ACPI_CPU_FREQ_PSS if X86 || IA64 + select ACPI_CPU_FREQ_PSS if X86 || LOONGARCH + select THERMAL default y help This driver adds support for the ACPI Processor package. It is required @@ -285,7 +314,7 @@ config ACPI_IPMI help This driver enables the ACPI to access the BMC controller. And it uses the IPMI request/response message to communicate with BMC - controller, which can be found on on the server. + controller, which can be found on the server. To compile this driver as a module, choose M here: the module will be called as acpi_ipmi. @@ -294,7 +323,6 @@ config ACPI_HOTPLUG_CPU bool depends on ACPI_PROCESSOR && HOTPLUG_CPU select ACPI_CONTAINER - default y config ACPI_PROCESSOR_AGGREGATOR tristate "Processor Aggregator" @@ -311,6 +339,7 @@ config ACPI_THERMAL tristate "Thermal Zone" depends on ACPI_PROCESSOR select THERMAL + select ACPI_THERMAL_LIB default y help This driver supports ACPI thermal zones. Most mobile and @@ -321,11 +350,8 @@ config ACPI_THERMAL To compile this driver as a module, choose M here: the module will be called thermal. 
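For context on the ACPI_TINY_POWER_BUTTON_SIGNAL entry above: the driver signals the init process directly, so PID 1 must be ready to catch the configured signal. Below is a minimal, hypothetical userspace sketch of such a handler, assuming the default value of 38 (SIGRTMIN+4 under glibc) and CAP_SYS_BOOT; the handler name and overall structure are illustrative and not part of this patch.

/*
 * Illustrative only: a bare-bones init-style loop that powers the machine
 * off when it receives the tiny power button driver's default signal
 * (38, i.e. SIGRTMIN + 4 with glibc).
 */
#include <signal.h>
#include <unistd.h>
#include <sys/reboot.h>

static volatile sig_atomic_t power_off_requested;

static void handle_power_button(int sig)
{
	(void)sig;
	power_off_requested = 1;
}

int main(void)
{
	struct sigaction sa = { 0 };

	sa.sa_handler = handle_power_button;
	/* 38 is SIGRTMIN+4 with glibc, matching the Kconfig default above */
	sigaction(SIGRTMIN + 4, &sa, NULL);

	while (!power_off_requested)
		pause();	/* sleep until the driver signals us */

	sync();			/* flush dirty data before power-off */
	reboot(RB_POWER_OFF);	/* needs CAP_SYS_BOOT; never returns on success */
	return 0;
}

With ACPI_TINY_POWER_BUTTON_SIGNAL set to 2 (SIGINT) instead, the same loop would model a Ctrl+Alt+Del style shutdown, as the help text suggests.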
-config ACPI_NUMA - bool "NUMA support" - depends on NUMA - depends on (X86 || IA64 || ARM64) - default y if IA64_GENERIC || IA64_SGI_SN2 || ARM64 +config ACPI_PLATFORM_PROFILE + tristate config ACPI_CUSTOM_DSDT_FILE string "Custom DSDT Table file to include" @@ -333,7 +359,6 @@ config ACPI_CUSTOM_DSDT_FILE depends on !STANDALONE help This option supports a custom DSDT by linking it into the kernel. - See Documentation/acpi/dsdt-override.txt Enter the full path name to the file which includes the AmlCode or dsdt_aml_code declaration. @@ -355,16 +380,27 @@ config ACPI_TABLE_UPGRADE This option provides functionality to upgrade arbitrary ACPI tables via initrd. No functional change if no ACPI tables are passed via initrd, therefore it's safe to say Y. - See Documentation/acpi/initrd_table_override.txt for details + See Documentation/admin-guide/acpi/initrd_table_override.rst for details + +config ACPI_TABLE_OVERRIDE_VIA_BUILTIN_INITRD + bool "Override ACPI tables from built-in initrd" + depends on ACPI_TABLE_UPGRADE + depends on INITRAMFS_SOURCE!="" && INITRAMFS_COMPRESSION_NONE + help + This option provides functionality to override arbitrary ACPI tables + from built-in uncompressed initrd. + + See Documentation/admin-guide/acpi/initrd_table_override.rst for details config ACPI_DEBUG bool "Debug Statements" + default y help The ACPI subsystem can produce debug output. Saying Y enables this output and increases the kernel size by around 50K. Use the acpi.debug_layer and acpi.debug_level kernel command-line - parameters documented in Documentation/acpi/debug.txt and + parameters documented in Documentation/firmware-guide/acpi/debug.rst and Documentation/admin-guide/kernel-parameters.rst to control the type and amount of debug output. @@ -386,9 +422,6 @@ config ACPI_CONTAINER This helps support hotplug of nodes, CPUs, and memory. - To compile this driver as a module, choose M here: - the module will be called container. - config ACPI_HOTPLUG_MEMORY bool "Memory Hotplug" depends on MEMORY_HOTPLUG @@ -402,9 +435,6 @@ config ACPI_HOTPLUG_MEMORY removing memory devices at runtime, you need not enable this driver. - To compile this driver as a module, choose M here: - the module will be called acpi_memhotplug. - config ACPI_HOTPLUG_IOAPIC bool depends on PCI @@ -413,7 +443,7 @@ config ACPI_HOTPLUG_IOAPIC config ACPI_SBS tristate "Smart Battery System" - depends on X86 + depends on X86 && ACPI_EC select POWER_SUPPLY help This driver supports the Smart Battery System, another @@ -423,30 +453,16 @@ config ACPI_SBS the modules will be called sbs and sbshc. config ACPI_HED - tristate "Hardware Error Device" + bool "Hardware Error Device" help This driver supports the Hardware Error Device (PNP0C33), which is used to report some hardware errors notified via SCI, mainly the corrected errors. -config ACPI_CUSTOM_METHOD - tristate "Allow ACPI methods to be inserted/replaced at run time" - depends on DEBUG_FS - help - This debug facility allows ACPI AML methods to be inserted and/or - replaced without rebooting the system. For details refer to: - Documentation/acpi/method-customizing.txt. - - NOTE: This option is security sensitive, because it allows arbitrary - kernel memory to be written to by root (uid=0) users, allowing them - to bypass certain security measures (e.g. if root is not allowed to - load additional kernel modules after boot, this feature may be used - to override that restriction). 
- config ACPI_BGRT bool "Boottime Graphics Resource Table support" - depends on EFI && (X86 || ARM64) - help + depends on EFI + help This driver adds support for exposing the ACPI Boottime Graphics Resource Table, which allows the operating system to obtain data from the firmware boot splash. It will appear under @@ -454,7 +470,6 @@ config ACPI_BGRT config ACPI_REDUCED_HARDWARE_ONLY bool "Hardware-reduced ACPI support only" if EXPERT - def_bool n help This config item changes the way the ACPI code is built. When this option is selected, the kernel will use a specialized version of @@ -464,8 +479,11 @@ config ACPI_REDUCED_HARDWARE_ONLY If you are unsure what to do, do not enable this option. -source "drivers/acpi/nfit/Kconfig" +config ACPI_NHLT + bool +source "drivers/acpi/nfit/Kconfig" +source "drivers/acpi/numa/Kconfig" source "drivers/acpi/apei/Kconfig" source "drivers/acpi/dptf/Kconfig" @@ -495,77 +513,96 @@ config ACPI_EXTLOG config ACPI_ADXL bool -menuconfig PMIC_OPREGION - bool "PMIC (Power Management Integrated Circuit) operation region support" +config ACPI_CONFIGFS + tristate "ACPI configfs support" + select CONFIGFS_FS help - Select this option to enable support for ACPI operation - region of the PMIC chip. The operation region can be used - to control power rails and sensor reading/writing on the - PMIC chip. + Select this option to enable support for ACPI configuration from + userspace. The configurable ACPI groups will be visible under + /config/acpi, assuming configfs is mounted under /config. -if PMIC_OPREGION -config CRC_PMIC_OPREGION - bool "ACPI operation region support for CrystalCove PMIC" - depends on INTEL_SOC_PMIC +config ACPI_PFRUT + tristate "ACPI Platform Firmware Runtime Update and Telemetry" + depends on 64BIT help - This config adds ACPI operation region support for CrystalCove PMIC. + This mechanism allows certain pieces of the platform firmware + to be updated on the fly while the system is running (runtime) + without the need to restart it, which is key in the cases when + the system needs to be available 100% of the time and it cannot + afford the downtime related to restarting it, or when the work + carried out by the system is particularly important, so it cannot + be interrupted, and it is not practical to wait until it is complete. -config XPOWER_PMIC_OPREGION - bool "ACPI operation region support for XPower AXP288 PMIC" - depends on MFD_AXP20X_I2C && IOSF_MBI=y - help - This config adds ACPI operation region support for XPower AXP288 PMIC. + The existing firmware code can be modified (driver update) or + extended by adding new code to the firmware (code injection). -config BXT_WC_PMIC_OPREGION - bool "ACPI operation region support for BXT WhiskeyCove PMIC" - depends on INTEL_SOC_PMIC_BXTWC - help - This config adds ACPI operation region support for BXT WhiskeyCove PMIC. + Besides, the telemetry driver allows user space to fetch telemetry + data from the firmware with the help of the Platform Firmware Runtime + Telemetry interface. -config CHT_WC_PMIC_OPREGION - bool "ACPI operation region support for CHT Whiskey Cove PMIC" - depends on INTEL_SOC_PMIC_CHTWC - help - This config adds ACPI operation region support for CHT Whiskey Cove PMIC. + To compile the drivers as modules, choose M here: + the modules will be called pfr_update and pfr_telemetry. 
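For context on the ACPI_CONFIGFS entry above: the acpi_configfs.c hunks later in this patch register a "table" default group whose items expose an "aml" binary attribute, and writing a table into that attribute ends up in acpi_load_table(). A hypothetical userspace sketch follows, assuming configfs is mounted on /config as the help text suggests; "my_ssdt" and "my_ssdt.aml" are placeholder names for a pre-compiled SSDT blob.

/*
 * Illustrative only: load an SSDT at runtime through the ACPI configfs
 * interface (CONFIG_ACPI_CONFIGFS). Assumes configfs is mounted on /config.
 */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>

int main(void)
{
	const char *item = "/config/acpi/table/my_ssdt";	/* placeholder item name */
	const char *attr = "/config/acpi/table/my_ssdt/aml";
	char buf[4096];
	ssize_t n;
	int in, out;

	/* Creating the directory instantiates a new, empty table item. */
	if (mkdir(item, 0755) && errno != EEXIST) {
		perror("mkdir");
		return 1;
	}

	in = open("my_ssdt.aml", O_RDONLY);	/* pre-compiled SSDT blob */
	out = open(attr, O_WRONLY);
	if (in < 0 || out < 0) {
		perror("open");
		return 1;
	}

	/* Writing the AML into "aml" makes the kernel load the table. */
	while ((n = read(in, buf, sizeof(buf))) > 0) {
		if (write(out, buf, n) != n) {
			perror("write");
			return 1;
		}
	}

	close(out);
	close(in);
	return 0;
}

The same effect can be had from a shell with mkdir and cat; the C version only spells out the item-creation step and the attribute write explicitly.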
-config CHT_DC_TI_PMIC_OPREGION - bool "ACPI operation region support for Dollar Cove TI PMIC" - depends on INTEL_SOC_PMIC_CHTDC_TI - help - This config adds ACPI operation region support for Dollar Cove TI PMIC. +if ARM64 +source "drivers/acpi/arm64/Kconfig" +endif +if RISCV +source "drivers/acpi/riscv/Kconfig" endif -config ACPI_CONFIGFS - tristate "ACPI configfs support" - select CONFIGFS_FS +config ACPI_PPTT + bool + +config ACPI_PCC + bool "ACPI PCC Address Space" + depends on PCC + default y help - Select this option to enable support for ACPI configuration from - userspace. The configurable ACPI groups will be visible under - /config/acpi, assuming configfs is mounted under /config. + The PCC Address Space also referred as PCC Operation Region pertains + to the region of PCC subspace that succeeds the PCC signature. -if ARM64 -source "drivers/acpi/arm64/Kconfig" + The PCC Operation Region works in conjunction with the PCC Table + (Platform Communications Channel Table). PCC subspaces that are + marked for use as PCC Operation Regions must not be used as PCC + subspaces for the standard ACPI features such as CPPC, RASF, PDTT and + MPST. These standard features must always use the PCC Table instead. -config ACPI_PPTT + Enable this feature if you want to set up and install the PCC Address + Space handler to handle PCC OpRegion in the firmware. + +config ACPI_FFH + bool "ACPI FFH Address Space" + default n + help + The FFH(Fixed Function Hardware) Address Space also referred as FFH + Operation Region allows to define platform specific opregion. + + Enable this feature if you want to set up and install the FFH Address + Space handler to handle FFH OpRegion in the firmware. + +config ACPI_MRRM bool -endif -config TPS68470_PMIC_OPREGION - bool "ACPI operation region support for TPS68470 PMIC" - depends on MFD_TPS68470 - help - This config adds ACPI operation region support for TI TPS68470 PMIC. - TPS68470 device is an advanced power management unit that powers - a Compact Camera Module (CCM), generates clocks for image sensors, - drives a dual LED for flash and incorporates two LED drivers for - general purpose indicators. - This driver enables ACPI operation region support control voltage - regulators and clocks. - - This option is a bool as it provides an ACPI operation - region, which must be available before any of the devices - using this, are probed. +source "drivers/acpi/pmic/Kconfig" + +config ACPI_VIOT + bool + +config ACPI_PRMT + bool "Platform Runtime Mechanism Support" + depends on EFI_RUNTIME_WRAPPERS && (X86_64 || ARM64) + default y + help + Platform Runtime Mechanism (PRM) is a firmware interface exposing a + set of binary executables that can be called from the AML interpreter + or directly from device drivers. + + Say Y to enable the AML interpreter to execute the PRM code. + + While this feature is optional in principle, leaving it out may + substantially increase computational overhead related to the + initialization of some server systems. 
endif # ACPI diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile index bb857421c2e8..d1b0affb844f 100644 --- a/drivers/acpi/Makefile +++ b/drivers/acpi/Makefile @@ -5,11 +5,19 @@ ccflags-$(CONFIG_ACPI_DEBUG) += -DACPI_DEBUG_OUTPUT +ifdef CONFIG_TRACE_BRANCH_PROFILING +CFLAGS_processor_idle.o += -DDISABLE_BRANCH_PROFILING +endif + # # ACPI Boot-Time Table Parsing # +ifeq ($(CONFIG_ACPI_CUSTOM_DSDT),y) +tables.o: $(src)/../../include/$(CONFIG_ACPI_CUSTOM_DSDT_FILE) ; + +endif + obj-$(CONFIG_ACPI) += tables.o -obj-$(CONFIG_X86) += blacklist.o # # ACPI Core Subsystem (Interpreter) @@ -32,35 +40,33 @@ acpi-$(CONFIG_ACPI_SLEEP) += proc.o # ACPI Bus and Device Drivers # acpi-y += bus.o glue.o -acpi-y += scan.o +acpi-y += scan.o mipi-disco-img.o acpi-y += resource.o acpi-y += acpi_processor.o acpi-y += processor_core.o acpi-$(CONFIG_ARCH_MIGHT_HAVE_ACPI_PDC) += processor_pdc.o -acpi-y += ec.o +acpi-$(CONFIG_ACPI_EC) += ec.o acpi-$(CONFIG_ACPI_DOCK) += dock.o acpi-$(CONFIG_PCI) += pci_root.o pci_link.o pci_irq.o obj-$(CONFIG_ACPI_MCFG) += pci_mcfg.o -acpi-$(CONFIG_PCI) += acpi_lpss.o acpi-y += acpi_apd.o acpi-y += acpi_platform.o acpi-y += acpi_pnp.o -acpi-$(CONFIG_ARM_AMBA) += acpi_amba.o acpi-y += power.o acpi-y += event.o -acpi-$(CONFIG_ACPI_REDUCED_HARDWARE_ONLY) += evged.o +acpi-y += evged.o acpi-y += sysfs.o acpi-y += property.o -acpi-$(CONFIG_X86) += acpi_cmos_rtc.o -acpi-$(CONFIG_X86) += x86/apple.o -acpi-$(CONFIG_X86) += x86/utils.o acpi-$(CONFIG_DEBUG_FS) += debugfs.o -acpi-$(CONFIG_ACPI_NUMA) += numa.o -acpi-$(CONFIG_ACPI_PROCFS_POWER) += cm_sbs.o acpi-y += acpi_lpat.o +acpi-$(CONFIG_ACPI_FPDT) += acpi_fpdt.o acpi-$(CONFIG_ACPI_LPIT) += acpi_lpit.o acpi-$(CONFIG_ACPI_GENERIC_GSI) += irq.o acpi-$(CONFIG_ACPI_WATCHDOG) += acpi_watchdog.o +acpi-$(CONFIG_ACPI_PRMT) += prmt.o +acpi-$(CONFIG_ACPI_PCC) += acpi_pcc.o +acpi-$(CONFIG_ACPI_FFH) += acpi_ffh.o +acpi-$(CONFIG_ACPI_MRRM) += acpi_mrrm.o # Address translation acpi-$(CONFIG_ACPI_ADXL) += acpi_adxl.o @@ -72,14 +78,23 @@ obj-$(CONFIG_ACPI_IPMI) += acpi_ipmi.o obj-$(CONFIG_ACPI_AC) += ac.o obj-$(CONFIG_ACPI_BUTTON) += button.o +obj-$(CONFIG_ACPI_TINY_POWER_BUTTON) += tiny-power-button.o obj-$(CONFIG_ACPI_FAN) += fan.o +fan-objs := fan_core.o +fan-objs += fan_attr.o +fan-$(CONFIG_HWMON) += fan_hwmon.o + obj-$(CONFIG_ACPI_VIDEO) += video.o obj-$(CONFIG_ACPI_TAD) += acpi_tad.o obj-$(CONFIG_ACPI_PCI_SLOT) += pci_slot.o obj-$(CONFIG_ACPI_PROCESSOR) += processor.o obj-$(CONFIG_ACPI) += container.o +obj-$(CONFIG_ACPI_THERMAL_LIB) += thermal_lib.o obj-$(CONFIG_ACPI_THERMAL) += thermal.o +obj-$(CONFIG_ACPI_PLATFORM_PROFILE) += platform_profile.o obj-$(CONFIG_ACPI_NFIT) += nfit/ +obj-$(CONFIG_ACPI_NHLT) += nhlt.o +obj-$(CONFIG_ACPI_NUMA) += numa/ obj-$(CONFIG_ACPI) += acpi_memhotplug.o obj-$(CONFIG_ACPI_HOTPLUG_IOAPIC) += ioapic.o obj-$(CONFIG_ACPI_BATTERY) += battery.o @@ -87,18 +102,17 @@ obj-$(CONFIG_ACPI_SBS) += sbshc.o obj-$(CONFIG_ACPI_SBS) += sbs.o obj-$(CONFIG_ACPI_HED) += hed.o obj-$(CONFIG_ACPI_EC_DEBUGFS) += ec_sys.o -obj-$(CONFIG_ACPI_CUSTOM_METHOD)+= custom_method.o obj-$(CONFIG_ACPI_BGRT) += bgrt.o obj-$(CONFIG_ACPI_CPPC_LIB) += cppc_acpi.o obj-$(CONFIG_ACPI_SPCR_TABLE) += spcr.o obj-$(CONFIG_ACPI_DEBUGGER_USER) += acpi_dbg.o obj-$(CONFIG_ACPI_PPTT) += pptt.o +obj-$(CONFIG_ACPI_PFRUT) += pfr_update.o pfr_telemetry.o # processor has its own "processor." 
module_param namespace -processor-y := processor_driver.o +processor-y := processor_driver.o processor_thermal.o processor-$(CONFIG_ACPI_PROCESSOR_IDLE) += processor_idle.o -processor-$(CONFIG_ACPI_CPU_FREQ_PSS) += processor_throttling.o \ - processor_thermal.o +processor-$(CONFIG_ACPI_CPU_FREQ_PSS) += processor_throttling.o processor-$(CONFIG_CPU_FREQ) += processor_perflib.o obj-$(CONFIG_ACPI_PROCESSOR_AGGREGATOR) += acpi_pad.o @@ -107,18 +121,16 @@ obj-$(CONFIG_ACPI_APEI) += apei/ obj-$(CONFIG_ACPI_EXTLOG) += acpi_extlog.o -obj-$(CONFIG_PMIC_OPREGION) += pmic/intel_pmic.o -obj-$(CONFIG_CRC_PMIC_OPREGION) += pmic/intel_pmic_crc.o -obj-$(CONFIG_XPOWER_PMIC_OPREGION) += pmic/intel_pmic_xpower.o -obj-$(CONFIG_BXT_WC_PMIC_OPREGION) += pmic/intel_pmic_bxtwc.o -obj-$(CONFIG_CHT_WC_PMIC_OPREGION) += pmic/intel_pmic_chtwc.o -obj-$(CONFIG_CHT_DC_TI_PMIC_OPREGION) += pmic/intel_pmic_chtdc_ti.o - obj-$(CONFIG_ACPI_CONFIGFS) += acpi_configfs.o -obj-$(CONFIG_TPS68470_PMIC_OPREGION) += pmic/tps68470_pmic.o +obj-y += pmic/ video-objs += acpi_video.o video_detect.o obj-y += dptf/ obj-$(CONFIG_ARM64) += arm64/ + +obj-$(CONFIG_ACPI_VIOT) += viot.o + +obj-$(CONFIG_RISCV) += riscv/ +obj-$(CONFIG_X86) += x86/ diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c index cdd3136829f1..1f69be8f51a2 100644 --- a/drivers/acpi/ac.c +++ b/drivers/acpi/ac.c @@ -1,24 +1,13 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /* - * acpi_ac.c - ACPI AC Adapter Driver ($Revision: 27 $) + * acpi_ac.c - ACPI AC Adapter Driver (Revision: 27) * * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com> * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com> - * - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or (at - * your option) any later version. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. 
- * - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ +#define pr_fmt(fmt) "ACPI: AC: " fmt + #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> @@ -26,17 +15,12 @@ #include <linux/types.h> #include <linux/dmi.h> #include <linux/delay.h> -#ifdef CONFIG_ACPI_PROCFS_POWER -#include <linux/proc_fs.h> -#include <linux/seq_file.h> -#endif #include <linux/platform_device.h> #include <linux/power_supply.h> +#include <linux/string_choices.h> #include <linux/acpi.h> #include <acpi/battery.h> -#define PREFIX "ACPI: " - #define ACPI_AC_CLASS "ac_adapter" #define ACPI_AC_DEVICE_NAME "AC Adapter" #define ACPI_AC_FILE_STATE "state" @@ -45,22 +29,14 @@ #define ACPI_AC_STATUS_ONLINE 0x01 #define ACPI_AC_STATUS_UNKNOWN 0xFF -#define _COMPONENT ACPI_AC_COMPONENT -ACPI_MODULE_NAME("ac"); - MODULE_AUTHOR("Paul Diefenbaugh"); MODULE_DESCRIPTION("ACPI AC Adapter Driver"); MODULE_LICENSE("GPL"); +static int acpi_ac_probe(struct platform_device *pdev); +static void acpi_ac_remove(struct platform_device *pdev); -static int acpi_ac_add(struct acpi_device *device); -static int acpi_ac_remove(struct acpi_device *device); -static void acpi_ac_notify(struct acpi_device *device, u32 event); - -struct acpi_ac_bl { - const char *hid; - int hrv; -}; +static void acpi_ac_notify(acpi_handle handle, u32 event, void *data); static const struct acpi_device_id ac_device_ids[] = { {"ACPI0003", 0}, @@ -68,53 +44,25 @@ static const struct acpi_device_id ac_device_ids[] = { }; MODULE_DEVICE_TABLE(acpi, ac_device_ids); -/* Lists of PMIC ACPI HIDs with an (often better) native charger driver */ -static const struct acpi_ac_bl acpi_ac_blacklist[] = { - { "INT33F4", -1 }, /* X-Powers AXP288 PMIC */ - { "INT34D3", 3 }, /* Intel Cherrytrail Whiskey Cove PMIC */ -}; - #ifdef CONFIG_PM_SLEEP static int acpi_ac_resume(struct device *dev); #endif static SIMPLE_DEV_PM_OPS(acpi_ac_pm, NULL, acpi_ac_resume); -#ifdef CONFIG_ACPI_PROCFS_POWER -extern struct proc_dir_entry *acpi_lock_ac_dir(void); -extern void *acpi_unlock_ac_dir(struct proc_dir_entry *acpi_ac_dir); -#endif - - static int ac_sleep_before_get_state_ms; -static int ac_check_pmic = 1; - -static struct acpi_driver acpi_ac_driver = { - .name = "ac", - .class = ACPI_AC_CLASS, - .ids = ac_device_ids, - .flags = ACPI_DRIVER_ALL_NOTIFY_EVENTS, - .ops = { - .add = acpi_ac_add, - .remove = acpi_ac_remove, - .notify = acpi_ac_notify, - }, - .drv.pm = &acpi_ac_pm, -}; +static int ac_only; struct acpi_ac { struct power_supply *charger; struct power_supply_desc charger_desc; - struct acpi_device * device; + struct acpi_device *device; unsigned long long state; struct notifier_block battery_nb; }; #define to_acpi_ac(x) power_supply_get_drvdata(x) -/* -------------------------------------------------------------------------- - AC Adapter Management - -------------------------------------------------------------------------- */ - +/* AC Adapter Management */ static int acpi_ac_get_state(struct acpi_ac *ac) { acpi_status status = AE_OK; @@ -122,11 +70,17 @@ static int acpi_ac_get_state(struct acpi_ac *ac) if (!ac) return -EINVAL; + if (ac_only) { + ac->state = 1; + return 0; + } + status = acpi_evaluate_integer(ac->device->handle, "_PSR", NULL, &ac->state); if (ACPI_FAILURE(status)) { - ACPI_EXCEPTION((AE_INFO, status, - "Error reading AC Adapter state")); + acpi_handle_info(ac->device->handle, + "Error reading AC Adapter state: %s\n", + acpi_format_exception(status)); ac->state = ACPI_AC_STATUS_UNKNOWN; return -ENODEV; } @@ -134,9 +88,7 @@ 
static int acpi_ac_get_state(struct acpi_ac *ac) return 0; } -/* -------------------------------------------------------------------------- - sysfs I/F - -------------------------------------------------------------------------- */ +/* sysfs I/F */ static int get_ac_property(struct power_supply *psy, enum power_supply_property psp, union power_supply_propval *val) @@ -156,100 +108,25 @@ static int get_ac_property(struct power_supply *psy, default: return -EINVAL; } + return 0; } -static enum power_supply_property ac_props[] = { +static const enum power_supply_property ac_props[] = { POWER_SUPPLY_PROP_ONLINE, }; -#ifdef CONFIG_ACPI_PROCFS_POWER -/* -------------------------------------------------------------------------- - FS Interface (/proc) - -------------------------------------------------------------------------- */ - -static struct proc_dir_entry *acpi_ac_dir; - -static int acpi_ac_seq_show(struct seq_file *seq, void *offset) +/* Driver Model */ +static void acpi_ac_notify(acpi_handle handle, u32 event, void *data) { - struct acpi_ac *ac = seq->private; - - - if (!ac) - return 0; - - if (acpi_ac_get_state(ac)) { - seq_puts(seq, "ERROR: Unable to read AC Adapter state\n"); - return 0; - } - - seq_puts(seq, "state: "); - switch (ac->state) { - case ACPI_AC_STATUS_OFFLINE: - seq_puts(seq, "off-line\n"); - break; - case ACPI_AC_STATUS_ONLINE: - seq_puts(seq, "on-line\n"); - break; - default: - seq_puts(seq, "unknown\n"); - break; - } - - return 0; -} - -static int acpi_ac_add_fs(struct acpi_ac *ac) -{ - struct proc_dir_entry *entry = NULL; - - printk(KERN_WARNING PREFIX "Deprecated procfs I/F for AC is loaded," - " please retry with CONFIG_ACPI_PROCFS_POWER cleared\n"); - if (!acpi_device_dir(ac->device)) { - acpi_device_dir(ac->device) = - proc_mkdir(acpi_device_bid(ac->device), acpi_ac_dir); - if (!acpi_device_dir(ac->device)) - return -ENODEV; - } - - /* 'state' [R] */ - entry = proc_create_single_data(ACPI_AC_FILE_STATE, S_IRUGO, - acpi_device_dir(ac->device), acpi_ac_seq_show, ac); - if (!entry) - return -ENODEV; - return 0; -} - -static int acpi_ac_remove_fs(struct acpi_ac *ac) -{ - - if (acpi_device_dir(ac->device)) { - remove_proc_entry(ACPI_AC_FILE_STATE, - acpi_device_dir(ac->device)); - remove_proc_entry(acpi_device_bid(ac->device), acpi_ac_dir); - acpi_device_dir(ac->device) = NULL; - } - - return 0; -} -#endif - -/* -------------------------------------------------------------------------- - Driver Model - -------------------------------------------------------------------------- */ - -static void acpi_ac_notify(struct acpi_device *device, u32 event) -{ - struct acpi_ac *ac = acpi_driver_data(device); - - if (!ac) - return; + struct acpi_ac *ac = data; + struct acpi_device *adev = ac->device; switch (event) { default: - ACPI_DEBUG_PRINT((ACPI_DB_INFO, - "Unsupported event [0x%x]\n", event)); - /* fall through */ + acpi_handle_debug(adev->handle, "Unsupported event [0x%x]\n", + event); + fallthrough; case ACPI_AC_NOTIFY_STATUS: case ACPI_NOTIFY_BUS_CHECK: case ACPI_NOTIFY_DEVICE_CHECK: @@ -264,14 +141,12 @@ static void acpi_ac_notify(struct acpi_device *device, u32 event) msleep(ac_sleep_before_get_state_ms); acpi_ac_get_state(ac); - acpi_bus_generate_netlink_event(device->pnp.device_class, - dev_name(&device->dev), event, + acpi_bus_generate_netlink_event(adev->pnp.device_class, + dev_name(&adev->dev), event, (u32) ac->state); - acpi_notifier_call_chain(device, event, (u32) ac->state); - kobject_uevent(&ac->charger->dev.kobj, KOBJ_CHANGE); + acpi_notifier_call_chain(adev, 
event, (u32) ac->state); + power_supply_changed(ac->charger); } - - return; } static int acpi_ac_battery_notify(struct notifier_block *nb, @@ -283,7 +158,7 @@ static int acpi_ac_battery_notify(struct notifier_block *nb, /* * On HP Pavilion dv6-6179er AC status notifications aren't triggered * when adapter is plugged/unplugged. However, battery status - * notifcations are triggered when battery starts charging or + * notifications are triggered when battery starts charging or * discharging. Re-reading AC status triggers lost AC notifications, * if AC status has changed. */ @@ -300,95 +175,85 @@ static int __init thinkpad_e530_quirk(const struct dmi_system_id *d) return 0; } -static int __init ac_do_not_check_pmic_quirk(const struct dmi_system_id *d) +static int __init ac_only_quirk(const struct dmi_system_id *d) { - ac_check_pmic = 0; + ac_only = 1; return 0; } +/* Please keep this list alphabetically sorted */ static const struct dmi_system_id ac_dmi_table[] __initconst = { { - /* Thinkpad e530 */ - .callback = thinkpad_e530_quirk, - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), - DMI_MATCH(DMI_PRODUCT_NAME, "32597CG"), - }, - }, - { - /* ECS EF20EA */ - .callback = ac_do_not_check_pmic_quirk, + /* Kodlix GK45 returning incorrect state */ + .callback = ac_only_quirk, .matches = { - DMI_MATCH(DMI_PRODUCT_NAME, "EF20EA"), + DMI_MATCH(DMI_PRODUCT_NAME, "GK45"), }, }, { - /* Lenovo Ideapad Miix 320 */ - .callback = ac_do_not_check_pmic_quirk, + /* Lenovo Thinkpad e530, see comment in acpi_ac_notify() */ + .callback = thinkpad_e530_quirk, .matches = { - DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"), - DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "80XF"), - DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Lenovo MIIX 320-10ICR"), + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "32597CG"), }, }, {}, }; -static int acpi_ac_add(struct acpi_device *device) +static int acpi_ac_probe(struct platform_device *pdev) { + struct acpi_device *adev = ACPI_COMPANION(&pdev->dev); struct power_supply_config psy_cfg = {}; - int result = 0; - struct acpi_ac *ac = NULL; - - - if (!device) - return -EINVAL; + struct acpi_ac *ac; + int result; ac = kzalloc(sizeof(struct acpi_ac), GFP_KERNEL); if (!ac) return -ENOMEM; - ac->device = device; - strcpy(acpi_device_name(device), ACPI_AC_DEVICE_NAME); - strcpy(acpi_device_class(device), ACPI_AC_CLASS); - device->driver_data = ac; + ac->device = adev; + strscpy(acpi_device_name(adev), ACPI_AC_DEVICE_NAME); + strscpy(acpi_device_class(adev), ACPI_AC_CLASS); + + platform_set_drvdata(pdev, ac); result = acpi_ac_get_state(ac); if (result) - goto end; + goto err_release_ac; psy_cfg.drv_data = ac; - ac->charger_desc.name = acpi_device_bid(device); -#ifdef CONFIG_ACPI_PROCFS_POWER - result = acpi_ac_add_fs(ac); - if (result) - goto end; -#endif + ac->charger_desc.name = acpi_device_bid(adev); ac->charger_desc.type = POWER_SUPPLY_TYPE_MAINS; ac->charger_desc.properties = ac_props; ac->charger_desc.num_properties = ARRAY_SIZE(ac_props); ac->charger_desc.get_property = get_ac_property; - ac->charger = power_supply_register(&ac->device->dev, + ac->charger = power_supply_register(&pdev->dev, &ac->charger_desc, &psy_cfg); if (IS_ERR(ac->charger)) { result = PTR_ERR(ac->charger); - goto end; + goto err_release_ac; } - printk(KERN_INFO PREFIX "%s [%s] (%s)\n", - acpi_device_name(device), acpi_device_bid(device), - ac->state ? 
"on-line" : "off-line"); + pr_info("%s [%s] (%s-line)\n", acpi_device_name(adev), + acpi_device_bid(adev), str_on_off(ac->state)); ac->battery_nb.notifier_call = acpi_ac_battery_notify; register_acpi_notifier(&ac->battery_nb); -end: - if (result) { -#ifdef CONFIG_ACPI_PROCFS_POWER - acpi_ac_remove_fs(ac); -#endif - kfree(ac); - } + + result = acpi_dev_install_notify_handler(adev, ACPI_ALL_NOTIFY, + acpi_ac_notify, ac); + if (result) + goto err_unregister; + + return 0; + +err_unregister: + power_supply_unregister(ac->charger); + unregister_acpi_notifier(&ac->battery_nb); +err_release_ac: + kfree(ac); return result; } @@ -396,93 +261,65 @@ end: #ifdef CONFIG_PM_SLEEP static int acpi_ac_resume(struct device *dev) { - struct acpi_ac *ac; - unsigned old_state; - - if (!dev) - return -EINVAL; - - ac = acpi_driver_data(to_acpi_device(dev)); - if (!ac) - return -EINVAL; + struct acpi_ac *ac = dev_get_drvdata(dev); + unsigned int old_state; old_state = ac->state; if (acpi_ac_get_state(ac)) return 0; if (old_state != ac->state) - kobject_uevent(&ac->charger->dev.kobj, KOBJ_CHANGE); + power_supply_changed(ac->charger); + return 0; } #else #define acpi_ac_resume NULL #endif -static int acpi_ac_remove(struct acpi_device *device) +static void acpi_ac_remove(struct platform_device *pdev) { - struct acpi_ac *ac = NULL; - - - if (!device || !acpi_driver_data(device)) - return -EINVAL; - - ac = acpi_driver_data(device); + struct acpi_ac *ac = platform_get_drvdata(pdev); + acpi_dev_remove_notify_handler(ac->device, ACPI_ALL_NOTIFY, + acpi_ac_notify); power_supply_unregister(ac->charger); unregister_acpi_notifier(&ac->battery_nb); -#ifdef CONFIG_ACPI_PROCFS_POWER - acpi_ac_remove_fs(ac); -#endif - kfree(ac); - - return 0; } +static struct platform_driver acpi_ac_driver = { + .probe = acpi_ac_probe, + .remove = acpi_ac_remove, + .driver = { + .name = "ac", + .acpi_match_table = ac_device_ids, + .pm = &acpi_ac_pm, + }, +}; + static int __init acpi_ac_init(void) { - unsigned int i; int result; if (acpi_disabled) return -ENODEV; - dmi_check_system(ac_dmi_table); - - if (ac_check_pmic) { - for (i = 0; i < ARRAY_SIZE(acpi_ac_blacklist); i++) - if (acpi_dev_present(acpi_ac_blacklist[i].hid, "1", - acpi_ac_blacklist[i].hrv)) { - pr_info(PREFIX "AC: found native %s PMIC, not loading\n", - acpi_ac_blacklist[i].hid); - return -ENODEV; - } - } - -#ifdef CONFIG_ACPI_PROCFS_POWER - acpi_ac_dir = acpi_lock_ac_dir(); - if (!acpi_ac_dir) + if (acpi_quirk_skip_acpi_ac_and_battery()) return -ENODEV; -#endif + dmi_check_system(ac_dmi_table); - result = acpi_bus_register_driver(&acpi_ac_driver); - if (result < 0) { -#ifdef CONFIG_ACPI_PROCFS_POWER - acpi_unlock_ac_dir(acpi_ac_dir); -#endif + result = platform_driver_register(&acpi_ac_driver); + if (result < 0) return -ENODEV; - } return 0; } static void __exit acpi_ac_exit(void) { - acpi_bus_unregister_driver(&acpi_ac_driver); -#ifdef CONFIG_ACPI_PROCFS_POWER - acpi_unlock_ac_dir(acpi_ac_dir); -#endif + platform_driver_unregister(&acpi_ac_driver); } module_init(acpi_ac_init); module_exit(acpi_ac_exit); diff --git a/drivers/acpi/acpi_apd.c b/drivers/acpi/acpi_apd.c index ddf598ae8b6b..49539f7528c6 100644 --- a/drivers/acpi/acpi_apd.c +++ b/drivers/acpi/acpi_apd.c @@ -1,47 +1,34 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * AMD ACPI support for ACPI2platform device. * * Copyright (c) 2014,2015 AMD Corporation. 
* Authors: Ken Xue <Ken.Xue@amd.com> * Wu, Jeff <Jeff.Wu@amd.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. */ -#include <linux/clk-provider.h> -#include <linux/platform_data/clk-st.h> -#include <linux/platform_device.h> -#include <linux/pm_domain.h> -#include <linux/clkdev.h> #include <linux/acpi.h> +#include <linux/clkdev.h> +#include <linux/clk-provider.h> #include <linux/err.h> -#include <linux/pm.h> +#include <linux/io.h> +#include <linux/platform_data/clk-fch.h> +#include <linux/platform_device.h> #include "internal.h" -ACPI_MODULE_NAME("acpi_apd"); struct apd_private_data; /** - * ACPI_APD_SYSFS : add device attributes in sysfs - * ACPI_APD_PM : attach power domain to device - */ -#define ACPI_APD_SYSFS BIT(0) -#define ACPI_APD_PM BIT(1) - -/** * struct apd_device_desc - a descriptor for apd device - * @flags: device flags like %ACPI_APD_SYSFS, %ACPI_APD_PM * @fixed_clk_rate: fixed rate input clock source for acpi device; * 0 means no fixed rate input clock source + * @properties: build-in properties of the device such as UART * @setup: a hook routine to set device resource during create platform device * * Device description defined as acpi_device_id.driver_data */ struct apd_device_desc { - unsigned int flags; unsigned int fixed_clk_rate; struct property_entry *properties; int (*setup)(struct apd_private_data *pdata); @@ -59,7 +46,7 @@ struct apd_private_data { static int acpi_apd_setup(struct apd_private_data *pdata) { const struct apd_device_desc *dev_desc = pdata->dev_desc; - struct clk *clk = ERR_PTR(-ENODEV); + struct clk *clk; if (dev_desc->fixed_clk_rate) { clk = clk_register_fixed_rate(&pdata->adev->dev, @@ -74,18 +61,12 @@ static int acpi_apd_setup(struct apd_private_data *pdata) #ifdef CONFIG_X86_AMD_PLATFORM_DEVICE -static int misc_check_res(struct acpi_resource *ares, void *data) -{ - struct resource res; - - return !acpi_dev_resource_memory(ares, &res); -} - -static int st_misc_setup(struct apd_private_data *pdata) +static int fch_misc_setup(struct apd_private_data *pdata) { struct acpi_device *adev = pdata->adev; + const union acpi_object *obj; struct platform_device *clkdev; - struct st_clk_data *clk_data; + struct fch_clk_data *clk_data; struct resource_entry *rentry; struct list_head resource_list; int ret; @@ -95,20 +76,33 @@ static int st_misc_setup(struct apd_private_data *pdata) return -ENOMEM; INIT_LIST_HEAD(&resource_list); - ret = acpi_dev_get_resources(adev, &resource_list, misc_check_res, - NULL); + ret = acpi_dev_get_memory_resources(adev, &resource_list); if (ret < 0) return -ENOENT; + if (!acpi_dev_get_property(adev, "clk-name", ACPI_TYPE_STRING, &obj)) { + clk_data->name = devm_kzalloc(&adev->dev, obj->string.length, + GFP_KERNEL); + if (!clk_data->name) + return -ENOMEM; + + strscpy(clk_data->name, obj->string.pointer, obj->string.length); + } else { + /* Set default name to mclk if entry missing in firmware */ + clk_data->name = "mclk"; + } + list_for_each_entry(rentry, &resource_list, node) { clk_data->base = devm_ioremap(&adev->dev, rentry->res->start, resource_size(rentry->res)); break; } + if (!clk_data->base) + return -ENOMEM; acpi_dev_free_resource_list(&resource_list); - clkdev = platform_device_register_data(&adev->dev, "clk-st", + clkdev = platform_device_register_data(&adev->dev, "clk-fch", PLATFORM_DEVID_NONE, clk_data, sizeof(*clk_data)); return PTR_ERR_OR_ZERO(clkdev); @@ -124,6 +118,11 
@@ static const struct apd_device_desc wt_i2c_desc = { .fixed_clk_rate = 150000000, }; +static const struct apd_device_desc wt_i3c_desc = { + .setup = acpi_apd_setup, + .fixed_clk_rate = 125000000, +}; + static struct property_entry uart_properties[] = { PROPERTY_ENTRY_U32("reg-io-width", 4), PROPERTY_ENTRY_U32("reg-shift", 2), @@ -137,10 +136,10 @@ static const struct apd_device_desc cz_uart_desc = { .properties = uart_properties, }; -static const struct apd_device_desc st_misc_desc = { - .setup = st_misc_setup, +static const struct apd_device_desc fch_misc_desc = { + .setup = fch_misc_setup, }; -#endif +#endif /* CONFIG_X86_AMD_PLATFORM_DEVICE */ #ifdef CONFIG_ARM64 static const struct apd_device_desc xgene_i2c_desc = { @@ -162,27 +161,34 @@ static const struct apd_device_desc hip08_i2c_desc = { .setup = acpi_apd_setup, .fixed_clk_rate = 250000000, }; + +static const struct apd_device_desc hip08_lite_i2c_desc = { + .setup = acpi_apd_setup, + .fixed_clk_rate = 125000000, +}; + static const struct apd_device_desc thunderx2_i2c_desc = { .setup = acpi_apd_setup, .fixed_clk_rate = 125000000, }; +static const struct apd_device_desc nxp_i2c_desc = { + .setup = acpi_apd_setup, + .fixed_clk_rate = 350000000, +}; + static const struct apd_device_desc hip08_spi_desc = { .setup = acpi_apd_setup, .fixed_clk_rate = 250000000, }; -#endif +#endif /* CONFIG_ARM64 */ -#else - -#define APD_ADDR(desc) (0UL) - -#endif /* CONFIG_X86_AMD_PLATFORM_DEVICE */ +#endif -/** -* Create platform device during acpi scan attach handle. -* Return value > 0 on success of creating device. -*/ +/* + * Create platform device during acpi scan attach handle. + * Return value > 0 on success of creating device. + */ static int acpi_apd_create_device(struct acpi_device *adev, const struct acpi_device_id *id) { @@ -226,11 +232,15 @@ static const struct acpi_device_id acpi_apd_device_ids[] = { /* Generic apd devices */ #ifdef CONFIG_X86_AMD_PLATFORM_DEVICE { "AMD0010", APD_ADDR(cz_i2c_desc) }, - { "AMDI0010", APD_ADDR(wt_i2c_desc) }, { "AMD0020", APD_ADDR(cz_uart_desc) }, - { "AMDI0020", APD_ADDR(cz_uart_desc) }, { "AMD0030", }, - { "AMD0040", APD_ADDR(st_misc_desc)}, + { "AMD0040", APD_ADDR(fch_misc_desc)}, + { "AMDI0010", APD_ADDR(wt_i2c_desc) }, + { "AMDI0015", APD_ADDR(wt_i3c_desc) }, + { "AMDI0019", APD_ADDR(wt_i2c_desc) }, + { "AMDI0020", APD_ADDR(cz_uart_desc) }, + { "AMDI0022", APD_ADDR(cz_uart_desc) }, + { "HYGO0010", APD_ADDR(wt_i2c_desc) }, #endif #ifdef CONFIG_ARM64 { "APMC0D0F", APD_ADDR(xgene_i2c_desc) }, @@ -239,7 +249,9 @@ static const struct acpi_device_id acpi_apd_device_ids[] = { { "CAV9007", APD_ADDR(thunderx2_i2c_desc) }, { "HISI02A1", APD_ADDR(hip07_i2c_desc) }, { "HISI02A2", APD_ADDR(hip08_i2c_desc) }, + { "HISI02A3", APD_ADDR(hip08_lite_i2c_desc) }, { "HISI0173", APD_ADDR(hip08_spi_desc) }, + { "NXP0001", APD_ADDR(nxp_i2c_desc) }, #endif { } }; diff --git a/drivers/acpi/acpi_configfs.c b/drivers/acpi/acpi_configfs.c index b58850389094..c970792b11a4 100644 --- a/drivers/acpi/acpi_configfs.c +++ b/drivers/acpi/acpi_configfs.c @@ -1,11 +1,8 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * ACPI configfs support * * Copyright (c) 2016 Intel Corporation - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published by - * the Free Software Foundation. 
*/ #define pr_fmt(fmt) "ACPI configfs: " fmt @@ -14,9 +11,7 @@ #include <linux/module.h> #include <linux/configfs.h> #include <linux/acpi.h> - -#include "acpica/accommon.h" -#include "acpica/actables.h" +#include <linux/security.h> static struct config_group *acpi_table_group; @@ -31,7 +26,10 @@ static ssize_t acpi_table_aml_write(struct config_item *cfg, { const struct acpi_table_header *header = data; struct acpi_table *table; - int ret; + int ret = security_locked_down(LOCKDOWN_ACPI_TABLES); + + if (ret) + return ret; table = container_of(cfg, struct acpi_table, cfg); @@ -56,11 +54,7 @@ static ssize_t acpi_table_aml_write(struct config_item *cfg, if (!table->header) return -ENOMEM; - ACPI_INFO(("Host-directed Dynamic ACPI Table Load:")); - ret = acpi_tb_install_and_load_table( - ACPI_PTR_TO_PHYSADDR(table->header), - ACPI_TABLE_ORIGIN_EXTERNAL_VIRTUAL, FALSE, - &table->index); + ret = acpi_load_table(table->header, &table->index); if (ret) { kfree(table->header); table->header = NULL; @@ -76,7 +70,7 @@ static inline struct acpi_table_header *get_header(struct config_item *cfg) if (!table->header) pr_err("table not loaded\n"); - return table->header; + return table->header ?: ERR_PTR(-EINVAL); } static ssize_t acpi_table_aml_read(struct config_item *cfg, @@ -84,8 +78,8 @@ static ssize_t acpi_table_aml_read(struct config_item *cfg, { struct acpi_table_header *h = get_header(cfg); - if (!h) - return -EINVAL; + if (IS_ERR(h)) + return PTR_ERR(h); if (data) memcpy(data, h, h->length); @@ -97,90 +91,91 @@ static ssize_t acpi_table_aml_read(struct config_item *cfg, CONFIGFS_BIN_ATTR(acpi_table_, aml, NULL, MAX_ACPI_TABLE_SIZE); -struct configfs_bin_attribute *acpi_table_bin_attrs[] = { +static struct configfs_bin_attribute *acpi_table_bin_attrs[] = { &acpi_table_attr_aml, NULL, }; -ssize_t acpi_table_signature_show(struct config_item *cfg, char *str) +static ssize_t acpi_table_signature_show(struct config_item *cfg, char *str) { struct acpi_table_header *h = get_header(cfg); - if (!h) - return -EINVAL; + if (IS_ERR(h)) + return PTR_ERR(h); - return sprintf(str, "%.*s\n", ACPI_NAME_SIZE, h->signature); + return sysfs_emit(str, "%.*s\n", ACPI_NAMESEG_SIZE, h->signature); } -ssize_t acpi_table_length_show(struct config_item *cfg, char *str) +static ssize_t acpi_table_length_show(struct config_item *cfg, char *str) { struct acpi_table_header *h = get_header(cfg); - if (!h) - return -EINVAL; + if (IS_ERR(h)) + return PTR_ERR(h); - return sprintf(str, "%d\n", h->length); + return sysfs_emit(str, "%d\n", h->length); } -ssize_t acpi_table_revision_show(struct config_item *cfg, char *str) +static ssize_t acpi_table_revision_show(struct config_item *cfg, char *str) { struct acpi_table_header *h = get_header(cfg); - if (!h) - return -EINVAL; + if (IS_ERR(h)) + return PTR_ERR(h); - return sprintf(str, "%d\n", h->revision); + return sysfs_emit(str, "%d\n", h->revision); } -ssize_t acpi_table_oem_id_show(struct config_item *cfg, char *str) +static ssize_t acpi_table_oem_id_show(struct config_item *cfg, char *str) { struct acpi_table_header *h = get_header(cfg); - if (!h) - return -EINVAL; + if (IS_ERR(h)) + return PTR_ERR(h); - return sprintf(str, "%.*s\n", ACPI_OEM_ID_SIZE, h->oem_id); + return sysfs_emit(str, "%.*s\n", ACPI_OEM_ID_SIZE, h->oem_id); } -ssize_t acpi_table_oem_table_id_show(struct config_item *cfg, char *str) +static ssize_t acpi_table_oem_table_id_show(struct config_item *cfg, char *str) { struct acpi_table_header *h = get_header(cfg); - if (!h) - return -EINVAL; + if (IS_ERR(h)) + return 
PTR_ERR(h); - return sprintf(str, "%.*s\n", ACPI_OEM_TABLE_ID_SIZE, h->oem_table_id); + return sysfs_emit(str, "%.*s\n", ACPI_OEM_TABLE_ID_SIZE, h->oem_table_id); } -ssize_t acpi_table_oem_revision_show(struct config_item *cfg, char *str) +static ssize_t acpi_table_oem_revision_show(struct config_item *cfg, char *str) { struct acpi_table_header *h = get_header(cfg); - if (!h) - return -EINVAL; + if (IS_ERR(h)) + return PTR_ERR(h); - return sprintf(str, "%d\n", h->oem_revision); + return sysfs_emit(str, "%d\n", h->oem_revision); } -ssize_t acpi_table_asl_compiler_id_show(struct config_item *cfg, char *str) +static ssize_t acpi_table_asl_compiler_id_show(struct config_item *cfg, + char *str) { struct acpi_table_header *h = get_header(cfg); - if (!h) - return -EINVAL; + if (IS_ERR(h)) + return PTR_ERR(h); - return sprintf(str, "%.*s\n", ACPI_NAME_SIZE, h->asl_compiler_id); + return sysfs_emit(str, "%.*s\n", ACPI_NAMESEG_SIZE, h->asl_compiler_id); } -ssize_t acpi_table_asl_compiler_revision_show(struct config_item *cfg, - char *str) +static ssize_t acpi_table_asl_compiler_revision_show(struct config_item *cfg, + char *str) { struct acpi_table_header *h = get_header(cfg); - if (!h) - return -EINVAL; + if (IS_ERR(h)) + return PTR_ERR(h); - return sprintf(str, "%d\n", h->asl_compiler_revision); + return sysfs_emit(str, "%d\n", h->asl_compiler_revision); } CONFIGFS_ATTR_RO(acpi_table_, signature); @@ -192,7 +187,7 @@ CONFIGFS_ATTR_RO(acpi_table_, oem_revision); CONFIGFS_ATTR_RO(acpi_table_, asl_compiler_id); CONFIGFS_ATTR_RO(acpi_table_, asl_compiler_revision); -struct configfs_attribute *acpi_table_attrs[] = { +static struct configfs_attribute *acpi_table_attrs[] = { &acpi_table_attr_signature, &acpi_table_attr_length, &acpi_table_attr_revision, @@ -228,11 +223,12 @@ static void acpi_table_drop_item(struct config_group *group, { struct acpi_table *table = container_of(cfg, struct acpi_table, cfg); - ACPI_INFO(("Host-directed Dynamic ACPI Table Unload")); - acpi_tb_unload_table(table->index); + pr_debug("Host-directed Dynamic ACPI Table Unload\n"); + acpi_unload_table(table->index); + config_item_put(cfg); } -struct configfs_group_operations acpi_table_group_ops = { +static struct configfs_group_operations acpi_table_group_ops = { .make_item = acpi_table_make_item, .drop_item = acpi_table_drop_item, }; @@ -269,7 +265,12 @@ static int __init acpi_configfs_init(void) acpi_table_group = configfs_register_default_group(root, "table", &acpi_tables_type); - return PTR_ERR_OR_ZERO(acpi_table_group); + if (IS_ERR(acpi_table_group)) { + configfs_unregister_subsystem(&acpi_configfs); + return PTR_ERR(acpi_table_group); + } + + return 0; } module_init(acpi_configfs_init); diff --git a/drivers/acpi/acpi_dbg.c b/drivers/acpi/acpi_dbg.c index a2dcd62ea32f..515b20d0b698 100644 --- a/drivers/acpi/acpi_dbg.c +++ b/drivers/acpi/acpi_dbg.c @@ -1,12 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * ACPI AML interfacing support * * Copyright (C) 2015, Intel Corporation * Authors: Lv Zheng <lv.zheng@intel.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. */ /* #define DEBUG */ @@ -120,13 +117,6 @@ static inline bool __acpi_aml_busy(void) return false; } -static inline bool __acpi_aml_opened(void) -{ - if (acpi_aml_io.flags & ACPI_AML_OPEN) - return true; - return false; -} - static inline bool __acpi_aml_used(void) { return acpi_aml_io.usages ? 
true : false; @@ -390,7 +380,7 @@ again: return size > 0 ? size : ret; } -static int acpi_aml_thread(void *unsed) +static int acpi_aml_thread(void *unused) { acpi_osd_exec_callback function = NULL; void *context; @@ -579,11 +569,11 @@ static int acpi_aml_release(struct inode *inode, struct file *file) return 0; } -static int acpi_aml_read_user(char __user *buf, int len) +static ssize_t acpi_aml_read_user(char __user *buf, size_t len) { - int ret; struct circ_buf *crc = &acpi_aml_io.out_crc; - int n; + ssize_t ret; + size_t n; char *p; ret = acpi_aml_lock_read(crc, ACPI_AML_OUT_USER); @@ -592,7 +582,7 @@ static int acpi_aml_read_user(char __user *buf, int len) /* sync head before removing logs */ smp_rmb(); p = &crc->buf[crc->tail]; - n = min(len, circ_count_to_end(crc)); + n = min_t(size_t, len, circ_count_to_end(crc)); if (copy_to_user(buf, p, n)) { ret = -EFAULT; goto out; @@ -609,8 +599,8 @@ out: static ssize_t acpi_aml_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { - int ret = 0; - int size = 0; + ssize_t ret = 0; + ssize_t size = 0; if (!count) return 0; @@ -649,11 +639,11 @@ again: return size > 0 ? size : ret; } -static int acpi_aml_write_user(const char __user *buf, int len) +static ssize_t acpi_aml_write_user(const char __user *buf, size_t len) { - int ret; struct circ_buf *crc = &acpi_aml_io.in_crc; - int n; + ssize_t ret; + size_t n; char *p; ret = acpi_aml_lock_write(crc, ACPI_AML_IN_USER); @@ -662,7 +652,7 @@ static int acpi_aml_write_user(const char __user *buf, int len) /* sync tail before inserting cmds */ smp_mb(); p = &crc->buf[crc->head]; - n = min(len, circ_space_to_end(crc)); + n = min_t(size_t, len, circ_space_to_end(crc)); if (copy_from_user(p, buf, n)) { ret = -EFAULT; goto out; @@ -673,14 +663,14 @@ static int acpi_aml_write_user(const char __user *buf, int len) ret = n; out: acpi_aml_unlock_fifo(ACPI_AML_IN_USER, ret >= 0); - return n; + return ret; } static ssize_t acpi_aml_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { - int ret = 0; - int size = 0; + ssize_t ret = 0; + ssize_t size = 0; if (!count) return 0; @@ -748,50 +738,41 @@ static const struct acpi_debugger_ops acpi_aml_debugger = { .notify_command_complete = acpi_aml_notify_command_complete, }; -int __init acpi_aml_init(void) +static int __init acpi_aml_init(void) { - int ret = 0; + int ret; - if (!acpi_debugfs_dir) { - ret = -ENOENT; - goto err_exit; - } + if (acpi_disabled) + return -ENODEV; /* Initialize AML IO interface */ mutex_init(&acpi_aml_io.lock); init_waitqueue_head(&acpi_aml_io.wait); acpi_aml_io.out_crc.buf = acpi_aml_io.out_buf; acpi_aml_io.in_crc.buf = acpi_aml_io.in_buf; + acpi_aml_dentry = debugfs_create_file("acpidbg", S_IFREG | S_IRUGO | S_IWUSR, acpi_debugfs_dir, NULL, &acpi_aml_operations); - if (acpi_aml_dentry == NULL) { - ret = -ENODEV; - goto err_exit; - } - ret = acpi_register_debugger(THIS_MODULE, &acpi_aml_debugger); - if (ret) - goto err_fs; - acpi_aml_initialized = true; -err_fs: + ret = acpi_register_debugger(THIS_MODULE, &acpi_aml_debugger); if (ret) { debugfs_remove(acpi_aml_dentry); acpi_aml_dentry = NULL; + return ret; } -err_exit: - return ret; + + acpi_aml_initialized = true; + return 0; } -void __exit acpi_aml_exit(void) +static void __exit acpi_aml_exit(void) { if (acpi_aml_initialized) { acpi_unregister_debugger(&acpi_aml_debugger); - if (acpi_aml_dentry) { - debugfs_remove(acpi_aml_dentry); - acpi_aml_dentry = NULL; - } + debugfs_remove(acpi_aml_dentry); + acpi_aml_dentry = NULL; acpi_aml_initialized = 
false; } } diff --git a/drivers/acpi/acpi_extlog.c b/drivers/acpi/acpi_extlog.c index 560fdae8cc59..f6b9562779de 100644 --- a/drivers/acpi/acpi_extlog.c +++ b/drivers/acpi/acpi_extlog.c @@ -1,10 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * Extended Error Log driver * * Copyright (C) 2013 Intel Corp. * Author: Chen, Gong <gong.chen@intel.com> - * - * This file is licensed under GPLv2. */ #include <linux/module.h> @@ -13,8 +12,10 @@ #include <linux/ratelimit.h> #include <linux/edac.h> #include <linux/ras.h> +#include <acpi/ghes.h> #include <asm/cpu.h> #include <asm/mce.h> +#include <asm/msr.h> #include "apei/apei-internal.h" #include <ras/ras_event.h> @@ -43,8 +44,6 @@ struct extlog_l1_head { u8 rev1[12]; }; -static int old_edac_report_status; - static u8 extlog_dsm_uuid[] __initdata = "663E35AF-CC10-41A4-88EA-5470AF055295"; /* L1 table related physical address */ @@ -141,15 +140,20 @@ static int extlog_print(struct notifier_block *nb, unsigned long val, int cpu = mce->extcpu; struct acpi_hest_generic_status *estatus, *tmp; struct acpi_hest_generic_data *gdata; - const guid_t *fru_id = &guid_null; - char *fru_text = ""; + const guid_t *fru_id; + char *fru_text; guid_t *sec_type; static u32 err_seq; estatus = extlog_elog_entry_check(cpu, bank); - if (estatus == NULL) + if (!estatus) return NOTIFY_DONE; + if (mce->kflags & MCE_HANDLED_CEC) { + estatus->block_status = 0; + return NOTIFY_DONE; + } + memcpy(elog_buf, (void *)estatus, ELOG_ENTRY_LEN); /* clear record status to enable BIOS to update it again */ estatus->block_status = 0; @@ -163,21 +167,28 @@ static int extlog_print(struct notifier_block *nb, unsigned long val, /* log event via trace */ err_seq++; - gdata = (struct acpi_hest_generic_data *)(tmp + 1); - if (gdata->validation_bits & CPER_SEC_VALID_FRU_ID) - fru_id = (guid_t *)gdata->fru_id; - if (gdata->validation_bits & CPER_SEC_VALID_FRU_TEXT) - fru_text = gdata->fru_text; - sec_type = (guid_t *)gdata->section_type; - if (guid_equal(sec_type, &CPER_SEC_PLATFORM_MEM)) { - struct cper_sec_mem_err *mem = (void *)(gdata + 1); - if (gdata->error_data_length >= sizeof(*mem)) - trace_extlog_mem_event(mem, err_seq, fru_id, fru_text, - (u8)gdata->error_severity); + apei_estatus_for_each_section(tmp, gdata) { + if (gdata->validation_bits & CPER_SEC_VALID_FRU_ID) + fru_id = (guid_t *)gdata->fru_id; + else + fru_id = &guid_null; + if (gdata->validation_bits & CPER_SEC_VALID_FRU_TEXT) + fru_text = gdata->fru_text; + else + fru_text = ""; + sec_type = (guid_t *)gdata->section_type; + if (guid_equal(sec_type, &CPER_SEC_PLATFORM_MEM)) { + struct cper_sec_mem_err *mem = acpi_hest_get_payload(gdata); + + if (gdata->error_data_length >= sizeof(*mem)) + trace_extlog_mem_event(mem, err_seq, fru_id, fru_text, + (u8)gdata->error_severity); + } } out: - return NOTIFY_STOP; + mce->kflags |= MCE_HANDLED_EXTLOG; + return NOTIFY_OK; } static bool __init extlog_get_l1addr(void) @@ -224,16 +235,11 @@ static int __init extlog_init(void) u64 cap; int rc; - rdmsrl(MSR_IA32_MCG_CAP, cap); - - if (!(cap & MCG_ELOG_P) || !extlog_get_l1addr()) + if (rdmsrq_safe(MSR_IA32_MCG_CAP, &cap) || + !(cap & MCG_ELOG_P) || + !extlog_get_l1addr()) return -ENODEV; - if (edac_get_report_status() == EDAC_REPORTING_FORCE) { - pr_warn("Not loading eMCA, error reporting force-enabled through EDAC.\n"); - return -EPERM; - } - rc = -EINVAL; /* get L1 header to fetch necessary information */ l1_hdr_size = sizeof(struct extlog_l1_head); @@ -246,6 +252,10 @@ static int __init extlog_init(void) } extlog_l1_hdr = 
acpi_os_map_iomem(l1_dirbase, l1_hdr_size); + if (!extlog_l1_hdr) { + rc = -ENOMEM; + goto err_release_l1_hdr; + } l1_head = (struct extlog_l1_head *)extlog_l1_hdr; l1_size = l1_head->total_len; l1_percpu_entry = l1_head->entries; @@ -263,6 +273,10 @@ static int __init extlog_init(void) goto err; } extlog_l1_addr = acpi_os_map_iomem(l1_dirbase, l1_size); + if (!extlog_l1_addr) { + rc = -ENOMEM; + goto err_release_l1_dir; + } l1_entry_base = (u64 *)((u8 *)extlog_l1_addr + l1_hdr_size); /* remap elog table */ @@ -274,6 +288,10 @@ static int __init extlog_init(void) goto err_release_l1_dir; } elog_addr = acpi_os_map_iomem(elog_base, elog_size); + if (!elog_addr) { + rc = -ENOMEM; + goto err_release_elog; + } rc = -ENOMEM; /* allocate buffer to save elog record */ @@ -281,12 +299,6 @@ static int __init extlog_init(void) if (elog_buf == NULL) goto err_release_elog; - /* - * eMCA event report method has higher priority than EDAC method, - * unless EDAC event report method is mandatory. - */ - old_edac_report_status = edac_get_report_status(); - edac_set_report_status(EDAC_REPORTING_DISABLED); mce_register_decode_chain(&extlog_mce_dec); /* enable OS to be involved to take over management from BIOS */ ((struct extlog_l1_head *)extlog_l1_addr)->flags |= FLAG_OS_OPTIN; @@ -301,6 +313,8 @@ err_release_l1_dir: if (extlog_l1_addr) acpi_os_unmap_iomem(extlog_l1_addr, l1_size); release_mem_region(l1_dirbase, l1_size); +err_release_l1_hdr: + release_mem_region(l1_dirbase, l1_hdr_size); err: pr_warn(FW_BUG "Extended error log disabled because of problems parsing f/w tables\n"); return rc; @@ -308,11 +322,11 @@ err: static void __exit extlog_exit(void) { - edac_set_report_status(old_edac_report_status); mce_unregister_decode_chain(&extlog_mce_dec); - ((struct extlog_l1_head *)extlog_l1_addr)->flags &= ~FLAG_OS_OPTIN; - if (extlog_l1_addr) + if (extlog_l1_addr) { + ((struct extlog_l1_head *)extlog_l1_addr)->flags &= ~FLAG_OS_OPTIN; acpi_os_unmap_iomem(extlog_l1_addr, l1_size); + } if (elog_addr) acpi_os_unmap_iomem(elog_addr, elog_size); release_mem_region(elog_base, elog_size); diff --git a/drivers/acpi/acpi_ffh.c b/drivers/acpi/acpi_ffh.c new file mode 100644 index 000000000000..8d5126963dc7 --- /dev/null +++ b/drivers/acpi/acpi_ffh.c @@ -0,0 +1,53 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Author: Sudeep Holla <sudeep.holla@arm.com> + * Copyright 2022 Arm Limited + */ +#include <linux/kernel.h> +#include <linux/acpi.h> +#include <linux/completion.h> +#include <linux/idr.h> +#include <linux/io.h> + +static struct acpi_ffh_info ffh_ctx; + +int __weak acpi_ffh_address_space_arch_setup(void *handler_ctxt, + void **region_ctxt) +{ + return -EOPNOTSUPP; +} + +int __weak acpi_ffh_address_space_arch_handler(acpi_integer *value, + void *region_context) +{ + return -EOPNOTSUPP; +} + +static acpi_status +acpi_ffh_address_space_setup(acpi_handle region_handle, u32 function, + void *handler_context, void **region_context) +{ + return acpi_ffh_address_space_arch_setup(handler_context, + region_context); +} + +static acpi_status +acpi_ffh_address_space_handler(u32 function, acpi_physical_address addr, + u32 bits, acpi_integer *value, + void *handler_context, void *region_context) +{ + return acpi_ffh_address_space_arch_handler(value, region_context); +} + +void __init acpi_init_ffh(void) +{ + acpi_status status; + + status = acpi_install_address_space_handler(ACPI_ROOT_OBJECT, + ACPI_ADR_SPACE_FIXED_HARDWARE, + &acpi_ffh_address_space_handler, + &acpi_ffh_address_space_setup, + &ffh_ctx); + if 
(ACPI_FAILURE(status)) + pr_alert("OperationRegion handler could not be installed\n"); +} diff --git a/drivers/acpi/acpi_fpdt.c b/drivers/acpi/acpi_fpdt.c new file mode 100644 index 000000000000..271092f2700a --- /dev/null +++ b/drivers/acpi/acpi_fpdt.c @@ -0,0 +1,316 @@ +// SPDX-License-Identifier: GPL-2.0-only + +/* + * FPDT support for exporting boot and suspend/resume performance data + * + * Copyright (C) 2021 Intel Corporation. All rights reserved. + */ + +#define pr_fmt(fmt) "ACPI FPDT: " fmt + +#include <linux/acpi.h> + +/* + * FPDT contains ACPI table header and a number of fpdt_subtable_entries. + * Each fpdt_subtable_entry points to a subtable: FBPT or S3PT. + * Each FPDT subtable (FBPT/S3PT) is composed of a fpdt_subtable_header + * and a number of fpdt performance records. + * Each FPDT performance record is composed of a fpdt_record_header and + * performance data fields, for boot or suspend or resume phase. + */ +enum fpdt_subtable_type { + SUBTABLE_FBPT, + SUBTABLE_S3PT, +}; + +struct fpdt_subtable_entry { + u16 type; /* refer to enum fpdt_subtable_type */ + u8 length; + u8 revision; + u32 reserved; + u64 address; /* physical address of the S3PT/FBPT table */ +}; + +struct fpdt_subtable_header { + u32 signature; + u32 length; +}; + +enum fpdt_record_type { + RECORD_S3_RESUME, + RECORD_S3_SUSPEND, + RECORD_BOOT, +}; + +struct fpdt_record_header { + u16 type; /* refer to enum fpdt_record_type */ + u8 length; + u8 revision; +}; + +struct resume_performance_record { + struct fpdt_record_header header; + u32 resume_count; + u64 resume_prev; + u64 resume_avg; +} __attribute__((packed)); + +struct boot_performance_record { + struct fpdt_record_header header; + u32 reserved; + u64 firmware_start; + u64 bootloader_load; + u64 bootloader_launch; + u64 exitbootservice_start; + u64 exitbootservice_end; +} __attribute__((packed)); + +struct suspend_performance_record { + struct fpdt_record_header header; + u64 suspend_start; + u64 suspend_end; +} __attribute__((packed)); + + +static struct resume_performance_record *record_resume; +static struct suspend_performance_record *record_suspend; +static struct boot_performance_record *record_boot; + +#define FPDT_ATTR(phase, name) \ +static ssize_t name##_show(struct kobject *kobj, \ + struct kobj_attribute *attr, char *buf) \ +{ \ + return sprintf(buf, "%llu\n", record_##phase->name); \ +} \ +static struct kobj_attribute name##_attr = \ +__ATTR(name##_ns, 0444, name##_show, NULL) + +FPDT_ATTR(resume, resume_prev); +FPDT_ATTR(resume, resume_avg); +FPDT_ATTR(suspend, suspend_start); +FPDT_ATTR(suspend, suspend_end); +FPDT_ATTR(boot, firmware_start); +FPDT_ATTR(boot, bootloader_load); +FPDT_ATTR(boot, bootloader_launch); +FPDT_ATTR(boot, exitbootservice_start); +FPDT_ATTR(boot, exitbootservice_end); + +static ssize_t resume_count_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + return sprintf(buf, "%u\n", record_resume->resume_count); +} + +static struct kobj_attribute resume_count_attr = +__ATTR_RO(resume_count); + +static struct attribute *resume_attrs[] = { + &resume_count_attr.attr, + &resume_prev_attr.attr, + &resume_avg_attr.attr, + NULL +}; + +static const struct attribute_group resume_attr_group = { + .attrs = resume_attrs, + .name = "resume", +}; + +static struct attribute *suspend_attrs[] = { + &suspend_start_attr.attr, + &suspend_end_attr.attr, + NULL +}; + +static const struct attribute_group suspend_attr_group = { + .attrs = suspend_attrs, + .name = "suspend", +}; + +static struct attribute *boot_attrs[] = { 
+ &firmware_start_attr.attr, + &bootloader_load_attr.attr, + &bootloader_launch_attr.attr, + &exitbootservice_start_attr.attr, + &exitbootservice_end_attr.attr, + NULL +}; + +static const struct attribute_group boot_attr_group = { + .attrs = boot_attrs, + .name = "boot", +}; + +static struct kobject *fpdt_kobj; + +#if defined CONFIG_X86 && defined CONFIG_PHYS_ADDR_T_64BIT +#include <linux/processor.h> +static bool fpdt_address_valid(u64 address) +{ + /* + * On some systems the table contains invalid addresses + * with unsuppored high address bits set, check for this. + */ + return !(address >> boot_cpu_data.x86_phys_bits); +} +#else +static bool fpdt_address_valid(u64 address) +{ + return true; +} +#endif + +static int fpdt_process_subtable(u64 address, u32 subtable_type) +{ + struct fpdt_subtable_header *subtable_header; + struct fpdt_record_header *record_header; + char *signature = (subtable_type == SUBTABLE_FBPT ? "FBPT" : "S3PT"); + u32 length, offset; + int result; + + if (!fpdt_address_valid(address)) { + pr_info(FW_BUG "invalid physical address: 0x%llx!\n", address); + return -EINVAL; + } + + subtable_header = acpi_os_map_memory(address, sizeof(*subtable_header)); + if (!subtable_header) + return -ENOMEM; + + if (strncmp((char *)&subtable_header->signature, signature, 4)) { + pr_info(FW_BUG "subtable signature and type mismatch!\n"); + return -EINVAL; + } + + length = subtable_header->length; + acpi_os_unmap_memory(subtable_header, sizeof(*subtable_header)); + + subtable_header = acpi_os_map_memory(address, length); + if (!subtable_header) + return -ENOMEM; + + offset = sizeof(*subtable_header); + while (offset < length) { + record_header = (void *)subtable_header + offset; + offset += record_header->length; + + if (!record_header->length) { + pr_err(FW_BUG "Zero-length record found in FPTD.\n"); + result = -EINVAL; + goto err; + } + + switch (record_header->type) { + case RECORD_S3_RESUME: + if (subtable_type != SUBTABLE_S3PT) { + pr_err(FW_BUG "Invalid record %d for subtable %s\n", + record_header->type, signature); + result = -EINVAL; + goto err; + } + if (record_resume) { + pr_err("Duplicate resume performance record found.\n"); + continue; + } + record_resume = (struct resume_performance_record *)record_header; + result = sysfs_create_group(fpdt_kobj, &resume_attr_group); + if (result) + goto err; + break; + case RECORD_S3_SUSPEND: + if (subtable_type != SUBTABLE_S3PT) { + pr_err(FW_BUG "Invalid %d for subtable %s\n", + record_header->type, signature); + continue; + } + if (record_suspend) { + pr_err("Duplicate suspend performance record found.\n"); + continue; + } + record_suspend = (struct suspend_performance_record *)record_header; + result = sysfs_create_group(fpdt_kobj, &suspend_attr_group); + if (result) + goto err; + break; + case RECORD_BOOT: + if (subtable_type != SUBTABLE_FBPT) { + pr_err(FW_BUG "Invalid %d for subtable %s\n", + record_header->type, signature); + result = -EINVAL; + goto err; + } + if (record_boot) { + pr_err("Duplicate boot performance record found.\n"); + continue; + } + record_boot = (struct boot_performance_record *)record_header; + result = sysfs_create_group(fpdt_kobj, &boot_attr_group); + if (result) + goto err; + break; + + default: + /* Other types are reserved in ACPI 6.4 spec. 
*/ + break; + } + } + return 0; + +err: + if (record_boot) + sysfs_remove_group(fpdt_kobj, &boot_attr_group); + + if (record_suspend) + sysfs_remove_group(fpdt_kobj, &suspend_attr_group); + + if (record_resume) + sysfs_remove_group(fpdt_kobj, &resume_attr_group); + + return result; +} + +static int __init acpi_init_fpdt(void) +{ + acpi_status status; + struct acpi_table_header *header; + struct fpdt_subtable_entry *subtable; + u32 offset = sizeof(*header); + int result; + + status = acpi_get_table(ACPI_SIG_FPDT, 0, &header); + + if (ACPI_FAILURE(status)) + return 0; + + fpdt_kobj = kobject_create_and_add("fpdt", acpi_kobj); + if (!fpdt_kobj) { + result = -ENOMEM; + goto err_nomem; + } + + while (offset < header->length) { + subtable = (void *)header + offset; + switch (subtable->type) { + case SUBTABLE_FBPT: + case SUBTABLE_S3PT: + result = fpdt_process_subtable(subtable->address, + subtable->type); + if (result) + goto err_subtable; + break; + default: + /* Other types are reserved in ACPI 6.4 spec. */ + break; + } + offset += sizeof(*subtable); + } + return 0; +err_subtable: + kobject_put(fpdt_kobj); + +err_nomem: + acpi_put_table(header); + return result; +} + +fs_initcall(acpi_init_fpdt); diff --git a/drivers/acpi/acpi_ipmi.c b/drivers/acpi/acpi_ipmi.c index 712fd31674a6..5fba4dab5d08 100644 --- a/drivers/acpi/acpi_ipmi.c +++ b/drivers/acpi/acpi_ipmi.c @@ -1,23 +1,10 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /* * acpi_ipmi.c - ACPI IPMI opregion * * Copyright (C) 2010, 2013 Intel Corporation * Author: Zhao Yakui <yakui.zhao@intel.com> * Lv Zheng <lv.zheng@intel.com> - * - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or (at - * your option) any later version. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ #include <linux/module.h> @@ -35,6 +22,8 @@ MODULE_LICENSE("GPL"); /* the IPMI timeout is 5s */ #define IPMI_TIMEOUT (5000) #define ACPI_IPMI_MAX_MSG_LENGTH 64 +/* 2s should be suffient for SMI being selected */ +#define ACPI_IPMI_SMI_SELECTION_TIMEOUT (2 * HZ) struct acpi_ipmi_device { /* the device list attached to driver_data.ipmi_devices */ @@ -67,6 +56,7 @@ struct ipmi_driver_data { * to this selected global IPMI system interface. 
*/ struct acpi_ipmi_device *selected_smi; + struct completion smi_selection_done; }; struct acpi_ipmi_msg { @@ -366,29 +356,27 @@ static void ipmi_flush_tx_msg(struct acpi_ipmi_device *ipmi) static void ipmi_cancel_tx_msg(struct acpi_ipmi_device *ipmi, struct acpi_ipmi_msg *msg) { - struct acpi_ipmi_msg *tx_msg, *temp; - bool msg_found = false; + struct acpi_ipmi_msg *tx_msg = NULL, *iter, *temp; unsigned long flags; spin_lock_irqsave(&ipmi->tx_msg_lock, flags); - list_for_each_entry_safe(tx_msg, temp, &ipmi->tx_msg_list, head) { - if (msg == tx_msg) { - msg_found = true; - list_del(&tx_msg->head); + list_for_each_entry_safe(iter, temp, &ipmi->tx_msg_list, head) { + if (msg == iter) { + tx_msg = iter; + list_del(&iter->head); break; } } spin_unlock_irqrestore(&ipmi->tx_msg_lock, flags); - if (msg_found) + if (tx_msg) acpi_ipmi_msg_put(tx_msg); } static void ipmi_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data) { struct acpi_ipmi_device *ipmi_device = user_msg_data; - bool msg_found = false; - struct acpi_ipmi_msg *tx_msg, *temp; + struct acpi_ipmi_msg *tx_msg = NULL, *iter, *temp; struct device *dev = ipmi_device->dev; unsigned long flags; @@ -400,16 +388,16 @@ static void ipmi_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data) } spin_lock_irqsave(&ipmi_device->tx_msg_lock, flags); - list_for_each_entry_safe(tx_msg, temp, &ipmi_device->tx_msg_list, head) { - if (msg->msgid == tx_msg->tx_msgid) { - msg_found = true; - list_del(&tx_msg->head); + list_for_each_entry_safe(iter, temp, &ipmi_device->tx_msg_list, head) { + if (msg->msgid == iter->tx_msgid) { + tx_msg = iter; + list_del(&iter->head); break; } } spin_unlock_irqrestore(&ipmi_device->tx_msg_lock, flags); - if (!msg_found) { + if (!tx_msg) { dev_warn(dev, "Unexpected response (msg id %ld) is returned.\n", msg->msgid); @@ -478,8 +466,10 @@ static void ipmi_register_bmc(int iface, struct device *dev) if (temp->handle == handle) goto err_lock; } - if (!driver_data.selected_smi) + if (!driver_data.selected_smi) { driver_data.selected_smi = ipmi_device; + complete(&driver_data.smi_selection_done); + } list_add_tail(&ipmi_device->head, &driver_data.ipmi_devices); mutex_unlock(&driver_data.ipmi_lock); @@ -491,20 +481,18 @@ err_lock: ipmi_dev_release(ipmi_device); err_ref: put_device(smi_data.dev); - return; } static void ipmi_bmc_gone(int iface) { - struct acpi_ipmi_device *ipmi_device, *temp; - bool dev_found = false; + struct acpi_ipmi_device *ipmi_device = NULL, *iter, *temp; mutex_lock(&driver_data.ipmi_lock); - list_for_each_entry_safe(ipmi_device, temp, + list_for_each_entry_safe(iter, temp, &driver_data.ipmi_devices, head) { - if (ipmi_device->ipmi_ifnum != iface) { - dev_found = true; - __ipmi_dev_kill(ipmi_device); + if (iter->ipmi_ifnum != iface) { + ipmi_device = iter; + __ipmi_dev_kill(iter); break; } } @@ -514,7 +502,7 @@ static void ipmi_bmc_gone(int iface) struct acpi_ipmi_device, head); mutex_unlock(&driver_data.ipmi_lock); - if (dev_found) { + if (ipmi_device) { ipmi_flush_tx_msg(ipmi_device); acpi_ipmi_dev_put(ipmi_device); } @@ -595,6 +583,20 @@ out_msg: return status; } +int acpi_wait_for_acpi_ipmi(void) +{ + long ret; + + ret = wait_for_completion_interruptible_timeout(&driver_data.smi_selection_done, + ACPI_IPMI_SMI_SELECTION_TIMEOUT); + + if (ret <= 0) + return -ETIMEDOUT; + + return 0; +} +EXPORT_SYMBOL_GPL(acpi_wait_for_acpi_ipmi); + static int __init acpi_ipmi_init(void) { int result; @@ -603,6 +605,8 @@ static int __init acpi_ipmi_init(void) if (acpi_disabled) return 0; + 
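acpi_wait_for_acpi_ipmi() above gives IPMI OpRegion users a way to block until a system interface has been selected, or until the 2 s window expires. A minimal consumer sketch follows; the caller name is hypothetical and kernel context is assumed, this is not part of the patch itself:

/*
 * Illustrative only: wait for the IPMI OpRegion backend before evaluating
 * AML that relies on the IPMI address space.
 */
static int example_ipmi_dependent_init(void)
{
	int ret;

	ret = acpi_wait_for_acpi_ipmi();
	if (ret)		/* -ETIMEDOUT: no BMC registered within 2 s */
		return ret;

	/* Safe to evaluate IPMI-backed control methods from here on. */
	return 0;
}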
init_completion(&driver_data.smi_selection_done); + status = acpi_install_address_space_handler(ACPI_ROOT_OBJECT, ACPI_ADR_SPACE_IPMI, &acpi_ipmi_space_handler, @@ -611,9 +615,14 @@ static int __init acpi_ipmi_init(void) pr_warn("Can't register IPMI opregion space handle\n"); return -EINVAL; } + result = ipmi_smi_watcher_register(&driver_data.bmc_events); - if (result) + if (result) { + acpi_remove_address_space_handler(ACPI_ROOT_OBJECT, + ACPI_ADR_SPACE_IPMI, + &acpi_ipmi_space_handler); pr_err("Can't register IPMI system interface watcher\n"); + } return result; } diff --git a/drivers/acpi/acpi_lpat.c b/drivers/acpi/acpi_lpat.c index 2cd9f738812b..851f67c96097 100644 --- a/drivers/acpi/acpi_lpat.c +++ b/drivers/acpi/acpi_lpat.c @@ -1,16 +1,8 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * acpi_lpat.c - LPAT table processing functions * * Copyright (C) 2015 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version - * 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. */ #include <linux/export.h> @@ -22,7 +14,7 @@ * LPAT conversion table * * @lpat_table: the temperature_raw mapping table structure - * @raw: the raw value, used as a key to get the temerature from the + * @raw: the raw value, used as a key to get the temperature from the * above mapping table * * A positive converted temperature value will be returned on success, diff --git a/drivers/acpi/acpi_lpit.c b/drivers/acpi/acpi_lpit.c index e43cb71b6972..b8d98b1b48ae 100644 --- a/drivers/acpi/acpi_lpit.c +++ b/drivers/acpi/acpi_lpit.c @@ -1,23 +1,16 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * acpi_lpit.c - LPIT table processing functions * * Copyright (C) 2017 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version - * 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. */ #include <linux/cpu.h> #include <linux/acpi.h> #include <asm/msr.h> #include <asm/tsc.h> +#include "internal.h" struct lpit_residency_info { struct acpi_generic_address gaddr; @@ -46,7 +39,7 @@ static int lpit_read_residency_counter_us(u64 *counter, bool io_mem) return 0; } - err = rdmsrl_safe(residency_info_ffh.gaddr.address, counter); + err = rdmsrq_safe(residency_info_ffh.gaddr.address, counter); if (!err) { u64 mask = GENMASK_ULL(residency_info_ffh.gaddr.bit_offset + residency_info_ffh.gaddr. bit_width - 1, @@ -105,39 +98,39 @@ EXPORT_SYMBOL_GPL(lpit_read_residency_count_address); static void lpit_update_residency(struct lpit_residency_info *info, struct acpi_lpit_native *lpit_native) { + struct device *dev_root = bus_get_dev_root(&cpu_subsys); + + /* Silently fail, if cpuidle attribute group is not present */ + if (!dev_root) + return; + info->frequency = lpit_native->counter_frequency ? 
- lpit_native->counter_frequency : tsc_khz * 1000; + lpit_native->counter_frequency : mul_u32_u32(tsc_khz, 1000U); if (!info->frequency) info->frequency = 1; info->gaddr = lpit_native->residency_counter; if (info->gaddr.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) { - info->iomem_addr = ioremap_nocache(info->gaddr.address, + info->iomem_addr = ioremap(info->gaddr.address, info->gaddr.bit_width / 8); if (!info->iomem_addr) - return; + goto exit; - if (!(acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0)) - return; - - /* Silently fail, if cpuidle attribute group is not present */ - sysfs_add_file_to_group(&cpu_subsys.dev_root->kobj, + sysfs_add_file_to_group(&dev_root->kobj, &dev_attr_low_power_idle_system_residency_us.attr, "cpuidle"); } else if (info->gaddr.space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) { - if (!(acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0)) - return; - - /* Silently fail, if cpuidle attribute group is not present */ - sysfs_add_file_to_group(&cpu_subsys.dev_root->kobj, + sysfs_add_file_to_group(&dev_root->kobj, &dev_attr_low_power_idle_cpu_residency_us.attr, "cpuidle"); } +exit: + put_device(dev_root); } static void lpit_process(u64 begin, u64 end) { - while (begin + sizeof(struct acpi_lpit_native) < end) { + while (begin + sizeof(struct acpi_lpit_native) <= end) { struct acpi_lpit_native *lpit_native = (struct acpi_lpit_native *)begin; if (!lpit_native->header.type && !lpit_native->header.flags) { @@ -156,14 +149,14 @@ static void lpit_process(u64 begin, u64 end) void acpi_init_lpit(void) { acpi_status status; - u64 lpit_begin; struct acpi_table_lpit *lpit; status = acpi_get_table(ACPI_SIG_LPIT, 0, (struct acpi_table_header **)&lpit); - if (ACPI_FAILURE(status)) return; - lpit_begin = (u64)lpit + sizeof(*lpit); - lpit_process(lpit_begin, lpit_begin + lpit->header.length); + lpit_process((u64)lpit + sizeof(*lpit), + (u64)lpit + lpit->header.length); + + acpi_put_table((struct acpi_table_header *)lpit); } diff --git a/drivers/acpi/acpi_memhotplug.c b/drivers/acpi/acpi_memhotplug.c index 8fe0960ea572..d0c1a71007d0 100644 --- a/drivers/acpi/acpi_memhotplug.c +++ b/drivers/acpi/acpi_memhotplug.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C) 2004, 2013 Intel Corporation * Author: Naveen B S <naveen.b.s@intel.com> @@ -5,17 +6,6 @@ * * All rights reserved. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or (at - * your option) any later version. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or - * NON INFRINGEMENT. See the GNU General Public License for more - * details. 
- * * ACPI based HotPlug driver that supports Memory Hotplug * This driver fields notifications from firmware for memory add * and remove operations and alerts the VM of the affected memory @@ -32,13 +22,6 @@ #define ACPI_MEMORY_DEVICE_HID "PNP0C80" #define ACPI_MEMORY_DEVICE_NAME "Hotplug Mem Device" -#define _COMPONENT ACPI_MEMORY_DEVICE_COMPONENT - -#undef PREFIX -#define PREFIX "ACPI:memory_hp:" - -ACPI_MODULE_NAME("acpi_memhotplug"); - static const struct acpi_device_id memory_device_ids[] = { {ACPI_MEMORY_DEVICE_HID, 0}, {"", 0}, @@ -46,11 +29,6 @@ static const struct acpi_device_id memory_device_ids[] = { #ifdef CONFIG_ACPI_HOTPLUG_MEMORY -/* Memory Device States */ -#define MEMORY_INVALID_STATE 0 -#define MEMORY_POWER_ON_STATE 1 -#define MEMORY_POWER_OFF_STATE 2 - static int acpi_memory_device_add(struct acpi_device *device, const struct acpi_device_id *not_used); static void acpi_memory_device_remove(struct acpi_device *device); @@ -74,9 +52,9 @@ struct acpi_memory_info { }; struct acpi_memory_device { - struct acpi_device * device; - unsigned int state; /* State of the memory device */ + struct acpi_device *device; struct list_head res_list; + int mgid; }; static acpi_status @@ -165,16 +143,6 @@ static int acpi_memory_check_device(struct acpi_memory_device *mem_device) return 0; } -static unsigned long acpi_meminfo_start_pfn(struct acpi_memory_info *info) -{ - return PFN_DOWN(info->start_addr); -} - -static unsigned long acpi_meminfo_end_pfn(struct acpi_memory_info *info) -{ - return PFN_UP(info->start_addr + info->length-1); -} - static int acpi_bind_memblk(struct memory_block *mem, void *arg) { return acpi_bind_one(&mem->dev, arg); @@ -183,9 +151,8 @@ static int acpi_bind_memblk(struct memory_block *mem, void *arg) static int acpi_bind_memory_blocks(struct acpi_memory_info *info, struct acpi_device *adev) { - return walk_memory_range(acpi_meminfo_start_pfn(info), - acpi_meminfo_end_pfn(info), adev, - acpi_bind_memblk); + return walk_memory_blocks(info->start_addr, info->length, adev, + acpi_bind_memblk); } static int acpi_unbind_memblk(struct memory_block *mem, void *arg) @@ -196,18 +163,40 @@ static int acpi_unbind_memblk(struct memory_block *mem, void *arg) static void acpi_unbind_memory_blocks(struct acpi_memory_info *info) { - walk_memory_range(acpi_meminfo_start_pfn(info), - acpi_meminfo_end_pfn(info), NULL, acpi_unbind_memblk); + walk_memory_blocks(info->start_addr, info->length, NULL, + acpi_unbind_memblk); } static int acpi_memory_enable_device(struct acpi_memory_device *mem_device) { acpi_handle handle = mem_device->device->handle; + mhp_t mhp_flags = MHP_NID_IS_MGID; int result, num_enabled = 0; struct acpi_memory_info *info; - int node; + u64 total_length = 0; + int node, mgid; node = acpi_get_node(handle); + + list_for_each_entry(info, &mem_device->res_list, list) { + if (!info->length) + continue; + /* We want a single node for the whole memory group */ + if (node < 0) + node = memory_add_physaddr_to_nid(info->start_addr); + total_length += info->length; + } + + if (!total_length) { + dev_err(&mem_device->device->dev, "device is empty\n"); + return -EINVAL; + } + + mgid = memory_group_register_static(node, PFN_UP(total_length)); + if (mgid < 0) + return mgid; + mem_device->mgid = mgid; + /* * Tell the VM there is more memory here... * Note: Assume that this function returns zero on success @@ -215,20 +204,16 @@ static int acpi_memory_enable_device(struct acpi_memory_device *mem_device) * (i.e. 
memory-hot-remove function) */ list_for_each_entry(info, &mem_device->res_list, list) { - if (info->enabled) { /* just sanity check...*/ - num_enabled++; - continue; - } /* * If the memory block size is zero, please ignore it. * Don't try to do the following memory hotplug flowchart. */ if (!info->length) continue; - if (node < 0) - node = memory_add_physaddr_to_nid(info->start_addr); - result = __add_memory(node, info->start_addr, info->length); + mhp_flags |= MHP_MEMMAP_ON_MEMORY; + result = __add_memory(mgid, info->start_addr, info->length, + mhp_flags); /* * If the memory block has been used by the kernel, add_memory() @@ -254,7 +239,6 @@ static int acpi_memory_enable_device(struct acpi_memory_device *mem_device) } if (!num_enabled) { dev_err(&mem_device->device->dev, "add_memory failed\n"); - mem_device->state = MEMORY_INVALID_STATE; return -EINVAL; } /* @@ -270,19 +254,14 @@ static int acpi_memory_enable_device(struct acpi_memory_device *mem_device) static void acpi_memory_remove_memory(struct acpi_memory_device *mem_device) { - acpi_handle handle = mem_device->device->handle; struct acpi_memory_info *info, *n; - int nid = acpi_get_node(handle); list_for_each_entry_safe(info, n, &mem_device->res_list, list) { if (!info->enabled) continue; - if (nid == NUMA_NO_NODE) - nid = memory_add_physaddr_to_nid(info->start_addr); - acpi_unbind_memory_blocks(info); - __remove_memory(nid, info->start_addr, info->length); + __remove_memory(info->start_addr, info->length); list_del(&info->list); kfree(info); } @@ -293,6 +272,10 @@ static void acpi_memory_device_free(struct acpi_memory_device *mem_device) if (!mem_device) return; + /* In case we succeeded adding *some* memory, unregistering fails. */ + if (mem_device->mgid >= 0) + memory_group_unregister(mem_device->mgid); + acpi_memory_free_device_resources(mem_device); mem_device->device->driver_data = NULL; kfree(mem_device); @@ -313,6 +296,7 @@ static int acpi_memory_device_add(struct acpi_device *device, INIT_LIST_HEAD(&mem_device->res_list); mem_device->device = device; + mem_device->mgid = -1; sprintf(acpi_device_name(device), "%s", ACPI_MEMORY_DEVICE_NAME); sprintf(acpi_device_class(device), "%s", ACPI_MEMORY_DEVICE_CLASS); device->driver_data = mem_device; @@ -325,9 +309,6 @@ static int acpi_memory_device_add(struct acpi_device *device, return result; } - /* Set the device state */ - mem_device->state = MEMORY_POWER_ON_STATE; - result = acpi_memory_check_device(mem_device); if (result) { acpi_memory_device_free(mem_device); diff --git a/drivers/acpi/acpi_mrrm.c b/drivers/acpi/acpi_mrrm.c new file mode 100644 index 000000000000..6d69554c940e --- /dev/null +++ b/drivers/acpi/acpi_mrrm.c @@ -0,0 +1,211 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2025, Intel Corporation. + * + * Memory Range and Region Mapping (MRRM) structure + * + * Parse and report the platform's MRRM table in /sys. 
+ */ + +#define pr_fmt(fmt) "acpi/mrrm: " fmt + +#include <linux/acpi.h> +#include <linux/init.h> +#include <linux/string.h> +#include <linux/sysfs.h> + +/* Default assume one memory region covering all system memory, per the spec */ +static int max_mem_region = 1; + +/* Access for use by resctrl file system */ +int acpi_mrrm_max_mem_region(void) +{ + return max_mem_region; +} + +struct mrrm_mem_range_entry { + u64 base; + u64 length; + int node; + u8 local_region_id; + u8 remote_region_id; +}; + +static struct mrrm_mem_range_entry *mrrm_mem_range_entry; +static u32 mrrm_mem_entry_num; + +static int get_node_num(struct mrrm_mem_range_entry *e) +{ + unsigned int nid; + + for_each_online_node(nid) { + for (int z = 0; z < MAX_NR_ZONES; z++) { + struct zone *zone = NODE_DATA(nid)->node_zones + z; + + if (!populated_zone(zone)) + continue; + if (zone_intersects(zone, PHYS_PFN(e->base), PHYS_PFN(e->length))) + return zone_to_nid(zone); + } + } + + return -ENOENT; +} + +static __init int acpi_parse_mrrm(struct acpi_table_header *table) +{ + struct acpi_mrrm_mem_range_entry *mre_entry; + struct acpi_table_mrrm *mrrm; + void *mre, *mrrm_end; + int mre_count = 0; + + mrrm = (struct acpi_table_mrrm *)table; + if (!mrrm) + return -ENODEV; + + if (mrrm->header.revision != 1) + return -EINVAL; + + if (mrrm->flags & ACPI_MRRM_FLAGS_REGION_ASSIGNMENT_OS) + return -EOPNOTSUPP; + + mrrm_end = (void *)mrrm + mrrm->header.length - 1; + mre = (void *)mrrm + sizeof(struct acpi_table_mrrm); + while (mre < mrrm_end) { + mre_entry = mre; + mre_count++; + mre += mre_entry->header.length; + } + if (!mre_count) { + pr_info(FW_BUG "No ranges listed in MRRM table\n"); + return -EINVAL; + } + + mrrm_mem_range_entry = kmalloc_array(mre_count, sizeof(*mrrm_mem_range_entry), + GFP_KERNEL | __GFP_ZERO); + if (!mrrm_mem_range_entry) + return -ENOMEM; + + mre = (void *)mrrm + sizeof(struct acpi_table_mrrm); + while (mre < mrrm_end) { + struct mrrm_mem_range_entry *e; + + mre_entry = mre; + e = mrrm_mem_range_entry + mrrm_mem_entry_num; + + e->base = mre_entry->addr_base; + e->length = mre_entry->addr_len; + e->node = get_node_num(e); + + if (mre_entry->region_id_flags & ACPI_MRRM_VALID_REGION_ID_FLAGS_LOCAL) + e->local_region_id = mre_entry->local_region_id; + else + e->local_region_id = -1; + if (mre_entry->region_id_flags & ACPI_MRRM_VALID_REGION_ID_FLAGS_REMOTE) + e->remote_region_id = mre_entry->remote_region_id; + else + e->remote_region_id = -1; + + mrrm_mem_entry_num++; + mre += mre_entry->header.length; + } + + max_mem_region = mrrm->max_mem_region; + + return 0; +} + +#define RANGE_ATTR(name, fmt) \ +static ssize_t name##_show(struct kobject *kobj, \ + struct kobj_attribute *attr, char *buf) \ +{ \ + struct mrrm_mem_range_entry *mre; \ + const char *kname = kobject_name(kobj); \ + int n, ret; \ + \ + ret = kstrtoint(kname + 5, 10, &n); \ + if (ret) \ + return ret; \ + \ + mre = mrrm_mem_range_entry + n; \ + \ + return sysfs_emit(buf, fmt, mre->name); \ +} \ +static struct kobj_attribute name##_attr = __ATTR_RO(name) + +RANGE_ATTR(base, "0x%llx\n"); +RANGE_ATTR(length, "0x%llx\n"); +RANGE_ATTR(node, "%d\n"); +RANGE_ATTR(local_region_id, "%d\n"); +RANGE_ATTR(remote_region_id, "%d\n"); + +static struct attribute *memory_range_attrs[] = { + &base_attr.attr, + &length_attr.attr, + &node_attr.attr, + &local_region_id_attr.attr, + &remote_region_id_attr.attr, + NULL +}; + +ATTRIBUTE_GROUPS(memory_range); + +static __init int add_boot_memory_ranges(void) +{ + struct kobject *pkobj, *kobj, **kobjs; + int ret = -EINVAL; + char 
name[16]; + int i; + + pkobj = kobject_create_and_add("memory_ranges", acpi_kobj); + if (!pkobj) + return -ENOMEM; + + kobjs = kcalloc(mrrm_mem_entry_num, sizeof(*kobjs), GFP_KERNEL); + if (!kobjs) { + kobject_put(pkobj); + return -ENOMEM; + } + + for (i = 0; i < mrrm_mem_entry_num; i++) { + scnprintf(name, sizeof(name), "range%d", i); + kobj = kobject_create_and_add(name, pkobj); + if (!kobj) { + ret = -ENOMEM; + goto cleanup; + } + + ret = sysfs_create_groups(kobj, memory_range_groups); + if (ret) { + kobject_put(kobj); + goto cleanup; + } + kobjs[i] = kobj; + } + + kfree(kobjs); + return 0; + +cleanup: + for (int j = 0; j < i; j++) { + if (kobjs[j]) { + sysfs_remove_groups(kobjs[j], memory_range_groups); + kobject_put(kobjs[j]); + } + } + kfree(kobjs); + kobject_put(pkobj); + return ret; +} + +static __init int mrrm_init(void) +{ + int ret; + + ret = acpi_table_parse(ACPI_SIG_MRRM, acpi_parse_mrrm); + if (ret < 0) + return ret; + + return add_boot_memory_ranges(); +} +device_initcall(mrrm_init); diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c index a47676a55b84..c9a0bcaba2e4 100644 --- a/drivers/acpi/acpi_pad.c +++ b/drivers/acpi/acpi_pad.c @@ -1,17 +1,8 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * acpi_pad.c ACPI Processor Aggregator Driver * * Copyright (c) 2009, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * */ #include <linux/kernel.h> @@ -26,16 +17,23 @@ #include <linux/tick.h> #include <linux/slab.h> #include <linux/acpi.h> +#include <linux/perf_event.h> +#include <linux/platform_device.h> +#include <asm/cpuid/api.h> #include <asm/mwait.h> #include <xen/xen.h> #define ACPI_PROCESSOR_AGGREGATOR_CLASS "acpi_pad" #define ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME "Processor Aggregator" #define ACPI_PROCESSOR_AGGREGATOR_NOTIFY 0x80 + +#define ACPI_PROCESSOR_AGGREGATOR_STATUS_SUCCESS 0 +#define ACPI_PROCESSOR_AGGREGATOR_STATUS_NO_ACTION 1 + static DEFINE_MUTEX(isolated_cpus_lock); static DEFINE_MUTEX(round_robin_lock); -static unsigned long power_saving_mwait_eax; +static unsigned int power_saving_mwait_eax; static unsigned char tsc_detected_unstable; static unsigned char tsc_marked_unstable; @@ -49,10 +47,8 @@ static void power_saving_mwait_init(void) if (!boot_cpu_has(X86_FEATURE_MWAIT)) return; - if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF) - return; - cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &edx); + cpuid(CPUID_LEAF_MWAIT, &eax, &ebx, &ecx, &edx); if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) || !(ecx & CPUID5_ECX_INTERRUPT_BREAK)) @@ -73,6 +69,8 @@ static void power_saving_mwait_init(void) case X86_VENDOR_HYGON: case X86_VENDOR_AMD: case X86_VENDOR_INTEL: + case X86_VENDOR_ZHAOXIN: + case X86_VENDOR_CENTAUR: /* * AMD Fam10h TSC will tick in all * C/P/S0/S1 states when this bit is set. 
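acpi_mrrm_max_mem_region(), exported earlier in this patch for use by resctrl, is the only programmatic hook the MRRM parser provides. A minimal sketch of a caller sizing per-region state from it; the function name is hypothetical and kernel context with <linux/slab.h> is assumed:

/* Illustrative only: allocate one counter per platform memory region. */
static int example_alloc_region_counters(u32 **counters)
{
	int n = acpi_mrrm_max_mem_region();	/* defaults to 1 without an MRRM table */

	*counters = kcalloc(n, sizeof(**counters), GFP_KERNEL);
	return *counters ? 0 : -ENOMEM;
}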
@@ -96,7 +94,7 @@ static void round_robin_cpu(unsigned int tsk_index) cpumask_var_t tmp; int cpu; unsigned long min_weight = -1; - unsigned long uninitialized_var(preferred_cpu); + unsigned long preferred_cpu; if (!alloc_cpumask_var(&tmp, GFP_KERNEL)) return; @@ -106,7 +104,7 @@ static void round_robin_cpu(unsigned int tsk_index) for_each_cpu(cpu, pad_busy_cpus) cpumask_or(tmp, tmp, topology_sibling_cpumask(cpu)); cpumask_andnot(tmp, cpu_online_mask, tmp); - /* avoid HT sibilings if possible */ + /* avoid HT siblings if possible */ if (cpumask_empty(tmp)) cpumask_andnot(tmp, cpu_online_mask, pad_busy_cpus); if (cpumask_empty(tmp)) { @@ -136,20 +134,22 @@ static void round_robin_cpu(unsigned int tsk_index) static void exit_round_robin(unsigned int tsk_index) { struct cpumask *pad_busy_cpus = to_cpumask(pad_busy_cpus_bits); - cpumask_clear_cpu(tsk_in_cpu[tsk_index], pad_busy_cpus); - tsk_in_cpu[tsk_index] = -1; + + if (tsk_in_cpu[tsk_index] != -1) { + cpumask_clear_cpu(tsk_in_cpu[tsk_index], pad_busy_cpus); + tsk_in_cpu[tsk_index] = -1; + } } static unsigned int idle_pct = 5; /* percentage */ static unsigned int round_robin_time = 1; /* second */ static int power_saving_thread(void *data) { - struct sched_param param = {.sched_priority = 1}; int do_sleep; unsigned int tsk_index = (unsigned long)data; u64 last_jiffies = 0; - sched_setscheduler(current, SCHED_RR, ¶m); + sched_set_fifo_low(current); while (!kthread_should_stop()) { unsigned long expire_time; @@ -172,6 +172,9 @@ static int power_saving_thread(void *data) tsc_marked_unstable = 1; } local_irq_disable(); + + perf_lopwr_cb(true); + tick_broadcast_enable(); tick_broadcast_enter(); stop_critical_timings(); @@ -180,6 +183,9 @@ static int power_saving_thread(void *data) start_critical_timings(); tick_broadcast_exit(); + + perf_lopwr_cb(false); + local_irq_enable(); if (time_before(expire_time, jiffies)) { @@ -257,12 +263,12 @@ static void set_power_saving_task_num(unsigned int num) static void acpi_pad_idle_cpus(unsigned int num_cpus) { - get_online_cpus(); + cpus_read_lock(); num_cpus = min_t(unsigned int, num_cpus, num_online_cpus()); set_power_saving_task_num(num_cpus); - put_online_cpus(); + cpus_read_unlock(); } static uint32_t acpi_pad_idle_cpus_num(void) @@ -270,10 +276,11 @@ static uint32_t acpi_pad_idle_cpus_num(void) return ps_tsk_num; } -static ssize_t acpi_pad_rrtime_store(struct device *dev, +static ssize_t rrtime_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { unsigned long num; + if (kstrtoul(buf, 0, &num)) return -EINVAL; if (num < 1 || num >= 100) @@ -284,19 +291,18 @@ static ssize_t acpi_pad_rrtime_store(struct device *dev, return count; } -static ssize_t acpi_pad_rrtime_show(struct device *dev, +static ssize_t rrtime_show(struct device *dev, struct device_attribute *attr, char *buf) { - return scnprintf(buf, PAGE_SIZE, "%d\n", round_robin_time); + return sysfs_emit(buf, "%d\n", round_robin_time); } -static DEVICE_ATTR(rrtime, S_IRUGO|S_IWUSR, - acpi_pad_rrtime_show, - acpi_pad_rrtime_store); +static DEVICE_ATTR_RW(rrtime); -static ssize_t acpi_pad_idlepct_store(struct device *dev, +static ssize_t idlepct_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { unsigned long num; + if (kstrtoul(buf, 0, &num)) return -EINVAL; if (num < 1 || num >= 100) @@ -307,19 +313,18 @@ static ssize_t acpi_pad_idlepct_store(struct device *dev, return count; } -static ssize_t acpi_pad_idlepct_show(struct device *dev, +static ssize_t idlepct_show(struct device 
*dev, struct device_attribute *attr, char *buf) { - return scnprintf(buf, PAGE_SIZE, "%d\n", idle_pct); + return sysfs_emit(buf, "%d\n", idle_pct); } -static DEVICE_ATTR(idlepct, S_IRUGO|S_IWUSR, - acpi_pad_idlepct_show, - acpi_pad_idlepct_store); +static DEVICE_ATTR_RW(idlepct); -static ssize_t acpi_pad_idlecpus_store(struct device *dev, +static ssize_t idlecpus_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { unsigned long num; + if (kstrtoul(buf, 0, &num)) return -EINVAL; mutex_lock(&isolated_cpus_lock); @@ -328,44 +333,23 @@ static ssize_t acpi_pad_idlecpus_store(struct device *dev, return count; } -static ssize_t acpi_pad_idlecpus_show(struct device *dev, +static ssize_t idlecpus_show(struct device *dev, struct device_attribute *attr, char *buf) { return cpumap_print_to_pagebuf(false, buf, to_cpumask(pad_busy_cpus_bits)); } -static DEVICE_ATTR(idlecpus, S_IRUGO|S_IWUSR, - acpi_pad_idlecpus_show, - acpi_pad_idlecpus_store); - -static int acpi_pad_add_sysfs(struct acpi_device *device) -{ - int result; +static DEVICE_ATTR_RW(idlecpus); - result = device_create_file(&device->dev, &dev_attr_idlecpus); - if (result) - return -ENODEV; - result = device_create_file(&device->dev, &dev_attr_idlepct); - if (result) { - device_remove_file(&device->dev, &dev_attr_idlecpus); - return -ENODEV; - } - result = device_create_file(&device->dev, &dev_attr_rrtime); - if (result) { - device_remove_file(&device->dev, &dev_attr_idlecpus); - device_remove_file(&device->dev, &dev_attr_idlepct); - return -ENODEV; - } - return 0; -} +static struct attribute *acpi_pad_attrs[] = { + &dev_attr_idlecpus.attr, + &dev_attr_idlepct.attr, + &dev_attr_rrtime.attr, + NULL +}; -static void acpi_pad_remove_sysfs(struct acpi_device *device) -{ - device_remove_file(&device->dev, &dev_attr_idlecpus); - device_remove_file(&device->dev, &dev_attr_idlepct); - device_remove_file(&device->dev, &dev_attr_rrtime); -} +ATTRIBUTE_GROUPS(acpi_pad); /* * Query firmware how many CPUs should be idle @@ -403,29 +387,36 @@ static void acpi_pad_handle_notify(acpi_handle handle) .length = 4, .pointer = (void *)&idle_cpus, }; + u32 status; mutex_lock(&isolated_cpus_lock); num_cpus = acpi_pad_pur(handle); if (num_cpus < 0) { - mutex_unlock(&isolated_cpus_lock); - return; + /* The ACPI specification says that if no action was performed when + * processing the _PUR object, _OST should still be evaluated, albeit + * with a different status code. 
+ */ + status = ACPI_PROCESSOR_AGGREGATOR_STATUS_NO_ACTION; + } else { + status = ACPI_PROCESSOR_AGGREGATOR_STATUS_SUCCESS; + acpi_pad_idle_cpus(num_cpus); } - acpi_pad_idle_cpus(num_cpus); + idle_cpus = acpi_pad_idle_cpus_num(); - acpi_evaluate_ost(handle, ACPI_PROCESSOR_AGGREGATOR_NOTIFY, 0, ¶m); + acpi_evaluate_ost(handle, ACPI_PROCESSOR_AGGREGATOR_NOTIFY, status, ¶m); mutex_unlock(&isolated_cpus_lock); } static void acpi_pad_notify(acpi_handle handle, u32 event, void *data) { - struct acpi_device *device = data; + struct acpi_device *adev = data; switch (event) { case ACPI_PROCESSOR_AGGREGATOR_NOTIFY: acpi_pad_handle_notify(handle); - acpi_bus_generate_netlink_event(device->pnp.device_class, - dev_name(&device->dev), event, 0); + acpi_bus_generate_netlink_event(adev->pnp.device_class, + dev_name(&adev->dev), event, 0); break; default: pr_warn("Unsupported event [0x%x]\n", event); @@ -433,36 +424,33 @@ static void acpi_pad_notify(acpi_handle handle, u32 event, } } -static int acpi_pad_add(struct acpi_device *device) +static int acpi_pad_probe(struct platform_device *pdev) { + struct acpi_device *adev = ACPI_COMPANION(&pdev->dev); acpi_status status; - strcpy(acpi_device_name(device), ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME); - strcpy(acpi_device_class(device), ACPI_PROCESSOR_AGGREGATOR_CLASS); + strscpy(acpi_device_name(adev), ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME); + strscpy(acpi_device_class(adev), ACPI_PROCESSOR_AGGREGATOR_CLASS); - if (acpi_pad_add_sysfs(device)) - return -ENODEV; + status = acpi_install_notify_handler(adev->handle, + ACPI_DEVICE_NOTIFY, acpi_pad_notify, adev); - status = acpi_install_notify_handler(device->handle, - ACPI_DEVICE_NOTIFY, acpi_pad_notify, device); - if (ACPI_FAILURE(status)) { - acpi_pad_remove_sysfs(device); + if (ACPI_FAILURE(status)) return -ENODEV; - } return 0; } -static int acpi_pad_remove(struct acpi_device *device) +static void acpi_pad_remove(struct platform_device *pdev) { + struct acpi_device *adev = ACPI_COMPANION(&pdev->dev); + mutex_lock(&isolated_cpus_lock); acpi_pad_idle_cpus(0); mutex_unlock(&isolated_cpus_lock); - acpi_remove_notify_handler(device->handle, + acpi_remove_notify_handler(adev->handle, ACPI_DEVICE_NOTIFY, acpi_pad_notify); - acpi_pad_remove_sysfs(device); - return 0; } static const struct acpi_device_id pad_device_ids[] = { @@ -471,13 +459,13 @@ static const struct acpi_device_id pad_device_ids[] = { }; MODULE_DEVICE_TABLE(acpi, pad_device_ids); -static struct acpi_driver acpi_pad_driver = { - .name = "processor_aggregator", - .class = ACPI_PROCESSOR_AGGREGATOR_CLASS, - .ids = pad_device_ids, - .ops = { - .add = acpi_pad_add, - .remove = acpi_pad_remove, +static struct platform_driver acpi_pad_driver = { + .probe = acpi_pad_probe, + .remove = acpi_pad_remove, + .driver = { + .dev_groups = acpi_pad_groups, + .name = "processor_aggregator", + .acpi_match_table = pad_device_ids, }, }; @@ -491,12 +479,12 @@ static int __init acpi_pad_init(void) if (power_saving_mwait_eax == 0) return -EINVAL; - return acpi_bus_register_driver(&acpi_pad_driver); + return platform_driver_register(&acpi_pad_driver); } static void __exit acpi_pad_exit(void) { - acpi_bus_unregister_driver(&acpi_pad_driver); + platform_driver_unregister(&acpi_pad_driver); } module_init(acpi_pad_init); diff --git a/drivers/acpi/acpi_pcc.c b/drivers/acpi/acpi_pcc.c new file mode 100644 index 000000000000..97064e943768 --- /dev/null +++ b/drivers/acpi/acpi_pcc.c @@ -0,0 +1,144 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Author: Sudeep Holla 
<sudeep.holla@arm.com> + * Copyright 2021 Arm Limited + * + * The PCC Address Space also referred as PCC Operation Region pertains to the + * region of PCC subspace that succeeds the PCC signature. The PCC Operation + * Region works in conjunction with the PCC Table(Platform Communications + * Channel Table). PCC subspaces that are marked for use as PCC Operation + * Regions must not be used as PCC subspaces for the standard ACPI features + * such as CPPC, RASF, PDTT and MPST. These standard features must always use + * the PCC Table instead. + * + * This driver sets up the PCC Address Space and installs an handler to enable + * handling of PCC OpRegion in the firmware. + * + */ +#include <linux/kernel.h> +#include <linux/acpi.h> +#include <linux/completion.h> +#include <linux/idr.h> +#include <linux/io.h> + +#include <acpi/pcc.h> + +/* + * Arbitrary retries in case the remote processor is slow to respond + * to PCC commands + */ +#define PCC_CMD_WAIT_RETRIES_NUM 500ULL + +struct pcc_data { + struct pcc_mbox_chan *pcc_chan; + struct completion done; + struct mbox_client cl; + struct acpi_pcc_info ctx; +}; + +static struct acpi_pcc_info pcc_ctx; + +static void pcc_rx_callback(struct mbox_client *cl, void *m) +{ + struct pcc_data *data = container_of(cl, struct pcc_data, cl); + + complete(&data->done); +} + +static acpi_status +acpi_pcc_address_space_setup(acpi_handle region_handle, u32 function, + void *handler_context, void **region_context) +{ + struct pcc_data *data; + struct acpi_pcc_info *ctx = handler_context; + struct pcc_mbox_chan *pcc_chan; + static acpi_status ret; + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + return AE_NO_MEMORY; + + data->cl.rx_callback = pcc_rx_callback; + data->cl.knows_txdone = true; + data->ctx.length = ctx->length; + data->ctx.subspace_id = ctx->subspace_id; + data->ctx.internal_buffer = ctx->internal_buffer; + + init_completion(&data->done); + data->pcc_chan = pcc_mbox_request_channel(&data->cl, ctx->subspace_id); + if (IS_ERR(data->pcc_chan)) { + pr_err("Failed to find PCC channel for subspace %d\n", + ctx->subspace_id); + ret = AE_NOT_FOUND; + goto err_free_data; + } + + pcc_chan = data->pcc_chan; + if (!pcc_chan->mchan->mbox->txdone_irq) { + pr_err("This channel-%d does not support interrupt.\n", + ctx->subspace_id); + ret = AE_SUPPORT; + goto err_free_channel; + } + + *region_context = data; + return AE_OK; + +err_free_channel: + pcc_mbox_free_channel(data->pcc_chan); +err_free_data: + kfree(data); + + return ret; +} + +static acpi_status +acpi_pcc_address_space_handler(u32 function, acpi_physical_address addr, + u32 bits, acpi_integer *value, + void *handler_context, void *region_context) +{ + int ret; + struct pcc_data *data = region_context; + u64 usecs_lat; + + reinit_completion(&data->done); + + /* Write to Shared Memory */ + memcpy_toio(data->pcc_chan->shmem, (void *)value, data->ctx.length); + + ret = mbox_send_message(data->pcc_chan->mchan, NULL); + if (ret < 0) + return AE_ERROR; + + /* + * pcc_chan->latency is just a Nominal value. In reality the remote + * processor could be much slower to reply. So add an arbitrary + * amount of wait on top of Nominal. 
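+ * For example, with a nominal latency of 100 us this amounts to
+ * 500 * 100 us = 50 ms of wait before the command is treated as timed out.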
+ */ + usecs_lat = PCC_CMD_WAIT_RETRIES_NUM * data->pcc_chan->latency; + ret = wait_for_completion_timeout(&data->done, + usecs_to_jiffies(usecs_lat)); + if (ret == 0) { + pr_err("PCC command executed timeout!\n"); + return AE_TIME; + } + + mbox_chan_txdone(data->pcc_chan->mchan, ret); + + memcpy_fromio(value, data->pcc_chan->shmem, data->ctx.length); + + return AE_OK; +} + +void __init acpi_init_pcc(void) +{ + acpi_status status; + + status = acpi_install_address_space_handler(ACPI_ROOT_OBJECT, + ACPI_ADR_SPACE_PLATFORM_COMM, + &acpi_pcc_address_space_handler, + &acpi_pcc_address_space_setup, + &pcc_ctx); + if (ACPI_FAILURE(status)) + pr_alert("OperationRegion handler could not be installed\n"); +} diff --git a/drivers/acpi/acpi_platform.c b/drivers/acpi/acpi_platform.c index 1f32caa87686..48d15dd785f6 100644 --- a/drivers/acpi/acpi_platform.c +++ b/drivers/acpi/acpi_platform.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * ACPI support for platform bus type. * @@ -5,13 +6,10 @@ * Authors: Mika Westerberg <mika.westerberg@linux.intel.com> * Mathias Nyman <mathias.nyman@linux.intel.com> * Rafael J. Wysocki <rafael.j.wysocki@intel.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. */ #include <linux/acpi.h> +#include <linux/bits.h> #include <linux/device.h> #include <linux/err.h> #include <linux/kernel.h> @@ -22,16 +20,55 @@ #include "internal.h" -ACPI_MODULE_NAME("platform"); +/* Exclude devices that have no _CRS resources provided */ +#define ACPI_ALLOW_WO_RESOURCES BIT(0) static const struct acpi_device_id forbidden_id_list[] = { + {"ACPI0009", 0}, /* IOxAPIC */ + {"ACPI000A", 0}, /* IOAPIC */ {"PNP0000", 0}, /* PIC */ {"PNP0100", 0}, /* Timer */ {"PNP0200", 0}, /* AT DMA Controller */ - {"ACPI0009", 0}, /* IOxAPIC */ - {"ACPI000A", 0}, /* IOAPIC */ - {"SMB0001", 0}, /* ACPI SMBUS virtual device */ - {"", 0}, + {ACPI_SMBUS_MS_HID, ACPI_ALLOW_WO_RESOURCES}, /* ACPI SMBUS virtual device */ + { } +}; + +static struct platform_device *acpi_platform_device_find_by_companion(struct acpi_device *adev) +{ + struct device *dev; + + dev = bus_find_device_by_acpi_dev(&platform_bus_type, adev); + return dev ? to_platform_device(dev) : NULL; +} + +static int acpi_platform_device_remove_notify(struct notifier_block *nb, + unsigned long value, void *arg) +{ + struct acpi_device *adev = arg; + struct platform_device *pdev; + + switch (value) { + case ACPI_RECONFIG_DEVICE_ADD: + /* Nothing to do here */ + break; + case ACPI_RECONFIG_DEVICE_REMOVE: + if (!acpi_device_enumerated(adev)) + break; + + pdev = acpi_platform_device_find_by_companion(adev); + if (!pdev) + break; + + platform_device_unregister(pdev); + put_device(&pdev->dev); + break; + } + + return NOTIFY_OK; +} + +static struct notifier_block acpi_platform_notifier = { + .notifier_call = acpi_platform_device_remove_notify, }; static void acpi_platform_fill_resource(struct acpi_device *adev, @@ -45,11 +82,20 @@ static void acpi_platform_fill_resource(struct acpi_device *adev, * If the device has parent we need to take its resources into * account as well because this device might consume part of those. 
*/ - parent = acpi_get_first_physical_node(adev->parent); + parent = acpi_get_first_physical_node(acpi_dev_parent(adev)); if (parent && dev_is_pci(parent)) dest->parent = pci_find_resource(to_pci_dev(parent), dest); } +static unsigned int acpi_platform_resource_count(struct acpi_resource *ares, void *data) +{ + bool *has_resources = data; + + *has_resources = true; + + return AE_CTRL_TERMINATE; +} + /** * acpi_create_platform_device - Create platform device for ACPI device node * @adev: ACPI device node to create a platform device for. @@ -62,10 +108,12 @@ static void acpi_platform_fill_resource(struct acpi_device *adev, * Name of the platform device will be the same as @adev's. */ struct platform_device *acpi_create_platform_device(struct acpi_device *adev, - struct property_entry *properties) + const struct property_entry *properties) { + struct acpi_device *parent = acpi_dev_parent(adev); struct platform_device *pdev = NULL; struct platform_device_info pdevinfo; + const struct acpi_device_id *match; struct resource_entry *rentry; struct list_head resource_list; struct resource *resources = NULL; @@ -75,18 +123,27 @@ struct platform_device *acpi_create_platform_device(struct acpi_device *adev, if (adev->physical_node_count) return NULL; - if (!acpi_match_device_ids(adev, forbidden_id_list)) - return ERR_PTR(-EINVAL); + match = acpi_match_acpi_device(forbidden_id_list, adev); + if (match) { + if (match->driver_data & ACPI_ALLOW_WO_RESOURCES) { + bool has_resources = false; + + acpi_walk_resources(adev->handle, METHOD_NAME__CRS, + acpi_platform_resource_count, &has_resources); + if (has_resources) + return ERR_PTR(-EINVAL); + } else { + return ERR_PTR(-EINVAL); + } + } INIT_LIST_HEAD(&resource_list); count = acpi_dev_get_resources(adev, &resource_list, NULL, NULL); - if (count < 0) { + if (count < 0) return NULL; - } else if (count > 0) { - resources = kcalloc(count, sizeof(struct resource), - GFP_KERNEL); + if (count > 0) { + resources = kcalloc(count, sizeof(*resources), GFP_KERNEL); if (!resources) { - dev_err(&adev->dev, "No memory for resources\n"); acpi_dev_free_resource_list(&resource_list); return ERR_PTR(-ENOMEM); } @@ -104,10 +161,9 @@ struct platform_device *acpi_create_platform_device(struct acpi_device *adev, * attached to it, that physical device should be the parent of the * platform device we are about to create. */ - pdevinfo.parent = adev->parent ? - acpi_get_first_physical_node(adev->parent) : NULL; + pdevinfo.parent = parent ? acpi_get_first_physical_node(parent) : NULL; pdevinfo.name = dev_name(&adev->dev); - pdevinfo.id = -1; + pdevinfo.id = PLATFORM_DEVID_NONE; pdevinfo.res = resources; pdevinfo.num_res = count; pdevinfo.fwnode = acpi_fwnode_handle(adev); @@ -133,3 +189,8 @@ struct platform_device *acpi_create_platform_device(struct acpi_device *adev, return pdev; } EXPORT_SYMBOL_GPL(acpi_create_platform_device); + +void __init acpi_platform_init(void) +{ + acpi_reconfig_notifier_register(&acpi_platform_notifier); +} diff --git a/drivers/acpi/acpi_pnp.c b/drivers/acpi/acpi_pnp.c index 67d97c0090a2..4ad88187dc7a 100644 --- a/drivers/acpi/acpi_pnp.c +++ b/drivers/acpi/acpi_pnp.c @@ -1,19 +1,18 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * ACPI support for PNP bus type * * Copyright (C) 2014, Intel Corporation * Authors: Zhang Rui <rui.zhang@intel.com> * Rafael J. 
Wysocki <rafael.j.wysocki@intel.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. */ #include <linux/acpi.h> #include <linux/module.h> #include <linux/ctype.h> +#include "internal.h" + static const struct acpi_device_id acpi_pnp_device_ids[] = { /* pata_isapnp */ {"PNP0600"}, /* Generic ESDI/IDE/ATA compatible hard disk controller */ @@ -121,8 +120,6 @@ static const struct acpi_device_id acpi_pnp_device_ids[] = { {"IBM0071"}, /* smsc-ircc2 */ {"SMCf010"}, - /* sb1000 */ - {"GIC1000"}, /* parport_pc */ {"PNP0400"}, /* Standard LPT Printer Port */ {"PNP0401"}, /* ECP Printer Port */ @@ -157,8 +154,6 @@ static const struct acpi_device_id acpi_pnp_device_ids[] = { {"BRI0A49"}, /* Boca Complete Ofc Communicator 14.4 Data-FAX */ {"BRI1400"}, /* Boca Research 33,600 ACF Modem */ {"BRI3400"}, /* Boca 33.6 Kbps Internal FD34FSVD */ - {"BRI0A49"}, /* Boca 33.6 Kbps Internal FD34FSVD */ - {"BDP3336"}, /* Best Data Products Inc. Smart One 336F PnP Modem */ {"CPI4050"}, /* Computer Peripherals Inc. EuroViVa CommCenter-33.6 SP PnP */ {"CTL3001"}, /* Creative Labs Phone Blaster 28.8 DSVD PnP Voice */ {"CTL3011"}, /* Creative Labs Modem Blaster 28.8 DSVD PnP Voice */ @@ -320,6 +315,9 @@ static bool matching_id(const char *idstr, const char *list_id) { int i; + if (strlen(idstr) != strlen(list_id)) + return false; + if (memcmp(idstr, list_id, 3)) return false; @@ -348,10 +346,24 @@ static bool acpi_pnp_match(const char *idstr, const struct acpi_device_id **matc return false; } +/* + * If one of the device IDs below is present in the list of device IDs of a + * given ACPI device object, the PNP scan handler will not attach to that + * object, because there is a proper non-PNP driver in the kernel for the + * device represented by it. + */ +static const struct acpi_device_id acpi_nonpnp_device_ids[] = { + {"INT3F0D"}, + {"INTC1080"}, + {"INTC1081"}, + {"INTC1099"}, + {""}, +}; + static int acpi_pnp_attach(struct acpi_device *adev, const struct acpi_device_id *id) { - return 1; + return !!acpi_match_device_ids(adev, acpi_nonpnp_device_ids); } static struct acpi_scan_handler acpi_pnp_handler = { diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c index fc447410ae4d..7ec1dc04fd11 100644 --- a/drivers/acpi/acpi_processor.c +++ b/drivers/acpi/acpi_processor.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * acpi_processor.c - ACPI processor enumeration support * @@ -7,38 +8,44 @@ * Copyright (C) 2004 Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> * Copyright (C) 2013, Intel Corporation * Rafael J. Wysocki <rafael.j.wysocki@intel.com> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation. 
*/ +#define pr_fmt(fmt) "ACPI: " fmt #include <linux/acpi.h> +#include <linux/cpu.h> #include <linux/device.h> +#include <linux/dmi.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> +#include <linux/platform_device.h> #include <acpi/processor.h> #include <asm/cpu.h> -#include "internal.h" - -#define _COMPONENT ACPI_PROCESSOR_COMPONENT +#include <xen/xen.h> -ACPI_MODULE_NAME("processor"); +#include "internal.h" DEFINE_PER_CPU(struct acpi_processor *, processors); EXPORT_PER_CPU_SYMBOL(processors); -/* -------------------------------------------------------------------------- - Errata Handling - -------------------------------------------------------------------------- */ - +/* Errata Handling */ struct acpi_processor_errata errata __read_mostly; EXPORT_SYMBOL_GPL(errata); +acpi_handle acpi_get_processor_handle(int cpu) +{ + struct acpi_processor *pr; + + pr = per_cpu(processors, cpu); + if (pr) + return pr->handle; + + return NULL; +} + static int acpi_processor_errata_piix4(struct pci_dev *dev) { u8 value1 = 0; @@ -54,19 +61,19 @@ static int acpi_processor_errata_piix4(struct pci_dev *dev) switch (dev->revision) { case 0: - ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4 A-step\n")); + dev_dbg(&dev->dev, "Found PIIX4 A-step\n"); break; case 1: - ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4 B-step\n")); + dev_dbg(&dev->dev, "Found PIIX4 B-step\n"); break; case 2: - ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4E\n")); + dev_dbg(&dev->dev, "Found PIIX4E\n"); break; case 3: - ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4M\n")); + dev_dbg(&dev->dev, "Found PIIX4M\n"); break; default: - ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found unknown PIIX4\n")); + dev_dbg(&dev->dev, "Found unknown PIIX4\n"); break; } @@ -82,7 +89,7 @@ static int acpi_processor_errata_piix4(struct pci_dev *dev) * PIIX4 models. */ errata.piix4.throttle = 1; - /* fall through*/ + fallthrough; case 2: /* PIIX4E */ case 3: /* PIIX4M */ @@ -132,11 +139,9 @@ static int acpi_processor_errata_piix4(struct pci_dev *dev) } if (errata.piix4.bmisx) - ACPI_DEBUG_PRINT((ACPI_DB_INFO, - "Bus master activity detection (BM-IDE) erratum enabled\n")); + dev_dbg(&dev->dev, "Bus master activity detection (BM-IDE) erratum enabled\n"); if (errata.piix4.fdma) - ACPI_DEBUG_PRINT((ACPI_DB_INFO, - "Type-F DMA livelock erratum (C3 disabled)\n")); + dev_dbg(&dev->dev, "Type-F DMA livelock erratum (C3 disabled)\n"); return 0; } @@ -160,70 +165,109 @@ static int acpi_processor_errata(void) return result; } -/* -------------------------------------------------------------------------- - Initialization - -------------------------------------------------------------------------- */ - -#ifdef CONFIG_ACPI_HOTPLUG_CPU -int __weak acpi_map_cpu(acpi_handle handle, - phys_cpuid_t physid, u32 acpi_id, int *pcpu) +/* Create a platform device to represent a CPU frequency control mechanism. */ +static void cpufreq_add_device(const char *name) { - return -ENODEV; + struct platform_device *pdev; + + pdev = platform_device_register_simple(name, PLATFORM_DEVID_NONE, NULL, 0); + if (IS_ERR(pdev)) + pr_info("%s device creation failed: %pe\n", name, pdev); } -int __weak acpi_unmap_cpu(int cpu) +#ifdef CONFIG_X86 +/* Check presence of Processor Clocking Control by searching for \_SB.PCCH. 
*/ +static void __init acpi_pcc_cpufreq_init(void) { - return -ENODEV; + acpi_status status; + acpi_handle handle; + + status = acpi_get_handle(NULL, "\\_SB", &handle); + if (ACPI_FAILURE(status)) + return; + + if (acpi_has_method(handle, "PCCH")) + cpufreq_add_device("pcc-cpufreq"); } +#else +static void __init acpi_pcc_cpufreq_init(void) {} +#endif /* CONFIG_X86 */ -int __weak arch_register_cpu(int cpu) +/* Initialization */ +static DEFINE_PER_CPU(void *, processor_device_array); + +static int acpi_processor_set_per_cpu(struct acpi_processor *pr, + struct acpi_device *device) { - return -ENODEV; -} + BUG_ON(pr->id >= nr_cpu_ids); + + /* + * Buggy BIOS check. + * ACPI id of processors can be reported wrongly by the BIOS. + * Don't trust it blindly + */ + if (per_cpu(processor_device_array, pr->id) != NULL && + per_cpu(processor_device_array, pr->id) != device) { + dev_warn(&device->dev, + "BIOS reported wrong ACPI id %d for the processor\n", + pr->id); + return -EINVAL; + } + /* + * processor_device_array is not cleared on errors to allow buggy BIOS + * checks. + */ + per_cpu(processor_device_array, pr->id) = device; + per_cpu(processors, pr->id) = pr; -void __weak arch_unregister_cpu(int cpu) {} + return 0; +} -static int acpi_processor_hotadd_init(struct acpi_processor *pr) +#ifdef CONFIG_ACPI_HOTPLUG_CPU +static int acpi_processor_hotadd_init(struct acpi_processor *pr, + struct acpi_device *device) { - unsigned long long sta; - acpi_status status; int ret; if (invalid_phys_cpuid(pr->phys_id)) return -ENODEV; - status = acpi_evaluate_integer(pr->handle, "_STA", NULL, &sta); - if (ACPI_FAILURE(status) || !(sta & ACPI_STA_DEVICE_PRESENT)) - return -ENODEV; - cpu_maps_update_begin(); - cpu_hotplug_begin(); + cpus_write_lock(); ret = acpi_map_cpu(pr->handle, pr->phys_id, pr->acpi_id, &pr->id); if (ret) goto out; + ret = acpi_processor_set_per_cpu(pr, device); + if (ret) { + acpi_unmap_cpu(pr->id); + goto out; + } + ret = arch_register_cpu(pr->id); if (ret) { + /* Leave the processor device array in place to detect buggy bios */ + per_cpu(processors, pr->id) = NULL; acpi_unmap_cpu(pr->id); goto out; } /* - * CPU got hot-added, but cpu_data is not initialized yet. Set a flag - * to delay cpu_idle/throttling initialization and do it when the CPU - * gets online for the first time. + * CPU got hot-added, but cpu_data is not initialized yet. Do + * cpu_idle/throttling initialization when the CPU gets online for + * the first time. 
*/ pr_info("CPU%d has been hot-added\n", pr->id); - pr->flags.need_hotplug_init = 1; out: - cpu_hotplug_done(); + cpus_write_unlock(); cpu_maps_update_done(); return ret; } #else -static inline int acpi_processor_hotadd_init(struct acpi_processor *pr) +static inline int acpi_processor_hotadd_init(struct acpi_processor *pr, + struct acpi_device *device) { return -ENODEV; } @@ -231,13 +275,14 @@ static inline int acpi_processor_hotadd_init(struct acpi_processor *pr) static int acpi_processor_get_info(struct acpi_device *device) { - union acpi_object object = { 0 }; + union acpi_object object = { .processor = { 0 } }; struct acpi_buffer buffer = { sizeof(union acpi_object), &object }; struct acpi_processor *pr = acpi_driver_data(device); int device_declaration = 0; acpi_status status = AE_OK; static int cpu0_initialized; unsigned long long value; + int ret; acpi_processor_errata(); @@ -247,11 +292,9 @@ static int acpi_processor_get_info(struct acpi_device *device) */ if (acpi_gbl_FADT.pm2_control_block && acpi_gbl_FADT.pm2_control_length) { pr->flags.bm_control = 1; - ACPI_DEBUG_PRINT((ACPI_DB_INFO, - "Bus mastering arbitration control present\n")); + dev_dbg(&device->dev, "Bus mastering arbitration control present\n"); } else - ACPI_DEBUG_PRINT((ACPI_DB_INFO, - "No bus mastering arbitration control\n")); + dev_dbg(&device->dev, "No bus mastering arbitration control\n"); if (!strcmp(acpi_device_hid(device), ACPI_PROCESSOR_OBJECT_HID)) { /* Declared with "Processor" statement; match ProcessorID */ @@ -267,7 +310,6 @@ static int acpi_processor_get_info(struct acpi_device *device) } else { /* * Declared with "Device" statement; match _UID. - * Note that we don't handle string _UIDs yet. */ status = acpi_evaluate_integer(pr->handle, METHOD_NAME__UID, NULL, &value); @@ -282,41 +324,54 @@ static int acpi_processor_get_info(struct acpi_device *device) } if (acpi_duplicate_processor_id(pr->acpi_id)) { - dev_err(&device->dev, - "Failed to get unique processor _UID (0x%x)\n", - pr->acpi_id); + if (pr->acpi_id == 0xff) + dev_info_once(&device->dev, + "Entry not well-defined, consider updating BIOS\n"); + else + dev_err(&device->dev, + "Failed to get unique processor _UID (0x%x)\n", + pr->acpi_id); return -ENODEV; } pr->phys_id = acpi_get_phys_id(pr->handle, device_declaration, pr->acpi_id); if (invalid_phys_cpuid(pr->phys_id)) - acpi_handle_debug(pr->handle, "failed to get CPU physical ID.\n"); + dev_dbg(&device->dev, "Failed to get CPU physical ID.\n"); pr->id = acpi_map_cpuid(pr->phys_id, pr->acpi_id); - if (!cpu0_initialized && !acpi_has_cpu_in_madt()) { + if (!cpu0_initialized) { cpu0_initialized = 1; /* * Handle UP system running SMP kernel, with no CPU * entry in MADT */ - if (invalid_logical_cpuid(pr->id) && (num_online_cpus() == 1)) + if (!acpi_has_cpu_in_madt() && invalid_logical_cpuid(pr->id) && + (num_online_cpus() == 1)) pr->id = 0; + /* + * Check availability of Processor Performance Control by + * looking at the presence of the _PCT object under the first + * processor definition. + */ + if (acpi_has_method(pr->handle, "_PCT")) + cpufreq_add_device("acpi-cpufreq"); } /* - * Extra Processor objects may be enumerated on MP systems with - * less than the max # of CPUs. They should be ignored _iff - * they are physically not present. - * - * NOTE: Even if the processor has a cpuid, it may not be present - * because cpuid <-> apicid mapping is persistent now. + * This code is not called unless we know the CPU is present and + * enabled. 
The two paths are: + * a) Initially present CPUs on architectures that do not defer + * their arch_register_cpu() calls until this point. + * b) Hotplugged CPUs (enabled bit in _STA has transitioned from not + * enabled to enabled) */ - if (invalid_logical_cpuid(pr->id) || !cpu_present(pr->id)) { - int ret = acpi_processor_hotadd_init(pr); - if (ret) - return ret; - } + if (!get_cpu_device(pr->id)) + ret = acpi_processor_hotadd_init(pr, device); + else + ret = acpi_processor_set_per_cpu(pr, device); + if (ret) + return ret; /* * On some boxes several processors use the same processor bus id. @@ -328,11 +383,10 @@ static int acpi_processor_get_info(struct acpi_device *device) * CPU+CPU ID. */ sprintf(acpi_device_bid(device), "CPU%X", pr->id); - ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Processor [%d:%d]\n", pr->id, - pr->acpi_id)); + dev_dbg(&device->dev, "Processor [%d:%d]\n", pr->id, pr->acpi_id); if (!object.processor.pblk_address) - ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No PBLK (NULL address)\n")); + dev_dbg(&device->dev, "No PBLK (NULL address)\n"); else if (object.processor.pblk_length != 6) dev_err(&device->dev, "Invalid PBLK length [%d]\n", object.processor.pblk_length); @@ -362,8 +416,6 @@ static int acpi_processor_get_info(struct acpi_device *device) * (cpu_data(cpu)) values, like CPU feature flags, family, model, etc. * Such things have to be put in and set up by the processor driver's .probe(). */ -static DEFINE_PER_CPU(void *, processor_device_array); - static int acpi_processor_add(struct acpi_device *device, const struct acpi_device_id *id) { @@ -371,6 +423,9 @@ static int acpi_processor_add(struct acpi_device *device, struct device *dev; int result = 0; + if (!acpi_device_is_enabled(device)) + return -ENODEV; + pr = kzalloc(sizeof(struct acpi_processor), GFP_KERNEL); if (!pr) return -ENOMEM; @@ -381,45 +436,23 @@ static int acpi_processor_add(struct acpi_device *device, } pr->handle = device->handle; - strcpy(acpi_device_name(device), ACPI_PROCESSOR_DEVICE_NAME); - strcpy(acpi_device_class(device), ACPI_PROCESSOR_CLASS); + strscpy(acpi_device_name(device), ACPI_PROCESSOR_DEVICE_NAME); + strscpy(acpi_device_class(device), ACPI_PROCESSOR_CLASS); device->driver_data = pr; result = acpi_processor_get_info(device); if (result) /* Processor is not physically present or unavailable */ - return 0; - - BUG_ON(pr->id >= nr_cpu_ids); - - /* - * Buggy BIOS check. - * ACPI id of processors can be reported wrongly by the BIOS. - * Don't trust it blindly - */ - if (per_cpu(processor_device_array, pr->id) != NULL && - per_cpu(processor_device_array, pr->id) != device) { - dev_warn(&device->dev, - "BIOS reported wrong ACPI id %d for the processor\n", - pr->id); - /* Give up, but do not abort the namespace scan. */ - goto err; - } - /* - * processor_device_array is not cleared on errors to allow buggy BIOS - * checks. 
- */ - per_cpu(processor_device_array, pr->id) = device; - per_cpu(processors, pr->id) = pr; + goto err_clear_driver_data; dev = get_cpu_device(pr->id); if (!dev) { result = -ENODEV; - goto err; + goto err_clear_per_cpu; } result = acpi_bind_one(dev, device); if (result) - goto err; + goto err_clear_per_cpu; pr->dev = dev; @@ -430,21 +463,19 @@ static int acpi_processor_add(struct acpi_device *device, dev_err(dev, "Processor driver could not be attached\n"); acpi_unbind_one(dev); - err: - free_cpumask_var(pr->throttling.shared_cpu_map); - device->driver_data = NULL; + err_clear_per_cpu: per_cpu(processors, pr->id) = NULL; + err_clear_driver_data: + device->driver_data = NULL; + free_cpumask_var(pr->throttling.shared_cpu_map); err_free_pr: kfree(pr); return result; } #ifdef CONFIG_ACPI_HOTPLUG_CPU -/* -------------------------------------------------------------------------- - Removal - -------------------------------------------------------------------------- */ - -static void acpi_processor_remove(struct acpi_device *device) +/* Removal */ +static void acpi_processor_post_eject(struct acpi_device *device) { struct acpi_processor *pr; @@ -466,18 +497,18 @@ static void acpi_processor_remove(struct acpi_device *device) device_release_driver(pr->dev); acpi_unbind_one(pr->dev); - /* Clean up. */ - per_cpu(processor_device_array, pr->id) = NULL; - per_cpu(processors, pr->id) = NULL; - cpu_maps_update_begin(); - cpu_hotplug_begin(); + cpus_write_lock(); /* Remove the CPU. */ arch_unregister_cpu(pr->id); acpi_unmap_cpu(pr->id); - cpu_hotplug_done(); + /* Clean up. */ + per_cpu(processor_device_array, pr->id) = NULL; + per_cpu(processors, pr->id) = NULL; + + cpus_write_unlock(); cpu_maps_update_done(); try_offline_node(cpu_to_node(pr->id)); @@ -488,54 +519,110 @@ static void acpi_processor_remove(struct acpi_device *device) } #endif /* CONFIG_ACPI_HOTPLUG_CPU */ -#ifdef CONFIG_X86 -static bool acpi_hwp_native_thermal_lvt_set; -static acpi_status __init acpi_hwp_native_thermal_lvt_osc(acpi_handle handle, - u32 lvl, - void *context, - void **rv) +#ifdef CONFIG_ARCH_MIGHT_HAVE_ACPI_PDC +bool __init processor_physically_present(acpi_handle handle) { - u8 sb_uuid_str[] = "4077A616-290C-47BE-9EBD-D87058713953"; - u32 capbuf[2]; + int cpuid, type; + u32 acpi_id; + acpi_status status; + acpi_object_type acpi_type; + unsigned long long tmp; + union acpi_object object = {}; + struct acpi_buffer buffer = { sizeof(union acpi_object), &object }; + + status = acpi_get_type(handle, &acpi_type); + if (ACPI_FAILURE(status)) + return false; + + switch (acpi_type) { + case ACPI_TYPE_PROCESSOR: + status = acpi_evaluate_object(handle, NULL, NULL, &buffer); + if (ACPI_FAILURE(status)) + return false; + acpi_id = object.processor.proc_id; + break; + case ACPI_TYPE_DEVICE: + status = acpi_evaluate_integer(handle, METHOD_NAME__UID, + NULL, &tmp); + if (ACPI_FAILURE(status)) + return false; + acpi_id = tmp; + break; + default: + return false; + } + + if (xen_initial_domain()) + /* + * When running as a Xen dom0 the number of processors Linux + * sees can be different from the real number of processors on + * the system, and we still need to execute _PDC or _OSC for + * all of them. + */ + return xen_processor_present(acpi_id); + + type = (acpi_type == ACPI_TYPE_DEVICE) ? 
1 : 0; + cpuid = acpi_get_cpuid(handle, type, acpi_id); + + return !invalid_logical_cpuid(cpuid); +} + +/* vendor specific UUID indicating an Intel platform */ +static u8 sb_uuid_str[] = "4077A616-290C-47BE-9EBD-D87058713953"; + +static acpi_status __init acpi_processor_osc(acpi_handle handle, u32 lvl, + void *context, void **rv) +{ + u32 capbuf[2] = {}; struct acpi_osc_context osc_context = { .uuid_str = sb_uuid_str, .rev = 1, .cap.length = 8, .cap.pointer = capbuf, }; + acpi_status status; - if (acpi_hwp_native_thermal_lvt_set) - return AE_CTRL_TERMINATE; + if (!processor_physically_present(handle)) + return AE_OK; - capbuf[0] = 0x0000; - capbuf[1] = 0x1000; /* set bit 12 */ + arch_acpi_set_proc_cap_bits(&capbuf[OSC_SUPPORT_DWORD]); - if (ACPI_SUCCESS(acpi_run_osc(handle, &osc_context))) { - if (osc_context.ret.pointer && osc_context.ret.length > 1) { - u32 *capbuf_ret = osc_context.ret.pointer; + status = acpi_run_osc(handle, &osc_context); + if (ACPI_FAILURE(status)) + return status; - if (capbuf_ret[1] & 0x1000) { - acpi_handle_info(handle, - "_OSC native thermal LVT Acked\n"); - acpi_hwp_native_thermal_lvt_set = true; - } - } - kfree(osc_context.ret.pointer); - } + kfree(osc_context.ret.pointer); return AE_OK; } -void __init acpi_early_processor_osc(void) +static bool __init acpi_early_processor_osc(void) { - if (boot_cpu_has(X86_FEATURE_HWP)) { - acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT, - ACPI_UINT32_MAX, - acpi_hwp_native_thermal_lvt_osc, - NULL, NULL, NULL); - acpi_get_devices(ACPI_PROCESSOR_DEVICE_HID, - acpi_hwp_native_thermal_lvt_osc, - NULL, NULL); + acpi_status status; + + acpi_proc_quirk_mwait_check(); + + status = acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT, + ACPI_UINT32_MAX, acpi_processor_osc, NULL, + NULL, NULL); + if (ACPI_FAILURE(status)) + return false; + + status = acpi_get_devices(ACPI_PROCESSOR_DEVICE_HID, acpi_processor_osc, + NULL, NULL); + if (ACPI_FAILURE(status)) + return false; + + return true; +} + +void __init acpi_early_processor_control_setup(void) +{ + if (acpi_early_processor_osc()) { + pr_debug("_OSC evaluated successfully for all CPUs\n"); + } else { + pr_debug("_OSC evaluation for CPUs failed, trying _PDC\n"); + acpi_early_processor_set_pdc(); } } #endif @@ -556,7 +643,7 @@ static struct acpi_scan_handler processor_handler = { .ids = processor_device_ids, .attach = acpi_processor_add, #ifdef CONFIG_ACPI_HOTPLUG_CPU - .detach = acpi_processor_remove, + .post_eject = acpi_processor_post_eject, #endif .hotplug = { .enabled = true, @@ -703,4 +790,209 @@ void __init acpi_processor_init(void) acpi_processor_check_duplicates(); acpi_scan_add_handler_with_hotplug(&processor_handler, "processor"); acpi_scan_add_handler(&processor_container_handler); + acpi_pcc_cpufreq_init(); +} + +#ifdef CONFIG_ACPI_PROCESSOR_CSTATE +/** + * acpi_processor_claim_cst_control - Request _CST control from the platform. + */ +bool acpi_processor_claim_cst_control(void) +{ + static bool cst_control_claimed; + acpi_status status; + + if (!acpi_gbl_FADT.cst_control || cst_control_claimed) + return true; + + status = acpi_os_write_port(acpi_gbl_FADT.smi_command, + acpi_gbl_FADT.cst_control, 8); + if (ACPI_FAILURE(status)) { + pr_warn("ACPI: Failed to claim processor _CST control\n"); + return false; + } + + cst_control_claimed = true; + return true; +} +EXPORT_SYMBOL_NS_GPL(acpi_processor_claim_cst_control, "ACPI_PROCESSOR_IDLE"); + +/** + * acpi_processor_evaluate_cst - Evaluate the processor _CST control method. 
+ * @handle: ACPI handle of the processor object containing the _CST. + * @cpu: The numeric ID of the target CPU. + * @info: Object write the C-states information into. + * + * Extract the C-state information for the given CPU from the output of the _CST + * control method under the corresponding ACPI processor object (or processor + * device object) and populate @info with it. + * + * If any ACPI_ADR_SPACE_FIXED_HARDWARE C-states are found, invoke + * acpi_processor_ffh_cstate_probe() to verify them and update the + * cpu_cstate_entry data for @cpu. + */ +int acpi_processor_evaluate_cst(acpi_handle handle, u32 cpu, + struct acpi_processor_power *info) +{ + struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; + union acpi_object *cst; + acpi_status status; + u64 count; + int last_index = 0; + int i, ret = 0; + + status = acpi_evaluate_object(handle, "_CST", NULL, &buffer); + if (ACPI_FAILURE(status)) { + acpi_handle_debug(handle, "No _CST\n"); + return -ENODEV; + } + + cst = buffer.pointer; + + /* There must be at least 2 elements. */ + if (!cst || cst->type != ACPI_TYPE_PACKAGE || cst->package.count < 2) { + acpi_handle_warn(handle, "Invalid _CST output\n"); + ret = -EFAULT; + goto end; + } + + count = cst->package.elements[0].integer.value; + + /* Validate the number of C-states. */ + if (count < 1 || count != cst->package.count - 1) { + acpi_handle_warn(handle, "Inconsistent _CST data\n"); + ret = -EFAULT; + goto end; + } + + for (i = 1; i <= count; i++) { + union acpi_object *element; + union acpi_object *obj; + struct acpi_power_register *reg; + struct acpi_processor_cx cx; + + /* + * If there is not enough space for all C-states, skip the + * excess ones and log a warning. + */ + if (last_index >= ACPI_PROCESSOR_MAX_POWER - 1) { + acpi_handle_warn(handle, + "No room for more idle states (limit: %d)\n", + ACPI_PROCESSOR_MAX_POWER - 1); + break; + } + + memset(&cx, 0, sizeof(cx)); + + element = &cst->package.elements[i]; + if (element->type != ACPI_TYPE_PACKAGE) { + acpi_handle_info(handle, "_CST C%d type(%x) is not package, skip...\n", + i, element->type); + continue; + } + + if (element->package.count != 4) { + acpi_handle_info(handle, "_CST C%d package count(%d) is not 4, skip...\n", + i, element->package.count); + continue; + } + + obj = &element->package.elements[0]; + + if (obj->type != ACPI_TYPE_BUFFER) { + acpi_handle_info(handle, "_CST C%d package element[0] type(%x) is not buffer, skip...\n", + i, obj->type); + continue; + } + + reg = (struct acpi_power_register *)obj->buffer.pointer; + + obj = &element->package.elements[1]; + if (obj->type != ACPI_TYPE_INTEGER) { + acpi_handle_info(handle, "_CST C[%d] package element[1] type(%x) is not integer, skip...\n", + i, obj->type); + continue; + } + + cx.type = obj->integer.value; + /* + * There are known cases in which the _CST output does not + * contain C1, so if the type of the first state found is not + * C1, leave an empty slot for C1 to be filled in later. + */ + if (i == 1 && cx.type != ACPI_STATE_C1) + last_index = 1; + + cx.address = reg->address; + cx.index = last_index + 1; + + if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) { + if (!acpi_processor_ffh_cstate_probe(cpu, &cx, reg)) { + /* + * In the majority of cases _CST describes C1 as + * a FIXED_HARDWARE C-state, but if the command + * line forbids using MWAIT, use CSTATE_HALT for + * C1 regardless. 
+ */ + if (cx.type == ACPI_STATE_C1 && + boot_option_idle_override == IDLE_NOMWAIT) { + cx.entry_method = ACPI_CSTATE_HALT; + snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT"); + } else { + cx.entry_method = ACPI_CSTATE_FFH; + } + } else if (cx.type == ACPI_STATE_C1) { + /* + * In the special case of C1, FIXED_HARDWARE can + * be handled by executing the HLT instruction. + */ + cx.entry_method = ACPI_CSTATE_HALT; + snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT"); + } else { + acpi_handle_info(handle, "_CST C%d declares FIXED_HARDWARE C-state but not supported in hardware, skip...\n", + i); + continue; + } + } else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) { + cx.entry_method = ACPI_CSTATE_SYSTEMIO; + snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI IOPORT 0x%x", + cx.address); + } else { + acpi_handle_info(handle, "_CST C%d space_id(%x) neither FIXED_HARDWARE nor SYSTEM_IO, skip...\n", + i, reg->space_id); + continue; + } + + if (cx.type == ACPI_STATE_C1) + cx.valid = 1; + + obj = &element->package.elements[2]; + if (obj->type != ACPI_TYPE_INTEGER) { + acpi_handle_info(handle, "_CST C%d package element[2] type(%x) not integer, skip...\n", + i, obj->type); + continue; + } + + cx.latency = obj->integer.value; + + obj = &element->package.elements[3]; + if (obj->type != ACPI_TYPE_INTEGER) { + acpi_handle_info(handle, "_CST C%d package element[3] type(%x) not integer, skip...\n", + i, obj->type); + continue; + } + + memcpy(&info->states[++last_index], &cx, sizeof(cx)); + } + + acpi_handle_debug(handle, "Found %d idle states\n", last_index); + + info->count = last_index; + +end: + kfree(buffer.pointer); + + return ret; } +EXPORT_SYMBOL_NS_GPL(acpi_processor_evaluate_cst, "ACPI_PROCESSOR_IDLE"); +#endif /* CONFIG_ACPI_PROCESSOR_CSTATE */ diff --git a/drivers/acpi/acpi_tad.c b/drivers/acpi/acpi_tad.c index 33a4bcdaa4d7..6d870d97ada6 100644 --- a/drivers/acpi/acpi_tad.c +++ b/drivers/acpi/acpi_tad.c @@ -27,6 +27,7 @@ #include <linux/pm_runtime.h> #include <linux/suspend.h> +MODULE_DESCRIPTION("ACPI Time and Alarm (TAD) Device Driver"); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Rafael J. 
Wysocki"); @@ -89,19 +90,18 @@ static int acpi_tad_set_real_time(struct device *dev, struct acpi_tad_rt *rt) args[0].buffer.pointer = (u8 *)rt; args[0].buffer.length = sizeof(*rt); - pm_runtime_get_sync(dev); + PM_RUNTIME_ACQUIRE(dev, pm); + if (PM_RUNTIME_ACQUIRE_ERR(&pm)) + return -ENXIO; status = acpi_evaluate_integer(handle, "_SRT", &arg_list, &retval); - - pm_runtime_put_sync(dev); - if (ACPI_FAILURE(status) || retval) return -EIO; return 0; } -static int acpi_tad_get_real_time(struct device *dev, struct acpi_tad_rt *rt) +static int acpi_tad_evaluate_grt(struct device *dev, struct acpi_tad_rt *rt) { acpi_handle handle = ACPI_HANDLE(dev); struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER }; @@ -110,12 +110,7 @@ static int acpi_tad_get_real_time(struct device *dev, struct acpi_tad_rt *rt) acpi_status status; int ret = -EIO; - pm_runtime_get_sync(dev); - status = acpi_evaluate_object(handle, "_GRT", NULL, &output); - - pm_runtime_put_sync(dev); - if (ACPI_FAILURE(status)) goto out_free; @@ -138,6 +133,21 @@ out_free: return ret; } +static int acpi_tad_get_real_time(struct device *dev, struct acpi_tad_rt *rt) +{ + int ret; + + PM_RUNTIME_ACQUIRE(dev, pm); + if (PM_RUNTIME_ACQUIRE_ERR(&pm)) + return -ENXIO; + + ret = acpi_tad_evaluate_grt(dev, rt); + if (ret) + return ret; + + return 0; +} + static char *acpi_tad_rt_next_field(char *s, int *val) { char *p; @@ -232,12 +242,12 @@ static ssize_t time_show(struct device *dev, struct device_attribute *attr, if (ret) return ret; - return sprintf(buf, "%u:%u:%u:%u:%u:%u:%d:%u\n", + return sysfs_emit(buf, "%u:%u:%u:%u:%u:%u:%d:%u\n", rt.year, rt.month, rt.day, rt.hour, rt.minute, rt.second, rt.tz, rt.daylight); } -static DEVICE_ATTR(time, S_IRUSR | S_IWUSR, time_show, time_store); +static DEVICE_ATTR_RW(time); static struct attribute *acpi_tad_time_attrs[] = { &dev_attr_time.attr, @@ -265,12 +275,11 @@ static int acpi_tad_wake_set(struct device *dev, char *method, u32 timer_id, args[0].integer.value = timer_id; args[1].integer.value = value; - pm_runtime_get_sync(dev); + PM_RUNTIME_ACQUIRE(dev, pm); + if (PM_RUNTIME_ACQUIRE_ERR(&pm)) + return -ENXIO; status = acpi_evaluate_integer(handle, method, &arg_list, &retval); - - pm_runtime_put_sync(dev); - if (ACPI_FAILURE(status) || retval) return -EIO; @@ -313,12 +322,11 @@ static ssize_t acpi_tad_wake_read(struct device *dev, char *buf, char *method, args[0].integer.value = timer_id; - pm_runtime_get_sync(dev); + PM_RUNTIME_ACQUIRE(dev, pm); + if (PM_RUNTIME_ACQUIRE_ERR(&pm)) + return -ENXIO; status = acpi_evaluate_integer(handle, method, &arg_list, &retval); - - pm_runtime_put_sync(dev); - if (ACPI_FAILURE(status)) return -EIO; @@ -369,12 +377,11 @@ static int acpi_tad_clear_status(struct device *dev, u32 timer_id) args[0].integer.value = timer_id; - pm_runtime_get_sync(dev); + PM_RUNTIME_ACQUIRE(dev, pm); + if (PM_RUNTIME_ACQUIRE_ERR(&pm)) + return -ENXIO; status = acpi_evaluate_integer(handle, "_CWS", &arg_list, &retval); - - pm_runtime_put_sync(dev); - if (ACPI_FAILURE(status) || retval) return -EIO; @@ -410,12 +417,11 @@ static ssize_t acpi_tad_status_read(struct device *dev, char *buf, u32 timer_id) args[0].integer.value = timer_id; - pm_runtime_get_sync(dev); + PM_RUNTIME_ACQUIRE(dev, pm); + if (PM_RUNTIME_ACQUIRE_ERR(&pm)) + return -ENXIO; status = acpi_evaluate_integer(handle, "_GWS", &arg_list, &retval); - - pm_runtime_put_sync(dev); - if (ACPI_FAILURE(status)) return -EIO; @@ -427,7 +433,7 @@ static ssize_t caps_show(struct device *dev, struct device_attribute *attr, { struct 
acpi_tad_driver_data *dd = dev_get_drvdata(dev); - return sprintf(buf, "0x%02X\n", dd->capabilities); + return sysfs_emit(buf, "0x%02X\n", dd->capabilities); } static DEVICE_ATTR_RO(caps); @@ -446,7 +452,7 @@ static ssize_t ac_alarm_show(struct device *dev, struct device_attribute *attr, return acpi_tad_alarm_read(dev, buf, ACPI_TAD_AC_TIMER); } -static DEVICE_ATTR(ac_alarm, S_IRUSR | S_IWUSR, ac_alarm_show, ac_alarm_store); +static DEVICE_ATTR_RW(ac_alarm); static ssize_t ac_policy_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) @@ -462,7 +468,7 @@ static ssize_t ac_policy_show(struct device *dev, struct device_attribute *attr, return acpi_tad_policy_read(dev, buf, ACPI_TAD_AC_TIMER); } -static DEVICE_ATTR(ac_policy, S_IRUSR | S_IWUSR, ac_policy_show, ac_policy_store); +static DEVICE_ATTR_RW(ac_policy); static ssize_t ac_status_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) @@ -478,7 +484,7 @@ static ssize_t ac_status_show(struct device *dev, struct device_attribute *attr, return acpi_tad_status_read(dev, buf, ACPI_TAD_AC_TIMER); } -static DEVICE_ATTR(ac_status, S_IRUSR | S_IWUSR, ac_status_show, ac_status_store); +static DEVICE_ATTR_RW(ac_status); static struct attribute *acpi_tad_attrs[] = { &dev_attr_caps.attr, @@ -505,7 +511,7 @@ static ssize_t dc_alarm_show(struct device *dev, struct device_attribute *attr, return acpi_tad_alarm_read(dev, buf, ACPI_TAD_DC_TIMER); } -static DEVICE_ATTR(dc_alarm, S_IRUSR | S_IWUSR, dc_alarm_show, dc_alarm_store); +static DEVICE_ATTR_RW(dc_alarm); static ssize_t dc_policy_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) @@ -521,7 +527,7 @@ static ssize_t dc_policy_show(struct device *dev, struct device_attribute *attr, return acpi_tad_policy_read(dev, buf, ACPI_TAD_DC_TIMER); } -static DEVICE_ATTR(dc_policy, S_IRUSR | S_IWUSR, dc_policy_show, dc_policy_store); +static DEVICE_ATTR_RW(dc_policy); static ssize_t dc_status_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) @@ -537,7 +543,7 @@ static ssize_t dc_status_show(struct device *dev, struct device_attribute *attr, return acpi_tad_status_read(dev, buf, ACPI_TAD_DC_TIMER); } -static DEVICE_ATTR(dc_status, S_IRUSR | S_IWUSR, dc_status_show, dc_status_store); +static DEVICE_ATTR_RW(dc_status); static struct attribute *acpi_tad_dc_attrs[] = { &dev_attr_dc_alarm.attr, @@ -554,30 +560,34 @@ static int acpi_tad_disable_timer(struct device *dev, u32 timer_id) return acpi_tad_wake_set(dev, "_STV", timer_id, ACPI_TAD_WAKE_DISABLED); } -static int acpi_tad_remove(struct platform_device *pdev) +static void acpi_tad_remove(struct platform_device *pdev) { struct device *dev = &pdev->dev; + acpi_handle handle = ACPI_HANDLE(dev); struct acpi_tad_driver_data *dd = dev_get_drvdata(dev); device_init_wakeup(dev, false); - pm_runtime_get_sync(dev); + if (dd->capabilities & ACPI_TAD_RT) + sysfs_remove_group(&dev->kobj, &acpi_tad_time_attr_group); if (dd->capabilities & ACPI_TAD_DC_WAKE) sysfs_remove_group(&dev->kobj, &acpi_tad_dc_attr_group); sysfs_remove_group(&dev->kobj, &acpi_tad_attr_group); - acpi_tad_disable_timer(dev, ACPI_TAD_AC_TIMER); - acpi_tad_clear_status(dev, ACPI_TAD_AC_TIMER); - if (dd->capabilities & ACPI_TAD_DC_WAKE) { - acpi_tad_disable_timer(dev, ACPI_TAD_DC_TIMER); - acpi_tad_clear_status(dev, ACPI_TAD_DC_TIMER); + scoped_guard(pm_runtime_noresume, dev) { + acpi_tad_disable_timer(dev, ACPI_TAD_AC_TIMER); + acpi_tad_clear_status(dev, 
ACPI_TAD_AC_TIMER); + if (dd->capabilities & ACPI_TAD_DC_WAKE) { + acpi_tad_disable_timer(dev, ACPI_TAD_DC_TIMER); + acpi_tad_clear_status(dev, ACPI_TAD_DC_TIMER); + } } - pm_runtime_put_sync(dev); + pm_runtime_suspend(dev); pm_runtime_disable(dev); - return 0; + acpi_remove_cmos_rtc_space_handler(handle); } static int acpi_tad_probe(struct platform_device *pdev) @@ -589,6 +599,11 @@ static int acpi_tad_probe(struct platform_device *pdev) unsigned long long caps; int ret; + ret = acpi_install_cmos_rtc_space_handler(handle); + if (ret < 0) { + dev_info(dev, "Unable to install space handler\n"); + return -ENODEV; + } /* * Initialization failure messages are mostly about firmware issues, so * print them at the "info" level. @@ -596,22 +611,27 @@ static int acpi_tad_probe(struct platform_device *pdev) status = acpi_evaluate_integer(handle, "_GCP", NULL, &caps); if (ACPI_FAILURE(status)) { dev_info(dev, "Unable to get capabilities\n"); - return -ENODEV; + ret = -ENODEV; + goto remove_handler; } if (!(caps & ACPI_TAD_AC_WAKE)) { dev_info(dev, "Unsupported capabilities\n"); - return -ENODEV; + ret = -ENODEV; + goto remove_handler; } if (!acpi_has_method(handle, "_PRW")) { dev_info(dev, "Missing _PRW\n"); - return -ENODEV; + ret = -ENODEV; + goto remove_handler; } dd = devm_kzalloc(dev, sizeof(*dd), GFP_KERNEL); - if (!dd) - return -ENOMEM; + if (!dd) { + ret = -ENOMEM; + goto remove_handler; + } dd->capabilities = caps; dev_set_drvdata(dev, dd); @@ -624,7 +644,7 @@ static int acpi_tad_probe(struct platform_device *pdev) */ device_init_wakeup(dev, true); dev_pm_set_driver_flags(dev, DPM_FLAG_SMART_SUSPEND | - DPM_FLAG_LEAVE_SUSPENDED); + DPM_FLAG_MAY_SKIP_RESUME); /* * The platform bus type layer tells the ACPI PM domain powers up the * device, so set the runtime PM status of it to "active". @@ -653,6 +673,11 @@ static int acpi_tad_probe(struct platform_device *pdev) fail: acpi_tad_remove(pdev); + /* Don't fallthrough because cmos rtc space handler is removed in acpi_tad_remove() */ + return ret; + +remove_handler: + acpi_remove_cmos_rtc_space_handler(handle); return ret; } diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c index f0b52266b3ac..be8e7e18abca 100644 --- a/drivers/acpi/acpi_video.c +++ b/drivers/acpi/acpi_video.c @@ -1,25 +1,14 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /* * video.c - ACPI Video Driver * * Copyright (C) 2004 Luming Yu <luming.yu@intel.com> * Copyright (C) 2004 Bruno Ducrot <ducrot@poupinou.org> * Copyright (C) 2006 Thomas Tuttle <linux-kernel@ttuttle.net> - * - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or (at - * your option) any later version. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. 
- * - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ +#define pr_fmt(fmt) "ACPI: video: " fmt + #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> @@ -38,17 +27,13 @@ #include <linux/acpi.h> #include <acpi/video.h> #include <linux/uaccess.h> - -#define PREFIX "ACPI: " +#include <linux/string_choices.h> #define ACPI_VIDEO_BUS_NAME "Video Bus" #define ACPI_VIDEO_DEVICE_NAME "Video Device" #define MAX_NAME_LEN 20 -#define _COMPONENT ACPI_VIDEO_COMPONENT -ACPI_MODULE_NAME("video"); - MODULE_AUTHOR("Bruno Ducrot"); MODULE_DESCRIPTION("ACPI Video Driver"); MODULE_LICENSE("GPL"); @@ -63,9 +48,6 @@ module_param(brightness_switch_enabled, bool, 0644); static bool allow_duplicates; module_param(allow_duplicates, bool, 0644); -static int disable_backlight_sysfs_if = -1; -module_param(disable_backlight_sysfs_if, int, 0444); - #define REPORT_OUTPUT_KEY_EVENTS 0x01 #define REPORT_BRIGHTNESS_KEY_EVENTS 0x02 static int report_key_events = -1; @@ -73,6 +55,12 @@ module_param(report_key_events, int, 0644); MODULE_PARM_DESC(report_key_events, "0: none, 1: output changes, 2: brightness changes, 3: all"); +static int hw_changes_brightness = -1; +module_param(hw_changes_brightness, int, 0644); +MODULE_PARM_DESC(hw_changes_brightness, + "Set this to 1 on buggy hw which changes the brightness itself when " + "a hotkey is pressed: -1: auto, 0: normal 1: hw-changes-brightness"); + /* * Whether the struct acpi_video_device_attrib::device_id_scheme bit should be * assumed even if not actually set. @@ -80,17 +68,17 @@ MODULE_PARM_DESC(report_key_events, static bool device_id_scheme = false; module_param(device_id_scheme, bool, 0444); -static int only_lcd = -1; +static int only_lcd; module_param(only_lcd, int, 0444); +static bool may_report_brightness_keys; static int register_count; static DEFINE_MUTEX(register_count_mutex); static DEFINE_MUTEX(video_list_lock); static LIST_HEAD(video_bus_head); static int acpi_video_bus_add(struct acpi_device *device); -static int acpi_video_bus_remove(struct acpi_device *device); -static void acpi_video_bus_notify(struct acpi_device *device, u32 event); -void acpi_video_detect_exit(void); +static void acpi_video_bus_remove(struct acpi_device *device); +static void acpi_video_bus_notify(acpi_handle handle, u32 event, void *data); /* * Indices in the _BCL method response: the first two items are special, @@ -117,7 +105,6 @@ static struct acpi_driver acpi_video_bus = { .ops = { .add = acpi_video_bus_add, .remove = acpi_video_bus_remove, - .notify = acpi_video_bus_notify, }, }; @@ -267,8 +254,7 @@ static const struct backlight_ops acpi_backlight_ops = { static int video_get_max_state(struct thermal_cooling_device *cooling_dev, unsigned long *state) { - struct acpi_device *device = cooling_dev->devdata; - struct acpi_video_device *video = acpi_driver_data(device); + struct acpi_video_device *video = cooling_dev->devdata; *state = video->brightness->count - ACPI_VIDEO_FIRST_LEVEL - 1; return 0; @@ -277,8 +263,7 @@ static int video_get_max_state(struct thermal_cooling_device *cooling_dev, static int video_get_cur_state(struct thermal_cooling_device *cooling_dev, unsigned long *state) { - struct acpi_device *device = cooling_dev->devdata; - struct acpi_video_device *video = acpi_driver_data(device); + struct acpi_video_device *video = cooling_dev->devdata; unsigned long long level; int offset; @@ -297,8 +282,7 @@ static int video_get_cur_state(struct thermal_cooling_device *cooling_dev, static int video_set_cur_state(struct 
thermal_cooling_device *cooling_dev, unsigned long state) { - struct acpi_device *device = cooling_dev->devdata; - struct acpi_video_device *video = acpi_driver_data(device); + struct acpi_video_device *video = cooling_dev->devdata; int level; if (state >= video->brightness->count - ACPI_VIDEO_FIRST_LEVEL) @@ -333,11 +317,11 @@ acpi_video_device_lcd_query_levels(acpi_handle handle, *levels = NULL; status = acpi_evaluate_object(handle, "_BCL", NULL, &buffer); - if (!ACPI_SUCCESS(status)) + if (ACPI_FAILURE(status)) return status; obj = (union acpi_object *)buffer.pointer; if (!obj || (obj->type != ACPI_TYPE_PACKAGE)) { - printk(KERN_ERR PREFIX "Invalid _BCL data\n"); + acpi_handle_info(handle, "Invalid _BCL data\n"); status = -EFAULT; goto err; } @@ -361,7 +345,7 @@ acpi_video_device_lcd_set_level(struct acpi_video_device *device, int level) status = acpi_execute_simple_method(device->dev->handle, "_BCM", level); if (ACPI_FAILURE(status)) { - ACPI_ERROR((AE_INFO, "Evaluating _BCM failed")); + acpi_handle_info(device->dev->handle, "_BCM evaluation failed\n"); return -EIO; } @@ -375,7 +359,7 @@ acpi_video_device_lcd_set_level(struct acpi_video_device *device, int level) return 0; } - ACPI_ERROR((AE_INFO, "Current brightness invalid")); + acpi_handle_info(device->dev->handle, "Current brightness invalid\n"); return -EINVAL; } @@ -391,14 +375,6 @@ static int video_set_bqc_offset(const struct dmi_system_id *d) return 0; } -static int video_disable_backlight_sysfs_if( - const struct dmi_system_id *d) -{ - if (disable_backlight_sysfs_if == -1) - disable_backlight_sysfs_if = 1; - return 0; -} - static int video_set_device_id_scheme(const struct dmi_system_id *d) { device_id_scheme = true; @@ -418,6 +394,14 @@ static int video_set_report_key_events(const struct dmi_system_id *id) return 0; } +static int video_hw_changes_brightness( + const struct dmi_system_id *d) +{ + if (hw_changes_brightness == -1) + hw_changes_brightness = 1; + return 0; +} + static const struct dmi_system_id video_dmi_table[] = { /* * Broken _BQC workaround http://bugzilla.kernel.org/show_bug.cgi?id=13121 @@ -464,40 +448,6 @@ static const struct dmi_system_id video_dmi_table[] = { }, /* - * Some machines have a broken acpi-video interface for brightness - * control, but still need an acpi_video_device_lcd_set_level() call - * on resume to turn the backlight power on. We Enable backlight - * control on these systems, but do not register a backlight sysfs - * as brightness control does not work. - */ - { - /* https://bugzilla.kernel.org/show_bug.cgi?id=21012 */ - .callback = video_disable_backlight_sysfs_if, - .ident = "Toshiba Portege R700", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), - DMI_MATCH(DMI_PRODUCT_NAME, "PORTEGE R700"), - }, - }, - { - /* https://bugs.freedesktop.org/show_bug.cgi?id=82634 */ - .callback = video_disable_backlight_sysfs_if, - .ident = "Toshiba Portege R830", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), - DMI_MATCH(DMI_PRODUCT_NAME, "PORTEGE R830"), - }, - }, - { - /* https://bugzilla.kernel.org/show_bug.cgi?id=21012 */ - .callback = video_disable_backlight_sysfs_if, - .ident = "Toshiba Satellite R830", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), - DMI_MATCH(DMI_PRODUCT_NAME, "SATELLITE R830"), - }, - }, - /* * Some machine's _DOD IDs don't have bit 31(Device ID Scheme) set * but the IDs actually follow the Device ID Scheme. 
*/ @@ -542,6 +492,39 @@ static const struct dmi_system_id video_dmi_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "Vostro V131"), }, }, + { + .callback = video_set_report_key_events, + .driver_data = (void *)((uintptr_t)REPORT_BRIGHTNESS_KEY_EVENTS), + .ident = "Dell Vostro 3350", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 3350"), + }, + }, + { + .callback = video_set_report_key_events, + .driver_data = (void *)((uintptr_t)REPORT_BRIGHTNESS_KEY_EVENTS), + .ident = "COLORFUL X15 AT 23", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "COLORFUL"), + DMI_MATCH(DMI_PRODUCT_NAME, "X15 AT 23"), + }, + }, + /* + * Some machines change the brightness themselves when a brightness + * hotkey gets pressed, despite us telling them not to. In this case + * acpi_video_device_notify() should only call backlight_force_update( + * BACKLIGHT_UPDATE_HOTKEY) and not do anything else. + */ + { + /* https://bugzilla.kernel.org/show_bug.cgi?id=204077 */ + .callback = video_hw_changes_brightness, + .ident = "Packard Bell EasyNote MZ35", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Packard Bell"), + DMI_MATCH(DMI_PRODUCT_NAME, "EasyNote MZ35"), + }, + }, {} }; @@ -562,7 +545,7 @@ acpi_video_bqc_value_to_level(struct acpi_video_device *device, ACPI_VIDEO_FIRST_LEVEL - 1 - bqc_value; level = device->brightness->levels[bqc_value + - ACPI_VIDEO_FIRST_LEVEL]; + ACPI_VIDEO_FIRST_LEVEL]; } else { level = bqc_value; } @@ -606,9 +589,8 @@ acpi_video_device_lcd_get_level_current(struct acpi_video_device *device, * BQC returned an invalid level. * Stop using it. */ - ACPI_WARNING((AE_INFO, - "%s returned an invalid level", - buf)); + acpi_handle_info(device->dev->handle, + "%s returned an invalid level", buf); device->cap._BQC = device->cap._BCQ = 0; } else { /* @@ -619,7 +601,8 @@ acpi_video_device_lcd_get_level_current(struct acpi_video_device *device, * ACPI video backlight still works w/ buggy _BQC. * http://bugzilla.kernel.org/show_bug.cgi?id=12233 */ - ACPI_WARNING((AE_INFO, "Evaluating %s failed", buf)); + acpi_handle_info(device->dev->handle, + "%s evaluation failed", buf); device->cap._BQC = device->cap._BCQ = 0; } } @@ -628,43 +611,62 @@ acpi_video_device_lcd_get_level_current(struct acpi_video_device *device, return 0; } +/** + * acpi_video_device_EDID() - Get EDID from ACPI _DDC + * @device: video output device (LCD, CRT, ..) + * @edid: address for returned EDID pointer + * @length: _DDC length to request (must be a multiple of 128) + * + * Get EDID from ACPI _DDC. On success, a pointer to the EDID data is written + * to the @edid address, and the length of the EDID is returned. The caller is + * responsible for freeing the edid pointer. + * + * Return the length of EDID (positive value) on success or error (negative + * value). 
+ */ static int -acpi_video_device_EDID(struct acpi_video_device *device, - union acpi_object **edid, ssize_t length) +acpi_video_device_EDID(struct acpi_video_device *device, void **edid, int length) { - int status; + acpi_status status; struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; union acpi_object *obj; union acpi_object arg0 = { ACPI_TYPE_INTEGER }; struct acpi_object_list args = { 1, &arg0 }; - + int ret; *edid = NULL; if (!device) return -ENODEV; - if (length == 128) - arg0.integer.value = 1; - else if (length == 256) - arg0.integer.value = 2; - else + if (!length || (length % 128)) return -EINVAL; + arg0.integer.value = length / 128; + status = acpi_evaluate_object(device->dev->handle, "_DDC", &args, &buffer); if (ACPI_FAILURE(status)) return -ENODEV; obj = buffer.pointer; - if (obj && obj->type == ACPI_TYPE_BUFFER) - *edid = obj; - else { - printk(KERN_ERR PREFIX "Invalid _DDC data\n"); - status = -EFAULT; - kfree(obj); + /* + * Some buggy implementations incorrectly return the EDID buffer in an ACPI package. + * In this case, extract the buffer from the package. + */ + if (obj && obj->type == ACPI_TYPE_PACKAGE && obj->package.count == 1) + obj = &obj->package.elements[0]; + + if (obj && obj->type == ACPI_TYPE_BUFFER) { + *edid = kmemdup(obj->buffer.pointer, obj->buffer.length, GFP_KERNEL); + ret = *edid ? obj->buffer.length : -ENOMEM; + } else { + acpi_handle_debug(device->dev->handle, + "Invalid _DDC data for length %d\n", length); + ret = -EFAULT; } - return status; + kfree(buffer.pointer); + return ret; } /* bus */ @@ -683,9 +685,13 @@ acpi_video_device_EDID(struct acpi_video_device *device, * event notify code. * lcd_flag : * 0. The system BIOS should automatically control the brightness level - * of the LCD when the power changes from AC to DC + * of the LCD when: + * - the power changes from AC to DC (ACPI appendix B) + * - a brightness hotkey gets pressed (implied by Win7/8 backlight docs) * 1. The system BIOS should NOT automatically control the brightness - * level of the LCD when the power changes from AC to DC. + * level of the LCD when: + * - the power changes from AC to DC (ACPI appendix B) + * - a brightness hotkey gets pressed (implied by Win7/8 backlight docs) * Return Value: * -EINVAL wrong arg. 
*/ @@ -807,10 +813,9 @@ int acpi_video_get_levels(struct acpi_device *device, int result = 0; u32 value; - if (!ACPI_SUCCESS(acpi_video_device_lcd_query_levels(device->handle, - &obj))) { - ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Could not query available " - "LCD brightness level\n")); + if (ACPI_FAILURE(acpi_video_device_lcd_query_levels(device->handle, &obj))) { + acpi_handle_debug(device->handle, + "Could not query available LCD brightness level\n"); result = -ENODEV; goto out; } @@ -822,7 +827,6 @@ int acpi_video_get_levels(struct acpi_device *device, br = kzalloc(sizeof(*br), GFP_KERNEL); if (!br) { - printk(KERN_ERR "can't allocate memory\n"); result = -ENOMEM; goto out; } @@ -843,7 +847,7 @@ int acpi_video_get_levels(struct acpi_device *device, for (i = 0; i < obj->package.count; i++) { o = (union acpi_object *)&obj->package.elements[i]; if (o->type != ACPI_TYPE_INTEGER) { - printk(KERN_ERR PREFIX "Invalid data\n"); + acpi_handle_info(device->handle, "Invalid data\n"); continue; } value = (u32) o->integer.value; @@ -880,7 +884,8 @@ int acpi_video_get_levels(struct acpi_device *device, br->levels[i] = br->levels[i - level_ac_battery]; count += level_ac_battery; } else if (level_ac_battery > ACPI_VIDEO_FIRST_LEVEL) - ACPI_ERROR((AE_INFO, "Too many duplicates in _BCL package")); + acpi_handle_info(device->handle, + "Too many duplicates in _BCL package"); /* Check if the _BCL package is in a reversed order */ if (max_level == br->levels[ACPI_VIDEO_FIRST_LEVEL]) { @@ -890,8 +895,8 @@ int acpi_video_get_levels(struct acpi_device *device, sizeof(br->levels[ACPI_VIDEO_FIRST_LEVEL]), acpi_video_cmp_level, NULL); } else if (max_level != br->levels[count - 1]) - ACPI_ERROR((AE_INFO, - "Found unordered _BCL package")); + acpi_handle_info(device->handle, + "Found unordered _BCL package"); br->count = count; *dev_br = br; @@ -923,7 +928,7 @@ acpi_video_init_brightness(struct acpi_video_device *device) int i, max_level = 0; unsigned long long level, level_old; struct acpi_video_device_brightness *br = NULL; - int result = -EINVAL; + int result; result = acpi_video_get_levels(device->dev, &br, &max_level); if (result) @@ -969,9 +974,9 @@ set_level: if (result) goto out_free_levels; - ACPI_DEBUG_PRINT((ACPI_DB_INFO, - "found %d brightness levels\n", - br->count - ACPI_VIDEO_FIRST_LEVEL)); + acpi_handle_debug(device->dev->handle, "found %d brightness levels\n", + br->count - ACPI_VIDEO_FIRST_LEVEL); + return 0; out_free_levels: @@ -1003,7 +1008,8 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device) if (acpi_has_method(device->dev->handle, "_BQC")) { device->cap._BQC = 1; } else if (acpi_has_method(device->dev->handle, "_BCQ")) { - printk(KERN_WARNING FW_BUG "_BCQ is used instead of _BQC\n"); + acpi_handle_info(device->dev->handle, + "_BCQ is used instead of _BQC\n"); device->cap._BCQ = 1; } @@ -1063,8 +1069,7 @@ static int acpi_video_bus_check(struct acpi_video_bus *video) /* Does this device support video switching? 
*/ if (video->cap._DOS || video->cap._DOD) { if (!video->cap._DOS) { - printk(KERN_WARNING FW_BUG - "ACPI(%s) defines _DOD but not _DOS\n", + pr_info(FW_BUG "ACPI(%s) defines _DOD but not _DOS\n", acpi_device_bid(video->device)); } video->flags.multihead = 1; @@ -1124,28 +1129,28 @@ acpi_video_get_device_type(struct acpi_video_bus *video, return 0; } -static int -acpi_video_bus_get_one_device(struct acpi_device *device, - struct acpi_video_bus *video) +static int acpi_video_bus_get_one_device(struct acpi_device *device, void *arg) { - unsigned long long device_id; - int status, device_type; - struct acpi_video_device *data; + struct acpi_video_bus *video = arg; struct acpi_video_device_attrib *attribute; + struct acpi_video_device *data; + unsigned long long device_id; + acpi_status status; + int device_type; - status = - acpi_evaluate_integer(device->handle, "_ADR", NULL, &device_id); - /* Some device omits _ADR, we skip them instead of fail */ + status = acpi_evaluate_integer(device->handle, "_ADR", NULL, &device_id); + /* Skip devices without _ADR instead of failing. */ if (ACPI_FAILURE(status)) - return 0; + goto exit; data = kzalloc(sizeof(struct acpi_video_device), GFP_KERNEL); - if (!data) + if (!data) { + dev_dbg(&device->dev, "Cannot attach\n"); return -ENOMEM; + } - strcpy(acpi_device_name(device), ACPI_VIDEO_DEVICE_NAME); - strcpy(acpi_device_class(device), ACPI_VIDEO_CLASS); - device->driver_data = data; + strscpy(acpi_device_name(device), ACPI_VIDEO_DEVICE_NAME); + strscpy(acpi_device_class(device), ACPI_VIDEO_CLASS); data->device_id = device_id; data->video = video; @@ -1197,11 +1202,16 @@ acpi_video_bus_get_one_device(struct acpi_device *device, acpi_video_device_bind(video, data); acpi_video_device_find_cap(data); + if (data->cap._BCM && data->cap._BCL) + may_report_brightness_keys = true; + mutex_lock(&video->device_list_lock); list_add_tail(&data->entry, &video->video_device_list); mutex_unlock(&video->device_list_lock); - return status; +exit: + video->child_count++; + return 0; } /* @@ -1252,7 +1262,8 @@ acpi_video_device_bind(struct acpi_video_bus *video, ids = &video->attached_array[i]; if (device->device_id == (ids->value.int_val & 0xffff)) { ids->bind_info = device; - ACPI_DEBUG_PRINT((ACPI_DB_INFO, "device_bind %d\n", i)); + acpi_handle_debug(video->device->handle, "%s: %d\n", + __func__, i); } } } @@ -1304,20 +1315,22 @@ static int acpi_video_device_enumerate(struct acpi_video_bus *video) return AE_NOT_EXIST; status = acpi_evaluate_object(video->device->handle, "_DOD", NULL, &buffer); - if (!ACPI_SUCCESS(status)) { - ACPI_EXCEPTION((AE_INFO, status, "Evaluating _DOD")); + if (ACPI_FAILURE(status)) { + acpi_handle_info(video->device->handle, + "_DOD evaluation failed: %s\n", + acpi_format_exception(status)); return status; } dod = buffer.pointer; if (!dod || (dod->type != ACPI_TYPE_PACKAGE)) { - ACPI_EXCEPTION((AE_INFO, status, "Invalid _DOD data")); + acpi_handle_info(video->device->handle, "Invalid _DOD data\n"); status = -EFAULT; goto out; } - ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d video heads in _DOD\n", - dod->package.count)); + acpi_handle_debug(video->device->handle, "Found %d video heads in _DOD\n", + dod->package.count); active_list = kcalloc(1 + dod->package.count, sizeof(struct acpi_video_enumerated_device), @@ -1332,15 +1345,18 @@ static int acpi_video_device_enumerate(struct acpi_video_bus *video) obj = &dod->package.elements[i]; if (obj->type != ACPI_TYPE_INTEGER) { - printk(KERN_ERR PREFIX - "Invalid _DOD data in element %d\n", i); + 
acpi_handle_info(video->device->handle, + "Invalid _DOD data in element %d\n", i); continue; } active_list[count].value.int_val = obj->integer.value; active_list[count].bind_info = NULL; - ACPI_DEBUG_PRINT((ACPI_DB_INFO, "dod element[%d] = %d\n", i, - (int)obj->integer.value)); + + acpi_handle_debug(video->device->handle, + "_DOD element[%d] = %d\n", i, + (int)obj->integer.value); + count++; } @@ -1370,7 +1386,7 @@ acpi_video_get_next_level(struct acpi_video_device *device, break; } } - /* Ajust level_current to closest available level */ + /* Adjust level_current to closest available level */ level_current += delta; for (i = ACPI_VIDEO_FIRST_LEVEL; i < device->brightness->count; i++) { l = device->brightness->levels[i]; @@ -1431,7 +1447,8 @@ acpi_video_switch_brightness(struct work_struct *work) out: if (result) - printk(KERN_ERR PREFIX "Failed to switch the brightness\n"); + acpi_handle_info(device->dev->handle, + "Failed to switch brightness\n"); } int acpi_video_get_edid(struct acpi_device *device, int type, int device_id, @@ -1439,9 +1456,7 @@ int acpi_video_get_edid(struct acpi_device *device, int type, int device_id, { struct acpi_video_bus *video; struct acpi_video_device *video_device; - union acpi_object *buffer = NULL; - acpi_status status; - int i, length; + int i, length, ret; if (!device || !acpi_driver_data(device)) return -EINVAL; @@ -1450,7 +1465,6 @@ int acpi_video_get_edid(struct acpi_device *device, int type, int device_id, for (i = 0; i < video->attached_count; i++) { video_device = video->attached_array[i].bind_info; - length = 256; if (!video_device) continue; @@ -1481,21 +1495,11 @@ int acpi_video_get_edid(struct acpi_device *device, int type, int device_id, continue; } - status = acpi_video_device_EDID(video_device, &buffer, length); - - if (ACPI_FAILURE(status) || !buffer || - buffer->type != ACPI_TYPE_BUFFER) { - length = 128; - status = acpi_video_device_EDID(video_device, &buffer, - length); - if (ACPI_FAILURE(status) || !buffer || - buffer->type != ACPI_TYPE_BUFFER) { - continue; - } + for (length = 512; length > 0; length -= 128) { + ret = acpi_video_device_EDID(video_device, edid, length); + if (ret > 0) + return ret; } - - *edid = buffer->buffer.pointer; - return length; } return -ENODEV; @@ -1506,9 +1510,6 @@ static int acpi_video_bus_get_devices(struct acpi_video_bus *video, struct acpi_device *device) { - int status = 0; - struct acpi_device *dev; - /* * There are systems where video module known to work fine regardless * of broken _DOD and ignoring returned value here doesn't cause @@ -1516,23 +1517,14 @@ acpi_video_bus_get_devices(struct acpi_video_bus *video, */ acpi_video_device_enumerate(video); - list_for_each_entry(dev, &device->children, node) { - - status = acpi_video_bus_get_one_device(dev, video); - if (status) { - dev_err(&dev->dev, "Can't attach device\n"); - break; - } - video->child_count++; - } - return status; + return acpi_dev_for_each_child(device, acpi_video_bus_get_one_device, video); } /* acpi_video interface */ /* * Win8 requires setting bit2 of _DOS to let firmware know it shouldn't - * preform any automatic brightness change on receiving a notification. + * perform any automatic brightness change on receiving a notification. */ static int acpi_video_bus_start_devices(struct acpi_video_bus *video) { @@ -1546,8 +1538,9 @@ static int acpi_video_bus_stop_devices(struct acpi_video_bus *video) acpi_osi_is_win8() ? 
0 : 1); } -static void acpi_video_bus_notify(struct acpi_device *device, u32 event) +static void acpi_video_bus_notify(acpi_handle handle, u32 event, void *data) { + struct acpi_device *device = data; struct acpi_video_bus *video = acpi_driver_data(device); struct input_dev *input; int keycode = 0; @@ -1581,8 +1574,8 @@ static void acpi_video_bus_notify(struct acpi_device *device, u32 event) break; default: - ACPI_DEBUG_PRINT((ACPI_DB_INFO, - "Unsupported event [0x%x]\n", event)); + acpi_handle_debug(device->handle, "Unsupported event [0x%x]\n", + event); break; } @@ -1596,8 +1589,6 @@ static void acpi_video_bus_notify(struct acpi_device *device, u32 event) input_report_key(input, keycode, 0); input_sync(input); } - - return; } static void brightness_switch_event(struct acpi_video_device *video_device, @@ -1625,6 +1616,14 @@ static void acpi_video_device_notify(acpi_handle handle, u32 event, void *data) bus = video_device->video; input = bus->input; + if (hw_changes_brightness > 0) { + if (video_device->backlight) + backlight_force_update(video_device->backlight, + BACKLIGHT_UPDATE_HOTKEY); + acpi_notifier_call_chain(device, event, 0); + return; + } + switch (event) { case ACPI_VIDEO_NOTIFY_CYCLE_BRIGHTNESS: /* Cycle brightness */ brightness_switch_event(video_device, event); @@ -1647,11 +1646,13 @@ static void acpi_video_device_notify(acpi_handle handle, u32 event, void *data) keycode = KEY_DISPLAY_OFF; break; default: - ACPI_DEBUG_PRINT((ACPI_DB_INFO, - "Unsupported event [0x%x]\n", event)); + acpi_handle_debug(handle, "Unsupported event [0x%x]\n", event); break; } + if (keycode) + may_report_brightness_keys = true; + acpi_notifier_call_chain(device, event, 0); if (keycode && (report_key_events & REPORT_BRIGHTNESS_KEY_EVENTS)) { @@ -1660,8 +1661,6 @@ static void acpi_video_device_notify(acpi_handle handle, u32 event, void *data) input_report_key(input, keycode, 0); input_sync(input); } - - return; } static int acpi_video_resume(struct notifier_block *nb, @@ -1672,24 +1671,23 @@ static int acpi_video_resume(struct notifier_block *nb, int i; switch (val) { - case PM_HIBERNATION_PREPARE: - case PM_SUSPEND_PREPARE: - case PM_RESTORE_PREPARE: - return NOTIFY_DONE; - } - - video = container_of(nb, struct acpi_video_bus, pm_nb); - - dev_info(&video->device->dev, "Restoring backlight state\n"); + case PM_POST_HIBERNATION: + case PM_POST_SUSPEND: + case PM_POST_RESTORE: + video = container_of(nb, struct acpi_video_bus, pm_nb); + + dev_info(&video->device->dev, "Restoring backlight state\n"); + + for (i = 0; i < video->attached_count; i++) { + video_device = video->attached_array[i].bind_info; + if (video_device && video_device->brightness) + acpi_video_device_lcd_set_level(video_device, + video_device->brightness->curr); + } - for (i = 0; i < video->attached_count; i++) { - video_device = video->attached_array[i].bind_info; - if (video_device && video_device->brightness) - acpi_video_device_lcd_set_level(video_device, - video_device->brightness->curr); + return NOTIFY_OK; } - - return NOTIFY_OK; + return NOTIFY_DONE; } static acpi_status @@ -1698,13 +1696,12 @@ acpi_video_bus_match(acpi_handle handle, u32 level, void *context, { struct acpi_device *device = context; struct acpi_device *sibling; - int result; if (handle == device->handle) return AE_CTRL_TERMINATE; - result = acpi_bus_get_device(handle, &sibling); - if (result) + sibling = acpi_fetch_acpi_dev(handle); + if (!sibling) return AE_OK; if (!strcmp(acpi_device_name(sibling), ACPI_VIDEO_BUS_NAME)) @@ -1727,20 +1724,17 @@ static void 
acpi_video_dev_register_backlight(struct acpi_video_device *device) if (result) return; - if (disable_backlight_sysfs_if > 0) - return; - name = kasprintf(GFP_KERNEL, "acpi_video%d", count); if (!name) return; count++; - acpi_get_parent(device->dev->handle, &acpi_parent); - - pdev = acpi_get_pci_dev(acpi_parent); - if (pdev) { - parent = &pdev->dev; - pci_dev_put(pdev); + if (ACPI_SUCCESS(acpi_get_parent(device->dev->handle, &acpi_parent))) { + pdev = acpi_get_pci_dev(acpi_parent); + if (pdev) { + parent = &pdev->dev; + pci_dev_put(pdev); + } } memset(&props, 0, sizeof(struct backlight_properties)); @@ -1765,8 +1759,8 @@ static void acpi_video_dev_register_backlight(struct acpi_video_device *device) device->backlight->props.brightness = acpi_video_get_brightness(device->backlight); - device->cooling_dev = thermal_cooling_device_register("LCD", - device->dev, &video_cooling_ops); + device->cooling_dev = thermal_cooling_device_register("LCD", device, + &video_cooling_ops); if (IS_ERR(device->cooling_dev)) { /* * Set cooling_dev to NULL so we don't crash trying to free it. @@ -1784,11 +1778,12 @@ static void acpi_video_dev_register_backlight(struct acpi_video_device *device) &device->cooling_dev->device.kobj, "thermal_cooling"); if (result) - printk(KERN_ERR PREFIX "Create sysfs link\n"); + pr_info("sysfs link creation failed\n"); + result = sysfs_create_link(&device->cooling_dev->device.kobj, &device->dev->dev.kobj, "device"); if (result) - printk(KERN_ERR PREFIX "Create sysfs link\n"); + pr_info("Reverse sysfs link creation failed\n"); } static void acpi_video_run_bcl_for_osi(struct acpi_video_bus *video) @@ -1827,8 +1822,6 @@ static int acpi_video_bus_register_backlight(struct acpi_video_bus *video) if (video->backlight_registered) return 0; - acpi_video_run_bcl_for_osi(video); - if (acpi_video_get_backlight_type() != acpi_backlight_video) return 0; @@ -1966,8 +1959,10 @@ static void acpi_video_bus_remove_notify_handler(struct acpi_video_bus *video) struct acpi_video_device *dev; mutex_lock(&video->device_list_lock); - list_for_each_entry(dev, &video->video_device_list, entry) + list_for_each_entry(dev, &video->video_device_list, entry) { acpi_video_dev_remove_notify_handler(dev); + cancel_delayed_work_sync(&dev->switch_brightness_work); + } mutex_unlock(&video->device_list_lock); acpi_video_bus_stop_devices(video); @@ -1994,15 +1989,16 @@ static int instance; static int acpi_video_bus_add(struct acpi_device *device) { struct acpi_video_bus *video; + bool auto_detect; int error; acpi_status status; status = acpi_walk_namespace(ACPI_TYPE_DEVICE, - device->parent->handle, 1, + acpi_dev_parent(device)->handle, 1, acpi_video_bus_match, NULL, device, NULL); if (status == AE_ALREADY_EXISTS) { - printk(KERN_WARNING FW_BUG + pr_info(FW_BUG "Duplicate ACPI video bus devices for the" " same VGA controller, please try module " "parameter \"video.allow_duplicates=1\"" @@ -2029,8 +2025,8 @@ static int acpi_video_bus_add(struct acpi_device *device) } video->device = device; - strcpy(acpi_device_name(device), ACPI_VIDEO_BUS_NAME); - strcpy(acpi_device_class(device), ACPI_VIDEO_CLASS); + strscpy(acpi_device_name(device), ACPI_VIDEO_BUS_NAME); + strscpy(acpi_device_class(device), ACPI_VIDEO_CLASS); device->driver_data = video; acpi_video_bus_find_cap(video); @@ -2045,20 +2041,54 @@ static int acpi_video_bus_add(struct acpi_device *device) if (error) goto err_put_video; - printk(KERN_INFO PREFIX "%s [%s] (multi-head: %s rom: %s post: %s)\n", + /* + * HP ZBook Fury 16 G10 requires ACPI video's child devices 
have _PS0 + * evaluated to have functional panel brightness control. + */ + acpi_device_fix_up_power_children(device); + + pr_info("%s [%s] (multi-head: %s rom: %s post: %s)\n", ACPI_VIDEO_DEVICE_NAME, acpi_device_bid(device), - video->flags.multihead ? "yes" : "no", - video->flags.rom ? "yes" : "no", - video->flags.post ? "yes" : "no"); + str_yes_no(video->flags.multihead), + str_yes_no(video->flags.rom), + str_yes_no(video->flags.post)); mutex_lock(&video_list_lock); list_add_tail(&video->entry, &video_bus_head); mutex_unlock(&video_list_lock); - acpi_video_bus_register_backlight(video); - acpi_video_bus_add_notify_handler(video); + /* + * If backlight-type auto-detection is used then a native backlight may + * show up later and this may change the result from video to native. + * Therefor normally the userspace visible /sys/class/backlight device + * gets registered separately by the GPU driver calling + * acpi_video_register_backlight() when an internal panel is detected. + * Register the backlight now when not using auto-detection, so that + * when the kernel cmdline or DMI-quirks are used the backlight will + * get registered even if acpi_video_register_backlight() is not called. + */ + acpi_video_run_bcl_for_osi(video); + if (__acpi_video_get_backlight_type(false, &auto_detect) == acpi_backlight_video && + !auto_detect) + acpi_video_bus_register_backlight(video); + + error = acpi_video_bus_add_notify_handler(video); + if (error) + goto err_del; + + error = acpi_dev_install_notify_handler(device, ACPI_DEVICE_NOTIFY, + acpi_video_bus_notify, device); + if (error) + goto err_remove; return 0; +err_remove: + acpi_video_bus_remove_notify_handler(video); +err_del: + mutex_lock(&video_list_lock); + list_del(&video->entry); + mutex_unlock(&video_list_lock); + acpi_video_bus_unregister_backlight(video); err_put_video: acpi_video_bus_put_devices(video); kfree(video->attached_array); @@ -2069,28 +2099,29 @@ err_free_video: return error; } -static int acpi_video_bus_remove(struct acpi_device *device) +static void acpi_video_bus_remove(struct acpi_device *device) { struct acpi_video_bus *video = NULL; if (!device || !acpi_driver_data(device)) - return -EINVAL; + return; video = acpi_driver_data(device); - acpi_video_bus_remove_notify_handler(video); - acpi_video_bus_unregister_backlight(video); - acpi_video_bus_put_devices(video); + acpi_dev_remove_notify_handler(device, ACPI_DEVICE_NOTIFY, + acpi_video_bus_notify); mutex_lock(&video_list_lock); list_del(&video->entry); mutex_unlock(&video_list_lock); + acpi_video_bus_remove_notify_handler(video); + acpi_video_bus_unregister_backlight(video); + acpi_video_bus_put_devices(video); + kfree(video->attached_array); kfree(video); - - return 0; } static int __init is_i740(struct pci_dev *dev) @@ -2124,25 +2155,6 @@ static int __init intel_opregion_present(void) return opregion; } -static bool dmi_is_desktop(void) -{ - const char *chassis_type; - - chassis_type = dmi_get_system_info(DMI_CHASSIS_TYPE); - if (!chassis_type) - return false; - - if (!strcmp(chassis_type, "3") || /* 3: Desktop */ - !strcmp(chassis_type, "4") || /* 4: Low Profile Desktop */ - !strcmp(chassis_type, "5") || /* 5: Pizza Box */ - !strcmp(chassis_type, "6") || /* 6: Mini Tower */ - !strcmp(chassis_type, "7") || /* 7: Tower */ - !strcmp(chassis_type, "11")) /* 11: Main Server Chassis */ - return true; - - return false; -} - int acpi_video_register(void) { int ret = 0; @@ -2151,25 +2163,11 @@ int acpi_video_register(void) if (register_count) { /* * if the function of 
acpi_video_register is already called, - * don't register the acpi_vide_bus again and return no error. + * don't register the acpi_video_bus again and return no error. */ goto leave; } - /* - * We're seeing a lot of bogus backlight interfaces on newer machines - * without a LCD such as desktops, servers and HDMI sticks. Checking - * the lcd flag fixes this, so enable this on any machines which are - * win8 ready (where we also prefer the native backlight driver, so - * normally the acpi_video code should not register there anyways). - */ - if (only_lcd == -1) { - if (dmi_is_desktop() && acpi_osi_is_win8()) - only_lcd = true; - else - only_lcd = false; - } - dmi_check_system(video_dmi_table); ret = acpi_bus_register_driver(&acpi_video_bus); @@ -2194,34 +2192,26 @@ void acpi_video_unregister(void) if (register_count) { acpi_bus_unregister_driver(&acpi_video_bus); register_count = 0; + may_report_brightness_keys = false; } mutex_unlock(®ister_count_mutex); } EXPORT_SYMBOL(acpi_video_unregister); -void acpi_video_unregister_backlight(void) +void acpi_video_register_backlight(void) { struct acpi_video_bus *video; - mutex_lock(®ister_count_mutex); - if (register_count) { - mutex_lock(&video_list_lock); - list_for_each_entry(video, &video_bus_head, entry) - acpi_video_bus_unregister_backlight(video); - mutex_unlock(&video_list_lock); - } - mutex_unlock(®ister_count_mutex); + mutex_lock(&video_list_lock); + list_for_each_entry(video, &video_bus_head, entry) + acpi_video_bus_register_backlight(video); + mutex_unlock(&video_list_lock); } +EXPORT_SYMBOL(acpi_video_register_backlight); bool acpi_video_handles_brightness_key_presses(void) { - bool have_video_busses; - - mutex_lock(&video_list_lock); - have_video_busses = !list_empty(&video_bus_head); - mutex_unlock(&video_list_lock); - - return have_video_busses && + return may_report_brightness_keys && (report_key_events & REPORT_BRIGHTNESS_KEY_EVENTS); } EXPORT_SYMBOL(acpi_video_handles_brightness_key_presses); @@ -2254,10 +2244,7 @@ static int __init acpi_video_init(void) static void __exit acpi_video_exit(void) { - acpi_video_detect_exit(); acpi_video_unregister(); - - return; } module_init(acpi_video_init); diff --git a/drivers/acpi/acpi_watchdog.c b/drivers/acpi/acpi_watchdog.c index 95600309ce42..14b24157799c 100644 --- a/drivers/acpi/acpi_watchdog.c +++ b/drivers/acpi/acpi_watchdog.c @@ -1,12 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * ACPI watchdog table parsing support. * * Copyright (C) 2016, Intel Corporation * Author: Mika Westerberg <mika.westerberg@linux.intel.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
*/ #define pr_fmt(fmt) "ACPI: watchdog: " fmt @@ -58,12 +55,14 @@ static bool acpi_watchdog_uses_rtc(const struct acpi_table_wdat *wdat) } #endif +static bool acpi_no_watchdog; + static const struct acpi_table_wdat *acpi_watchdog_get_wdat(void) { const struct acpi_table_wdat *wdat = NULL; acpi_status status; - if (acpi_disabled) + if (acpi_disabled || acpi_no_watchdog) return NULL; status = acpi_get_table(ACPI_SIG_WDAT, 0, @@ -74,6 +73,7 @@ static const struct acpi_table_wdat *acpi_watchdog_get_wdat(void) } if (acpi_watchdog_uses_rtc(wdat)) { + acpi_put_table((struct acpi_table_header *)wdat); pr_info("Skipping WDAT on this system because it uses RTC SRAM\n"); return NULL; } @@ -81,7 +81,7 @@ static const struct acpi_table_wdat *acpi_watchdog_get_wdat(void) return wdat; } -/** +/* * Returns true if this system should prefer ACPI based watchdog instead of * the native one (which are typically the same hardware). */ @@ -91,6 +91,14 @@ bool acpi_has_watchdog(void) } EXPORT_SYMBOL_GPL(acpi_has_watchdog); +/* ACPI watchdog can be disabled on boot command line */ +static int __init disable_acpi_watchdog(char *str) +{ + acpi_no_watchdog = true; + return 1; +} +__setup("acpi_no_watchdog", disable_acpi_watchdog); + void __init acpi_watchdog_init(void) { const struct acpi_wdat_entry *entries; @@ -110,12 +118,12 @@ void __init acpi_watchdog_init(void) /* Watchdog disabled by BIOS */ if (!(wdat->flags & ACPI_WDAT_ENABLED)) - return; + goto fail_put_wdat; /* Skip legacy PCI WDT devices */ if (wdat->pci_segment != 0xff || wdat->pci_bus != 0xff || wdat->pci_device != 0xff || wdat->pci_function != 0xff) - return; + goto fail_put_wdat; INIT_LIST_HEAD(&resource_list); @@ -129,12 +137,11 @@ void __init acpi_watchdog_init(void) gas = &entries[i].register_region; res.start = gas->address; + res.end = res.start + ACPI_ACCESS_BYTE_WIDTH(gas->access_width) - 1; if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) { res.flags = IORESOURCE_MEM; - res.end = res.start + ALIGN(gas->access_width, 4) - 1; } else if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_IO) { res.flags = IORESOURCE_IO; - res.end = res.start + gas->access_width - 1; } else { pr_warn("Unsupported address space: %u\n", gas->space_id); @@ -144,11 +151,7 @@ void __init acpi_watchdog_init(void) found = false; resource_list_for_each_entry(rentry, &resource_list) { if (rentry->res->flags == res.flags && - resource_overlaps(rentry->res, &res)) { - if (res.start < rentry->res->start) - rentry->res->start = res.start; - if (res.end > rentry->res->end) - rentry->res->end = res.end; + resource_union(rentry->res, &res, rentry->res)) { found = true; break; } @@ -176,10 +179,12 @@ void __init acpi_watchdog_init(void) pdev = platform_device_register_simple("wdat_wdt", PLATFORM_DEVID_NONE, resources, nresources); if (IS_ERR(pdev)) - pr_err("Device creation failed: %ld\n", PTR_ERR(pdev)); + pr_err("Device creation failed: %pe\n", pdev); kfree(resources); fail_free_resource_list: resource_list_free(&resource_list); +fail_put_wdat: + acpi_put_table((struct acpi_table_header *)wdat); } diff --git a/drivers/acpi/acpica/Makefile b/drivers/acpi/acpica/Makefile index 59700433a96e..8d18af396de9 100644 --- a/drivers/acpi/acpica/Makefile +++ b/drivers/acpi/acpica/Makefile @@ -3,8 +3,9 @@ # Makefile for ACPICA Core interpreter # -ccflags-y := -Os -D_LINUX -DBUILDING_ACPICA +ccflags-y := -D_LINUX -DBUILDING_ACPICA ccflags-$(CONFIG_ACPI_DEBUG) += -DACPI_DEBUG_OUTPUT +CFLAGS_tbfind.o += $(call cc-disable-warning, stringop-truncation) # use acpi.o to put all files here into acpi.o 
modparam namespace obj-y += acpi.o @@ -155,6 +156,7 @@ acpi-y += \ utalloc.o \ utascii.o \ utbuffer.o \ + utcksum.o \ utcopy.o \ utexcep.o \ utdebug.o \ diff --git a/drivers/acpi/acpica/acapps.h b/drivers/acpi/acpica/acapps.h index 5a9c2febc0fb..d7d4649ce66f 100644 --- a/drivers/acpi/acpica/acapps.h +++ b/drivers/acpi/acpica/acapps.h @@ -3,7 +3,7 @@ * * Module Name: acapps - common include for ACPI applications/tools * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. * *****************************************************************************/ @@ -17,7 +17,7 @@ /* Common info for tool signons */ #define ACPICA_NAME "Intel ACPI Component Architecture" -#define ACPICA_COPYRIGHT "Copyright (c) 2000 - 2018 Intel Corporation" +#define ACPICA_COPYRIGHT "Copyright (c) 2000 - 2025 Intel Corporation" #if ACPI_MACHINE_WIDTH == 64 #define ACPI_WIDTH " (64-bit version)" diff --git a/drivers/acpi/acpica/accommon.h b/drivers/acpi/acpica/accommon.h index 8bc935977d8e..662231f4f881 100644 --- a/drivers/acpi/acpica/accommon.h +++ b/drivers/acpi/acpica/accommon.h @@ -3,7 +3,7 @@ * * Name: accommon.h - Common include files for generation of ACPICA source * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. * *****************************************************************************/ @@ -13,7 +13,7 @@ /* * Common set of includes for all ACPICA source files. * We put them here because we don't want to duplicate them - * in the the source code again and again. + * in the source code again and again. * * Note: The order of these include files is important. */ diff --git a/drivers/acpi/acpica/acconvert.h b/drivers/acpi/acpica/acconvert.h index 4ebe18826646..24998f2d7539 100644 --- a/drivers/acpi/acpica/acconvert.h +++ b/drivers/acpi/acpica/acconvert.h @@ -3,7 +3,7 @@ * * Module Name: acapps - common include for ACPI applications/tools * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. * *****************************************************************************/ @@ -65,9 +65,7 @@ void cg_write_aml_comment(union acpi_parse_object *op); /* * cvparser */ -void -cv_init_file_tree(struct acpi_table_header *table, - u8 *aml_start, u32 aml_length); +void cv_init_file_tree(struct acpi_table_header *table, FILE * root_file); void cv_clear_op_comments(union acpi_parse_object *op); diff --git a/drivers/acpi/acpica/acdebug.h b/drivers/acpi/acpica/acdebug.h index 57d9495e5933..91241bd6917a 100644 --- a/drivers/acpi/acpica/acdebug.h +++ b/drivers/acpi/acpica/acdebug.h @@ -3,7 +3,7 @@ * * Name: acdebug.h - ACPI/AML debugger * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. 
* *****************************************************************************/ @@ -16,7 +16,8 @@ #include "acdisasm.h" #endif -#define ACPI_DEBUG_BUFFER_SIZE 0x4000 /* 16K buffer for return objects */ +#define ACPI_DEBUG_BUFFER_SIZE 0x4000 /* 16K buffer for return objects */ +#define ACPI_DEBUG_LENGTH_FORMAT " (%.4X bits, %.3X bytes)" struct acpi_db_command_info { const char *name; /* Command Name */ @@ -36,12 +37,14 @@ struct acpi_db_argument_info { struct acpi_db_execute_walk { u32 count; u32 max_count; + char name_seg[ACPI_NAMESEG_SIZE + 1]; }; #define PARAM_LIST(pl) pl #define EX_NO_SINGLE_STEP 1 #define EX_SINGLE_STEP 2 +#define EX_ALL 4 /* * dbxface - external debugger interfaces @@ -123,6 +126,8 @@ void acpi_db_disassemble_aml(char *statements, union acpi_parse_object *op); void acpi_db_evaluate_predefined_names(void); +void acpi_db_evaluate_all(char *name_seg); + /* * dbnames - namespace commands */ @@ -147,6 +152,8 @@ void acpi_db_find_references(char *object_arg); void acpi_db_get_bus_info(void); +acpi_status acpi_db_display_fields(u32 address_space_id); + /* * dbdisply - debug display commands */ @@ -280,4 +287,6 @@ struct acpi_namespace_node *acpi_db_local_ns_lookup(char *name); void acpi_db_uint32_to_hex_string(u32 value, char *buffer); +void acpi_db_generate_interrupt(char *gsiv_arg); + #endif /* __ACDEBUG_H__ */ diff --git a/drivers/acpi/acpica/acdispat.h b/drivers/acpi/acpica/acdispat.h index e577f3a40e6a..5d48a344b35f 100644 --- a/drivers/acpi/acpica/acdispat.h +++ b/drivers/acpi/acpica/acdispat.h @@ -3,7 +3,7 @@ * * Name: acdispat.h - dispatcher (parser to interpreter interface) * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/acevents.h b/drivers/acpi/acpica/acevents.h index b412aa909907..b40fb3a5ac8a 100644 --- a/drivers/acpi/acpica/acevents.h +++ b/drivers/acpi/acpica/acevents.h @@ -3,7 +3,7 @@ * * Name: acevents.h - Event subcomponent prototypes and defines * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. 
* *****************************************************************************/ @@ -69,7 +69,8 @@ acpi_status acpi_ev_mask_gpe(struct acpi_gpe_event_info *gpe_event_info, u8 is_masked); acpi_status -acpi_ev_add_gpe_reference(struct acpi_gpe_event_info *gpe_event_info); +acpi_ev_add_gpe_reference(struct acpi_gpe_event_info *gpe_event_info, + u8 clear_on_enable); acpi_status acpi_ev_remove_gpe_reference(struct acpi_gpe_event_info *gpe_event_info); @@ -187,7 +188,7 @@ acpi_ev_detach_region(union acpi_operand_object *region_obj, u8 acpi_ns_is_locked); void -acpi_ev_execute_reg_methods(struct acpi_namespace_node *node, +acpi_ev_execute_reg_methods(struct acpi_namespace_node *node, u32 max_depth, acpi_adr_space_type space_id, u32 function); acpi_status @@ -223,6 +224,11 @@ acpi_ev_pci_bar_region_setup(acpi_handle handle, void *handler_context, void **region_context); acpi_status +acpi_ev_data_table_region_setup(acpi_handle handle, + u32 function, + void *handler_context, void **region_context); + +acpi_status acpi_ev_default_region_setup(acpi_handle handle, u32 function, void *handler_context, void **region_context); diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h index 87d6eb01beaf..c8a750d2674c 100644 --- a/drivers/acpi/acpica/acglobal.h +++ b/drivers/acpi/acpica/acglobal.h @@ -3,7 +3,7 @@ * * Name: acglobal.h - Declarations for global variables * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. * *****************************************************************************/ @@ -24,15 +24,12 @@ ACPI_GLOBAL(struct acpi_table_list, acpi_gbl_root_table_list); ACPI_GLOBAL(struct acpi_table_header *, acpi_gbl_DSDT); ACPI_GLOBAL(struct acpi_table_header, acpi_gbl_original_dsdt_header); +ACPI_INIT_GLOBAL(char *, acpi_gbl_CDAT, NULL); ACPI_INIT_GLOBAL(u32, acpi_gbl_dsdt_index, ACPI_INVALID_TABLE_INDEX); ACPI_INIT_GLOBAL(u32, acpi_gbl_facs_index, ACPI_INVALID_TABLE_INDEX); ACPI_INIT_GLOBAL(u32, acpi_gbl_xfacs_index, ACPI_INVALID_TABLE_INDEX); ACPI_INIT_GLOBAL(u32, acpi_gbl_fadt_index, ACPI_INVALID_TABLE_INDEX); - -#if (!ACPI_REDUCED_HARDWARE) -ACPI_GLOBAL(struct acpi_table_facs *, acpi_gbl_FACS); - -#endif /* !ACPI_REDUCED_HARDWARE */ +ACPI_INIT_GLOBAL(struct acpi_table_facs *, acpi_gbl_FACS, NULL); /* These addresses are calculated from the FADT Event Block addresses */ @@ -42,6 +39,12 @@ ACPI_GLOBAL(struct acpi_generic_address, acpi_gbl_xpm1a_enable); ACPI_GLOBAL(struct acpi_generic_address, acpi_gbl_xpm1b_status); ACPI_GLOBAL(struct acpi_generic_address, acpi_gbl_xpm1b_enable); +#ifdef ACPI_GPE_USE_LOGICAL_ADDRESSES +ACPI_GLOBAL(unsigned long, acpi_gbl_xgpe0_block_logical_address); +ACPI_GLOBAL(unsigned long, acpi_gbl_xgpe1_block_logical_address); + +#endif /* ACPI_GPE_USE_LOGICAL_ADDRESSES */ + /* * Handle both ACPI 1.0 and ACPI 2.0+ Integer widths. 
The integer width is * determined by the revision of the DSDT: If the DSDT revision is less than @@ -122,6 +125,7 @@ ACPI_GLOBAL(acpi_table_handler, acpi_gbl_table_handler); ACPI_GLOBAL(void *, acpi_gbl_table_handler_context); ACPI_GLOBAL(acpi_interface_handler, acpi_gbl_interface_handler); ACPI_GLOBAL(struct acpi_sci_handler_info *, acpi_gbl_sci_handler_list); +ACPI_GLOBAL(struct acpi_ged_handler_info *, acpi_gbl_ged_handler_list); /* Owner ID support */ @@ -164,6 +168,7 @@ ACPI_GLOBAL(struct acpi_memory_list *, acpi_gbl_global_list); ACPI_GLOBAL(struct acpi_memory_list *, acpi_gbl_ns_node_list); ACPI_GLOBAL(u8, acpi_gbl_display_final_mem_stats); ACPI_GLOBAL(u8, acpi_gbl_disable_mem_tracking); +ACPI_GLOBAL(u8, acpi_gbl_verbose_leak_dump); #endif /***************************************************************************** @@ -177,7 +182,6 @@ ACPI_GLOBAL(u8, acpi_gbl_disable_mem_tracking); ACPI_GLOBAL(struct acpi_namespace_node, acpi_gbl_root_node_struct); ACPI_GLOBAL(struct acpi_namespace_node *, acpi_gbl_root_node); ACPI_GLOBAL(struct acpi_namespace_node *, acpi_gbl_fadt_gpe_device); -ACPI_GLOBAL(union acpi_operand_object *, acpi_gbl_module_code_list); extern const u8 acpi_gbl_ns_properties[ACPI_NUM_NS_TYPES]; extern const struct acpi_predefined_names @@ -220,6 +224,8 @@ extern struct acpi_bit_register_info acpi_gbl_bit_register_info[ACPI_NUM_BITREG]; ACPI_GLOBAL(u8, acpi_gbl_sleep_type_a); ACPI_GLOBAL(u8, acpi_gbl_sleep_type_b); +ACPI_GLOBAL(u8, acpi_gbl_sleep_type_a_s0); +ACPI_GLOBAL(u8, acpi_gbl_sleep_type_b_s0); /***************************************************************************** * @@ -290,6 +296,7 @@ ACPI_GLOBAL(struct acpi_external_file *, acpi_gbl_external_file_list); #ifdef ACPI_DEBUGGER ACPI_INIT_GLOBAL(u8, acpi_gbl_abort_method, FALSE); ACPI_INIT_GLOBAL(acpi_thread_id, acpi_gbl_db_thread_id, ACPI_INVALID_THREAD_ID); +ACPI_INIT_GLOBAL(u32, acpi_gbl_next_cmd_num, 1); ACPI_GLOBAL(u8, acpi_gbl_db_opt_no_ini_methods); ACPI_GLOBAL(u8, acpi_gbl_db_opt_no_region_support); diff --git a/drivers/acpi/acpica/achware.h b/drivers/acpi/acpica/achware.h index ef99e2fc37f8..6aec56c65fa0 100644 --- a/drivers/acpi/acpica/achware.h +++ b/drivers/acpi/acpica/achware.h @@ -3,7 +3,7 @@ * * Name: achware.h -- hardware specific interfaces * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. 
* *****************************************************************************/ @@ -73,9 +73,15 @@ acpi_status acpi_hw_read_port(acpi_io_address address, u32 *value, u32 width); acpi_status acpi_hw_write_port(acpi_io_address address, u32 value, u32 width); +acpi_status acpi_hw_validate_io_block(u64 address, u32 bit_width, u32 count); + /* * hwgpe - GPE support */ +acpi_status acpi_hw_gpe_read(u64 *value, struct acpi_gpe_address *reg); + +acpi_status acpi_hw_gpe_write(u64 value, struct acpi_gpe_address *reg); + u32 acpi_hw_get_gpe_register_bit(struct acpi_gpe_event_info *gpe_event_info); acpi_status @@ -95,11 +101,9 @@ acpi_status acpi_hw_get_gpe_status(struct acpi_gpe_event_info *gpe_event_info, acpi_event_status *event_status); -acpi_status acpi_hw_disable_all_gpes(void); - acpi_status acpi_hw_enable_all_runtime_gpes(void); -acpi_status acpi_hw_enable_all_wakeup_gpes(void); +u8 acpi_hw_check_all_gpes(acpi_handle gpe_skip_device, u32 gpe_skip_number); acpi_status acpi_hw_enable_runtime_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info, diff --git a/drivers/acpi/acpica/acinterp.h b/drivers/acpi/acpica/acinterp.h index c5b2be0b6613..1ee6ac9b2baf 100644 --- a/drivers/acpi/acpica/acinterp.h +++ b/drivers/acpi/acpica/acinterp.h @@ -3,7 +3,7 @@ * * Name: acinterp.h - Interpreter subcomponent prototypes and defines * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. * *****************************************************************************/ @@ -120,6 +120,9 @@ void acpi_ex_trace_point(acpi_trace_event_type type, u8 begin, u8 *aml, char *pathname); +void +acpi_ex_trace_args(union acpi_operand_object **params, u32 count); + /* * exfield - ACPI AML (p-code) execution - field manipulation */ diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h index 99b0da899109..f98640086f4e 100644 --- a/drivers/acpi/acpica/aclocal.h +++ b/drivers/acpi/acpica/aclocal.h @@ -3,7 +3,7 @@ * * Name: aclocal.h - Internal data types used across the ACPI subsystem * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. 
* *****************************************************************************/ @@ -134,12 +134,12 @@ struct acpi_namespace_node { union acpi_operand_object *object; /* Interpreter object */ u8 descriptor_type; /* Differentiate object descriptor types */ u8 type; /* ACPI Type associated with this name */ - u8 flags; /* Miscellaneous flags */ - acpi_owner_id owner_id; /* Node creator */ + u16 flags; /* Miscellaneous flags */ union acpi_name_union name; /* ACPI Name, always 4 chars per ACPI spec */ struct acpi_namespace_node *parent; /* Parent node */ struct acpi_namespace_node *child; /* First child */ struct acpi_namespace_node *peer; /* First peer */ + acpi_owner_id owner_id; /* Node creator */ /* * The following fields are used by the ASL compiler and disassembler only @@ -293,7 +293,7 @@ acpi_status (*acpi_internal_method) (struct acpi_walk_state * walk_state); * expected_return_btypes - Allowed type(s) for the return value */ struct acpi_name_info { - char name[ACPI_NAME_SIZE]; + char name[ACPI_NAMESEG_SIZE] ACPI_NONSTRING; u16 argument_list; u8 expected_btypes; }; @@ -370,7 +370,7 @@ typedef acpi_status (*acpi_object_converter) (struct acpi_namespace_node * converted_object); struct acpi_simple_repair_info { - char name[ACPI_NAME_SIZE]; + char name[ACPI_NAMESEG_SIZE] ACPI_NONSTRING; u32 unexpected_btypes; u32 package_index; acpi_object_converter object_converter; @@ -454,11 +454,18 @@ struct acpi_gpe_event_info { u8 disable_for_dispatch; /* Masked during dispatching */ }; +/* GPE register address */ + +struct acpi_gpe_address { + u8 space_id; /* Address space where the register exists */ + u64 address; /* 64-bit address of the register */ +}; + /* Information about a GPE register pair, one per each status/enable pair in an array */ struct acpi_gpe_register_info { - struct acpi_generic_address status_address; /* Address of status reg */ - struct acpi_generic_address enable_address; /* Address of enable reg */ + struct acpi_gpe_address status_address; /* Address of status reg */ + struct acpi_gpe_address enable_address; /* Address of enable reg */ u16 base_gpe_number; /* Base GPE number for this register */ u8 enable_for_wake; /* GPEs to keep enabled when sleeping */ u8 enable_for_run; /* GPEs to keep enabled when running */ @@ -536,6 +543,14 @@ struct acpi_field_info { u32 pkg_length; }; +/* Information about the interrupt ID and _EVT of a GED device */ + +struct acpi_ged_handler_info { + struct acpi_ged_handler_info *next; + u32 int_id; /* The interrupt ID that triggers the execution of the evt_method. 
*/ + struct acpi_namespace_node *evt_method; /* The _EVT method to be executed when an interrupt with ID = int_ID is received */ +}; + /***************************************************************************** * * Generic "state" object for stacks @@ -553,25 +568,28 @@ struct acpi_field_info { u8 descriptor_type; /* To differentiate various internal objs */\ u8 flags; \ u16 value; \ - u16 state; + u16 state /* There are 2 bytes available here until the next natural alignment boundary */ struct acpi_common_state { -ACPI_STATE_COMMON}; + ACPI_STATE_COMMON; +}; /* * Update state - used to traverse complex objects such as packages */ struct acpi_update_state { - ACPI_STATE_COMMON union acpi_operand_object *object; + ACPI_STATE_COMMON; + union acpi_operand_object *object; }; /* * Pkg state - used to traverse nested package structures */ struct acpi_pkg_state { - ACPI_STATE_COMMON u32 index; + ACPI_STATE_COMMON; + u32 index; union acpi_operand_object *source_object; union acpi_operand_object *dest_object; struct acpi_walk_state *walk_state; @@ -584,7 +602,8 @@ struct acpi_pkg_state { * Allows nesting of these constructs */ struct acpi_control_state { - ACPI_STATE_COMMON u16 opcode; + ACPI_STATE_COMMON; + u16 opcode; union acpi_parse_object *predicate_op; u8 *aml_predicate_start; /* Start of if/while predicate */ u8 *package_end; /* End of if/while block */ @@ -595,11 +614,13 @@ struct acpi_control_state { * Scope state - current scope during namespace lookups */ struct acpi_scope_state { - ACPI_STATE_COMMON struct acpi_namespace_node *node; + ACPI_STATE_COMMON; + struct acpi_namespace_node *node; }; struct acpi_pscope_state { - ACPI_STATE_COMMON u32 arg_count; /* Number of fixed arguments */ + ACPI_STATE_COMMON; + u32 arg_count; /* Number of fixed arguments */ union acpi_parse_object *op; /* Current op being parsed */ u8 *arg_end; /* Current argument end */ u8 *pkg_end; /* Current package end */ @@ -611,7 +632,8 @@ struct acpi_pscope_state { * states are created when there are nested control methods executing. */ struct acpi_thread_state { - ACPI_STATE_COMMON u8 current_sync_level; /* Mutex Sync (nested acquire) level */ + ACPI_STATE_COMMON; + u8 current_sync_level; /* Mutex Sync (nested acquire) level */ struct acpi_walk_state *walk_state_list; /* Head of list of walk_states for this thread */ union acpi_operand_object *acquired_mutex_list; /* List of all currently acquired mutexes */ acpi_thread_id thread_id; /* Running thread ID */ @@ -622,8 +644,8 @@ struct acpi_thread_state { * AML arguments */ struct acpi_result_values { - ACPI_STATE_COMMON - union acpi_operand_object *obj_desc[ACPI_RESULTS_FRAME_OBJ_NUM]; + ACPI_STATE_COMMON; + union acpi_operand_object *obj_desc[ACPI_RESULTS_FRAME_OBJ_NUM]; }; typedef @@ -645,7 +667,8 @@ struct acpi_global_notify_handler { * handler/dispatcher. 
*/ struct acpi_notify_info { - ACPI_STATE_COMMON u8 handler_list_id; + ACPI_STATE_COMMON; + u8 handler_list_id; struct acpi_namespace_node *node; union acpi_operand_object *handler_list_head; struct acpi_global_notify_handler *global; @@ -802,7 +825,7 @@ struct acpi_comment_addr_node { /* * File node - used for "Include" operator file stack and - * depdendency tree for the -ca option + * dependency tree for the -ca option */ struct acpi_file_node { void *file; @@ -1067,6 +1090,8 @@ struct acpi_port_info { #define ACPI_ADDRESS_TYPE_IO_RANGE 1 #define ACPI_ADDRESS_TYPE_BUS_NUMBER_RANGE 2 +#define ACPI_ADDRESS_TYPE_PCC_NUMBER 0xA + /* Resource descriptor types and masks */ #define ACPI_RESOURCE_NAME_LARGE 0x80 @@ -1115,7 +1140,8 @@ struct acpi_port_info { #define ACPI_RESOURCE_NAME_PIN_GROUP 0x90 #define ACPI_RESOURCE_NAME_PIN_GROUP_FUNCTION 0x91 #define ACPI_RESOURCE_NAME_PIN_GROUP_CONFIG 0x92 -#define ACPI_RESOURCE_NAME_LARGE_MAX 0x92 +#define ACPI_RESOURCE_NAME_CLOCK_INPUT 0x93 +#define ACPI_RESOURCE_NAME_LARGE_MAX 0x93 /***************************************************************************** * diff --git a/drivers/acpi/acpica/acmacros.h b/drivers/acpi/acpica/acmacros.h index de52cd6e868a..4e9402c02410 100644 --- a/drivers/acpi/acpica/acmacros.h +++ b/drivers/acpi/acpica/acmacros.h @@ -3,7 +3,7 @@ * * Name: acmacros.h - C macros for the entire subsystem. * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. * *****************************************************************************/ @@ -462,7 +462,7 @@ #define ACPI_IS_OCTAL_DIGIT(d) (((char)(d) >= '0') && ((char)(d) <= '7')) /* - * Macors used for the ASL-/ASL+ converter utility + * Macros used for the ASL-/ASL+ converter utility */ #ifdef ACPI_ASL_COMPILER @@ -477,7 +477,7 @@ #define ASL_CV_PRINT_ONE_COMMENT(a,b,c,d) cv_print_one_comment_type (a,b,c,d); #define ASL_CV_PRINT_ONE_COMMENT_LIST(a,b) cv_print_one_comment_list (a,b); #define ASL_CV_FILE_HAS_SWITCHED(a) cv_file_has_switched(a) -#define ASL_CV_INIT_FILETREE(a,b,c) cv_init_file_tree(a,b,c); +#define ASL_CV_INIT_FILETREE(a,b) cv_init_file_tree(a,b); #else @@ -492,7 +492,7 @@ #define ASL_CV_PRINT_ONE_COMMENT(a,b,c,d) #define ASL_CV_PRINT_ONE_COMMENT_LIST(a,b) #define ASL_CV_FILE_HAS_SWITCHED(a) 0 -#define ASL_CV_INIT_FILETREE(a,b,c) +#define ASL_CV_INIT_FILETREE(a,b) #endif diff --git a/drivers/acpi/acpica/acnamesp.h b/drivers/acpi/acpica/acnamesp.h index 9bd25f36c608..13f050fecb49 100644 --- a/drivers/acpi/acpica/acnamesp.h +++ b/drivers/acpi/acpica/acnamesp.h @@ -3,7 +3,7 @@ * * Name: acnamesp.h - Namespace subcomponent prototypes and defines * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. 
* *****************************************************************************/ @@ -207,8 +207,6 @@ acpi_ns_dump_object_paths(acpi_object_type type, */ acpi_status acpi_ns_evaluate(struct acpi_evaluate_info *info); -void acpi_ns_exec_module_code_list(void); - /* * nsarguments - Argument count/type checking for predefined/reserved names */ @@ -258,6 +256,8 @@ u32 acpi_ns_build_normalized_path(struct acpi_namespace_node *node, char *full_path, u32 path_size, u8 no_trailing); +void acpi_ns_normalize_pathname(char *original_path); + char *acpi_ns_get_normalized_pathname(struct acpi_namespace_node *node, u8 no_trailing); diff --git a/drivers/acpi/acpica/acobject.h b/drivers/acpi/acpica/acobject.h index ac992b6ebce9..6ffcc7a0a0c2 100644 --- a/drivers/acpi/acpica/acobject.h +++ b/drivers/acpi/acpica/acobject.h @@ -3,7 +3,7 @@ * * Name: acobject.h - Definition of union acpi_operand_object (Internal object only) * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. * *****************************************************************************/ @@ -48,7 +48,7 @@ u8 descriptor_type; /* To differentiate various internal objs */\ u8 type; /* acpi_object_type */\ u16 reference_count; /* For object deletion management */\ - u8 flags; + u8 flags /* * Note: There are 3 bytes available here before the * next natural alignment boundary (for both 32/64 cases) @@ -71,10 +71,12 @@ *****************************************************************************/ struct acpi_object_common { -ACPI_OBJECT_COMMON_HEADER}; + ACPI_OBJECT_COMMON_HEADER; +}; struct acpi_object_integer { - ACPI_OBJECT_COMMON_HEADER u8 fill[3]; /* Prevent warning on some compilers */ + ACPI_OBJECT_COMMON_HEADER; + u8 fill[3]; /* Prevent warning on some compilers */ u64 value; }; @@ -86,23 +88,26 @@ struct acpi_object_integer { */ #define ACPI_COMMON_BUFFER_INFO(_type) \ _type *pointer; \ - u32 length; + u32 length /* Null terminated, ASCII characters only */ struct acpi_object_string { - ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_BUFFER_INFO(char) /* String in AML stream or allocated string */ + ACPI_OBJECT_COMMON_HEADER; + ACPI_COMMON_BUFFER_INFO(char); /* String in AML stream or allocated string */ }; struct acpi_object_buffer { - ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_BUFFER_INFO(u8) /* Buffer in AML stream or allocated buffer */ + ACPI_OBJECT_COMMON_HEADER; + ACPI_COMMON_BUFFER_INFO(u8); /* Buffer in AML stream or allocated buffer */ u32 aml_length; u8 *aml_start; struct acpi_namespace_node *node; /* Link back to parent node */ }; struct acpi_object_package { - ACPI_OBJECT_COMMON_HEADER struct acpi_namespace_node *node; /* Link back to parent node */ + ACPI_OBJECT_COMMON_HEADER; + struct acpi_namespace_node *node; /* Link back to parent node */ union acpi_operand_object **elements; /* Array of pointers to acpi_objects */ u8 *aml_start; u32 aml_length; @@ -116,11 +121,13 @@ struct acpi_object_package { *****************************************************************************/ struct acpi_object_event { - ACPI_OBJECT_COMMON_HEADER acpi_semaphore os_semaphore; /* Actual OS synchronization object */ + ACPI_OBJECT_COMMON_HEADER; + acpi_semaphore os_semaphore; /* Actual OS synchronization object */ }; struct acpi_object_mutex { - ACPI_OBJECT_COMMON_HEADER u8 sync_level; /* 0-15, specified in Mutex() call */ + ACPI_OBJECT_COMMON_HEADER; + u8 sync_level; /* 0-15, specified in Mutex() call */ u16 acquisition_depth; /* Allow multiple Acquires, same thread */ acpi_mutex os_mutex; /* Actual OS synchronization object 
*/ acpi_thread_id thread_id; /* Current owner of the mutex */ @@ -132,16 +139,19 @@ struct acpi_object_mutex { }; struct acpi_object_region { - ACPI_OBJECT_COMMON_HEADER u8 space_id; + ACPI_OBJECT_COMMON_HEADER; + u8 space_id; struct acpi_namespace_node *node; /* Containing namespace node */ union acpi_operand_object *handler; /* Handler for region access */ union acpi_operand_object *next; acpi_physical_address address; u32 length; + void *pointer; /* Only for data table regions */ }; struct acpi_object_method { - ACPI_OBJECT_COMMON_HEADER u8 info_flags; + ACPI_OBJECT_COMMON_HEADER; + u8 info_flags; u8 param_count; u8 sync_level; union acpi_operand_object *mutex; @@ -153,8 +163,8 @@ struct acpi_object_method { } dispatch; u32 aml_length; - u8 thread_count; acpi_owner_id owner_id; + u8 thread_count; }; /* Flags for info_flags field above */ @@ -177,33 +187,43 @@ struct acpi_object_method { */ #define ACPI_COMMON_NOTIFY_INFO \ union acpi_operand_object *notify_list[2]; /* Handlers for system/device notifies */\ - union acpi_operand_object *handler; /* Handler for Address space */ + union acpi_operand_object *handler /* Handler for Address space */ /* COMMON NOTIFY for POWER, PROCESSOR, DEVICE, and THERMAL */ struct acpi_object_notify_common { -ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_NOTIFY_INFO}; + ACPI_OBJECT_COMMON_HEADER; + ACPI_COMMON_NOTIFY_INFO; +}; struct acpi_object_device { - ACPI_OBJECT_COMMON_HEADER - ACPI_COMMON_NOTIFY_INFO struct acpi_gpe_block_info *gpe_block; + ACPI_OBJECT_COMMON_HEADER; + ACPI_COMMON_NOTIFY_INFO; + struct acpi_gpe_block_info *gpe_block; }; struct acpi_object_power_resource { - ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_NOTIFY_INFO u32 system_level; + ACPI_OBJECT_COMMON_HEADER; + ACPI_COMMON_NOTIFY_INFO; + u32 system_level; u32 resource_order; }; struct acpi_object_processor { - ACPI_OBJECT_COMMON_HEADER - /* The next two fields take advantage of the 3-byte space before NOTIFY_INFO */ + ACPI_OBJECT_COMMON_HEADER; + + /* The next two fields take advantage of the 3-byte space before NOTIFY_INFO */ + u8 proc_id; u8 length; - ACPI_COMMON_NOTIFY_INFO acpi_io_address address; + ACPI_COMMON_NOTIFY_INFO; + acpi_io_address address; }; struct acpi_object_thermal_zone { -ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_NOTIFY_INFO}; + ACPI_OBJECT_COMMON_HEADER; + ACPI_COMMON_NOTIFY_INFO; +}; /****************************************************************************** * @@ -225,33 +245,42 @@ ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_NOTIFY_INFO}; u32 base_byte_offset; /* Byte offset within containing object */\ u32 value; /* Value to store into the Bank or Index register */\ u8 start_field_bit_offset;/* Bit offset within first field datum (0-63) */\ - u8 access_length; /* For serial regions/fields */ + u8 access_length /* For serial regions/fields */ /* COMMON FIELD (for BUFFER, REGION, BANK, and INDEX fields) */ struct acpi_object_field_common { - ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_FIELD_INFO union acpi_operand_object *region_obj; /* Parent Operation Region object (REGION/BANK fields only) */ + ACPI_OBJECT_COMMON_HEADER; + ACPI_COMMON_FIELD_INFO; + union acpi_operand_object *region_obj; /* Parent Operation Region object (REGION/BANK fields only) */ }; struct acpi_object_region_field { - ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_FIELD_INFO u16 resource_length; + ACPI_OBJECT_COMMON_HEADER; + ACPI_COMMON_FIELD_INFO; + u16 resource_length; union acpi_operand_object *region_obj; /* Containing op_region object */ u8 *resource_buffer; /* resource_template for serial regions/fields */ u16 
pin_number_index; /* Index relative to previous Connection/Template */ + u8 *internal_pcc_buffer; /* Internal buffer for fields associated with PCC */ }; struct acpi_object_bank_field { - ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_FIELD_INFO union acpi_operand_object *region_obj; /* Containing op_region object */ + ACPI_OBJECT_COMMON_HEADER; + ACPI_COMMON_FIELD_INFO; + union acpi_operand_object *region_obj; /* Containing op_region object */ union acpi_operand_object *bank_obj; /* bank_select Register object */ }; struct acpi_object_index_field { - ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_FIELD_INFO - /* - * No "RegionObj" pointer needed since the Index and Data registers - * are each field definitions unto themselves. - */ + ACPI_OBJECT_COMMON_HEADER; + ACPI_COMMON_FIELD_INFO; + + /* + * No "RegionObj" pointer needed since the Index and Data registers + * are each field definitions unto themselves. + */ union acpi_operand_object *index_obj; /* Index register */ union acpi_operand_object *data_obj; /* Data register */ }; @@ -259,7 +288,10 @@ struct acpi_object_index_field { /* The buffer_field is different in that it is part of a Buffer, not an op_region */ struct acpi_object_buffer_field { - ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_FIELD_INFO union acpi_operand_object *buffer_obj; /* Containing Buffer object */ + ACPI_OBJECT_COMMON_HEADER; + ACPI_COMMON_FIELD_INFO; + u8 is_create_field; /* Special case for objects created by create_field() */ + union acpi_operand_object *buffer_obj; /* Containing Buffer object */ }; /****************************************************************************** @@ -269,7 +301,8 @@ struct acpi_object_buffer_field { *****************************************************************************/ struct acpi_object_notify_handler { - ACPI_OBJECT_COMMON_HEADER struct acpi_namespace_node *node; /* Parent device */ + ACPI_OBJECT_COMMON_HEADER; + struct acpi_namespace_node *node; /* Parent device */ u32 handler_type; /* Type: Device/System/Both */ acpi_notify_handler handler; /* Handler address */ void *context; @@ -277,11 +310,13 @@ struct acpi_object_notify_handler { }; struct acpi_object_addr_handler { - ACPI_OBJECT_COMMON_HEADER u8 space_id; + ACPI_OBJECT_COMMON_HEADER; + u8 space_id; u8 handler_flags; acpi_adr_space_handler handler; struct acpi_namespace_node *node; /* Parent device */ void *context; + acpi_mutex context_mutex; acpi_adr_space_setup setup; union acpi_operand_object *region_list; /* Regions using this handler */ union acpi_operand_object *next; @@ -303,7 +338,8 @@ struct acpi_object_addr_handler { * The Reference.Class differentiates these types. 
*/ struct acpi_object_reference { - ACPI_OBJECT_COMMON_HEADER u8 class; /* Reference Class */ + ACPI_OBJECT_COMMON_HEADER; + u8 class; /* Reference Class */ u8 target_type; /* Used for Index Op */ u8 resolved; /* Reference has been resolved to a value */ void *object; /* name_op=>HANDLE to obj, index_op=>union acpi_operand_object */ @@ -336,7 +372,8 @@ typedef enum { * Currently: Region and field_unit types */ struct acpi_object_extra { - ACPI_OBJECT_COMMON_HEADER struct acpi_namespace_node *method_REG; /* _REG method for this region (if any) */ + ACPI_OBJECT_COMMON_HEADER; + struct acpi_namespace_node *method_REG; /* _REG method for this region (if any) */ struct acpi_namespace_node *scope_node; void *region_context; /* Region-specific data */ u8 *aml_start; @@ -346,14 +383,16 @@ struct acpi_object_extra { /* Additional data that can be attached to namespace nodes */ struct acpi_object_data { - ACPI_OBJECT_COMMON_HEADER acpi_object_handler handler; + ACPI_OBJECT_COMMON_HEADER; + acpi_object_handler handler; void *pointer; }; /* Structure used when objects are cached for reuse */ struct acpi_object_cache_list { - ACPI_OBJECT_COMMON_HEADER union acpi_operand_object *next; /* Link for object cache and internal lists */ + ACPI_OBJECT_COMMON_HEADER; + union acpi_operand_object *next; /* Link for object cache and internal lists */ }; /****************************************************************************** diff --git a/drivers/acpi/acpica/acopcode.h b/drivers/acpi/acpica/acopcode.h index 818eba413614..a2a9e51d7ac6 100644 --- a/drivers/acpi/acpica/acopcode.h +++ b/drivers/acpi/acpica/acopcode.h @@ -3,7 +3,7 @@ * * Name: acopcode.h - AML opcode information for the AML parser and interpreter * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/acparser.h b/drivers/acpi/acpica/acparser.h index ab48196ae55e..65a15dee092b 100644 --- a/drivers/acpi/acpica/acparser.h +++ b/drivers/acpi/acpica/acparser.h @@ -3,7 +3,7 @@ * * Module Name: acparser.h - AML Parser subcomponent prototypes and defines * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/acpredef.h b/drivers/acpi/acpica/acpredef.h index d31bb04facb6..da2c45880cc7 100644 --- a/drivers/acpi/acpica/acpredef.h +++ b/drivers/acpi/acpica/acpredef.h @@ -3,7 +3,7 @@ * * Name: acpredef - Information table for ACPI predefined methods and objects * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. 
* *****************************************************************************/ @@ -101,7 +101,7 @@ enum acpi_return_package_types { /* Support macros for users of the predefined info table */ -#define METHOD_PREDEF_ARGS_MAX 4 +#define METHOD_PREDEF_ARGS_MAX 5 #define METHOD_ARG_BIT_WIDTH 3 #define METHOD_ARG_MASK 0x0007 #define ARG_COUNT_IS_MINIMUM 0x8000 @@ -117,6 +117,7 @@ enum acpi_return_package_types { #define METHOD_2ARGS(a1,a2) (2 | (a1 << 3) | (a2 << 6)) #define METHOD_3ARGS(a1,a2,a3) (3 | (a1 << 3) | (a2 << 6) | (a3 << 9)) #define METHOD_4ARGS(a1,a2,a3,a4) (4 | (a1 << 3) | (a2 << 6) | (a3 << 9) | (a4 << 12)) +#define METHOD_5ARGS(a1,a2,a3,a4,a5) (5 | (a1 << 3) | (a2 << 6) | (a3 << 9) | (a4 << 12) | (a5 << 15)) #define METHOD_RETURNS(type) (type) #define METHOD_NO_RETURN_VALUE 0 @@ -327,6 +328,17 @@ const union acpi_predefined_info acpi_gbl_predefined_methods[] = { {{"_BMS", METHOD_1ARGS(ACPI_TYPE_INTEGER), METHOD_RETURNS(ACPI_RTYPE_INTEGER)}}, + {{"_BPC", METHOD_0ARGS, + METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Fixed-length (4 Int) */ + PACKAGE_INFO(ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 4, 0, 0, 0), + + {{"_BPS", METHOD_0ARGS, + METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Fixed-length (5 Int) */ + PACKAGE_INFO(ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 5, 0, 0, 0), + + {{"_BPT", METHOD_1ARGS(ACPI_TYPE_PACKAGE), + METHOD_RETURNS(ACPI_RTYPE_INTEGER)}}, + {{"_BQC", METHOD_0ARGS, METHOD_RETURNS(ACPI_RTYPE_INTEGER)}}, @@ -346,6 +358,10 @@ const union acpi_predefined_info acpi_gbl_predefined_methods[] = { {{"_CBA", METHOD_0ARGS, METHOD_RETURNS(ACPI_RTYPE_INTEGER)}}, /* See PCI firmware spec 3.0 */ + {{"_CBR", METHOD_0ARGS, + METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Fixed-length (3 Int) */ + PACKAGE_INFO(ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 3, 0, 0, 0), + {{"_CCA", METHOD_0ARGS, METHOD_RETURNS(ACPI_RTYPE_INTEGER)}}, /* ACPI 5.1 */ @@ -424,6 +440,9 @@ const union acpi_predefined_info acpi_gbl_predefined_methods[] = { {{"_DOS", METHOD_1ARGS(ACPI_TYPE_INTEGER), METHOD_NO_RETURN_VALUE}}, + {{"_DSC", METHOD_0ARGS, + METHOD_RETURNS(ACPI_RTYPE_INTEGER)}}, + {{"_DSD", METHOD_0ARGS, /* ACPI 6.0 */ METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Variable-length (Pkgs) each: 1 Buf, 1 Pkg */ PACKAGE_INFO(ACPI_PTYPE2_UUID_PAIR, ACPI_RTYPE_BUFFER, 1, @@ -431,7 +450,8 @@ const union acpi_predefined_info acpi_gbl_predefined_methods[] = { {{"_DSM", METHOD_4ARGS(ACPI_TYPE_BUFFER, ACPI_TYPE_INTEGER, ACPI_TYPE_INTEGER, - ACPI_TYPE_PACKAGE), + ACPI_TYPE_ANY | ACPI_TYPE_PACKAGE) | + ARG_COUNT_IS_MINIMUM, METHOD_RETURNS(ACPI_RTYPE_ALL)}}, /* Must return a value, but it can be of any type */ {{"_DSS", METHOD_1ARGS(ACPI_TYPE_INTEGER), @@ -631,6 +651,21 @@ const union acpi_predefined_info acpi_gbl_predefined_methods[] = { {{"_MTL", METHOD_0ARGS, /* ACPI 6.0 */ METHOD_RETURNS(ACPI_RTYPE_INTEGER)}}, + {{"_NBS", METHOD_0ARGS, /* ACPI 6.3 */ + METHOD_RETURNS(ACPI_RTYPE_BUFFER)}}, + + {{"_NCH", METHOD_0ARGS, /* ACPI 6.3 */ + METHOD_RETURNS(ACPI_RTYPE_BUFFER)}}, + + {{"_NIC", METHOD_0ARGS, /* ACPI 6.3 */ + METHOD_RETURNS(ACPI_RTYPE_BUFFER)}}, + + {{"_NIG", METHOD_0ARGS, /* ACPI 6.3 */ + METHOD_RETURNS(ACPI_RTYPE_BUFFER)}}, + + {{"_NIH", METHOD_1ARGS(ACPI_TYPE_BUFFER), /* ACPI 6.3 */ + METHOD_RETURNS(ACPI_RTYPE_BUFFER)}}, + {{"_NTT", METHOD_0ARGS, METHOD_RETURNS(ACPI_RTYPE_INTEGER)}}, @@ -887,9 +922,39 @@ const union acpi_predefined_info acpi_gbl_predefined_methods[] = { {{"_S4W", METHOD_0ARGS, METHOD_RETURNS(ACPI_RTYPE_INTEGER)}}, + {{"_SBA", METHOD_0ARGS, + METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Fixed-length (4 Int) */ + 
PACKAGE_INFO(ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 4, 0, 0, 0), + + {{"_SBI", METHOD_0ARGS, + METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Fixed-length (1 Int, 1 Buf) */ + PACKAGE_INFO(ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 1, + ACPI_RTYPE_BUFFER, 1, 0), + + {{"_SBR", + METHOD_3ARGS(ACPI_TYPE_INTEGER, ACPI_TYPE_INTEGER, + ACPI_TYPE_INTEGER), + METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Fixed-length (2 Int) */ + PACKAGE_INFO(ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 2, + ACPI_RTYPE_BUFFER | ACPI_RTYPE_INTEGER, 1, 0), + {{"_SBS", METHOD_0ARGS, METHOD_RETURNS(ACPI_RTYPE_INTEGER)}}, + {{"_SBT", + METHOD_4ARGS(ACPI_TYPE_INTEGER, ACPI_TYPE_INTEGER, ACPI_TYPE_INTEGER, + ACPI_TYPE_ANY), + METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Fixed-length (2 Int, 1 Buf | Int) */ + PACKAGE_INFO(ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 2, + ACPI_RTYPE_BUFFER | ACPI_RTYPE_INTEGER, 1, 0), + + {{"_SBW", + METHOD_5ARGS(ACPI_TYPE_INTEGER, ACPI_TYPE_INTEGER, ACPI_TYPE_INTEGER, + ACPI_TYPE_INTEGER, ACPI_TYPE_ANY), + METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, + PACKAGE_INFO(ACPI_PTYPE1_FIXED, ACPI_RTYPE_BUFFER | ACPI_RTYPE_INTEGER, + 1, 0, 0, 0), + {{"_SCP", METHOD_1ARGS(ACPI_TYPE_INTEGER) | ARG_COUNT_IS_MINIMUM, METHOD_NO_RETURN_VALUE}}, /* Acpi 1.0 allowed 1 integer arg. Acpi 3.0 expanded to 3 args. Allow both. */ diff --git a/drivers/acpi/acpica/acresrc.h b/drivers/acpi/acpica/acresrc.h index 59ae8b1a6e40..e8a92be5adae 100644 --- a/drivers/acpi/acpica/acresrc.h +++ b/drivers/acpi/acpica/acresrc.h @@ -3,7 +3,7 @@ * * Name: acresrc.h - Resource Manager function prototypes * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. * *****************************************************************************/ @@ -46,6 +46,7 @@ typedef enum { ACPI_RSC_1BITFLAG, ACPI_RSC_2BITFLAG, ACPI_RSC_3BITFLAG, + ACPI_RSC_6BITFLAG, ACPI_RSC_ADDRESS, ACPI_RSC_BITMASK, ACPI_RSC_BITMASK16, @@ -102,6 +103,7 @@ typedef enum { ACPI_RSD_1BITFLAG, ACPI_RSD_2BITFLAG, ACPI_RSD_3BITFLAG, + ACPI_RSD_6BITFLAG, ACPI_RSD_ADDRESS, ACPI_RSD_DWORDLIST, ACPI_RSD_LITERAL, @@ -295,6 +297,7 @@ extern struct acpi_rsconvert_info acpi_rs_convert_address64[]; extern struct acpi_rsconvert_info acpi_rs_convert_ext_address64[]; extern struct acpi_rsconvert_info acpi_rs_convert_gpio[]; extern struct acpi_rsconvert_info acpi_rs_convert_fixed_dma[]; +extern struct acpi_rsconvert_info acpi_rs_convert_csi2_serial_bus[]; extern struct acpi_rsconvert_info acpi_rs_convert_i2c_serial_bus[]; extern struct acpi_rsconvert_info acpi_rs_convert_spi_serial_bus[]; extern struct acpi_rsconvert_info acpi_rs_convert_uart_serial_bus[]; @@ -303,6 +306,7 @@ extern struct acpi_rsconvert_info acpi_rs_convert_pin_config[]; extern struct acpi_rsconvert_info acpi_rs_convert_pin_group[]; extern struct acpi_rsconvert_info acpi_rs_convert_pin_group_function[]; extern struct acpi_rsconvert_info acpi_rs_convert_pin_group_config[]; +extern struct acpi_rsconvert_info acpi_rs_convert_clock_input[]; /* These resources require separate get/set tables */ @@ -349,6 +353,7 @@ extern struct acpi_rsdump_info acpi_rs_dump_gpio[]; extern struct acpi_rsdump_info acpi_rs_dump_pin_function[]; extern struct acpi_rsdump_info acpi_rs_dump_fixed_dma[]; extern struct acpi_rsdump_info acpi_rs_dump_common_serial_bus[]; +extern struct acpi_rsdump_info acpi_rs_dump_csi2_serial_bus[]; extern struct acpi_rsdump_info acpi_rs_dump_i2c_serial_bus[]; extern struct acpi_rsdump_info acpi_rs_dump_spi_serial_bus[]; extern struct acpi_rsdump_info acpi_rs_dump_uart_serial_bus[]; @@ -357,6 +362,7 @@ extern struct 
acpi_rsdump_info acpi_rs_dump_pin_config[]; extern struct acpi_rsdump_info acpi_rs_dump_pin_group[]; extern struct acpi_rsdump_info acpi_rs_dump_pin_group_function[]; extern struct acpi_rsdump_info acpi_rs_dump_pin_group_config[]; +extern struct acpi_rsdump_info acpi_rs_dump_clock_input[]; #endif #endif /* __ACRESRC_H__ */ diff --git a/drivers/acpi/acpica/acstruct.h b/drivers/acpi/acpica/acstruct.h index 14be32961b4c..e690f604cfa0 100644 --- a/drivers/acpi/acpica/acstruct.h +++ b/drivers/acpi/acpica/acstruct.h @@ -3,7 +3,7 @@ * * Name: acstruct.h - Internal structs * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. * *****************************************************************************/ @@ -167,9 +167,9 @@ struct acpi_evaluate_info { u32 return_flags; /* Used for return value analysis */ u32 return_btype; /* Bitmapped type of the returned object */ u16 param_count; /* Count of the input argument list */ + u16 node_flags; /* Same as Node->Flags */ u8 pass_number; /* Parser pass number */ u8 return_object_type; /* Object type of the returned object */ - u8 node_flags; /* Same as Node->Flags */ u8 flags; /* General flags */ }; @@ -192,6 +192,16 @@ struct acpi_device_walk_info { u32 num_INI; }; +/* Info used by Acpi acpi_db_display_fields */ + +struct acpi_region_walk_info { + u32 debug_level; + u32 count; + acpi_owner_id owner_id; + u8 display_type; + u32 address_space_id; +}; + /* TBD: [Restructure] Merge with struct above */ struct acpi_walk_info { diff --git a/drivers/acpi/acpica/actables.h b/drivers/acpi/acpica/actables.h index 12fac33ce77e..ebef72bf58d0 100644 --- a/drivers/acpi/acpica/actables.h +++ b/drivers/acpi/acpica/actables.h @@ -3,7 +3,7 @@ * * Name: actables.h - ACPI table management * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. 
* *****************************************************************************/ @@ -35,7 +35,8 @@ acpi_tb_init_table_descriptor(struct acpi_table_desc *table_desc, acpi_status acpi_tb_acquire_temp_table(struct acpi_table_desc *table_desc, - acpi_physical_address address, u8 flags); + acpi_physical_address address, + u8 flags, struct acpi_table_header *table); void acpi_tb_release_temp_table(struct acpi_table_desc *table_desc); @@ -86,6 +87,7 @@ acpi_tb_release_table(struct acpi_table_header *table, acpi_status acpi_tb_install_standard_table(acpi_physical_address address, u8 flags, + struct acpi_table_header *table, u8 reload, u8 override, u32 *table_index); void acpi_tb_uninstall_table(struct acpi_table_desc *table_desc); @@ -95,7 +97,9 @@ acpi_tb_load_table(u32 table_index, struct acpi_namespace_node *parent_node); acpi_status acpi_tb_install_and_load_table(acpi_physical_address address, - u8 flags, u8 override, u32 *table_index); + u8 flags, + struct acpi_table_header *table, + u8 override, u32 *table_index); acpi_status acpi_tb_unload_table(u32 table_index); @@ -120,11 +124,6 @@ void acpi_tb_print_table_header(acpi_physical_address address, struct acpi_table_header *header); -u8 acpi_tb_checksum(u8 *buffer, u32 length); - -acpi_status -acpi_tb_verify_checksum(struct acpi_table_header *table, u32 length); - void acpi_tb_check_dsdt_header(void); struct acpi_table_header *acpi_tb_copy_dsdt(u32 table_index); diff --git a/drivers/acpi/acpica/acutils.h b/drivers/acpi/acpica/acutils.h index 3374d41582b5..3990d509bbab 100644 --- a/drivers/acpi/acpica/acutils.h +++ b/drivers/acpi/acpica/acutils.h @@ -3,7 +3,7 @@ * * Name: acutils.h -- prototypes for the common (subsystem-wide) procedures * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. 
* *****************************************************************************/ @@ -28,6 +28,7 @@ extern const char *acpi_gbl_max_decode[]; extern const char *acpi_gbl_mem_decode[]; extern const char *acpi_gbl_min_decode[]; extern const char *acpi_gbl_mtp_decode[]; +extern const char *acpi_gbl_phy_decode[]; extern const char *acpi_gbl_rng_decode[]; extern const char *acpi_gbl_rw_decode[]; extern const char *acpi_gbl_shr_decode[]; @@ -52,6 +53,8 @@ extern const char *acpi_gbl_sb_decode[]; extern const char *acpi_gbl_fc_decode[]; extern const char *acpi_gbl_pt_decode[]; extern const char *acpi_gbl_ptyp_decode[]; +extern const char *acpi_gbl_clock_input_mode[]; +extern const char *acpi_gbl_clock_input_scale[]; #endif /* @@ -142,10 +145,11 @@ struct acpi_pkg_info { /* acpi_ut_dump_buffer */ -#define DB_BYTE_DISPLAY 1 -#define DB_WORD_DISPLAY 2 -#define DB_DWORD_DISPLAY 4 -#define DB_QWORD_DISPLAY 8 +#define DB_BYTE_DISPLAY 0x01 +#define DB_WORD_DISPLAY 0x02 +#define DB_DWORD_DISPLAY 0x04 +#define DB_QWORD_DISPLAY 0x08 +#define DB_DISPLAY_DATA_ONLY 0x10 /* * utascii - ASCII utilities @@ -157,6 +161,19 @@ u8 acpi_ut_valid_name_char(char character, u32 position); void acpi_ut_check_and_repair_ascii(u8 *name, char *repaired_name, u32 count); /* + * utcksum - Checksum utilities + */ +u8 acpi_ut_generate_checksum(void *table, u32 length, u8 original_checksum); + +u8 acpi_ut_checksum(u8 *buffer, u32 length); + +acpi_status +acpi_ut_verify_cdat_checksum(struct acpi_table_cdat *cdat_table, u32 length); + +acpi_status +acpi_ut_verify_checksum(struct acpi_table_header *table, u32 length); + +/* * utnonansi - Non-ANSI C library functions */ void acpi_ut_strupr(char *src_string); @@ -686,22 +703,26 @@ void acpi_ut_delete_address_lists(void); /* * utxferror - various error/warning output functions */ +ACPI_PRINTF_LIKE(5) void ACPI_INTERNAL_VAR_XFACE acpi_ut_predefined_warning(const char *module_name, u32 line_number, char *pathname, - u8 node_flags, const char *format, ...); + u16 node_flags, const char *format, ...); +ACPI_PRINTF_LIKE(5) void ACPI_INTERNAL_VAR_XFACE acpi_ut_predefined_info(const char *module_name, u32 line_number, - char *pathname, u8 node_flags, const char *format, ...); + char *pathname, + u16 node_flags, const char *format, ...); +ACPI_PRINTF_LIKE(5) void ACPI_INTERNAL_VAR_XFACE acpi_ut_predefined_bios_error(const char *module_name, u32 line_number, char *pathname, - u8 node_flags, const char *format, ...); + u16 node_flags, const char *format, ...); void acpi_ut_prefixed_namespace_error(const char *module_name, @@ -731,6 +752,8 @@ const char *acpi_ah_match_uuid(u8 *data); */ #if (defined ACPI_ASL_COMPILER || defined ACPI_EXEC_APP || defined ACPI_HELP_APP) void acpi_ut_convert_string_to_uuid(char *in_string, u8 *uuid_buffer); + +acpi_status acpi_ut_convert_uuid_to_string(char *uuid_buffer, char *out_string); #endif #endif /* _ACUTILS_H */ diff --git a/drivers/acpi/acpica/amlcode.h b/drivers/acpi/acpica/amlcode.h index 6c05355447c1..c5b544a006c5 100644 --- a/drivers/acpi/acpica/amlcode.h +++ b/drivers/acpi/acpica/amlcode.h @@ -5,7 +5,7 @@ * Declarations and definitions contained herein are derived * directly from the ACPI specification. * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. 
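The checksum helpers gathered into the new utcksum module above (acpi_ut_checksum, acpi_ut_verify_checksum, acpi_ut_generate_checksum) all encode the usual ACPI table rule: every byte of the table, including the Checksum field itself, must sum to zero modulo 256. A minimal standalone sketch of that rule, not ACPICA's implementation; the function names below are illustrative only:

#include <stdint.h>
#include <stddef.h>

/* Sum every byte of a table image; a valid ACPI table sums to 0 (mod 256). */
static uint8_t table_byte_sum(const uint8_t *buf, size_t len)
{
	uint8_t sum = 0;

	while (len--)
		sum = (uint8_t)(sum + *buf++);
	return sum;
}

/* Recompute the Checksum byte after a table was modified in place: remove the
 * stale checksum from the running sum, then negate the remainder so that the
 * whole image sums to zero again. */
static uint8_t table_new_checksum(const uint8_t *buf, size_t len, uint8_t old_checksum)
{
	return (uint8_t)(0 - (uint8_t)(table_byte_sum(buf, len) - old_checksum));
}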
* *****************************************************************************/ diff --git a/drivers/acpi/acpica/amlresrc.h b/drivers/acpi/acpica/amlresrc.h index cdb590176e9d..54d6e51e0b9a 100644 --- a/drivers/acpi/acpica/amlresrc.h +++ b/drivers/acpi/acpica/amlresrc.h @@ -3,7 +3,7 @@ * * Module Name: amlresrc.h - AML resource descriptors * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. * *****************************************************************************/ @@ -40,6 +40,7 @@ #define ACPI_RESTAG_IORESTRICTION "_IOR" #define ACPI_RESTAG_LENGTH "_LEN" #define ACPI_RESTAG_LINE "_LIN" +#define ACPI_RESTAG_LOCALPORT "_PRT" #define ACPI_RESTAG_MEMATTRIBUTES "_MTP" /* Memory(0), Reserved(1), ACPI(2), NVS(3) */ #define ACPI_RESTAG_MEMTYPE "_MEM" /* non_cache(0), Cacheable(1) Cache+combine(2), Cache+prefetch(3) */ #define ACPI_RESTAG_MAXADDR "_MAX" @@ -49,6 +50,7 @@ #define ACPI_RESTAG_MODE "_MOD" #define ACPI_RESTAG_PARITY "_PAR" #define ACPI_RESTAG_PHASE "_PHA" +#define ACPI_RESTAG_PHYTYPE "_PHY" #define ACPI_RESTAG_PIN "_PIN" #define ACPI_RESTAG_PINCONFIG "_PPI" #define ACPI_RESTAG_PINCONFIG_TYPE "_TYP" @@ -68,6 +70,8 @@ #define ACPI_RESTAG_TYPE "_TTP" /* Translation(1), Static (0) */ #define ACPI_RESTAG_XFERTYPE "_SIZ" /* 8(0), 8And16(1), 16(2) */ #define ACPI_RESTAG_VENDORDATA "_VEN" +#define ACPI_RESTAG_FQN "_FQN" +#define ACPI_RESTAG_FQD "_FQD" /* Default sizes for "small" resource descriptors */ @@ -257,7 +261,10 @@ struct aml_resource_address16 { struct aml_resource_extended_irq { AML_RESOURCE_LARGE_HEADER_COMMON u8 flags; u8 interrupt_count; - u32 interrupts[1]; + union { + u32 interrupt; + ACPI_FLEX_ARRAY(u32, interrupts); + }; /* res_source_index, res_source optional fields follow */ }; @@ -316,12 +323,26 @@ struct aml_resource_gpio { #define AML_RESOURCE_I2C_SERIALBUSTYPE 1 #define AML_RESOURCE_SPI_SERIALBUSTYPE 2 #define AML_RESOURCE_UART_SERIALBUSTYPE 3 -#define AML_RESOURCE_MAX_SERIALBUSTYPE 3 +#define AML_RESOURCE_CSI2_SERIALBUSTYPE 4 +#define AML_RESOURCE_MAX_SERIALBUSTYPE 4 #define AML_RESOURCE_VENDOR_SERIALBUSTYPE 192 /* Vendor defined is 0xC0-0xFF (NOT SUPPORTED) */ struct aml_resource_common_serialbus { AML_RESOURCE_LARGE_HEADER_COMMON AML_RESOURCE_SERIAL_COMMON}; +struct aml_resource_csi2_serialbus { + AML_RESOURCE_LARGE_HEADER_COMMON AML_RESOURCE_SERIAL_COMMON + /* + * Optional fields follow immediately: + * 1) Vendor Data bytes + * 2) Resource Source String + */ +}; + +#define AML_RESOURCE_CSI2_REVISION 1 /* ACPI 6.4 */ +#define AML_RESOURCE_CSI2_TYPE_REVISION 1 /* ACPI 6.4 */ +#define AML_RESOURCE_CSI2_MIN_DATA_LEN 0 /* ACPI 6.4 */ + struct aml_resource_i2c_serialbus { AML_RESOURCE_LARGE_HEADER_COMMON AML_RESOURCE_SERIAL_COMMON u32 connection_speed; @@ -411,6 +432,20 @@ struct aml_resource_pin_config { */ }; +#define AML_RESOURCE_CLOCK_INPUT_REVISION 1 /* ACPI 6.5 */ + +struct aml_resource_clock_input { + AML_RESOURCE_LARGE_HEADER_COMMON u8 revision_id; + u16 flags; + u16 frequency_divisor; + u32 frequency_numerator; + /* + * Optional fields follow immediately: + * 1) Resource Source index + * 2) Resource Source String + */ +}; + #define AML_RESOURCE_PIN_CONFIG_REVISION 1 /* ACPI 6.2 */ struct aml_resource_pin_group { @@ -469,10 +504,6 @@ struct aml_resource_pin_group_config { #define AML_RESOURCE_PIN_GROUP_CONFIG_REVISION 1 /* ACPI 6.2 */ -/* restore default alignment */ - -#pragma pack() - /* Union of all resource descriptors, so we can allocate the worst case */ union aml_resource { @@ -510,12 +541,14 @@ union 
aml_resource { struct aml_resource_i2c_serialbus i2c_serial_bus; struct aml_resource_spi_serialbus spi_serial_bus; struct aml_resource_uart_serialbus uart_serial_bus; + struct aml_resource_csi2_serialbus csi2_serial_bus; struct aml_resource_common_serialbus common_serial_bus; struct aml_resource_pin_function pin_function; struct aml_resource_pin_config pin_config; struct aml_resource_pin_group pin_group; struct aml_resource_pin_group_function pin_group_function; struct aml_resource_pin_group_config pin_group_config; + struct aml_resource_clock_input clock_input; /* Utility overlays */ @@ -525,6 +558,10 @@ union aml_resource { u8 byte_item; }; +/* restore default alignment */ + +#pragma pack() + /* Interfaces used by both the disassembler and compiler */ void diff --git a/drivers/acpi/acpica/dbcmds.c b/drivers/acpi/acpica/dbcmds.c index 9eb68e0751c7..3d99a9048585 100644 --- a/drivers/acpi/acpica/dbcmds.c +++ b/drivers/acpi/acpica/dbcmds.c @@ -1010,6 +1010,64 @@ void acpi_db_display_resources(char *object_arg) acpi_db_set_output_destination(ACPI_DB_CONSOLE_OUTPUT); } +/******************************************************************************* + * + * FUNCTION: acpi_db_generate_ged + * + * PARAMETERS: ged_arg - Raw GED number, ascii string + * + * RETURN: None + * + * DESCRIPTION: Simulate firing of a GED + * + ******************************************************************************/ + +void acpi_db_generate_interrupt(char *gsiv_arg) +{ + u32 gsiv_number; + struct acpi_ged_handler_info *ged_info = acpi_gbl_ged_handler_list; + + if (!ged_info) { + acpi_os_printf("No GED handling present\n"); + } + + gsiv_number = strtoul(gsiv_arg, NULL, 0); + + while (ged_info) { + + if (ged_info->int_id == gsiv_number) { + struct acpi_object_list arg_list; + union acpi_object arg0; + acpi_handle evt_handle = ged_info->evt_method; + acpi_status status; + + acpi_os_printf("Evaluate GED _EVT (GSIV=%d)\n", + gsiv_number); + + if (!evt_handle) { + acpi_os_printf("Undefined _EVT method\n"); + return; + } + + arg0.integer.type = ACPI_TYPE_INTEGER; + arg0.integer.value = gsiv_number; + + arg_list.count = 1; + arg_list.pointer = &arg0; + + status = + acpi_evaluate_object(evt_handle, NULL, &arg_list, + NULL); + if (ACPI_FAILURE(status)) { + acpi_os_printf("Could not evaluate _EVT\n"); + return; + } + + } + ged_info = ged_info->next; + } +} + #if (!ACPI_REDUCED_HARDWARE) /******************************************************************************* * diff --git a/drivers/acpi/acpica/dbconvert.c b/drivers/acpi/acpica/dbconvert.c index 9fd9a98a9cbe..8dbab6932049 100644 --- a/drivers/acpi/acpica/dbconvert.c +++ b/drivers/acpi/acpica/dbconvert.c @@ -106,6 +106,10 @@ acpi_db_convert_to_buffer(char *string, union acpi_object *object) u8 *buffer; acpi_status status; + /* Skip all preceding white space */ + + acpi_ut_remove_whitespace(&string); + /* Generate the final buffer length */ for (i = 0, length = 0; string[i];) { @@ -170,6 +174,8 @@ acpi_status acpi_db_convert_to_package(char *string, union acpi_object *object) elements = ACPI_ALLOCATE_ZEROED(DB_DEFAULT_PKG_ELEMENTS * sizeof(union acpi_object)); + if (!elements) + return (AE_NO_MEMORY); this = string; for (i = 0; i < (DB_DEFAULT_PKG_ELEMENTS - 1); i++) { diff --git a/drivers/acpi/acpica/dbdisply.c b/drivers/acpi/acpica/dbdisply.c index 9fcb8ec64681..d41eb9e67500 100644 --- a/drivers/acpi/acpica/dbdisply.c +++ b/drivers/acpi/acpica/dbdisply.c @@ -51,6 +51,8 @@ static acpi_adr_space_type acpi_gbl_space_id_list[] = { ACPI_ADR_SPACE_IPMI, ACPI_ADR_SPACE_GPIO, 
ACPI_ADR_SPACE_GSBUS, + ACPI_ADR_SPACE_PLATFORM_COMM, + ACPI_ADR_SPACE_PLATFORM_RT, ACPI_ADR_SPACE_DATA_TABLE, ACPI_ADR_SPACE_FIXED_HARDWARE }; @@ -237,7 +239,7 @@ void acpi_db_decode_and_display_object(char *target, char *output_type) default: - /* Is not a recognizeable object */ + /* Is not a recognizable object */ acpi_os_printf ("Not a known ACPI internal object, descriptor type %2.2X\n", @@ -513,7 +515,6 @@ void acpi_db_display_results(void) return; } - obj_desc = walk_state->method_desc; node = walk_state->method_node; if (walk_state->results) { @@ -565,7 +566,6 @@ void acpi_db_display_calling_tree(void) return; } - node = walk_state->method_node; acpi_os_printf("Current Control Method Call Tree\n"); while (walk_state) { @@ -647,7 +647,7 @@ void acpi_db_display_object_type(char *object_arg) * * DESCRIPTION: Display the result of an AML opcode * - * Note: Curently only displays the result object if we are single stepping. + * Note: Currently only displays the result object if we are single stepping. * However, this output may be useful in other contexts and could be enabled * to do so if needed. * diff --git a/drivers/acpi/acpica/dbexec.c b/drivers/acpi/acpica/dbexec.c index 6abb6b834d97..d3a9521e2dc8 100644 --- a/drivers/acpi/acpica/dbexec.c +++ b/drivers/acpi/acpica/dbexec.c @@ -86,7 +86,8 @@ void acpi_db_delete_objects(u32 count, union acpi_object *objects) * * RETURN: Status * - * DESCRIPTION: Execute a control method. + * DESCRIPTION: Execute a control method. Used to evaluate objects via the + * "EXECUTE" or "EVALUATE" commands. * ******************************************************************************/ @@ -160,12 +161,12 @@ acpi_db_execute_method(struct acpi_db_method_info *info, } ACPI_EXCEPTION((AE_INFO, status, - "while executing %s from debugger", + "while executing %s from AML Debugger", info->pathname)); if (status == AE_BUFFER_OVERFLOW) { ACPI_ERROR((AE_INFO, - "Possible overflow of internal debugger " + "Possible buffer overflow within AML Debugger " "buffer (size 0x%X needed 0x%X)", ACPI_DEBUG_BUFFER_SIZE, (u32)return_obj->length)); @@ -314,11 +315,12 @@ acpi_db_execution_walk(acpi_handle obj_handle, status = acpi_evaluate_object(node, NULL, NULL, &return_obj); + acpi_gbl_method_executing = FALSE; + acpi_os_printf("Evaluation of [%4.4s] returned %s\n", acpi_ut_get_node_name(node), acpi_format_exception(status)); - acpi_gbl_method_executing = FALSE; return (AE_OK); } @@ -334,7 +336,8 @@ acpi_db_execution_walk(acpi_handle obj_handle, * RETURN: None * * DESCRIPTION: Execute a control method. Name is relative to the current - * scope. + * scope. 
Function used for the "EXECUTE", "EVALUATE", and + * "ALL" commands * ******************************************************************************/ @@ -372,6 +375,12 @@ acpi_db_execute(char *name, char **args, acpi_object_type *types, u32 flags) return; } + if ((flags & EX_ALL) && (strlen(name) > 4)) { + acpi_os_printf("Input name (%s) must be a 4-char NameSeg\n", + name); + return; + } + name_string = ACPI_ALLOCATE(strlen(name) + 1); if (!name_string) { return; @@ -389,13 +398,24 @@ acpi_db_execute(char *name, char **args, acpi_object_type *types, u32 flags) return; } - acpi_gbl_db_method_info.name = name_string; - acpi_gbl_db_method_info.args = args; - acpi_gbl_db_method_info.types = types; - acpi_gbl_db_method_info.flags = flags; + /* Command (ALL <nameseg>) to execute all methods of a particular name */ - return_obj.pointer = NULL; - return_obj.length = ACPI_ALLOCATE_BUFFER; + else if (flags & EX_ALL) { + acpi_gbl_db_method_info.name = name_string; + return_obj.pointer = NULL; + return_obj.length = ACPI_ALLOCATE_BUFFER; + acpi_db_evaluate_all(name_string); + ACPI_FREE(name_string); + return; + } else { + acpi_gbl_db_method_info.name = name_string; + acpi_gbl_db_method_info.args = args; + acpi_gbl_db_method_info.types = types; + acpi_gbl_db_method_info.flags = flags; + + return_obj.pointer = NULL; + return_obj.length = ACPI_ALLOCATE_BUFFER; + } status = acpi_db_execute_setup(&acpi_gbl_db_method_info); if (ACPI_FAILURE(status)) { @@ -450,10 +470,11 @@ acpi_db_execute(char *name, char **args, acpi_object_type *types, u32 flags) (u32)return_obj.length); acpi_db_dump_external_object(return_obj.pointer, 1); + acpi_os_printf("\n"); /* Dump a _PLD buffer if present */ - if (ACPI_COMPARE_NAME + if (ACPI_COMPARE_NAMESEG ((ACPI_CAST_PTR (struct acpi_namespace_node, acpi_gbl_db_method_info.method)->name.ascii), diff --git a/drivers/acpi/acpica/dbfileio.c b/drivers/acpi/acpica/dbfileio.c index c6e25734dc5c..e1b6e54a96ac 100644 --- a/drivers/acpi/acpica/dbfileio.c +++ b/drivers/acpi/acpica/dbfileio.c @@ -93,7 +93,7 @@ acpi_status acpi_db_load_tables(struct acpi_new_table_desc *list_head) while (table_list_head) { table = table_list_head->table; - status = acpi_load_table(table); + status = acpi_load_table(table, NULL); if (ACPI_FAILURE(status)) { if (status == AE_ALREADY_EXISTS) { acpi_os_printf diff --git a/drivers/acpi/acpica/dbhistry.c b/drivers/acpi/acpica/dbhistry.c index b0b9a26c7db5..554ae35108bd 100644 --- a/drivers/acpi/acpica/dbhistry.c +++ b/drivers/acpi/acpica/dbhistry.c @@ -3,7 +3,7 @@ * * Module Name: dbhistry - debugger HISTORY command * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. * *****************************************************************************/ @@ -27,7 +27,6 @@ static HISTORY_INFO acpi_gbl_history_buffer[HISTORY_SIZE]; static u16 acpi_gbl_lo_history = 0; static u16 acpi_gbl_num_history = 0; static u16 acpi_gbl_next_history_index = 0; -u32 acpi_gbl_next_cmd_num = 1; /******************************************************************************* * @@ -121,7 +120,7 @@ void acpi_db_display_history(void) for (i = 0; i < acpi_gbl_num_history; i++) { if (acpi_gbl_history_buffer[history_index].command) { - acpi_os_printf("%3ld %s\n", + acpi_os_printf("%3u %s\n", acpi_gbl_history_buffer[history_index]. cmd_num, acpi_gbl_history_buffer[history_index]. 
diff --git a/drivers/acpi/acpica/dbinput.c b/drivers/acpi/acpica/dbinput.c index 3e5f95390f0d..861b12c334ab 100644 --- a/drivers/acpi/acpica/dbinput.c +++ b/drivers/acpi/acpica/dbinput.c @@ -37,6 +37,7 @@ acpi_db_match_command_help(const char *command, enum acpi_ex_debugger_commands { CMD_NOT_FOUND = 0, CMD_NULL, + CMD_ALL, CMD_ALLOCATIONS, CMD_ARGS, CMD_ARGUMENTS, @@ -50,6 +51,7 @@ enum acpi_ex_debugger_commands { CMD_EVALUATE, CMD_EXECUTE, CMD_EXIT, + CMD_FIELDS, CMD_FIND, CMD_GO, CMD_HANDLERS, @@ -104,6 +106,7 @@ enum acpi_ex_debugger_commands { CMD_THREADS, CMD_TEST, + CMD_INTERRUPT, #endif }; @@ -114,6 +117,7 @@ enum acpi_ex_debugger_commands { static const struct acpi_db_command_info acpi_gbl_db_commands[] = { {"<NOT FOUND>", 0}, {"<NULL>", 0}, + {"ALL", 1}, {"ALLOCATIONS", 0}, {"ARGS", 0}, {"ARGUMENTS", 0}, @@ -127,6 +131,7 @@ static const struct acpi_db_command_info acpi_gbl_db_commands[] = { {"EVALUATE", 1}, {"EXECUTE", 1}, {"EXIT", 0}, + {"FIELDS", 1}, {"FIND", 1}, {"GO", 0}, {"HANDLERS", 0}, @@ -181,6 +186,7 @@ static const struct acpi_db_command_info acpi_gbl_db_commands[] = { {"THREADS", 3}, {"TEST", 1}, + {"INTERRUPT", 1}, #endif {NULL, 0} }; @@ -200,6 +206,8 @@ static const struct acpi_db_command_help acpi_gbl_db_command_help[] = { "Find ACPI name(s) with wildcards\n"}, {1, " Integrity", "Validate namespace integrity\n"}, {1, " Methods", "Display list of loaded control methods\n"}, + {1, " Fields <AddressSpaceId>", + "Display list of loaded field units by space ID\n"}, {1, " Namespace [Object] [Depth]", "Display loaded namespace tree/subtree\n"}, {1, " Notify <Object> <Value>", "Send a notification on Object\n"}, @@ -218,6 +226,7 @@ static const struct acpi_db_command_help acpi_gbl_db_command_help[] = { {1, " Type <Object>", "Display object type\n"}, {0, "\nControl Method Execution:", "\n"}, + {1, " All <NameSeg>", "Evaluate all objects named NameSeg\n"}, {1, " Evaluate <Namepath> [Arguments]", "Evaluate object or control method\n"}, {1, " Execute <Namepath> [Arguments]", "Synonym for Evaluate\n"}, @@ -311,6 +320,7 @@ static const struct acpi_db_command_help acpi_gbl_db_command_help[] = { {1, " Gpes", "Display info on all GPE devices\n"}, {1, " Sci", "Generate an SCI\n"}, {1, " Sleep [SleepState]", "Simulate sleep/wake sequence(s) (0-5)\n"}, + {1, " Interrupt <GSIV>", "Simulate an interrupt\n"}, #endif {0, NULL, NULL} }; @@ -432,7 +442,7 @@ static void acpi_db_display_help(char *command) acpi_os_printf("\n"); } else { - /* Display help for all commands that match the subtring */ + /* Display help for all commands that match the substring */ acpi_db_display_command_info(command, TRUE); } @@ -464,16 +474,14 @@ char *acpi_db_get_next_token(char *string, return (NULL); } - /* Remove any spaces at the beginning */ + /* Remove any spaces at the beginning, ignore blank lines */ - if (*string == ' ') { - while (*string && (*string == ' ')) { - string++; - } + while (*string && isspace((int)*string)) { + string++; + } - if (!(*string)) { - return (NULL); - } + if (!(*string)) { + return (NULL); } switch (*string) { @@ -507,6 +515,21 @@ char *acpi_db_get_next_token(char *string, } break; + case '{': + + /* This is the start of a field unit, scan until closing brace */ + + string++; + start = string; + type = ACPI_TYPE_FIELD_UNIT; + + /* Find end of buffer */ + + while (*string && (*string != '}')) { + string++; + } + break; + case '[': /* This is the start of a package, scan until closing bracket */ @@ -551,7 +574,7 @@ char *acpi_db_get_next_token(char *string, /* Find end of token */ 
- while (*string && (*string != ' ')) { + while (*string && !isspace((int)*string)) { string++; } break; @@ -593,7 +616,7 @@ static u32 acpi_db_get_line(char *input_buffer) input_buffer)) { acpi_os_printf ("Buffer overflow while parsing input line (max %u characters)\n", - sizeof(acpi_gbl_db_parsed_buf)); + (u32)sizeof(acpi_gbl_db_parsed_buf)); return (0); } @@ -674,6 +697,7 @@ acpi_db_command_dispatch(char *input_buffer, union acpi_parse_object *op) { u32 temp; + u64 temp64; u32 command_index; u32 param_count; char *command_line; @@ -689,7 +713,6 @@ acpi_db_command_dispatch(char *input_buffer, param_count = acpi_db_get_line(input_buffer); command_index = acpi_db_match_command(acpi_gbl_db_args[0]); - temp = 0; /* * We don't want to add the !! command to the history buffer. It @@ -723,6 +746,15 @@ acpi_db_command_dispatch(char *input_buffer, } break; + case CMD_ALL: + + acpi_os_printf("Executing all objects with NameSeg: %s\n", + acpi_gbl_db_args[1]); + acpi_db_execute(acpi_gbl_db_args[1], &acpi_gbl_db_args[2], + &acpi_gbl_db_arg_types[2], + EX_NO_SINGLE_STEP | EX_ALL); + break; + case CMD_ALLOCATIONS: #ifdef ACPI_DBG_TRACK_ALLOCATIONS @@ -790,6 +822,21 @@ acpi_db_command_dispatch(char *input_buffer, status = acpi_db_find_name_in_namespace(acpi_gbl_db_args[1]); break; + case CMD_FIELDS: + + status = acpi_ut_strtoul64(acpi_gbl_db_args[1], &temp64); + + if (ACPI_FAILURE(status) + || temp64 >= ACPI_NUM_PREDEFINED_REGIONS) { + acpi_os_printf + ("Invalid address space ID: must be between 0 and %u inclusive\n", + ACPI_NUM_PREDEFINED_REGIONS - 1); + return (AE_OK); + } + + status = acpi_db_display_fields((u32)temp64); + break; + case CMD_GO: acpi_gbl_cm_single_step = FALSE; @@ -853,24 +900,24 @@ acpi_db_command_dispatch(char *input_buffer, if (param_count == 0) { acpi_os_printf - ("Current debug level for file output is: %8.8lX\n", + ("Current debug level for file output is: %8.8X\n", acpi_gbl_db_debug_level); acpi_os_printf - ("Current debug level for console output is: %8.8lX\n", + ("Current debug level for console output is: %8.8X\n", acpi_gbl_db_console_debug_level); } else if (param_count == 2) { temp = acpi_gbl_db_console_debug_level; acpi_gbl_db_console_debug_level = strtoul(acpi_gbl_db_args[1], NULL, 16); acpi_os_printf - ("Debug Level for console output was %8.8lX, now %8.8lX\n", + ("Debug Level for console output was %8.8X, now %8.8X\n", temp, acpi_gbl_db_console_debug_level); } else { temp = acpi_gbl_db_debug_level; acpi_gbl_db_debug_level = strtoul(acpi_gbl_db_args[1], NULL, 16); acpi_os_printf - ("Debug Level for file output was %8.8lX, now %8.8lX\n", + ("Debug Level for file output was %8.8X, now %8.8X\n", temp, acpi_gbl_db_debug_level); } break; @@ -1020,6 +1067,11 @@ acpi_db_command_dispatch(char *input_buffer, acpi_os_printf("Event command not implemented\n"); break; + case CMD_INTERRUPT: + + acpi_db_generate_interrupt(acpi_gbl_db_args[1]); + break; + case CMD_GPE: acpi_db_generate_gpe(acpi_gbl_db_args[1], acpi_gbl_db_args[2]); diff --git a/drivers/acpi/acpica/dbmethod.c b/drivers/acpi/acpica/dbmethod.c index d8b7a0fe92ec..889d13828e49 100644 --- a/drivers/acpi/acpica/dbmethod.c +++ b/drivers/acpi/acpica/dbmethod.c @@ -21,6 +21,8 @@ static acpi_status acpi_db_walk_for_execute(acpi_handle obj_handle, u32 nesting_level, void *context, void **return_value); +static acpi_status acpi_db_evaluate_object(struct acpi_namespace_node *node); + /******************************************************************************* * * FUNCTION: acpi_db_set_method_breakpoint @@ -302,6 +304,10 @@ 
acpi_status acpi_db_disassemble_method(char *name) } status = acpi_ut_allocate_owner_id(&obj_desc->method.owner_id); + if (ACPI_FAILURE(status)) { + return (status); + } + walk_state->owner_id = obj_desc->method.owner_id; /* Push start scope on scope stack and make it current */ @@ -317,6 +323,10 @@ acpi_status acpi_db_disassemble_method(char *name) walk_state->parse_flags |= ACPI_PARSE_DISASSEMBLE; status = acpi_ps_parse_aml(walk_state); + if (ACPI_FAILURE(status)) { + return (status); + } + (void)acpi_dm_parse_deferred_ops(op); /* Now we can disassemble the method */ @@ -338,42 +348,26 @@ acpi_status acpi_db_disassemble_method(char *name) /******************************************************************************* * - * FUNCTION: acpi_db_walk_for_execute + * FUNCTION: acpi_db_evaluate_object * - * PARAMETERS: Callback from walk_namespace + * PARAMETERS: node - Namespace node for the object * * RETURN: Status * - * DESCRIPTION: Batch execution module. Currently only executes predefined - * ACPI names. + * DESCRIPTION: Main execution function for the Evaluate/Execute/All debugger + * commands. * ******************************************************************************/ -static acpi_status -acpi_db_walk_for_execute(acpi_handle obj_handle, - u32 nesting_level, void *context, void **return_value) +static acpi_status acpi_db_evaluate_object(struct acpi_namespace_node *node) { - struct acpi_namespace_node *node = - (struct acpi_namespace_node *)obj_handle; - struct acpi_db_execute_walk *info = - (struct acpi_db_execute_walk *)context; - struct acpi_buffer return_obj; - acpi_status status; char *pathname; u32 i; struct acpi_device_info *obj_info; struct acpi_object_list param_objects; union acpi_object params[ACPI_METHOD_NUM_ARGS]; - const union acpi_predefined_info *predefined; - - predefined = acpi_ut_match_predefined_method(node->name.ascii); - if (!predefined) { - return (AE_OK); - } - - if (node->type == ACPI_TYPE_LOCAL_SCOPE) { - return (AE_OK); - } + struct acpi_buffer return_obj; + acpi_status status; pathname = acpi_ns_get_external_pathname(node); if (!pathname) { @@ -382,7 +376,7 @@ acpi_db_walk_for_execute(acpi_handle obj_handle, /* Get the object info for number of method parameters */ - status = acpi_get_object_info(obj_handle, &obj_info); + status = acpi_get_object_info(node, &obj_info); if (ACPI_FAILURE(status)) { ACPI_FREE(pathname); return (status); @@ -413,14 +407,67 @@ acpi_db_walk_for_execute(acpi_handle obj_handle, acpi_gbl_method_executing = TRUE; status = acpi_evaluate_object(node, NULL, &param_objects, &return_obj); + acpi_gbl_method_executing = FALSE; acpi_os_printf("%-32s returned %s\n", pathname, acpi_format_exception(status)); - acpi_gbl_method_executing = FALSE; + if (return_obj.length) { + acpi_os_printf("Evaluation of %s returned object %p, " + "external buffer length %X\n", + pathname, return_obj.pointer, + (u32)return_obj.length); + + acpi_db_dump_external_object(return_obj.pointer, 1); + acpi_os_printf("\n"); + } + ACPI_FREE(pathname); /* Ignore status from method execution */ + return (AE_OK); + + /* Update count, check if we have executed enough methods */ + +} + +/******************************************************************************* + * + * FUNCTION: acpi_db_walk_for_execute + * + * PARAMETERS: Callback from walk_namespace + * + * RETURN: Status + * + * DESCRIPTION: Batch execution function. Evaluates all "predefined" objects -- + * the nameseg begins with an underscore.
+ * + ******************************************************************************/ + +static acpi_status +acpi_db_walk_for_execute(acpi_handle obj_handle, + u32 nesting_level, void *context, void **return_value) +{ + struct acpi_namespace_node *node = + (struct acpi_namespace_node *)obj_handle; + struct acpi_db_execute_walk *info = + (struct acpi_db_execute_walk *)context; + acpi_status status; + const union acpi_predefined_info *predefined; + + predefined = acpi_ut_match_predefined_method(node->name.ascii); + if (!predefined) { + return (AE_OK); + } + + if (node->type == ACPI_TYPE_LOCAL_SCOPE) { + return (AE_OK); + } + + acpi_db_evaluate_object(node); + + /* Ignore status from object evaluation */ + status = AE_OK; /* Update count, check if we have executed enough methods */ @@ -435,6 +482,52 @@ acpi_db_walk_for_execute(acpi_handle obj_handle, /******************************************************************************* * + * FUNCTION: acpi_db_walk_for_execute_all + * + * PARAMETERS: Callback from walk_namespace + * + * RETURN: Status + * + * DESCRIPTION: Batch execution function. Evaluates all objects whose path ends + * with the nameseg "Info->NameSeg". Used for the "ALL" command. + * + ******************************************************************************/ + +static acpi_status +acpi_db_walk_for_execute_all(acpi_handle obj_handle, + u32 nesting_level, + void *context, void **return_value) +{ + struct acpi_namespace_node *node = + (struct acpi_namespace_node *)obj_handle; + struct acpi_db_execute_walk *info = + (struct acpi_db_execute_walk *)context; + acpi_status status; + + if (!ACPI_COMPARE_NAMESEG(node->name.ascii, info->name_seg)) { + return (AE_OK); + } + + if (node->type == ACPI_TYPE_LOCAL_SCOPE) { + return (AE_OK); + } + + /* Now evaluate the input object (node) */ + + acpi_db_evaluate_object(node); + + /* Ignore status from method execution */ + + status = AE_OK; + + /* Update count of executed methods/objects */ + + info->count++; + return (status); +} + +/******************************************************************************* + * * FUNCTION: acpi_db_evaluate_predefined_names * * PARAMETERS: None @@ -462,3 +555,35 @@ void acpi_db_evaluate_predefined_names(void) acpi_os_printf("Evaluated %u predefined names in the namespace\n", info.count); } + +/******************************************************************************* + * + * FUNCTION: acpi_db_evaluate_all + * + * PARAMETERS: none_acpi_gbl_db_method_info + * + * RETURN: None + * + * DESCRIPTION: Namespace batch execution. Implements the "ALL" command. + * Execute all namepaths whose final nameseg matches the + * input nameseg. 
+ * + ******************************************************************************/ + +void acpi_db_evaluate_all(char *name_seg) +{ + struct acpi_db_execute_walk info; + + info.count = 0; + info.max_count = ACPI_UINT32_MAX; + ACPI_COPY_NAMESEG(info.name_seg, name_seg); + info.name_seg[ACPI_NAMESEG_SIZE] = 0; + + /* Search all nodes in namespace */ + + (void)acpi_walk_namespace(ACPI_TYPE_ANY, ACPI_ROOT_OBJECT, + ACPI_UINT32_MAX, acpi_db_walk_for_execute_all, + NULL, (void *)&info, NULL); + + acpi_os_printf("Evaluated %u names in the namespace\n", info.count); +} diff --git a/drivers/acpi/acpica/dbnames.c b/drivers/acpi/acpica/dbnames.c index 992bd7b92540..c9131259f717 100644 --- a/drivers/acpi/acpica/dbnames.c +++ b/drivers/acpi/acpica/dbnames.c @@ -10,6 +10,7 @@ #include "acnamesp.h" #include "acdebug.h" #include "acpredef.h" +#include "acinterp.h" #define _COMPONENT ACPI_CA_DEBUGGER ACPI_MODULE_NAME("dbnames") @@ -354,7 +355,7 @@ acpi_status acpi_db_find_name_in_namespace(char *name_arg) char acpi_name[5] = "____"; char *acpi_name_ptr = acpi_name; - if (strlen(name_arg) > ACPI_NAME_SIZE) { + if (strlen(name_arg) > ACPI_NAMESEG_SIZE) { acpi_os_printf("Name must be no longer than 4 characters\n"); return (AE_OK); } @@ -504,6 +505,90 @@ acpi_db_walk_for_object_counts(acpi_handle obj_handle, /******************************************************************************* * + * FUNCTION: acpi_db_walk_for_fields + * + * PARAMETERS: Callback from walk_namespace + * + * RETURN: Status + * + * DESCRIPTION: Display short info about objects in the namespace + * + ******************************************************************************/ + +static acpi_status +acpi_db_walk_for_fields(acpi_handle obj_handle, + u32 nesting_level, void *context, void **return_value) +{ + union acpi_object *ret_value; + struct acpi_region_walk_info *info = + (struct acpi_region_walk_info *)context; + struct acpi_buffer buffer; + acpi_status status; + struct acpi_namespace_node *node = acpi_ns_validate_handle(obj_handle); + + if (!node) { + return (AE_OK); + } + if (node->object->field.region_obj->region.space_id != + info->address_space_id) { + return (AE_OK); + } + + info->count++; + + /* Get and display the full pathname to this object */ + + buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER; + status = acpi_ns_handle_to_pathname(obj_handle, &buffer, TRUE); + if (ACPI_FAILURE(status)) { + acpi_os_printf("Could Not get pathname for object %p\n", + obj_handle); + return (AE_OK); + } + + acpi_os_printf("%s ", (char *)buffer.pointer); + ACPI_FREE(buffer.pointer); + + buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER; + status = acpi_evaluate_object(obj_handle, NULL, NULL, &buffer); + if (ACPI_FAILURE(status)) { + acpi_os_printf("Could Not evaluate object %p\n", + obj_handle); + return (AE_OK); + } + /* + * Since this is a field unit, surround the output in braces + */ + acpi_os_printf("{"); + + ret_value = (union acpi_object *)buffer.pointer; + switch (ret_value->type) { + case ACPI_TYPE_INTEGER: + + acpi_os_printf("%8.8X%8.8X", + ACPI_FORMAT_UINT64(ret_value->integer.value)); + break; + + case ACPI_TYPE_BUFFER: + + acpi_ut_dump_buffer(ret_value->buffer.pointer, + ret_value->buffer.length, + DB_DISPLAY_DATA_ONLY | DB_BYTE_DISPLAY, 0); + break; + + default: + + break; + } + acpi_os_printf("}\n"); + + ACPI_FREE(buffer.pointer); + + return (AE_OK); +} + +/******************************************************************************* + * * FUNCTION: acpi_db_walk_for_specific_objects * * PARAMETERS: Callback from walk_namespace @@ 
-571,6 +656,9 @@ acpi_status acpi_db_display_objects(char *obj_type_arg, char *display_count_arg) object_info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_object_info)); + if (!object_info) + return (AE_NO_MEMORY); + /* Walk the namespace from the root */ (void)acpi_walk_namespace(ACPI_TYPE_ANY, ACPI_ROOT_OBJECT, @@ -630,6 +718,39 @@ acpi_status acpi_db_display_objects(char *obj_type_arg, char *display_count_arg) /******************************************************************************* * + * FUNCTION: acpi_db_display_fields + * + * PARAMETERS: obj_type_arg - Type of object to display + * display_count_arg - Max depth to display + * + * RETURN: None + * + * DESCRIPTION: Display objects in the namespace of the requested type + * + ******************************************************************************/ + +acpi_status acpi_db_display_fields(u32 address_space_id) +{ + struct acpi_region_walk_info info; + + info.count = 0; + info.owner_id = ACPI_OWNER_ID_MAX; + info.debug_level = ACPI_UINT32_MAX; + info.display_type = ACPI_DISPLAY_SUMMARY | ACPI_DISPLAY_SHORT; + info.address_space_id = address_space_id; + + /* Walk the namespace from the root */ + + (void)acpi_walk_namespace(ACPI_TYPE_LOCAL_REGION_FIELD, + ACPI_ROOT_OBJECT, ACPI_UINT32_MAX, + acpi_db_walk_for_fields, NULL, (void *)&info, + NULL); + + return (AE_OK); +} + +/******************************************************************************* + * * FUNCTION: acpi_db_integrity_walk * * PARAMETERS: Callback from walk_namespace @@ -904,7 +1025,7 @@ acpi_db_bus_walk(acpi_handle obj_handle, * * RETURN: None * - * DESCRIPTION: Display info about system busses. + * DESCRIPTION: Display info about system buses. * ******************************************************************************/ diff --git a/drivers/acpi/acpica/dbobject.c b/drivers/acpi/acpica/dbobject.c index a1c76bf21122..95ab91b35f29 100644 --- a/drivers/acpi/acpica/dbobject.c +++ b/drivers/acpi/acpica/dbobject.c @@ -47,7 +47,7 @@ acpi_db_dump_method_info(acpi_status status, struct acpi_walk_state *walk_state) /* Ignore control codes, they are not errors */ - if ((status & AE_CODE_MASK) == AE_CODE_CONTROL) { + if (ACPI_CNTL_EXCEPTION(status)) { return; } @@ -243,7 +243,7 @@ acpi_db_display_internal_object(union acpi_operand_object *obj_desc, acpi_os_printf("[%s] ", acpi_ut_get_reference_name(obj_desc)); - /* Decode the refererence */ + /* Decode the reference */ switch (obj_desc->reference.class) { case ACPI_REFCLASS_LOCAL: @@ -394,7 +394,6 @@ void acpi_db_decode_locals(struct acpi_walk_state *walk_state) u8 display_locals = FALSE; node = walk_state->method_node; - obj_desc = walk_state->method_desc; /* There are no locals for the module-level code case */ @@ -465,7 +464,6 @@ void acpi_db_decode_arguments(struct acpi_walk_state *walk_state) u8 display_args = FALSE; node = walk_state->method_node; - obj_desc = walk_state->method_desc; /* There are no arguments for the module-level code case */ diff --git a/drivers/acpi/acpica/dbstats.c b/drivers/acpi/acpica/dbstats.c index bf620937c79b..3af88e70238c 100644 --- a/drivers/acpi/acpica/dbstats.c +++ b/drivers/acpi/acpica/dbstats.c @@ -341,17 +341,17 @@ acpi_status acpi_db_display_statistics(char *type_arg) "ACPI_TYPE", "NODES", "OBJECTS"); for (i = 0; i < ACPI_TYPE_NS_NODE_MAX; i++) { - acpi_os_printf("%16.16s % 10ld% 10ld\n", + acpi_os_printf("%16.16s %10u %10u\n", acpi_ut_get_type_name(i), acpi_gbl_node_type_count[i], acpi_gbl_obj_type_count[i]); } - acpi_os_printf("%16.16s % 10ld% 10ld\n", "Misc/Unknown", + 
acpi_os_printf("%16.16s %10u %10u\n", "Misc/Unknown", acpi_gbl_node_type_count_misc, acpi_gbl_obj_type_count_misc); - acpi_os_printf("%16.16s % 10ld% 10ld\n", "TOTALS:", + acpi_os_printf("%16.16s %10u %10u\n", "TOTALS:", acpi_gbl_num_nodes, acpi_gbl_num_objects); break; @@ -379,16 +379,14 @@ acpi_status acpi_db_display_statistics(char *type_arg) case CMD_STAT_MISC: acpi_os_printf("\nMiscellaneous Statistics:\n\n"); - acpi_os_printf("Calls to AcpiPsFind:.. ........% 7ld\n", + acpi_os_printf("%-28s: %7u\n", "Calls to AcpiPsFind", acpi_gbl_ps_find_count); - acpi_os_printf("Calls to AcpiNsLookup:..........% 7ld\n", + acpi_os_printf("%-28s: %7u\n", "Calls to AcpiNsLookup", acpi_gbl_ns_lookup_count); - acpi_os_printf("\n"); - - acpi_os_printf("Mutex usage:\n\n"); + acpi_os_printf("\nMutex usage:\n\n"); for (i = 0; i < ACPI_NUM_MUTEX; i++) { - acpi_os_printf("%-28s: % 7ld\n", + acpi_os_printf("%-28s: %7u\n", acpi_ut_get_mutex_name(i), acpi_gbl_mutex_info[i].use_count); } @@ -399,87 +397,87 @@ acpi_status acpi_db_display_statistics(char *type_arg) acpi_os_printf("\nInternal object sizes:\n\n"); acpi_os_printf("Common %3d\n", - sizeof(struct acpi_object_common)); + (u32)sizeof(struct acpi_object_common)); acpi_os_printf("Number %3d\n", - sizeof(struct acpi_object_integer)); + (u32)sizeof(struct acpi_object_integer)); acpi_os_printf("String %3d\n", - sizeof(struct acpi_object_string)); + (u32)sizeof(struct acpi_object_string)); acpi_os_printf("Buffer %3d\n", - sizeof(struct acpi_object_buffer)); + (u32)sizeof(struct acpi_object_buffer)); acpi_os_printf("Package %3d\n", - sizeof(struct acpi_object_package)); + (u32)sizeof(struct acpi_object_package)); acpi_os_printf("BufferField %3d\n", - sizeof(struct acpi_object_buffer_field)); + (u32)sizeof(struct acpi_object_buffer_field)); acpi_os_printf("Device %3d\n", - sizeof(struct acpi_object_device)); + (u32)sizeof(struct acpi_object_device)); acpi_os_printf("Event %3d\n", - sizeof(struct acpi_object_event)); + (u32)sizeof(struct acpi_object_event)); acpi_os_printf("Method %3d\n", - sizeof(struct acpi_object_method)); + (u32)sizeof(struct acpi_object_method)); acpi_os_printf("Mutex %3d\n", - sizeof(struct acpi_object_mutex)); + (u32)sizeof(struct acpi_object_mutex)); acpi_os_printf("Region %3d\n", - sizeof(struct acpi_object_region)); + (u32)sizeof(struct acpi_object_region)); acpi_os_printf("PowerResource %3d\n", - sizeof(struct acpi_object_power_resource)); + (u32)sizeof(struct acpi_object_power_resource)); acpi_os_printf("Processor %3d\n", - sizeof(struct acpi_object_processor)); + (u32)sizeof(struct acpi_object_processor)); acpi_os_printf("ThermalZone %3d\n", - sizeof(struct acpi_object_thermal_zone)); + (u32)sizeof(struct acpi_object_thermal_zone)); acpi_os_printf("RegionField %3d\n", - sizeof(struct acpi_object_region_field)); + (u32)sizeof(struct acpi_object_region_field)); acpi_os_printf("BankField %3d\n", - sizeof(struct acpi_object_bank_field)); + (u32)sizeof(struct acpi_object_bank_field)); acpi_os_printf("IndexField %3d\n", - sizeof(struct acpi_object_index_field)); + (u32)sizeof(struct acpi_object_index_field)); acpi_os_printf("Reference %3d\n", - sizeof(struct acpi_object_reference)); + (u32)sizeof(struct acpi_object_reference)); acpi_os_printf("Notify %3d\n", - sizeof(struct acpi_object_notify_handler)); + (u32)sizeof(struct acpi_object_notify_handler)); acpi_os_printf("AddressSpace %3d\n", - sizeof(struct acpi_object_addr_handler)); + (u32)sizeof(struct acpi_object_addr_handler)); acpi_os_printf("Extra %3d\n", - sizeof(struct 
acpi_object_extra)); + (u32)sizeof(struct acpi_object_extra)); acpi_os_printf("Data %3d\n", - sizeof(struct acpi_object_data)); + (u32)sizeof(struct acpi_object_data)); acpi_os_printf("\n"); acpi_os_printf("ParseObject %3d\n", - sizeof(struct acpi_parse_obj_common)); + (u32)sizeof(struct acpi_parse_obj_common)); acpi_os_printf("ParseObjectNamed %3d\n", - sizeof(struct acpi_parse_obj_named)); + (u32)sizeof(struct acpi_parse_obj_named)); acpi_os_printf("ParseObjectAsl %3d\n", - sizeof(struct acpi_parse_obj_asl)); + (u32)sizeof(struct acpi_parse_obj_asl)); acpi_os_printf("OperandObject %3d\n", - sizeof(union acpi_operand_object)); + (u32)sizeof(union acpi_operand_object)); acpi_os_printf("NamespaceNode %3d\n", - sizeof(struct acpi_namespace_node)); + (u32)sizeof(struct acpi_namespace_node)); acpi_os_printf("AcpiObject %3d\n", - sizeof(union acpi_object)); + (u32)sizeof(union acpi_object)); acpi_os_printf("\n"); acpi_os_printf("Generic State %3d\n", - sizeof(union acpi_generic_state)); + (u32)sizeof(union acpi_generic_state)); acpi_os_printf("Common State %3d\n", - sizeof(struct acpi_common_state)); + (u32)sizeof(struct acpi_common_state)); acpi_os_printf("Control State %3d\n", - sizeof(struct acpi_control_state)); + (u32)sizeof(struct acpi_control_state)); acpi_os_printf("Update State %3d\n", - sizeof(struct acpi_update_state)); + (u32)sizeof(struct acpi_update_state)); acpi_os_printf("Scope State %3d\n", - sizeof(struct acpi_scope_state)); + (u32)sizeof(struct acpi_scope_state)); acpi_os_printf("Parse Scope %3d\n", - sizeof(struct acpi_pscope_state)); + (u32)sizeof(struct acpi_pscope_state)); acpi_os_printf("Package State %3d\n", - sizeof(struct acpi_pkg_state)); + (u32)sizeof(struct acpi_pkg_state)); acpi_os_printf("Thread State %3d\n", - sizeof(struct acpi_thread_state)); + (u32)sizeof(struct acpi_thread_state)); acpi_os_printf("Result Values %3d\n", - sizeof(struct acpi_result_values)); + (u32)sizeof(struct acpi_result_values)); acpi_os_printf("Notify Info %3d\n", - sizeof(struct acpi_notify_info)); + (u32)sizeof(struct acpi_notify_info)); break; case CMD_STAT_STACK: diff --git a/drivers/acpi/acpica/dbtest.c b/drivers/acpi/acpica/dbtest.c index 8a5462439a97..6db44a5ac786 100644 --- a/drivers/acpi/acpica/dbtest.c +++ b/drivers/acpi/acpica/dbtest.c @@ -10,6 +10,7 @@ #include "acdebug.h" #include "acnamesp.h" #include "acpredef.h" +#include "acinterp.h" #define _COMPONENT ACPI_CA_DEBUGGER ACPI_MODULE_NAME("dbtest") @@ -33,6 +34,9 @@ acpi_db_test_string_type(struct acpi_namespace_node *node, u32 byte_length); static acpi_status acpi_db_test_package_type(struct acpi_namespace_node *node); static acpi_status +acpi_db_test_field_unit_type(union acpi_operand_object *obj_desc); + +static acpi_status acpi_db_read_from_object(struct acpi_namespace_node *node, acpi_object_type expected_type, union acpi_object **value); @@ -74,7 +78,7 @@ static struct acpi_db_argument_info acpi_db_test_types[] = { static acpi_handle read_handle = NULL; static acpi_handle write_handle = NULL; -/* ASL Definitions of the debugger read/write control methods */ +/* ASL Definitions of the debugger read/write control methods. AML below. */ #if 0 definition_block("ssdt.aml", "SSDT", 2, "Intel", "DEBUG", 0x00000001) @@ -227,10 +231,8 @@ static void acpi_db_test_all_objects(void) * RETURN: Status * * DESCRIPTION: Test one namespace object. Supported types are Integer, - * String, Buffer, buffer_field, and field_unit. All other object - * types are simply ignored. - * - * Note: Support for Packages is not implemented. 
+ * String, Buffer, Package, buffer_field, and field_unit. + * All other object types are simply ignored. * ******************************************************************************/ @@ -240,7 +242,6 @@ acpi_db_test_one_object(acpi_handle obj_handle, { struct acpi_namespace_node *node; union acpi_operand_object *obj_desc; - union acpi_operand_object *region_obj; acpi_object_type local_type; u32 bit_length = 0; u32 byte_length = 0; @@ -281,18 +282,21 @@ acpi_db_test_one_object(acpi_handle obj_handle, break; case ACPI_TYPE_FIELD_UNIT: - case ACPI_TYPE_BUFFER_FIELD: case ACPI_TYPE_LOCAL_REGION_FIELD: case ACPI_TYPE_LOCAL_INDEX_FIELD: case ACPI_TYPE_LOCAL_BANK_FIELD: + local_type = ACPI_TYPE_FIELD_UNIT; + break; + + case ACPI_TYPE_BUFFER_FIELD: + /* + * The returned object will be a Buffer if the field length + * is larger than the size of an Integer (32 or 64 bits + * depending on the DSDT version). + */ local_type = ACPI_TYPE_INTEGER; if (obj_desc) { - /* - * Returned object will be a Buffer if the field length - * is larger than the size of an Integer (32 or 64 bits - * depending on the DSDT version). - */ bit_length = obj_desc->common_field.bit_length; byte_length = ACPI_ROUND_BITS_UP_TO_BYTES(bit_length); if (bit_length > acpi_gbl_integer_bit_width) { @@ -303,7 +307,7 @@ acpi_db_test_one_object(acpi_handle obj_handle, default: - /* Ignore all other types */ + /* Ignore all non-data types - Methods, Devices, Scopes, etc. */ return (AE_OK); } @@ -314,40 +318,10 @@ acpi_db_test_one_object(acpi_handle obj_handle, acpi_ut_get_type_name(node->type), node->name.ascii); if (!obj_desc) { - acpi_os_printf(" Ignoring, no attached object\n"); + acpi_os_printf(" No attached sub-object, ignoring\n"); return (AE_OK); } - /* - * Check for unsupported region types. Note: acpi_exec simulates - * access to system_memory, system_IO, PCI_Config, and EC. - */ - switch (node->type) { - case ACPI_TYPE_LOCAL_REGION_FIELD: - - region_obj = obj_desc->field.region_obj; - switch (region_obj->region.space_id) { - case ACPI_ADR_SPACE_SYSTEM_MEMORY: - case ACPI_ADR_SPACE_SYSTEM_IO: - case ACPI_ADR_SPACE_PCI_CONFIG: - - break; - - default: - - acpi_os_printf - (" %s space is not supported in this command [%4.4s]\n", - acpi_ut_get_region_name(region_obj->region. - space_id), - region_obj->region.node->name.ascii); - return (AE_OK); - } - break; - - default: - break; - } - /* At this point, we have resolved the object to one of the major types */ switch (local_type) { @@ -371,6 +345,11 @@ acpi_db_test_one_object(acpi_handle obj_handle, status = acpi_db_test_package_type(node); break; + case ACPI_TYPE_FIELD_UNIT: + + status = acpi_db_test_field_unit_type(obj_desc); + break; + default: acpi_os_printf(" Ignoring, type not implemented (%2.2X)", @@ -382,24 +361,8 @@ acpi_db_test_one_object(acpi_handle obj_handle, if (ACPI_FAILURE(status)) { status = AE_OK; - goto exit; - } - - switch (node->type) { - case ACPI_TYPE_LOCAL_REGION_FIELD: - - region_obj = obj_desc->field.region_obj; - acpi_os_printf(" (%s)", - acpi_ut_get_region_name(region_obj->region. 
- space_id)); - - break; - - default: - break; } -exit: acpi_os_printf("\n"); return (status); } @@ -444,7 +407,7 @@ acpi_db_test_integer_type(struct acpi_namespace_node *node, u32 bit_length) return (status); } - acpi_os_printf(" (%4.4X/%3.3X) %8.8X%8.8X", + acpi_os_printf(ACPI_DEBUG_LENGTH_FORMAT " %8.8X%8.8X", bit_length, ACPI_ROUND_BITS_UP_TO_BYTES(bit_length), ACPI_FORMAT_UINT64(temp1->integer.value)); @@ -558,8 +521,9 @@ acpi_db_test_buffer_type(struct acpi_namespace_node *node, u32 bit_length) /* Emit a few bytes of the buffer */ - acpi_os_printf(" (%4.4X/%3.3X)", bit_length, temp1->buffer.length); - for (i = 0; ((i < 4) && (i < byte_length)); i++) { + acpi_os_printf(ACPI_DEBUG_LENGTH_FORMAT, bit_length, + temp1->buffer.length); + for (i = 0; ((i < 8) && (i < byte_length)); i++) { acpi_os_printf(" %2.2X", temp1->buffer.pointer[i]); } acpi_os_printf("... "); @@ -665,8 +629,9 @@ acpi_db_test_string_type(struct acpi_namespace_node *node, u32 byte_length) return (status); } - acpi_os_printf(" (%4.4X/%3.3X) \"%s\"", (temp1->string.length * 8), - temp1->string.length, temp1->string.pointer); + acpi_os_printf(ACPI_DEBUG_LENGTH_FORMAT " \"%s\"", + (temp1->string.length * 8), temp1->string.length, + temp1->string.pointer); /* Write a new value */ @@ -750,13 +715,80 @@ static acpi_status acpi_db_test_package_type(struct acpi_namespace_node *node) return (status); } - acpi_os_printf(" %8.8X Elements", temp1->package.count); + acpi_os_printf(" %.2X Elements", temp1->package.count); acpi_os_free(temp1); return (status); } /******************************************************************************* * + * FUNCTION: acpi_db_test_field_unit_type + * + * PARAMETERS: obj_desc - A field unit object + * + * RETURN: Status + * + * DESCRIPTION: Test read/write on a named field unit. + * + ******************************************************************************/ + +static acpi_status +acpi_db_test_field_unit_type(union acpi_operand_object *obj_desc) +{ + union acpi_operand_object *region_obj; + u32 bit_length = 0; + u32 byte_length = 0; + acpi_status status = AE_OK; + union acpi_operand_object *ret_buffer_desc; + + /* Supported spaces are memory/io/pci_config */ + + region_obj = obj_desc->field.region_obj; + switch (region_obj->region.space_id) { + case ACPI_ADR_SPACE_SYSTEM_MEMORY: + case ACPI_ADR_SPACE_SYSTEM_IO: + case ACPI_ADR_SPACE_PCI_CONFIG: + + /* Need the interpreter to execute */ + + acpi_ut_acquire_mutex(ACPI_MTX_INTERPRETER); + acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); + + /* Exercise read-then-write */ + + status = + acpi_ex_read_data_from_field(NULL, obj_desc, + &ret_buffer_desc); + if (status == AE_OK) { + acpi_ex_write_data_to_field(ret_buffer_desc, obj_desc, + NULL); + acpi_ut_remove_reference(ret_buffer_desc); + } + + acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); + acpi_ut_release_mutex(ACPI_MTX_INTERPRETER); + + bit_length = obj_desc->common_field.bit_length; + byte_length = ACPI_ROUND_BITS_UP_TO_BYTES(bit_length); + + acpi_os_printf(ACPI_DEBUG_LENGTH_FORMAT " [%s]", bit_length, + byte_length, + acpi_ut_get_region_name(region_obj->region. 
+ space_id)); + return (status); + + default: + + acpi_os_printf + (" %s address space is not supported in this command [%4.4s]", + acpi_ut_get_region_name(region_obj->region.space_id), + region_obj->region.node->name.ascii); + return (AE_OK); + } +} + +/******************************************************************************* + * * FUNCTION: acpi_db_read_from_object * * PARAMETERS: node - Parent NS node for the object diff --git a/drivers/acpi/acpica/dbxface.c b/drivers/acpi/acpica/dbxface.c index 3eb45ea93e5e..9dfd693cda3e 100644 --- a/drivers/acpi/acpica/dbxface.c +++ b/drivers/acpi/acpica/dbxface.c @@ -409,6 +409,7 @@ acpi_status acpi_initialize_debugger(void) acpi_gbl_db_output_flags = ACPI_DB_CONSOLE_OUTPUT; acpi_gbl_db_opt_no_ini_methods = FALSE; + acpi_gbl_db_opt_no_region_support = FALSE; acpi_gbl_db_buffer = acpi_os_allocate(ACPI_DEBUG_BUFFER_SIZE); if (!acpi_gbl_db_buffer) { diff --git a/drivers/acpi/acpica/dsargs.c b/drivers/acpi/acpica/dsargs.c index 6b15625e8099..e2f00c54cb36 100644 --- a/drivers/acpi/acpica/dsargs.c +++ b/drivers/acpi/acpica/dsargs.c @@ -4,7 +4,7 @@ * Module Name: dsargs - Support for execution of dynamic arguments for static * objects (regions, fields, buffer fields, etc.) * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/dscontrol.c b/drivers/acpi/acpica/dscontrol.c index 0da96268deb5..c1f79d7a2026 100644 --- a/drivers/acpi/acpica/dscontrol.c +++ b/drivers/acpi/acpica/dscontrol.c @@ -4,7 +4,7 @@ * Module Name: dscontrol - Support for execution control opcodes - * if/else/while/return * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. * *****************************************************************************/ @@ -62,7 +62,7 @@ acpi_ds_exec_begin_control_op(struct acpi_walk_state *walk_state, } } - /*lint -fallthrough */ + ACPI_FALLTHROUGH; case AML_IF_OP: /* @@ -85,7 +85,7 @@ acpi_ds_exec_begin_control_op(struct acpi_walk_state *walk_state, walk_state->parser_state.pkg_end; control_state->control.opcode = op->common.aml_opcode; control_state->control.loop_timeout = acpi_os_get_timer() + - (u64)(acpi_gbl_max_loop_iterations * ACPI_100NSEC_PER_SEC); + ((u64)acpi_gbl_max_loop_iterations * ACPI_100NSEC_PER_SEC); /* Push the control state on this walk's control stack */ diff --git a/drivers/acpi/acpica/dsdebug.c b/drivers/acpi/acpica/dsdebug.c index 9d33f0bb2885..274b74255551 100644 --- a/drivers/acpi/acpica/dsdebug.c +++ b/drivers/acpi/acpica/dsdebug.c @@ -3,7 +3,7 @@ * * Module Name: dsdebug - Parser/Interpreter interface - debugging * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. * *****************************************************************************/ @@ -100,7 +100,7 @@ acpi_ds_dump_method_stack(acpi_status status, /* Ignore control codes, they are not errors */ - if ((status & AE_CODE_MASK) == AE_CODE_CONTROL) { + if (ACPI_CNTL_EXCEPTION(status)) { return_VOID; } diff --git a/drivers/acpi/acpica/dsfield.c b/drivers/acpi/acpica/dsfield.c index 30fe89545d6a..df132c9089c7 100644 --- a/drivers/acpi/acpica/dsfield.c +++ b/drivers/acpi/acpica/dsfield.c @@ -3,7 +3,7 @@ * * Module Name: dsfield - Dispatcher field routines * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. 
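The dscontrol.c hunk above also repositions a cast in the while-loop timeout calculation: the removed line computed acpi_gbl_max_loop_iterations * ACPI_100NSEC_PER_SEC as a 32-bit product and only then widened it to u64, so a large iteration limit could silently wrap; casting one operand first makes the multiplication itself 64-bit. A small standalone illustration, with arbitrary values chosen only to show the wrap:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t iterations = 0xFFFF;   /* stand-in for acpi_gbl_max_loop_iterations */
	uint32_t per_sec = 10000000u;   /* 100ns units per second, as in ACPI_100NSEC_PER_SEC */

	/* Cast applied to the result: the product wraps in 32 bits first. */
	uint64_t wraps = (uint64_t)(iterations * per_sec);

	/* Cast applied to an operand: the multiplication is done in 64 bits. */
	uint64_t exact = (uint64_t)iterations * per_sec;

	printf("%llu vs %llu\n", (unsigned long long)wraps, (unsigned long long)exact);
	return 0;
}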
* *****************************************************************************/ @@ -149,7 +149,6 @@ acpi_ds_create_buffer_field(union acpi_parse_object *op, if (walk_state->deferred_node) { node = walk_state->deferred_node; - status = AE_OK; } else { /* Execute flag should always be set when this function is entered */ @@ -178,7 +177,10 @@ acpi_ds_create_buffer_field(union acpi_parse_object *op, arg->common.value.string, ACPI_TYPE_ANY, ACPI_IMODE_LOAD_PASS1, flags, walk_state, &node); - if (ACPI_FAILURE(status)) { + if ((walk_state->parse_flags & ACPI_PARSE_DISASSEMBLE) + && status == AE_ALREADY_EXISTS) { + status = AE_OK; + } else if (ACPI_FAILURE(status)) { ACPI_ERROR_NAMESPACE(walk_state->scope_info, arg->common.value.string, status); return_ACPI_STATUS(status); @@ -244,7 +246,7 @@ cleanup: * FUNCTION: acpi_ds_get_field_names * * PARAMETERS: info - create_field info structure - * ` walk_state - Current method state + * walk_state - Current method state * arg - First parser arg for the field name list * * RETURN: Status @@ -264,7 +266,6 @@ acpi_ds_get_field_names(struct acpi_create_field_info *info, union acpi_parse_object *child; #ifdef ACPI_EXEC_APP - u64 value = 0; union acpi_operand_object *result_desc; union acpi_operand_object *obj_desc; char *name_path; @@ -406,19 +407,17 @@ acpi_ds_get_field_names(struct acpi_create_field_info *info, name_path = acpi_ns_get_external_pathname(info-> field_node); - obj_desc = - acpi_ut_create_integer_object - (value); if (ACPI_SUCCESS (ae_lookup_init_file_entry - (name_path, &value))) { + (name_path, &obj_desc))) { acpi_ex_write_data_to_field (obj_desc, acpi_ns_get_attached_object (info->field_node), &result_desc); + acpi_ut_remove_reference + (obj_desc); } - acpi_ut_remove_reference(obj_desc); ACPI_FREE(name_path); #endif } @@ -518,6 +517,20 @@ acpi_ds_create_field(union acpi_parse_object *op, info.region_node = region_node; status = acpi_ds_get_field_names(&info, walk_state, arg->common.next); + if (ACPI_FAILURE(status)) { + return_ACPI_STATUS(status); + } + + if (info.region_node->object->region.space_id == + ACPI_ADR_SPACE_PLATFORM_COMM) { + region_node->object->field.internal_pcc_buffer = + ACPI_ALLOCATE_ZEROED(info.region_node->object->region. + length); + if (!region_node->object->field.internal_pcc_buffer) { + return_ACPI_STATUS(AE_NO_MEMORY); + } + } + return_ACPI_STATUS(status); } @@ -629,8 +642,6 @@ acpi_ds_init_field_objects(union acpi_parse_object *op, } /* Name already exists, just ignore this error */ - - status = AE_OK; } arg->common.node = node; diff --git a/drivers/acpi/acpica/dsinit.c b/drivers/acpi/acpica/dsinit.c index e8de1b0ce2f5..57cd9e2d1109 100644 --- a/drivers/acpi/acpica/dsinit.c +++ b/drivers/acpi/acpica/dsinit.c @@ -3,7 +3,7 @@ * * Module Name: dsinit - Object initialization namespace walk * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. 
* *****************************************************************************/ @@ -200,9 +200,9 @@ acpi_ds_initialize_objects(u32 table_index, /* DSDT is always the first AML table */ - if (ACPI_COMPARE_NAME(table->signature, ACPI_SIG_DSDT)) { + if (ACPI_COMPARE_NAMESEG(table->signature, ACPI_SIG_DSDT)) { ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT, - "\nInitializing Namespace objects:\n")); + "\nACPI table initialization:\n")); } /* Summary of objects initialized */ diff --git a/drivers/acpi/acpica/dsmethod.c b/drivers/acpi/acpica/dsmethod.c index c1a4d02fafd5..45ec32e81903 100644 --- a/drivers/acpi/acpica/dsmethod.c +++ b/drivers/acpi/acpica/dsmethod.c @@ -3,7 +3,7 @@ * * Module Name: dsmethod - Parser/Interpreter interface - control method parsing * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. * *****************************************************************************/ @@ -462,7 +462,6 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread, struct acpi_walk_state *next_walk_state = NULL; union acpi_operand_object *obj_desc; struct acpi_evaluate_info *info; - u32 i; ACPI_FUNCTION_TRACE_PTR(ds_call_control_method, this_walk_state); @@ -483,6 +482,20 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread, return_ACPI_STATUS(AE_NULL_OBJECT); } + if (this_walk_state->num_operands < obj_desc->method.param_count) { + ACPI_ERROR((AE_INFO, "Missing argument(s) for method [%4.4s]", + acpi_ut_get_node_name(method_node))); + + return_ACPI_STATUS(AE_AML_TOO_FEW_ARGUMENTS); + } + + else if (this_walk_state->num_operands > obj_desc->method.param_count) { + ACPI_ERROR((AE_INFO, "Too many arguments for method [%4.4s]", + acpi_ut_get_node_name(method_node))); + + return_ACPI_STATUS(AE_AML_TOO_MANY_ARGUMENTS); + } + /* Init for new method, possibly wait on method mutex */ status = @@ -517,7 +530,7 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread, info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info)); if (!info) { status = AE_NO_MEMORY; - goto cleanup; + goto pop_walk_state; } info->parameters = &this_walk_state->operands[0]; @@ -529,7 +542,7 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread, ACPI_FREE(info); if (ACPI_FAILURE(status)) { - goto cleanup; + goto pop_walk_state; } next_walk_state->method_nesting_depth = @@ -539,14 +552,7 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread, * Delete the operands on the previous walkstate operand stack * (they were copied to new objects) */ - for (i = 0; i < obj_desc->method.param_count; i++) { - acpi_ut_remove_reference(this_walk_state->operands[i]); - this_walk_state->operands[i] = NULL; - } - - /* Clear the operand stack */ - - this_walk_state->num_operands = 0; + acpi_ds_clear_operands(this_walk_state); ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "**** Begin nested execution of [%4.4s] **** WalkState=%p\n", @@ -575,6 +581,12 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread, return_ACPI_STATUS(status); +pop_walk_state: + + /* On error, pop the walk state to be deleted from thread */ + + acpi_ds_pop_walk_state(thread); + cleanup: /* On error, we must terminate the method properly */ diff --git a/drivers/acpi/acpica/dsmthdat.c b/drivers/acpi/acpica/dsmthdat.c index eca50517ad82..5393de4dbc4c 100644 --- a/drivers/acpi/acpica/dsmthdat.c +++ b/drivers/acpi/acpica/dsmthdat.c @@ -188,6 +188,7 @@ acpi_ds_method_data_init_args(union acpi_operand_object **params, index++; } + acpi_ex_trace_args(params, index); ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "%u args passed 
to method\n", index)); return_ACPI_STATUS(AE_OK); diff --git a/drivers/acpi/acpica/dsobject.c b/drivers/acpi/acpica/dsobject.c index 6a9cc613adaa..1bf7eec49899 100644 --- a/drivers/acpi/acpica/dsobject.c +++ b/drivers/acpi/acpica/dsobject.c @@ -3,7 +3,7 @@ * * Module Name: dsobject - Dispatcher object management routines * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/dsopcode.c b/drivers/acpi/acpica/dsopcode.c index 78f9de260d5f..5699b0872848 100644 --- a/drivers/acpi/acpica/dsopcode.c +++ b/drivers/acpi/acpica/dsopcode.c @@ -3,7 +3,7 @@ * * Module Name: dsopcode - Dispatcher support for regions and fields * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. * *****************************************************************************/ @@ -130,8 +130,8 @@ acpi_ds_init_buffer_field(u16 aml_opcode, /* Must have a valid (>0) bit count */ if (bit_count == 0) { - ACPI_ERROR((AE_INFO, - "Attempt to CreateField of length zero")); + ACPI_BIOS_ERROR((AE_INFO, + "Attempt to CreateField of length zero")); status = AE_AML_OPERAND_VALUE; goto cleanup; } @@ -194,12 +194,13 @@ acpi_ds_init_buffer_field(u16 aml_opcode, /* Entire field must fit within the current length of the buffer */ if ((bit_offset + bit_count) > (8 * (u32)buffer_desc->buffer.length)) { - ACPI_ERROR((AE_INFO, - "Field [%4.4s] at bit offset/length %u/%u " - "exceeds size of target Buffer (%u bits)", - acpi_ut_get_node_name(result_desc), bit_offset, - bit_count, 8 * (u32)buffer_desc->buffer.length)); status = AE_AML_BUFFER_LIMIT; + ACPI_BIOS_EXCEPTION((AE_INFO, status, + "Field [%4.4s] at bit offset/length %u/%u " + "exceeds size of target Buffer (%u bits)", + acpi_ut_get_node_name(result_desc), + bit_offset, bit_count, + 8 * (u32)buffer_desc->buffer.length)); goto cleanup; } @@ -216,6 +217,8 @@ acpi_ds_init_buffer_field(u16 aml_opcode, } obj_desc->buffer_field.buffer_obj = buffer_desc; + obj_desc->buffer_field.is_create_field = + aml_opcode == AML_CREATE_FIELD_OP; /* Reference count for buffer_desc inherits obj_desc count */ @@ -355,6 +358,7 @@ acpi_ds_eval_region_operands(struct acpi_walk_state *walk_state, union acpi_operand_object *operand_desc; struct acpi_namespace_node *node; union acpi_parse_object *next_op; + acpi_adr_space_type space_id; ACPI_FUNCTION_TRACE_PTR(ds_eval_region_operands, op); @@ -367,6 +371,7 @@ acpi_ds_eval_region_operands(struct acpi_walk_state *walk_state, /* next_op points to the op that holds the space_ID */ next_op = op->common.value.arg; + space_id = (acpi_adr_space_type)next_op->common.value.integer; /* next_op points to address op */ @@ -402,6 +407,15 @@ acpi_ds_eval_region_operands(struct acpi_walk_state *walk_state, obj_desc->region.length = (u32) operand_desc->integer.value; acpi_ut_remove_reference(operand_desc); + /* A zero-length operation region is unusable. 
Just warn */ + + if (!obj_desc->region.length + && (space_id < ACPI_NUM_PREDEFINED_REGIONS)) { + ACPI_WARNING((AE_INFO, + "Operation Region [%4.4s] has zero length (SpaceId %X)", + node->name.ascii, space_id)); + } + /* * Get the address and save it * (at top of stack - 1) @@ -517,6 +531,7 @@ acpi_ds_eval_table_region_operands(struct acpi_walk_state *walk_state, obj_desc->region.address = ACPI_PTR_TO_PHYSADDR(table); obj_desc->region.length = table->length; + obj_desc->region.pointer = table; ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "RgnObj %p Addr %8.8X%8.8X Len %X\n", obj_desc, diff --git a/drivers/acpi/acpica/dspkginit.c b/drivers/acpi/acpica/dspkginit.c index 584853385268..1ed2386fab82 100644 --- a/drivers/acpi/acpica/dspkginit.c +++ b/drivers/acpi/acpica/dspkginit.c @@ -3,7 +3,7 @@ * * Module Name: dspkginit - Completion of deferred package initialization * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/dsutils.c b/drivers/acpi/acpica/dsutils.c index fb9ed5e1da89..baf6a1f27605 100644 --- a/drivers/acpi/acpica/dsutils.c +++ b/drivers/acpi/acpica/dsutils.c @@ -668,6 +668,8 @@ acpi_ds_create_operands(struct acpi_walk_state *walk_state, union acpi_parse_object *arguments[ACPI_OBJ_NUM_OPERANDS]; u32 arg_count = 0; u32 index = walk_state->num_operands; + u32 prev_num_operands = walk_state->num_operands; + u32 new_num_operands; u32 i; ACPI_FUNCTION_TRACE_PTR(ds_create_operands, first_arg); @@ -696,6 +698,7 @@ acpi_ds_create_operands(struct acpi_walk_state *walk_state, /* Create the interpreter arguments, in reverse order */ + new_num_operands = index; index--; for (i = 0; i < arg_count; i++) { arg = arguments[index]; @@ -720,7 +723,11 @@ cleanup: * pop everything off of the operand stack and delete those * objects */ - acpi_ds_obj_stack_pop_and_delete(arg_count, walk_state); + walk_state->num_operands = (u8)(i); + acpi_ds_obj_stack_pop_and_delete(new_num_operands, walk_state); + + /* Restore operand count */ + walk_state->num_operands = (u8)(prev_num_operands); ACPI_EXCEPTION((AE_INFO, status, "While creating Arg %u", index)); return_ACPI_STATUS(status); diff --git a/drivers/acpi/acpica/dswexec.c b/drivers/acpi/acpica/dswexec.c index 1504b93cc5f4..5c5c6d8a4e48 100644 --- a/drivers/acpi/acpica/dswexec.c +++ b/drivers/acpi/acpica/dswexec.c @@ -4,7 +4,7 @@ * Module Name: dswexec - Dispatcher method execution callbacks; * dispatch to interpreter. * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. 
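
The checks added to acpi_ds_call_control_method() in the dsmethod.c hunk above reject an AML method call whose operand count does not match the parameter count declared by the Method() definition. A minimal sketch of the same validation, assuming only the standard ACPICA status codes (the helper name and the kernel-style include are illustrative):

#include <acpi/acpi.h>

/*
 * Illustrative helper: compare the operands pushed by the caller against
 * the parameter count declared in the Method() definition.
 */
static acpi_status validate_method_arg_count(u32 num_operands, u32 param_count)
{
	if (num_operands < param_count) {
		/* e.g. Method(MTH1, 2) invoked as MTH1(Arg0) */
		return (AE_AML_TOO_FEW_ARGUMENTS);
	}
	if (num_operands > param_count) {
		/* e.g. Method(MTH1, 1) invoked as MTH1(Arg0, Arg1) */
		return (AE_AML_TOO_MANY_ARGUMENTS);
	}
	return (AE_OK);
}
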
* *****************************************************************************/ @@ -16,6 +16,9 @@ #include "acinterp.h" #include "acnamesp.h" #include "acdebug.h" +#ifdef ACPI_EXEC_APP +#include "aecommon.h" +#endif #define _COMPONENT ACPI_DISPATCHER ACPI_MODULE_NAME("dswexec") @@ -27,7 +30,7 @@ static acpi_execute_op acpi_gbl_op_type_dispatch[] = { acpi_ex_opcode_0A_0T_1R, acpi_ex_opcode_1A_0T_0R, acpi_ex_opcode_1A_0T_1R, - acpi_ex_opcode_1A_1T_0R, + NULL, /* Was: acpi_ex_opcode_1A_0T_0R (Was for Load operator) */ acpi_ex_opcode_1A_1T_1R, acpi_ex_opcode_2A_0T_0R, acpi_ex_opcode_2A_0T_1R, @@ -329,6 +332,10 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state) u32 op_class; union acpi_parse_object *next_op; union acpi_parse_object *first_arg; +#ifdef ACPI_EXEC_APP + char *namepath; + union acpi_operand_object *obj_desc; +#endif ACPI_FUNCTION_TRACE_PTR(ds_exec_end_op, walk_state); @@ -382,9 +389,11 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state) /* * All opcodes require operand resolution, with the only exceptions - * being the object_type and size_of operators. + * being the object_type and size_of operators as well as opcodes that + * take no arguments. */ - if (!(walk_state->op_info->flags & AML_NO_OPERAND_RESOLVE)) { + if (!(walk_state->op_info->flags & AML_NO_OPERAND_RESOLVE) && + (walk_state->op_info->flags & AML_HAS_ARGS)) { /* Resolve all operands */ @@ -537,6 +546,31 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state) status = acpi_ds_eval_buffer_field_operands(walk_state, op); + if (ACPI_FAILURE(status)) { + break; + } +#ifdef ACPI_EXEC_APP + /* + * acpi_exec support for namespace initialization file (initialize + * buffer_fields in this code.) + */ + namepath = + acpi_ns_get_external_pathname(op->common.node); + status = ae_lookup_init_file_entry(namepath, &obj_desc); + if (ACPI_SUCCESS(status)) { + status = + acpi_ex_write_data_to_field(obj_desc, + op->common. + node->object, + NULL); + if (ACPI_FAILURE(status)) { + ACPI_EXCEPTION((AE_INFO, status, + "While writing to buffer field")); + } + } + ACPI_FREE(namepath); + status = AE_OK; +#endif break; case AML_TYPE_CREATE_OBJECT: @@ -565,8 +599,7 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state) break; } - /* Fall through */ - /*lint -fallthrough */ + ACPI_FALLTHROUGH; case AML_INT_EVAL_SUBTREE_OP: diff --git a/drivers/acpi/acpica/dswload.c b/drivers/acpi/acpica/dswload.c index e2ef09643d50..666419b6a5c6 100644 --- a/drivers/acpi/acpica/dswload.c +++ b/drivers/acpi/acpica/dswload.c @@ -3,7 +3,7 @@ * * Module Name: dswload - Dispatcher first pass namespace load callbacks * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. * *****************************************************************************/ @@ -14,7 +14,6 @@ #include "acdispat.h" #include "acinterp.h" #include "acnamesp.h" - #ifdef ACPI_ASL_COMPILER #include "acdisasm.h" #endif @@ -225,7 +224,7 @@ acpi_ds_load1_begin_op(struct acpi_walk_state *walk_state, break; } - /*lint -fallthrough */ + ACPI_FALLTHROUGH; default: @@ -399,7 +398,6 @@ acpi_status acpi_ds_load1_end_op(struct acpi_walk_state *walk_state) union acpi_parse_object *op; acpi_object_type object_type; acpi_status status = AE_OK; - #ifdef ACPI_ASL_COMPILER u8 param_count; #endif @@ -410,6 +408,27 @@ acpi_status acpi_ds_load1_end_op(struct acpi_walk_state *walk_state) ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "Op=%p State=%p\n", op, walk_state)); + /* + * Disassembler: handle create field operators here. 
+ * + * create_buffer_field is a deferred op that is typically processed in load + * pass 2. However, disassembly of control method contents walk the parse + * tree with ACPI_PARSE_LOAD_PASS1 and AML_CREATE operators are processed + * in a later walk. This is a problem when there is a control method that + * has the same name as the AML_CREATE object. In this case, any use of the + * name segment will be detected as a method call rather than a reference + * to a buffer field. + * + * This earlier creation during disassembly solves this issue by inserting + * the named object in the ACPI namespace so that references to this name + * would be a name string rather than a method call. + */ + if ((walk_state->parse_flags & ACPI_PARSE_DISASSEMBLE) && + (walk_state->op_info->flags & AML_CREATE)) { + status = acpi_ds_create_buffer_field(op, walk_state); + return_ACPI_STATUS(status); + } + /* We are only interested in opcodes that have an associated name */ if (!(walk_state->op_info->flags & (AML_NAMED | AML_FIELD))) { diff --git a/drivers/acpi/acpica/dswload2.c b/drivers/acpi/acpica/dswload2.c index 9a309f5c4de8..bfc54c914757 100644 --- a/drivers/acpi/acpica/dswload2.c +++ b/drivers/acpi/acpica/dswload2.c @@ -3,7 +3,7 @@ * * Module Name: dswload2 - Dispatcher second pass namespace load callbacks * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. * *****************************************************************************/ @@ -15,6 +15,9 @@ #include "acinterp.h" #include "acnamesp.h" #include "acevents.h" +#ifdef ACPI_EXEC_APP +#include "aecommon.h" +#endif #define _COMPONENT ACPI_DISPATCHER ACPI_MODULE_NAME("dswload2") @@ -24,7 +27,7 @@ ACPI_MODULE_NAME("dswload2") * FUNCTION: acpi_ds_load2_begin_op * * PARAMETERS: walk_state - Current state of the parse tree walk - * out_op - Wher to return op if a new one is created + * out_op - Where to return op if a new one is created * * RETURN: Status * @@ -211,7 +214,7 @@ acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state, break; } - /*lint -fallthrough */ + ACPI_FALLTHROUGH; default: @@ -373,6 +376,10 @@ acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state) struct acpi_namespace_node *new_node; u32 i; u8 region_space; +#ifdef ACPI_EXEC_APP + union acpi_operand_object *obj_desc; + char *namepath; +#endif ACPI_FUNCTION_TRACE(ds_load2_end_op); @@ -466,6 +473,11 @@ acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state) * be evaluated later during the execution phase */ status = acpi_ds_create_buffer_field(op, walk_state); + if (ACPI_FAILURE(status)) { + ACPI_EXCEPTION((AE_INFO, status, + "CreateBufferField failure")); + goto cleanup; + } break; case AML_TYPE_NAMED_FIELD: @@ -604,6 +616,29 @@ acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state) case AML_NAME_OP: status = acpi_ds_create_node(walk_state, node, op); + if (ACPI_FAILURE(status)) { + goto cleanup; + } +#ifdef ACPI_EXEC_APP + /* + * acpi_exec support for namespace initialization file (initialize + * Name opcodes in this code.) 
+ */ + namepath = acpi_ns_get_external_pathname(node); + status = ae_lookup_init_file_entry(namepath, &obj_desc); + if (ACPI_SUCCESS(status)) { + + /* Detach any existing object, attach new object */ + + if (node->object) { + acpi_ns_detach_object(node); + } + acpi_ns_attach_object(node, obj_desc, + obj_desc->common.type); + } + ACPI_FREE(namepath); + status = AE_OK; +#endif break; case AML_METHOD_OP: diff --git a/drivers/acpi/acpica/dswscope.c b/drivers/acpi/acpica/dswscope.c index 7592176a8fa2..375a8fa43d9d 100644 --- a/drivers/acpi/acpica/dswscope.c +++ b/drivers/acpi/acpica/dswscope.c @@ -3,7 +3,7 @@ * * Module Name: dswscope - Scope stack manipulation * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/dswstate.c b/drivers/acpi/acpica/dswstate.c index 4c1ec202d5ab..02aaddb89df9 100644 --- a/drivers/acpi/acpica/dswstate.c +++ b/drivers/acpi/acpica/dswstate.c @@ -3,7 +3,7 @@ * * Module Name: dswstate - Dispatcher parse tree walk management routines * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. * *****************************************************************************/ @@ -146,8 +146,8 @@ acpi_ds_result_push(union acpi_operand_object *object, if (!object) { ACPI_ERROR((AE_INFO, - "Null Object! Obj=%p State=%p Num=%u", - object, walk_state, walk_state->result_count)); + "Null Object! State=%p Num=%u", + walk_state, walk_state->result_count)); return (AE_BAD_PARAMETER); } @@ -576,9 +576,14 @@ acpi_ds_init_aml_walk(struct acpi_walk_state *walk_state, ACPI_FUNCTION_TRACE(ds_init_aml_walk); walk_state->parser_state.aml = - walk_state->parser_state.aml_start = aml_start; - walk_state->parser_state.aml_end = - walk_state->parser_state.pkg_end = aml_start + aml_length; + walk_state->parser_state.aml_start = + walk_state->parser_state.aml_end = + walk_state->parser_state.pkg_end = aml_start; + /* Avoid undefined behavior: applying zero offset to null pointer */ + if (aml_length != 0) { + walk_state->parser_state.aml_end += aml_length; + walk_state->parser_state.pkg_end += aml_length; + } /* The next_op of the next_walk will be the beginning of the method */ diff --git a/drivers/acpi/acpica/evevent.c b/drivers/acpi/acpica/evevent.c index b3d07cc14d75..6cdd39c987b8 100644 --- a/drivers/acpi/acpica/evevent.c +++ b/drivers/acpi/acpica/evevent.c @@ -3,7 +3,7 @@ * * Module Name: evevent - Fixed Event handling and dispatch * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. * *****************************************************************************/ @@ -130,7 +130,7 @@ static acpi_status acpi_ev_fixed_event_initialize(void) /* * Initialize the structure that keeps track of fixed event handlers and - * enable the fixed events. + * disable all of the fixed events. 
*/ for (i = 0; i < ACPI_NUM_FIXED_EVENTS; i++) { acpi_gbl_fixed_event_handlers[i].handler = NULL; @@ -265,4 +265,49 @@ static u32 acpi_ev_fixed_event_dispatch(u32 event) handler) (acpi_gbl_fixed_event_handlers[event].context)); } +/******************************************************************************* + * + * FUNCTION: acpi_any_fixed_event_status_set + * + * PARAMETERS: None + * + * RETURN: TRUE or FALSE + * + * DESCRIPTION: Checks the PM status register for active fixed events + * + ******************************************************************************/ + +u32 acpi_any_fixed_event_status_set(void) +{ + acpi_status status; + u32 in_status; + u32 in_enable; + u32 i; + + status = acpi_hw_register_read(ACPI_REGISTER_PM1_ENABLE, &in_enable); + if (ACPI_FAILURE(status)) { + return (FALSE); + } + + status = acpi_hw_register_read(ACPI_REGISTER_PM1_STATUS, &in_status); + if (ACPI_FAILURE(status)) { + return (FALSE); + } + + /* + * Check for all possible Fixed Events and dispatch those that are active + */ + for (i = 0; i < ACPI_NUM_FIXED_EVENTS; i++) { + + /* Both the status and enable bits must be on for this event */ + + if ((in_status & acpi_gbl_fixed_event_info[i].status_bit_mask) && + (in_enable & acpi_gbl_fixed_event_info[i].enable_bit_mask)) { + return (TRUE); + } + } + + return (FALSE); +} + #endif /* !ACPI_REDUCED_HARDWARE */ diff --git a/drivers/acpi/acpica/evglock.c b/drivers/acpi/acpica/evglock.c index 1b8a662a14a9..df2a4ab0e0da 100644 --- a/drivers/acpi/acpica/evglock.c +++ b/drivers/acpi/acpica/evglock.c @@ -3,7 +3,7 @@ * * Module Name: evglock - Global Lock support * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. * *****************************************************************************/ @@ -42,6 +42,10 @@ acpi_status acpi_ev_init_global_lock_handler(void) return_ACPI_STATUS(AE_OK); } + if (!acpi_gbl_use_global_lock) { + return_ACPI_STATUS(AE_OK); + } + /* Attempt installation of the global lock handler */ status = acpi_install_fixed_event_handler(ACPI_EVENT_GLOBAL, diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c index e10fec99a182..ba65b2ea49b2 100644 --- a/drivers/acpi/acpica/evgpe.c +++ b/drivers/acpi/acpica/evgpe.c @@ -3,7 +3,7 @@ * * Module Name: evgpe - General Purpose Event handling and dispatch * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. 
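
acpi_any_fixed_event_status_set(), added in the evevent.c hunk above, reads the PM1 status and enable registers and returns TRUE only when some fixed event has both its enable and status bits set. A hedged sketch of how a host wakeup path might consult it; the wrapper below is illustrative and assumes the kernel's <linux/acpi.h> declarations:

#include <linux/acpi.h>

/*
 * Illustrative wakeup check: report a pending wakeup if any enabled
 * fixed event (power button, sleep button, RTC, ...) latched its
 * status bit while the system was sleeping.
 */
static bool fixed_event_wakeup_pending(void)
{
	return acpi_any_fixed_event_status_set() != 0;
}
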
* *****************************************************************************/ @@ -146,6 +146,7 @@ acpi_ev_mask_gpe(struct acpi_gpe_event_info *gpe_event_info, u8 is_masked) * FUNCTION: acpi_ev_add_gpe_reference * * PARAMETERS: gpe_event_info - Add a reference to this GPE + * clear_on_enable - Clear GPE status before enabling it * * RETURN: Status * @@ -155,7 +156,8 @@ acpi_ev_mask_gpe(struct acpi_gpe_event_info *gpe_event_info, u8 is_masked) ******************************************************************************/ acpi_status -acpi_ev_add_gpe_reference(struct acpi_gpe_event_info *gpe_event_info) +acpi_ev_add_gpe_reference(struct acpi_gpe_event_info *gpe_event_info, + u8 clear_on_enable) { acpi_status status = AE_OK; @@ -170,6 +172,10 @@ acpi_ev_add_gpe_reference(struct acpi_gpe_event_info *gpe_event_info) /* Enable on first reference */ + if (clear_on_enable) { + (void)acpi_hw_clear_gpe(gpe_event_info); + } + status = acpi_ev_update_gpe_enable_mask(gpe_event_info); if (ACPI_SUCCESS(status)) { status = acpi_ev_enable_gpe(gpe_event_info); @@ -650,14 +656,14 @@ acpi_ev_detect_gpe(struct acpi_namespace_node *gpe_device, /* GPE currently enabled (enable bit == 1)? */ - status = acpi_hw_read(&enable_reg, &gpe_register_info->enable_address); + status = acpi_hw_gpe_read(&enable_reg, &gpe_register_info->enable_address); if (ACPI_FAILURE(status)) { goto error_exit; } /* GPE currently active (status bit == 1)? */ - status = acpi_hw_read(&status_reg, &gpe_register_info->status_address); + status = acpi_hw_gpe_read(&status_reg, &gpe_register_info->status_address); if (ACPI_FAILURE(status)) { goto error_exit; } @@ -801,7 +807,7 @@ acpi_ev_gpe_dispatch(struct acpi_namespace_node *gpe_device, dispatch.handler-> context); - /* If requested, clear (if level-triggered) and reenable the GPE */ + /* If requested, clear (if level-triggered) and re-enable the GPE */ if (return_value & ACPI_REENABLE_GPE) { (void)acpi_ev_finish_gpe(gpe_event_info); diff --git a/drivers/acpi/acpica/evgpeblk.c b/drivers/acpi/acpica/evgpeblk.c index b253063b09d3..fadd93caf1d5 100644 --- a/drivers/acpi/acpica/evgpeblk.c +++ b/drivers/acpi/acpica/evgpeblk.c @@ -3,7 +3,7 @@ * * Module Name: evgpeblk - GPE block creation and initialization. * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. 
* *****************************************************************************/ @@ -110,6 +110,9 @@ acpi_status acpi_ev_delete_gpe_block(struct acpi_gpe_block_info *gpe_block) status = acpi_hw_disable_gpe_block(gpe_block->xrupt_block, gpe_block, NULL); + if (ACPI_FAILURE(status)) { + return_ACPI_STATUS(status); + } if (!gpe_block->previous && !gpe_block->next) { @@ -230,12 +233,6 @@ acpi_ev_create_gpe_info_blocks(struct acpi_gpe_block_info *gpe_block) this_register->status_address.space_id = gpe_block->space_id; this_register->enable_address.space_id = gpe_block->space_id; - this_register->status_address.bit_width = - ACPI_GPE_REGISTER_WIDTH; - this_register->enable_address.bit_width = - ACPI_GPE_REGISTER_WIDTH; - this_register->status_address.bit_offset = 0; - this_register->enable_address.bit_offset = 0; /* Init the event_info for each GPE within this register */ @@ -248,14 +245,14 @@ acpi_ev_create_gpe_info_blocks(struct acpi_gpe_block_info *gpe_block) /* Disable all GPEs within this register */ - status = acpi_hw_write(0x00, &this_register->enable_address); + status = acpi_hw_gpe_write(0x00, &this_register->enable_address); if (ACPI_FAILURE(status)) { goto error_exit; } /* Clear any pending GPE events within this register */ - status = acpi_hw_write(0xFF, &this_register->status_address); + status = acpi_hw_gpe_write(0xFF, &this_register->status_address); if (ACPI_FAILURE(status)) { goto error_exit; } @@ -314,6 +311,23 @@ acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device, return_ACPI_STATUS(AE_OK); } + /* Validate the space_ID */ + + if ((space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) && + (space_id != ACPI_ADR_SPACE_SYSTEM_IO)) { + ACPI_ERROR((AE_INFO, + "Unsupported address space: 0x%X", space_id)); + return_ACPI_STATUS(AE_SUPPORT); + } + + if (space_id == ACPI_ADR_SPACE_SYSTEM_IO) { + status = acpi_hw_validate_io_block(address, + ACPI_GPE_REGISTER_WIDTH, + register_count); + if (ACPI_FAILURE(status)) + return_ACPI_STATUS(status); + } + /* Allocate a new GPE block */ gpe_block = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_gpe_block_info)); @@ -359,10 +373,10 @@ acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device, walk_info.gpe_device = gpe_device; walk_info.execute_by_owner_id = FALSE; - status = acpi_ns_walk_namespace(ACPI_TYPE_METHOD, gpe_device, - ACPI_UINT32_MAX, ACPI_NS_WALK_NO_UNLOCK, - acpi_ev_match_gpe_method, NULL, - &walk_info, NULL); + (void)acpi_ns_walk_namespace(ACPI_TYPE_METHOD, gpe_device, + ACPI_UINT32_MAX, ACPI_NS_WALK_NO_UNLOCK, + acpi_ev_match_gpe_method, NULL, &walk_info, + NULL); /* Return the new block */ @@ -453,7 +467,7 @@ acpi_ev_initialize_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info, continue; } - status = acpi_ev_add_gpe_reference(gpe_event_info); + status = acpi_ev_add_gpe_reference(gpe_event_info, FALSE); if (ACPI_FAILURE(status)) { ACPI_EXCEPTION((AE_INFO, status, "Could not enable GPE 0x%02X", diff --git a/drivers/acpi/acpica/evgpeinit.c b/drivers/acpi/acpica/evgpeinit.c index 1f686750bb1a..eb769739420e 100644 --- a/drivers/acpi/acpica/evgpeinit.c +++ b/drivers/acpi/acpica/evgpeinit.c @@ -3,7 +3,7 @@ * * Module Name: evgpeinit - System GPE initialization and update * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. * *****************************************************************************/ @@ -32,6 +32,16 @@ ACPI_MODULE_NAME("evgpeinit") * kernel boot time as well. 
*/ +#ifdef ACPI_GPE_USE_LOGICAL_ADDRESSES +#define ACPI_FADT_GPE_BLOCK_ADDRESS(N) \ + acpi_gbl_FADT.xgpe##N##_block.space_id == \ + ACPI_ADR_SPACE_SYSTEM_MEMORY ? \ + (u64)acpi_gbl_xgpe##N##_block_logical_address : \ + acpi_gbl_FADT.xgpe##N##_block.address +#else +#define ACPI_FADT_GPE_BLOCK_ADDRESS(N) acpi_gbl_FADT.xgpe##N##_block.address +#endif /* ACPI_GPE_USE_LOGICAL_ADDRESSES */ + /******************************************************************************* * * FUNCTION: acpi_ev_gpe_initialize @@ -49,6 +59,7 @@ acpi_status acpi_ev_gpe_initialize(void) u32 register_count1 = 0; u32 gpe_number_max = 0; acpi_status status; + u64 address; ACPI_FUNCTION_TRACE(ev_gpe_initialize); @@ -85,8 +96,9 @@ acpi_status acpi_ev_gpe_initialize(void) * If EITHER the register length OR the block address are zero, then that * particular block is not supported. */ - if (acpi_gbl_FADT.gpe0_block_length && - acpi_gbl_FADT.xgpe0_block.address) { + address = ACPI_FADT_GPE_BLOCK_ADDRESS(0); + + if (acpi_gbl_FADT.gpe0_block_length && address) { /* GPE block 0 exists (has both length and address > 0) */ @@ -97,7 +109,6 @@ acpi_status acpi_ev_gpe_initialize(void) /* Install GPE Block 0 */ status = acpi_ev_create_gpe_block(acpi_gbl_fadt_gpe_device, - acpi_gbl_FADT.xgpe0_block. address, acpi_gbl_FADT.xgpe0_block. space_id, register_count0, 0, @@ -110,8 +121,9 @@ acpi_status acpi_ev_gpe_initialize(void) } } - if (acpi_gbl_FADT.gpe1_block_length && - acpi_gbl_FADT.xgpe1_block.address) { + address = ACPI_FADT_GPE_BLOCK_ADDRESS(1); + + if (acpi_gbl_FADT.gpe1_block_length && address) { /* GPE block 1 exists (has both length and address > 0) */ @@ -137,7 +149,6 @@ acpi_status acpi_ev_gpe_initialize(void) status = acpi_ev_create_gpe_block(acpi_gbl_fadt_gpe_device, - acpi_gbl_FADT.xgpe1_block. address, acpi_gbl_FADT.xgpe1_block. space_id, register_count1, @@ -156,8 +167,6 @@ acpi_status acpi_ev_gpe_initialize(void) * GPE0 and GPE1 do not have to be contiguous in the GPE number * space. However, GPE0 always starts at GPE number zero. */ - gpe_number_max = acpi_gbl_FADT.gpe1_base + - ((register_count1 * ACPI_GPE_REGISTER_WIDTH) - 1); } } @@ -169,7 +178,6 @@ acpi_status acpi_ev_gpe_initialize(void) ACPI_DEBUG_PRINT((ACPI_DB_INIT, "There are no GPE blocks defined in the FADT\n")); - status = AE_OK; goto cleanup; } @@ -292,7 +300,7 @@ acpi_ev_match_gpe_method(acpi_handle obj_handle, acpi_status status; u32 gpe_number; u8 temp_gpe_number; - char name[ACPI_NAME_SIZE + 1]; + char name[ACPI_NAMESEG_SIZE + 1]; u8 type; ACPI_FUNCTION_TRACE(ev_match_gpe_method); @@ -310,7 +318,7 @@ acpi_ev_match_gpe_method(acpi_handle obj_handle, * 1) Extract the method name and null terminate it */ ACPI_MOVE_32_TO_32(name, &method_node->name.integer); - name[ACPI_NAME_SIZE] = 0; + name[ACPI_NAMESEG_SIZE] = 0; /* 2) Name must begin with an underscore */ @@ -405,6 +413,7 @@ acpi_ev_match_gpe_method(acpi_handle obj_handle, gpe_event_info->flags &= ~(ACPI_GPE_DISPATCH_MASK); gpe_event_info->flags |= (u8)(type | ACPI_GPE_DISPATCH_METHOD); gpe_event_info->dispatch.method_node = method_node; + walk_info->count++; ACPI_DEBUG_PRINT((ACPI_DB_LOAD, "Registered GPE method %s as GPE number 0x%.2X\n", diff --git a/drivers/acpi/acpica/evgpeutil.c b/drivers/acpi/acpica/evgpeutil.c index 0fb6c70f44ed..d15b1d75c8ec 100644 --- a/drivers/acpi/acpica/evgpeutil.c +++ b/drivers/acpi/acpica/evgpeutil.c @@ -3,7 +3,7 @@ * * Module Name: evgpeutil - GPE utilities * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. 
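
acpi_ev_match_gpe_method(), shown in part in the evgpeinit.c hunk above, recognizes GPE methods by name: an underscore, then 'L' (level-triggered) or 'E' (edge-triggered), then two hex digits giving the GPE number, e.g. _L1A for level-triggered GPE 0x1A. A standalone illustration of that naming convention (not the ACPICA parser):

#include <stdbool.h>

static int hex_digit(char c)
{
	if (c >= '0' && c <= '9')
		return c - '0';
	if (c >= 'A' && c <= 'F')
		return c - 'A' + 10;
	return -1;		/* ACPI name segments use upper-case hex */
}

/* "_L1A" -> level-triggered GPE 0x1A, "_E02" -> edge-triggered GPE 0x02 */
static bool parse_gpe_method_name(const char name[4],
				  unsigned int *gpe_number, bool *is_level)
{
	int hi, lo;

	if (name[0] != '_' || (name[1] != 'L' && name[1] != 'E'))
		return false;

	hi = hex_digit(name[2]);
	lo = hex_digit(name[3]);
	if (hi < 0 || lo < 0)
		return false;

	*is_level = (name[1] == 'L');
	*gpe_number = (unsigned int)((hi << 4) | lo);
	return true;
}
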
* *****************************************************************************/ diff --git a/drivers/acpi/acpica/evhandler.c b/drivers/acpi/acpica/evhandler.c index 4ed1e67db6be..5a35dae945e2 100644 --- a/drivers/acpi/acpica/evhandler.c +++ b/drivers/acpi/acpica/evhandler.c @@ -3,7 +3,7 @@ * * Module Name: evhandler - Support for Address Space handlers * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. * *****************************************************************************/ @@ -386,7 +386,7 @@ acpi_ev_install_space_handler(struct acpi_namespace_node *node, case ACPI_ADR_SPACE_DATA_TABLE: handler = acpi_ex_data_table_space_handler; - setup = NULL; + setup = acpi_ev_data_table_region_setup; break; default: @@ -489,6 +489,13 @@ acpi_ev_install_space_handler(struct acpi_namespace_node *node, /* Init handler obj */ + status = + acpi_os_create_mutex(&handler_obj->address_space.context_mutex); + if (ACPI_FAILURE(status)) { + acpi_ut_remove_reference(handler_obj); + goto unlock_and_exit; + } + handler_obj->address_space.space_id = (u8)space_id; handler_obj->address_space.handler_flags = flags; handler_obj->address_space.region_list = NULL; diff --git a/drivers/acpi/acpica/evmisc.c b/drivers/acpi/acpica/evmisc.c index baadd635b5af..04a23a6c3bb1 100644 --- a/drivers/acpi/acpica/evmisc.c +++ b/drivers/acpi/acpica/evmisc.c @@ -3,7 +3,7 @@ * * Module Name: evmisc - Miscellaneous event manager support functions * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. * *****************************************************************************/ @@ -230,11 +230,15 @@ void acpi_ev_terminate(void) /* Disable all GPEs in all GPE blocks */ status = acpi_ev_walk_gpe_list(acpi_hw_disable_gpe_block, NULL); + if (ACPI_FAILURE(status)) { + ACPI_EXCEPTION((AE_INFO, status, + "Could not disable GPEs in GPE block")); + } status = acpi_ev_remove_global_lock_handler(); if (ACPI_FAILURE(status)) { - ACPI_ERROR((AE_INFO, - "Could not remove Global Lock handler")); + ACPI_EXCEPTION((AE_INFO, status, + "Could not remove Global Lock handler")); } acpi_gbl_events_initialized = FALSE; @@ -250,6 +254,10 @@ void acpi_ev_terminate(void) /* Deallocate all handler objects installed within GPE info structs */ status = acpi_ev_walk_gpe_list(acpi_ev_delete_gpe_handlers, NULL); + if (ACPI_FAILURE(status)) { + ACPI_EXCEPTION((AE_INFO, status, + "Could not delete GPE handlers")); + } /* Return to original mode if necessary */ diff --git a/drivers/acpi/acpica/evregion.c b/drivers/acpi/acpica/evregion.c index 49decca4e08f..fa3475da7ea9 100644 --- a/drivers/acpi/acpica/evregion.c +++ b/drivers/acpi/acpica/evregion.c @@ -3,7 +3,7 @@ * * Module Name: evregion - Operation Region support * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. 
* *****************************************************************************/ @@ -21,7 +21,8 @@ extern u8 acpi_gbl_default_address_spaces[]; /* Local prototypes */ static void -acpi_ev_orphan_ec_reg_method(struct acpi_namespace_node *ec_device_node); +acpi_ev_execute_orphan_reg_method(struct acpi_namespace_node *device_node, + acpi_adr_space_type space_id); static acpi_status acpi_ev_reg_run(acpi_handle obj_handle, @@ -64,6 +65,7 @@ acpi_status acpi_ev_initialize_op_regions(void) acpi_gbl_default_address_spaces [i])) { acpi_ev_execute_reg_methods(acpi_gbl_root_node, + ACPI_UINT32_MAX, acpi_gbl_default_address_spaces [i], ACPI_REG_CONNECT); } @@ -111,6 +113,8 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj, union acpi_operand_object *region_obj2; void *region_context = NULL; struct acpi_connection_info *context; + acpi_mutex context_mutex; + u8 context_locked; acpi_physical_address address; ACPI_FUNCTION_TRACE(ev_address_space_dispatch); @@ -135,6 +139,8 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj, } context = handler_desc->address_space.context; + context_mutex = handler_desc->address_space.context_mutex; + context_locked = FALSE; /* * It may be the case that the region has never been initialized. @@ -157,6 +163,25 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj, return_ACPI_STATUS(AE_NOT_EXIST); } + if (region_obj->region.space_id == ACPI_ADR_SPACE_PLATFORM_COMM) { + struct acpi_pcc_info *ctx = + handler_desc->address_space.context; + + ctx->internal_buffer = + field_obj->field.internal_pcc_buffer; + ctx->length = (u16)region_obj->region.length; + ctx->subspace_id = (u8)region_obj->region.address; + } + + if (region_obj->region.space_id == + ACPI_ADR_SPACE_FIXED_HARDWARE) { + struct acpi_ffh_info *ctx = + handler_desc->address_space.context; + + ctx->length = region_obj->region.length; + ctx->offset = region_obj->region.address; + } + /* * We must exit the interpreter because the region setup will * potentially execute control methods (for example, the _REG method @@ -203,6 +228,23 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj, handler = handler_desc->address_space.handler; address = (region_obj->region.address + region_offset); + ACPI_DEBUG_PRINT((ACPI_DB_OPREGION, + "Handler %p (@%p) Address %8.8X%8.8X [%s]\n", + ®ion_obj->region.handler->address_space, handler, + ACPI_FORMAT_UINT64(address), + acpi_ut_get_region_name(region_obj->region. + space_id))); + + if (!(handler_desc->address_space.handler_flags & + ACPI_ADDR_HANDLER_DEFAULT_INSTALLED)) { + /* + * For handlers other than the default (supplied) handlers, we must + * exit the interpreter because the handler *might* block -- we don't + * know what it will do, so we can't hold the lock on the interpreter. + */ + acpi_ex_exit_interpreter(); + } + /* * Special handling for generic_serial_bus and general_purpose_io: * There are three extra parameters that must be passed to the @@ -211,48 +253,39 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj, * 2) Length of the above buffer * 3) Actual access length from the access_as() op * + * Since we pass these extra parameters via the context, which is + * shared between threads, we must lock the context to avoid these + * parameters being changed from another thread before the handler + * has completed running. 
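
For GenericSerialBus and GeneralPurposeIo regions, the per-call Connection buffer, length, and access length reach the handler through the handler's context object, which is shared by every field that uses the handler; the context_mutex created in the evhandler.c hunk above serializes writers so one thread's parameters cannot be overwritten by another before its handler call returns. A generic sketch of the same lock-around-shared-parameters pattern, in plain pthreads rather than ACPICA code:

#include <pthread.h>

struct shared_call_ctx {
	pthread_mutex_t lock;		/* init with PTHREAD_MUTEX_INITIALIZER */
	const void *connection;		/* per-call parameters live in shared storage */
	unsigned int length;
};

static int call_with_shared_ctx(struct shared_call_ctx *ctx,
				const void *connection, unsigned int length,
				int (*handler)(const struct shared_call_ctx *))
{
	int ret;

	/* Hold the lock across the call so the parameters stay stable */
	pthread_mutex_lock(&ctx->lock);
	ctx->connection = connection;
	ctx->length = length;
	ret = handler(ctx);
	pthread_mutex_unlock(&ctx->lock);

	return ret;
}
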
+ * * In addition, for general_purpose_io, the Address and bit_width fields * are defined as follows: * 1) Address is the pin number index of the field (bit offset from * the previous Connection) * 2) bit_width is the actual bit length of the field (number of pins) */ - if ((region_obj->region.space_id == ACPI_ADR_SPACE_GSBUS) && + if ((region_obj->region.space_id == ACPI_ADR_SPACE_GSBUS || + region_obj->region.space_id == ACPI_ADR_SPACE_GPIO) && context && field_obj) { - /* Get the Connection (resource_template) buffer */ + status = + acpi_os_acquire_mutex(context_mutex, ACPI_WAIT_FOREVER); + if (ACPI_FAILURE(status)) { + goto re_enter_interpreter; + } - context->connection = field_obj->field.resource_buffer; - context->length = field_obj->field.resource_length; - context->access_length = field_obj->field.access_length; - } - if ((region_obj->region.space_id == ACPI_ADR_SPACE_GPIO) && - context && field_obj) { + context_locked = TRUE; /* Get the Connection (resource_template) buffer */ context->connection = field_obj->field.resource_buffer; context->length = field_obj->field.resource_length; context->access_length = field_obj->field.access_length; - address = field_obj->field.pin_number_index; - bit_width = field_obj->field.bit_length; - } - - ACPI_DEBUG_PRINT((ACPI_DB_OPREGION, - "Handler %p (@%p) Address %8.8X%8.8X [%s]\n", - ®ion_obj->region.handler->address_space, handler, - ACPI_FORMAT_UINT64(address), - acpi_ut_get_region_name(region_obj->region. - space_id))); - if (!(handler_desc->address_space.handler_flags & - ACPI_ADDR_HANDLER_DEFAULT_INSTALLED)) { - /* - * For handlers other than the default (supplied) handlers, we must - * exit the interpreter because the handler *might* block -- we don't - * know what it will do, so we can't hold the lock on the intepreter. - */ - acpi_ex_exit_interpreter(); + if (region_obj->region.space_id == ACPI_ADR_SPACE_GPIO) { + address = field_obj->field.pin_number_index; + bit_width = field_obj->field.bit_length; + } } /* Call the handler */ @@ -260,6 +293,10 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj, status = handler(function, address, bit_width, value, context, region_obj2->extra.region_context); + if (context_locked) { + acpi_os_release_mutex(context_mutex); + } + if (ACPI_FAILURE(status)) { ACPI_EXCEPTION((AE_INFO, status, "Returned by Handler for [%s]", acpi_ut_get_region_name(region_obj->region. @@ -276,6 +313,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj, } } +re_enter_interpreter: if (!(handler_desc->address_space.handler_flags & ACPI_ADDR_HANDLER_DEFAULT_INSTALLED)) { /* @@ -635,6 +673,7 @@ cleanup1: * FUNCTION: acpi_ev_execute_reg_methods * * PARAMETERS: node - Namespace node for the device + * max_depth - Depth to which search for _REG * space_id - The address space ID * function - Passed to _REG: On (1) or Off (0) * @@ -646,7 +685,7 @@ cleanup1: ******************************************************************************/ void -acpi_ev_execute_reg_methods(struct acpi_namespace_node *node, +acpi_ev_execute_reg_methods(struct acpi_namespace_node *node, u32 max_depth, acpi_adr_space_type space_id, u32 function) { struct acpi_reg_walk_info info; @@ -680,14 +719,16 @@ acpi_ev_execute_reg_methods(struct acpi_namespace_node *node, * regions and _REG methods. (i.e. 
handlers must be installed for all * regions of this Space ID before we can run any _REG methods) */ - (void)acpi_ns_walk_namespace(ACPI_TYPE_ANY, node, ACPI_UINT32_MAX, + (void)acpi_ns_walk_namespace(ACPI_TYPE_ANY, node, max_depth, ACPI_NS_WALK_UNLOCK, acpi_ev_reg_run, NULL, &info, NULL); - /* Special case for EC: handle "orphan" _REG methods with no region */ - - if (space_id == ACPI_ADR_SPACE_EC) { - acpi_ev_orphan_ec_reg_method(node); + /* + * Special case for EC and GPIO: handle "orphan" _REG methods with + * no region. + */ + if (space_id == ACPI_ADR_SPACE_EC || space_id == ACPI_ADR_SPACE_GPIO) { + acpi_ev_execute_orphan_reg_method(node, space_id); } ACPI_DEBUG_PRINT_RAW((ACPI_DB_NAMES, @@ -760,31 +801,28 @@ acpi_ev_reg_run(acpi_handle obj_handle, /******************************************************************************* * - * FUNCTION: acpi_ev_orphan_ec_reg_method + * FUNCTION: acpi_ev_execute_orphan_reg_method * - * PARAMETERS: ec_device_node - Namespace node for an EC device + * PARAMETERS: device_node - Namespace node for an ACPI device + * space_id - The address space ID * * RETURN: None * - * DESCRIPTION: Execute an "orphan" _REG method that appears under the EC + * DESCRIPTION: Execute an "orphan" _REG method that appears under an ACPI * device. This is a _REG method that has no corresponding region - * within the EC device scope. The orphan _REG method appears to - * have been enabled by the description of the ECDT in the ACPI - * specification: "The availability of the region space can be - * detected by providing a _REG method object underneath the - * Embedded Controller device." - * - * To quickly access the EC device, we use the ec_device_node used - * during EC handler installation. Otherwise, we would need to - * perform a time consuming namespace walk, executing _HID - * methods to find the EC device. + * within the device's scope. ACPI tables depending on these + * "orphan" _REG methods have been seen for both EC and GPIO + * Operation Regions. Presumably the Windows ACPI implementation + * always calls the _REG method independent of the presence of + * an actual Operation Region with the correct address space ID. * * MUTEX: Assumes the namespace is locked * ******************************************************************************/ static void -acpi_ev_orphan_ec_reg_method(struct acpi_namespace_node *ec_device_node) +acpi_ev_execute_orphan_reg_method(struct acpi_namespace_node *device_node, + acpi_adr_space_type space_id) { acpi_handle reg_method; struct acpi_namespace_node *next_node; @@ -792,9 +830,9 @@ acpi_ev_orphan_ec_reg_method(struct acpi_namespace_node *ec_device_node) struct acpi_object_list args; union acpi_object objects[2]; - ACPI_FUNCTION_TRACE(ev_orphan_ec_reg_method); + ACPI_FUNCTION_TRACE(ev_execute_orphan_reg_method); - if (!ec_device_node) { + if (!device_node) { return_VOID; } @@ -804,7 +842,7 @@ acpi_ev_orphan_ec_reg_method(struct acpi_namespace_node *ec_device_node) /* Get a handle to a _REG method immediately under the EC device */ - status = acpi_get_handle(ec_device_node, METHOD_NAME__REG, ®_method); + status = acpi_get_handle(device_node, METHOD_NAME__REG, ®_method); if (ACPI_FAILURE(status)) { goto exit; /* There is no _REG method present */ } @@ -816,31 +854,31 @@ acpi_ev_orphan_ec_reg_method(struct acpi_namespace_node *ec_device_node) * with other space IDs to be present; but the code below will then * execute the _REG method with the embedded_control space_ID argument. 
*/ - next_node = acpi_ns_get_next_node(ec_device_node, NULL); + next_node = acpi_ns_get_next_node(device_node, NULL); while (next_node) { if ((next_node->type == ACPI_TYPE_REGION) && (next_node->object) && - (next_node->object->region.space_id == ACPI_ADR_SPACE_EC)) { + (next_node->object->region.space_id == space_id)) { goto exit; /* Do not execute the _REG */ } - next_node = acpi_ns_get_next_node(ec_device_node, next_node); + next_node = acpi_ns_get_next_node(device_node, next_node); } - /* Evaluate the _REG(embedded_control,Connect) method */ + /* Evaluate the _REG(space_id,Connect) method */ args.count = 2; args.pointer = objects; objects[0].type = ACPI_TYPE_INTEGER; - objects[0].integer.value = ACPI_ADR_SPACE_EC; + objects[0].integer.value = space_id; objects[1].type = ACPI_TYPE_INTEGER; objects[1].integer.value = ACPI_REG_CONNECT; - status = acpi_evaluate_object(reg_method, NULL, &args, NULL); + (void)acpi_evaluate_object(reg_method, NULL, &args, NULL); exit: /* We ignore all errors from above, don't care */ - status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); + (void)acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); return_VOID; } diff --git a/drivers/acpi/acpica/evrgnini.c b/drivers/acpi/acpica/evrgnini.c index 17df5dacd43c..b03952798af5 100644 --- a/drivers/acpi/acpica/evrgnini.c +++ b/drivers/acpi/acpica/evrgnini.c @@ -3,7 +3,7 @@ * * Module Name: evrgnini- ACPI address_space (op_region) init * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. * *****************************************************************************/ @@ -38,6 +38,7 @@ acpi_ev_system_memory_region_setup(acpi_handle handle, union acpi_operand_object *region_desc = (union acpi_operand_object *)handle; struct acpi_mem_space_context *local_region_context; + struct acpi_mem_mapping *mm; ACPI_FUNCTION_TRACE(ev_system_memory_region_setup); @@ -46,13 +47,14 @@ acpi_ev_system_memory_region_setup(acpi_handle handle, local_region_context = (struct acpi_mem_space_context *)*region_context; - /* Delete a cached mapping if present */ + /* Delete memory mappings if present */ - if (local_region_context->mapped_length) { - acpi_os_unmap_memory(local_region_context-> - mapped_logical_address, - local_region_context-> - mapped_length); + while (local_region_context->first_mm) { + mm = local_region_context->first_mm; + local_region_context->first_mm = mm->next_mm; + acpi_os_unmap_memory(mm->logical_address, + mm->length); + ACPI_FREE(mm); } ACPI_FREE(local_region_context); *region_context = NULL; @@ -198,7 +200,6 @@ acpi_ev_pci_config_region_setup(acpi_handle handle, * root bridge. Still need to return a context object * for the new PCI_Config operation region, however. 
*/ - status = AE_OK; } else { ACPI_EXCEPTION((AE_INFO, status, "Could not install PciConfig handler " @@ -407,6 +408,58 @@ acpi_ev_cmos_region_setup(acpi_handle handle, /******************************************************************************* * + * FUNCTION: acpi_ev_data_table_region_setup + * + * PARAMETERS: handle - Region we are interested in + * function - Start or stop + * handler_context - Address space handler context + * region_context - Region specific context + * + * RETURN: Status + * + * DESCRIPTION: Setup a data_table_region + * + * MUTEX: Assumes namespace is not locked + * + ******************************************************************************/ + +acpi_status +acpi_ev_data_table_region_setup(acpi_handle handle, + u32 function, + void *handler_context, void **region_context) +{ + union acpi_operand_object *region_desc = + (union acpi_operand_object *)handle; + struct acpi_data_table_mapping *local_region_context; + + ACPI_FUNCTION_TRACE(ev_data_table_region_setup); + + if (function == ACPI_REGION_DEACTIVATE) { + if (*region_context) { + ACPI_FREE(*region_context); + *region_context = NULL; + } + return_ACPI_STATUS(AE_OK); + } + + /* Create a new context */ + + local_region_context = + ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_data_table_mapping)); + if (!(local_region_context)) { + return_ACPI_STATUS(AE_NO_MEMORY); + } + + /* Save the data table pointer for use in the handler */ + + local_region_context->pointer = region_desc->region.pointer; + + *region_context = local_region_context; + return_ACPI_STATUS(AE_OK); +} + +/******************************************************************************* + * * FUNCTION: acpi_ev_default_region_setup * * PARAMETERS: handle - Region we are interested in @@ -516,25 +569,6 @@ acpi_status acpi_ev_initialize_region(union acpi_operand_object *region_obj) handler_obj = obj_desc->common_notify.handler; break; - case ACPI_TYPE_METHOD: - /* - * If we are executing module level code, the original - * Node's object was replaced by this Method object and we - * saved the handler in the method object. - * - * Note: Only used for the legacy MLC support. Will - * be removed in the future. - * - * See acpi_ns_exec_module_code - */ - if (!acpi_gbl_execute_tables_as_methods && - obj_desc->method. - info_flags & ACPI_METHOD_MODULE_LEVEL) { - handler_obj = - obj_desc->method.dispatch.handler; - } - break; - default: /* Ignore other objects */ diff --git a/drivers/acpi/acpica/evxface.c b/drivers/acpi/acpica/evxface.c index febc332b00ac..86a8d41c079c 100644 --- a/drivers/acpi/acpica/evxface.c +++ b/drivers/acpi/acpica/evxface.c @@ -3,7 +3,7 @@ * * Module Name: evxface - External interfaces for ACPI events * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. 
* *****************************************************************************/ @@ -971,7 +971,7 @@ acpi_remove_gpe_handler(acpi_handle gpe_device, ACPI_GPE_DISPATCH_METHOD) || (ACPI_GPE_DISPATCH_TYPE(handler->original_flags) == ACPI_GPE_DISPATCH_NOTIFY)) && handler->originally_enabled) { - (void)acpi_ev_add_gpe_reference(gpe_event_info); + (void)acpi_ev_add_gpe_reference(gpe_event_info, FALSE); if (ACPI_GPE_IS_POLLING_NEEDED(gpe_event_info)) { /* Poll edge triggered GPEs to handle existing events */ diff --git a/drivers/acpi/acpica/evxfevnt.c b/drivers/acpi/acpica/evxfevnt.c index 970e940bdb17..4b052908d2e7 100644 --- a/drivers/acpi/acpica/evxfevnt.c +++ b/drivers/acpi/acpica/evxfevnt.c @@ -3,7 +3,7 @@ * * Module Name: evxfevnt - External Interfaces, ACPI event disable/enable * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/evxfgpe.c b/drivers/acpi/acpica/evxfgpe.c index b2d5f66cc1b0..60dacec1b121 100644 --- a/drivers/acpi/acpica/evxfgpe.c +++ b/drivers/acpi/acpica/evxfgpe.c @@ -3,7 +3,7 @@ * * Module Name: evxfgpe - External Interfaces for General Purpose Events (GPEs) * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. * *****************************************************************************/ @@ -108,7 +108,7 @@ acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number) if (gpe_event_info) { if (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) != ACPI_GPE_DISPATCH_NONE) { - status = acpi_ev_add_gpe_reference(gpe_event_info); + status = acpi_ev_add_gpe_reference(gpe_event_info, TRUE); if (ACPI_SUCCESS(status) && ACPI_GPE_IS_POLLING_NEEDED(gpe_event_info)) { @@ -644,17 +644,17 @@ ACPI_EXPORT_SYMBOL(acpi_get_gpe_status) * PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1 * gpe_number - GPE level within the GPE block * - * RETURN: None + * RETURN: INTERRUPT_HANDLED or INTERRUPT_NOT_HANDLED * * DESCRIPTION: Detect and dispatch a General Purpose Event to either a function * (e.g. EC) or method (e.g. _Lxx/_Exx) handler. * ******************************************************************************/ -void acpi_dispatch_gpe(acpi_handle gpe_device, u32 gpe_number) +u32 acpi_dispatch_gpe(acpi_handle gpe_device, u32 gpe_number) { ACPI_FUNCTION_TRACE(acpi_dispatch_gpe); - acpi_ev_detect_gpe(gpe_device, NULL, gpe_number); + return acpi_ev_detect_gpe(gpe_device, NULL, gpe_number); } ACPI_EXPORT_SYMBOL(acpi_dispatch_gpe) @@ -669,9 +669,9 @@ ACPI_EXPORT_SYMBOL(acpi_dispatch_gpe) * * RETURN: Status * - * DESCRIPTION: Clear and conditionally reenable a GPE. This completes the GPE + * DESCRIPTION: Clear and conditionally re-enable a GPE. This completes the GPE * processing. Intended for use by asynchronous host-installed - * GPE handlers. The GPE is only reenabled if the enable_for_run bit + * GPE handlers. The GPE is only re-enabled if the enable_for_run bit * is set in the GPE info. 
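
acpi_dispatch_gpe() now propagates the result of acpi_ev_detect_gpe() instead of returning void, so a caller can tell whether the GPE was actually serviced. A hedged usage sketch; it assumes the conventional ACPI_INTERRUPT_HANDLED return value and a GPE located in the FADT-defined blocks (hence the NULL device):

#include <linux/acpi.h>

/*
 * Illustrative: poll one GPE and report whether a handler or an
 * _Lxx/_Exx method ran for it.
 */
static bool poll_gpe(u32 gpe_number)
{
	return acpi_dispatch_gpe(NULL, gpe_number) == ACPI_INTERRUPT_HANDLED;
}
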
* ******************************************************************************/ @@ -795,6 +795,45 @@ acpi_status acpi_enable_all_wakeup_gpes(void) ACPI_EXPORT_SYMBOL(acpi_enable_all_wakeup_gpes) +/****************************************************************************** + * + * FUNCTION: acpi_any_gpe_status_set + * + * PARAMETERS: gpe_skip_number - Number of the GPE to skip + * + * RETURN: Whether or not the status bit is set for any GPE + * + * DESCRIPTION: Check the status bits of all enabled GPEs, except for the one + * represented by the "skip" argument, and return TRUE if any of + * them is set or FALSE otherwise. + * + ******************************************************************************/ +u32 acpi_any_gpe_status_set(u32 gpe_skip_number) +{ + acpi_status status; + acpi_handle gpe_device; + u8 ret; + + ACPI_FUNCTION_TRACE(acpi_any_gpe_status_set); + + status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS); + if (ACPI_FAILURE(status)) { + return (FALSE); + } + + status = acpi_get_gpe_device(gpe_skip_number, &gpe_device); + if (ACPI_FAILURE(status)) { + gpe_device = NULL; + } + + ret = acpi_hw_check_all_gpes(gpe_device, gpe_skip_number); + (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS); + + return (ret); +} + +ACPI_EXPORT_SYMBOL(acpi_any_gpe_status_set) + /******************************************************************************* * * FUNCTION: acpi_install_gpe_block diff --git a/drivers/acpi/acpica/evxfregn.c b/drivers/acpi/acpica/evxfregn.c index 3b3a25d9f0e6..bccc672c934c 100644 --- a/drivers/acpi/acpica/evxfregn.c +++ b/drivers/acpi/acpica/evxfregn.c @@ -4,7 +4,7 @@ * Module Name: evxfregn - External Interfaces, ACPI Operation Regions and * Address Spaces. * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. * *****************************************************************************/ @@ -20,13 +20,14 @@ ACPI_MODULE_NAME("evxfregn") /******************************************************************************* * - * FUNCTION: acpi_install_address_space_handler + * FUNCTION: acpi_install_address_space_handler_internal * * PARAMETERS: device - Handle for the device * space_id - The address space ID * handler - Address of the handler * setup - Address of the setup function * context - Value passed to the handler on each access + * Run_reg - Run _REG methods for this address space? * * RETURN: Status * @@ -37,13 +38,16 @@ ACPI_MODULE_NAME("evxfregn") * are executed here, and these methods can only be safely executed after * the default handlers have been installed and the hardware has been * initialized (via acpi_enable_subsystem.) + * To avoid this problem pass FALSE for Run_Reg and later on call + * acpi_execute_reg_methods() to execute _REG. 
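
The split into acpi_install_address_space_handler_no_reg() and acpi_execute_reg_methods() (both defined below) lets a driver install its operation region handler early and defer the _REG calls until it can actually service accesses. A hedged usage sketch; the device handle, handler body, and helper name are placeholders, and only the two exported calls come from this diff:

#include <linux/acpi.h>

/* Hypothetical EC address space handler, for illustration only */
static acpi_status my_ec_space_handler(u32 function, acpi_physical_address address,
				       u32 bit_width, u64 *value,
				       void *handler_context, void *region_context)
{
	return AE_OK;
}

static acpi_status install_ec_handler_deferred(acpi_handle ec_device)
{
	acpi_status status;

	/* Install the handler, but do not run the _REG methods yet */
	status = acpi_install_address_space_handler_no_reg(ec_device,
							   ACPI_ADR_SPACE_EC,
							   &my_ec_space_handler,
							   NULL, NULL);
	if (ACPI_FAILURE(status))
		return status;

	/* ... later, once accesses can be serviced, run _REG(EmbeddedControl, 1) */
	return acpi_execute_reg_methods(ec_device, ACPI_UINT32_MAX,
					ACPI_ADR_SPACE_EC);
}
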
* ******************************************************************************/ -acpi_status -acpi_install_address_space_handler(acpi_handle device, - acpi_adr_space_type space_id, - acpi_adr_space_handler handler, - acpi_adr_space_setup setup, void *context) +static acpi_status +acpi_install_address_space_handler_internal(acpi_handle device, + acpi_adr_space_type space_id, + acpi_adr_space_handler handler, + acpi_adr_space_setup setup, + void *context, u8 run_reg) { struct acpi_namespace_node *node; acpi_status status; @@ -80,14 +84,41 @@ acpi_install_address_space_handler(acpi_handle device, /* Run all _REG methods for this address space */ - acpi_ev_execute_reg_methods(node, space_id, ACPI_REG_CONNECT); + if (run_reg) { + acpi_ev_execute_reg_methods(node, ACPI_UINT32_MAX, space_id, + ACPI_REG_CONNECT); + } unlock_and_exit: (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); return_ACPI_STATUS(status); } +acpi_status +acpi_install_address_space_handler(acpi_handle device, + acpi_adr_space_type space_id, + acpi_adr_space_handler handler, + acpi_adr_space_setup setup, void *context) +{ + return acpi_install_address_space_handler_internal(device, space_id, + handler, setup, + context, TRUE); +} + ACPI_EXPORT_SYMBOL(acpi_install_address_space_handler) +acpi_status +acpi_install_address_space_handler_no_reg(acpi_handle device, + acpi_adr_space_type space_id, + acpi_adr_space_handler handler, + acpi_adr_space_setup setup, + void *context) +{ + return acpi_install_address_space_handler_internal(device, space_id, + handler, setup, + context, FALSE); +} + +ACPI_EXPORT_SYMBOL(acpi_install_address_space_handler_no_reg) /******************************************************************************* * @@ -226,3 +257,54 @@ unlock_and_exit: } ACPI_EXPORT_SYMBOL(acpi_remove_address_space_handler) +/******************************************************************************* + * + * FUNCTION: acpi_execute_reg_methods + * + * PARAMETERS: device - Handle for the device + * max_depth - Depth to which search for _REG + * space_id - The address space ID + * + * RETURN: Status + * + * DESCRIPTION: Execute _REG for all op_regions of a given space_id. + * + ******************************************************************************/ +acpi_status +acpi_execute_reg_methods(acpi_handle device, u32 max_depth, + acpi_adr_space_type space_id) +{ + struct acpi_namespace_node *node; + acpi_status status; + + ACPI_FUNCTION_TRACE(acpi_execute_reg_methods); + + /* Parameter validation */ + + if (!device) { + return_ACPI_STATUS(AE_BAD_PARAMETER); + } + + status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); + if (ACPI_FAILURE(status)) { + return_ACPI_STATUS(status); + } + + /* Convert and validate the device handle */ + + node = acpi_ns_validate_handle(device); + if (node) { + + /* Run all _REG methods for this address space */ + + acpi_ev_execute_reg_methods(node, max_depth, space_id, + ACPI_REG_CONNECT); + } else { + status = AE_BAD_PARAMETER; + } + + (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); + return_ACPI_STATUS(status); +} + +ACPI_EXPORT_SYMBOL(acpi_execute_reg_methods) diff --git a/drivers/acpi/acpica/exconcat.c b/drivers/acpi/acpica/exconcat.c index 5e75c510ca25..c248c9b162fa 100644 --- a/drivers/acpi/acpica/exconcat.c +++ b/drivers/acpi/acpica/exconcat.c @@ -3,7 +3,7 @@ * * Module Name: exconcat - Concatenate-type AML operators * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. 
* *****************************************************************************/ diff --git a/drivers/acpi/acpica/exconfig.c b/drivers/acpi/acpica/exconfig.c index 2373a7492151..4d7dd0fc6b07 100644 --- a/drivers/acpi/acpica/exconfig.c +++ b/drivers/acpi/acpica/exconfig.c @@ -3,7 +3,7 @@ * * Module Name: exconfig - Namespace reconfiguration (Load/Unload opcodes) * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. * *****************************************************************************/ @@ -87,11 +87,21 @@ acpi_ex_load_table_op(struct acpi_walk_state *walk_state, struct acpi_namespace_node *parent_node; struct acpi_namespace_node *start_node; struct acpi_namespace_node *parameter_node = NULL; + union acpi_operand_object *return_obj; union acpi_operand_object *ddb_handle; u32 table_index; ACPI_FUNCTION_TRACE(ex_load_table_op); + /* Create the return object */ + + return_obj = acpi_ut_create_integer_object((u64)0); + if (!return_obj) { + return_ACPI_STATUS(AE_NO_MEMORY); + } + + *return_desc = return_obj; + /* Find the ACPI table in the RSDT/XSDT */ acpi_ex_exit_interpreter(); @@ -106,12 +116,6 @@ acpi_ex_load_table_op(struct acpi_walk_state *walk_state, /* Table not found, return an Integer=0 and AE_OK */ - ddb_handle = acpi_ut_create_integer_object((u64) 0); - if (!ddb_handle) { - return_ACPI_STATUS(AE_NO_MEMORY); - } - - *return_desc = ddb_handle; return_ACPI_STATUS(AE_OK); } @@ -174,12 +178,11 @@ acpi_ex_load_table_op(struct acpi_walk_state *walk_state, return_ACPI_STATUS(status); } - /* Complete the initialization/resolution of package objects */ + /* Complete the initialization/resolution of new objects */ - status = acpi_ns_walk_namespace(ACPI_TYPE_PACKAGE, ACPI_ROOT_OBJECT, - ACPI_UINT32_MAX, 0, - acpi_ns_init_one_package, NULL, NULL, - NULL); + acpi_ex_exit_interpreter(); + acpi_ns_initialize_objects(); + acpi_ex_enter_interpreter(); /* Parameter Data (optional) */ @@ -199,7 +202,13 @@ acpi_ex_load_table_op(struct acpi_walk_state *walk_state, } } - *return_desc = ddb_handle; + /* Remove the reference to ddb_handle created by acpi_ex_add_table above */ + + acpi_ut_remove_reference(ddb_handle); + + /* Return -1 (non-zero) indicates success */ + + return_obj->integer.value = 0xFFFFFFFFFFFFFFFF; return_ACPI_STATUS(status); } @@ -250,7 +259,7 @@ acpi_ex_region_read(union acpi_operand_object *obj_desc, u32 length, u8 *buffer) * * PARAMETERS: obj_desc - Region or Buffer/Field where the table will be * obtained - * target - Where a handle to the table will be stored + * target - Where the status of the load will be stored * walk_state - Current state * * RETURN: Status @@ -279,6 +288,20 @@ acpi_ex_load_op(union acpi_operand_object *obj_desc, ACPI_FUNCTION_TRACE(ex_load_op); + if (target->common.descriptor_type == ACPI_DESC_TYPE_NAMED) { + target = + acpi_ns_get_attached_object(ACPI_CAST_PTR + (struct acpi_namespace_node, + target)); + } + if (target->common.type != ACPI_TYPE_INTEGER) { + ACPI_ERROR((AE_INFO, "Type not integer: %X", + target->common.type)); + return_ACPI_STATUS(AE_AML_OPERAND_TYPE); + } + + target->integer.value = 0; + /* Source Object can be either an op_region or a Buffer/Field */ switch (obj_desc->common.type) { @@ -412,7 +435,7 @@ acpi_ex_load_op(union acpi_operand_object *obj_desc, acpi_ex_exit_interpreter(); status = acpi_tb_install_and_load_table(ACPI_PTR_TO_PHYSADDR(table), ACPI_TABLE_ORIGIN_INTERNAL_VIRTUAL, - TRUE, &table_index); + table, TRUE, &table_index); acpi_ex_enter_interpreter(); if (ACPI_FAILURE(status)) { @@ 
-431,34 +454,22 @@ acpi_ex_load_op(union acpi_operand_object *obj_desc, */ status = acpi_ex_add_table(table_index, &ddb_handle); if (ACPI_FAILURE(status)) { - - /* On error, table_ptr was deallocated above */ - return_ACPI_STATUS(status); } - /* Complete the initialization/resolution of package objects */ - - status = acpi_ns_walk_namespace(ACPI_TYPE_PACKAGE, ACPI_ROOT_OBJECT, - ACPI_UINT32_MAX, 0, - acpi_ns_init_one_package, NULL, NULL, - NULL); - - /* Store the ddb_handle into the Target operand */ + /* Complete the initialization/resolution of new objects */ - status = acpi_ex_store(ddb_handle, target, walk_state); - if (ACPI_FAILURE(status)) { - (void)acpi_ex_unload_table(ddb_handle); + acpi_ex_exit_interpreter(); + acpi_ns_initialize_objects(); + acpi_ex_enter_interpreter(); - /* table_ptr was deallocated above */ + /* Remove the reference to ddb_handle created by acpi_ex_add_table above */ - acpi_ut_remove_reference(ddb_handle); - return_ACPI_STATUS(status); - } + acpi_ut_remove_reference(ddb_handle); - /* Remove the reference by added by acpi_ex_store above */ + /* Return -1 (non-zero) indicates success */ - acpi_ut_remove_reference(ddb_handle); + target->integer.value = 0xFFFFFFFFFFFFFFFF; return_ACPI_STATUS(status); } diff --git a/drivers/acpi/acpica/exconvrt.c b/drivers/acpi/acpica/exconvrt.c index 1a70b80cc406..fded9bfc2436 100644 --- a/drivers/acpi/acpica/exconvrt.c +++ b/drivers/acpi/acpica/exconvrt.c @@ -3,7 +3,7 @@ * * Module Name: exconvrt - Object conversion routines * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. * *****************************************************************************/ @@ -17,7 +17,8 @@ ACPI_MODULE_NAME("exconvrt") /* Local prototypes */ static u32 -acpi_ex_convert_to_ascii(u64 integer, u16 base, u8 *string, u8 max_length); +acpi_ex_convert_to_ascii(u64 integer, + u16 base, u8 *string, u8 max_length, u8 leading_zeros); /******************************************************************************* * @@ -225,8 +226,8 @@ acpi_ex_convert_to_buffer(union acpi_operand_object *obj_desc, /* Copy the string to the buffer */ new_buf = return_desc->buffer.pointer; - strncpy((char *)new_buf, (char *)obj_desc->string.pointer, - obj_desc->string.length); + memcpy((char *)new_buf, (char *)obj_desc->string.pointer, + obj_desc->string.length); break; default: @@ -249,6 +250,7 @@ acpi_ex_convert_to_buffer(union acpi_operand_object *obj_desc, * base - ACPI_STRING_DECIMAL or ACPI_STRING_HEX * string - Where the string is returned * data_width - Size of data item to be converted, in bytes + * leading_zeros - Allow leading zeros * * RETURN: Actual string length * @@ -257,7 +259,8 @@ acpi_ex_convert_to_buffer(union acpi_operand_object *obj_desc, ******************************************************************************/ static u32 -acpi_ex_convert_to_ascii(u64 integer, u16 base, u8 *string, u8 data_width) +acpi_ex_convert_to_ascii(u64 integer, + u16 base, u8 *string, u8 data_width, u8 leading_zeros) { u64 digit; u32 i; @@ -266,7 +269,8 @@ acpi_ex_convert_to_ascii(u64 integer, u16 base, u8 *string, u8 data_width) u32 hex_length; u32 decimal_length; u32 remainder; - u8 supress_zeros; + u8 supress_zeros = !leading_zeros; + u8 hex_char; ACPI_FUNCTION_ENTRY(); @@ -293,7 +297,6 @@ acpi_ex_convert_to_ascii(u64 integer, u16 base, u8 *string, u8 data_width) break; } - supress_zeros = TRUE; /* No leading zeros */ remainder = 0; for (i = decimal_length; i > 0; i--) { @@ -328,8 +331,17 @@ acpi_ex_convert_to_ascii(u64 integer, u16 base, 
u8 *string, u8 data_width) /* Get one hex digit, most significant digits first */ - string[k] = (u8) + hex_char = (u8) acpi_ut_hex_to_ascii_char(integer, ACPI_MUL_4(j)); + + /* Supress leading zeros until the first non-zero character */ + + if (hex_char == ACPI_ASCII_ZERO && supress_zeros) { + continue; + } + + supress_zeros = FALSE; + string[k] = hex_char; k++; } break; @@ -379,6 +391,7 @@ acpi_ex_convert_to_string(union acpi_operand_object * obj_desc, u32 string_length = 0; u16 base = 16; u8 separator = ','; + u8 leading_zeros; ACPI_FUNCTION_TRACE_PTR(ex_convert_to_string, obj_desc); @@ -400,14 +413,26 @@ acpi_ex_convert_to_string(union acpi_operand_object * obj_desc, * Make room for the maximum decimal number size */ string_length = ACPI_MAX_DECIMAL_DIGITS; + leading_zeros = FALSE; base = 10; break; + case ACPI_EXPLICIT_CONVERT_HEX: + /* + * From to_hex_string. + * + * Supress leading zeros and append "0x" + */ + string_length = + ACPI_MUL_2(acpi_gbl_integer_byte_width) + 2; + leading_zeros = FALSE; + break; default: /* Two hex string characters for each integer byte */ string_length = ACPI_MUL_2(acpi_gbl_integer_byte_width); + leading_zeros = TRUE; break; } @@ -422,17 +447,32 @@ acpi_ex_convert_to_string(union acpi_operand_object * obj_desc, } new_buf = return_desc->buffer.pointer; + if (type == ACPI_EXPLICIT_CONVERT_HEX) { + + /* Append "0x" prefix for explicit hex conversion */ + + *new_buf++ = '0'; + *new_buf++ = 'x'; + } /* Convert integer to string */ string_length = acpi_ex_convert_to_ascii(obj_desc->integer.value, base, new_buf, - acpi_gbl_integer_byte_width); + acpi_gbl_integer_byte_width, + leading_zeros); /* Null terminate at the correct place */ return_desc->string.length = string_length; + if (type == ACPI_EXPLICIT_CONVERT_HEX) { + + /* Take "0x" prefix into account */ + + return_desc->string.length += 2; + } + new_buf[string_length] = 0; break; @@ -448,6 +488,7 @@ acpi_ex_convert_to_string(union acpi_operand_object * obj_desc, * From ACPI: "If the input is a buffer, it is converted to a * a string of decimal values separated by commas." */ + leading_zeros = FALSE; base = 10; /* @@ -475,6 +516,7 @@ acpi_ex_convert_to_string(union acpi_operand_object * obj_desc, * * Each hex number is prefixed with 0x (11/2018) */ + leading_zeros = TRUE; separator = ' '; string_length = (obj_desc->buffer.length * 5); break; @@ -488,6 +530,7 @@ acpi_ex_convert_to_string(union acpi_operand_object * obj_desc, * * Each hex number is prefixed with 0x (11/2018) */ + leading_zeros = TRUE; separator = ','; string_length = (obj_desc->buffer.length * 5); break; @@ -520,7 +563,7 @@ acpi_ex_convert_to_string(union acpi_operand_object * obj_desc, for (i = 0; i < obj_desc->buffer.length; i++) { if (base == 16) { - /* Emit 0x prefix for explict/implicit hex conversion */ + /* Emit 0x prefix for explicit/implicit hex conversion */ *new_buf++ = '0'; *new_buf++ = 'x'; @@ -528,7 +571,8 @@ acpi_ex_convert_to_string(union acpi_operand_object * obj_desc, new_buf += acpi_ex_convert_to_ascii((u64) obj_desc-> buffer.pointer[i], - base, new_buf, 1); + base, new_buf, 1, + leading_zeros); /* Each digit is separated by either a comma or space */ diff --git a/drivers/acpi/acpica/excreate.c b/drivers/acpi/acpica/excreate.c index 3304c6b1e8a7..052c69567997 100644 --- a/drivers/acpi/acpica/excreate.c +++ b/drivers/acpi/acpica/excreate.c @@ -3,7 +3,7 @@ * * Module Name: excreate - Named object creation * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. 
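The leading_zeros plumbing in the exconvrt.c changes above alters what the explicit conversions produce: ToHexString now drops leading zeros and prepends "0x", ToDecimalString stays zero-free, while the implicit integer-to-string path keeps the fixed two-characters-per-byte form. A small user-space approximation of the three results for one 64-bit value (plain printf, not ACPICA code):

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	uint64_t value = 0x00000000DEADBEEFULL;

	/* Implicit conversion: two hex characters per integer byte,
	 * leading zeros kept. */
	printf("implicit       : %016" PRIX64 "\n", value);  /* 00000000DEADBEEF */

	/* ToHexString: leading zeros suppressed, "0x" prepended. */
	printf("ToHexString    : 0x%" PRIX64 "\n", value);    /* 0xDEADBEEF */

	/* ToDecimalString: decimal digits, no leading zeros. */
	printf("ToDecimalString: %" PRIu64 "\n", value);      /* 3735928559 */

	return 0;
}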
* *****************************************************************************/ @@ -279,6 +279,7 @@ acpi_ex_create_region(u8 * aml_start, obj_desc->region.space_id = space_id; obj_desc->region.address = 0; obj_desc->region.length = 0; + obj_desc->region.pointer = NULL; obj_desc->region.node = node; obj_desc->region.handler = NULL; obj_desc->common.flags &= diff --git a/drivers/acpi/acpica/exdebug.c b/drivers/acpi/acpica/exdebug.c index ebbc244039ab..81a07a52b73c 100644 --- a/drivers/acpi/acpica/exdebug.c +++ b/drivers/acpi/acpica/exdebug.c @@ -3,7 +3,7 @@ * * Module Name: exdebug - Support for stores to the AML Debug Object * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/exdump.c b/drivers/acpi/acpica/exdump.c index f71dfa1e90e1..d8aeebaab70a 100644 --- a/drivers/acpi/acpica/exdump.c +++ b/drivers/acpi/acpica/exdump.c @@ -3,7 +3,7 @@ * * Module Name: exdump - Interpreter debug output routines * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. * *****************************************************************************/ @@ -94,7 +94,7 @@ static struct acpi_exdump_info acpi_ex_dump_method[9] = { "Parameter Count"}, {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(method.sync_level), "Sync Level"}, {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(method.mutex), "Mutex"}, - {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(method.owner_id), "Owner Id"}, + {ACPI_EXD_UINT16, ACPI_EXD_OFFSET(method.owner_id), "Owner Id"}, {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(method.thread_count), "Thread Count"}, {ACPI_EXD_UINT32, ACPI_EXD_OFFSET(method.aml_length), "Aml Length"}, {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(method.aml_start), "Aml Start"} @@ -269,8 +269,8 @@ static struct acpi_exdump_info acpi_ex_dump_field_common[7] = { static struct acpi_exdump_info acpi_ex_dump_node[7] = { {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_node), NULL}, - {ACPI_EXD_UINT8, ACPI_EXD_NSOFFSET(flags), "Flags"}, - {ACPI_EXD_UINT8, ACPI_EXD_NSOFFSET(owner_id), "Owner Id"}, + {ACPI_EXD_UINT16, ACPI_EXD_NSOFFSET(flags), "Flags"}, + {ACPI_EXD_UINT16, ACPI_EXD_NSOFFSET(owner_id), "Owner Id"}, {ACPI_EXD_LIST, ACPI_EXD_NSOFFSET(object), "Object List"}, {ACPI_EXD_NODE, ACPI_EXD_NSOFFSET(parent), "Parent"}, {ACPI_EXD_NODE, ACPI_EXD_NSOFFSET(child), "Child"}, diff --git a/drivers/acpi/acpica/exfield.c b/drivers/acpi/acpica/exfield.c index e5798f15793a..ced3ff9d0a86 100644 --- a/drivers/acpi/acpica/exfield.c +++ b/drivers/acpi/acpica/exfield.c @@ -3,7 +3,7 @@ * * Module Name: exfield - AML execution - field_unit read/write * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. * *****************************************************************************/ @@ -22,7 +22,7 @@ ACPI_MODULE_NAME("exfield") */ #define ACPI_INVALID_PROTOCOL_ID 0x80 #define ACPI_MAX_PROTOCOL_ID 0x0F -const u8 acpi_protocol_lengths[] = { +static const u8 acpi_protocol_lengths[] = { ACPI_INVALID_PROTOCOL_ID, /* 0 - reserved */ ACPI_INVALID_PROTOCOL_ID, /* 1 - reserved */ 0x00, /* 2 - ATTRIB_QUICK */ @@ -41,6 +41,17 @@ const u8 acpi_protocol_lengths[] = { 0xFF /* F - ATTRIB_RAW_PROCESS_BYTES */ }; +#define PCC_MASTER_SUBSPACE 3 + +/* + * The following macros determine a given offset is a COMD field. 
+ * According to the specification, generic subspaces (types 0-2) contains a + * 2-byte COMD field at offset 4 and master subspaces (type 3) contains a 4-byte + * COMD field starting at offset 12. + */ +#define GENERIC_SUBSPACE_COMMAND(a) (4 == a || a == 5) +#define MASTER_SUBSPACE_COMMAND(a) (12 <= a && a <= 15) + /******************************************************************************* * * FUNCTION: acpi_ex_get_protocol_buffer_length @@ -85,7 +96,8 @@ acpi_ex_get_protocol_buffer_length(u32 protocol_id, u32 *return_length) * RETURN: Status * * DESCRIPTION: Read from a named field. Returns either an Integer or a - * Buffer, depending on the size of the field. + * Buffer, depending on the size of the field and whether if a + * field is created by the create_field() operator. * ******************************************************************************/ @@ -127,7 +139,11 @@ acpi_ex_read_data_from_field(struct acpi_walk_state *walk_state, || obj_desc->field.region_obj->region.space_id == ACPI_ADR_SPACE_GSBUS || obj_desc->field.region_obj->region.space_id == - ACPI_ADR_SPACE_IPMI)) { + ACPI_ADR_SPACE_IPMI + || obj_desc->field.region_obj->region.space_id == + ACPI_ADR_SPACE_PLATFORM_RT + || obj_desc->field.region_obj->region.space_id == + ACPI_ADR_SPACE_FIXED_HARDWARE)) { /* SMBus, GSBus, IPMI serial */ @@ -143,12 +159,17 @@ acpi_ex_read_data_from_field(struct acpi_walk_state *walk_state, * the use of arithmetic operators on the returned value if the * field size is equal or smaller than an Integer. * + * However, all buffer fields created by create_field operator needs to + * remain as a buffer to match other AML interpreter implementations. + * * Note: Field.length is in bits. */ buffer_length = (acpi_size)ACPI_ROUND_BITS_UP_TO_BYTES(obj_desc->field.bit_length); - if (buffer_length > acpi_gbl_integer_byte_width) { + if (buffer_length > acpi_gbl_integer_byte_width || + (obj_desc->common.type == ACPI_TYPE_BUFFER_FIELD && + obj_desc->buffer_field.is_create_field)) { /* Field is too large for an Integer, create a Buffer instead */ @@ -177,6 +198,25 @@ acpi_ex_read_data_from_field(struct acpi_walk_state *walk_state, status = acpi_ex_read_gpio(obj_desc, buffer); goto exit; + } else if ((obj_desc->common.type == ACPI_TYPE_LOCAL_REGION_FIELD) && + (obj_desc->field.region_obj->region.space_id == + ACPI_ADR_SPACE_PLATFORM_COMM)) { + /* + * Reading from a PCC field unit does not require the handler because + * it only requires reading from the internal_pcc_buffer. + */ + ACPI_DEBUG_PRINT((ACPI_DB_BFIELD, + "PCC FieldRead bits %u\n", + obj_desc->field.bit_length)); + + memcpy(buffer, + obj_desc->field.region_obj->field.internal_pcc_buffer + + obj_desc->field.base_byte_offset, + (acpi_size)ACPI_ROUND_BITS_UP_TO_BYTES(obj_desc->field. 
+ bit_length)); + + *ret_buffer_desc = buffer_desc; + return AE_OK; } ACPI_DEBUG_PRINT((ACPI_DB_BFIELD, @@ -229,6 +269,7 @@ acpi_ex_write_data_to_field(union acpi_operand_object *source_desc, { acpi_status status; u32 buffer_length; + u32 data_length; void *buffer; ACPI_FUNCTION_TRACE_PTR(ex_write_data_to_field, obj_desc); @@ -264,7 +305,11 @@ acpi_ex_write_data_to_field(union acpi_operand_object *source_desc, || obj_desc->field.region_obj->region.space_id == ACPI_ADR_SPACE_GSBUS || obj_desc->field.region_obj->region.space_id == - ACPI_ADR_SPACE_IPMI)) { + ACPI_ADR_SPACE_IPMI + || obj_desc->field.region_obj->region.space_id == + ACPI_ADR_SPACE_PLATFORM_RT + || obj_desc->field.region_obj->region.space_id == + ACPI_ADR_SPACE_FIXED_HARDWARE)) { /* SMBus, GSBus, IPMI serial */ @@ -272,6 +317,39 @@ acpi_ex_write_data_to_field(union acpi_operand_object *source_desc, acpi_ex_write_serial_bus(source_desc, obj_desc, result_desc); return_ACPI_STATUS(status); + } else if ((obj_desc->common.type == ACPI_TYPE_LOCAL_REGION_FIELD) && + (obj_desc->field.region_obj->region.space_id == + ACPI_ADR_SPACE_PLATFORM_COMM)) { + /* + * According to the spec a write to the COMD field will invoke the + * region handler. Otherwise, write to the pcc_internal buffer. This + * implementation will use the offsets specified rather than the name + * of the field. This is considered safer because some firmware tools + * are known to obfiscate named objects. + */ + data_length = + (acpi_size)ACPI_ROUND_BITS_UP_TO_BYTES(obj_desc->field. + bit_length); + memcpy(obj_desc->field.region_obj->field.internal_pcc_buffer + + obj_desc->field.base_byte_offset, + source_desc->buffer.pointer, data_length); + + if (MASTER_SUBSPACE_COMMAND(obj_desc->field.base_byte_offset)) { + + /* Perform the write */ + + ACPI_DEBUG_PRINT((ACPI_DB_BFIELD, + "PCC COMD field has been written. Invoking PCC handler now.\n")); + + status = + acpi_ex_access_region(obj_desc, 0, + (u64 *)obj_desc->field. + region_obj->field. + internal_pcc_buffer, + ACPI_WRITE); + return_ACPI_STATUS(status); + } + return (AE_OK); } /* Get a pointer to the data to be written */ diff --git a/drivers/acpi/acpica/exfldio.c b/drivers/acpi/acpica/exfldio.c index 516994133128..0771934c0455 100644 --- a/drivers/acpi/acpica/exfldio.c +++ b/drivers/acpi/acpica/exfldio.c @@ -3,7 +3,7 @@ * * Module Name: exfldio - Aml Field I/O * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. * *****************************************************************************/ @@ -104,7 +104,7 @@ acpi_ex_setup_region(union acpi_operand_object *obj_desc, #ifdef ACPI_UNDER_DEVELOPMENT /* * If the Field access is any_acc, we can now compute the optimal - * access (because we know know the length of the parent region) + * access (because we know the length of the parent region) */ if (!(obj_desc->common.flags & AOPOBJ_DATA_VALID)) { if (ACPI_FAILURE(status)) { @@ -434,7 +434,7 @@ acpi_ex_field_datum_io(union acpi_operand_object *obj_desc, * region_field case and write the datum to the Operation Region */ - /*lint -fallthrough */ + ACPI_FALLTHROUGH; case ACPI_TYPE_LOCAL_REGION_FIELD: /* diff --git a/drivers/acpi/acpica/exmisc.c b/drivers/acpi/acpica/exmisc.c index d91f15cdf3ae..07cbac58ed21 100644 --- a/drivers/acpi/acpica/exmisc.c +++ b/drivers/acpi/acpica/exmisc.c @@ -3,7 +3,7 @@ * * Module Name: exmisc - ACPI AML (p-code) execution - specific opcodes * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. 
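The exfield.c PCC write path above stages field data in the subspace's shared buffer and only invokes the region handler when the master-subspace command bytes (offsets 12-15) are written. A simplified, self-contained model of that decision; struct pcc_subspace_model, its 64-byte buffer, and the ring_doorbell callback are illustrative stand-ins, not ACPICA code.

#include <stddef.h>
#include <string.h>

/* Same offset range as MASTER_SUBSPACE_COMMAND() above: a 4-byte command
 * field starting at offset 12 of a master (type 3) subspace. */
#define EXAMPLE_MASTER_COMD(a)	((a) >= 12 && (a) <= 15)

struct pcc_subspace_model {
	unsigned char shmem[64];	/* stand-in for internal_pcc_buffer */
	void (*ring_doorbell)(const unsigned char *shmem, size_t len);
};

static void pcc_field_write(struct pcc_subspace_model *ss, size_t offset,
			    const void *data, size_t len)
{
	if (offset + len > sizeof(ss->shmem))
		return;				/* out of range, ignore */

	memcpy(ss->shmem + offset, data, len);	/* stage the write */

	/* Only a write touching the COMD bytes notifies the platform. */
	if (EXAMPLE_MASTER_COMD(offset) && ss->ring_doorbell)
		ss->ring_doorbell(ss->shmem, sizeof(ss->shmem));
}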
* *****************************************************************************/ diff --git a/drivers/acpi/acpica/exmutex.c b/drivers/acpi/acpica/exmutex.c index c06079774bad..1fa013197fcf 100644 --- a/drivers/acpi/acpica/exmutex.c +++ b/drivers/acpi/acpica/exmutex.c @@ -3,7 +3,7 @@ * * Module Name: exmutex - ASL Mutex Acquire/Release functions * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/exnames.c b/drivers/acpi/acpica/exnames.c index 7eed79dcda83..76ab73c37e90 100644 --- a/drivers/acpi/acpica/exnames.c +++ b/drivers/acpi/acpica/exnames.c @@ -3,7 +3,7 @@ * * Module Name: exnames - interpreter/scanner name load/execute * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. * *****************************************************************************/ @@ -53,10 +53,10 @@ static char *acpi_ex_allocate_name_string(u32 prefix_count, u32 num_name_segs) /* Special case for root */ - size_needed = 1 + (ACPI_NAME_SIZE * num_name_segs) + 2 + 1; + size_needed = 1 + (ACPI_NAMESEG_SIZE * num_name_segs) + 2 + 1; } else { size_needed = - prefix_count + (ACPI_NAME_SIZE * num_name_segs) + 2 + 1; + prefix_count + (ACPI_NAMESEG_SIZE * num_name_segs) + 2 + 1; } /* @@ -141,7 +141,7 @@ static acpi_status acpi_ex_name_segment(u8 ** in_aml_address, char *name_string) } for (index = 0; - (index < ACPI_NAME_SIZE) + (index < ACPI_NAMESEG_SIZE) && (acpi_ut_valid_name_char(*aml_address, 0)); index++) { char_buf[index] = *aml_address++; } diff --git a/drivers/acpi/acpica/exoparg1.c b/drivers/acpi/acpica/exoparg1.c index ba9fbae0cf91..6ac7e0ca5c9d 100644 --- a/drivers/acpi/acpica/exoparg1.c +++ b/drivers/acpi/acpica/exoparg1.c @@ -3,7 +3,7 @@ * * Module Name: exoparg1 - AML execution - opcodes with 1 argument * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. * *****************************************************************************/ @@ -163,6 +163,7 @@ acpi_status acpi_ex_opcode_1A_0T_0R(struct acpi_walk_state *walk_state) return_ACPI_STATUS(status); } +#ifdef _OBSOLETE_CODE /* Was originally used for Load() operator */ /******************************************************************************* * * FUNCTION: acpi_ex_opcode_1A_1T_0R @@ -187,10 +188,12 @@ acpi_status acpi_ex_opcode_1A_1T_0R(struct acpi_walk_state *walk_state) /* Examine the AML opcode */ switch (walk_state->opcode) { +#ifdef _OBSOLETE_CODE case AML_LOAD_OP: status = acpi_ex_load_op(operand[0], operand[1], walk_state); break; +#endif default: /* Unknown opcode */ @@ -204,6 +207,7 @@ cleanup: return_ACPI_STATUS(status); } +#endif /******************************************************************************* * @@ -215,6 +219,8 @@ cleanup: * * DESCRIPTION: Execute opcode with one argument, one target, and a * return value. + * January 2022: Added Load operator, with new ACPI 6.4 + * semantics. 
* ******************************************************************************/ @@ -239,6 +245,7 @@ acpi_status acpi_ex_opcode_1A_1T_1R(struct acpi_walk_state *walk_state) case AML_FIND_SET_LEFT_BIT_OP: case AML_FIND_SET_RIGHT_BIT_OP: case AML_FROM_BCD_OP: + case AML_LOAD_OP: case AML_TO_BCD_OP: case AML_CONDITIONAL_REF_OF_OP: @@ -338,6 +345,20 @@ acpi_status acpi_ex_opcode_1A_1T_1R(struct acpi_walk_state *walk_state) } break; + case AML_LOAD_OP: /* Result1 = Load (Operand[0], Result1) */ + + return_desc->integer.value = 0; + status = + acpi_ex_load_op(operand[0], return_desc, + walk_state); + if (ACPI_SUCCESS(status)) { + + /* Return -1 (non-zero) indicates success */ + + return_desc->integer.value = 0xFFFFFFFFFFFFFFFF; + } + break; + case AML_TO_BCD_OP: /* to_bcd (Operand, Result) */ return_desc->integer.value = 0; @@ -1007,7 +1028,8 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state) (walk_state, return_desc, &temp_desc); if (ACPI_FAILURE(status)) { - goto cleanup; + return_ACPI_STATUS + (status); } return_desc = temp_desc; diff --git a/drivers/acpi/acpica/exoparg2.c b/drivers/acpi/acpica/exoparg2.c index 3a477566ba1b..a94fa4d70e99 100644 --- a/drivers/acpi/acpica/exoparg2.c +++ b/drivers/acpi/acpica/exoparg2.c @@ -3,7 +3,7 @@ * * Module Name: exoparg2 - AML execution - opcodes with 2 arguments * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. * *****************************************************************************/ @@ -390,10 +390,10 @@ acpi_status acpi_ex_opcode_2A_1T_1R(struct acpi_walk_state *walk_state) /* Failure means that the Index was beyond the end of the object */ if (ACPI_FAILURE(status)) { - ACPI_EXCEPTION((AE_INFO, status, - "Index (0x%X%8.8X) is beyond end of object (length 0x%X)", - ACPI_FORMAT_UINT64(index), - (u32)length)); + ACPI_BIOS_EXCEPTION((AE_INFO, status, + "Index (0x%X%8.8X) is beyond end of object (length 0x%X)", + ACPI_FORMAT_UINT64(index), + (u32)length)); goto cleanup; } diff --git a/drivers/acpi/acpica/exoparg3.c b/drivers/acpi/acpica/exoparg3.c index 764fa6f924ff..bf08110ed6d2 100644 --- a/drivers/acpi/acpica/exoparg3.c +++ b/drivers/acpi/acpica/exoparg3.c @@ -3,7 +3,7 @@ * * Module Name: exoparg3 - AML execution - opcodes with 3 arguments * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/exoparg6.c b/drivers/acpi/acpica/exoparg6.c index 3941525f3d6b..cb078e39abf7 100644 --- a/drivers/acpi/acpica/exoparg6.c +++ b/drivers/acpi/acpica/exoparg6.c @@ -3,7 +3,7 @@ * * Module Name: exoparg6 - AML execution - opcodes with 6 arguments * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/exprep.c b/drivers/acpi/acpica/exprep.c index 738f3c732363..1b1a006e82de 100644 --- a/drivers/acpi/acpica/exprep.c +++ b/drivers/acpi/acpica/exprep.c @@ -3,7 +3,7 @@ * * Module Name: exprep - ACPI AML field prep utilities * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. 
* *****************************************************************************/ @@ -437,6 +437,9 @@ acpi_status acpi_ex_prep_field_value(struct acpi_create_field_info *info) if (info->connection_node) { second_desc = info->connection_node->object; + if (second_desc == NULL) { + break; + } if (!(second_desc->common.flags & AOPOBJ_DATA_VALID)) { status = acpi_ds_get_buffer_arguments(second_desc); @@ -473,10 +476,6 @@ acpi_status acpi_ex_prep_field_value(struct acpi_create_field_info *info) (u8)access_byte_width; } } - /* An additional reference for the container */ - - acpi_ut_add_reference(obj_desc->field.region_obj); - ACPI_DEBUG_PRINT((ACPI_DB_BFIELD, "RegionField: BitOff %X, Off %X, Gran %X, Region %p\n", obj_desc->field.start_field_bit_offset, diff --git a/drivers/acpi/acpica/exregion.c b/drivers/acpi/acpica/exregion.c index 2c58f5e00b1a..a390a1c2b0ab 100644 --- a/drivers/acpi/acpica/exregion.c +++ b/drivers/acpi/acpica/exregion.c @@ -3,7 +3,7 @@ * * Module Name: exregion - ACPI default op_region (address space) handlers * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. * *****************************************************************************/ @@ -41,9 +41,9 @@ acpi_ex_system_memory_space_handler(u32 function, acpi_status status = AE_OK; void *logical_addr_ptr = NULL; struct acpi_mem_space_context *mem_info = region_context; + struct acpi_mem_mapping *mm = mem_info->cur_mm; u32 length; acpi_size map_length; - acpi_size page_boundary_map_length; #ifdef ACPI_MISALIGNMENT_NOT_SUPPORTED u32 remainder; #endif @@ -96,20 +96,37 @@ acpi_ex_system_memory_space_handler(u32 function, * Is 1) Address below the current mapping? OR * 2) Address beyond the current mapping? */ - if ((address < mem_info->mapped_physical_address) || - (((u64) address + length) > ((u64) - mem_info->mapped_physical_address + - mem_info->mapped_length))) { + if (!mm || (address < mm->physical_address) || + ((u64) address + length > (u64) mm->physical_address + mm->length)) { /* - * The request cannot be resolved by the current memory mapping; - * Delete the existing mapping and create a new one. + * The request cannot be resolved by the current memory mapping. + * + * Look for an existing saved mapping covering the address range + * at hand. If found, save it as the current one and carry out + * the access. */ - if (mem_info->mapped_length) { + for (mm = mem_info->first_mm; mm; mm = mm->next_mm) { + if (mm == mem_info->cur_mm) + continue; + + if (address < mm->physical_address) + continue; - /* Valid mapping, delete it */ + if ((u64) address + length > + (u64) mm->physical_address + mm->length) + continue; - acpi_os_unmap_memory(mem_info->mapped_logical_address, - mem_info->mapped_length); + mem_info->cur_mm = mm; + goto access; + } + + /* Create a new mappings list entry */ + mm = ACPI_ALLOCATE_ZEROED(sizeof(*mm)); + if (!mm) { + ACPI_ERROR((AE_INFO, + "Unable to save memory mapping at 0x%8.8X%8.8X, size %u", + ACPI_FORMAT_UINT64(address), length)); + return_ACPI_STATUS(AE_NO_MEMORY); } /* @@ -120,52 +137,44 @@ acpi_ex_system_memory_space_handler(u32 function, map_length = (acpi_size) ((mem_info->address + mem_info->length) - address); - /* - * If mapping the entire remaining portion of the region will cross - * a page boundary, just map up to the page boundary, do not cross. - * On some systems, crossing a page boundary while mapping regions - * can cause warnings if the pages have different attributes - * due to resource management. 
- * - * This has the added benefit of constraining a single mapping to - * one page, which is similar to the original code that used a 4k - * maximum window. - */ - page_boundary_map_length = (acpi_size) - (ACPI_ROUND_UP(address, ACPI_DEFAULT_PAGE_SIZE) - address); - if (page_boundary_map_length == 0) { - page_boundary_map_length = ACPI_DEFAULT_PAGE_SIZE; - } - - if (map_length > page_boundary_map_length) { - map_length = page_boundary_map_length; - } + if (map_length > ACPI_DEFAULT_PAGE_SIZE) + map_length = ACPI_DEFAULT_PAGE_SIZE; /* Create a new mapping starting at the address given */ - mem_info->mapped_logical_address = - acpi_os_map_memory(address, map_length); - if (!mem_info->mapped_logical_address) { + logical_addr_ptr = acpi_os_map_memory(address, map_length); + if (!logical_addr_ptr) { ACPI_ERROR((AE_INFO, "Could not map memory at 0x%8.8X%8.8X, size %u", ACPI_FORMAT_UINT64(address), (u32)map_length)); - mem_info->mapped_length = 0; + ACPI_FREE(mm); return_ACPI_STATUS(AE_NO_MEMORY); } /* Save the physical address and mapping size */ - mem_info->mapped_physical_address = address; - mem_info->mapped_length = map_length; + mm->logical_address = logical_addr_ptr; + mm->physical_address = address; + mm->length = map_length; + + /* + * Add the new entry to the mappigs list and save it as the + * current mapping. + */ + mm->next_mm = mem_info->first_mm; + mem_info->first_mm = mm; + + mem_info->cur_mm = mm; } +access: /* * Generate a logical pointer corresponding to the address we want to * access */ - logical_addr_ptr = mem_info->mapped_logical_address + - ((u64) address - (u64) mem_info->mapped_physical_address); + logical_addr_ptr = mm->logical_address + + ((u64) address - (u64) mm->physical_address); ACPI_DEBUG_PRINT((ACPI_DB_INFO, "System-Memory (width %u) R/W %u Address=%8.8X%8.8X\n", @@ -481,8 +490,15 @@ acpi_ex_data_table_space_handler(u32 function, u64 *value, void *handler_context, void *region_context) { + struct acpi_data_table_mapping *mapping; + char *pointer; + ACPI_FUNCTION_TRACE(ex_data_table_space_handler); + mapping = (struct acpi_data_table_mapping *) region_context; + pointer = ACPI_CAST_PTR(char, mapping->pointer) + + (address - ACPI_PTR_TO_PHYSADDR(mapping->pointer)); + /* * Perform the memory read or write. The bit_width was already * validated. @@ -490,14 +506,14 @@ acpi_ex_data_table_space_handler(u32 function, switch (function) { case ACPI_READ: - memcpy(ACPI_CAST_PTR(char, value), - ACPI_PHYSADDR_TO_PTR(address), ACPI_DIV_8(bit_width)); + memcpy(ACPI_CAST_PTR(char, value), pointer, + ACPI_DIV_8(bit_width)); break; case ACPI_WRITE: - memcpy(ACPI_PHYSADDR_TO_PTR(address), - ACPI_CAST_PTR(char, value), ACPI_DIV_8(bit_width)); + memcpy(pointer, ACPI_CAST_PTR(char, value), + ACPI_DIV_8(bit_width)); break; default: diff --git a/drivers/acpi/acpica/exresnte.c b/drivers/acpi/acpica/exresnte.c index ea4b0fe674f1..dd83631090fc 100644 --- a/drivers/acpi/acpica/exresnte.c +++ b/drivers/acpi/acpica/exresnte.c @@ -3,7 +3,7 @@ * * Module Name: exresnte - AML Interpreter object resolution * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/exresolv.c b/drivers/acpi/acpica/exresolv.c index 5e42c7de46fa..4589de3f3012 100644 --- a/drivers/acpi/acpica/exresolv.c +++ b/drivers/acpi/acpica/exresolv.c @@ -3,7 +3,7 @@ * * Module Name: exresolv - AML Interpreter object resolution * - * Copyright (C) 2000 - 2018, Intel Corp. 
+ * Copyright (C) 2000 - 2025, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/exresop.c b/drivers/acpi/acpica/exresop.c index d94190bc5985..782ee353a709 100644 --- a/drivers/acpi/acpica/exresop.c +++ b/drivers/acpi/acpica/exresop.c @@ -3,7 +3,7 @@ * * Module Name: exresop - AML Interpreter operand/object resolution * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. * *****************************************************************************/ @@ -198,7 +198,7 @@ acpi_ex_resolve_operands(u16 opcode, target_op = AML_DEBUG_OP; - /*lint -fallthrough */ + ACPI_FALLTHROUGH; case ACPI_REFCLASS_ARG: case ACPI_REFCLASS_LOCAL: @@ -264,7 +264,7 @@ acpi_ex_resolve_operands(u16 opcode, * Else not a string - fall through to the normal Reference * case below */ - /*lint -fallthrough */ + ACPI_FALLTHROUGH; case ARGI_REFERENCE: /* References: */ case ARGI_INTEGER_REF: diff --git a/drivers/acpi/acpica/exserial.c b/drivers/acpi/acpica/exserial.c index ec61553c4483..6d2581ec22ad 100644 --- a/drivers/acpi/acpica/exserial.c +++ b/drivers/acpi/acpica/exserial.c @@ -3,7 +3,7 @@ * * Module Name: exserial - field_unit support for serial address spaces * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. * *****************************************************************************/ @@ -21,7 +21,7 @@ ACPI_MODULE_NAME("exserial") * FUNCTION: acpi_ex_read_gpio * * PARAMETERS: obj_desc - The named field to read - * buffer - Where the return data is returnd + * buffer - Where the return data is returned * * RETURN: Status * @@ -195,6 +195,18 @@ acpi_ex_read_serial_bus(union acpi_operand_object *obj_desc, function = ACPI_READ | (accessor_type << 16); break; + case ACPI_ADR_SPACE_PLATFORM_RT: + + buffer_length = ACPI_PRM_INPUT_BUFFER_SIZE; + function = ACPI_READ; + break; + + case ACPI_ADR_SPACE_FIXED_HARDWARE: + + buffer_length = ACPI_FFH_INPUT_BUFFER_SIZE; + function = ACPI_READ; + break; + default: return_ACPI_STATUS(AE_AML_INVALID_SPACE_ID); } @@ -311,6 +323,18 @@ acpi_ex_write_serial_bus(union acpi_operand_object *source_desc, function = ACPI_WRITE | (accessor_type << 16); break; + case ACPI_ADR_SPACE_PLATFORM_RT: + + buffer_length = ACPI_PRM_INPUT_BUFFER_SIZE; + function = ACPI_WRITE; + break; + + case ACPI_ADR_SPACE_FIXED_HARDWARE: + + buffer_length = ACPI_FFH_INPUT_BUFFER_SIZE; + function = ACPI_WRITE; + break; + default: return_ACPI_STATUS(AE_AML_INVALID_SPACE_ID); } @@ -325,8 +349,7 @@ acpi_ex_write_serial_bus(union acpi_operand_object *source_desc, /* Copy the input buffer data to the transfer buffer */ buffer = buffer_desc->buffer.pointer; - data_length = (buffer_length < source_desc->buffer.length ? - buffer_length : source_desc->buffer.length); + data_length = ACPI_MIN(buffer_length, source_desc->buffer.length); memcpy(buffer, source_desc->buffer.pointer, data_length); /* Lock entire transaction if requested */ diff --git a/drivers/acpi/acpica/exstore.c b/drivers/acpi/acpica/exstore.c index 75d5665b7b2f..cbc42207496d 100644 --- a/drivers/acpi/acpica/exstore.c +++ b/drivers/acpi/acpica/exstore.c @@ -3,7 +3,7 @@ * * Module Name: exstore - AML Interpreter object store support * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. 
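The exregion.c SystemMemory handler above now keeps every mapping it creates on a per-region list and reuses a cached mapping whenever it covers the requested range, instead of tearing down and recreating a single window on each miss. A condensed sketch of that lookup-or-map pattern in ACPICA terms; struct example_mapping and example_get_virt are illustrative (the real structures are struct acpi_mem_space_context and struct acpi_mem_mapping), and the request is assumed to fit in one page.

struct example_mapping {
	acpi_physical_address	phys;
	acpi_size		length;
	void			*logical;
	struct example_mapping	*next;
};

static void *example_get_virt(struct example_mapping **list,
			      acpi_physical_address address, acpi_size length)
{
	struct example_mapping *mm;

	/* First look for a cached mapping covering the whole range. */
	for (mm = *list; mm; mm = mm->next) {
		if (address >= mm->phys &&
		    address + length <= mm->phys + mm->length)
			return (u8 *)mm->logical + (address - mm->phys);
	}

	/* Miss: create a new page-sized mapping and make it the list head. */
	mm = ACPI_ALLOCATE_ZEROED(sizeof(*mm));
	if (!mm)
		return NULL;

	mm->phys = address;
	mm->length = ACPI_DEFAULT_PAGE_SIZE;
	mm->logical = acpi_os_map_memory(address, mm->length);
	if (!mm->logical) {
		ACPI_FREE(mm);
		return NULL;
	}

	mm->next = *list;
	*list = mm;
	return (u8 *)mm->logical + (address - mm->phys);
}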
* *****************************************************************************/ @@ -96,7 +96,7 @@ acpi_ex_store(union acpi_operand_object *source_desc, return_ACPI_STATUS(AE_OK); } - /*lint -fallthrough */ + ACPI_FALLTHROUGH; default: @@ -422,7 +422,7 @@ acpi_ex_store_object_to_node(union acpi_operand_object *source_desc, break; } - /* Fallthrough */ + ACPI_FALLTHROUGH; case ACPI_TYPE_DEVICE: case ACPI_TYPE_EVENT: diff --git a/drivers/acpi/acpica/exstoren.c b/drivers/acpi/acpica/exstoren.c index 31cba19652ed..0470b2639831 100644 --- a/drivers/acpi/acpica/exstoren.c +++ b/drivers/acpi/acpica/exstoren.c @@ -4,7 +4,7 @@ * Module Name: exstoren - AML Interpreter object store support, * Store to Node (namespace object) * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/exstorob.c b/drivers/acpi/acpica/exstorob.c index 4cd82ff509bc..5b168fbc03e8 100644 --- a/drivers/acpi/acpica/exstorob.c +++ b/drivers/acpi/acpica/exstorob.c @@ -3,7 +3,7 @@ * * Module Name: exstorob - AML object store support, store to object * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/exsystem.c b/drivers/acpi/acpica/exsystem.c index ec8b5a22cad4..7f843c9d8a06 100644 --- a/drivers/acpi/acpica/exsystem.c +++ b/drivers/acpi/acpica/exsystem.c @@ -3,7 +3,7 @@ * * Module Name: exsystem - Interface to OS services * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. * *****************************************************************************/ @@ -107,7 +107,7 @@ acpi_status acpi_ex_system_wait_mutex(acpi_mutex mutex, u16 timeout) * * FUNCTION: acpi_ex_system_do_stall * - * PARAMETERS: how_long - The amount of time to stall, + * PARAMETERS: how_long_us - The amount of time to stall, * in microseconds * * RETURN: Status @@ -120,24 +120,30 @@ acpi_status acpi_ex_system_wait_mutex(acpi_mutex mutex, u16 timeout) * ******************************************************************************/ -acpi_status acpi_ex_system_do_stall(u32 how_long) +acpi_status acpi_ex_system_do_stall(u32 how_long_us) { acpi_status status = AE_OK; ACPI_FUNCTION_ENTRY(); - if (how_long > 255) { /* 255 microseconds */ + if (how_long_us > 255) { /* - * Longer than 255 usec, this is an error + * Longer than 255 microseconds, this is an error * * (ACPI specifies 100 usec as max, but this gives some slack in * order to support existing BIOSs) */ - ACPI_ERROR((AE_INFO, - "Time parameter is too large (%u)", how_long)); + ACPI_ERROR_ONCE((AE_INFO, + "Time parameter is too large (%u)", + how_long_us)); status = AE_AML_OPERAND_VALUE; } else { - acpi_os_stall(how_long); + if (how_long_us > 100) { + ACPI_WARNING_ONCE((AE_INFO, + "Time parameter %u us > 100 us violating ACPI spec, please fix the firmware.", + how_long_us)); + } + acpi_os_stall(how_long_us); } return (status); @@ -147,7 +153,7 @@ acpi_status acpi_ex_system_do_stall(u32 how_long) * * FUNCTION: acpi_ex_system_do_sleep * - * PARAMETERS: how_long - The amount of time to sleep, + * PARAMETERS: how_long_ms - The amount of time to sleep, * in milliseconds * * RETURN: None @@ -156,7 +162,7 @@ acpi_status acpi_ex_system_do_stall(u32 how_long) * ******************************************************************************/ -acpi_status acpi_ex_system_do_sleep(u64 how_long) 
+acpi_status acpi_ex_system_do_sleep(u64 how_long_ms) { ACPI_FUNCTION_ENTRY(); @@ -168,11 +174,11 @@ acpi_status acpi_ex_system_do_sleep(u64 how_long) * For compatibility with other ACPI implementations and to prevent * accidental deep sleeps, limit the sleep time to something reasonable. */ - if (how_long > ACPI_MAX_SLEEP) { - how_long = ACPI_MAX_SLEEP; + if (how_long_ms > ACPI_MAX_SLEEP) { + how_long_ms = ACPI_MAX_SLEEP; } - acpi_os_sleep(how_long); + acpi_os_sleep(how_long_ms); /* And now we must get the interpreter again */ diff --git a/drivers/acpi/acpica/extrace.c b/drivers/acpi/acpica/extrace.c index 9bd3fa56b51a..36934d4f26fb 100644 --- a/drivers/acpi/acpica/extrace.c +++ b/drivers/acpi/acpica/extrace.c @@ -3,7 +3,7 @@ * * Module Name: extrace - Support for interpreter execution tracing * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. * *****************************************************************************/ @@ -136,9 +136,9 @@ acpi_ex_trace_point(acpi_trace_event_type type, if (pathname) { ACPI_DEBUG_PRINT((ACPI_DB_TRACE_POINT, - "%s %s [0x%p:%s] execution.\n", + "%s %s [%s] execution.\n", acpi_ex_get_trace_event_name(type), - begin ? "Begin" : "End", aml, pathname)); + begin ? "Begin" : "End", pathname)); } else { ACPI_DEBUG_PRINT((ACPI_DB_TRACE_POINT, "%s %s [0x%p] execution.\n", @@ -149,6 +149,57 @@ acpi_ex_trace_point(acpi_trace_event_type type, /******************************************************************************* * + * FUNCTION: acpi_ex_trace_args + * + * PARAMETERS: params - AML method arguments + * count - numer of method arguments + * + * RETURN: None + * + * DESCRIPTION: Trace any arguments + * + ******************************************************************************/ + +void +acpi_ex_trace_args(union acpi_operand_object **params, u32 count) +{ + u32 i; + + ACPI_FUNCTION_NAME(ex_trace_args); + + for (i = 0; i < count; i++) { + union acpi_operand_object *obj_desc = params[i]; + + if (!i) { + ACPI_DEBUG_PRINT((ACPI_DB_TRACE_POINT, " ")); + } + + switch (obj_desc->common.type) { + case ACPI_TYPE_INTEGER: + ACPI_DEBUG_PRINT_RAW((ACPI_DB_TRACE_POINT, "%llx", obj_desc->integer.value)); + break; + case ACPI_TYPE_STRING: + if (!obj_desc->string.length) { + ACPI_DEBUG_PRINT_RAW((ACPI_DB_TRACE_POINT, "NULL")); + continue; + } + if (ACPI_IS_DEBUG_ENABLED(ACPI_LV_TRACE_POINT, _COMPONENT)) + acpi_ut_print_string(obj_desc->string.pointer, ACPI_UINT8_MAX); + break; + default: + ACPI_DEBUG_PRINT_RAW((ACPI_DB_TRACE_POINT, "Unknown")); + break; + } + if (i+1 == count) { + ACPI_DEBUG_PRINT_RAW((ACPI_DB_TRACE_POINT, "\n")); + } else { + ACPI_DEBUG_PRINT_RAW((ACPI_DB_TRACE_POINT, ", ")); + } + } +} + +/******************************************************************************* + * * FUNCTION: acpi_ex_start_trace_method * * PARAMETERS: method_node - Node of the method diff --git a/drivers/acpi/acpica/exutils.c b/drivers/acpi/acpica/exutils.c index bd22e27adf9b..cc10c0732218 100644 --- a/drivers/acpi/acpica/exutils.c +++ b/drivers/acpi/acpica/exutils.c @@ -3,7 +3,7 @@ * * Module Name: exutils - interpreter/scanner utilities * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. 
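For reference, the limits the exsystem.c stall/sleep routines above enforce on the AML timing operators, restated as a tiny standalone helper. The EXAMPLE_* constants mirror the 255 us hard limit and the spec's 100 us Stall() ceiling visible above; the stand-in value for ACPI_MAX_SLEEP is an assumption.

#include <stdbool.h>

#define EXAMPLE_STALL_HARD_LIMIT_US	255U	/* rejected above this */
#define EXAMPLE_STALL_SPEC_LIMIT_US	100U	/* spec maximum, warn above this */
#define EXAMPLE_MAX_SLEEP_MS		2000U	/* stand-in for ACPI_MAX_SLEEP */

/* Stall() requests: returns false where ACPICA would fail the operator,
 * and flags the legal-but-spec-violating 101-255 us window. */
static bool example_stall_request_ok(unsigned int us, bool *warn_once)
{
	*warn_once = (us > EXAMPLE_STALL_SPEC_LIMIT_US);
	return us <= EXAMPLE_STALL_HARD_LIMIT_US;
}

/* Sleep() requests are never rejected, only clamped. */
static unsigned long long example_clamp_sleep_ms(unsigned long long ms)
{
	return ms > EXAMPLE_MAX_SLEEP_MS ? EXAMPLE_MAX_SLEEP_MS : ms;
}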
* *****************************************************************************/ @@ -160,7 +160,7 @@ u8 acpi_ex_truncate_for32bit_table(union acpi_operand_object *obj_desc) * RETURN: None * * DESCRIPTION: Obtain the ACPI hardware Global Lock, only if the field - * flags specifiy that it is to be obtained before field access. + * flags specify that it is to be obtained before field access. * ******************************************************************************/ diff --git a/drivers/acpi/acpica/hwacpi.c b/drivers/acpi/acpica/hwacpi.c index 525e6ea5c114..a1e1fa787566 100644 --- a/drivers/acpi/acpica/hwacpi.c +++ b/drivers/acpi/acpica/hwacpi.c @@ -3,7 +3,7 @@ * * Module Name: hwacpi - ACPI Hardware Initialization/Mode Interface * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/hwesleep.c b/drivers/acpi/acpica/hwesleep.c index e0ad3f11142e..631fd8e2b774 100644 --- a/drivers/acpi/acpica/hwesleep.c +++ b/drivers/acpi/acpica/hwesleep.c @@ -4,7 +4,7 @@ * Name: hwesleep.c - ACPI Hardware Sleep/Wake Support functions for the * extended FADT-V5 sleep registers. * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. * *****************************************************************************/ @@ -104,7 +104,9 @@ acpi_status acpi_hw_extended_sleep(u8 sleep_state) /* Flush caches, as per ACPI specification */ - ACPI_FLUSH_CPU_CACHE(); + if (sleep_state < ACPI_STATE_S4) { + ACPI_FLUSH_CPU_CACHE(); + } status = acpi_os_enter_sleep(sleep_state, sleep_control, 0); if (status == AE_CTRL_TERMINATE) { @@ -147,17 +149,13 @@ acpi_status acpi_hw_extended_sleep(u8 sleep_state) acpi_status acpi_hw_extended_wake_prep(u8 sleep_state) { - acpi_status status; u8 sleep_type_value; ACPI_FUNCTION_TRACE(hw_extended_wake_prep); - status = acpi_get_sleep_type_data(ACPI_STATE_S0, - &acpi_gbl_sleep_type_a, - &acpi_gbl_sleep_type_b); - if (ACPI_SUCCESS(status)) { + if (acpi_gbl_sleep_type_a_s0 != ACPI_SLEEP_TYPE_INVALID) { sleep_type_value = - ((acpi_gbl_sleep_type_a << ACPI_X_SLEEP_TYPE_POSITION) & + ((acpi_gbl_sleep_type_a_s0 << ACPI_X_SLEEP_TYPE_POSITION) & ACPI_X_SLEEP_TYPE_MASK); (void)acpi_write((u64)(sleep_type_value | ACPI_X_SLEEP_ENABLE), diff --git a/drivers/acpi/acpica/hwgpe.c b/drivers/acpi/acpica/hwgpe.c index 2d2e2e41a685..386f4759c317 100644 --- a/drivers/acpi/acpica/hwgpe.c +++ b/drivers/acpi/acpica/hwgpe.c @@ -3,7 +3,7 @@ * * Module Name: hwgpe - Low level GPE enable/disable/clear functions * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. * *****************************************************************************/ @@ -26,6 +26,76 @@ acpi_hw_gpe_enable_write(u8 enable_mask, /****************************************************************************** * + * FUNCTION: acpi_hw_gpe_read + * + * PARAMETERS: value - Where the value is returned + * reg - GPE register structure + * + * RETURN: Status + * + * DESCRIPTION: Read from a GPE register in either memory or IO space. + * + * LIMITATIONS: <These limitations also apply to acpi_hw_gpe_write> + * space_ID must be system_memory or system_IO. 
+ * + ******************************************************************************/ + +acpi_status acpi_hw_gpe_read(u64 *value, struct acpi_gpe_address *reg) +{ + acpi_status status; + u32 value32; + + if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) { +#ifdef ACPI_GPE_USE_LOGICAL_ADDRESSES + *value = (u64)ACPI_GET8((unsigned long)reg->address); + return_ACPI_STATUS(AE_OK); +#else + return acpi_os_read_memory((acpi_physical_address)reg->address, + value, ACPI_GPE_REGISTER_WIDTH); +#endif + } + + status = acpi_os_read_port((acpi_io_address)reg->address, + &value32, ACPI_GPE_REGISTER_WIDTH); + if (ACPI_FAILURE(status)) + return_ACPI_STATUS(status); + + *value = (u64)value32; + + return_ACPI_STATUS(AE_OK); +} + +/****************************************************************************** + * + * FUNCTION: acpi_hw_gpe_write + * + * PARAMETERS: value - Value to be written + * reg - GPE register structure + * + * RETURN: Status + * + * DESCRIPTION: Write to a GPE register in either memory or IO space. + * + ******************************************************************************/ + +acpi_status acpi_hw_gpe_write(u64 value, struct acpi_gpe_address *reg) +{ + if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) { +#ifdef ACPI_GPE_USE_LOGICAL_ADDRESSES + ACPI_SET8((unsigned long)reg->address, value); + return_ACPI_STATUS(AE_OK); +#else + return acpi_os_write_memory((acpi_physical_address)reg->address, + value, ACPI_GPE_REGISTER_WIDTH); +#endif + } + + return acpi_os_write_port((acpi_io_address)reg->address, (u32)value, + ACPI_GPE_REGISTER_WIDTH); +} + +/****************************************************************************** + * * FUNCTION: acpi_hw_get_gpe_register_bit * * PARAMETERS: gpe_event_info - Info block for the GPE @@ -79,7 +149,8 @@ acpi_hw_low_set_gpe(struct acpi_gpe_event_info *gpe_event_info, u32 action) /* Get current value of the enable register that contains this GPE */ - status = acpi_hw_read(&enable_mask, &gpe_register_info->enable_address); + status = acpi_hw_gpe_read(&enable_mask, + &gpe_register_info->enable_address); if (ACPI_FAILURE(status)) { return (status); } @@ -96,7 +167,7 @@ acpi_hw_low_set_gpe(struct acpi_gpe_event_info *gpe_event_info, u32 action) return (AE_BAD_PARAMETER); } - /*lint -fallthrough */ + ACPI_FALLTHROUGH; case ACPI_GPE_ENABLE: @@ -118,9 +189,8 @@ acpi_hw_low_set_gpe(struct acpi_gpe_event_info *gpe_event_info, u32 action) /* Write the updated enable mask */ - status = - acpi_hw_write(enable_mask, - &gpe_register_info->enable_address); + status = acpi_hw_gpe_write(enable_mask, + &gpe_register_info->enable_address); } return (status); } @@ -158,8 +228,8 @@ acpi_status acpi_hw_clear_gpe(struct acpi_gpe_event_info *gpe_event_info) */ register_bit = acpi_hw_get_gpe_register_bit(gpe_event_info); - status = - acpi_hw_write(register_bit, &gpe_register_info->status_address); + status = acpi_hw_gpe_write(register_bit, + &gpe_register_info->status_address); return (status); } @@ -227,7 +297,7 @@ acpi_hw_get_gpe_status(struct acpi_gpe_event_info *gpe_event_info, /* GPE currently enabled (enable bit == 1)? */ - status = acpi_hw_read(&in_byte, &gpe_register_info->enable_address); + status = acpi_hw_gpe_read(&in_byte, &gpe_register_info->enable_address); if (ACPI_FAILURE(status)) { return (status); } @@ -238,7 +308,7 @@ acpi_hw_get_gpe_status(struct acpi_gpe_event_info *gpe_event_info, /* GPE currently active (status bit == 1)? 
*/ - status = acpi_hw_read(&in_byte, &gpe_register_info->status_address); + status = acpi_hw_gpe_read(&in_byte, &gpe_register_info->status_address); if (ACPI_FAILURE(status)) { return (status); } @@ -274,7 +344,8 @@ acpi_hw_gpe_enable_write(u8 enable_mask, gpe_register_info->enable_mask = enable_mask; - status = acpi_hw_write(enable_mask, &gpe_register_info->enable_address); + status = acpi_hw_gpe_write(enable_mask, + &gpe_register_info->enable_address); return (status); } @@ -341,9 +412,8 @@ acpi_hw_clear_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info, /* Clear status on all GPEs in this register */ - status = - acpi_hw_write(0xFF, - &gpe_block->register_info[i].status_address); + status = acpi_hw_gpe_write(0xFF, + &gpe_block->register_info[i].status_address); if (ACPI_FAILURE(status)) { return (status); } @@ -444,6 +514,65 @@ acpi_hw_enable_wakeup_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info, return (AE_OK); } +struct acpi_gpe_block_status_context { + struct acpi_gpe_register_info *gpe_skip_register_info; + u8 gpe_skip_mask; + u8 retval; +}; + +/****************************************************************************** + * + * FUNCTION: acpi_hw_get_gpe_block_status + * + * PARAMETERS: gpe_xrupt_info - GPE Interrupt info + * gpe_block - Gpe Block info + * context - GPE list walk context data + * + * RETURN: Success + * + * DESCRIPTION: Produce a combined GPE status bits mask for the given block. + * + ******************************************************************************/ + +static acpi_status +acpi_hw_get_gpe_block_status(struct acpi_gpe_xrupt_info *gpe_xrupt_info, + struct acpi_gpe_block_info *gpe_block, + void *context) +{ + struct acpi_gpe_block_status_context *c = context; + struct acpi_gpe_register_info *gpe_register_info; + u64 in_enable, in_status; + acpi_status status; + u8 ret_mask; + u32 i; + + /* Examine each GPE Register within the block */ + + for (i = 0; i < gpe_block->register_count; i++) { + gpe_register_info = &gpe_block->register_info[i]; + + status = acpi_hw_gpe_read(&in_enable, + &gpe_register_info->enable_address); + if (ACPI_FAILURE(status)) { + continue; + } + + status = acpi_hw_gpe_read(&in_status, + &gpe_register_info->status_address); + if (ACPI_FAILURE(status)) { + continue; + } + + ret_mask = in_enable & in_status; + if (ret_mask && c->gpe_skip_register_info == gpe_register_info) { + ret_mask &= ~c->gpe_skip_mask; + } + c->retval |= ret_mask; + } + + return (AE_OK); +} + /****************************************************************************** * * FUNCTION: acpi_hw_disable_all_gpes @@ -510,4 +639,45 @@ acpi_status acpi_hw_enable_all_wakeup_gpes(void) return_ACPI_STATUS(status); } +/****************************************************************************** + * + * FUNCTION: acpi_hw_check_all_gpes + * + * PARAMETERS: gpe_skip_device - GPE devoce of the GPE to skip + * gpe_skip_number - Number of the GPE to skip + * + * RETURN: Combined status of all GPEs + * + * DESCRIPTION: Check all enabled GPEs in all GPE blocks, except for the one + * represented by the "skip" arguments, and return TRUE if the + * status bit is set for at least one of them of FALSE otherwise. 
+ * + ******************************************************************************/ + +u8 acpi_hw_check_all_gpes(acpi_handle gpe_skip_device, u32 gpe_skip_number) +{ + struct acpi_gpe_block_status_context context = { + .gpe_skip_register_info = NULL, + .retval = 0, + }; + struct acpi_gpe_event_info *gpe_event_info; + acpi_cpu_flags flags; + + ACPI_FUNCTION_TRACE(acpi_hw_check_all_gpes); + + flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); + + gpe_event_info = acpi_ev_get_gpe_event_info(gpe_skip_device, + gpe_skip_number); + if (gpe_event_info) { + context.gpe_skip_register_info = gpe_event_info->register_info; + context.gpe_skip_mask = acpi_hw_get_gpe_register_bit(gpe_event_info); + } + + acpi_os_release_lock(acpi_gbl_gpe_lock, flags); + + (void)acpi_ev_walk_gpe_list(acpi_hw_get_gpe_block_status, &context); + return (context.retval != 0); +} + #endif /* !ACPI_REDUCED_HARDWARE */ diff --git a/drivers/acpi/acpica/hwregs.c b/drivers/acpi/acpica/hwregs.c index 69603ba52a3a..f62d5d024205 100644 --- a/drivers/acpi/acpica/hwregs.c +++ b/drivers/acpi/acpica/hwregs.c @@ -446,7 +446,7 @@ struct acpi_bit_register_info *acpi_hw_get_bit_register_info(u32 register_id) * RETURN: Status * * DESCRIPTION: Write the PM1 A/B control registers. These registers are - * different than than the PM1 A/B status and enable registers + * different than the PM1 A/B status and enable registers * in that different values can be written to the A/B registers. * Most notably, the SLP_TYP bits can be different, as per the * values returned from the _Sx predefined methods. diff --git a/drivers/acpi/acpica/hwsleep.c b/drivers/acpi/acpica/hwsleep.c index d8b8fc2ff563..87d78bef6323 100644 --- a/drivers/acpi/acpica/hwsleep.c +++ b/drivers/acpi/acpica/hwsleep.c @@ -4,7 +4,7 @@ * Name: hwsleep.c - ACPI Hardware Sleep/Wake Support functions for the * original/legacy sleep/PM registers. * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. * *****************************************************************************/ @@ -110,7 +110,9 @@ acpi_status acpi_hw_legacy_sleep(u8 sleep_state) /* Flush caches, as per ACPI specification */ - ACPI_FLUSH_CPU_CACHE(); + if (sleep_state < ACPI_STATE_S4) { + ACPI_FLUSH_CPU_CACHE(); + } status = acpi_os_enter_sleep(sleep_state, pm1a_control, pm1b_control); if (status == AE_CTRL_TERMINATE) { @@ -179,7 +181,7 @@ acpi_status acpi_hw_legacy_sleep(u8 sleep_state) acpi_status acpi_hw_legacy_wake_prep(u8 sleep_state) { - acpi_status status; + acpi_status status = AE_OK; struct acpi_bit_register_info *sleep_type_reg_info; struct acpi_bit_register_info *sleep_enable_reg_info; u32 pm1a_control; @@ -192,10 +194,7 @@ acpi_status acpi_hw_legacy_wake_prep(u8 sleep_state) * This is unclear from the ACPI Spec, but it is required * by some machines. 
*/ - status = acpi_get_sleep_type_data(ACPI_STATE_S0, - &acpi_gbl_sleep_type_a, - &acpi_gbl_sleep_type_b); - if (ACPI_SUCCESS(status)) { + if (acpi_gbl_sleep_type_a_s0 != ACPI_SLEEP_TYPE_INVALID) { sleep_type_reg_info = acpi_hw_get_bit_register_info(ACPI_BITREG_SLEEP_TYPE); sleep_enable_reg_info = @@ -216,9 +215,9 @@ acpi_status acpi_hw_legacy_wake_prep(u8 sleep_state) /* Insert the SLP_TYP bits */ - pm1a_control |= (acpi_gbl_sleep_type_a << + pm1a_control |= (acpi_gbl_sleep_type_a_s0 << sleep_type_reg_info->bit_position); - pm1b_control |= (acpi_gbl_sleep_type_b << + pm1b_control |= (acpi_gbl_sleep_type_b_s0 << sleep_type_reg_info->bit_position); /* Write the control registers and ignore any errors */ @@ -300,6 +299,18 @@ acpi_status acpi_hw_legacy_wake(u8 sleep_state) [ACPI_EVENT_POWER_BUTTON]. status_register_id, ACPI_CLEAR_STATUS); + /* Enable sleep button */ + + (void) + acpi_write_bit_register(acpi_gbl_fixed_event_info + [ACPI_EVENT_SLEEP_BUTTON]. + enable_register_id, ACPI_ENABLE_EVENT); + + (void) + acpi_write_bit_register(acpi_gbl_fixed_event_info + [ACPI_EVENT_SLEEP_BUTTON]. + status_register_id, ACPI_CLEAR_STATUS); + acpi_hw_execute_sleep_method(METHOD_PATHNAME__SST, ACPI_SST_WORKING); return_ACPI_STATUS(status); } diff --git a/drivers/acpi/acpica/hwtimer.c b/drivers/acpi/acpica/hwtimer.c index 5d5e27146fc2..a5e0bccae6a4 100644 --- a/drivers/acpi/acpica/hwtimer.c +++ b/drivers/acpi/acpica/hwtimer.c @@ -3,7 +3,7 @@ * * Name: hwtimer.c - ACPI Power Management Timer Interface * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/hwvalid.c b/drivers/acpi/acpica/hwvalid.c index 24f9b61aa404..496fd9e49f0b 100644 --- a/drivers/acpi/acpica/hwvalid.c +++ b/drivers/acpi/acpica/hwvalid.c @@ -3,7 +3,7 @@ * * Module Name: hwvalid - I/O request validation * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. * *****************************************************************************/ @@ -23,8 +23,8 @@ acpi_hw_validate_io_request(acpi_io_address address, u32 bit_width); * * The table is used to implement the Microsoft port access rules that * first appeared in Windows XP. Some ports are always illegal, and some - * ports are only illegal if the BIOS calls _OSI with a win_XP string or - * later (meaning that the BIOS itelf is post-XP.) + * ports are only illegal if the BIOS calls _OSI with nothing newer than + * the specific _OSI strings. * * This provides ACPICA with the desired port protections and * Microsoft compatibility. 
@@ -145,7 +145,8 @@ acpi_hw_validate_io_request(acpi_io_address address, u32 bit_width) /* Port illegality may depend on the _OSI calls made by the BIOS */ - if (acpi_gbl_osi_data >= port_info->osi_dependency) { + if (port_info->osi_dependency == ACPI_ALWAYS_ILLEGAL || + acpi_gbl_osi_data == port_info->osi_dependency) { ACPI_DEBUG_PRINT((ACPI_DB_VALUES, "Denied AML access to port 0x%8.8X%8.8X/%X (%s 0x%.4X-0x%.4X)\n", ACPI_FORMAT_UINT64(address), @@ -292,3 +293,33 @@ acpi_status acpi_hw_write_port(acpi_io_address address, u32 value, u32 width) return (AE_OK); } + +/****************************************************************************** + * + * FUNCTION: acpi_hw_validate_io_block + * + * PARAMETERS: Address Address of I/O port/register blobk + * bit_width Number of bits (8,16,32) in each register + * count Number of registers in the block + * + * RETURN: Status + * + * DESCRIPTION: Validates a block of I/O ports/registers. + * + ******************************************************************************/ + +acpi_status acpi_hw_validate_io_block(u64 address, u32 bit_width, u32 count) +{ + acpi_status status; + + while (count--) { + status = acpi_hw_validate_io_request((acpi_io_address)address, + bit_width); + if (ACPI_FAILURE(status)) + return_ACPI_STATUS(status); + + address += ACPI_DIV_8(bit_width); + } + + return_ACPI_STATUS(AE_OK); +} diff --git a/drivers/acpi/acpica/hwxface.c b/drivers/acpi/acpica/hwxface.c index 6e39a771a56e..847cd1b2493d 100644 --- a/drivers/acpi/acpica/hwxface.c +++ b/drivers/acpi/acpica/hwxface.c @@ -3,7 +3,7 @@ * * Module Name: hwxface - Public ACPICA hardware interfaces * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/hwxfsleep.c b/drivers/acpi/acpica/hwxfsleep.c index 3f22f7dd4556..9aabe30416da 100644 --- a/drivers/acpi/acpica/hwxfsleep.c +++ b/drivers/acpi/acpica/hwxfsleep.c @@ -3,7 +3,7 @@ * * Name: hwxfsleep.c - ACPI Hardware Sleep/Wake External Interfaces * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. * *****************************************************************************/ @@ -16,47 +16,11 @@ ACPI_MODULE_NAME("hwxfsleep") /* Local prototypes */ -#if (!ACPI_REDUCED_HARDWARE) static acpi_status acpi_hw_set_firmware_waking_vector(struct acpi_table_facs *facs, acpi_physical_address physical_address, acpi_physical_address physical_address64); -#endif -static acpi_status acpi_hw_sleep_dispatch(u8 sleep_state, u32 function_id); - -/* - * Dispatch table used to efficiently branch to the various sleep - * functions. 
- */ -#define ACPI_SLEEP_FUNCTION_ID 0 -#define ACPI_WAKE_PREP_FUNCTION_ID 1 -#define ACPI_WAKE_FUNCTION_ID 2 - -/* Legacy functions are optional, based upon ACPI_REDUCED_HARDWARE */ - -static struct acpi_sleep_functions acpi_sleep_dispatch[] = { - {ACPI_STRUCT_INIT(legacy_function, - ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_sleep)), - ACPI_STRUCT_INIT(extended_function, - acpi_hw_extended_sleep)}, - {ACPI_STRUCT_INIT(legacy_function, - ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake_prep)), - ACPI_STRUCT_INIT(extended_function, - acpi_hw_extended_wake_prep)}, - {ACPI_STRUCT_INIT(legacy_function, - ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake)), - ACPI_STRUCT_INIT(extended_function, - acpi_hw_extended_wake)} -}; - -/* - * These functions are removed for the ACPI_REDUCED_HARDWARE case: - * acpi_set_firmware_waking_vector - * acpi_enter_sleep_state_s4bios - */ - -#if (!ACPI_REDUCED_HARDWARE) /******************************************************************************* * * FUNCTION: acpi_hw_set_firmware_waking_vector @@ -142,6 +106,12 @@ acpi_set_firmware_waking_vector(acpi_physical_address physical_address, ACPI_EXPORT_SYMBOL(acpi_set_firmware_waking_vector) +/* + * These functions are removed for the ACPI_REDUCED_HARDWARE case: + * acpi_enter_sleep_state_s4bios + */ + +#if (!ACPI_REDUCED_HARDWARE) /******************************************************************************* * * FUNCTION: acpi_enter_sleep_state_s4bios @@ -189,10 +159,11 @@ acpi_status acpi_enter_sleep_state_s4bios(void) return_ACPI_STATUS(status); } - ACPI_FLUSH_CPU_CACHE(); - status = acpi_hw_write_port(acpi_gbl_FADT.smi_command, (u32)acpi_gbl_FADT.s4_bios_request, 8); + if (ACPI_FAILURE(status)) { + return_ACPI_STATUS(status); + } do { acpi_os_stall(ACPI_USEC_PER_MSEC); @@ -209,53 +180,6 @@ acpi_status acpi_enter_sleep_state_s4bios(void) ACPI_EXPORT_SYMBOL(acpi_enter_sleep_state_s4bios) #endif /* !ACPI_REDUCED_HARDWARE */ -/******************************************************************************* - * - * FUNCTION: acpi_hw_sleep_dispatch - * - * PARAMETERS: sleep_state - Which sleep state to enter/exit - * function_id - Sleep, wake_prep, or Wake - * - * RETURN: Status from the invoked sleep handling function. - * - * DESCRIPTION: Dispatch a sleep/wake request to the appropriate handling - * function. - * - ******************************************************************************/ -static acpi_status acpi_hw_sleep_dispatch(u8 sleep_state, u32 function_id) -{ - acpi_status status; - struct acpi_sleep_functions *sleep_functions = - &acpi_sleep_dispatch[function_id]; - -#if (!ACPI_REDUCED_HARDWARE) - /* - * If the Hardware Reduced flag is set (from the FADT), we must - * use the extended sleep registers (FADT). Note: As per the ACPI - * specification, these extended registers are to be used for HW-reduced - * platforms only. They are not general-purpose replacements for the - * legacy PM register sleep support. 
- */ - if (acpi_gbl_reduced_hardware) { - status = sleep_functions->extended_function(sleep_state); - } else { - /* Legacy sleep */ - - status = sleep_functions->legacy_function(sleep_state); - } - - return (status); - -#else - /* - * For the case where reduced-hardware-only code is being generated, - * we know that only the extended sleep registers are available - */ - status = sleep_functions->extended_function(sleep_state); - return (status); - -#endif /* !ACPI_REDUCED_HARDWARE */ -} /******************************************************************************* * @@ -288,6 +212,13 @@ acpi_status acpi_enter_sleep_state_prep(u8 sleep_state) return_ACPI_STATUS(status); } + status = acpi_get_sleep_type_data(ACPI_STATE_S0, + &acpi_gbl_sleep_type_a_s0, + &acpi_gbl_sleep_type_b_s0); + if (ACPI_FAILURE(status)) { + acpi_gbl_sleep_type_a_s0 = ACPI_SLEEP_TYPE_INVALID; + } + /* Execute the _PTS method (Prepare To Sleep) */ arg_list.count = 1; @@ -362,7 +293,12 @@ acpi_status acpi_enter_sleep_state(u8 sleep_state) return_ACPI_STATUS(AE_AML_OPERAND_VALUE); } - status = acpi_hw_sleep_dispatch(sleep_state, ACPI_SLEEP_FUNCTION_ID); +#if !ACPI_REDUCED_HARDWARE + if (!acpi_gbl_reduced_hardware) + status = acpi_hw_legacy_sleep(sleep_state); + else +#endif + status = acpi_hw_extended_sleep(sleep_state); return_ACPI_STATUS(status); } @@ -388,8 +324,12 @@ acpi_status acpi_leave_sleep_state_prep(u8 sleep_state) ACPI_FUNCTION_TRACE(acpi_leave_sleep_state_prep); - status = - acpi_hw_sleep_dispatch(sleep_state, ACPI_WAKE_PREP_FUNCTION_ID); +#if !ACPI_REDUCED_HARDWARE + if (!acpi_gbl_reduced_hardware) + status = acpi_hw_legacy_wake_prep(sleep_state); + else +#endif + status = acpi_hw_extended_wake_prep(sleep_state); return_ACPI_STATUS(status); } @@ -413,7 +353,12 @@ acpi_status acpi_leave_sleep_state(u8 sleep_state) ACPI_FUNCTION_TRACE(acpi_leave_sleep_state); - status = acpi_hw_sleep_dispatch(sleep_state, ACPI_WAKE_FUNCTION_ID); +#if !ACPI_REDUCED_HARDWARE + if (!acpi_gbl_reduced_hardware) + status = acpi_hw_legacy_wake(sleep_state); + else +#endif + status = acpi_hw_extended_wake(sleep_state); return_ACPI_STATUS(status); } diff --git a/drivers/acpi/acpica/nsaccess.c b/drivers/acpi/acpica/nsaccess.c index 75192b958544..a0c1a665dfc1 100644 --- a/drivers/acpi/acpica/nsaccess.c +++ b/drivers/acpi/acpica/nsaccess.c @@ -36,6 +36,7 @@ acpi_status acpi_ns_root_initialize(void) acpi_status status; const struct acpi_predefined_names *init_val = NULL; struct acpi_namespace_node *new_node; + struct acpi_namespace_node *prev_node = NULL; union acpi_operand_object *obj_desc; acpi_string val = NULL; @@ -61,12 +62,28 @@ acpi_status acpi_ns_root_initialize(void) */ acpi_gbl_root_node = &acpi_gbl_root_node_struct; - /* Enter the pre-defined names in the name table */ + /* Enter the predefined names in the name table */ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Entering predefined entries into namespace\n")); + /* + * Create the initial (default) namespace. 
+ * This namespace looks like something similar to this: + * + * ACPI Namespace (from Namespace Root): + * 0 _GPE Scope 00203160 00 + * 0 _PR_ Scope 002031D0 00 + * 0 _SB_ Device 00203240 00 Notify Object: 0020ADD8 + * 0 _SI_ Scope 002032B0 00 + * 0 _TZ_ Device 00203320 00 + * 0 _REV Integer 00203390 00 = 0000000000000002 + * 0 _OS_ String 00203488 00 Len 14 "Microsoft Windows NT" + * 0 _GL_ Mutex 00203580 00 Object 002035F0 + * 0 _OSI Method 00203678 00 Args 1 Len 0000 Aml 00000000 + */ for (init_val = acpi_gbl_pre_defined_names; init_val->name; init_val++) { + status = AE_OK; /* _OSI is optional for now, will be permanent later */ @@ -75,17 +92,31 @@ acpi_status acpi_ns_root_initialize(void) continue; } - status = - acpi_ns_lookup(NULL, ACPI_CAST_PTR(char, init_val->name), - init_val->type, ACPI_IMODE_LOAD_PASS2, - ACPI_NS_NO_UPSEARCH, NULL, &new_node); - if (ACPI_FAILURE(status)) { - ACPI_EXCEPTION((AE_INFO, status, - "Could not create predefined name %s", - init_val->name)); - continue; + /* + * Create, init, and link the new predefined name + * Note: No need to use acpi_ns_lookup here because all the + * predefined names are at the root level. It is much easier to + * just create and link the new node(s) here. + */ + new_node = + acpi_ns_create_node(*ACPI_CAST_PTR(u32, init_val->name)); + if (!new_node) { + status = AE_NO_MEMORY; + goto unlock_and_exit; } + new_node->descriptor_type = ACPI_DESC_TYPE_NAMED; + new_node->type = init_val->type; + + if (!prev_node) { + acpi_gbl_root_node_struct.child = new_node; + } else { + prev_node->peer = new_node; + } + + new_node->parent = &acpi_gbl_root_node_struct; + prev_node = new_node; + /* * Name entered successfully. If entry in pre_defined_names[] specifies * an initial value, create the initial value. 
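The nsaccess.c change above replaces the acpi_ns_lookup() calls with direct creation and linking of the predefined root-level nodes: the root holds a pointer to its first child, and the remaining predefined names are chained through the peer pointer. A toy, self-contained model of that child/peer linkage (simplified toy_node type, not the ACPICA structures):

#include <stdio.h>
#include <string.h>

/* Toy model of the linkage used above: a parent points at its first child,
 * and siblings are chained through "peer".  Names are 4-character segments. */
struct toy_node {
    char name[5];
    struct toy_node *parent;
    struct toy_node *child;
    struct toy_node *peer;
};

static void link_child(struct toy_node *parent, struct toy_node **prev,
                       struct toy_node *node, const char *name)
{
    memcpy(node->name, name, 4);
    node->name[4] = '\0';
    node->parent = parent;
    node->child = NULL;
    node->peer = NULL;

    if (!*prev)
        parent->child = node;     /* first predefined name under the root */
    else
        (*prev)->peer = node;     /* append to the sibling list */

    *prev = node;
}

int main(void)
{
    struct toy_node root = { "\\___", NULL, NULL, NULL };
    struct toy_node gpe, sb, osi, *prev = NULL;

    link_child(&root, &prev, &gpe, "_GPE");
    link_child(&root, &prev, &sb,  "_SB_");
    link_child(&root, &prev, &osi, "_OSI");

    for (struct toy_node *n = root.child; n; n = n->peer)
        printf("%s parent=%s\n", n->name, n->parent->name);
    return 0;
}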
@@ -131,7 +162,7 @@ acpi_status acpi_ns_root_initialize(void) new_node->value = obj_desc->method.param_count; #else - /* Mark this as a very SPECIAL method */ + /* Mark this as a very SPECIAL method (_OSI) */ obj_desc->method.info_flags = ACPI_METHOD_INTERNAL_ONLY; @@ -566,7 +597,7 @@ acpi_ns_lookup(union acpi_generic_state *scope_info, if (flags & ACPI_NS_PREFIX_MUST_EXIST) { acpi_os_printf(ACPI_MSG_BIOS_ERROR "Object does not exist: %4.4s\n", - &simple_name); + (char *)&simple_name); } #endif /* Name not found in ACPI namespace */ @@ -683,7 +714,7 @@ acpi_ns_lookup(union acpi_generic_state *scope_info, /* Point to next name segment and make this node current */ - path += ACPI_NAME_SIZE; + path += ACPI_NAMESEG_SIZE; current_node = this_node; } diff --git a/drivers/acpi/acpica/nsalloc.c b/drivers/acpi/acpica/nsalloc.c index 5470213b8e64..83d26abcf448 100644 --- a/drivers/acpi/acpica/nsalloc.c +++ b/drivers/acpi/acpica/nsalloc.c @@ -74,6 +74,10 @@ void acpi_ns_delete_node(struct acpi_namespace_node *node) ACPI_FUNCTION_NAME(ns_delete_node); + if (!node) { + return_VOID; + } + /* Detach an object if there is one */ acpi_ns_detach_object(node); @@ -237,7 +241,7 @@ void acpi_ns_install_node(struct acpi_walk_state *walk_state, struct acpi_namesp node->type = (u8) type; ACPI_DEBUG_PRINT((ACPI_DB_NAMES, - "%4.4s (%s) [Node %p Owner %X] added to %4.4s (%s) [Node %p]\n", + "%4.4s (%s) [Node %p Owner %3.3X] added to %4.4s (%s) [Node %p]\n", acpi_ut_get_node_name(node), acpi_ut_get_type_name(node->type), node, owner_id, acpi_ut_get_node_name(parent_node), @@ -290,7 +294,7 @@ void acpi_ns_delete_children(struct acpi_namespace_node *parent_node) node_to_delete = next_node; next_node = next_node->peer; acpi_ns_delete_node(node_to_delete); - }; + } /* Clear the parent's child pointer */ diff --git a/drivers/acpi/acpica/nsarguments.c b/drivers/acpi/acpica/nsarguments.c index b9ede797b654..366d54a1d157 100644 --- a/drivers/acpi/acpica/nsarguments.c +++ b/drivers/acpi/acpica/nsarguments.c @@ -3,7 +3,7 @@ * * Module Name: nsarguments - Validation of args for ACPI predefined methods * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. * *****************************************************************************/ @@ -55,7 +55,9 @@ void acpi_ns_check_argument_types(struct acpi_evaluate_info *info) arg_type = METHOD_GET_NEXT_TYPE(arg_type_list); user_arg_type = info->parameters[i]->common.type; - if (user_arg_type != arg_type) { + /* No typechecking for ACPI_TYPE_ANY */ + + if ((user_arg_type != arg_type) && (arg_type != ACPI_TYPE_ANY)) { ACPI_WARN_PREDEFINED((AE_INFO, info->full_pathname, ACPI_WARN_ALWAYS, "Argument #%u type mismatch - " diff --git a/drivers/acpi/acpica/nsconvert.c b/drivers/acpi/acpica/nsconvert.c index f9527346b0f7..f05a92b88642 100644 --- a/drivers/acpi/acpica/nsconvert.c +++ b/drivers/acpi/acpica/nsconvert.c @@ -4,7 +4,7 @@ * Module Name: nsconvert - Object conversions for objects returned by * predefined methods * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. 
* *****************************************************************************/ @@ -486,5 +486,5 @@ acpi_ns_convert_to_reference(struct acpi_namespace_node *scope, error_exit: ACPI_FREE(name); *return_object = new_object; - return (AE_OK); + return (status); } diff --git a/drivers/acpi/acpica/nsdump.c b/drivers/acpi/acpica/nsdump.c index 90ccffcd770b..6dc20486ad51 100644 --- a/drivers/acpi/acpica/nsdump.c +++ b/drivers/acpi/acpica/nsdump.c @@ -3,7 +3,7 @@ * * Module Name: nsdump - table dumping routines for debug * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. * *****************************************************************************/ @@ -70,7 +70,7 @@ void acpi_ns_print_pathname(u32 num_segments, const char *pathname) acpi_os_printf("?"); } - pathname += ACPI_NAME_SIZE; + pathname += ACPI_NAMESEG_SIZE; num_segments--; if (num_segments) { acpi_os_printf("."); @@ -197,7 +197,7 @@ acpi_ns_dump_one_object(acpi_handle obj_handle, /* Now we can print out the pertinent information */ - acpi_os_printf(" %-12s %p %2.2X ", + acpi_os_printf(" %-12s %p %3.3X ", acpi_ut_get_type_name(type), this_node, this_node->owner_id); @@ -291,7 +291,7 @@ acpi_ns_dump_one_object(acpi_handle obj_handle, for (i = 0; (i < obj_desc->buffer.length && i < 12); i++) { - acpi_os_printf(" %.2hX", + acpi_os_printf(" %2.2X", obj_desc->buffer. pointer[i]); } @@ -404,7 +404,7 @@ acpi_ns_dump_one_object(acpi_handle obj_handle, case ACPI_TYPE_LOCAL_BANK_FIELD: case ACPI_TYPE_LOCAL_INDEX_FIELD: - acpi_os_printf(" Off %.3X Len %.2X Acc %.2hd\n", + acpi_os_printf(" Off %.3X Len %.2X Acc %.2X\n", (obj_desc->common_field. base_byte_offset * 8) + @@ -589,8 +589,6 @@ acpi_ns_dump_one_object(acpi_handle obj_handle, goto cleanup; } - - obj_type = ACPI_TYPE_INVALID; /* Terminate loop after next pass */ } cleanup: diff --git a/drivers/acpi/acpica/nsdumpdv.c b/drivers/acpi/acpica/nsdumpdv.c index 2b291c500fb0..d5b16aaec233 100644 --- a/drivers/acpi/acpica/nsdumpdv.c +++ b/drivers/acpi/acpica/nsdumpdv.c @@ -3,7 +3,7 @@ * * Module Name: nsdump - table dumping routines for debug * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/nseval.c b/drivers/acpi/acpica/nseval.c index 6390b7951ebf..63748ac699f7 100644 --- a/drivers/acpi/acpica/nseval.c +++ b/drivers/acpi/acpica/nseval.c @@ -14,11 +14,6 @@ #define _COMPONENT ACPI_NAMESPACE ACPI_MODULE_NAME("nseval") -/* Local prototypes */ -static void -acpi_ns_exec_module_code(union acpi_operand_object *method_obj, - struct acpi_evaluate_info *info); - /******************************************************************************* * * FUNCTION: acpi_ns_evaluate @@ -44,7 +39,6 @@ acpi_ns_exec_module_code(union acpi_operand_object *method_obj, * MUTEX: Locks interpreter * ******************************************************************************/ - acpi_status acpi_ns_evaluate(struct acpi_evaluate_info *info) { acpi_status status; @@ -310,187 +304,3 @@ cleanup: info->full_pathname = NULL; return_ACPI_STATUS(status); } - -/******************************************************************************* - * - * FUNCTION: acpi_ns_exec_module_code_list - * - * PARAMETERS: None - * - * RETURN: None. Exceptions during method execution are ignored, since - * we cannot abort a table load. - * - * DESCRIPTION: Execute all elements of the global module-level code list. 
- * Each element is executed as a single control method. - * - * NOTE: With this option enabled, each block of detected executable AML - * code that is outside of any control method is wrapped with a temporary - * control method object and placed on a global list. The methods on this - * list are executed below. - * - * This function executes the module-level code for all tables only after - * all of the tables have been loaded. It is a legacy option and is - * not compatible with other ACPI implementations. See acpi_ns_load_table. - * - * This function will be removed when the legacy option is removed. - * - ******************************************************************************/ - -void acpi_ns_exec_module_code_list(void) -{ - union acpi_operand_object *prev; - union acpi_operand_object *next; - struct acpi_evaluate_info *info; - u32 method_count = 0; - - ACPI_FUNCTION_TRACE(ns_exec_module_code_list); - - /* Exit now if the list is empty */ - - next = acpi_gbl_module_code_list; - if (!next) { - ACPI_DEBUG_PRINT((ACPI_DB_INIT_NAMES, - "Legacy MLC block list is empty\n")); - - return_VOID; - } - - /* Allocate the evaluation information block */ - - info = ACPI_ALLOCATE(sizeof(struct acpi_evaluate_info)); - if (!info) { - return_VOID; - } - - /* Walk the list, executing each "method" */ - - while (next) { - prev = next; - next = next->method.mutex; - - /* Clear the link field and execute the method */ - - prev->method.mutex = NULL; - acpi_ns_exec_module_code(prev, info); - method_count++; - - /* Delete the (temporary) method object */ - - acpi_ut_remove_reference(prev); - } - - ACPI_INFO(("Executed %u blocks of module-level executable AML code", - method_count)); - - ACPI_FREE(info); - acpi_gbl_module_code_list = NULL; - return_VOID; -} - -/******************************************************************************* - * - * FUNCTION: acpi_ns_exec_module_code - * - * PARAMETERS: method_obj - Object container for the module-level code - * info - Info block for method evaluation - * - * RETURN: None. Exceptions during method execution are ignored, since - * we cannot abort a table load. - * - * DESCRIPTION: Execute a control method containing a block of module-level - * executable AML code. The control method is temporarily - * installed to the root node, then evaluated. - * - ******************************************************************************/ - -static void -acpi_ns_exec_module_code(union acpi_operand_object *method_obj, - struct acpi_evaluate_info *info) -{ - union acpi_operand_object *parent_obj; - struct acpi_namespace_node *parent_node; - acpi_object_type type; - acpi_status status; - - ACPI_FUNCTION_TRACE(ns_exec_module_code); - - /* - * Get the parent node. We cheat by using the next_object field - * of the method object descriptor. - */ - parent_node = - ACPI_CAST_PTR(struct acpi_namespace_node, - method_obj->method.next_object); - type = acpi_ns_get_type(parent_node); - - /* - * Get the region handler and save it in the method object. We may need - * this if an operation region declaration causes a _REG method to be run. - * - * We can't do this in acpi_ps_link_module_code because - * acpi_gbl_root_node->Object is NULL at PASS1. 
- */ - if ((type == ACPI_TYPE_DEVICE) && parent_node->object) { - method_obj->method.dispatch.handler = - parent_node->object->device.handler; - } - - /* Must clear next_object (acpi_ns_attach_object needs the field) */ - - method_obj->method.next_object = NULL; - - /* Initialize the evaluation information block */ - - memset(info, 0, sizeof(struct acpi_evaluate_info)); - info->prefix_node = parent_node; - - /* - * Get the currently attached parent object. Add a reference, - * because the ref count will be decreased when the method object - * is installed to the parent node. - */ - parent_obj = acpi_ns_get_attached_object(parent_node); - if (parent_obj) { - acpi_ut_add_reference(parent_obj); - } - - /* Install the method (module-level code) in the parent node */ - - status = - acpi_ns_attach_object(parent_node, method_obj, ACPI_TYPE_METHOD); - if (ACPI_FAILURE(status)) { - goto exit; - } - - /* Execute the parent node as a control method */ - - status = acpi_ns_evaluate(info); - - ACPI_DEBUG_PRINT((ACPI_DB_INIT_NAMES, - "Executed module-level code at %p\n", - method_obj->method.aml_start)); - - /* Delete a possible implicit return value (in slack mode) */ - - if (info->return_object) { - acpi_ut_remove_reference(info->return_object); - } - - /* Detach the temporary method object */ - - acpi_ns_detach_object(parent_node); - - /* Restore the original parent object */ - - if (parent_obj) { - status = acpi_ns_attach_object(parent_node, parent_obj, type); - } else { - parent_node->type = (u8)type; - } - -exit: - if (parent_obj) { - acpi_ut_remove_reference(parent_obj); - } - return_VOID; -} diff --git a/drivers/acpi/acpica/nsinit.c b/drivers/acpi/acpica/nsinit.c index d77257d1c827..03373e7f7978 100644 --- a/drivers/acpi/acpica/nsinit.c +++ b/drivers/acpi/acpica/nsinit.c @@ -3,7 +3,7 @@ * * Module Name: nsinit - namespace initialization * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. * *****************************************************************************/ @@ -55,14 +55,19 @@ acpi_status acpi_ns_initialize_objects(void) ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "**** Starting initialization of namespace objects ****\n")); ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT, - "Completing Region/Field/Buffer/Package initialization:\n")); + "Final data object initialization: ")); - /* Set all init info to zero */ + /* Clear the info block */ memset(&info, 0, sizeof(struct acpi_init_walk_info)); /* Walk entire namespace from the supplied root */ + /* + * TBD: will become ACPI_TYPE_PACKAGE as this type object + * is now the only one that supports deferred initialization + * (forward references). + */ status = acpi_walk_namespace(ACPI_TYPE_ANY, ACPI_ROOT_OBJECT, ACPI_UINT32_MAX, acpi_ns_init_one_object, NULL, &info, NULL); @@ -71,13 +76,8 @@ acpi_status acpi_ns_initialize_objects(void) } ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT, - " Initialized %u/%u Regions %u/%u Fields %u/%u " - "Buffers %u/%u Packages (%u nodes)\n", - info.op_region_init, info.op_region_count, - info.field_init, info.field_count, - info.buffer_init, info.buffer_count, - info.package_init, info.package_count, - info.object_count)); + "Namespace contains %u (0x%X) objects\n", + info.object_count, info.object_count)); ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "%u Control Methods found\n%u Op Regions found\n", @@ -382,34 +382,18 @@ acpi_ns_init_one_object(acpi_handle obj_handle, acpi_ex_enter_interpreter(); /* - * Each of these types can contain executable AML code within the - * declaration. 
+ * Only initialization of Package objects can be deferred, in order + * to support forward references. */ switch (type) { - case ACPI_TYPE_REGION: - - info->op_region_init++; - status = acpi_ds_get_region_arguments(obj_desc); - break; - - case ACPI_TYPE_BUFFER_FIELD: - - info->field_init++; - status = acpi_ds_get_buffer_field_arguments(obj_desc); - break; - case ACPI_TYPE_LOCAL_BANK_FIELD: + /* TBD: bank_fields do not require deferred init, remove this code */ + info->field_init++; status = acpi_ds_get_bank_field_arguments(obj_desc); break; - case ACPI_TYPE_BUFFER: - - info->buffer_init++; - status = acpi_ds_get_buffer_arguments(obj_desc); - break; - case ACPI_TYPE_PACKAGE: /* Complete the initialization/resolution of the package object */ @@ -421,8 +405,13 @@ acpi_ns_init_one_object(acpi_handle obj_handle, default: - /* No other types can get here */ + /* No other types should get here */ + status = AE_TYPE; + ACPI_EXCEPTION((AE_INFO, status, + "Opcode is not deferred [%4.4s] (%s)", + acpi_ut_get_node_name(node), + acpi_ut_get_type_name(type))); break; } @@ -478,7 +467,7 @@ acpi_ns_find_ini_methods(acpi_handle obj_handle, /* We are only looking for methods named _INI */ - if (!ACPI_COMPARE_NAME(node->name.ascii, METHOD_NAME__INI)) { + if (!ACPI_COMPARE_NAMESEG(node->name.ascii, METHOD_NAME__INI)) { return (AE_OK); } @@ -641,7 +630,7 @@ acpi_ns_init_one_device(acpi_handle obj_handle, * Note: We know there is an _INI within this subtree, but it may not be * under this particular device, it may be lower in the branch. */ - if (!ACPI_COMPARE_NAME(device_node->name.ascii, "_SB_") || + if (!ACPI_COMPARE_NAMESEG(device_node->name.ascii, "_SB_") || device_node->parent != acpi_gbl_root_node) { ACPI_DEBUG_EXEC(acpi_ut_display_init_pathname (ACPI_TYPE_METHOD, device_node, diff --git a/drivers/acpi/acpica/nsload.c b/drivers/acpi/acpica/nsload.c index 04bc73e82aed..6ec4c646fff7 100644 --- a/drivers/acpi/acpica/nsload.c +++ b/drivers/acpi/acpica/nsload.c @@ -3,7 +3,7 @@ * * Module Name: nsload - namespace loading/expanding/contracting procedures * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. * *****************************************************************************/ @@ -75,7 +75,7 @@ acpi_ns_load_table(u32 table_index, struct acpi_namespace_node *node) /* * On error, delete any namespace objects created by this table. * We cannot initialize these objects, so delete them. There are - * a couple of expecially bad cases: + * a couple of especially bad cases: * AE_ALREADY_EXISTS - namespace collision. * AE_NOT_FOUND - the target of a Scope operator does not * exist. This target of Scope must already exist in the @@ -109,18 +109,6 @@ unlock: ACPI_DEBUG_PRINT((ACPI_DB_INFO, "**** Completed Table Object Initialization\n")); - /* - * This case handles the legacy option that groups all module-level - * code blocks together and defers execution until all of the tables - * are loaded. Execute all of these blocks at this time. - * Execute any module-level code that was detected during the table - * load phase. - * - * Note: this option is deprecated and will be eliminated in the - * future. Use of this option can cause problems with AML code that - * depends upon in-order immediate execution of module-level code. 
- */ - acpi_ns_exec_module_code_list(); return_ACPI_STATUS(status); } diff --git a/drivers/acpi/acpica/nsnames.c b/drivers/acpi/acpica/nsnames.c index 289c15bb8c6a..22aeeeb56cff 100644 --- a/drivers/acpi/acpica/nsnames.c +++ b/drivers/acpi/acpica/nsnames.c @@ -13,9 +13,6 @@ #define _COMPONENT ACPI_NAMESPACE ACPI_MODULE_NAME("nsnames") -/* Local Prototypes */ -static void acpi_ns_normalize_pathname(char *original_path); - /******************************************************************************* * * FUNCTION: acpi_ns_get_external_pathname @@ -30,7 +27,6 @@ static void acpi_ns_normalize_pathname(char *original_path); * for error and debug statements. * ******************************************************************************/ - char *acpi_ns_get_external_pathname(struct acpi_namespace_node *node) { char *name_buffer; @@ -108,8 +104,8 @@ acpi_ns_handle_to_name(acpi_handle target_handle, struct acpi_buffer *buffer) /* Just copy the ACPI name from the Node and zero terminate it */ node_name = acpi_ut_get_node_name(node); - ACPI_MOVE_NAME(buffer->pointer, node_name); - ((char *)buffer->pointer)[ACPI_NAME_SIZE] = 0; + ACPI_COPY_NAMESEG(buffer->pointer, node_name); + ((char *)buffer->pointer)[ACPI_NAMESEG_SIZE] = 0; ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "%4.4s\n", (char *)buffer->pointer)); return_ACPI_STATUS(AE_OK); @@ -164,7 +160,7 @@ acpi_ns_handle_to_pathname(acpi_handle target_handle, /* Build the path in the caller buffer */ (void)acpi_ns_build_normalized_path(node, buffer->pointer, - required_size, no_trailing); + (u32)required_size, no_trailing); ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "%s [%X]\n", (char *)buffer->pointer, (u32) required_size)); @@ -198,7 +194,7 @@ acpi_ns_build_normalized_path(struct acpi_namespace_node *node, char *full_path, u32 path_size, u8 no_trailing) { u32 length = 0, i; - char name[ACPI_NAME_SIZE]; + char name[ACPI_NAMESEG_SIZE] ACPI_NONSTRING; u8 do_no_trailing; char c, *left, *right; struct acpi_namespace_node *next_node; @@ -315,7 +311,7 @@ char *acpi_ns_get_normalized_pathname(struct acpi_namespace_node *node, /* Build the path in the allocated buffer */ - (void)acpi_ns_build_normalized_path(node, name_buffer, size, + (void)acpi_ns_build_normalized_path(node, name_buffer, (u32)size, no_trailing); ACPI_DEBUG_PRINT_RAW((ACPI_DB_NAMES, "%s: Path \"%s\"\n", @@ -346,7 +342,7 @@ char *acpi_ns_build_prefixed_pathname(union acpi_generic_state *prefix_scope, char *full_path = NULL; char *external_path = NULL; char *prefix_path = NULL; - u32 prefix_path_length = 0; + acpi_size prefix_path_length = 0; /* If there is a prefix, get the pathname to it */ @@ -411,7 +407,7 @@ cleanup: * ******************************************************************************/ -static void acpi_ns_normalize_pathname(char *original_path) +void acpi_ns_normalize_pathname(char *original_path) { char *input_path = original_path; char *new_path_buffer; @@ -446,7 +442,7 @@ static void acpi_ns_normalize_pathname(char *original_path) /* Do one nameseg at a time */ - for (i = 0; (i < ACPI_NAME_SIZE) && *input_path; i++) { + for (i = 0; (i < ACPI_NAMESEG_SIZE) && *input_path; i++) { if ((i == 0) || (*input_path != '_')) { /* First char is allowed to be underscore */ *new_path = *input_path; new_path++; diff --git a/drivers/acpi/acpica/nsobject.c b/drivers/acpi/acpica/nsobject.c index 8638f43cfc3d..79d86da1c892 100644 --- a/drivers/acpi/acpica/nsobject.c +++ b/drivers/acpi/acpica/nsobject.c @@ -186,6 +186,10 @@ void acpi_ns_detach_object(struct acpi_namespace_node *node) } } + if 
(obj_desc->common.type == ACPI_TYPE_REGION) { + acpi_ut_remove_address_range(obj_desc->region.space_id, node); + } + /* Clear the Node entry in all cases */ node->object = NULL; diff --git a/drivers/acpi/acpica/nsparse.c b/drivers/acpi/acpica/nsparse.c index 488ff39d86f7..959e6379bc4c 100644 --- a/drivers/acpi/acpica/nsparse.c +++ b/drivers/acpi/acpica/nsparse.c @@ -3,7 +3,7 @@ * * Module Name: nsparse - namespace interface to AML parser * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. * *****************************************************************************/ @@ -203,7 +203,7 @@ acpi_ns_one_complete_parse(u32 pass_number, /* Found OSDT table, enable the namespace override feature */ - if (ACPI_COMPARE_NAME(table->signature, ACPI_SIG_OSDT) && + if (ACPI_COMPARE_NAMESEG(table->signature, ACPI_SIG_OSDT) && pass_number == ACPI_IMODE_LOAD_PASS1) { walk_state->namespace_override = TRUE; } @@ -253,61 +253,19 @@ acpi_ns_parse_table(u32 table_index, struct acpi_namespace_node *start_node) ACPI_FUNCTION_TRACE(ns_parse_table); - if (acpi_gbl_execute_tables_as_methods) { - /* - * This case executes the AML table as one large control method. - * The point of this is to execute any module-level code in-place - * as the table is parsed. Some AML code depends on this behavior. - * - * It is a run-time option at this time, but will eventually become - * the default. - * - * Note: This causes the table to only have a single-pass parse. - * However, this is compatible with other ACPI implementations. - */ - ACPI_DEBUG_PRINT_RAW((ACPI_DB_PARSE, - "%s: **** Start table execution pass\n", - ACPI_GET_FUNCTION_NAME)); - - status = acpi_ns_execute_table(table_index, start_node); - if (ACPI_FAILURE(status)) { - return_ACPI_STATUS(status); - } - } else { - /* - * AML Parse, pass 1 - * - * In this pass, we load most of the namespace. Control methods - * are not parsed until later. A parse tree is not created. - * Instead, each Parser Op subtree is deleted when it is finished. - * This saves a great deal of memory, and allows a small cache of - * parse objects to service the entire parse. The second pass of - * the parse then performs another complete parse of the AML. - */ - ACPI_DEBUG_PRINT((ACPI_DB_PARSE, "**** Start pass 1\n")); - - status = acpi_ns_one_complete_parse(ACPI_IMODE_LOAD_PASS1, - table_index, start_node); - if (ACPI_FAILURE(status)) { - return_ACPI_STATUS(status); - } + /* + * Executes the AML table as one large control method. + * The point of this is to execute any module-level code in-place + * as the table is parsed. Some AML code depends on this behavior. + * + * Note: This causes the table to only have a single-pass parse. + * However, this is compatible with other ACPI implementations. + */ + ACPI_DEBUG_PRINT_RAW((ACPI_DB_PARSE, + "%s: **** Start table execution pass\n", + ACPI_GET_FUNCTION_NAME)); - /* - * AML Parse, pass 2 - * - * In this pass, we resolve forward references and other things - * that could not be completed during the first pass. - * Another complete parse of the AML is performed, but the - * overhead of this is compensated for by the fact that the - * parse objects are all cached. 
- */ - ACPI_DEBUG_PRINT((ACPI_DB_PARSE, "**** Start pass 2\n")); - status = acpi_ns_one_complete_parse(ACPI_IMODE_LOAD_PASS2, - table_index, start_node); - if (ACPI_FAILURE(status)) { - return_ACPI_STATUS(status); - } - } + status = acpi_ns_execute_table(table_index, start_node); return_ACPI_STATUS(status); } diff --git a/drivers/acpi/acpica/nspredef.c b/drivers/acpi/acpica/nspredef.c index 29c68b15a64f..81995ee48c49 100644 --- a/drivers/acpi/acpica/nspredef.c +++ b/drivers/acpi/acpica/nspredef.c @@ -3,7 +3,7 @@ * * Module Name: nspredef - Validation of ACPI predefined methods and objects * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. * *****************************************************************************/ @@ -71,11 +71,13 @@ acpi_ns_check_return_value(struct acpi_namespace_node *node, acpi_status status; const union acpi_predefined_info *predefined; + ACPI_FUNCTION_TRACE(ns_check_return_value); + /* If not a predefined name, we cannot validate the return object */ predefined = info->predefined; if (!predefined) { - return (AE_OK); + return_ACPI_STATUS(AE_OK); } /* @@ -83,7 +85,7 @@ acpi_ns_check_return_value(struct acpi_namespace_node *node, * validate the return object */ if ((return_status != AE_OK) && (return_status != AE_CTRL_RETURN_VALUE)) { - return (AE_OK); + return_ACPI_STATUS(AE_OK); } /* @@ -102,7 +104,7 @@ acpi_ns_check_return_value(struct acpi_namespace_node *node, if (acpi_gbl_disable_auto_repair || (!predefined->info.expected_btypes) || (predefined->info.expected_btypes == ACPI_RTYPE_ALL)) { - return (AE_OK); + return_ACPI_STATUS(AE_OK); } /* @@ -163,7 +165,7 @@ exit: node->flags |= ANOBJ_EVALUATED; } - return (status); + return_ACPI_STATUS(status); } /******************************************************************************* diff --git a/drivers/acpi/acpica/nsprepkg.c b/drivers/acpi/acpica/nsprepkg.c index 51523473e7fe..ca137ce5674f 100644 --- a/drivers/acpi/acpica/nsprepkg.c +++ b/drivers/acpi/acpica/nsprepkg.c @@ -3,7 +3,7 @@ * * Module Name: nsprepkg - Validation of package objects for predefined names * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. * *****************************************************************************/ @@ -59,7 +59,7 @@ acpi_ns_check_package(struct acpi_evaluate_info *info, u32 count; u32 i; - ACPI_FUNCTION_NAME(ns_check_package); + ACPI_FUNCTION_TRACE(ns_check_package); /* The package info for this name is in the next table entry */ @@ -88,14 +88,14 @@ acpi_ns_check_package(struct acpi_evaluate_info *info, */ if (!count) { if (package->ret_info.type == ACPI_PTYPE1_VAR) { - return (AE_OK); + return_ACPI_STATUS(AE_OK); } ACPI_WARN_PREDEFINED((AE_INFO, info->full_pathname, info->node_flags, "Return Package has no elements (empty)")); - return (AE_AML_OPERAND_VALUE); + return_ACPI_STATUS(AE_AML_OPERAND_VALUE); } /* @@ -152,7 +152,7 @@ acpi_ns_check_package(struct acpi_evaluate_info *info, package->ret_info. 
object_type1, i); if (ACPI_FAILURE(status)) { - return (status); + return_ACPI_STATUS(status); } elements++; @@ -186,7 +186,7 @@ acpi_ns_check_package(struct acpi_evaluate_info *info, object_type[i], i); if (ACPI_FAILURE(status)) { - return (status); + return_ACPI_STATUS(status); } } else { /* These are the optional package elements */ @@ -198,7 +198,7 @@ acpi_ns_check_package(struct acpi_evaluate_info *info, tail_object_type, i); if (ACPI_FAILURE(status)) { - return (status); + return_ACPI_STATUS(status); } } @@ -214,7 +214,7 @@ acpi_ns_check_package(struct acpi_evaluate_info *info, acpi_ns_check_object_type(info, elements, ACPI_RTYPE_INTEGER, 0); if (ACPI_FAILURE(status)) { - return (status); + return_ACPI_STATUS(status); } elements++; @@ -234,7 +234,7 @@ acpi_ns_check_package(struct acpi_evaluate_info *info, acpi_ns_check_object_type(info, elements, ACPI_RTYPE_INTEGER, 0); if (ACPI_FAILURE(status)) { - return (status); + return_ACPI_STATUS(status); } /* @@ -279,7 +279,7 @@ acpi_ns_check_package(struct acpi_evaluate_info *info, acpi_ns_wrap_with_package(info, return_object, return_object_ptr); if (ACPI_FAILURE(status)) { - return (status); + return_ACPI_STATUS(status); } /* Update locals to point to the new package (of 1 element) */ @@ -316,7 +316,7 @@ acpi_ns_check_package(struct acpi_evaluate_info *info, package->ret_info. object_type1, 0); if (ACPI_FAILURE(status)) { - return (status); + return_ACPI_STATUS(status); } /* Validate length of the UUID buffer */ @@ -326,14 +326,14 @@ acpi_ns_check_package(struct acpi_evaluate_info *info, info->full_pathname, info->node_flags, "Invalid length for UUID Buffer")); - return (AE_AML_OPERAND_VALUE); + return_ACPI_STATUS(AE_AML_OPERAND_VALUE); } status = acpi_ns_check_object_type(info, elements + 1, package->ret_info. object_type2, 0); if (ACPI_FAILURE(status)) { - return (status); + return_ACPI_STATUS(status); } elements += 2; @@ -350,10 +350,10 @@ acpi_ns_check_package(struct acpi_evaluate_info *info, "Invalid internal return type in table entry: %X", package->ret_info.type)); - return (AE_AML_INTERNAL); + return_ACPI_STATUS(AE_AML_INTERNAL); } - return (status); + return_ACPI_STATUS(status); package_too_small: @@ -363,7 +363,7 @@ package_too_small: "Return Package is too small - found %u elements, expected %u", count, expected_count)); - return (AE_AML_OPERAND_VALUE); + return_ACPI_STATUS(AE_AML_OPERAND_VALUE); } /******************************************************************************* @@ -708,6 +708,8 @@ acpi_ns_check_package_elements(struct acpi_evaluate_info *info, acpi_status status; u32 i; + ACPI_FUNCTION_TRACE(ns_check_package_elements); + /* * Up to two groups of package elements are supported by the data * structure. All elements in each group must be of the same type. 
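Many of the nspredef/nsprepkg changes above, and the nsrepair changes that follow, add ACPI_FUNCTION_TRACE and convert bare return statements to return_ACPI_STATUS() so the entry/exit trace stays balanced on every path. A minimal model of that pattern, with invented macro names (TRACE_ENTER/TRACE_RETURN) rather than the ACPICA macros:

#include <stdio.h>

/* Minimal model (invented names, not the ACPICA macros) of why every exit
 * path must use the matching return macro: the entry macro announces the
 * function and the exit macro logs the status, so a bare "return" would
 * leave the trace unbalanced. */
#define TRACE_ENTER(fn)   const char *trace_name = fn; \
                          printf("enter %s\n", trace_name)
#define TRACE_RETURN(s)   do { printf("exit  %s -> %d\n", trace_name, (s)); \
                               return (s); } while (0)

static int check_package(int element_count)
{
    TRACE_ENTER("check_package");

    if (element_count == 0)
        TRACE_RETURN(-1);        /* logged exit, unlike a bare return */

    TRACE_RETURN(0);
}

int main(void)
{
    check_package(0);
    check_package(3);
    return 0;
}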
@@ -717,7 +719,7 @@ acpi_ns_check_package_elements(struct acpi_evaluate_info *info, status = acpi_ns_check_object_type(info, this_element, type1, i + start_index); if (ACPI_FAILURE(status)) { - return (status); + return_ACPI_STATUS(status); } this_element++; @@ -728,11 +730,11 @@ acpi_ns_check_package_elements(struct acpi_evaluate_info *info, type2, (i + count1 + start_index)); if (ACPI_FAILURE(status)) { - return (status); + return_ACPI_STATUS(status); } this_element++; } - return (AE_OK); + return_ACPI_STATUS(AE_OK); } diff --git a/drivers/acpi/acpica/nsrepair.c b/drivers/acpi/acpica/nsrepair.c index ff2ab8fbec38..accfdcfb7e62 100644 --- a/drivers/acpi/acpica/nsrepair.c +++ b/drivers/acpi/acpica/nsrepair.c @@ -3,7 +3,7 @@ * * Module Name: nsrepair - Repair for objects returned by predefined methods * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. * *****************************************************************************/ @@ -181,8 +181,9 @@ acpi_ns_simple_repair(struct acpi_evaluate_info *info, * Try to fix if there was no return object. Warning if failed to fix. */ if (!return_object) { - if (expected_btypes && (!(expected_btypes & ACPI_RTYPE_NONE))) { - if (package_index != ACPI_NOT_PACKAGE_ELEMENT) { + if (expected_btypes) { + if (!(expected_btypes & ACPI_RTYPE_NONE) && + package_index != ACPI_NOT_PACKAGE_ELEMENT) { ACPI_WARN_PREDEFINED((AE_INFO, info->full_pathname, ACPI_WARN_ALWAYS, @@ -196,14 +197,15 @@ acpi_ns_simple_repair(struct acpi_evaluate_info *info, if (ACPI_SUCCESS(status)) { return (AE_OK); /* Repair was successful */ } - } else { + } + + if (expected_btypes != ACPI_RTYPE_NONE) { ACPI_WARN_PREDEFINED((AE_INFO, info->full_pathname, ACPI_WARN_ALWAYS, "Missing expected return value")); + return (AE_AML_NO_RETURN_VALUE); } - - return (AE_AML_NO_RETURN_VALUE); } } @@ -316,7 +318,7 @@ static const struct acpi_simple_repair_info *acpi_ns_match_simple_repair(struct this_name = acpi_object_repair_info; while (this_name->object_converter) { - if (ACPI_COMPARE_NAME(node->name.ascii, this_name->name)) { + if (ACPI_COMPARE_NAMESEG(node->name.ascii, this_name->name)) { /* Check if we can actually repair this name/type combination */ diff --git a/drivers/acpi/acpica/nsrepair2.c b/drivers/acpi/acpica/nsrepair2.c index a3bd6280882c..8dbb870f40d2 100644 --- a/drivers/acpi/acpica/nsrepair2.c +++ b/drivers/acpi/acpica/nsrepair2.c @@ -4,7 +4,7 @@ * Module Name: nsrepair2 - Repair for objects returned by specific * predefined methods * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. 
* *****************************************************************************/ @@ -25,7 +25,7 @@ acpi_status (*acpi_repair_function) (struct acpi_evaluate_info * info, return_object_ptr); typedef struct acpi_repair_info { - char name[ACPI_NAME_SIZE]; + char name[ACPI_NAMESEG_SIZE] ACPI_NONSTRING; acpi_repair_function repair_function; } acpi_repair_info; @@ -126,7 +126,7 @@ static const struct acpi_repair_info acpi_ns_repairable_names[] = { #define ACPI_FDE_FIELD_COUNT 5 #define ACPI_FDE_BYTE_BUFFER_SIZE 5 -#define ACPI_FDE_DWORD_BUFFER_SIZE (ACPI_FDE_FIELD_COUNT * sizeof (u32)) +#define ACPI_FDE_DWORD_BUFFER_SIZE (ACPI_FDE_FIELD_COUNT * (u32) sizeof (u32)) /****************************************************************************** * @@ -155,15 +155,17 @@ acpi_ns_complex_repairs(struct acpi_evaluate_info *info, const struct acpi_repair_info *predefined; acpi_status status; + ACPI_FUNCTION_TRACE(ns_complex_repairs); + /* Check if this name is in the list of repairable names */ predefined = acpi_ns_match_complex_repair(node); if (!predefined) { - return (validate_status); + return_ACPI_STATUS(validate_status); } status = predefined->repair_function(info, return_object_ptr); - return (status); + return_ACPI_STATUS(status); } /****************************************************************************** @@ -188,7 +190,7 @@ static const struct acpi_repair_info *acpi_ns_match_complex_repair(struct this_name = acpi_ns_repairable_names; while (this_name->repair_function) { - if (ACPI_COMPARE_NAME(node->name.ascii, this_name->name)) { + if (ACPI_COMPARE_NAMESEG(node->name.ascii, this_name->name)) { return (this_name); } @@ -344,17 +346,19 @@ acpi_ns_repair_CID(struct acpi_evaluate_info *info, u16 original_ref_count; u32 i; + ACPI_FUNCTION_TRACE(ns_repair_CID); + /* Check for _CID as a simple string */ if (return_object->common.type == ACPI_TYPE_STRING) { status = acpi_ns_repair_HID(info, return_object_ptr); - return (status); + return_ACPI_STATUS(status); } /* Exit if not a Package */ if (return_object->common.type != ACPI_TYPE_PACKAGE) { - return (AE_OK); + return_ACPI_STATUS(AE_OK); } /* Examine each element of the _CID package */ @@ -366,7 +370,7 @@ acpi_ns_repair_CID(struct acpi_evaluate_info *info, status = acpi_ns_repair_HID(info, element_ptr); if (ACPI_FAILURE(status)) { - return (status); + return_ACPI_STATUS(status); } if (original_element != *element_ptr) { @@ -380,7 +384,7 @@ acpi_ns_repair_CID(struct acpi_evaluate_info *info, element_ptr++; } - return (AE_OK); + return_ACPI_STATUS(AE_OK); } /****************************************************************************** @@ -495,12 +499,12 @@ acpi_ns_repair_HID(struct acpi_evaluate_info *info, char *source; char *dest; - ACPI_FUNCTION_NAME(ns_repair_HID); + ACPI_FUNCTION_TRACE(ns_repair_HID); /* We only care about string _HID objects (not integers) */ if (return_object->common.type != ACPI_TYPE_STRING) { - return (AE_OK); + return_ACPI_STATUS(AE_OK); } if (return_object->string.length == 0) { @@ -511,14 +515,14 @@ acpi_ns_repair_HID(struct acpi_evaluate_info *info, /* Return AE_OK anyway, let driver handle it */ info->return_flags |= ACPI_OBJECT_REPAIRED; - return (AE_OK); + return_ACPI_STATUS(AE_OK); } /* It is simplest to always create a new string object */ new_string = acpi_ut_create_string_object(return_object->string.length); if (!new_string) { - return (AE_NO_MEMORY); + return_ACPI_STATUS(AE_NO_MEMORY); } /* @@ -551,7 +555,7 @@ acpi_ns_repair_HID(struct acpi_evaluate_info *info, acpi_ut_remove_reference(return_object); 
*return_object_ptr = new_string; - return (AE_OK); + return_ACPI_STATUS(AE_OK); } /****************************************************************************** diff --git a/drivers/acpi/acpica/nsutils.c b/drivers/acpi/acpica/nsutils.c index a2bf4b2caa6c..49cc07e2ac5a 100644 --- a/drivers/acpi/acpica/nsutils.c +++ b/drivers/acpi/acpica/nsutils.c @@ -4,7 +4,7 @@ * Module Name: nsutils - Utilities for accessing ACPI namespace, accessing * parents and siblings and Scope manipulation * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. * *****************************************************************************/ @@ -178,7 +178,7 @@ void acpi_ns_get_internal_name_length(struct acpi_namestring_info *info) } } - info->length = (ACPI_NAME_SIZE * info->num_segments) + + info->length = (ACPI_NAMESEG_SIZE * info->num_segments) + 4 + info->num_carats; info->next_external_char = next_external_char; @@ -249,7 +249,7 @@ acpi_status acpi_ns_build_internal_name(struct acpi_namestring_info *info) /* Build the name (minus path separators) */ for (; num_segments; num_segments--) { - for (i = 0; i < ACPI_NAME_SIZE; i++) { + for (i = 0; i < ACPI_NAMESEG_SIZE; i++) { if (ACPI_IS_PATH_SEPARATOR(*external_name) || (*external_name == 0)) { @@ -274,7 +274,7 @@ acpi_status acpi_ns_build_internal_name(struct acpi_namestring_info *info) /* Move on the next segment */ external_name++; - result += ACPI_NAME_SIZE; + result += ACPI_NAMESEG_SIZE; } /* Terminate the string */ @@ -350,7 +350,7 @@ acpi_ns_internalize_name(const char *external_name, char **converted_name) * * FUNCTION: acpi_ns_externalize_name * - * PARAMETERS: internal_name_length - Lenth of the internal name below + * PARAMETERS: internal_name_length - Length of the internal name below * internal_name - Internal representation of name * converted_name_length - Where the length is returned * converted_name - Where the resulting external name @@ -489,12 +489,12 @@ acpi_ns_externalize_name(u32 internal_name_length, /* Copy and validate the 4-char name segment */ - ACPI_MOVE_NAME(&(*converted_name)[j], - &internal_name[names_index]); + ACPI_COPY_NAMESEG(&(*converted_name)[j], + &internal_name[names_index]); acpi_ut_repair_name(&(*converted_name)[j]); - j += ACPI_NAME_SIZE; - names_index += ACPI_NAME_SIZE; + j += ACPI_NAMESEG_SIZE; + names_index += ACPI_NAMESEG_SIZE; } } @@ -560,21 +560,9 @@ struct acpi_namespace_node *acpi_ns_validate_handle(acpi_handle handle) void acpi_ns_terminate(void) { acpi_status status; - union acpi_operand_object *prev; - union acpi_operand_object *next; ACPI_FUNCTION_TRACE(ns_terminate); - /* Delete any module-level code blocks */ - - next = acpi_gbl_module_code_list; - while (next) { - prev = next; - next = next->method.mutex; - prev->method.mutex = NULL; /* Clear the Mutex (cheated) field */ - acpi_ut_remove_reference(prev); - } - /* * Free the entire namespace -- all nodes and all objects * attached to the nodes diff --git a/drivers/acpi/acpica/nswalk.c b/drivers/acpi/acpica/nswalk.c index e9a061da9bb2..5670ff5a43cd 100644 --- a/drivers/acpi/acpica/nswalk.c +++ b/drivers/acpi/acpica/nswalk.c @@ -3,7 +3,7 @@ * * Module Name: nswalk - Functions for walking the ACPI namespace * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. 
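The nsutils.c changes above are part of the ACPI_NAME_SIZE to ACPI_NAMESEG_SIZE rename: internal names are built from fixed four-character segments, so buffer sizes and pointer strides are expressed in nameseg units. A small, self-contained illustration of the padding rule (pad_nameseg is a made-up helper, not the ACPICA converter):

#include <stdio.h>
#include <string.h>

#define NAMESEG_SIZE 4

/* Every path component is stored as exactly four characters, with short
 * names padded using underscores. */
static void pad_nameseg(const char *src, char out[NAMESEG_SIZE + 1])
{
    size_t i, len = strlen(src);

    for (i = 0; i < NAMESEG_SIZE; i++)
        out[i] = (i < len) ? src[i] : '_';
    out[NAMESEG_SIZE] = '\0';
}

int main(void)
{
    const char *segs[] = { "_SB", "PCI0", "LPC" };
    char padded[NAMESEG_SIZE + 1];

    for (size_t i = 0; i < sizeof(segs) / sizeof(segs[0]); i++) {
        pad_nameseg(segs[i], padded);
        printf("%-4s -> %s\n", segs[i], padded);   /* _SB -> _SB_, LPC -> LPC_ */
    }
    return 0;
}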
* *****************************************************************************/ @@ -171,6 +171,12 @@ acpi_ns_walk_namespace(acpi_object_type type, start_node = acpi_gbl_root_node; } + /* Avoid walking the namespace if the StartNode is NULL */ + + if (!start_node) { + return_ACPI_STATUS(AE_NO_NAMESPACE); + } + /* Null child means "get first node" */ parent_node = start_node; diff --git a/drivers/acpi/acpica/nsxfname.c b/drivers/acpi/acpica/nsxfname.c index b2915c9cceaf..1db831545ec8 100644 --- a/drivers/acpi/acpica/nsxfname.c +++ b/drivers/acpi/acpica/nsxfname.c @@ -4,7 +4,7 @@ * Module Name: nsxfname - Public interfaces to the ACPI subsystem * ACPI Namespace oriented interfaces * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. * *****************************************************************************/ @@ -44,7 +44,7 @@ static char *acpi_ns_copy_device_id(struct acpi_pnp_device_id *dest, acpi_status acpi_get_handle(acpi_handle parent, - acpi_string pathname, acpi_handle *ret_handle) + const char *pathname, acpi_handle *ret_handle) { acpi_status status; struct acpi_namespace_node *node = NULL; @@ -425,8 +425,8 @@ acpi_get_object_info(acpi_handle handle, } if (cls) { - next_id_string = acpi_ns_copy_device_id(&info->class_code, - cls, next_id_string); + (void)acpi_ns_copy_device_id(&info->class_code, + cls, next_id_string); } /* Copy the fixed-length data */ @@ -495,8 +495,8 @@ acpi_status acpi_install_method(u8 *buffer) /* Table must be a DSDT or SSDT */ - if (!ACPI_COMPARE_NAME(table->signature, ACPI_SIG_DSDT) && - !ACPI_COMPARE_NAME(table->signature, ACPI_SIG_SSDT)) { + if (!ACPI_COMPARE_NAMESEG(table->signature, ACPI_SIG_DSDT) && + !ACPI_COMPARE_NAMESEG(table->signature, ACPI_SIG_SSDT)) { return (AE_BAD_HEADER); } @@ -516,7 +516,7 @@ acpi_status acpi_install_method(u8 *buffer) method_flags = *parser_state.aml++; aml_start = parser_state.aml; - aml_length = ACPI_PTR_DIFF(parser_state.pkg_end, aml_start); + aml_length = (u32)ACPI_PTR_DIFF(parser_state.pkg_end, aml_start); /* * Allocate resources up-front. We don't want to have to delete a new diff --git a/drivers/acpi/acpica/nsxfobj.c b/drivers/acpi/acpica/nsxfobj.c index c022bef263e5..324269481160 100644 --- a/drivers/acpi/acpica/nsxfobj.c +++ b/drivers/acpi/acpica/nsxfobj.c @@ -24,7 +24,8 @@ ACPI_MODULE_NAME("nsxfobj") * * RETURN: Status * - * DESCRIPTION: This routine returns the type associatd with a particular handle + * DESCRIPTION: This routine returns the type associated with a particular + * handle * ******************************************************************************/ acpi_status acpi_get_type(acpi_handle handle, acpi_object_type *ret_type) diff --git a/drivers/acpi/acpica/psargs.c b/drivers/acpi/acpica/psargs.c index 176d28d60125..6f6ae38ec044 100644 --- a/drivers/acpi/acpica/psargs.c +++ b/drivers/acpi/acpica/psargs.c @@ -3,7 +3,7 @@ * * Module Name: psargs - Parse AML opcode arguments * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. 
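The nsxfname.c hunk above changes acpi_get_handle() to take a const char * pathname, so callers can pass string literals without discarding const. A minimal usage sketch, assuming a kernel context where <linux/acpi.h> is available; the "\_SB.PCI0" path and the lookup_pci_root() helper are illustrative and not taken from this patch:

#include <linux/acpi.h>

/* Look up a namespace object by absolute path; a NULL parent plus a fully
 * qualified pathname is the usual pattern.  The path shown here is only an
 * example and may not exist on a given platform. */
static acpi_handle lookup_pci_root(void)
{
    acpi_handle handle;
    acpi_status status;

    status = acpi_get_handle(NULL, "\\_SB.PCI0", &handle);
    if (ACPI_FAILURE(status))
        return NULL;

    return handle;
}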
* *****************************************************************************/ @@ -25,6 +25,8 @@ acpi_ps_get_next_package_length(struct acpi_parse_state *parser_state); static union acpi_parse_object *acpi_ps_get_next_field(struct acpi_parse_state *parser_state); +static void acpi_ps_free_field_list(union acpi_parse_object *start); + /******************************************************************************* * * FUNCTION: acpi_ps_get_next_package_length @@ -150,21 +152,21 @@ char *acpi_ps_get_next_namestring(struct acpi_parse_state *parser_state) /* Two name segments */ - end += 1 + (2 * ACPI_NAME_SIZE); + end += 1 + (2 * ACPI_NAMESEG_SIZE); break; case AML_MULTI_NAME_PREFIX: /* Multiple name segments, 4 chars each, count in next byte */ - end += 2 + (*(end + 1) * ACPI_NAME_SIZE); + end += 2 + (*(end + 1) * ACPI_NAMESEG_SIZE); break; default: /* Single name segment */ - end += ACPI_NAME_SIZE; + end += ACPI_NAMESEG_SIZE; break; } @@ -522,7 +524,7 @@ static union acpi_parse_object *acpi_ps_get_next_field(struct acpi_parse_state ACPI_MOVE_32_TO_32(&name, parser_state->aml); acpi_ps_set_name(field, name); - parser_state->aml += ACPI_NAME_SIZE; + parser_state->aml += ACPI_NAMESEG_SIZE; ASL_CV_CAPTURE_COMMENTS_ONLY(parser_state); @@ -685,6 +687,39 @@ static union acpi_parse_object *acpi_ps_get_next_field(struct acpi_parse_state /******************************************************************************* * + * FUNCTION: acpi_ps_free_field_list + * + * PARAMETERS: start - First Op in field list + * + * RETURN: None. + * + * DESCRIPTION: Free all Op objects inside a field list. + * + ******************************************************************************/ + +static void acpi_ps_free_field_list(union acpi_parse_object *start) +{ + union acpi_parse_object *cur = start; + union acpi_parse_object *next; + union acpi_parse_object *arg; + + while (cur) { + next = cur->common.next; + + /* AML_INT_CONNECTION_OP can have a single argument */ + + arg = acpi_ps_get_arg(cur, 0); + if (arg) { + acpi_ps_free_op(arg); + } + + acpi_ps_free_op(cur); + cur = next; + } +} + +/******************************************************************************* + * * FUNCTION: acpi_ps_get_next_arg * * PARAMETERS: walk_state - Current state @@ -751,6 +786,10 @@ acpi_ps_get_next_arg(struct acpi_walk_state *walk_state, while (parser_state->aml < parser_state->pkg_end) { field = acpi_ps_get_next_field(parser_state); if (!field) { + if (arg) { + acpi_ps_free_field_list(arg); + } + return_ACPI_STATUS(AE_NO_MEMORY); } @@ -820,6 +859,10 @@ acpi_ps_get_next_arg(struct acpi_walk_state *walk_state, acpi_ps_get_next_namepath(walk_state, parser_state, arg, ACPI_NOT_METHOD_CALL); + if (ACPI_FAILURE(status)) { + acpi_ps_free_op(arg); + return_ACPI_STATUS(status); + } } else { /* Single complex argument, nothing returned */ @@ -854,6 +897,10 @@ acpi_ps_get_next_arg(struct acpi_walk_state *walk_state, acpi_ps_get_next_namepath(walk_state, parser_state, arg, ACPI_POSSIBLE_METHOD_CALL); + if (ACPI_FAILURE(status)) { + acpi_ps_free_op(arg); + return_ACPI_STATUS(status); + } if (arg->common.aml_opcode == AML_INT_METHODCALL_OP) { diff --git a/drivers/acpi/acpica/psloop.c b/drivers/acpi/acpica/psloop.c index e00d1af6fa80..c989cadf271c 100644 --- a/drivers/acpi/acpica/psloop.c +++ b/drivers/acpi/acpica/psloop.c @@ -3,7 +3,7 @@ * * Module Name: psloop - Main AML parse loop * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. 
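The psargs.c changes above add acpi_ps_free_field_list() and free the partially built argument on the namepath error paths, so ops created before a mid-parse failure no longer leak. A toy, self-contained model of that cleanup pattern (simplified op structure, not the parser's types):

#include <stdio.h>
#include <stdlib.h>

/* Each toy "op" may carry one argument and links to the next op in the list,
 * loosely mirroring the field-list ops released by the new helper. */
struct toy_op {
    struct toy_op *next;
    struct toy_op *arg;
};

static void free_op_list(struct toy_op *start)
{
    while (start) {
        struct toy_op *next = start->next;

        free(start->arg);     /* free the single argument first, if any */
        free(start);
        start = next;
    }
}

int main(void)
{
    struct toy_op *first = calloc(1, sizeof(*first));
    struct toy_op *second;

    if (!first)
        return 1;
    first->arg = calloc(1, sizeof(*first));

    second = calloc(1, sizeof(*second));
    if (!second) {
        /* Allocation failed part-way: release everything built so far. */
        free_op_list(first);
        return 1;
    }
    first->next = second;

    free_op_list(first);      /* normal path: no ops or arguments leak */
    printf("all ops freed\n");
    return 0;
}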
* *****************************************************************************/ @@ -32,10 +32,6 @@ static acpi_status acpi_ps_get_arguments(struct acpi_walk_state *walk_state, u8 * aml_op_start, union acpi_parse_object *op); -static void -acpi_ps_link_module_code(union acpi_parse_object *parent_op, - u8 *aml_start, u32 aml_length, acpi_owner_id owner_id); - /******************************************************************************* * * FUNCTION: acpi_ps_get_arguments @@ -56,7 +52,6 @@ acpi_ps_get_arguments(struct acpi_walk_state *walk_state, { acpi_status status = AE_OK; union acpi_parse_object *arg = NULL; - const struct acpi_opcode_info *op_info; ACPI_FUNCTION_TRACE_PTR(ps_get_arguments, walk_state); @@ -136,96 +131,6 @@ acpi_ps_get_arguments(struct acpi_walk_state *walk_state, walk_state->arg_count, walk_state->pass_number)); - /* - * This case handles the legacy option that groups all module-level - * code blocks together and defers execution until all of the tables - * are loaded. Execute all of these blocks at this time. - * Execute any module-level code that was detected during the table - * load phase. - * - * Note: this option is deprecated and will be eliminated in the - * future. Use of this option can cause problems with AML code that - * depends upon in-order immediate execution of module-level code. - */ - if (!acpi_gbl_execute_tables_as_methods && - (walk_state->pass_number <= ACPI_IMODE_LOAD_PASS2) && - ((walk_state->parse_flags & ACPI_PARSE_DISASSEMBLE) == 0)) { - /* - * We want to skip If/Else/While constructs during Pass1 because we - * want to actually conditionally execute the code during Pass2. - * - * Except for disassembly, where we always want to walk the - * If/Else/While packages - */ - switch (op->common.aml_opcode) { - case AML_IF_OP: - case AML_ELSE_OP: - case AML_WHILE_OP: - /* - * Currently supported module-level opcodes are: - * IF/ELSE/WHILE. These appear to be the most common, - * and easiest to support since they open an AML - * package. - */ - if (walk_state->pass_number == - ACPI_IMODE_LOAD_PASS1) { - acpi_ps_link_module_code(op->common. - parent, - aml_op_start, - (u32) - (walk_state-> - parser_state. - pkg_end - - aml_op_start), - walk_state-> - owner_id); - } - - ACPI_DEBUG_PRINT((ACPI_DB_PARSE, - "Pass1: Skipping an If/Else/While body\n")); - - /* Skip body of if/else/while in pass 1 */ - - walk_state->parser_state.aml = - walk_state->parser_state.pkg_end; - walk_state->arg_count = 0; - break; - - default: - /* - * Check for an unsupported executable opcode at module - * level. We must be in PASS1, the parent must be a SCOPE, - * The opcode class must be EXECUTE, and the opcode must - * not be an argument to another opcode. - */ - if ((walk_state->pass_number == - ACPI_IMODE_LOAD_PASS1) - && (op->common.parent->common.aml_opcode == - AML_SCOPE_OP)) { - op_info = - acpi_ps_get_opcode_info(op->common. - aml_opcode); - if ((op_info->class == - AML_CLASS_EXECUTE) && (!arg)) { - ACPI_WARNING((AE_INFO, - "Unsupported module-level executable opcode " - "0x%.2X at table offset 0x%.4X", - op->common. - aml_opcode, - (u32) - (ACPI_PTR_DIFF - (aml_op_start, - walk_state-> - parser_state. 
- aml_start) + - sizeof(struct - acpi_table_header)))); - } - } - break; - } - } - /* Special processing for certain opcodes */ switch (op->common.aml_opcode) { @@ -302,104 +207,6 @@ acpi_ps_get_arguments(struct acpi_walk_state *walk_state, /******************************************************************************* * - * FUNCTION: acpi_ps_link_module_code - * - * PARAMETERS: parent_op - Parent parser op - * aml_start - Pointer to the AML - * aml_length - Length of executable AML - * owner_id - owner_id of module level code - * - * RETURN: None. - * - * DESCRIPTION: Wrap the module-level code with a method object and link the - * object to the global list. Note, the mutex field of the method - * object is used to link multiple module-level code objects. - * - * NOTE: In this legacy option, each block of detected executable AML - * code that is outside of any control method is wrapped with a temporary - * control method object and placed on a global list below. - * - * This function executes the module-level code for all tables only after - * all of the tables have been loaded. It is a legacy option and is - * not compatible with other ACPI implementations. See acpi_ns_load_table. - * - * This function will be removed when the legacy option is removed. - * - ******************************************************************************/ - -static void -acpi_ps_link_module_code(union acpi_parse_object *parent_op, - u8 *aml_start, u32 aml_length, acpi_owner_id owner_id) -{ - union acpi_operand_object *prev; - union acpi_operand_object *next; - union acpi_operand_object *method_obj; - struct acpi_namespace_node *parent_node; - - ACPI_FUNCTION_TRACE(ps_link_module_code); - - /* Get the tail of the list */ - - prev = next = acpi_gbl_module_code_list; - while (next) { - prev = next; - next = next->method.mutex; - } - - /* - * Insert the module level code into the list. Merge it if it is - * adjacent to the previous element. - */ - if (!prev || - ((prev->method.aml_start + prev->method.aml_length) != aml_start)) { - - /* Create, initialize, and link a new temporary method object */ - - method_obj = acpi_ut_create_internal_object(ACPI_TYPE_METHOD); - if (!method_obj) { - return_VOID; - } - - ACPI_DEBUG_PRINT((ACPI_DB_PARSE, - "Create/Link new code block: %p\n", - method_obj)); - - if (parent_op->common.node) { - parent_node = parent_op->common.node; - } else { - parent_node = acpi_gbl_root_node; - } - - method_obj->method.aml_start = aml_start; - method_obj->method.aml_length = aml_length; - method_obj->method.owner_id = owner_id; - method_obj->method.info_flags |= ACPI_METHOD_MODULE_LEVEL; - - /* - * Save the parent node in next_object. This is cheating, but we - * don't want to expand the method object. 
- */ - method_obj->method.next_object = - ACPI_CAST_PTR(union acpi_operand_object, parent_node); - - if (!prev) { - acpi_gbl_module_code_list = method_obj; - } else { - prev->method.mutex = method_obj; - } - } else { - ACPI_DEBUG_PRINT((ACPI_DB_PARSE, - "Appending to existing code block: %p\n", - prev)); - - prev->method.aml_length += aml_length; - } - - return_VOID; -} - -/******************************************************************************* - * * FUNCTION: acpi_ps_parse_loop * * PARAMETERS: walk_state - Current state @@ -457,8 +264,7 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state) ACPI_TO_POINTER (TRUE)); if (ACPI_FAILURE(status) - && ((status & AE_CODE_MASK) != - AE_CODE_CONTROL)) { + && !ACPI_CNTL_EXCEPTION(status)) { if (status == AE_AML_NO_RETURN_VALUE) { ACPI_EXCEPTION((AE_INFO, status, "Invoked method did not return a value")); diff --git a/drivers/acpi/acpica/psobject.c b/drivers/acpi/acpica/psobject.c index e1fd819a2955..496a1c1d5b0b 100644 --- a/drivers/acpi/acpica/psobject.c +++ b/drivers/acpi/acpica/psobject.c @@ -3,7 +3,7 @@ * * Module Name: psobject - Support for parse objects * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. * *****************************************************************************/ @@ -481,8 +481,7 @@ acpi_ps_complete_op(struct acpi_walk_state *walk_state, walk_state->opcode = (*op)->common.aml_opcode; status = walk_state->ascending_callback(walk_state); - status = - acpi_ps_next_parse_state(walk_state, *op, status); + (void)acpi_ps_next_parse_state(walk_state, *op, status); status2 = acpi_ps_complete_this_op(walk_state, *op); if (ACPI_FAILURE(status2)) { @@ -490,7 +489,6 @@ acpi_ps_complete_op(struct acpi_walk_state *walk_state, } } - status = AE_OK; break; case AE_CTRL_BREAK: @@ -512,14 +510,13 @@ acpi_ps_complete_op(struct acpi_walk_state *walk_state, walk_state->opcode = (*op)->common.aml_opcode; status = walk_state->ascending_callback(walk_state); - status = acpi_ps_next_parse_state(walk_state, *op, status); + (void)acpi_ps_next_parse_state(walk_state, *op, status); status2 = acpi_ps_complete_this_op(walk_state, *op); if (ACPI_FAILURE(status2)) { return_ACPI_STATUS(status2); } - status = AE_OK; break; case AE_CTRL_TERMINATE: @@ -639,7 +636,8 @@ acpi_status acpi_ps_complete_final_op(struct acpi_walk_state *walk_state, union acpi_parse_object *op, acpi_status status) { - acpi_status status2; + acpi_status return_status = status; + u8 ascending = TRUE; ACPI_FUNCTION_TRACE_PTR(ps_complete_final_op, walk_state); @@ -653,7 +651,7 @@ acpi_ps_complete_final_op(struct acpi_walk_state *walk_state, op)); do { if (op) { - if (walk_state->ascending_callback != NULL) { + if (ascending && walk_state->ascending_callback != NULL) { walk_state->op = op; walk_state->op_info = acpi_ps_get_opcode_info(op->common. 
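Context for the psloop.c removals above: the legacy path wrapped every block of executable AML found outside a control method in a temporary method object and chained it onto a global list, merging a new block into the previous one whenever the two byte ranges were contiguous, then ran the whole list only after every table had loaded. The stand-alone sketch below (hypothetical names and types, not ACPICA code) shows just that append-or-merge bookkeeping; the real code chained entries through the otherwise unused method.mutex field, as its own comment admits.

#include <stdint.h>
#include <stdlib.h>

/* One deferred block of module-level AML (illustrative only). */
struct aml_block {
	uint8_t *start;
	uint32_t length;
	struct aml_block *next;
};

static struct aml_block *block_list;

/* Append a block, or grow the tail when the new range is adjacent. */
static int link_module_block(uint8_t *start, uint32_t length)
{
	struct aml_block *tail = block_list;
	struct aml_block *node;

	while (tail && tail->next)
		tail = tail->next;

	if (tail && tail->start + tail->length == start) {
		tail->length += length;		/* contiguous: merge */
		return 0;
	}

	node = calloc(1, sizeof(*node));	/* otherwise: new list entry */
	if (!node)
		return -1;

	node->start = start;
	node->length = length;
	if (tail)
		tail->next = node;
	else
		block_list = node;
	return 0;
}

With deferred execution gone, module-level AML is executed in place while its table loads (see the matching removal of the acpi_ns_exec_module_code_list() call in tbdata.c further down), which is the in-order behaviour the deprecated option could not provide.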
@@ -675,49 +673,26 @@ acpi_ps_complete_final_op(struct acpi_walk_state *walk_state, } if (status == AE_CTRL_TERMINATE) { - status = AE_OK; - - /* Clean up */ - do { - if (op) { - status2 = - acpi_ps_complete_this_op - (walk_state, op); - if (ACPI_FAILURE - (status2)) { - return_ACPI_STATUS - (status2); - } - } - - acpi_ps_pop_scope(& - (walk_state-> - parser_state), - &op, - &walk_state-> - arg_types, - &walk_state-> - arg_count); - - } while (op); - - return_ACPI_STATUS(status); + ascending = FALSE; + return_status = AE_CTRL_TERMINATE; } else if (ACPI_FAILURE(status)) { /* First error is most important */ - (void) - acpi_ps_complete_this_op(walk_state, - op); - return_ACPI_STATUS(status); + ascending = FALSE; + return_status = status; } } - status2 = acpi_ps_complete_this_op(walk_state, op); - if (ACPI_FAILURE(status2)) { - return_ACPI_STATUS(status2); + status = acpi_ps_complete_this_op(walk_state, op); + if (ACPI_FAILURE(status)) { + ascending = FALSE; + if (ACPI_SUCCESS(return_status) || + return_status == AE_CTRL_TERMINATE) { + return_status = status; + } } } @@ -727,5 +702,5 @@ acpi_ps_complete_final_op(struct acpi_walk_state *walk_state, } while (op); - return_ACPI_STATUS(status); + return_ACPI_STATUS(return_status); } diff --git a/drivers/acpi/acpica/psopcode.c b/drivers/acpi/acpica/psopcode.c index 8d7dc98bad17..bf6103986f48 100644 --- a/drivers/acpi/acpica/psopcode.c +++ b/drivers/acpi/acpica/psopcode.c @@ -3,7 +3,7 @@ * * Module Name: psopcode - Parser/Interpreter opcode information table * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. * *****************************************************************************/ @@ -408,8 +408,8 @@ const struct acpi_opcode_info acpi_gbl_aml_op_info[AML_NUM_OPCODES] = { AML_HAS_ARGS | AML_NSOBJECT | AML_NSNODE | AML_DEFER | AML_FIELD | AML_CREATE), /* 4A */ ACPI_OP("Load", ARGP_LOAD_OP, ARGI_LOAD_OP, ACPI_TYPE_ANY, - AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_1T_0R, - AML_FLAGS_EXEC_1A_1T_0R), + AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_1T_1R, + AML_FLAGS_EXEC_1A_1T_1R), /* 4B */ ACPI_OP("Stall", ARGP_STALL_OP, ARGI_STALL_OP, ACPI_TYPE_ANY, AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_0T_0R, AML_FLAGS_EXEC_1A_0T_0R), @@ -603,7 +603,7 @@ const struct acpi_opcode_info acpi_gbl_aml_op_info[AML_NUM_OPCODES] = { /* 7E */ ACPI_OP("Timer", ARGP_TIMER_OP, ARGI_TIMER_OP, ACPI_TYPE_ANY, AML_CLASS_EXECUTE, AML_TYPE_EXEC_0A_0T_1R, - AML_FLAGS_EXEC_0A_0T_1R), + AML_FLAGS_EXEC_0A_0T_1R | AML_NO_OPERAND_RESOLVE), /* ACPI 5.0 opcodes */ diff --git a/drivers/acpi/acpica/psopinfo.c b/drivers/acpi/acpica/psopinfo.c index f310954eea59..532ea307a675 100644 --- a/drivers/acpi/acpica/psopinfo.c +++ b/drivers/acpi/acpica/psopinfo.c @@ -3,7 +3,7 @@ * * Module Name: psopinfo - AML opcode information functions and dispatch tables * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. 
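The acpi_ps_complete_final_op() rework above replaces an early-return cleanup loop with a single pass that finishes unwinding no matter what: return_status remembers the first failure (or AE_CTRL_TERMINATE), the ascending flag stops further callbacks once unwinding has become pure cleanup, and every remaining op is still completed so nothing leaks. A self-contained toy version of that pattern, with made-up status values and stub callbacks, might look like this:

#include <stdbool.h>
#include <stdio.h>

#define CTRL_TERMINATE	1	/* "stop parsing" control status */
#define OK		0
#define ERR_FIRST	-1
#define ERR_LATER	-2

/* Stand-ins for the per-op ascending callback and op completion. */
static int ascend_callback(int op)	{ return op == 2 ? ERR_FIRST : OK; }
static int complete_op(int op)		{ return op == 4 ? ERR_LATER : OK; }

/*
 * Unwind every op even after a failure: remember the first interesting
 * status, stop running callbacks once unwinding has turned into pure
 * cleanup, but still complete (release) every remaining op.
 */
static int complete_all_ops(const int *ops, int count, int status)
{
	int return_status = status;		/* first failure wins */
	bool run_callbacks = true;

	for (int i = 0; i < count; i++) {
		if (run_callbacks) {
			status = ascend_callback(ops[i]);
			if (status != OK) {		/* failure or TERMINATE */
				run_callbacks = false;
				return_status = status;
			}
		}

		status = complete_op(ops[i]);		/* must run for every op */
		if (status != OK) {
			run_callbacks = false;
			if (return_status == OK ||
			    return_status == CTRL_TERMINATE)
				return_status = status;	/* keep first hard error */
		}
	}

	return return_status;
}

int main(void)
{
	int ops[] = { 1, 2, 3, 4, 5 };

	/* Reports ERR_FIRST: the later completion error does not mask it. */
	printf("status = %d\n", complete_all_ops(ops, 5, OK));
	return 0;
}

The key property is that a later cleanup failure never masks the first meaningful status, mirroring the ACPI_SUCCESS(return_status) || return_status == AE_CTRL_TERMINATE guard in the hunk.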
* *****************************************************************************/ @@ -34,7 +34,7 @@ static const u8 acpi_gbl_argument_count[] = const struct acpi_opcode_info *acpi_ps_get_opcode_info(u16 opcode) { -#ifdef ACPI_DEBUG_OUTPUT +#if defined ACPI_ASL_COMPILER && defined ACPI_DEBUG_OUTPUT const char *opcode_name = "Unknown AML opcode"; #endif @@ -102,11 +102,11 @@ const struct acpi_opcode_info *acpi_ps_get_opcode_info(u16 opcode) default: break; } -#endif /* Unknown AML opcode */ ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "%s [%4.4X]\n", opcode_name, opcode)); +#endif return (&acpi_gbl_aml_op_info[_UNK]); } diff --git a/drivers/acpi/acpica/psparse.c b/drivers/acpi/acpica/psparse.c index 65603473b6cb..55a416e56fd8 100644 --- a/drivers/acpi/acpica/psparse.c +++ b/drivers/acpi/acpica/psparse.c @@ -3,7 +3,7 @@ * * Module Name: psparse - Parser top level AML parse routines * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. * *****************************************************************************/ @@ -383,7 +383,7 @@ acpi_ps_next_parse_state(struct acpi_walk_state *walk_state, default: status = callback_status; - if ((callback_status & AE_CODE_MASK) == AE_CODE_CONTROL) { + if (ACPI_CNTL_EXCEPTION(callback_status)) { status = AE_OK; } break; @@ -508,8 +508,8 @@ acpi_status acpi_ps_parse_aml(struct acpi_walk_state *walk_state) } /* - * If the transfer to the new method method call worked - *, a new walk state was created -- get it + * If the transfer to the new method method call worked, + * a new walk state was created -- get it */ walk_state = acpi_ds_get_current_walk_state(thread); continue; @@ -523,12 +523,12 @@ acpi_status acpi_ps_parse_aml(struct acpi_walk_state *walk_state) if (status == AE_ABORT_METHOD) { acpi_ns_print_node_pathname(walk_state-> method_node, - "Method aborted:"); + "Aborting method"); acpi_os_printf("\n"); } else { - ACPI_ERROR_METHOD - ("Method parse/execution failed", - walk_state->method_node, NULL, status); + ACPI_ERROR_METHOD("Aborting method", + walk_state->method_node, NULL, + status); } acpi_ex_enter_interpreter(); diff --git a/drivers/acpi/acpica/psscope.c b/drivers/acpi/acpica/psscope.c index 00c67bc249aa..c4e4483f0a0b 100644 --- a/drivers/acpi/acpica/psscope.c +++ b/drivers/acpi/acpica/psscope.c @@ -3,7 +3,7 @@ * * Module Name: psscope - Parser scope stack management routines * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/pstree.c b/drivers/acpi/acpica/pstree.c index 64a8329a17f1..5a285d3f2cdb 100644 --- a/drivers/acpi/acpica/pstree.c +++ b/drivers/acpi/acpica/pstree.c @@ -3,7 +3,7 @@ * * Module Name: pstree - Parser op tree manipulation/traversal/search * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/psutils.c b/drivers/acpi/acpica/psutils.c index ef8a5805a836..ada1dc304d25 100644 --- a/drivers/acpi/acpica/psutils.c +++ b/drivers/acpi/acpica/psutils.c @@ -3,7 +3,7 @@ * * Module Name: psutils - Parser miscellaneous utilities (Parser only) * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. 
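ACPI_CNTL_EXCEPTION(), used in the psloop.c and psparse.c hunks in place of the open-coded (status & AE_CODE_MASK) == AE_CODE_CONTROL test, simply asks whether a status belongs to the "control" class that steers the parser (break, continue, terminate, return-value) rather than reporting a real error. A minimal model of that encoding, with illustrative numeric values rather than the exact ones from acexcep.h, is:

#include <stdio.h>

/*
 * Illustrative layout modeled on ACPICA's status codes: the top bits of
 * a status word encode the exception class. Values are for demonstration.
 */
typedef unsigned int acpi_status;

#define AE_CODE_MASK		0xF000u
#define AE_CODE_CONTROL		0x4000u

#define AE_OK			0x0000u
#define AE_CTRL_TERMINATE	(AE_CODE_CONTROL | 0x0001u)

/* Equivalent of the open-coded test replaced by ACPI_CNTL_EXCEPTION(). */
#define ACPI_CNTL_EXCEPTION(s)	(((s) & AE_CODE_MASK) == AE_CODE_CONTROL)

int main(void)
{
	acpi_status status = AE_CTRL_TERMINATE;

	/* Control "exceptions" steer the parser; they are not hard errors. */
	if (ACPI_CNTL_EXCEPTION(status))
		status = AE_OK;

	printf("final status = 0x%04X\n", status);
	return 0;
}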
* *****************************************************************************/ diff --git a/drivers/acpi/acpica/pswalk.c b/drivers/acpi/acpica/pswalk.c index bd6af8c87d48..2f3ebcd8aebe 100644 --- a/drivers/acpi/acpica/pswalk.c +++ b/drivers/acpi/acpica/pswalk.c @@ -3,7 +3,7 @@ * * Module Name: pswalk - Parser routines to walk parsed op tree(s) * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/psxface.c b/drivers/acpi/acpica/psxface.c index 5743b22399a0..d480de075a90 100644 --- a/drivers/acpi/acpica/psxface.c +++ b/drivers/acpi/acpica/psxface.c @@ -3,7 +3,7 @@ * * Module Name: psxface - Parser external interfaces * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/rsaddr.c b/drivers/acpi/acpica/rsaddr.c index 5737c3af1902..f92010e667cd 100644 --- a/drivers/acpi/acpica/rsaddr.c +++ b/drivers/acpi/acpica/rsaddr.c @@ -277,7 +277,8 @@ acpi_rs_get_address_common(struct acpi_resource *resource, /* Validate the Resource Type */ if ((aml->address.resource_type > 2) && - (aml->address.resource_type < 0xC0)) { + (aml->address.resource_type < 0xC0) && + (aml->address.resource_type != 0x0A)) { return (FALSE); } diff --git a/drivers/acpi/acpica/rscalc.c b/drivers/acpi/acpica/rscalc.c index fcf129d27baa..242daf45e20e 100644 --- a/drivers/acpi/acpica/rscalc.c +++ b/drivers/acpi/acpica/rscalc.c @@ -320,6 +320,16 @@ acpi_rs_get_aml_length(struct acpi_resource *resource, break; + case ACPI_RESOURCE_TYPE_CLOCK_INPUT: + + total_size = (acpi_rs_length)(total_size + + resource->data. + clock_input. + resource_source. + string_length); + + break; + case ACPI_RESOURCE_TYPE_SERIAL_BUS: total_size = @@ -596,15 +606,17 @@ acpi_rs_get_list_length(u8 *aml_buffer, } break; - case ACPI_RESOURCE_NAME_SERIAL_BUS: + case ACPI_RESOURCE_NAME_SERIAL_BUS:{ - minimum_aml_resource_length = - acpi_gbl_resource_aml_serial_bus_sizes - [aml_resource->common_serial_bus.type]; - extra_struct_bytes += - aml_resource->common_serial_bus.resource_length - - minimum_aml_resource_length; - break; + minimum_aml_resource_length = + acpi_gbl_resource_aml_serial_bus_sizes + [aml_resource->common_serial_bus.type]; + extra_struct_bytes += + aml_resource->common_serial_bus. 
+ resource_length - + minimum_aml_resource_length; + break; + } case ACPI_RESOURCE_NAME_PIN_CONFIG: @@ -650,6 +662,13 @@ acpi_rs_get_list_length(u8 *aml_buffer, break; + case ACPI_RESOURCE_NAME_CLOCK_INPUT: + extra_struct_bytes = + acpi_rs_stream_option_length(resource_length, + minimum_aml_resource_length); + + break; + default: break; @@ -677,10 +696,10 @@ acpi_rs_get_list_length(u8 *aml_buffer, *size_needed += buffer_size; ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES, - "Type %.2X, AmlLength %.2X InternalLength %.2X\n", + "Type %.2X, AmlLength %.2X InternalLength %.2X%8X\n", acpi_ut_get_resource_type(aml_buffer), acpi_ut_get_descriptor_length(aml_buffer), - buffer_size)); + ACPI_FORMAT_UINT64(*size_needed))); /* * Point to the next resource within the AML stream using the length diff --git a/drivers/acpi/acpica/rscreate.c b/drivers/acpi/acpica/rscreate.c index 570ea0df8a1b..c659b54985a5 100644 --- a/drivers/acpi/acpica/rscreate.c +++ b/drivers/acpi/acpica/rscreate.c @@ -312,6 +312,9 @@ acpi_rs_create_pci_routing_table(union acpi_operand_object *package_object, path_buffer.pointer = user_prt->source; status = acpi_ns_handle_to_pathname((acpi_handle)node, &path_buffer, FALSE); + if (ACPI_FAILURE(status)) { + return_ACPI_STATUS(status); + } /* +1 to include null terminator */ diff --git a/drivers/acpi/acpica/rsdump.c b/drivers/acpi/acpica/rsdump.c index 6601e71b45e3..5b7d7074ce4f 100644 --- a/drivers/acpi/acpica/rsdump.c +++ b/drivers/acpi/acpica/rsdump.c @@ -48,6 +48,7 @@ static void acpi_rs_dump_address_common(union acpi_resource_data *resource); static void acpi_rs_dump_descriptor(void *resource, struct acpi_rsdump_info *table); +#ifdef ACPI_DEBUGGER /******************************************************************************* * * FUNCTION: acpi_rs_dump_resource_list @@ -87,6 +88,9 @@ void acpi_rs_dump_resource_list(struct acpi_resource *resource_list) ("Invalid descriptor type (%X) in resource list\n", resource_list->type); return; + } else if (!resource_list->type) { + ACPI_ERROR((AE_INFO, "Invalid Zero Resource Type")); + return; } /* Sanity check the length. 
It must not be zero, or we loop forever */ @@ -157,6 +161,7 @@ void acpi_rs_dump_irq_list(u8 *route_table) prt_element, prt_element->length); } } +#endif /******************************************************************************* * @@ -258,6 +263,11 @@ acpi_rs_dump_descriptor(void *resource, struct acpi_rsdump_info *table) table->pointer[*target & 0x07]); break; + case ACPI_RSD_6BITFLAG: + + acpi_rs_out_integer8(name, (ACPI_GET8(target) & 0x3F)); + break; + case ACPI_RSD_SHORTLIST: /* * Short byte list (single line output) for DMA and IRQ resources diff --git a/drivers/acpi/acpica/rsdumpinfo.c b/drivers/acpi/acpica/rsdumpinfo.c index 77a3263169fa..998a79cc09c2 100644 --- a/drivers/acpi/acpica/rsdumpinfo.c +++ b/drivers/acpi/acpica/rsdumpinfo.c @@ -32,7 +32,7 @@ struct acpi_rsdump_info acpi_rs_dump_irq[7] = { acpi_gbl_he_decode}, {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(irq.polarity), "Polarity", acpi_gbl_ll_decode}, - {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(irq.sharable), "Sharing", + {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(irq.shareable), "Sharing", acpi_gbl_shr_decode}, {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(irq.interrupt_count), "Interrupt Count", NULL}, @@ -222,7 +222,7 @@ struct acpi_rsdump_info acpi_rs_dump_ext_irq[8] = { "Triggering", acpi_gbl_he_decode}, {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(extended_irq.polarity), "Polarity", acpi_gbl_ll_decode}, - {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(extended_irq.sharable), "Sharing", + {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(extended_irq.shareable), "Sharing", acpi_gbl_shr_decode}, {ACPI_RSD_SOURCE, ACPI_RSD_OFFSET(extended_irq.resource_source), NULL, NULL}, @@ -255,7 +255,7 @@ struct acpi_rsdump_info acpi_rs_dump_gpio[16] = { "ProducerConsumer", acpi_gbl_consume_decode}, {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(gpio.pin_config), "PinConfig", acpi_gbl_ppc_decode}, - {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(gpio.sharable), "Sharing", + {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(gpio.shareable), "Sharing", acpi_gbl_shr_decode}, {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(gpio.io_restriction), "IoRestriction", acpi_gbl_ior_decode}, @@ -285,7 +285,7 @@ struct acpi_rsdump_info acpi_rs_dump_pin_function[10] = { "RevisionId", NULL}, {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(pin_function.pin_config), "PinConfig", acpi_gbl_ppc_decode}, - {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(pin_function.sharable), "Sharing", + {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(pin_function.shareable), "Sharing", acpi_gbl_shr_decode}, {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(pin_function.function_number), "FunctionNumber", NULL}, @@ -301,6 +301,23 @@ struct acpi_rsdump_info acpi_rs_dump_pin_function[10] = { "VendorData", NULL}, }; +struct acpi_rsdump_info acpi_rs_dump_clock_input[7] = { + {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_clock_input), + "ClockInput", NULL}, + {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(clock_input.revision_id), "RevisionId", + NULL}, + {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(clock_input.frequency_numerator), + "FrequencyNumerator", NULL}, + {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(clock_input.frequency_divisor), + "FrequencyDivisor", NULL}, + {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(clock_input.scale), "Scale", + acpi_gbl_clock_input_scale}, + {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(clock_input.mode), "Mode", + acpi_gbl_clock_input_mode}, + {ACPI_RSD_SOURCE, ACPI_RSD_OFFSET(clock_input.resource_source), + "ResourceSource", NULL}, +}; + struct acpi_rsdump_info acpi_rs_dump_pin_config[11] = { {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_pin_config), "PinConfig", NULL}, @@ -308,7 +325,7 @@ struct acpi_rsdump_info acpi_rs_dump_pin_config[11] = { 
NULL}, {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(pin_config.producer_consumer), "ProducerConsumer", acpi_gbl_consume_decode}, - {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(pin_config.sharable), "Sharing", + {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(pin_config.shareable), "Sharing", acpi_gbl_shr_decode}, {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(pin_config.pin_config_type), "PinConfigType", NULL}, @@ -353,7 +370,7 @@ struct acpi_rsdump_info acpi_rs_dump_pin_group_function[9] = { {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(pin_group_function.producer_consumer), "ProducerConsumer", acpi_gbl_consume_decode}, - {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(pin_group_function.sharable), + {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(pin_group_function.shareable), "Sharing", acpi_gbl_shr_decode}, {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(pin_group_function.function_number), "FunctionNumber", NULL}, @@ -375,7 +392,7 @@ struct acpi_rsdump_info acpi_rs_dump_pin_group_config[10] = { "RevisionId", NULL}, {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(pin_group_config.producer_consumer), "ProducerConsumer", acpi_gbl_consume_decode}, - {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(pin_group_config.sharable), + {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(pin_group_config.shareable), "Sharing", acpi_gbl_shr_decode}, {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(pin_group_config.pin_config_type), "PinConfigType", NULL}, @@ -421,6 +438,32 @@ struct acpi_rsdump_info acpi_rs_dump_common_serial_bus[11] = { ACPI_RS_DUMP_COMMON_SERIAL_BUS }; +struct acpi_rsdump_info acpi_rs_dump_csi2_serial_bus[11] = { + { ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_csi2_serial_bus), + "Camera Serial Bus", NULL }, + { ACPI_RSD_UINT8, ACPI_RSD_OFFSET(csi2_serial_bus.revision_id), + "RevisionId", NULL }, + { ACPI_RSD_UINT8, ACPI_RSD_OFFSET(csi2_serial_bus.type), "Type", + acpi_gbl_sbt_decode }, + { ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(csi2_serial_bus.producer_consumer), + "ProducerConsumer", acpi_gbl_consume_decode }, + { ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(csi2_serial_bus.slave_mode), + "SlaveMode", acpi_gbl_sm_decode }, + { ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(csi2_serial_bus.phy_type), + "PhyType", acpi_gbl_phy_decode }, + { ACPI_RSD_6BITFLAG, + ACPI_RSD_OFFSET(csi2_serial_bus.local_port_instance), + "LocalPortInstance", NULL }, + { ACPI_RSD_UINT8, ACPI_RSD_OFFSET(csi2_serial_bus.type_revision_id), + "TypeRevisionId", NULL }, + { ACPI_RSD_UINT16, ACPI_RSD_OFFSET(csi2_serial_bus.vendor_length), + "VendorLength", NULL }, + { ACPI_RSD_SHORTLISTX, ACPI_RSD_OFFSET(csi2_serial_bus.vendor_data), + "VendorData", NULL }, + { ACPI_RSD_SOURCE, ACPI_RSD_OFFSET(csi2_serial_bus.resource_source), + "ResourceSource", NULL }, +}; + struct acpi_rsdump_info acpi_rs_dump_i2c_serial_bus[14] = { {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_i2c_serial_bus), "I2C Serial Bus", NULL}, diff --git a/drivers/acpi/acpica/rsinfo.c b/drivers/acpi/acpica/rsinfo.c index 6e2e596902eb..ad7465ddfe13 100644 --- a/drivers/acpi/acpica/rsinfo.c +++ b/drivers/acpi/acpica/rsinfo.c @@ -49,6 +49,7 @@ struct acpi_rsconvert_info *acpi_gbl_set_resource_dispatch[] = { acpi_rs_convert_pin_group, /* 0x16, ACPI_RESOURCE_TYPE_PIN_GROUP */ acpi_rs_convert_pin_group_function, /* 0x17, ACPI_RESOURCE_TYPE_PIN_GROUP_FUNCTION */ acpi_rs_convert_pin_group_config, /* 0x18, ACPI_RESOURCE_TYPE_PIN_GROUP_CONFIG */ + acpi_rs_convert_clock_input, /* 0x19, ACPI_RESOURCE_TYPE_CLOCK_INPUT */ }; /* Dispatch tables for AML-to-resource (Get Resource) conversion functions */ @@ -94,15 +95,17 @@ struct acpi_rsconvert_info *acpi_gbl_get_resource_dispatch[] = { acpi_rs_convert_pin_group, /* 0x10, 
ACPI_RESOURCE_NAME_PIN_GROUP */ acpi_rs_convert_pin_group_function, /* 0x11, ACPI_RESOURCE_NAME_PIN_GROUP_FUNCTION */ acpi_rs_convert_pin_group_config, /* 0x12, ACPI_RESOURCE_NAME_PIN_GROUP_CONFIG */ + acpi_rs_convert_clock_input, /* 0x13, ACPI_RESOURCE_NAME_CLOCK_INPUT */ }; -/* Subtype table for serial_bus -- I2C, SPI, and UART */ +/* Subtype table for serial_bus -- I2C, SPI, UART, and CSI2 */ struct acpi_rsconvert_info *acpi_gbl_convert_resource_serial_bus_dispatch[] = { NULL, acpi_rs_convert_i2c_serial_bus, acpi_rs_convert_spi_serial_bus, acpi_rs_convert_uart_serial_bus, + acpi_rs_convert_csi2_serial_bus }; #if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DISASSEMBLER) || defined(ACPI_DEBUGGER) @@ -135,6 +138,7 @@ struct acpi_rsdump_info *acpi_gbl_dump_resource_dispatch[] = { acpi_rs_dump_pin_group, /* ACPI_RESOURCE_TYPE_PIN_GROUP */ acpi_rs_dump_pin_group_function, /* ACPI_RESOURCE_TYPE_PIN_GROUP_FUNCTION */ acpi_rs_dump_pin_group_config, /* ACPI_RESOURCE_TYPE_PIN_GROUP_CONFIG */ + acpi_rs_dump_clock_input, /* ACPI_RESOURCE_TYPE_CLOCK_INPUT */ }; struct acpi_rsdump_info *acpi_gbl_dump_serial_bus_dispatch[] = { @@ -142,6 +146,7 @@ struct acpi_rsdump_info *acpi_gbl_dump_serial_bus_dispatch[] = { acpi_rs_dump_i2c_serial_bus, /* AML_RESOURCE_I2C_BUS_TYPE */ acpi_rs_dump_spi_serial_bus, /* AML_RESOURCE_SPI_BUS_TYPE */ acpi_rs_dump_uart_serial_bus, /* AML_RESOURCE_UART_BUS_TYPE */ + acpi_rs_dump_csi2_serial_bus, /* AML_RESOURCE_CSI2_BUS_TYPE */ }; #endif @@ -176,6 +181,7 @@ const u8 acpi_gbl_aml_resource_sizes[] = { sizeof(struct aml_resource_pin_group), /* ACPI_RESOURCE_TYPE_PIN_GROUP */ sizeof(struct aml_resource_pin_group_function), /* ACPI_RESOURCE_TYPE_PIN_GROUP_FUNCTION */ sizeof(struct aml_resource_pin_group_config), /* ACPI_RESOURCE_TYPE_PIN_GROUP_CONFIG */ + sizeof(struct aml_resource_clock_input), /* ACPI_RESOURCE_TYPE_CLOCK_INPUT */ }; const u8 acpi_gbl_resource_struct_sizes[] = { @@ -219,6 +225,7 @@ const u8 acpi_gbl_resource_struct_sizes[] = { ACPI_RS_SIZE(struct acpi_resource_pin_group), ACPI_RS_SIZE(struct acpi_resource_pin_group_function), ACPI_RS_SIZE(struct acpi_resource_pin_group_config), + ACPI_RS_SIZE(struct acpi_resource_clock_input), }; const u8 acpi_gbl_aml_resource_serial_bus_sizes[] = { @@ -226,6 +233,7 @@ const u8 acpi_gbl_aml_resource_serial_bus_sizes[] = { sizeof(struct aml_resource_i2c_serialbus), sizeof(struct aml_resource_spi_serialbus), sizeof(struct aml_resource_uart_serialbus), + sizeof(struct aml_resource_csi2_serialbus), }; const u8 acpi_gbl_resource_struct_serial_bus_sizes[] = { @@ -233,4 +241,5 @@ const u8 acpi_gbl_resource_struct_serial_bus_sizes[] = { ACPI_RS_SIZE(struct acpi_resource_i2c_serialbus), ACPI_RS_SIZE(struct acpi_resource_spi_serialbus), ACPI_RS_SIZE(struct acpi_resource_uart_serialbus), + ACPI_RS_SIZE(struct acpi_resource_csi2_serialbus), }; diff --git a/drivers/acpi/acpica/rsirq.c b/drivers/acpi/acpica/rsirq.c index 134b67cd48ee..b0d970efa072 100644 --- a/drivers/acpi/acpica/rsirq.c +++ b/drivers/acpi/acpica/rsirq.c @@ -54,7 +54,7 @@ struct acpi_rsconvert_info acpi_rs_get_irq[9] = { AML_OFFSET(irq.flags), 3}, - {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.irq.sharable), + {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.irq.shareable), AML_OFFSET(irq.flags), 4}, @@ -92,7 +92,7 @@ struct acpi_rsconvert_info acpi_rs_set_irq[14] = { AML_OFFSET(irq.flags), 3}, - {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.irq.sharable), + {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.irq.shareable), AML_OFFSET(irq.flags), 4}, @@ -139,7 +139,7 @@ struct acpi_rsconvert_info 
acpi_rs_set_irq[14] = { ACPI_ACTIVE_HIGH}, {ACPI_RSC_EXIT_NE, ACPI_RSC_COMPARE_VALUE, - ACPI_RS_OFFSET(data.irq.sharable), + ACPI_RS_OFFSET(data.irq.shareable), ACPI_EXCLUSIVE}, /* We can optimize to a 2-byte irq_no_flags() descriptor */ @@ -178,7 +178,7 @@ struct acpi_rsconvert_info acpi_rs_convert_ext_irq[10] = { AML_OFFSET(extended_irq.flags), 2}, - {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.extended_irq.sharable), + {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.extended_irq.shareable), AML_OFFSET(extended_irq.flags), 3}, diff --git a/drivers/acpi/acpica/rslist.c b/drivers/acpi/acpica/rslist.c index 0307675d37be..e46efaa889cd 100644 --- a/drivers/acpi/acpica/rslist.c +++ b/drivers/acpi/acpica/rslist.c @@ -59,7 +59,7 @@ acpi_rs_convert_aml_to_resources(u8 * aml, AML_RESOURCE_MAX_SERIALBUSTYPE) { conversion_table = NULL; } else { - /* This is an I2C, SPI, or UART serial_bus descriptor */ + /* This is an I2C, SPI, UART, or CSI2 serial_bus descriptor */ conversion_table = acpi_gbl_convert_resource_serial_bus_dispatch @@ -89,6 +89,11 @@ acpi_rs_convert_aml_to_resources(u8 * aml, return_ACPI_STATUS(status); } + if (!resource->length) { + ACPI_EXCEPTION((AE_INFO, status, + "Zero-length resource returned from RsConvertAmlToResource")); + } + ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES, "Type %.2X, AmlLength %.2X InternalLength %.2X\n", acpi_ut_get_resource_type(aml), length, @@ -158,7 +163,7 @@ acpi_rs_convert_resources_to_aml(struct acpi_resource *resource, AML_RESOURCE_MAX_SERIALBUSTYPE) { conversion_table = NULL; } else { - /* This is an I2C, SPI, or UART serial_bus descriptor */ + /* This is an I2C, SPI, UART or CSI2 serial_bus descriptor */ conversion_table = acpi_gbl_convert_resource_serial_bus_dispatch diff --git a/drivers/acpi/acpica/rsmisc.c b/drivers/acpi/acpica/rsmisc.c index 1763a3dbc9b1..6e8e98cf598d 100644 --- a/drivers/acpi/acpica/rsmisc.c +++ b/drivers/acpi/acpica/rsmisc.c @@ -70,6 +70,8 @@ acpi_rs_convert_aml_to_resource(struct acpi_resource *resource, */ count = INIT_TABLE_LENGTH(info); while (count) { + target = NULL; + /* * Source is the external AML byte stream buffer, * destination is the internal resource descriptor @@ -120,6 +122,14 @@ acpi_rs_convert_aml_to_resource(struct acpi_resource *resource, ((ACPI_GET8(source) >> info->value) & 0x07)); break; + case ACPI_RSC_6BITFLAG: + /* + * Mask and shift the flag bits + */ + ACPI_SET8(destination, + ((ACPI_GET8(source) >> info->value) & 0x3F)); + break; + case ACPI_RSC_COUNT: item_count = ACPI_GET8(source); @@ -184,7 +194,8 @@ acpi_rs_convert_aml_to_resource(struct acpi_resource *resource, case ACPI_RSC_COUNT_SERIAL_VEN: - item_count = ACPI_GET16(source) - info->value; + ACPI_MOVE_16_TO_16(&temp16, source); + item_count = temp16 - info->value; resource->length = resource->length + item_count; ACPI_SET16(destination, item_count); @@ -192,9 +203,10 @@ acpi_rs_convert_aml_to_resource(struct acpi_resource *resource, case ACPI_RSC_COUNT_SERIAL_RES: + ACPI_MOVE_16_TO_16(&temp16, source); item_count = (aml_resource_length + sizeof(struct aml_resource_large_header)) - - ACPI_GET16(source) - info->value; + - temp16 - info->value; resource->length = resource->length + item_count; ACPI_SET16(destination, item_count); @@ -279,9 +291,9 @@ acpi_rs_convert_aml_to_resource(struct acpi_resource *resource, /* Copy the resource_source string */ + ACPI_MOVE_16_TO_16(&temp16, source); source = - ACPI_ADD_PTR(void, aml, - (ACPI_GET16(source) + info->value)); + ACPI_ADD_PTR(void, aml, (temp16 + info->value)); acpi_rs_move_data(target, source, item_count, 
info->opcode); break; @@ -509,6 +521,15 @@ acpi_rs_convert_resource_to_aml(struct acpi_resource *resource, value)); break; + case ACPI_RSC_6BITFLAG: + /* + * Mask and shift the flag bits + */ + ACPI_SET_BIT(*ACPI_CAST8(destination), (u8) + ((ACPI_GET8(source) & 0x3F) << info-> + value)); + break; + case ACPI_RSC_COUNT: item_count = ACPI_GET8(source); diff --git a/drivers/acpi/acpica/rsserial.c b/drivers/acpi/acpica/rsserial.c index d073ebb51f90..279bfa27da94 100644 --- a/drivers/acpi/acpica/rsserial.c +++ b/drivers/acpi/acpica/rsserial.c @@ -39,7 +39,7 @@ struct acpi_rsconvert_info acpi_rs_convert_gpio[18] = { AML_OFFSET(gpio.flags), 0}, - {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.gpio.sharable), + {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.gpio.shareable), AML_OFFSET(gpio.int_flags), 3}, @@ -111,6 +111,55 @@ struct acpi_rsconvert_info acpi_rs_convert_gpio[18] = { /******************************************************************************* * + * acpi_rs_convert_clock_input + * + ******************************************************************************/ + +struct acpi_rsconvert_info acpi_rs_convert_clock_input[8] = { + {ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_CLOCK_INPUT, + ACPI_RS_SIZE(struct acpi_resource_clock_input), + ACPI_RSC_TABLE_SIZE(acpi_rs_convert_clock_input)}, + + {ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_CLOCK_INPUT, + sizeof(struct aml_resource_clock_input), + 0} + , + + {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.clock_input.revision_id), + AML_OFFSET(clock_input.revision_id), + 1} + , + + {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.clock_input.mode), + AML_OFFSET(clock_input.flags), + 0} + , + + {ACPI_RSC_2BITFLAG, ACPI_RS_OFFSET(data.clock_input.scale), + AML_OFFSET(clock_input.flags), + 1} + , + + {ACPI_RSC_MOVE16, ACPI_RS_OFFSET(data.clock_input.frequency_divisor), + AML_OFFSET(clock_input.frequency_divisor), + 2} + , + + {ACPI_RSC_MOVE32, ACPI_RS_OFFSET(data.clock_input.frequency_numerator), + AML_OFFSET(clock_input.frequency_numerator), + 4} + , + + /* Resource Source */ + {ACPI_RSC_SOURCE, ACPI_RS_OFFSET(data.clock_input.resource_source), + 0, + sizeof(struct aml_resource_clock_input)} + , + +}; + +/******************************************************************************* + * * acpi_rs_convert_pinfunction * ******************************************************************************/ @@ -128,7 +177,7 @@ struct acpi_rsconvert_info acpi_rs_convert_pin_function[13] = { AML_OFFSET(pin_function.revision_id), 1}, - {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.pin_function.sharable), + {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.pin_function.shareable), AML_OFFSET(pin_function.flags), 0}, @@ -187,6 +236,81 @@ struct acpi_rsconvert_info acpi_rs_convert_pin_function[13] = { /******************************************************************************* * + * acpi_rs_convert_csi2_serial_bus + * + ******************************************************************************/ + +struct acpi_rsconvert_info acpi_rs_convert_csi2_serial_bus[14] = { + { ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_SERIAL_BUS, + ACPI_RS_SIZE(struct acpi_resource_csi2_serialbus), + ACPI_RSC_TABLE_SIZE(acpi_rs_convert_csi2_serial_bus) }, + + { ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_SERIAL_BUS, + sizeof(struct aml_resource_csi2_serialbus), + 0 }, + + { ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.common_serial_bus.revision_id), + AML_OFFSET(common_serial_bus.revision_id), + 1 }, + + { ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.csi2_serial_bus.type), + AML_OFFSET(csi2_serial_bus.type), + 1 }, + + { ACPI_RSC_1BITFLAG, + 
ACPI_RS_OFFSET(data.csi2_serial_bus.producer_consumer), + AML_OFFSET(csi2_serial_bus.flags), + 1 }, + + { ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.csi2_serial_bus.slave_mode), + AML_OFFSET(csi2_serial_bus.flags), + 0 }, + + { ACPI_RSC_2BITFLAG, ACPI_RS_OFFSET(data.csi2_serial_bus.phy_type), + AML_OFFSET(csi2_serial_bus.type_specific_flags), + 0 }, + + { ACPI_RSC_6BITFLAG, + ACPI_RS_OFFSET(data.csi2_serial_bus.local_port_instance), + AML_OFFSET(csi2_serial_bus.type_specific_flags), + 2 }, + + { ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.csi2_serial_bus.type_revision_id), + AML_OFFSET(csi2_serial_bus.type_revision_id), + 1 }, + + /* Vendor data */ + + { ACPI_RSC_COUNT_SERIAL_VEN, + ACPI_RS_OFFSET(data.csi2_serial_bus.vendor_length), + AML_OFFSET(csi2_serial_bus.type_data_length), + AML_RESOURCE_CSI2_MIN_DATA_LEN }, + + { ACPI_RSC_MOVE_SERIAL_VEN, + ACPI_RS_OFFSET(data.csi2_serial_bus.vendor_data), + 0, + sizeof(struct aml_resource_csi2_serialbus) }, + + /* Resource Source */ + + { ACPI_RSC_MOVE8, + ACPI_RS_OFFSET(data.csi2_serial_bus.resource_source.index), + AML_OFFSET(csi2_serial_bus.res_source_index), + 1 }, + + { ACPI_RSC_COUNT_SERIAL_RES, + ACPI_RS_OFFSET(data.csi2_serial_bus.resource_source.string_length), + AML_OFFSET(csi2_serial_bus.type_data_length), + sizeof(struct aml_resource_csi2_serialbus) }, + + { ACPI_RSC_MOVE_SERIAL_RES, + ACPI_RS_OFFSET(data.csi2_serial_bus.resource_source.string_ptr), + AML_OFFSET(csi2_serial_bus.type_data_length), + sizeof(struct aml_resource_csi2_serialbus) }, +}; + +/******************************************************************************* + * * acpi_rs_convert_i2c_serial_bus * ******************************************************************************/ @@ -518,7 +642,7 @@ struct acpi_rsconvert_info acpi_rs_convert_pin_config[14] = { AML_OFFSET(pin_config.revision_id), 1}, - {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.pin_config.sharable), + {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.pin_config.shareable), AML_OFFSET(pin_config.flags), 0}, @@ -658,7 +782,7 @@ struct acpi_rsconvert_info acpi_rs_convert_pin_group_function[13] = { AML_OFFSET(pin_group_function.revision_id), 1}, - {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.pin_group_function.sharable), + {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.pin_group_function.shareable), AML_OFFSET(pin_group_function.flags), 0}, @@ -735,7 +859,7 @@ struct acpi_rsconvert_info acpi_rs_convert_pin_group_config[14] = { AML_OFFSET(pin_group_config.revision_id), 1}, - {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.pin_group_config.sharable), + {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.pin_group_config.shareable), AML_OFFSET(pin_group_config.flags), 0}, diff --git a/drivers/acpi/acpica/rsxface.c b/drivers/acpi/acpica/rsxface.c index 1d6f136e4068..c62be3d91712 100644 --- a/drivers/acpi/acpica/rsxface.c +++ b/drivers/acpi/acpica/rsxface.c @@ -603,10 +603,10 @@ acpi_walk_resources(acpi_handle device_handle, /* Parameter validation */ if (!device_handle || !user_function || !name || - (!ACPI_COMPARE_NAME(name, METHOD_NAME__CRS) && - !ACPI_COMPARE_NAME(name, METHOD_NAME__PRS) && - !ACPI_COMPARE_NAME(name, METHOD_NAME__AEI) && - !ACPI_COMPARE_NAME(name, METHOD_NAME__DMA))) { + (!ACPI_COMPARE_NAMESEG(name, METHOD_NAME__CRS) && + !ACPI_COMPARE_NAMESEG(name, METHOD_NAME__PRS) && + !ACPI_COMPARE_NAMESEG(name, METHOD_NAME__AEI) && + !ACPI_COMPARE_NAMESEG(name, METHOD_NAME__DMA))) { return_ACPI_STATUS(AE_BAD_PARAMETER); } diff --git a/drivers/acpi/acpica/tbdata.c b/drivers/acpi/acpica/tbdata.c index 862149c8a208..5b98e09fff76 100644 --- 
a/drivers/acpi/acpica/tbdata.c +++ b/drivers/acpi/acpica/tbdata.c @@ -3,7 +3,7 @@ * * Module Name: tbdata - Table manager data structure functions * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. * *****************************************************************************/ @@ -89,14 +89,27 @@ acpi_tb_init_table_descriptor(struct acpi_table_desc *table_desc, { /* - * Initialize the table descriptor. Set the pointer to NULL, since the - * table is not fully mapped at this time. + * Initialize the table descriptor. Set the pointer to NULL for external + * tables, since the table is not fully mapped at this time. */ memset(table_desc, 0, sizeof(struct acpi_table_desc)); table_desc->address = address; table_desc->length = table->length; table_desc->flags = flags; ACPI_MOVE_32_TO_32(table_desc->signature.ascii, table->signature); + + switch (table_desc->flags & ACPI_TABLE_ORIGIN_MASK) { + case ACPI_TABLE_ORIGIN_INTERNAL_VIRTUAL: + case ACPI_TABLE_ORIGIN_EXTERNAL_VIRTUAL: + + table_desc->pointer = table; + break; + + case ACPI_TABLE_ORIGIN_INTERNAL_PHYSICAL: + default: + + break; + } } /******************************************************************************* @@ -132,9 +145,7 @@ acpi_tb_acquire_table(struct acpi_table_desc *table_desc, case ACPI_TABLE_ORIGIN_INTERNAL_VIRTUAL: case ACPI_TABLE_ORIGIN_EXTERNAL_VIRTUAL: - table = ACPI_CAST_PTR(struct acpi_table_header, - ACPI_PHYSADDR_TO_PTR(table_desc-> - address)); + table = table_desc->pointer; break; default: @@ -196,6 +207,8 @@ acpi_tb_release_table(struct acpi_table_header *table, * PARAMETERS: table_desc - Table descriptor to be acquired * address - Address of the table * flags - Allocation flags of the table + * table - Pointer to the table (required for virtual + * origins, optional for physical) * * RETURN: Status * @@ -208,49 +221,52 @@ acpi_tb_release_table(struct acpi_table_header *table, acpi_status acpi_tb_acquire_temp_table(struct acpi_table_desc *table_desc, - acpi_physical_address address, u8 flags) + acpi_physical_address address, + u8 flags, struct acpi_table_header *table) { - struct acpi_table_header *table_header; + u8 mapped_table = FALSE; switch (flags & ACPI_TABLE_ORIGIN_MASK) { case ACPI_TABLE_ORIGIN_INTERNAL_PHYSICAL: /* Get the length of the full table from the header */ - table_header = - acpi_os_map_memory(address, - sizeof(struct acpi_table_header)); - if (!table_header) { - return (AE_NO_MEMORY); + if (!table) { + table = + acpi_os_map_memory(address, + sizeof(struct + acpi_table_header)); + if (!table) { + return (AE_NO_MEMORY); + } + + mapped_table = TRUE; } - acpi_tb_init_table_descriptor(table_desc, address, flags, - table_header); - acpi_os_unmap_memory(table_header, - sizeof(struct acpi_table_header)); - return (AE_OK); + break; case ACPI_TABLE_ORIGIN_INTERNAL_VIRTUAL: case ACPI_TABLE_ORIGIN_EXTERNAL_VIRTUAL: - table_header = ACPI_CAST_PTR(struct acpi_table_header, - ACPI_PHYSADDR_TO_PTR(address)); - if (!table_header) { - return (AE_NO_MEMORY); + if (!table) { + return (AE_BAD_PARAMETER); } - acpi_tb_init_table_descriptor(table_desc, address, flags, - table_header); - return (AE_OK); + break; default: - break; + /* Table is not valid yet */ + + return (AE_NO_MEMORY); } - /* Table is not valid yet */ + acpi_tb_init_table_descriptor(table_desc, address, flags, table); + if (mapped_table) { + acpi_os_unmap_memory(table, sizeof(struct acpi_table_header)); + } - return (AE_NO_MEMORY); + return (AE_OK); } 
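The tbdata.c change above lets callers hand acpi_tb_acquire_temp_table() an existing header pointer: physically addressed tables still get only their fixed-size header mapped just long enough to read the length, while virtual-origin tables must now supply the pointer, which the descriptor keeps. A compilable toy of that flow, using invented helper names and an identity-mapped "physical" address in place of acpi_os_map_memory(), could look like:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* Hypothetical stand-ins for acpi_table_header / acpi_table_desc. */
struct table_header { char signature[4]; uint32_t length; };

struct table_desc {
	uintptr_t address;
	struct table_header *pointer;	/* set only for virtual-origin tables */
	uint32_t length;
	int flags;
};

enum { ORIGIN_PHYSICAL, ORIGIN_VIRTUAL };

static struct table_header *map_header(uintptr_t addr)
{
	return (struct table_header *)addr;	/* pretend acpi_os_map_memory() */
}

static void unmap_header(struct table_header *hdr) { (void)hdr; }

/*
 * Physical tables may arrive without a pointer: map only the fixed-size
 * header long enough to read the length, then unmap. Virtual tables must
 * supply the pointer, which is kept in the descriptor.
 */
static int acquire_temp_table(struct table_desc *desc, uintptr_t address,
			      int flags, struct table_header *table)
{
	int mapped = 0;

	if (!table) {
		if (flags != ORIGIN_PHYSICAL)
			return -1;		/* AE_BAD_PARAMETER */
		table = map_header(address);
		if (!table)
			return -1;		/* AE_NO_MEMORY */
		mapped = 1;
	}

	memset(desc, 0, sizeof(*desc));
	desc->address = address;
	desc->length = table->length;
	desc->flags = flags;
	if (flags == ORIGIN_VIRTUAL)
		desc->pointer = table;

	if (mapped)
		unmap_header(table);		/* header only needed transiently */

	return 0;
}

int main(void)
{
	struct table_header ssdt = { "SSDT", 0x40 };
	struct table_desc desc;

	if (!acquire_temp_table(&desc, (uintptr_t)&ssdt, ORIGIN_PHYSICAL, NULL))
		printf("%.4s length 0x%X\n", ssdt.signature, desc.length);
	return 0;
}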
/******************************************************************************* @@ -335,7 +351,19 @@ void acpi_tb_invalidate_table(struct acpi_table_desc *table_desc) acpi_tb_release_table(table_desc->pointer, table_desc->length, table_desc->flags); - table_desc->pointer = NULL; + + switch (table_desc->flags & ACPI_TABLE_ORIGIN_MASK) { + case ACPI_TABLE_ORIGIN_INTERNAL_PHYSICAL: + + table_desc->pointer = NULL; + break; + + case ACPI_TABLE_ORIGIN_INTERNAL_VIRTUAL: + case ACPI_TABLE_ORIGIN_EXTERNAL_VIRTUAL: + default: + + break; + } return_VOID; } @@ -480,7 +508,8 @@ acpi_tb_verify_temp_table(struct acpi_table_desc *table_desc, /* If a particular signature is expected (DSDT/FACS), it must match */ - if (signature && !ACPI_COMPARE_NAME(&table_desc->signature, signature)) { + if (signature && + !ACPI_COMPARE_NAMESEG(&table_desc->signature, signature)) { ACPI_BIOS_ERROR((AE_INFO, "Invalid signature 0x%X for ACPI table, expected [%s]", table_desc->signature.integer, signature)); @@ -493,7 +522,7 @@ acpi_tb_verify_temp_table(struct acpi_table_desc *table_desc, /* Verify the checksum */ status = - acpi_tb_verify_checksum(table_desc->pointer, + acpi_ut_verify_checksum(table_desc->pointer, table_desc->length); if (ACPI_FAILURE(status)) { ACPI_EXCEPTION((AE_INFO, AE_NO_MEMORY, @@ -749,6 +778,7 @@ acpi_status acpi_tb_delete_namespace_by_owner(u32 table_index) if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } + acpi_ns_delete_namespace_by_owner(owner_id); acpi_ut_release_write_lock(&acpi_gbl_namespace_rw_lock); return_ACPI_STATUS(status); @@ -931,19 +961,9 @@ acpi_tb_load_table(u32 table_index, struct acpi_namespace_node *parent_node) } status = acpi_ns_load_table(table_index, parent_node); - - /* - * This case handles the legacy option that groups all module-level - * code blocks together and defers execution until all of the tables - * are loaded. Execute all of these blocks at this time. - * Execute any module-level code that was detected during the table - * load phase. - * - * Note: this option is deprecated and will be eliminated in the - * future. Use of this option can cause problems with AML code that - * depends upon in-order immediate execution of module-level code. - */ - acpi_ns_exec_module_code_list(); + if (ACPI_FAILURE(status)) { + return_ACPI_STATUS(status); + } /* * Update GPEs for any new _Lxx/_Exx methods. Ignore errors. 
The host is @@ -967,6 +987,9 @@ acpi_tb_load_table(u32 table_index, struct acpi_namespace_node *parent_node) * * PARAMETERS: address - Physical address of the table * flags - Allocation flags of the table + * table - Pointer to the table (required for + * virtual origins, optional for + * physical) * override - Whether override should be performed * table_index - Where table index is returned * @@ -978,7 +1001,9 @@ acpi_tb_load_table(u32 table_index, struct acpi_namespace_node *parent_node) acpi_status acpi_tb_install_and_load_table(acpi_physical_address address, - u8 flags, u8 override, u32 *table_index) + u8 flags, + struct acpi_table_header *table, + u8 override, u32 *table_index) { acpi_status status; u32 i; @@ -987,7 +1012,7 @@ acpi_tb_install_and_load_table(acpi_physical_address address, /* Install the table and load it into the namespace */ - status = acpi_tb_install_standard_table(address, flags, TRUE, + status = acpi_tb_install_standard_table(address, flags, table, TRUE, override, &i); if (ACPI_FAILURE(status)) { goto exit; diff --git a/drivers/acpi/acpica/tbfadt.c b/drivers/acpi/acpica/tbfadt.c index 99d325a51816..c6658b2f3027 100644 --- a/drivers/acpi/acpica/tbfadt.c +++ b/drivers/acpi/acpica/tbfadt.c @@ -3,7 +3,7 @@ * * Module Name: tbfadt - FADT table utilities * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. * *****************************************************************************/ @@ -298,7 +298,7 @@ void acpi_tb_parse_fadt(void) * Validate the FADT checksum before we copy the table. Ignore * checksum error as we want to try to get the DSDT and FACS. */ - (void)acpi_tb_verify_checksum(table, length); + (void)acpi_ut_verify_checksum(table, length); /* Create a local copy of the FADT in common ACPI 2.0+ format */ @@ -313,25 +313,21 @@ void acpi_tb_parse_fadt(void) acpi_tb_install_standard_table((acpi_physical_address)acpi_gbl_FADT. Xdsdt, ACPI_TABLE_ORIGIN_INTERNAL_PHYSICAL, - FALSE, TRUE, &acpi_gbl_dsdt_index); - - /* If Hardware Reduced flag is set, there is no FACS */ - - if (!acpi_gbl_reduced_hardware) { - if (acpi_gbl_FADT.facs) { - acpi_tb_install_standard_table((acpi_physical_address) - acpi_gbl_FADT.facs, - ACPI_TABLE_ORIGIN_INTERNAL_PHYSICAL, - FALSE, TRUE, - &acpi_gbl_facs_index); - } - if (acpi_gbl_FADT.Xfacs) { - acpi_tb_install_standard_table((acpi_physical_address) - acpi_gbl_FADT.Xfacs, - ACPI_TABLE_ORIGIN_INTERNAL_PHYSICAL, - FALSE, TRUE, - &acpi_gbl_xfacs_index); - } + NULL, FALSE, TRUE, &acpi_gbl_dsdt_index); + + if (acpi_gbl_FADT.facs) { + acpi_tb_install_standard_table((acpi_physical_address) + acpi_gbl_FADT.facs, + ACPI_TABLE_ORIGIN_INTERNAL_PHYSICAL, + NULL, FALSE, TRUE, + &acpi_gbl_facs_index); + } + if (acpi_gbl_FADT.Xfacs) { + acpi_tb_install_standard_table((acpi_physical_address) + acpi_gbl_FADT.Xfacs, + ACPI_TABLE_ORIGIN_INTERNAL_PHYSICAL, + NULL, FALSE, TRUE, + &acpi_gbl_xfacs_index); } } @@ -556,7 +552,7 @@ static void acpi_tb_convert_fadt(void) * 64-bit X length field. * Note: If the legacy length field is > 0xFF bits, ignore * this check. (GPE registers can be larger than the - * 64-bit GAS structure can accomodate, 0xFF bits). + * 64-bit GAS structure can accommodate, 0xFF bits). 
*/ if ((ACPI_MUL_8(length) <= ACPI_UINT8_MAX) && (address64->bit_width != diff --git a/drivers/acpi/acpica/tbfind.c b/drivers/acpi/acpica/tbfind.c index f00694b1d000..d71a73216380 100644 --- a/drivers/acpi/acpica/tbfind.c +++ b/drivers/acpi/acpica/tbfind.c @@ -3,7 +3,7 @@ * * Module Name: tbfind - find table * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. * *****************************************************************************/ @@ -56,16 +56,16 @@ acpi_tb_find_table(char *signature, /* Normalize the input strings */ memset(&header, 0, sizeof(struct acpi_table_header)); - ACPI_MOVE_NAME(header.signature, signature); - strncpy(header.oem_id, oem_id, ACPI_OEM_ID_SIZE); - strncpy(header.oem_table_id, oem_table_id, ACPI_OEM_TABLE_ID_SIZE); + ACPI_COPY_NAMESEG(header.signature, signature); + memcpy(header.oem_id, oem_id, ACPI_OEM_ID_SIZE); + memcpy(header.oem_table_id, oem_table_id, ACPI_OEM_TABLE_ID_SIZE); /* Search for the table */ (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES); for (i = 0; i < acpi_gbl_root_table_list.current_table_count; ++i) { if (memcmp(&(acpi_gbl_root_table_list.tables[i].signature), - header.signature, ACPI_NAME_SIZE)) { + header.signature, ACPI_NAMESEG_SIZE)) { /* Not the requested table */ @@ -94,14 +94,14 @@ acpi_tb_find_table(char *signature, if (!memcmp (acpi_gbl_root_table_list.tables[i].pointer->signature, - header.signature, ACPI_NAME_SIZE) && (!oem_id[0] - || - !memcmp - (acpi_gbl_root_table_list. - tables[i].pointer-> - oem_id, - header.oem_id, - ACPI_OEM_ID_SIZE)) + header.signature, ACPI_NAMESEG_SIZE) && (!oem_id[0] + || + !memcmp + (acpi_gbl_root_table_list. + tables[i]. + pointer->oem_id, + header.oem_id, + ACPI_OEM_ID_SIZE)) && (!oem_table_id[0] || !memcmp(acpi_gbl_root_table_list.tables[i].pointer-> oem_table_id, header.oem_table_id, diff --git a/drivers/acpi/acpica/tbinstal.c b/drivers/acpi/acpica/tbinstal.c index 5f8e7b561c90..ee9b85bc238b 100644 --- a/drivers/acpi/acpica/tbinstal.c +++ b/drivers/acpi/acpica/tbinstal.c @@ -3,7 +3,7 @@ * * Module Name: tbinstal - ACPI table installation and removal * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. 
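In the tbfind.c hunk above, the OEM ID copies switch from strncpy() to memcpy() and from the ACPI_MOVE_NAME/ACPI_NAME_SIZE spellings to ACPI_COPY_NAMESEG/ACPI_NAMESEG_SIZE. The header fields involved are fixed-width character arrays that are neither NUL-terminated nor padded, so a memcpy of the declared width expresses the intent directly (and likely also sidesteps the truncation-style warnings newer compilers raise for strncpy with a bound equal to the destination size). A small stand-alone illustration, with field sizes matching ACPI_OEM_ID_SIZE and ACPI_OEM_TABLE_ID_SIZE:

#include <stdio.h>
#include <string.h>

#define OEM_ID_SIZE		6	/* matches ACPI_OEM_ID_SIZE */
#define OEM_TABLE_ID_SIZE	8	/* matches ACPI_OEM_TABLE_ID_SIZE */

/* Fixed-width, not NUL-terminated, as in struct acpi_table_header. */
struct header_ids {
	char oem_id[OEM_ID_SIZE];
	char oem_table_id[OEM_TABLE_ID_SIZE];
};

int main(void)
{
	struct header_ids ids;

	/* Copy exactly the declared field width; no terminator is stored. */
	memcpy(ids.oem_id, "INTEL ", OEM_ID_SIZE);
	memcpy(ids.oem_table_id, "TEMPLATE", OEM_TABLE_ID_SIZE);

	printf("%.*s / %.*s\n", OEM_ID_SIZE, ids.oem_id,
	       OEM_TABLE_ID_SIZE, ids.oem_table_id);
	return 0;
}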
* *****************************************************************************/ @@ -79,6 +79,8 @@ acpi_tb_install_table_with_override(struct acpi_table_desc *new_table_desc, * PARAMETERS: address - Address of the table (might be a virtual * address depending on the table_flags) * flags - Flags for the table + * table - Pointer to the table (required for virtual + * origins, optional for physical) * reload - Whether reload should be performed * override - Whether override should be performed * table_index - Where the table index is returned @@ -96,6 +98,7 @@ acpi_tb_install_table_with_override(struct acpi_table_desc *new_table_desc, acpi_status acpi_tb_install_standard_table(acpi_physical_address address, u8 flags, + struct acpi_table_header *table, u8 reload, u8 override, u32 *table_index) { u32 i; @@ -106,7 +109,8 @@ acpi_tb_install_standard_table(acpi_physical_address address, /* Acquire a temporary table descriptor for validation */ - status = acpi_tb_acquire_temp_table(&new_table_desc, address, flags); + status = + acpi_tb_acquire_temp_table(&new_table_desc, address, flags, table); if (ACPI_FAILURE(status)) { ACPI_ERROR((AE_INFO, "Could not acquire table length at %8.8X%8.8X", @@ -120,7 +124,7 @@ acpi_tb_install_standard_table(acpi_physical_address address, */ if (!reload && acpi_gbl_disable_ssdt_table_install && - ACPI_COMPARE_NAME(&new_table_desc.signature, ACPI_SIG_SSDT)) { + ACPI_COMPARE_NAMESEG(&new_table_desc.signature, ACPI_SIG_SSDT)) { ACPI_INFO(("Ignoring installation of %4.4s at %8.8X%8.8X", new_table_desc.signature.ascii, ACPI_FORMAT_UINT64(address))); @@ -209,7 +213,8 @@ void acpi_tb_override_table(struct acpi_table_desc *old_table_desc) if (ACPI_SUCCESS(status) && table) { acpi_tb_acquire_temp_table(&new_table_desc, ACPI_PTR_TO_PHYSADDR(table), - ACPI_TABLE_ORIGIN_EXTERNAL_VIRTUAL); + ACPI_TABLE_ORIGIN_EXTERNAL_VIRTUAL, + table); ACPI_ERROR_ONLY(override_type = "Logical"); goto finish_override; } @@ -220,7 +225,8 @@ void acpi_tb_override_table(struct acpi_table_desc *old_table_desc) &address, &length); if (ACPI_SUCCESS(status) && address && length) { acpi_tb_acquire_temp_table(&new_table_desc, address, - ACPI_TABLE_ORIGIN_INTERNAL_PHYSICAL); + ACPI_TABLE_ORIGIN_INTERNAL_PHYSICAL, + NULL); ACPI_ERROR_ONLY(override_type = "Physical"); goto finish_override; } @@ -289,7 +295,8 @@ void acpi_tb_uninstall_table(struct acpi_table_desc *table_desc) if ((table_desc->flags & ACPI_TABLE_ORIGIN_MASK) == ACPI_TABLE_ORIGIN_INTERNAL_VIRTUAL) { - ACPI_FREE(ACPI_PHYSADDR_TO_PTR(table_desc->address)); + ACPI_FREE(table_desc->pointer); + table_desc->pointer = NULL; } table_desc->address = ACPI_PTR_TO_PHYSADDR(NULL); diff --git a/drivers/acpi/acpica/tbprint.c b/drivers/acpi/acpica/tbprint.c index e303418a895b..e5631027f7f1 100644 --- a/drivers/acpi/acpica/tbprint.c +++ b/drivers/acpi/acpica/tbprint.c @@ -3,13 +3,14 @@ * * Module Name: tbprint - Table output utilities * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. 
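Several hunks in this area (tbdata.c, tbfadt.c, tbutils.c) switch callers from acpi_tb_verify_checksum() to acpi_ut_verify_checksum(), and the tbprint.c diff below drops the old table-local helpers entirely. The rule itself is unchanged and simple: every byte of the table, including the checksum field at offset 9 of the standard header, must sum to zero modulo 256. A self-contained demonstration (toy 16-byte table, not a real ACPI image):

#include <stdint.h>
#include <stdio.h>

/* Sum all bytes of the table; a valid ACPI table sums to 0 (mod 256). */
static uint8_t table_checksum(const uint8_t *buffer, uint32_t length)
{
	uint8_t sum = 0;

	while (length--)
		sum = (uint8_t)(sum + *buffer++);

	return sum;
}

int main(void)
{
	uint8_t table[16] = { 'T', 'E', 'S', 'T', 16 };	/* signature + length */

	/* Patch the checksum byte so the whole image sums to zero. */
	table[9] = (uint8_t)(0 - table_checksum(table, sizeof(table)));

	printf("checksum ok: %s\n",
	       table_checksum(table, sizeof(table)) == 0 ? "yes" : "no");
	return 0;
}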
* *****************************************************************************/ #include <acpi/acpi.h> #include "accommon.h" #include "actables.h" +#include "acutils.h" #define _COMPONENT ACPI_TABLES ACPI_MODULE_NAME("tbprint") @@ -39,7 +40,7 @@ static void acpi_tb_fix_string(char *string, acpi_size length) { while (length && *string) { - if (!isprint((int)*string)) { + if (!isprint((int)(u8)*string)) { *string = '?'; } @@ -69,10 +70,10 @@ acpi_tb_cleanup_table_header(struct acpi_table_header *out_header, memcpy(out_header, header, sizeof(struct acpi_table_header)); - acpi_tb_fix_string(out_header->signature, ACPI_NAME_SIZE); + acpi_tb_fix_string(out_header->signature, ACPI_NAMESEG_SIZE); acpi_tb_fix_string(out_header->oem_id, ACPI_OEM_ID_SIZE); acpi_tb_fix_string(out_header->oem_table_id, ACPI_OEM_TABLE_ID_SIZE); - acpi_tb_fix_string(out_header->asl_compiler_id, ACPI_NAME_SIZE); + acpi_tb_fix_string(out_header->asl_compiler_id, ACPI_NAMESEG_SIZE); } /******************************************************************************* @@ -94,14 +95,20 @@ acpi_tb_print_table_header(acpi_physical_address address, { struct acpi_table_header local_header; - if (ACPI_COMPARE_NAME(header->signature, ACPI_SIG_FACS)) { +#pragma GCC diagnostic push +#if defined(__GNUC__) && __GNUC__ >= 11 +#pragma GCC diagnostic ignored "-Wstringop-overread" +#endif + + if (ACPI_COMPARE_NAMESEG(header->signature, ACPI_SIG_FACS)) { /* FACS only has signature and length fields */ ACPI_INFO(("%-4.4s 0x%8.8X%8.8X %06X", header->signature, ACPI_FORMAT_UINT64(address), header->length)); - } else if (ACPI_VALIDATE_RSDP_SIG(header->signature)) { + } else if (ACPI_VALIDATE_RSDP_SIG(ACPI_CAST_PTR(struct acpi_table_rsdp, + header)->signature)) { /* RSDP has no common fields */ @@ -119,6 +126,14 @@ acpi_tb_print_table_header(acpi_physical_address address, ACPI_CAST_PTR(struct acpi_table_rsdp, header)->revision, local_header.oem_id)); + } else if (acpi_gbl_CDAT && !acpi_ut_valid_nameseg(header->signature)) { + + /* CDAT does not use the common ACPI table header */ + + ACPI_INFO(("%-4.4s 0x%8.8X%8.8X %06X", + ACPI_SIG_CDAT, ACPI_FORMAT_UINT64(address), + ACPI_CAST_PTR(struct acpi_table_cdat, + header)->length)); } else { /* Standard ACPI table with full common header */ @@ -133,78 +148,5 @@ acpi_tb_print_table_header(acpi_physical_address address, local_header.asl_compiler_id, local_header.asl_compiler_revision)); } -} - -/******************************************************************************* - * - * FUNCTION: acpi_tb_validate_checksum - * - * PARAMETERS: table - ACPI table to verify - * length - Length of entire table - * - * RETURN: Status - * - * DESCRIPTION: Verifies that the table checksums to zero. Optionally returns - * exception on bad checksum. - * - ******************************************************************************/ - -acpi_status acpi_tb_verify_checksum(struct acpi_table_header *table, u32 length) -{ - u8 checksum; - - /* - * FACS/S3PT: - * They are the odd tables, have no standard ACPI header and no checksum - */ - - if (ACPI_COMPARE_NAME(table->signature, ACPI_SIG_S3PT) || - ACPI_COMPARE_NAME(table->signature, ACPI_SIG_FACS)) { - return (AE_OK); - } - - /* Compute the checksum on the table */ - - checksum = acpi_tb_checksum(ACPI_CAST_PTR(u8, table), length); - - /* Checksum ok? 
(should be zero) */ - - if (checksum) { - ACPI_BIOS_WARNING((AE_INFO, - "Incorrect checksum in table [%4.4s] - 0x%2.2X, " - "should be 0x%2.2X", - table->signature, table->checksum, - (u8)(table->checksum - checksum))); - -#if (ACPI_CHECKSUM_ABORT) - return (AE_BAD_CHECKSUM); -#endif - } - - return (AE_OK); -} - -/******************************************************************************* - * - * FUNCTION: acpi_tb_checksum - * - * PARAMETERS: buffer - Pointer to memory region to be checked - * length - Length of this memory region - * - * RETURN: Checksum (u8) - * - * DESCRIPTION: Calculates circular checksum of memory region. - * - ******************************************************************************/ - -u8 acpi_tb_checksum(u8 *buffer, u32 length) -{ - u8 sum = 0; - u8 *end = buffer + length; - - while (buffer < end) { - sum = (u8)(sum + *(buffer++)); - } - - return (sum); +#pragma GCC diagnostic pop } diff --git a/drivers/acpi/acpica/tbutils.c b/drivers/acpi/acpica/tbutils.c index b526096560b5..fa64851c7b62 100644 --- a/drivers/acpi/acpica/tbutils.c +++ b/drivers/acpi/acpica/tbutils.c @@ -3,7 +3,7 @@ * * Module Name: tbutils - ACPI Table utilities * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. * *****************************************************************************/ @@ -18,7 +18,6 @@ ACPI_MODULE_NAME("tbutils") static acpi_physical_address acpi_tb_get_root_table_entry(u8 *table_entry, u32 table_entry_size); -#if (!ACPI_REDUCED_HARDWARE) /******************************************************************************* * * FUNCTION: acpi_tb_initialize_facs @@ -36,12 +35,7 @@ acpi_status acpi_tb_initialize_facs(void) { struct acpi_table_facs *facs; - /* If Hardware Reduced flag is set, there is no FACS */ - - if (acpi_gbl_reduced_hardware) { - acpi_gbl_FACS = NULL; - return (AE_OK); - } else if (acpi_gbl_FADT.Xfacs && + if (acpi_gbl_FADT.Xfacs && (!acpi_gbl_FADT.facs || !acpi_gbl_use32_bit_facs_addresses)) { (void)acpi_get_table_by_index(acpi_gbl_xfacs_index, @@ -61,7 +55,6 @@ acpi_status acpi_tb_initialize_facs(void) return (AE_OK); } -#endif /* !ACPI_REDUCED_HARDWARE */ /******************************************************************************* * @@ -165,6 +158,7 @@ struct acpi_table_header *acpi_tb_copy_dsdt(u32 table_index) static acpi_physical_address acpi_tb_get_root_table_entry(u8 *table_entry, u32 table_entry_size) { + u32 address32; u64 address64; /* @@ -176,8 +170,8 @@ acpi_tb_get_root_table_entry(u8 *table_entry, u32 table_entry_size) * 32-bit platform, RSDT: Return 32-bit table entry * 64-bit platform, RSDT: Expand 32-bit to 64-bit and return */ - return ((acpi_physical_address) - (*ACPI_CAST_PTR(u32, table_entry))); + ACPI_MOVE_32_TO_32(&address32, table_entry); + return address32; } else { /* * 32-bit platform, XSDT: Truncate 64-bit to 32-bit and return @@ -299,7 +293,7 @@ acpi_tb_parse_root_table(acpi_physical_address rsdp_address) /* Validate the root table checksum */ - status = acpi_tb_verify_checksum(table, length); + status = acpi_ut_verify_checksum(table, length); if (ACPI_FAILURE(status)) { acpi_os_unmap_memory(table, length); return_ACPI_STATUS(status); @@ -328,13 +322,13 @@ acpi_tb_parse_root_table(acpi_physical_address rsdp_address) status = acpi_tb_install_standard_table(address, ACPI_TABLE_ORIGIN_INTERNAL_PHYSICAL, - FALSE, TRUE, + NULL, FALSE, TRUE, &table_index); if (ACPI_SUCCESS(status) && - ACPI_COMPARE_NAME(&acpi_gbl_root_table_list. 
- tables[table_index].signature, - ACPI_SIG_FADT)) { + ACPI_COMPARE_NAMESEG(&acpi_gbl_root_table_list. + tables[table_index].signature, + ACPI_SIG_FADT)) { acpi_gbl_fadt_index = table_index; acpi_tb_parse_fadt(); } diff --git a/drivers/acpi/acpica/tbxface.c b/drivers/acpi/acpica/tbxface.c index e4d0dc8948cd..a8f07d2641b6 100644 --- a/drivers/acpi/acpica/tbxface.c +++ b/drivers/acpi/acpica/tbxface.c @@ -3,7 +3,7 @@ * * Module Name: tbxface - ACPI table-oriented external interfaces * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. * *****************************************************************************/ @@ -108,7 +108,7 @@ acpi_initialize_tables(struct acpi_table_desc *initial_table_array, /* * Get the root table (RSDT or XSDT) and extract all entries to the local * Root Table Array. This array contains the information of the RSDT/XSDT - * in a common, more useable format. + * in a common, more usable format. */ status = acpi_tb_parse_root_table(rsdp_address); return_ACPI_STATUS(status); @@ -169,7 +169,7 @@ acpi_status ACPI_INIT_FUNCTION acpi_reallocate_root_table(void) if (!acpi_gbl_enable_table_validation) { /* * Now it's safe to do full table validation. We can do deferred - * table initilization here once the flag is set. + * table initialization here once the flag is set. */ acpi_gbl_enable_table_validation = TRUE; for (i = 0; i < acpi_gbl_root_table_list.current_table_count; @@ -202,14 +202,14 @@ ACPI_EXPORT_SYMBOL_INIT(acpi_reallocate_root_table) * * PARAMETERS: signature - ACPI signature of needed table * instance - Which instance (for SSDTs) - * out_table_header - The pointer to the table header to fill + * out_table_header - The pointer to the where the table header + * is returned * - * RETURN: Status and pointer to mapped table header + * RETURN: Status and a copy of the table header * - * DESCRIPTION: Finds an ACPI table header. - * - * NOTE: Caller is responsible in unmapping the header with - * acpi_os_unmap_memory + * DESCRIPTION: Finds and returns an ACPI table header. Caller provides the + * memory where a copy of the header is to be returned + * (fixed length). * ******************************************************************************/ acpi_status @@ -230,7 +230,7 @@ acpi_get_table_header(char *signature, for (i = 0, j = 0; i < acpi_gbl_root_table_list.current_table_count; i++) { - if (!ACPI_COMPARE_NAME + if (!ACPI_COMPARE_NAMESEG (&(acpi_gbl_root_table_list.tables[i].signature), signature)) { continue; @@ -323,7 +323,7 @@ acpi_get_table(char *signature, i++) { table_desc = &acpi_gbl_root_table_list.tables[i]; - if (!ACPI_COMPARE_NAME(&table_desc->signature, signature)) { + if (!ACPI_COMPARE_NAMESEG(&table_desc->signature, signature)) { continue; } diff --git a/drivers/acpi/acpica/tbxfload.c b/drivers/acpi/acpica/tbxfload.c index 9011297552af..2a17c60a9a39 100644 --- a/drivers/acpi/acpica/tbxfload.c +++ b/drivers/acpi/acpica/tbxfload.c @@ -3,7 +3,7 @@ * * Module Name: tbxfload - Table load/unload external interfaces * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. * *****************************************************************************/ @@ -69,23 +69,18 @@ acpi_status ACPI_INIT_FUNCTION acpi_load_tables(void) "While loading namespace from ACPI tables")); } - if (acpi_gbl_execute_tables_as_methods) { - /* - * If the module-level code support is enabled, initialize the objects - * in the namespace that remain uninitialized. 
This runs the executable - * AML that may be part of the declaration of these name objects: - * operation_regions, buffer_fields, Buffers, and Packages. - * - * Note: The module-level code is optional at this time, but will - * become the default in the future. - */ - status = acpi_ns_initialize_objects(); - if (ACPI_FAILURE(status)) { - return_ACPI_STATUS(status); - } + /* + * Initialize the objects in the namespace that remain uninitialized. + * This runs the executable AML that may be part of the declaration of + * these name objects: + * operation_regions, buffer_fields, Buffers, and Packages. + * + */ + status = acpi_ns_initialize_objects(); + if (ACPI_SUCCESS(status)) { + acpi_gbl_namespace_initialized = TRUE; } - acpi_gbl_namespace_initialized = TRUE; return_ACPI_STATUS(status); } @@ -123,7 +118,7 @@ acpi_status acpi_tb_load_namespace(void) table = &acpi_gbl_root_table_list.tables[acpi_gbl_dsdt_index]; if (!acpi_gbl_root_table_list.current_table_count || - !ACPI_COMPARE_NAME(table->signature.ascii, ACPI_SIG_DSDT) || + !ACPI_COMPARE_NAMESEG(table->signature.ascii, ACPI_SIG_DSDT) || ACPI_FAILURE(acpi_tb_validate_table(table))) { status = AE_NO_ACPI_TABLES; goto unlock_and_exit; @@ -175,11 +170,12 @@ acpi_status acpi_tb_load_namespace(void) table = &acpi_gbl_root_table_list.tables[i]; if (!table->address || - (!ACPI_COMPARE_NAME(table->signature.ascii, ACPI_SIG_SSDT) - && !ACPI_COMPARE_NAME(table->signature.ascii, - ACPI_SIG_PSDT) - && !ACPI_COMPARE_NAME(table->signature.ascii, - ACPI_SIG_OSDT)) + (!ACPI_COMPARE_NAMESEG + (table->signature.ascii, ACPI_SIG_SSDT) + && !ACPI_COMPARE_NAMESEG(table->signature.ascii, + ACPI_SIG_PSDT) + && !ACPI_COMPARE_NAMESEG(table->signature.ascii, + ACPI_SIG_OSDT)) || ACPI_FAILURE(acpi_tb_validate_table(table))) { continue; } @@ -231,9 +227,7 @@ unlock_and_exit: * * FUNCTION: acpi_install_table * - * PARAMETERS: address - Address of the ACPI table to be installed. - * physical - Whether the address is a physical table - * address or not + * PARAMETERS: table - Pointer to the ACPI table to be installed. * * RETURN: Status * @@ -244,22 +238,17 @@ unlock_and_exit: ******************************************************************************/ acpi_status ACPI_INIT_FUNCTION -acpi_install_table(acpi_physical_address address, u8 physical) +acpi_install_table(struct acpi_table_header *table) { acpi_status status; - u8 flags; u32 table_index; ACPI_FUNCTION_TRACE(acpi_install_table); - if (physical) { - flags = ACPI_TABLE_ORIGIN_INTERNAL_PHYSICAL; - } else { - flags = ACPI_TABLE_ORIGIN_EXTERNAL_VIRTUAL; - } - - status = acpi_tb_install_standard_table(address, flags, - FALSE, FALSE, &table_index); + status = acpi_tb_install_standard_table(ACPI_PTR_TO_PHYSADDR(table), + ACPI_TABLE_ORIGIN_EXTERNAL_VIRTUAL, + table, FALSE, FALSE, + &table_index); return_ACPI_STATUS(status); } @@ -268,10 +257,43 @@ ACPI_EXPORT_SYMBOL_INIT(acpi_install_table) /******************************************************************************* * + * FUNCTION: acpi_install_physical_table + * + * PARAMETERS: address - Address of the ACPI table to be installed. + * + * RETURN: Status + * + * DESCRIPTION: Dynamically install an ACPI table. + * Note: This function should only be invoked after + * acpi_initialize_tables() and before acpi_load_tables(). 
+ * + ******************************************************************************/ +acpi_status ACPI_INIT_FUNCTION +acpi_install_physical_table(acpi_physical_address address) +{ + acpi_status status; + u32 table_index; + + ACPI_FUNCTION_TRACE(acpi_install_physical_table); + + status = acpi_tb_install_standard_table(address, + ACPI_TABLE_ORIGIN_INTERNAL_PHYSICAL, + NULL, FALSE, FALSE, + &table_index); + + return_ACPI_STATUS(status); +} + +ACPI_EXPORT_SYMBOL_INIT(acpi_install_physical_table) + +/******************************************************************************* + * * FUNCTION: acpi_load_table * * PARAMETERS: table - Pointer to a buffer containing the ACPI * table to be loaded. + * table_idx - Pointer to a u32 for storing the table + * index, might be NULL * * RETURN: Status * @@ -282,7 +304,7 @@ ACPI_EXPORT_SYMBOL_INIT(acpi_install_table) * to ensure that the table is not deleted or unmapped. * ******************************************************************************/ -acpi_status acpi_load_table(struct acpi_table_header *table) +acpi_status acpi_load_table(struct acpi_table_header *table, u32 *table_idx) { acpi_status status; u32 table_index; @@ -300,7 +322,18 @@ acpi_status acpi_load_table(struct acpi_table_header *table) ACPI_INFO(("Host-directed Dynamic ACPI Table Load:")); status = acpi_tb_install_and_load_table(ACPI_PTR_TO_PHYSADDR(table), ACPI_TABLE_ORIGIN_EXTERNAL_VIRTUAL, - FALSE, &table_index); + table, FALSE, &table_index); + if (table_idx) { + *table_idx = table_index; + } + + if (ACPI_SUCCESS(status)) { + + /* Complete the initialization/resolution of new objects */ + + acpi_ns_initialize_objects(); + } + return_ACPI_STATUS(status); } @@ -369,7 +402,7 @@ acpi_status acpi_unload_parent_table(acpi_handle object) * only these types can contain AML and thus are the only types * that can create namespace objects. */ - if (ACPI_COMPARE_NAME + if (ACPI_COMPARE_NAMESEG (acpi_gbl_root_table_list.tables[i].signature.ascii, ACPI_SIG_DSDT)) { status = AE_TYPE; @@ -387,3 +420,35 @@ acpi_status acpi_unload_parent_table(acpi_handle object) } ACPI_EXPORT_SYMBOL(acpi_unload_parent_table) +/******************************************************************************* + * + * FUNCTION: acpi_unload_table + * + * PARAMETERS: table_index - Index as returned by acpi_load_table + * + * RETURN: Status + * + * DESCRIPTION: Via the table_index representing an SSDT or OEMx table, unloads + * the table and deletes all namespace objects associated with + * that table. Unloading of the DSDT is not allowed. + * Note: Mainly intended to support hotplug removal of SSDTs. + * + ******************************************************************************/ +acpi_status acpi_unload_table(u32 table_index) +{ + acpi_status status; + + ACPI_FUNCTION_TRACE(acpi_unload_table); + + if (table_index == 1) { + + /* table_index==1 means DSDT is the owner. DSDT cannot be unloaded */ + + return_ACPI_STATUS(AE_TYPE); + } + + status = acpi_tb_unload_table(table_index); + return_ACPI_STATUS(status); +} + +ACPI_EXPORT_SYMBOL(acpi_unload_table) diff --git a/drivers/acpi/acpica/tbxfroot.c b/drivers/acpi/acpica/tbxfroot.c index 483d0ce5180a..961577ba9486 100644 --- a/drivers/acpi/acpica/tbxfroot.c +++ b/drivers/acpi/acpica/tbxfroot.c @@ -3,7 +3,7 @@ * * Module Name: tbxfroot - Find the root ACPI table (RSDT) * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. 
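(Editorial aside, not part of the patch: the tbxfload.c changes above split dynamic table installation into a pointer-based interface, acpi_install_table(), and a physical-address-based one, acpi_install_physical_table(), both usable only between acpi_initialize_tables() and acpi_load_tables(); acpi_load_table() additionally gained an optional index output that acpi_unload_table() later consumes. A minimal sketch of how a host might use the reworked interfaces follows; the table images, the physical address and the function names here are hypothetical placeholders, not anything defined by the patch.)

#include <acpi/acpi.h>

/* Hypothetical tables, for illustration only */
extern struct acpi_table_header boot_ssdt_image;	/* linked into the kernel */
extern acpi_physical_address firmware_ssdt_address;	/* discovered elsewhere */
extern struct acpi_table_header hotplug_ssdt_image;	/* provided at runtime */

/* Early boot: install extra tables after table init, before namespace load */
static acpi_status __init example_early_table_setup(void)
{
	acpi_status status;

	status = acpi_initialize_tables(NULL, 16, TRUE);
	if (ACPI_FAILURE(status))
		return status;

	/* Table already mapped/linked in: pass its virtual pointer */
	status = acpi_install_table(&boot_ssdt_image);
	if (ACPI_FAILURE(status))
		return status;

	/* Table known only by physical address: let ACPICA map it */
	status = acpi_install_physical_table(firmware_ssdt_address);
	if (ACPI_FAILURE(status))
		return status;

	return acpi_load_tables();
}

/* Runtime: a dynamically loaded SSDT can now be torn down again by index */
static acpi_status example_hotplug_ssdt(void)
{
	acpi_status status;
	u32 ssdt_index;

	status = acpi_load_table(&hotplug_ssdt_image, &ssdt_index);
	if (ACPI_FAILURE(status))
		return status;

	/* ... use the namespace objects created by the SSDT ... */

	return acpi_unload_table(ssdt_index);
}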
* *****************************************************************************/ @@ -74,14 +74,14 @@ acpi_status acpi_tb_validate_rsdp(struct acpi_table_rsdp *rsdp) /* Check the standard checksum */ - if (acpi_tb_checksum((u8 *) rsdp, ACPI_RSDP_CHECKSUM_LENGTH) != 0) { + if (acpi_ut_checksum((u8 *)rsdp, ACPI_RSDP_CHECKSUM_LENGTH) != 0) { return (AE_BAD_CHECKSUM); } /* Check extended checksum if table version >= 2 */ if ((rsdp->revision >= 2) && - (acpi_tb_checksum((u8 *) rsdp, ACPI_RSDP_XCHECKSUM_LENGTH) != 0)) { + (acpi_ut_checksum((u8 *)rsdp, ACPI_RSDP_XCHECKSUM_LENGTH) != 0)) { return (AE_BAD_CHECKSUM); } @@ -114,6 +114,7 @@ acpi_find_root_pointer(acpi_physical_address *table_address) u8 *table_ptr; u8 *mem_rover; u32 physical_address; + u32 ebda_window_size; ACPI_FUNCTION_TRACE(acpi_find_root_pointer); @@ -139,26 +140,37 @@ acpi_find_root_pointer(acpi_physical_address *table_address) /* EBDA present? */ - if (physical_address > 0x400) { + /* + * Check that the EBDA pointer from memory is sane and does not point + * above valid low memory + */ + if (physical_address > 0x400 && physical_address < 0xA0000) { + /* + * Calculate the scan window size + * The EBDA is not guaranteed to be larger than a ki_b and in case + * that it is smaller, the scanning function would leave the low + * memory and continue to the VGA range. + */ + ebda_window_size = ACPI_MIN(ACPI_EBDA_WINDOW_SIZE, + 0xA0000 - physical_address); + /* - * 1b) Search EBDA paragraphs (EBDA is required to be a - * minimum of 1K length) + * 1b) Search EBDA paragraphs */ table_ptr = acpi_os_map_memory((acpi_physical_address) physical_address, - ACPI_EBDA_WINDOW_SIZE); + ebda_window_size); if (!table_ptr) { ACPI_ERROR((AE_INFO, "Could not map memory at 0x%8.8X for length %u", - physical_address, ACPI_EBDA_WINDOW_SIZE)); + physical_address, ebda_window_size)); return_ACPI_STATUS(AE_NO_MEMORY); } mem_rover = - acpi_tb_scan_memory_for_rsdp(table_ptr, - ACPI_EBDA_WINDOW_SIZE); - acpi_os_unmap_memory(table_ptr, ACPI_EBDA_WINDOW_SIZE); + acpi_tb_scan_memory_for_rsdp(table_ptr, ebda_window_size); + acpi_os_unmap_memory(table_ptr, ebda_window_size); if (mem_rover) { diff --git a/drivers/acpi/acpica/utaddress.c b/drivers/acpi/acpica/utaddress.c index dbabe680ff58..c673d6c95e0a 100644 --- a/drivers/acpi/acpica/utaddress.c +++ b/drivers/acpi/acpica/utaddress.c @@ -3,7 +3,7 @@ * * Module Name: utaddress - op_region address range check * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/utalloc.c b/drivers/acpi/acpica/utalloc.c index 8cbcd7d6bd5e..2418a312733a 100644 --- a/drivers/acpi/acpica/utalloc.c +++ b/drivers/acpi/acpica/utalloc.c @@ -3,7 +3,7 @@ * * Module Name: utalloc - local memory allocation routines * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/utascii.c b/drivers/acpi/acpica/utascii.c index 04ff61e284f5..259c28d3fecd 100644 --- a/drivers/acpi/acpica/utascii.c +++ b/drivers/acpi/acpica/utascii.c @@ -3,7 +3,7 @@ * * Module Name: utascii - Utility ascii functions * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. 
* *****************************************************************************/ @@ -30,7 +30,7 @@ u8 acpi_ut_valid_nameseg(char *name) /* Validate each character in the signature */ - for (i = 0; i < ACPI_NAME_SIZE; i++) { + for (i = 0; i < ACPI_NAMESEG_SIZE; i++) { if (!acpi_ut_valid_name_char(name[i], i)) { return (FALSE); } diff --git a/drivers/acpi/acpica/utbuffer.c b/drivers/acpi/acpica/utbuffer.c index fffa6f5ae59e..f6e6e98e9523 100644 --- a/drivers/acpi/acpica/utbuffer.c +++ b/drivers/acpi/acpica/utbuffer.c @@ -3,7 +3,7 @@ * * Module Name: utbuffer - Buffer dump routines * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. * *****************************************************************************/ @@ -37,7 +37,9 @@ void acpi_ut_dump_buffer(u8 *buffer, u32 count, u32 display, u32 base_offset) u32 j; u32 temp32; u8 buf_char; + u32 display_data_only = display & DB_DISPLAY_DATA_ONLY; + display &= ~DB_DISPLAY_DATA_ONLY; if (!buffer) { acpi_os_printf("Null Buffer Pointer in DumpBuffer!\n"); return; @@ -53,7 +55,9 @@ void acpi_ut_dump_buffer(u8 *buffer, u32 count, u32 display, u32 base_offset) /* Print current offset */ - acpi_os_printf("%8.4X: ", (base_offset + i)); + if (!display_data_only) { + acpi_os_printf("%8.4X: ", (base_offset + i)); + } /* Print 16 hex chars */ @@ -109,32 +113,34 @@ void acpi_ut_dump_buffer(u8 *buffer, u32 count, u32 display, u32 base_offset) * Print the ASCII equivalent characters but watch out for the bad * unprintable ones (printable chars are 0x20 through 0x7E) */ - acpi_os_printf(" "); - for (j = 0; j < 16; j++) { - if (i + j >= count) { - acpi_os_printf("\n"); - return; + if (!display_data_only) { + acpi_os_printf(" "); + for (j = 0; j < 16; j++) { + if (i + j >= count) { + acpi_os_printf("\n"); + return; + } + + /* + * Add comment characters so rest of line is ignored when + * compiled + */ + if (j == 0) { + acpi_os_printf("// "); + } + + buf_char = buffer[(acpi_size)i + j]; + if (isprint(buf_char)) { + acpi_os_printf("%c", buf_char); + } else { + acpi_os_printf("."); + } } - /* - * Add comment characters so rest of line is ignored when - * compiled - */ - if (j == 0) { - acpi_os_printf("// "); - } + /* Done with that line. */ - buf_char = buffer[(acpi_size)i + j]; - if (isprint(buf_char)) { - acpi_os_printf("%c", buf_char); - } else { - acpi_os_printf("."); - } + acpi_os_printf("\n"); } - - /* Done with that line. */ - - acpi_os_printf("\n"); i += 16; } diff --git a/drivers/acpi/acpica/utcache.c b/drivers/acpi/acpica/utcache.c index 97d6ec174c28..cabec193febb 100644 --- a/drivers/acpi/acpica/utcache.c +++ b/drivers/acpi/acpica/utcache.c @@ -3,7 +3,7 @@ * * Module Name: utcache - local cache allocation routines * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. 
* *****************************************************************************/ @@ -251,9 +251,9 @@ void *acpi_os_acquire_object(struct acpi_memory_list *cache) } else { /* The cache is empty, create a new object */ +#ifdef ACPI_DBG_TRACK_ALLOCATIONS ACPI_MEM_TRACKING(cache->total_allocated++); -#ifdef ACPI_DBG_TRACK_ALLOCATIONS if ((cache->total_allocated - cache->total_freed) > cache->max_occupied) { cache->max_occupied = diff --git a/drivers/acpi/acpica/utcksum.c b/drivers/acpi/acpica/utcksum.c new file mode 100644 index 000000000000..e6f6030b3a3f --- /dev/null +++ b/drivers/acpi/acpica/utcksum.c @@ -0,0 +1,170 @@ +// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 +/****************************************************************************** + * + * Module Name: utcksum - Support generating table checksums + * + * Copyright (C) 2000 - 2025, Intel Corp. + * + *****************************************************************************/ + +#include <acpi/acpi.h> +#include "accommon.h" +#include "acutils.h" + +/* This module used for application-level code only */ + +#define _COMPONENT ACPI_CA_DISASSEMBLER +ACPI_MODULE_NAME("utcksum") + +/******************************************************************************* + * + * FUNCTION: acpi_ut_verify_checksum + * + * PARAMETERS: table - ACPI table to verify + * length - Length of entire table + * + * RETURN: Status + * + * DESCRIPTION: Verifies that the table checksums to zero. Optionally returns + * exception on bad checksum. + * Note: We don't have to check for a CDAT here, since CDAT is + * not in the RSDT/XSDT, and the CDAT table is never installed + * via ACPICA. + * + ******************************************************************************/ +acpi_status acpi_ut_verify_checksum(struct acpi_table_header *table, u32 length) +{ + u8 checksum; + + /* + * FACS/S3PT: + * They are the odd tables, have no standard ACPI header and no checksum + */ + if (ACPI_COMPARE_NAMESEG(table->signature, ACPI_SIG_S3PT) || + ACPI_COMPARE_NAMESEG(table->signature, ACPI_SIG_FACS)) { + return (AE_OK); + } + + /* Compute the checksum on the table */ + + length = table->length; + checksum = + acpi_ut_generate_checksum(ACPI_CAST_PTR(u8, table), length, + table->checksum); + + /* Computed checksum matches table? */ + + if (checksum != table->checksum) { + ACPI_BIOS_WARNING((AE_INFO, + "Incorrect checksum in table [%4.4s] - 0x%2.2X, " + "should be 0x%2.2X", + table->signature, table->checksum, + table->checksum - checksum)); + +#if (ACPI_CHECKSUM_ABORT) + return (AE_BAD_CHECKSUM); +#endif + } + + return (AE_OK); +} + +/******************************************************************************* + * + * FUNCTION: acpi_ut_verify_cdat_checksum + * + * PARAMETERS: table - CDAT ACPI table to verify + * length - Length of entire table + * + * RETURN: Status + * + * DESCRIPTION: Verifies that the CDAT table checksums to zero. Optionally + * returns an exception on bad checksum. + * + ******************************************************************************/ + +acpi_status +acpi_ut_verify_cdat_checksum(struct acpi_table_cdat *cdat_table, u32 length) +{ + u8 checksum; + + /* Compute the checksum on the table */ + + checksum = acpi_ut_generate_checksum(ACPI_CAST_PTR(u8, cdat_table), + cdat_table->length, + cdat_table->checksum); + + /* Computed checksum matches table? 
*/ + + if (checksum != cdat_table->checksum) { + ACPI_BIOS_WARNING((AE_INFO, + "Incorrect checksum in table [%4.4s] - 0x%2.2X, " + "should be 0x%2.2X", + acpi_gbl_CDAT, cdat_table->checksum, + checksum)); + +#if (ACPI_CHECKSUM_ABORT) + return (AE_BAD_CHECKSUM); +#endif + } + + cdat_table->checksum = checksum; + return (AE_OK); +} + +/******************************************************************************* + * + * FUNCTION: acpi_ut_generate_checksum + * + * PARAMETERS: table - Pointer to table to be checksummed + * length - Length of the table + * original_checksum - Value of the checksum field + * + * RETURN: 8 bit checksum of buffer + * + * DESCRIPTION: Computes an 8 bit checksum of the table. + * + ******************************************************************************/ + +u8 acpi_ut_generate_checksum(void *table, u32 length, u8 original_checksum) +{ + u8 checksum; + + /* Sum the entire table as-is */ + + checksum = acpi_ut_checksum((u8 *)table, length); + + /* Subtract off the existing checksum value in the table */ + + checksum = (u8)(checksum - original_checksum); + + /* Compute and return the final checksum */ + + checksum = (u8)(0 - checksum); + return (checksum); +} + +/******************************************************************************* + * + * FUNCTION: acpi_ut_checksum + * + * PARAMETERS: buffer - Pointer to memory region to be checked + * length - Length of this memory region + * + * RETURN: Checksum (u8) + * + * DESCRIPTION: Calculates circular checksum of memory region. + * + ******************************************************************************/ + +u8 acpi_ut_checksum(u8 *buffer, u32 length) +{ + u8 sum = 0; + u8 *end = buffer + length; + + while (buffer < end) { + sum = (u8)(sum + *(buffer++)); + } + + return (sum); +} diff --git a/drivers/acpi/acpica/utcopy.c b/drivers/acpi/acpica/utcopy.c index a872ed7879ca..80458e70ac2b 100644 --- a/drivers/acpi/acpica/utcopy.c +++ b/drivers/acpi/acpica/utcopy.c @@ -3,7 +3,7 @@ * * Module Name: utcopy - Internal to external object translation utilities * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. * *****************************************************************************/ @@ -916,13 +916,6 @@ acpi_ut_copy_ipackage_to_ipackage(union acpi_operand_object *source_obj, status = acpi_ut_walk_package_tree(source_obj, dest_obj, acpi_ut_copy_ielement_to_ielement, walk_state); - if (ACPI_FAILURE(status)) { - - /* On failure, delete the destination package object */ - - acpi_ut_remove_reference(dest_obj); - } - return_ACPI_STATUS(status); } diff --git a/drivers/acpi/acpica/utdebug.c b/drivers/acpi/acpica/utdebug.c index aabdc25effd9..9f197e293c7e 100644 --- a/drivers/acpi/acpica/utdebug.c +++ b/drivers/acpi/acpica/utdebug.c @@ -3,7 +3,7 @@ * * Module Name: utdebug - Debug print/trace routines * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. 
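(Editorial aside, not part of the patch: the utcksum.c helpers introduced above all build on the same 8-bit wrap-around sum. A short sketch of how they fit together when a caller has just edited a table image; it assumes the ACPICA-internal headers are available, and example_finalize_table is a hypothetical helper name.)

#include <acpi/acpi.h>
#include "accommon.h"
#include "acutils.h"

/*
 * Recompute the checksum field so the whole table sums to zero mod 256.
 * acpi_ut_generate_checksum() sums the table, subtracts the current
 * checksum byte, and negates the result, which is exactly the value the
 * checksum field must hold.
 */
static void example_finalize_table(struct acpi_table_header *table)
{
	table->checksum = acpi_ut_generate_checksum(table, table->length,
						    table->checksum);

	/*
	 * After this, acpi_ut_checksum() over the full table yields 0 and
	 * acpi_ut_verify_checksum(table, table->length) returns AE_OK.
	 */
}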
* *****************************************************************************/ @@ -37,7 +37,12 @@ void acpi_ut_init_stack_ptr_trace(void) { acpi_size current_sp; +#pragma GCC diagnostic push +#if defined(__GNUC__) && __GNUC__ >= 12 +#pragma GCC diagnostic ignored "-Wdangling-pointer=" +#endif acpi_gbl_entry_stack_pointer = ¤t_sp; +#pragma GCC diagnostic pop } /******************************************************************************* @@ -57,7 +62,12 @@ void acpi_ut_track_stack_ptr(void) acpi_size current_sp; if (¤t_sp < acpi_gbl_lowest_stack_pointer) { +#pragma GCC diagnostic push +#if defined(__GNUC__) && __GNUC__ >= 12 +#pragma GCC diagnostic ignored "-Wdangling-pointer=" +#endif acpi_gbl_lowest_stack_pointer = ¤t_sp; +#pragma GCC diagnostic pop } if (acpi_gbl_nesting_level > acpi_gbl_deepest_nesting) { @@ -158,7 +168,7 @@ acpi_debug_print(u32 requested_debug_level, * Display the module name, current line number, thread ID (if requested), * current procedure nesting level, and the current procedure name */ - acpi_os_printf("%9s-%04ld ", module_name, line_number); + acpi_os_printf("%9s-%04d ", module_name, line_number); #ifdef ACPI_APPLICATION /* @@ -177,7 +187,7 @@ acpi_debug_print(u32 requested_debug_level, fill_count = 0; } - acpi_os_printf("[%02ld] %*s", + acpi_os_printf("[%02d] %*s", acpi_gbl_nesting_level, acpi_gbl_nesting_level + 1, " "); acpi_os_printf("%s%*s: ", acpi_ut_trim_function_name(function_name), fill_count, diff --git a/drivers/acpi/acpica/utdecode.c b/drivers/acpi/acpica/utdecode.c index dad02b821e19..b82130d1a8bc 100644 --- a/drivers/acpi/acpica/utdecode.c +++ b/drivers/acpi/acpica/utdecode.c @@ -3,7 +3,7 @@ * * Module Name: utdecode - Utility decoding routines (value-to-string) * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. 
* *****************************************************************************/ @@ -78,7 +78,8 @@ const char *acpi_gbl_region_types[ACPI_NUM_PREDEFINED_REGIONS] = { "IPMI", /* 0x07 */ "GeneralPurposeIo", /* 0x08 */ "GenericSerialBus", /* 0x09 */ - "PCC" /* 0x0A */ + "PCC", /* 0x0A */ + "PlatformRtMechanism" /* 0x0B */ }; const char *acpi_ut_get_region_name(u8 space_id) @@ -239,7 +240,7 @@ const char *acpi_ut_get_node_name(void *object) { struct acpi_namespace_node *node = (struct acpi_namespace_node *)object; - /* Must return a string of exactly 4 characters == ACPI_NAME_SIZE */ + /* Must return a string of exactly 4 characters == ACPI_NAMESEG_SIZE */ if (!object) { return ("NULL"); @@ -284,7 +285,7 @@ const char *acpi_ut_get_node_name(void *object) static const char *acpi_gbl_desc_type_names[] = { /* 00 */ "Not a Descriptor", - /* 01 */ "Cached", + /* 01 */ "Cached Object", /* 02 */ "State-Generic", /* 03 */ "State-Update", /* 04 */ "State-Package", @@ -295,10 +296,10 @@ static const char *acpi_gbl_desc_type_names[] = { /* 09 */ "State-Result", /* 10 */ "State-Notify", /* 11 */ "State-Thread", - /* 12 */ "Walk", - /* 13 */ "Parser", - /* 14 */ "Operand", - /* 15 */ "Node" + /* 12 */ "Tree Walk State", + /* 13 */ "Parse Tree Op", + /* 14 */ "Operand Object", + /* 15 */ "Namespace Node" }; const char *acpi_ut_get_descriptor_name(void *object) @@ -430,8 +431,10 @@ static const char *acpi_gbl_generic_notify[ACPI_GENERIC_NOTIFY_MAX + 1] = { /* 0C */ "Reserved (was previously Shutdown Request)", /* Reserved in ACPI 6.0 */ /* 0D */ "System Resource Affinity Update", - /* 0E */ "Heterogeneous Memory Attributes Update" + /* 0E */ "Heterogeneous Memory Attributes Update", /* ACPI 6.2 */ + /* 0F */ "Error Disconnect Recover" + /* ACPI 6.3 */ }; static const char *acpi_gbl_device_notify[5] = { @@ -461,13 +464,13 @@ static const char *acpi_gbl_thermal_notify[5] = { const char *acpi_ut_get_notify_name(u32 notify_value, acpi_object_type type) { - /* 00 - 0D are "common to all object types" (from ACPI Spec) */ + /* 00 - 0F are "common to all object types" (from ACPI Spec) */ if (notify_value <= ACPI_GENERIC_NOTIFY_MAX) { return (acpi_gbl_generic_notify[notify_value]); } - /* 0E - 7F are reserved */ + /* 10 - 7F are reserved */ if (notify_value <= ACPI_MAX_SYS_NOTIFY) { return ("Reserved"); diff --git a/drivers/acpi/acpica/utdelete.c b/drivers/acpi/acpica/utdelete.c index 8cc4392c61f3..e8180099d01f 100644 --- a/drivers/acpi/acpica/utdelete.c +++ b/drivers/acpi/acpica/utdelete.c @@ -112,7 +112,7 @@ static void acpi_ut_delete_internal_obj(union acpi_operand_object *object) gpe_block); } - /*lint -fallthrough */ + ACPI_FALLTHROUGH; case ACPI_TYPE_PROCESSOR: case ACPI_TYPE_THERMAL: @@ -140,7 +140,7 @@ static void acpi_ut_delete_internal_obj(union acpi_operand_object *object) (void) acpi_os_delete_semaphore (acpi_gbl_global_lock_semaphore); - acpi_gbl_global_lock_semaphore = NULL; + acpi_gbl_global_lock_semaphore = ACPI_SEMAPHORE_NULL; acpi_os_delete_mutex(object->mutex.os_mutex); acpi_gbl_global_lock_mutex = NULL; @@ -157,7 +157,7 @@ static void acpi_ut_delete_internal_obj(union acpi_operand_object *object) object, object->event.os_semaphore)); (void)acpi_os_delete_semaphore(object->event.os_semaphore); - object->event.os_semaphore = NULL; + object->event.os_semaphore = ACPI_SEMAPHORE_NULL; break; case ACPI_TYPE_METHOD: @@ -257,6 +257,10 @@ static void acpi_ut_delete_internal_obj(union acpi_operand_object *object) acpi_ut_delete_object_desc(second_desc); } + if (object->field.internal_pcc_buffer) { + 
ACPI_FREE(object->field.internal_pcc_buffer); + } + break; case ACPI_TYPE_BUFFER_FIELD: @@ -281,6 +285,14 @@ static void acpi_ut_delete_internal_obj(union acpi_operand_object *object) } break; + case ACPI_TYPE_LOCAL_ADDRESS_HANDLER: + + ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS, + "***** Address handler %p\n", object)); + + acpi_os_delete_mutex(object->address_space.context_mutex); + break; + default: break; @@ -392,7 +404,7 @@ acpi_ut_update_ref_count(union acpi_operand_object *object, u32 action) object, object->common.type, acpi_ut_get_object_type_name(object), new_count)); - message = "Incremement"; + message = "Increment"; break; case REF_DECREMENT: @@ -410,6 +422,7 @@ acpi_ut_update_ref_count(union acpi_operand_object *object, u32 action) ACPI_WARNING((AE_INFO, "Obj %p, Reference Count is already zero, cannot decrement\n", object)); + return; } ACPI_DEBUG_PRINT_RAW((ACPI_DB_ALLOCATIONS, @@ -448,13 +461,13 @@ acpi_ut_update_ref_count(union acpi_operand_object *object, u32 action) * * FUNCTION: acpi_ut_update_object_reference * - * PARAMETERS: object - Increment ref count for this object - * and all sub-objects + * PARAMETERS: object - Increment or decrement the ref count for + * this object and all sub-objects * action - Either REF_INCREMENT or REF_DECREMENT * * RETURN: Status * - * DESCRIPTION: Increment the object reference count + * DESCRIPTION: Increment or decrement the object reference count * * Object references are incremented when: * 1) An object is attached to a Node (namespace object) @@ -488,7 +501,7 @@ acpi_ut_update_object_reference(union acpi_operand_object *object, u16 action) } /* - * All sub-objects must have their reference count incremented + * All sub-objects must have their reference count updated * also. Different object types have different subobjects. */ switch (object->common.type) { @@ -555,6 +568,7 @@ acpi_ut_update_object_reference(union acpi_operand_object *object, u16 action) break; } } + next_object = NULL; break; @@ -563,11 +577,6 @@ acpi_ut_update_object_reference(union acpi_operand_object *object, u16 action) next_object = object->buffer_field.buffer_obj; break; - case ACPI_TYPE_LOCAL_REGION_FIELD: - - next_object = object->field.region_obj; - break; - case ACPI_TYPE_LOCAL_BANK_FIELD: next_object = object->bank_field.bank_obj; @@ -608,6 +617,7 @@ acpi_ut_update_object_reference(union acpi_operand_object *object, u16 action) } break; + case ACPI_TYPE_LOCAL_REGION_FIELD: case ACPI_TYPE_REGION: default: diff --git a/drivers/acpi/acpica/uterror.c b/drivers/acpi/acpica/uterror.c index e47430272692..918aca7c4db4 100644 --- a/drivers/acpi/acpica/uterror.c +++ b/drivers/acpi/acpica/uterror.c @@ -39,7 +39,7 @@ void ACPI_INTERNAL_VAR_XFACE acpi_ut_predefined_warning(const char *module_name, u32 line_number, char *pathname, - u8 node_flags, const char *format, ...) + u16 node_flags, const char *format, ...) { va_list arg_list; @@ -81,7 +81,7 @@ acpi_ut_predefined_warning(const char *module_name, void ACPI_INTERNAL_VAR_XFACE acpi_ut_predefined_info(const char *module_name, u32 line_number, - char *pathname, u8 node_flags, const char *format, ...) + char *pathname, u16 node_flags, const char *format, ...) { va_list arg_list; @@ -124,7 +124,7 @@ void ACPI_INTERNAL_VAR_XFACE acpi_ut_predefined_bios_error(const char *module_name, u32 line_number, char *pathname, - u8 node_flags, const char *format, ...) + u16 node_flags, const char *format, ...) 
{ va_list arg_list; @@ -183,19 +183,19 @@ acpi_ut_prefixed_namespace_error(const char *module_name, case AE_ALREADY_EXISTS: acpi_os_printf(ACPI_MSG_BIOS_ERROR); - message = "Failure creating"; + message = "Failure creating named object"; break; case AE_NOT_FOUND: acpi_os_printf(ACPI_MSG_BIOS_ERROR); - message = "Could not resolve"; + message = "Could not resolve symbol"; break; default: acpi_os_printf(ACPI_MSG_ERROR); - message = "Failure resolving"; + message = "Failure resolving symbol"; break; } @@ -317,7 +317,8 @@ acpi_ut_method_error(const char *module_name, } acpi_ns_print_node_pathname(node, message); - acpi_os_printf(", %s", acpi_format_exception(method_status)); + acpi_os_printf(" due to previous error (%s)", + acpi_format_exception(method_status)); ACPI_MSG_SUFFIX; ACPI_MSG_REDIRECT_END; diff --git a/drivers/acpi/acpica/uteval.c b/drivers/acpi/acpica/uteval.c index c56ae6e058d5..abc6583ed369 100644 --- a/drivers/acpi/acpica/uteval.c +++ b/drivers/acpi/acpica/uteval.c @@ -3,7 +3,7 @@ * * Module Name: uteval - Object evaluation * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/utglobal.c b/drivers/acpi/acpica/utglobal.c index f8c5b49344df..97c55a113bae 100644 --- a/drivers/acpi/acpica/utglobal.c +++ b/drivers/acpi/acpica/utglobal.c @@ -3,7 +3,7 @@ * * Module Name: utglobal - Global variables for the ACPI subsystem * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/uthex.c b/drivers/acpi/acpica/uthex.c index 3d63a9e8da4f..8cd050e9cad5 100644 --- a/drivers/acpi/acpica/uthex.c +++ b/drivers/acpi/acpica/uthex.c @@ -3,7 +3,7 @@ * * Module Name: uthex -- Hex/ASCII support functions * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/utids.c b/drivers/acpi/acpica/utids.c index 70e6bf1107a1..eb88335dea2c 100644 --- a/drivers/acpi/acpica/utids.c +++ b/drivers/acpi/acpica/utids.c @@ -3,7 +3,7 @@ * * Module Name: utids - support for device Ids - HID, UID, CID, SUB, CLS * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. * *****************************************************************************/ @@ -263,8 +263,7 @@ acpi_ut_execute_CID(struct acpi_namespace_node *device_node, * 3) Size of the actual CID strings */ cid_list_size = sizeof(struct acpi_pnp_device_id_list) + - ((count - 1) * sizeof(struct acpi_pnp_device_id)) + - string_area_size; + (count * sizeof(struct acpi_pnp_device_id)) + string_area_size; cid_list = ACPI_ALLOCATE_ZEROED(cid_list_size); if (!cid_list) { @@ -289,9 +288,7 @@ acpi_ut_execute_CID(struct acpi_namespace_node *device_node, value); length = ACPI_EISAID_STRING_SIZE; } else { /* ACPI_TYPE_STRING */ - /* Copy the String CID from the returned object */ - strcpy(next_id_string, cid_objects[i]->string.pointer); length = cid_objects[i]->string.length + 1; } diff --git a/drivers/acpi/acpica/utinit.c b/drivers/acpi/acpica/utinit.c index 0646ed62b351..4bef97e8223a 100644 --- a/drivers/acpi/acpica/utinit.c +++ b/drivers/acpi/acpica/utinit.c @@ -3,7 +3,7 @@ * * Module Name: utinit - Common ACPI subsystem initialization * - * Copyright (C) 2000 - 2018, Intel Corp. 
+ * Copyright (C) 2000 - 2025, Intel Corp. * *****************************************************************************/ @@ -154,7 +154,7 @@ acpi_status acpi_ut_init_globals(void) /* Global Lock support */ - acpi_gbl_global_lock_semaphore = NULL; + acpi_gbl_global_lock_semaphore = ACPI_SEMAPHORE_NULL; acpi_gbl_global_lock_mutex = NULL; acpi_gbl_global_lock_acquired = FALSE; acpi_gbl_global_lock_handle = 0; @@ -180,7 +180,6 @@ acpi_status acpi_ut_init_globals(void) /* Namespace */ - acpi_gbl_module_code_list = NULL; acpi_gbl_root_node = NULL; acpi_gbl_root_node_struct.name.integer = ACPI_ROOT_NAME; acpi_gbl_root_node_struct.descriptor_type = ACPI_DESC_TYPE_NAMED; diff --git a/drivers/acpi/acpica/utlock.c b/drivers/acpi/acpica/utlock.c index d61e01bd01a3..123dbcbc60bc 100644 --- a/drivers/acpi/acpica/utlock.c +++ b/drivers/acpi/acpica/utlock.c @@ -3,7 +3,7 @@ * * Module Name: utlock - Reader/Writer lock interfaces * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/utmisc.c b/drivers/acpi/acpica/utmisc.c index afaadc73196b..8638efacdbf4 100644 --- a/drivers/acpi/acpica/utmisc.c +++ b/drivers/acpi/acpica/utmisc.c @@ -59,10 +59,10 @@ u8 acpi_ut_is_aml_table(struct acpi_table_header *table) /* These are the only tables that contain executable AML */ - if (ACPI_COMPARE_NAME(table->signature, ACPI_SIG_DSDT) || - ACPI_COMPARE_NAME(table->signature, ACPI_SIG_PSDT) || - ACPI_COMPARE_NAME(table->signature, ACPI_SIG_SSDT) || - ACPI_COMPARE_NAME(table->signature, ACPI_SIG_OSDT) || + if (ACPI_COMPARE_NAMESEG(table->signature, ACPI_SIG_DSDT) || + ACPI_COMPARE_NAMESEG(table->signature, ACPI_SIG_PSDT) || + ACPI_COMPARE_NAMESEG(table->signature, ACPI_SIG_SSDT) || + ACPI_COMPARE_NAMESEG(table->signature, ACPI_SIG_OSDT) || ACPI_IS_OEM_SIG(table->signature)) { return (TRUE); } diff --git a/drivers/acpi/acpica/utobject.c b/drivers/acpi/acpica/utobject.c index ae6d8cc18cec..272e46208263 100644 --- a/drivers/acpi/acpica/utobject.c +++ b/drivers/acpi/acpica/utobject.c @@ -3,7 +3,7 @@ * * Module Name: utobject - ACPI object create/delete/size/cache routines * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. * *****************************************************************************/ @@ -44,7 +44,7 @@ acpi_ut_get_element_length(u8 object_type, * * NOTE: We always allocate the worst-case object descriptor because * these objects are cached, and we want them to be - * one-size-satisifies-any-request. This in itself may not be + * one-size-satisfies-any-request. This in itself may not be * the most memory efficient, but the efficiency of the object * cache should more than make up for this! * diff --git a/drivers/acpi/acpica/utosi.c b/drivers/acpi/acpica/utosi.c index 902a47463abf..f6ac16729e42 100644 --- a/drivers/acpi/acpica/utosi.c +++ b/drivers/acpi/acpica/utosi.c @@ -3,7 +3,7 @@ * * Module Name: utosi - Support for the _OSI predefined control method * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. 
* *****************************************************************************/ @@ -65,13 +65,17 @@ static struct acpi_interface_info acpi_default_supported_interfaces[] = { {"Windows 2006 SP2", NULL, 0, ACPI_OSI_WIN_VISTA_SP2}, /* Windows Vista SP2 - Added 09/2010 */ {"Windows 2009", NULL, 0, ACPI_OSI_WIN_7}, /* Windows 7 and Server 2008 R2 - Added 09/2009 */ {"Windows 2012", NULL, 0, ACPI_OSI_WIN_8}, /* Windows 8 and Server 2012 - Added 08/2012 */ - {"Windows 2013", NULL, 0, ACPI_OSI_WIN_8}, /* Windows 8.1 and Server 2012 R2 - Added 01/2014 */ + {"Windows 2013", NULL, 0, ACPI_OSI_WIN_8_1}, /* Windows 8.1 and Server 2012 R2 - Added 01/2014 */ {"Windows 2015", NULL, 0, ACPI_OSI_WIN_10}, /* Windows 10 - Added 03/2015 */ {"Windows 2016", NULL, 0, ACPI_OSI_WIN_10_RS1}, /* Windows 10 version 1607 - Added 12/2017 */ {"Windows 2017", NULL, 0, ACPI_OSI_WIN_10_RS2}, /* Windows 10 version 1703 - Added 12/2017 */ {"Windows 2017.2", NULL, 0, ACPI_OSI_WIN_10_RS3}, /* Windows 10 version 1709 - Added 02/2018 */ {"Windows 2018", NULL, 0, ACPI_OSI_WIN_10_RS4}, /* Windows 10 version 1803 - Added 11/2018 */ {"Windows 2018.2", NULL, 0, ACPI_OSI_WIN_10_RS5}, /* Windows 10 version 1809 - Added 11/2018 */ + {"Windows 2019", NULL, 0, ACPI_OSI_WIN_10_19H1}, /* Windows 10 version 1903 - Added 08/2019 */ + {"Windows 2020", NULL, 0, ACPI_OSI_WIN_10_20H1}, /* Windows 10 version 2004 - Added 08/2021 */ + {"Windows 2021", NULL, 0, ACPI_OSI_WIN_11}, /* Windows 11 - Added 01/2022 */ + {"Windows 2022", NULL, 0, ACPI_OSI_WIN_11_22H2}, /* Windows 11 version 22H2 - Added 04/2024 */ /* Feature Group Strings */ diff --git a/drivers/acpi/acpica/utownerid.c b/drivers/acpi/acpica/utownerid.c index 5eb8b76ce9d8..d3525ef8ed28 100644 --- a/drivers/acpi/acpica/utownerid.c +++ b/drivers/acpi/acpica/utownerid.c @@ -38,7 +38,7 @@ acpi_status acpi_ut_allocate_owner_id(acpi_owner_id *owner_id) if (*owner_id) { ACPI_ERROR((AE_INFO, - "Owner ID [0x%2.2X] already exists", *owner_id)); + "Owner ID [0x%3.3X] already exists", *owner_id)); return_ACPI_STATUS(AE_ALREADY_EXISTS); } @@ -88,14 +88,14 @@ acpi_status acpi_ut_allocate_owner_id(acpi_owner_id *owner_id) /* * Construct encoded ID from the index and bit position * - * Note: Last [j].k (bit 255) is never used and is marked + * Note: Last [j].k (bit 4095) is never used and is marked * permanently allocated (prevents +1 overflow) */ *owner_id = (acpi_owner_id)((k + 1) + ACPI_MUL_32(j)); ACPI_DEBUG_PRINT((ACPI_DB_VALUES, - "Allocated OwnerId: %2.2X\n", + "Allocated OwnerId: 0x%3.3X\n", (unsigned int)*owner_id)); goto exit; } @@ -116,7 +116,7 @@ acpi_status acpi_ut_allocate_owner_id(acpi_owner_id *owner_id) */ status = AE_OWNER_ID_LIMIT; ACPI_ERROR((AE_INFO, - "Could not allocate new OwnerId (255 max), AE_OWNER_ID_LIMIT")); + "Could not allocate new OwnerId (4095 max), AE_OWNER_ID_LIMIT")); exit: (void)acpi_ut_release_mutex(ACPI_MTX_CACHES); @@ -153,7 +153,7 @@ void acpi_ut_release_owner_id(acpi_owner_id *owner_id_ptr) /* Zero is not a valid owner_ID */ if (owner_id == 0) { - ACPI_ERROR((AE_INFO, "Invalid OwnerId: 0x%2.2X", owner_id)); + ACPI_ERROR((AE_INFO, "Invalid OwnerId: 0x%3.3X", owner_id)); return_VOID; } @@ -179,7 +179,7 @@ void acpi_ut_release_owner_id(acpi_owner_id *owner_id_ptr) acpi_gbl_owner_id_mask[index] ^= bit; } else { ACPI_ERROR((AE_INFO, - "Release of non-allocated OwnerId: 0x%2.2X", + "Attempted release of non-allocated OwnerId: 0x%3.3X", owner_id + 1)); } diff --git a/drivers/acpi/acpica/utpredef.c b/drivers/acpi/acpica/utpredef.c index 65ca9807c2a8..d9bd80e2d32a 100644 
--- a/drivers/acpi/acpica/utpredef.c +++ b/drivers/acpi/acpica/utpredef.c @@ -3,7 +3,7 @@ * * Module Name: utpredef - support functions for predefined names * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. * *****************************************************************************/ @@ -84,7 +84,7 @@ const union acpi_predefined_info *acpi_ut_match_predefined_method(char *name) this_name = acpi_gbl_predefined_methods; while (this_name->info.name[0]) { - if (ACPI_COMPARE_NAME(name, this_name->info.name)) { + if (ACPI_COMPARE_NAMESEG(name, this_name->info.name)) { return (this_name); } @@ -151,7 +151,7 @@ static u32 acpi_ut_get_argument_types(char *buffer, u16 argument_types); static const char *ut_external_type_names[] = /* Indexed by ACPI_TYPE_* */ { - ", UNSUPPORTED-TYPE", + ", Type_ANY", ", Integer", ", String", ", Buffer", @@ -201,7 +201,7 @@ const union acpi_predefined_info *acpi_ut_match_resource_name(char *name) this_name = acpi_gbl_resource_names; while (this_name->info.name[0]) { - if (ACPI_COMPARE_NAME(name, this_name->info.name)) { + if (ACPI_COMPARE_NAMESEG(name, this_name->info.name)) { return (this_name); } @@ -311,8 +311,7 @@ static u32 acpi_ut_get_argument_types(char *buffer, u16 argument_types) for (i = 0; i < arg_count; i++) { this_argument_type = METHOD_GET_NEXT_TYPE(argument_types); - if (!this_argument_type - || (this_argument_type > METHOD_MAX_ARG_TYPE)) { + if (this_argument_type > METHOD_MAX_ARG_TYPE) { printf("**** Invalid argument type (%u) " "in predefined info structure\n", this_argument_type); diff --git a/drivers/acpi/acpica/utprint.c b/drivers/acpi/acpica/utprint.c index a98c334c3bb7..423d10569736 100644 --- a/drivers/acpi/acpica/utprint.c +++ b/drivers/acpi/acpica/utprint.c @@ -3,7 +3,7 @@ * * Module Name: utprint - Formatted printing routines * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. 
* *****************************************************************************/ @@ -332,6 +332,8 @@ int vsnprintf(char *string, acpi_size size, const char *format, va_list args) int i; pos = string; + + size = ACPI_MIN(size, ACPI_PTR_DIFF(ACPI_MAX_PTR, string)); end = string + size; for (; *format; ++format) { @@ -470,7 +472,7 @@ int vsnprintf(char *string, acpi_size size, const char *format, va_list args) case 'X': type |= ACPI_FORMAT_UPPER; - /* FALLTHROUGH */ + ACPI_FALLTHROUGH; case 'x': diff --git a/drivers/acpi/acpica/utresdecode.c b/drivers/acpi/acpica/utresdecode.c index 0a9c337346e8..d801d9069841 100644 --- a/drivers/acpi/acpica/utresdecode.c +++ b/drivers/acpi/acpica/utresdecode.c @@ -82,6 +82,13 @@ const char *acpi_gbl_mtp_decode[] = { "AddressRangeNVS" }; +const char *acpi_gbl_phy_decode[] = { + "Type C", + "Type D", + "Unknown Type", + "Unknown Type" +}; + const char *acpi_gbl_rng_decode[] = { "InvalidRanges", "NonISAOnlyRanges", @@ -161,7 +168,8 @@ const char *acpi_gbl_sbt_decode[] = { "/* UNKNOWN serial bus type */", "I2C", "SPI", - "UART" + "UART", + "CSI2" }; /* I2C serial bus access mode */ @@ -276,4 +284,15 @@ const char *acpi_gbl_ptyp_decode[] = { "Input Schmitt Trigger", }; +const char *acpi_gbl_clock_input_mode[] = { + "Fixed", + "Variable", +}; + +const char *acpi_gbl_clock_input_scale[] = { + "Hz", + "KHz", + "MHz", +}; + #endif diff --git a/drivers/acpi/acpica/utresrc.c b/drivers/acpi/acpica/utresrc.c index cba5505171da..e1cc3d348750 100644 --- a/drivers/acpi/acpica/utresrc.c +++ b/drivers/acpi/acpica/utresrc.c @@ -57,6 +57,8 @@ const u8 acpi_gbl_resource_aml_sizes[] = { ACPI_AML_SIZE_LARGE(struct aml_resource_pin_group), ACPI_AML_SIZE_LARGE(struct aml_resource_pin_group_function), ACPI_AML_SIZE_LARGE(struct aml_resource_pin_group_config), + ACPI_AML_SIZE_LARGE(struct aml_resource_clock_input), + }; const u8 acpi_gbl_resource_aml_serial_bus_sizes[] = { @@ -64,6 +66,7 @@ const u8 acpi_gbl_resource_aml_serial_bus_sizes[] = { ACPI_AML_SIZE_LARGE(struct aml_resource_i2c_serialbus), ACPI_AML_SIZE_LARGE(struct aml_resource_spi_serialbus), ACPI_AML_SIZE_LARGE(struct aml_resource_uart_serialbus), + ACPI_AML_SIZE_LARGE(struct aml_resource_csi2_serialbus), }; /* @@ -113,6 +116,7 @@ static const u8 acpi_gbl_resource_types[] = { ACPI_VARIABLE_LENGTH, /* 10 pin_group */ ACPI_VARIABLE_LENGTH, /* 11 pin_group_function */ ACPI_VARIABLE_LENGTH, /* 12 pin_group_config */ + ACPI_VARIABLE_LENGTH, /* 13 clock_input */ }; /******************************************************************************* diff --git a/drivers/acpi/acpica/utstring.c b/drivers/acpi/acpica/utstring.c index 5bef0b059406..aae71b8c55d2 100644 --- a/drivers/acpi/acpica/utstring.c +++ b/drivers/acpi/acpica/utstring.c @@ -141,25 +141,25 @@ void acpi_ut_repair_name(char *name) * Special case for the root node. This can happen if we get an * error during the execution of module-level code. */ - if (ACPI_COMPARE_NAME(name, ACPI_ROOT_PATHNAME)) { + if (ACPI_COMPARE_NAMESEG(name, ACPI_ROOT_PATHNAME)) { return; } - ACPI_MOVE_NAME(&original_name, name); + ACPI_COPY_NAMESEG(&original_name, &name[0]); /* Check each character in the name */ - for (i = 0; i < ACPI_NAME_SIZE; i++) { + for (i = 0; i < ACPI_NAMESEG_SIZE; i++) { if (acpi_ut_valid_name_char(name[i], i)) { continue; } /* * Replace a bad character with something printable, yet technically - * still invalid. This prevents any collisions with existing "good" + * "odd". This prevents any collisions with existing "good" * names in the namespace. 
*/ - name[i] = '*'; + name[i] = '_'; found_bad_char = TRUE; } @@ -169,8 +169,8 @@ void acpi_ut_repair_name(char *name) if (!acpi_gbl_enable_interpreter_slack) { ACPI_WARNING((AE_INFO, - "Invalid character(s) in name (0x%.8X), repaired: [%4.4s]", - original_name, name)); + "Invalid character(s) in name (0x%.8X) %p, repaired: [%4.4s]", + original_name, name, &name[0])); } else { ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Invalid character(s) in name (0x%.8X), repaired: [%4.4s]", diff --git a/drivers/acpi/acpica/utstrsuppt.c b/drivers/acpi/acpica/utstrsuppt.c index 05ff20049b87..199982a6fb16 100644 --- a/drivers/acpi/acpica/utstrsuppt.c +++ b/drivers/acpi/acpica/utstrsuppt.c @@ -45,10 +45,15 @@ acpi_status acpi_ut_convert_octal_string(char *string, u64 *return_value_ptr) /* Convert each ASCII byte in the input string */ while (*string) { - - /* Character must be ASCII 0-7, otherwise terminate with no error */ - + /* + * Character must be ASCII 0-7, otherwise: + * 1) Runtime: terminate with no error, per the ACPI spec + * 2) Compiler: return an error + */ if (!(ACPI_IS_OCTAL_DIGIT(*string))) { +#ifdef ACPI_ASL_COMPILER + status = AE_BAD_OCTAL_CONSTANT; +#endif break; } @@ -94,10 +99,15 @@ acpi_status acpi_ut_convert_decimal_string(char *string, u64 *return_value_ptr) /* Convert each ASCII byte in the input string */ while (*string) { - - /* Character must be ASCII 0-9, otherwise terminate with no error */ - - if (!isdigit(*string)) { + /* + * Character must be ASCII 0-9, otherwise: + * 1) Runtime: terminate with no error, per the ACPI spec + * 2) Compiler: return an error + */ + if (!isdigit((int)*string)) { +#ifdef ACPI_ASL_COMPILER + status = AE_BAD_DECIMAL_CONSTANT; +#endif break; } @@ -143,10 +153,15 @@ acpi_status acpi_ut_convert_hex_string(char *string, u64 *return_value_ptr) /* Convert each ASCII byte in the input string */ while (*string) { - - /* Must be ASCII A-F, a-f, or 0-9, otherwise terminate with no error */ - - if (!isxdigit(*string)) { + /* + * Character must be ASCII A-F, a-f, or 0-9, otherwise: + * 1) Runtime: terminate with no error, per the ACPI spec + * 2) Compiler: return an error + */ + if (!isxdigit((int)*string)) { +#ifdef ACPI_ASL_COMPILER + status = AE_BAD_HEX_CONSTANT; +#endif break; } diff --git a/drivers/acpi/acpica/uttrack.c b/drivers/acpi/acpica/uttrack.c index 016a6621cc6f..a99c4c9e3d39 100644 --- a/drivers/acpi/acpica/uttrack.c +++ b/drivers/acpi/acpica/uttrack.c @@ -3,7 +3,7 @@ * * Module Name: uttrack - Memory allocation tracking routines (debug only) * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. * *****************************************************************************/ @@ -588,6 +588,18 @@ void acpi_ut_dump_allocations(u32 component, const char *module) acpi_ut_get_descriptor_name (descriptor)); + /* Optional object hex dump */ + + if (acpi_gbl_verbose_leak_dump) { + acpi_os_printf("\n"); + acpi_ut_dump_buffer((u8 *) + descriptor, + element-> + size, + DB_BYTE_DISPLAY, + 0); + } + /* Validate the descriptor type using Type field and length */ descriptor_type = 0; /* Not a valid descriptor type */ @@ -648,7 +660,7 @@ void acpi_ut_dump_allocations(u32 component, const char *module) case ACPI_DESC_TYPE_PARSER: acpi_os_printf - ("AmlOpcode 0x%04hX\n", + ("AmlOpcode 0x%04X\n", descriptor->op.asl. 
aml_opcode); break; diff --git a/drivers/acpi/acpica/utuuid.c b/drivers/acpi/acpica/utuuid.c index 59ae118092a3..0682554934ca 100644 --- a/drivers/acpi/acpica/utuuid.c +++ b/drivers/acpi/acpica/utuuid.c @@ -3,7 +3,7 @@ * * Module Name: utuuid -- UUID support functions * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. * *****************************************************************************/ @@ -61,4 +61,45 @@ void acpi_ut_convert_string_to_uuid(char *in_string, u8 *uuid_buffer) 1]); } } + +/******************************************************************************* + * + * FUNCTION: acpi_ut_convert_uuid_to_string + * + * PARAMETERS: uuid_buffer - 16-byte UUID buffer + * out_string - 36-byte formatted UUID string + * + * RETURN: Status + * + * DESCRIPTION: Convert 16-byte UUID buffer to 36-byte formatted UUID string + * out_string must be 37 bytes to include null terminator. + * + ******************************************************************************/ + +acpi_status acpi_ut_convert_uuid_to_string(char *uuid_buffer, char *out_string) +{ + u32 i; + + if (!uuid_buffer || !out_string) { + return (AE_BAD_PARAMETER); + } + + for (i = 0; i < UUID_BUFFER_LENGTH; i++) { + out_string[acpi_gbl_map_to_uuid_offset[i]] = + acpi_ut_hex_to_ascii_char(uuid_buffer[i], 4); + + out_string[acpi_gbl_map_to_uuid_offset[i] + 1] = + acpi_ut_hex_to_ascii_char(uuid_buffer[i], 0); + } + + /* Insert required hyphens (dashes) */ + + out_string[UUID_HYPHEN1_OFFSET] = + out_string[UUID_HYPHEN2_OFFSET] = + out_string[UUID_HYPHEN3_OFFSET] = + out_string[UUID_HYPHEN4_OFFSET] = '-'; + + out_string[UUID_STRING_LENGTH] = 0; /* Null terminate */ + return (AE_OK); +} #endif diff --git a/drivers/acpi/acpica/utxface.c b/drivers/acpi/acpica/utxface.c index d2d6cc065181..56942b5f026b 100644 --- a/drivers/acpi/acpica/utxface.c +++ b/drivers/acpi/acpica/utxface.c @@ -3,7 +3,7 @@ * * Module Name: utxface - External interfaces, miscellaneous utility functions * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/utxferror.c b/drivers/acpi/acpica/utxferror.c index 6bb85d691fcb..a1ed7fced4db 100644 --- a/drivers/acpi/acpica/utxferror.c +++ b/drivers/acpi/acpica/utxferror.c @@ -187,6 +187,50 @@ ACPI_EXPORT_SYMBOL(acpi_bios_error) /******************************************************************************* * + * FUNCTION: acpi_bios_exception + * + * PARAMETERS: module_name - Caller's module name (for error output) + * line_number - Caller's line number (for error output) + * status - Status value to be decoded/formatted + * format - Printf format string + additional args + * + * RETURN: None + * + * DESCRIPTION: Print an "ACPI Firmware Error" message with module/line/version + * info as well as decoded acpi_status. + * + ******************************************************************************/ +void ACPI_INTERNAL_VAR_XFACE +acpi_bios_exception(const char *module_name, + u32 line_number, + acpi_status status, const char *format, ...) 
+{ + va_list arg_list; + + ACPI_MSG_REDIRECT_BEGIN; + + /* For AE_OK, just print the message */ + + if (ACPI_SUCCESS(status)) { + acpi_os_printf(ACPI_MSG_BIOS_ERROR); + + } else { + acpi_os_printf(ACPI_MSG_BIOS_ERROR "%s, ", + acpi_format_exception(status)); + } + + va_start(arg_list, format); + acpi_os_vprintf(format, arg_list); + ACPI_MSG_SUFFIX; + va_end(arg_list); + + ACPI_MSG_REDIRECT_END; +} + +ACPI_EXPORT_SYMBOL(acpi_bios_exception) + +/******************************************************************************* + * * FUNCTION: acpi_bios_warning * * PARAMETERS: module_name - Caller's module name (for warning output) diff --git a/drivers/acpi/acpica/utxfinit.c b/drivers/acpi/acpica/utxfinit.c index e3c60f57c9f0..c1702f8fba67 100644 --- a/drivers/acpi/acpica/utxfinit.c +++ b/drivers/acpi/acpica/utxfinit.c @@ -3,7 +3,7 @@ * * Module Name: utxfinit - External interfaces for ACPICA initialization * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2025, Intel Corp. * *****************************************************************************/ @@ -120,6 +120,18 @@ acpi_status ACPI_INIT_FUNCTION acpi_enable_subsystem(u32 flags) */ acpi_gbl_early_initialization = FALSE; + /* + * Obtain a permanent mapping for the FACS. This is required for the + * Global Lock and the Firmware Waking Vector + */ + if (!(flags & ACPI_NO_FACS_INIT)) { + status = acpi_tb_initialize_facs(); + if (ACPI_FAILURE(status)) { + ACPI_WARNING((AE_INFO, "Could not map the FACS table")); + return_ACPI_STATUS(status); + } + } + #if (!ACPI_REDUCED_HARDWARE) /* Enable ACPI mode */ @@ -138,18 +150,6 @@ acpi_status ACPI_INIT_FUNCTION acpi_enable_subsystem(u32 flags) } /* - * Obtain a permanent mapping for the FACS. This is required for the - * Global Lock and the Firmware Waking Vector - */ - if (!(flags & ACPI_NO_FACS_INIT)) { - status = acpi_tb_initialize_facs(); - if (ACPI_FAILURE(status)) { - ACPI_WARNING((AE_INFO, "Could not map the FACS table")); - return_ACPI_STATUS(status); - } - } - - /* * Initialize ACPI Event handling (Fixed and General Purpose) * * Note1: We must have the hardware and events initialized before we can @@ -211,24 +211,17 @@ acpi_status ACPI_INIT_FUNCTION acpi_initialize_objects(u32 flags) ACPI_FUNCTION_TRACE(acpi_initialize_objects); +#ifdef ACPI_OBSOLETE_BEHAVIOR /* - * This case handles the legacy option that groups all module-level - * code blocks together and defers execution until all of the tables - * are loaded. Execute all of these blocks at this time. - * Execute any module-level code that was detected during the table - * load phase. - * - * Note: this option is deprecated and will be eliminated in the - * future. Use of this option can cause problems with AML code that - * depends upon in-order immediate execution of module-level code. + * 05/2019: Removed, initialization now happens at both object + * creation and table load time */ - acpi_ns_exec_module_code_list(); /* * Initialize the objects that remain uninitialized. This * runs the executable AML that may be part of the - * declaration of these objects: - * operation_regions, buffer_fields, Buffers, and Packages. + * declaration of these objects: operation_regions, buffer_fields, + * bank_fields, Buffers, and Packages. */ if (!(flags & ACPI_NO_OBJECT_INIT)) { status = acpi_ns_initialize_objects(); @@ -236,6 +229,7 @@ acpi_status ACPI_INIT_FUNCTION acpi_initialize_objects(u32 flags) return_ACPI_STATUS(status); } } +#endif /* * Initialize all device/region objects in the namespace. 
This runs diff --git a/drivers/acpi/apei/Kconfig b/drivers/acpi/apei/Kconfig index 52ae5438edeb..070c07d68dfb 100644 --- a/drivers/acpi/apei/Kconfig +++ b/drivers/acpi/apei/Kconfig @@ -23,6 +23,7 @@ config ACPI_APEI_GHES select ACPI_HED select IRQ_WORK select GENERIC_ALLOCATOR + select ARM_SDE_INTERFACE if ARM64 help Generic Hardware Error Source provides a way to report platform hardware errors (such as that from chipset). It @@ -41,19 +42,9 @@ config ACPI_APEI_PCIEAER Turn on this option to enable the corresponding support. config ACPI_APEI_SEA - bool "APEI Synchronous External Abort logging/recovering support" + bool depends on ARM64 && ACPI_APEI_GHES default y - help - This option should be enabled if the system supports - firmware first handling of SEA (Synchronous External Abort). - SEA happens with certain faults of data abort or instruction - abort synchronous exceptions on ARMv8 systems. If a system - supports firmware first handling of SEA, the platform analyzes - and handles hardware error notifications from SEA, and it may then - form a HW error record for the OS to parse and handle. This - option allows the OS to look for such hardware error record, and - take appropriate action. config ACPI_APEI_MEMORY_FAILURE bool "APEI memory error recovering support" @@ -70,6 +61,19 @@ config ACPI_APEI_EINJ mainly used for debugging and testing the other parts of APEI and some other RAS features. +config ACPI_APEI_EINJ_CXL + bool "CXL Error INJection Support" + default ACPI_APEI_EINJ + depends on ACPI_APEI_EINJ + depends on CXL_BUS && CXL_BUS <= ACPI_APEI_EINJ + help + Support for CXL protocol Error INJection through debugfs/cxl. + Availability and which errors are supported is dependent on + the host platform. Look to ACPI v6.5 section 18.6.4 and kernel + EINJ documentation for more information. + + If unsure say 'n' + config ACPI_APEI_ERST_DEBUG tristate "APEI Error Record Serialization Table (ERST) Debug Support" depends on ACPI_APEI diff --git a/drivers/acpi/apei/Makefile b/drivers/acpi/apei/Makefile index 4dfac2128737..2c474e6477e1 100644 --- a/drivers/acpi/apei/Makefile +++ b/drivers/acpi/apei/Makefile @@ -2,6 +2,8 @@ obj-$(CONFIG_ACPI_APEI) += apei.o obj-$(CONFIG_ACPI_APEI_GHES) += ghes.o obj-$(CONFIG_ACPI_APEI_EINJ) += einj.o +einj-y := einj-core.o +einj-$(CONFIG_ACPI_APEI_EINJ_CXL) += einj-cxl.o obj-$(CONFIG_ACPI_APEI_ERST_DEBUG) += erst-dbg.o apei-y := apei-base.o hest.o erst.o bert.o diff --git a/drivers/acpi/apei/apei-base.c b/drivers/acpi/apei/apei-base.c index da370e1d31f4..9c84f3da7c09 100644 --- a/drivers/acpi/apei/apei-base.c +++ b/drivers/acpi/apei/apei-base.c @@ -1,8 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * apei-base.c - ACPI Platform Error Interface (APEI) supporting * infrastructure * - * APEI allows to report errors (for example from the chipset) to the + * APEI allows to report errors (for example from the chipset) to * the operating system. This improves NMI handling especially. In * addition it supports error serialization and error injection. * @@ -15,15 +16,6 @@ * * Copyright (C) 2009, Intel Corp. * Author: Huang Ying <ying.huang@intel.com> - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version - * 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the - * GNU General Public License for more details. */ #include <linux/kernel.h> @@ -33,10 +25,10 @@ #include <linux/slab.h> #include <linux/io.h> #include <linux/kref.h> -#include <linux/rculist.h> #include <linux/interrupt.h> #include <linux/debugfs.h> -#include <asm/unaligned.h> +#include <acpi/apei.h> +#include <linux/unaligned.h> #include "apei-internal.h" @@ -133,12 +125,9 @@ EXPORT_SYMBOL_GPL(apei_exec_write_register); int apei_exec_write_register_value(struct apei_exec_context *ctx, struct acpi_whea_header *entry) { - int rc; - ctx->value = entry->value; - rc = apei_exec_write_register(ctx, entry); - return rc; + return apei_exec_write_register(ctx, entry); } EXPORT_SYMBOL_GPL(apei_exec_write_register_value); @@ -178,9 +167,9 @@ rewind: if (ip == ctx->ip) { if (entry->instruction >= ctx->instructions || !ctx->ins_table[entry->instruction].run) { - pr_warning(FW_WARN APEI_PFX - "Invalid action table, unknown instruction type: %d\n", - entry->instruction); + pr_warn(FW_WARN APEI_PFX + "Invalid action table, unknown instruction type: %d\n", + entry->instruction); return -EINVAL; } run = ctx->ins_table[entry->instruction].run; @@ -219,9 +208,9 @@ static int apei_exec_for_each_entry(struct apei_exec_context *ctx, if (end) *end = i; if (ins >= ctx->instructions || !ins_table[ins].run) { - pr_warning(FW_WARN APEI_PFX - "Invalid action table, unknown instruction type: %d\n", - ins); + pr_warn(FW_WARN APEI_PFX + "Invalid action table, unknown instruction type: %d\n", + ins); return -EINVAL; } rc = func(ctx, entry, data); @@ -295,7 +284,7 @@ struct apei_res { }; /* Collect all resources requested, to avoid conflict */ -struct apei_resources apei_resources_all = { +static struct apei_resources apei_resources_all = { .iomem = LIST_HEAD_INIT(apei_resources_all.iomem), .ioport = LIST_HEAD_INIT(apei_resources_all.ioport), }; @@ -327,7 +316,7 @@ repeat: if (res_ins) list_add(&res_ins->list, res_list); else { - res_ins = kmalloc(sizeof(*res), GFP_KERNEL); + res_ins = kmalloc(sizeof(*res_ins), GFP_KERNEL); if (!res_ins) return -ENOMEM; res_ins->start = start; @@ -587,18 +576,18 @@ static int apei_check_gar(struct acpi_generic_address *reg, u64 *paddr, space_id = reg->space_id; *paddr = get_unaligned(®->address); if (!*paddr) { - pr_warning(FW_BUG APEI_PFX - "Invalid physical address in GAR [0x%llx/%u/%u/%u/%u]\n", - *paddr, bit_width, bit_offset, access_size_code, - space_id); + pr_warn(FW_BUG APEI_PFX + "Invalid physical address in GAR [0x%llx/%u/%u/%u/%u]\n", + *paddr, bit_width, bit_offset, access_size_code, + space_id); return -EINVAL; } if (access_size_code < 1 || access_size_code > 4) { - pr_warning(FW_BUG APEI_PFX - "Invalid access size code in GAR [0x%llx/%u/%u/%u/%u]\n", - *paddr, bit_width, bit_offset, access_size_code, - space_id); + pr_warn(FW_BUG APEI_PFX + "Invalid access size code in GAR [0x%llx/%u/%u/%u/%u]\n", + *paddr, bit_width, bit_offset, access_size_code, + space_id); return -EINVAL; } *access_bit_width = 1UL << (access_size_code + 2); @@ -612,19 +601,19 @@ static int apei_check_gar(struct acpi_generic_address *reg, u64 *paddr, *access_bit_width = 64; if ((bit_width + bit_offset) > *access_bit_width) { - pr_warning(FW_BUG APEI_PFX - "Invalid bit width + offset in GAR [0x%llx/%u/%u/%u/%u]\n", - *paddr, bit_width, bit_offset, access_size_code, - space_id); + pr_warn(FW_BUG APEI_PFX + "Invalid bit width + offset in GAR [0x%llx/%u/%u/%u/%u]\n", + *paddr, bit_width, bit_offset, access_size_code, + space_id); return -EINVAL; } if (space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY && 
space_id != ACPI_ADR_SPACE_SYSTEM_IO) { - pr_warning(FW_BUG APEI_PFX - "Invalid address space type in GAR [0x%llx/%u/%u/%u/%u]\n", - *paddr, bit_width, bit_offset, access_size_code, - space_id); + pr_warn(FW_BUG APEI_PFX + "Invalid address space type in GAR [0x%llx/%u/%u/%u/%u]\n", + *paddr, bit_width, bit_offset, access_size_code, + space_id); return -EINVAL; } @@ -640,7 +629,15 @@ int apei_map_generic_address(struct acpi_generic_address *reg) rc = apei_check_gar(reg, &address, &access_bit_width); if (rc) return rc; - return acpi_os_map_generic_address(reg); + + /* IO space doesn't need mapping */ + if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) + return 0; + + if (!acpi_os_map_generic_address(reg)) + return -ENXIO; + + return 0; } EXPORT_SYMBOL_GPL(apei_map_generic_address); diff --git a/drivers/acpi/apei/apei-internal.h b/drivers/acpi/apei/apei-internal.h index 1d6ef9654725..77c10a7a7a9f 100644 --- a/drivers/acpi/apei/apei-internal.h +++ b/drivers/acpi/apei/apei-internal.h @@ -7,7 +7,6 @@ #ifndef APEI_INTERNAL_H #define APEI_INTERNAL_H -#include <linux/cper.h> #include <linux/acpi.h> struct apei_exec_context; @@ -130,10 +129,23 @@ static inline u32 cper_estatus_len(struct acpi_hest_generic_status *estatus) return sizeof(*estatus) + estatus->data_length; } -void cper_estatus_print(const char *pfx, - const struct acpi_hest_generic_status *estatus); -int cper_estatus_check_header(const struct acpi_hest_generic_status *estatus); -int cper_estatus_check(const struct acpi_hest_generic_status *estatus); - int apei_osc_setup(void); + +int einj_get_available_error_type(u32 *type, int einj_action); +int einj_error_inject(u32 type, u32 flags, u64 param1, u64 param2, u64 param3, + u64 param4); +int einj_cxl_rch_error_inject(u32 type, u32 flags, u64 param1, u64 param2, + u64 param3, u64 param4); +bool einj_is_cxl_error_type(u64 type); +int einj_validate_error_type(u64 type); + +#ifndef ACPI_EINJ_CXL_CACHE_CORRECTABLE +#define ACPI_EINJ_CXL_CACHE_CORRECTABLE BIT(12) +#define ACPI_EINJ_CXL_CACHE_UNCORRECTABLE BIT(13) +#define ACPI_EINJ_CXL_CACHE_FATAL BIT(14) +#define ACPI_EINJ_CXL_MEM_CORRECTABLE BIT(15) +#define ACPI_EINJ_CXL_MEM_UNCORRECTABLE BIT(16) +#define ACPI_EINJ_CXL_MEM_FATAL BIT(17) +#endif + #endif diff --git a/drivers/acpi/apei/bert.c b/drivers/acpi/apei/bert.c index 12771fcf0417..5427e49e646b 100644 --- a/drivers/acpi/apei/bert.c +++ b/drivers/acpi/apei/bert.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * APEI Boot Error Record Table (BERT) support * @@ -16,15 +17,13 @@ * * For more information about BERT, please refer to ACPI Specification * version 4.0, section 17.3.1 - * - * This file is licensed under GPLv2. - * */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/acpi.h> +#include <linux/cper.h> #include <linux/io.h> #include "apei-internal.h" @@ -32,35 +31,52 @@ #undef pr_fmt #define pr_fmt(fmt) "BERT: " fmt -static int bert_disable; +#define ACPI_BERT_PRINT_MAX_RECORDS 5 +#define ACPI_BERT_PRINT_MAX_LEN 1024 + +static int bert_disable __initdata; +/* + * Print "all" the error records in the BERT table, but avoid huge spam to + * the console if the BIOS included oversize records, or too many records. 
+ * Skipping some records here does not lose anything because the full + * data is available to user tools in: + * /sys/firmware/acpi/tables/data/BERT + */ static void __init bert_print_all(struct acpi_bert_region *region, unsigned int region_len) { struct acpi_hest_generic_status *estatus = (struct acpi_hest_generic_status *)region; int remain = region_len; + int printed = 0, skipped = 0; u32 estatus_len; - if (!estatus->block_status) - return; - - while (remain > sizeof(struct acpi_bert_region)) { - if (cper_estatus_check(estatus)) { - pr_err(FW_BUG "Invalid error record.\n"); - return; - } - + while (remain >= sizeof(struct acpi_bert_region)) { estatus_len = cper_estatus_len(estatus); if (remain < estatus_len) { pr_err(FW_BUG "Truncated status block (length: %u).\n", estatus_len); - return; + break; } - pr_info_once("Error records from previous boot:\n"); + /* No more error records. */ + if (!estatus->block_status) + break; - cper_estatus_print(KERN_INFO HW_ERR, estatus); + if (cper_estatus_check(estatus)) { + pr_err(FW_BUG "Invalid error record.\n"); + break; + } + + if (estatus_len < ACPI_BERT_PRINT_MAX_LEN && + printed < ACPI_BERT_PRINT_MAX_RECORDS) { + pr_info_once("Error records from previous boot:\n"); + cper_estatus_print(KERN_INFO HW_ERR, estatus); + printed++; + } else { + skipped++; + } /* * Because the boot error source is "one-time polled" type, @@ -70,19 +86,21 @@ static void __init bert_print_all(struct acpi_bert_region *region, estatus->block_status = 0; estatus = (void *)estatus + estatus_len; - /* No more error records. */ - if (!estatus->block_status) - return; - remain -= estatus_len; } + + if (skipped) + pr_info(HW_ERR "Skipped %d error records\n", skipped); + + if (printed + skipped) + pr_info("Total records found: %d\n", printed + skipped); } static int __init setup_bert_disable(char *str) { bert_disable = 1; - return 0; + return 1; } __setup("bert_disable", setup_bert_disable); @@ -124,7 +142,7 @@ static int __init bert_init(void) rc = bert_check_table(bert_tab); if (rc) { pr_err(FW_BUG "table invalid.\n"); - return rc; + goto out_put_bert_tab; } region_len = bert_tab->region_length; @@ -132,7 +150,7 @@ static int __init bert_init(void) rc = apei_resources_add(&bert_resources, bert_tab->address, region_len, true); if (rc) - return rc; + goto out_put_bert_tab; rc = apei_resources_request(&bert_resources, "APEI BERT"); if (rc) goto out_fini; @@ -147,6 +165,8 @@ static int __init bert_init(void) apei_resources_release(&bert_resources); out_fini: apei_resources_fini(&bert_resources); +out_put_bert_tab: + acpi_put_table((struct acpi_table_header *)bert_tab); return rc; } diff --git a/drivers/acpi/apei/einj-core.c b/drivers/acpi/apei/einj-core.c new file mode 100644 index 000000000000..305c240a303f --- /dev/null +++ b/drivers/acpi/apei/einj-core.c @@ -0,0 +1,1201 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * APEI Error INJection support + * + * EINJ provides a hardware error injection mechanism, this is useful + * for debugging and testing of other APEI and RAS features. + * + * For more information about EINJ, please refer to ACPI Specification + * version 4.0, section 17.5. + * + * Copyright 2009-2010 Intel Corp. 
+ * Author: Huang Ying <ying.huang@intel.com> + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/io.h> +#include <linux/debugfs.h> +#include <linux/seq_file.h> +#include <linux/nmi.h> +#include <linux/delay.h> +#include <linux/mm.h> +#include <linux/device/faux.h> +#include <linux/unaligned.h> + +#include "apei-internal.h" + +#undef pr_fmt +#define pr_fmt(fmt) "EINJ: " fmt + +#define SLEEP_UNIT_MIN 1000 /* 1ms */ +#define SLEEP_UNIT_MAX 5000 /* 5ms */ +/* Firmware should respond within 1 seconds */ +#define FIRMWARE_TIMEOUT (1 * USEC_PER_SEC) +#define COMPONENT_LEN 16 +#define ACPI65_EINJV2_SUPP BIT(30) +#define ACPI5_VENDOR_BIT BIT(31) +#define MEM_ERROR_MASK (ACPI_EINJ_MEMORY_CORRECTABLE | \ + ACPI_EINJ_MEMORY_UNCORRECTABLE | \ + ACPI_EINJ_MEMORY_FATAL) +#define CXL_ERROR_MASK (ACPI_EINJ_CXL_CACHE_CORRECTABLE | \ + ACPI_EINJ_CXL_CACHE_UNCORRECTABLE | \ + ACPI_EINJ_CXL_CACHE_FATAL | \ + ACPI_EINJ_CXL_MEM_CORRECTABLE | \ + ACPI_EINJ_CXL_MEM_UNCORRECTABLE | \ + ACPI_EINJ_CXL_MEM_FATAL) + +/* + * ACPI version 5 provides a SET_ERROR_TYPE_WITH_ADDRESS action. + */ +static int acpi5; + +struct syndrome_array { + union { + u8 acpi_id[COMPONENT_LEN]; + u8 device_id[COMPONENT_LEN]; + u8 pcie_sbdf[COMPONENT_LEN]; + u8 vendor_id[COMPONENT_LEN]; + } comp_id; + union { + u8 proc_synd[COMPONENT_LEN]; + u8 mem_synd[COMPONENT_LEN]; + u8 pcie_synd[COMPONENT_LEN]; + u8 vendor_synd[COMPONENT_LEN]; + } comp_synd; +}; + +struct einjv2_extension_struct { + u32 length; + u16 revision; + u16 component_arr_count; + struct syndrome_array component_arr[] __counted_by(component_arr_count); +}; + +struct set_error_type_with_address { + u32 type; + u32 vendor_extension; + u32 flags; + u32 apicid; + u64 memory_address; + u64 memory_address_range; + u32 pcie_sbdf; + struct einjv2_extension_struct einjv2_struct; +}; +enum { + SETWA_FLAGS_APICID = 1, + SETWA_FLAGS_MEM = 2, + SETWA_FLAGS_PCIE_SBDF = 4, + SETWA_FLAGS_EINJV2 = 8, +}; + +/* + * Vendor extensions for platform specific operations + */ +struct vendor_error_type_extension { + u32 length; + u32 pcie_sbdf; + u16 vendor_id; + u16 device_id; + u8 rev_id; + u8 reserved[3]; +}; + +static u32 notrigger; + +static u32 vendor_flags; +static struct debugfs_blob_wrapper vendor_blob; +static struct debugfs_blob_wrapper vendor_errors; +static char vendor_dev[64]; + +static u32 max_nr_components; +static u32 available_error_type; +static u32 available_error_type_v2; +static struct syndrome_array *syndrome_data; + +/* + * Some BIOSes allow parameters to the SET_ERROR_TYPE entries in the + * EINJ table through an unpublished extension. Use with caution as + * most will ignore the parameter and make their own choice of address + * for error injection. This extension is used only if + * param_extension module parameter is specified. 
+ */ +struct einj_parameter { + u64 type; + u64 reserved1; + u64 reserved2; + u64 param1; + u64 param2; +}; + +#define EINJ_OP_BUSY 0x1 +#define EINJ_STATUS_SUCCESS 0x0 +#define EINJ_STATUS_FAIL 0x1 +#define EINJ_STATUS_INVAL 0x2 + +#define EINJ_TAB_ENTRY(tab) \ + ((struct acpi_whea_header *)((char *)(tab) + \ + sizeof(struct acpi_table_einj))) + +static bool param_extension; +module_param(param_extension, bool, 0); + +static struct acpi_table_einj *einj_tab; + +static struct apei_resources einj_resources; + +static struct apei_exec_ins_type einj_ins_type[] = { + [ACPI_EINJ_READ_REGISTER] = { + .flags = APEI_EXEC_INS_ACCESS_REGISTER, + .run = apei_exec_read_register, + }, + [ACPI_EINJ_READ_REGISTER_VALUE] = { + .flags = APEI_EXEC_INS_ACCESS_REGISTER, + .run = apei_exec_read_register_value, + }, + [ACPI_EINJ_WRITE_REGISTER] = { + .flags = APEI_EXEC_INS_ACCESS_REGISTER, + .run = apei_exec_write_register, + }, + [ACPI_EINJ_WRITE_REGISTER_VALUE] = { + .flags = APEI_EXEC_INS_ACCESS_REGISTER, + .run = apei_exec_write_register_value, + }, + [ACPI_EINJ_NOOP] = { + .flags = 0, + .run = apei_exec_noop, + }, +}; + +/* + * Prevent EINJ interpreter to run simultaneously, because the + * corresponding firmware implementation may not work properly when + * invoked simultaneously. + */ +static DEFINE_MUTEX(einj_mutex); + +/* + * Exported APIs use this flag to exit early if einj_probe() failed. + */ +bool einj_initialized __ro_after_init; + +static void __iomem *einj_param; +static u32 v5param_size; +static u32 v66param_size; +static bool is_v2; + +static void einj_exec_ctx_init(struct apei_exec_context *ctx) +{ + apei_exec_ctx_init(ctx, einj_ins_type, ARRAY_SIZE(einj_ins_type), + EINJ_TAB_ENTRY(einj_tab), einj_tab->entries); +} + +static int __einj_get_available_error_type(u32 *type, int einj_action) +{ + struct apei_exec_context ctx; + int rc; + + einj_exec_ctx_init(&ctx); + rc = apei_exec_run(&ctx, einj_action); + if (rc) + return rc; + *type = apei_exec_ctx_get_output(&ctx); + + return 0; +} + +/* Get error injection capabilities of the platform */ +int einj_get_available_error_type(u32 *type, int einj_action) +{ + int rc; + + mutex_lock(&einj_mutex); + rc = __einj_get_available_error_type(type, einj_action); + mutex_unlock(&einj_mutex); + + return rc; +} + +static int einj_get_available_error_types(u32 *type1, u32 *type2) +{ + int rc; + + rc = einj_get_available_error_type(type1, ACPI_EINJ_GET_ERROR_TYPE); + if (rc) + return rc; + if (*type1 & ACPI65_EINJV2_SUPP) { + rc = einj_get_available_error_type(type2, + ACPI_EINJV2_GET_ERROR_TYPE); + if (rc) + return rc; + } + + return 0; +} + +static int einj_timedout(u64 *t) +{ + if ((s64)*t < SLEEP_UNIT_MIN) { + pr_warn(FW_WARN "Firmware does not respond in time\n"); + return 1; + } + *t -= SLEEP_UNIT_MIN; + usleep_range(SLEEP_UNIT_MIN, SLEEP_UNIT_MAX); + + return 0; +} + +static void get_oem_vendor_struct(u64 paddr, int offset, + struct vendor_error_type_extension *v) +{ + unsigned long vendor_size; + u64 target_pa = paddr + offset + sizeof(struct vendor_error_type_extension); + + vendor_size = v->length - sizeof(struct vendor_error_type_extension); + + if (vendor_size) + vendor_errors.data = acpi_os_map_memory(target_pa, vendor_size); + + if (vendor_errors.data) + vendor_errors.size = vendor_size; +} + +static void check_vendor_extension(u64 paddr, + struct set_error_type_with_address *v5param) +{ + int offset = v5param->vendor_extension; + struct vendor_error_type_extension v; + struct vendor_error_type_extension __iomem *p; + u32 sbdf; + + if (!offset) + 
return; + p = acpi_os_map_iomem(paddr + offset, sizeof(*p)); + if (!p) + return; + memcpy_fromio(&v, p, sizeof(v)); + get_oem_vendor_struct(paddr, offset, &v); + sbdf = v.pcie_sbdf; + sprintf(vendor_dev, "%x:%x:%x.%x vendor_id=%x device_id=%x rev_id=%x\n", + sbdf >> 24, (sbdf >> 16) & 0xff, + (sbdf >> 11) & 0x1f, (sbdf >> 8) & 0x7, + v.vendor_id, v.device_id, v.rev_id); + acpi_os_unmap_iomem(p, sizeof(v)); +} + +static u32 einjv2_init(struct einjv2_extension_struct *e) +{ + if (e->revision != 1) { + pr_info("Unknown v2 extension revision %u\n", e->revision); + return 0; + } + if (e->length < sizeof(*e) || e->length > PAGE_SIZE) { + pr_info(FW_BUG "Bad1 v2 extension length %u\n", e->length); + return 0; + } + if ((e->length - sizeof(*e)) % sizeof(e->component_arr[0])) { + pr_info(FW_BUG "Bad2 v2 extension length %u\n", e->length); + return 0; + } + + return (e->length - sizeof(*e)) / sizeof(e->component_arr[0]); +} + +static void __iomem *einj_get_parameter_address(void) +{ + int i; + u64 pa_v4 = 0, pa_v5 = 0; + struct acpi_whea_header *entry; + + entry = EINJ_TAB_ENTRY(einj_tab); + for (i = 0; i < einj_tab->entries; i++) { + if (entry->action == ACPI_EINJ_SET_ERROR_TYPE && + entry->instruction == ACPI_EINJ_WRITE_REGISTER && + entry->register_region.space_id == + ACPI_ADR_SPACE_SYSTEM_MEMORY) + pa_v4 = get_unaligned(&entry->register_region.address); + if (entry->action == ACPI_EINJ_SET_ERROR_TYPE_WITH_ADDRESS && + entry->instruction == ACPI_EINJ_WRITE_REGISTER && + entry->register_region.space_id == + ACPI_ADR_SPACE_SYSTEM_MEMORY) + pa_v5 = get_unaligned(&entry->register_region.address); + entry++; + } + if (pa_v5) { + struct set_error_type_with_address v5param; + struct set_error_type_with_address __iomem *p; + + v5param_size = sizeof(v5param); + p = acpi_os_map_iomem(pa_v5, sizeof(*p)); + if (p) { + memcpy_fromio(&v5param, p, v5param_size); + acpi5 = 1; + check_vendor_extension(pa_v5, &v5param); + if (available_error_type & ACPI65_EINJV2_SUPP) { + struct einjv2_extension_struct *e; + + e = &v5param.einjv2_struct; + max_nr_components = einjv2_init(e); + + /* remap including einjv2_extension_struct */ + acpi_os_unmap_iomem(p, v5param_size); + v66param_size = v5param_size - sizeof(*e) + e->length; + p = acpi_os_map_iomem(pa_v5, v66param_size); + } + + return p; + } + } + if (param_extension && pa_v4) { + struct einj_parameter v4param; + struct einj_parameter __iomem *p; + + p = acpi_os_map_iomem(pa_v4, sizeof(*p)); + if (!p) + return NULL; + memcpy_fromio(&v4param, p, sizeof(v4param)); + if (v4param.reserved1 || v4param.reserved2) { + acpi_os_unmap_iomem(p, sizeof(v4param)); + return NULL; + } + return p; + } + + return NULL; +} + +/* do sanity check to trigger table */ +static int einj_check_trigger_header(struct acpi_einj_trigger *trigger_tab) +{ + if (trigger_tab->header_size != sizeof(struct acpi_einj_trigger)) + return -EINVAL; + if (trigger_tab->table_size > PAGE_SIZE || + trigger_tab->table_size < trigger_tab->header_size) + return -EINVAL; + if (trigger_tab->entry_count != + (trigger_tab->table_size - trigger_tab->header_size) / + sizeof(struct acpi_einj_entry)) + return -EINVAL; + + return 0; +} + +static struct acpi_generic_address *einj_get_trigger_parameter_region( + struct acpi_einj_trigger *trigger_tab, u64 param1, u64 param2) +{ + int i; + struct acpi_whea_header *entry; + + entry = (struct acpi_whea_header *) + ((char *)trigger_tab + sizeof(struct acpi_einj_trigger)); + for (i = 0; i < trigger_tab->entry_count; i++) { + if (entry->action == ACPI_EINJ_TRIGGER_ERROR && + 
entry->instruction <= ACPI_EINJ_WRITE_REGISTER_VALUE && + entry->register_region.space_id == + ACPI_ADR_SPACE_SYSTEM_MEMORY && + (entry->register_region.address & param2) == (param1 & param2)) + return &entry->register_region; + entry++; + } + + return NULL; +} +/* Execute instructions in trigger error action table */ +static int __einj_error_trigger(u64 trigger_paddr, u32 type, + u64 param1, u64 param2) +{ + struct acpi_einj_trigger trigger_tab; + struct acpi_einj_trigger *full_trigger_tab; + struct apei_exec_context trigger_ctx; + struct apei_resources trigger_resources; + struct acpi_whea_header *trigger_entry; + struct resource *r; + u32 table_size; + int rc = -EIO; + struct acpi_generic_address *trigger_param_region = NULL; + struct acpi_einj_trigger __iomem *p = NULL; + + r = request_mem_region(trigger_paddr, sizeof(trigger_tab), + "APEI EINJ Trigger Table"); + if (!r) { + pr_err("Can not request [mem %#010llx-%#010llx] for Trigger table\n", + (unsigned long long)trigger_paddr, + (unsigned long long)trigger_paddr + + sizeof(trigger_tab) - 1); + goto out; + } + p = ioremap_cache(trigger_paddr, sizeof(*p)); + if (!p) { + pr_err("Failed to map trigger table!\n"); + goto out_rel_header; + } + memcpy_fromio(&trigger_tab, p, sizeof(trigger_tab)); + rc = einj_check_trigger_header(&trigger_tab); + if (rc) { + pr_warn(FW_BUG "Invalid trigger error action table.\n"); + goto out_rel_header; + } + + /* No action structures in the TRIGGER_ERROR table, nothing to do */ + if (!trigger_tab.entry_count) + goto out_rel_header; + + rc = -EIO; + table_size = trigger_tab.table_size; + full_trigger_tab = kmalloc(table_size, GFP_KERNEL); + if (!full_trigger_tab) + goto out_rel_header; + r = request_mem_region(trigger_paddr + sizeof(trigger_tab), + table_size - sizeof(trigger_tab), + "APEI EINJ Trigger Table"); + if (!r) { + pr_err("Can not request [mem %#010llx-%#010llx] for Trigger Table Entry\n", + (unsigned long long)trigger_paddr + sizeof(trigger_tab), + (unsigned long long)trigger_paddr + table_size - 1); + goto out_free_trigger_tab; + } + iounmap(p); + p = ioremap_cache(trigger_paddr, table_size); + if (!p) { + pr_err("Failed to map trigger table!\n"); + goto out_rel_entry; + } + memcpy_fromio(full_trigger_tab, p, table_size); + trigger_entry = (struct acpi_whea_header *) + ((char *)full_trigger_tab + sizeof(struct acpi_einj_trigger)); + apei_resources_init(&trigger_resources); + apei_exec_ctx_init(&trigger_ctx, einj_ins_type, + ARRAY_SIZE(einj_ins_type), + trigger_entry, trigger_tab.entry_count); + rc = apei_exec_collect_resources(&trigger_ctx, &trigger_resources); + if (rc) + goto out_fini; + rc = apei_resources_sub(&trigger_resources, &einj_resources); + if (rc) + goto out_fini; + /* + * Some firmware will access target address specified in + * param1 to trigger the error when injecting memory error. + * This will cause resource conflict with regular memory. So + * remove it from trigger table resources. 
+ */ + if ((param_extension || acpi5) && (type & MEM_ERROR_MASK) && param2) { + struct apei_resources addr_resources; + + apei_resources_init(&addr_resources); + trigger_param_region = einj_get_trigger_parameter_region( + full_trigger_tab, param1, param2); + if (trigger_param_region) { + rc = apei_resources_add(&addr_resources, + trigger_param_region->address, + trigger_param_region->bit_width/8, true); + if (rc) + goto out_fini; + rc = apei_resources_sub(&trigger_resources, + &addr_resources); + } + apei_resources_fini(&addr_resources); + if (rc) + goto out_fini; + } + rc = apei_resources_request(&trigger_resources, "APEI EINJ Trigger"); + if (rc) + goto out_fini; + rc = apei_exec_pre_map_gars(&trigger_ctx); + if (rc) + goto out_release; + + rc = apei_exec_run(&trigger_ctx, ACPI_EINJ_TRIGGER_ERROR); + + apei_exec_post_unmap_gars(&trigger_ctx); +out_release: + apei_resources_release(&trigger_resources); +out_fini: + apei_resources_fini(&trigger_resources); +out_rel_entry: + release_mem_region(trigger_paddr + sizeof(trigger_tab), + table_size - sizeof(trigger_tab)); +out_free_trigger_tab: + kfree(full_trigger_tab); +out_rel_header: + release_mem_region(trigger_paddr, sizeof(trigger_tab)); +out: + if (p) + iounmap(p); + + return rc; +} + +static bool is_end_of_list(u8 *val) +{ + for (int i = 0; i < COMPONENT_LEN; ++i) { + if (val[i] != 0xFF) + return false; + } + return true; +} +static int __einj_error_inject(u32 type, u32 flags, u64 param1, u64 param2, + u64 param3, u64 param4) +{ + struct apei_exec_context ctx; + u32 param_size = is_v2 ? v66param_size : v5param_size; + u64 val, trigger_paddr, timeout = FIRMWARE_TIMEOUT; + int i, rc; + + einj_exec_ctx_init(&ctx); + + rc = apei_exec_run_optional(&ctx, ACPI_EINJ_BEGIN_OPERATION); + if (rc) + return rc; + apei_exec_ctx_set_input(&ctx, type); + if (acpi5) { + struct set_error_type_with_address *v5param; + + v5param = kmalloc(param_size, GFP_KERNEL); + if (!v5param) + return -ENOMEM; + + memcpy_fromio(v5param, einj_param, param_size); + v5param->type = type; + if (type & ACPI5_VENDOR_BIT) { + switch (vendor_flags) { + case SETWA_FLAGS_APICID: + v5param->apicid = param1; + break; + case SETWA_FLAGS_MEM: + v5param->memory_address = param1; + v5param->memory_address_range = param2; + break; + case SETWA_FLAGS_PCIE_SBDF: + v5param->pcie_sbdf = param1; + break; + } + v5param->flags = vendor_flags; + } else if (flags) { + v5param->flags = flags; + v5param->memory_address = param1; + v5param->memory_address_range = param2; + + if (is_v2) { + for (i = 0; i < max_nr_components; i++) { + if (is_end_of_list(syndrome_data[i].comp_id.acpi_id)) + break; + v5param->einjv2_struct.component_arr[i].comp_id = + syndrome_data[i].comp_id; + v5param->einjv2_struct.component_arr[i].comp_synd = + syndrome_data[i].comp_synd; + } + v5param->einjv2_struct.component_arr_count = i; + } else { + v5param->apicid = param3; + v5param->pcie_sbdf = param4; + } + } else { + switch (type) { + case ACPI_EINJ_PROCESSOR_CORRECTABLE: + case ACPI_EINJ_PROCESSOR_UNCORRECTABLE: + case ACPI_EINJ_PROCESSOR_FATAL: + v5param->apicid = param1; + v5param->flags = SETWA_FLAGS_APICID; + break; + case ACPI_EINJ_MEMORY_CORRECTABLE: + case ACPI_EINJ_MEMORY_UNCORRECTABLE: + case ACPI_EINJ_MEMORY_FATAL: + v5param->memory_address = param1; + v5param->memory_address_range = param2; + v5param->flags = SETWA_FLAGS_MEM; + break; + case ACPI_EINJ_PCIX_CORRECTABLE: + case ACPI_EINJ_PCIX_UNCORRECTABLE: + case ACPI_EINJ_PCIX_FATAL: + v5param->pcie_sbdf = param1; + v5param->flags = SETWA_FLAGS_PCIE_SBDF; + 
break; + } + } + memcpy_toio(einj_param, v5param, param_size); + kfree(v5param); + } else { + rc = apei_exec_run(&ctx, ACPI_EINJ_SET_ERROR_TYPE); + if (rc) + return rc; + if (einj_param) { + struct einj_parameter v4param; + + memcpy_fromio(&v4param, einj_param, sizeof(v4param)); + v4param.param1 = param1; + v4param.param2 = param2; + memcpy_toio(einj_param, &v4param, sizeof(v4param)); + } + } + rc = apei_exec_run(&ctx, ACPI_EINJ_EXECUTE_OPERATION); + if (rc) + return rc; + for (;;) { + rc = apei_exec_run(&ctx, ACPI_EINJ_CHECK_BUSY_STATUS); + if (rc) + return rc; + val = apei_exec_ctx_get_output(&ctx); + if (!(val & EINJ_OP_BUSY)) + break; + if (einj_timedout(&timeout)) + return -EIO; + } + rc = apei_exec_run(&ctx, ACPI_EINJ_GET_COMMAND_STATUS); + if (rc) + return rc; + val = apei_exec_ctx_get_output(&ctx); + if (val == EINJ_STATUS_FAIL) + return -EBUSY; + else if (val == EINJ_STATUS_INVAL) + return -EINVAL; + + /* + * The error is injected into the platform successfully, then it needs + * to trigger the error. + */ + rc = apei_exec_run(&ctx, ACPI_EINJ_GET_TRIGGER_TABLE); + if (rc) + return rc; + trigger_paddr = apei_exec_ctx_get_output(&ctx); + if (notrigger == 0) { + rc = __einj_error_trigger(trigger_paddr, type, param1, param2); + if (rc) + return rc; + } + rc = apei_exec_run_optional(&ctx, ACPI_EINJ_END_OPERATION); + + return rc; +} + +/* Allow almost all types of address except MMIO. */ +static bool is_allowed_range(u64 base_addr, u64 size) +{ + int i; + /* + * MMIO region is usually claimed with IORESOURCE_MEM + IORES_DESC_NONE. + * However, IORES_DESC_NONE is treated like a wildcard when we check if + * region intersects with known resource. So do an allow list check for + * IORES_DESCs that definitely or most likely not MMIO. + */ + int non_mmio_desc[] = { + IORES_DESC_CRASH_KERNEL, + IORES_DESC_ACPI_TABLES, + IORES_DESC_ACPI_NV_STORAGE, + IORES_DESC_PERSISTENT_MEMORY, + IORES_DESC_PERSISTENT_MEMORY_LEGACY, + /* Treat IORES_DESC_DEVICE_PRIVATE_MEMORY as MMIO. */ + IORES_DESC_RESERVED, + IORES_DESC_SOFT_RESERVED, + }; + + if (region_intersects(base_addr, size, IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE) + == REGION_INTERSECTS) + return true; + + for (i = 0; i < ARRAY_SIZE(non_mmio_desc); ++i) { + if (region_intersects(base_addr, size, IORESOURCE_MEM, non_mmio_desc[i]) + == REGION_INTERSECTS) + return true; + } + + if (arch_is_platform_page(base_addr)) + return true; + + return false; +} + +/* Inject the specified hardware error */ +int einj_error_inject(u32 type, u32 flags, u64 param1, u64 param2, u64 param3, + u64 param4) +{ + int rc; + u64 base_addr, size; + + /* If user manually set "flags", make sure it is legal */ + if (flags && (flags & ~(SETWA_FLAGS_APICID | SETWA_FLAGS_MEM | + SETWA_FLAGS_PCIE_SBDF | SETWA_FLAGS_EINJV2))) + return -EINVAL; + + /* check if type is a valid EINJv2 error type */ + if (is_v2) { + if (!(type & available_error_type_v2)) + return -EINVAL; + } + /* + * We need extra sanity checks for memory errors. + * Other types leap directly to injection. + */ + + /* ensure param1/param2 existed */ + if (!(param_extension || acpi5)) + goto inject; + + /* ensure injection is memory related */ + if (type & ACPI5_VENDOR_BIT) { + if (vendor_flags != SETWA_FLAGS_MEM) + goto inject; + } else if (!(type & MEM_ERROR_MASK) && !(flags & SETWA_FLAGS_MEM)) { + goto inject; + } + + /* + * Injections targeting a CXL 1.0/1.1 port have to be injected + * via the einj_cxl_rch_error_inject() path as that does the proper + * validation of the given RCRB base (MMIO) address. 
+ */ + if (einj_is_cxl_error_type(type) && (flags & SETWA_FLAGS_MEM)) + return -EINVAL; + + /* + * Disallow crazy address masks that give BIOS leeway to pick + * injection address almost anywhere. Insist on page or + * better granularity and that target address is normal RAM or + * as long as is not MMIO. + */ + base_addr = param1 & param2; + size = ~param2 + 1; + + if ((param2 & PAGE_MASK) != PAGE_MASK) + return -EINVAL; + + if (!is_allowed_range(base_addr, size)) + return -EINVAL; + + if (is_zero_pfn(base_addr >> PAGE_SHIFT)) + return -EADDRINUSE; + +inject: + mutex_lock(&einj_mutex); + rc = __einj_error_inject(type, flags, param1, param2, param3, param4); + mutex_unlock(&einj_mutex); + + return rc; +} + +int einj_cxl_rch_error_inject(u32 type, u32 flags, u64 param1, u64 param2, + u64 param3, u64 param4) +{ + int rc; + + if (!(einj_is_cxl_error_type(type) && (flags & SETWA_FLAGS_MEM))) + return -EINVAL; + + mutex_lock(&einj_mutex); + rc = __einj_error_inject(type, flags, param1, param2, param3, param4); + mutex_unlock(&einj_mutex); + + return rc; +} + +static u32 error_type; +static u32 error_flags; +static u64 error_param1; +static u64 error_param2; +static u64 error_param3; +static u64 error_param4; +static struct dentry *einj_debug_dir; +static char einj_buf[32]; +static bool einj_v2_enabled; +static struct { u32 mask; const char *str; } const einj_error_type_string[] = { + { BIT(0), "Processor Correctable" }, + { BIT(1), "Processor Uncorrectable non-fatal" }, + { BIT(2), "Processor Uncorrectable fatal" }, + { BIT(3), "Memory Correctable" }, + { BIT(4), "Memory Uncorrectable non-fatal" }, + { BIT(5), "Memory Uncorrectable fatal" }, + { BIT(6), "PCI Express Correctable" }, + { BIT(7), "PCI Express Uncorrectable non-fatal" }, + { BIT(8), "PCI Express Uncorrectable fatal" }, + { BIT(9), "Platform Correctable" }, + { BIT(10), "Platform Uncorrectable non-fatal" }, + { BIT(11), "Platform Uncorrectable fatal"}, + { BIT(31), "Vendor Defined Error Types" }, +}; + +static struct { u32 mask; const char *str; } const einjv2_error_type_string[] = { + { BIT(0), "EINJV2 Processor Error" }, + { BIT(1), "EINJV2 Memory Error" }, + { BIT(2), "EINJV2 PCI Express Error" }, +}; + +static int available_error_type_show(struct seq_file *m, void *v) +{ + + for (int pos = 0; pos < ARRAY_SIZE(einj_error_type_string); pos++) + if (available_error_type & einj_error_type_string[pos].mask) + seq_printf(m, "0x%08x\t%s\n", einj_error_type_string[pos].mask, + einj_error_type_string[pos].str); + if ((available_error_type & ACPI65_EINJV2_SUPP) && einj_v2_enabled) { + for (int pos = 0; pos < ARRAY_SIZE(einjv2_error_type_string); pos++) { + if (available_error_type_v2 & einjv2_error_type_string[pos].mask) + seq_printf(m, "V2_0x%08x\t%s\n", einjv2_error_type_string[pos].mask, + einjv2_error_type_string[pos].str); + } + } + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(available_error_type); + +static ssize_t error_type_get(struct file *file, char __user *buf, + size_t count, loff_t *ppos) +{ + return simple_read_from_buffer(buf, count, ppos, einj_buf, strlen(einj_buf)); +} + +bool einj_is_cxl_error_type(u64 type) +{ + return (type & CXL_ERROR_MASK) && (!(type & ACPI5_VENDOR_BIT)); +} + +int einj_validate_error_type(u64 type) +{ + u32 tval, vendor; + + /* Only low 32 bits for error type are valid */ + if (type & GENMASK_ULL(63, 32)) + return -EINVAL; + + /* + * Vendor defined types have 0x80000000 bit set, and + * are not enumerated by ACPI_EINJ_GET_ERROR_TYPE + */ + vendor = type & ACPI5_VENDOR_BIT; + tval = type & GENMASK(30, 0); 
+ + /* Only one error type can be specified */ + if (tval & (tval - 1)) + return -EINVAL; + if (!vendor) + if (!(type & (available_error_type | available_error_type_v2))) + return -EINVAL; + + return 0; +} + +static ssize_t error_type_set(struct file *file, const char __user *buf, + size_t count, loff_t *ppos) +{ + int rc; + u64 val; + + /* Leave the last character for the NUL terminator */ + if (count > sizeof(einj_buf) - 1) + return -EINVAL; + + memset(einj_buf, 0, sizeof(einj_buf)); + if (copy_from_user(einj_buf, buf, count)) + return -EFAULT; + + if (strncmp(einj_buf, "V2_", 3) == 0) { + if (!sscanf(einj_buf, "V2_%llx", &val)) + return -EINVAL; + is_v2 = true; + } else { + if (!sscanf(einj_buf, "%llx", &val)) + return -EINVAL; + is_v2 = false; + } + + rc = einj_validate_error_type(val); + if (rc) + return rc; + + error_type = val; + + return count; +} + +static const struct file_operations error_type_fops = { + .read = error_type_get, + .write = error_type_set, +}; + +static int error_inject_set(void *data, u64 val) +{ + if (!error_type) + return -EINVAL; + + if (is_v2) + error_flags |= SETWA_FLAGS_EINJV2; + else + error_flags &= ~SETWA_FLAGS_EINJV2; + + return einj_error_inject(error_type, error_flags, error_param1, error_param2, + error_param3, error_param4); +} + +DEFINE_DEBUGFS_ATTRIBUTE(error_inject_fops, NULL, error_inject_set, "%llu\n"); + +static int einj_check_table(struct acpi_table_einj *einj_tab) +{ + if ((einj_tab->header_length != + (sizeof(struct acpi_table_einj) - sizeof(einj_tab->header))) + && (einj_tab->header_length != sizeof(struct acpi_table_einj))) + return -EINVAL; + if (einj_tab->header.length < sizeof(struct acpi_table_einj)) + return -EINVAL; + if (einj_tab->entries != + (einj_tab->header.length - sizeof(struct acpi_table_einj)) / + sizeof(struct acpi_einj_entry)) + return -EINVAL; + + return 0; +} + +static ssize_t u128_read(struct file *f, char __user *buf, size_t count, loff_t *off) +{ + char output[2 * COMPONENT_LEN + 1]; + u8 *data = f->f_inode->i_private; + int i; + + if (*off >= sizeof(output)) + return 0; + + for (i = 0; i < COMPONENT_LEN; i++) + sprintf(output + 2 * i, "%.02x", data[COMPONENT_LEN - i - 1]); + output[2 * COMPONENT_LEN] = '\n'; + + return simple_read_from_buffer(buf, count, off, output, sizeof(output)); +} + +static ssize_t u128_write(struct file *f, const char __user *buf, size_t count, loff_t *off) +{ + char input[2 + 2 * COMPONENT_LEN + 2]; + u8 *save = f->f_inode->i_private; + u8 tmp[COMPONENT_LEN]; + char byte[3] = {}; + char *s, *e; + ssize_t c; + long val; + int i; + + /* Require that user supply whole input line in one write(2) syscall */ + if (*off) + return -EINVAL; + + c = simple_write_to_buffer(input, sizeof(input), off, buf, count); + if (c < 0) + return c; + + if (c < 1 || input[c - 1] != '\n') + return -EINVAL; + + /* Empty line means invalidate this entry */ + if (c == 1) { + memset(save, 0xff, COMPONENT_LEN); + return c; + } + + if (input[0] == '0' && (input[1] == 'x' || input[1] == 'X')) + s = input + 2; + else + s = input; + e = input + c - 1; + + for (i = 0; i < COMPONENT_LEN; i++) { + byte[1] = *--e; + byte[0] = e > s ? 
*--e : '0'; + if (kstrtol(byte, 16, &val)) + return -EINVAL; + tmp[i] = val; + if (e <= s) + break; + } + while (++i < COMPONENT_LEN) + tmp[i] = 0; + + memcpy(save, tmp, COMPONENT_LEN); + + return c; +} + +static const struct file_operations u128_fops = { + .read = u128_read, + .write = u128_write, +}; + +static bool setup_einjv2_component_files(void) +{ + char name[32]; + + syndrome_data = kcalloc(max_nr_components, sizeof(syndrome_data[0]), GFP_KERNEL); + if (!syndrome_data) + return false; + + for (int i = 0; i < max_nr_components; i++) { + sprintf(name, "component_id%d", i); + debugfs_create_file(name, 0600, einj_debug_dir, + &syndrome_data[i].comp_id, &u128_fops); + sprintf(name, "component_syndrome%d", i); + debugfs_create_file(name, 0600, einj_debug_dir, + &syndrome_data[i].comp_synd, &u128_fops); + } + + return true; +} + +static int __init einj_probe(struct faux_device *fdev) +{ + int rc; + acpi_status status; + struct apei_exec_context ctx; + + status = acpi_get_table(ACPI_SIG_EINJ, 0, + (struct acpi_table_header **)&einj_tab); + if (status == AE_NOT_FOUND) { + pr_debug("EINJ table not found.\n"); + return -ENODEV; + } else if (ACPI_FAILURE(status)) { + pr_err("Failed to get EINJ table: %s\n", + acpi_format_exception(status)); + return -EINVAL; + } + + rc = einj_check_table(einj_tab); + if (rc) { + pr_warn(FW_BUG "Invalid EINJ table.\n"); + goto err_put_table; + } + + rc = einj_get_available_error_types(&available_error_type, &available_error_type_v2); + if (rc) + goto err_put_table; + + rc = -ENOMEM; + einj_debug_dir = debugfs_create_dir("einj", apei_get_debugfs_dir()); + + debugfs_create_file("available_error_type", S_IRUSR, einj_debug_dir, + NULL, &available_error_type_fops); + debugfs_create_file_unsafe("error_type", 0600, einj_debug_dir, + NULL, &error_type_fops); + debugfs_create_file_unsafe("error_inject", 0200, einj_debug_dir, + NULL, &error_inject_fops); + + apei_resources_init(&einj_resources); + einj_exec_ctx_init(&ctx); + rc = apei_exec_collect_resources(&ctx, &einj_resources); + if (rc) { + pr_err("Error collecting EINJ resources.\n"); + goto err_fini; + } + + rc = apei_resources_request(&einj_resources, "APEI EINJ"); + if (rc) { + pr_err("Error requesting memory/port resources.\n"); + goto err_fini; + } + + rc = apei_exec_pre_map_gars(&ctx); + if (rc) { + pr_err("Error pre-mapping GARs.\n"); + goto err_release; + } + + einj_param = einj_get_parameter_address(); + if ((param_extension || acpi5) && einj_param) { + debugfs_create_x32("flags", S_IRUSR | S_IWUSR, einj_debug_dir, + &error_flags); + debugfs_create_x64("param1", S_IRUSR | S_IWUSR, einj_debug_dir, + &error_param1); + debugfs_create_x64("param2", S_IRUSR | S_IWUSR, einj_debug_dir, + &error_param2); + debugfs_create_x64("param3", S_IRUSR | S_IWUSR, einj_debug_dir, + &error_param3); + debugfs_create_x64("param4", S_IRUSR | S_IWUSR, einj_debug_dir, + &error_param4); + debugfs_create_x32("notrigger", S_IRUSR | S_IWUSR, + einj_debug_dir, ¬rigger); + if (available_error_type & ACPI65_EINJV2_SUPP) + einj_v2_enabled = setup_einjv2_component_files(); + } + + if (vendor_dev[0]) { + vendor_blob.data = vendor_dev; + vendor_blob.size = strlen(vendor_dev); + debugfs_create_blob("vendor", S_IRUSR, einj_debug_dir, + &vendor_blob); + debugfs_create_x32("vendor_flags", S_IRUSR | S_IWUSR, + einj_debug_dir, &vendor_flags); + } + + if (vendor_errors.size) + debugfs_create_blob("oem_error", 0600, einj_debug_dir, + &vendor_errors); + + pr_info("Error INJection is initialized.\n"); + + return 0; + +err_release: + 
apei_resources_release(&einj_resources); +err_fini: + apei_resources_fini(&einj_resources); + debugfs_remove_recursive(einj_debug_dir); +err_put_table: + acpi_put_table((struct acpi_table_header *)einj_tab); + + return rc; +} + +static void einj_remove(struct faux_device *fdev) +{ + struct apei_exec_context ctx; + + if (einj_param) { + acpi_size size; + + if (v66param_size) + size = v66param_size; + else if (acpi5) + size = v5param_size; + else + size = sizeof(struct einj_parameter); + + acpi_os_unmap_iomem(einj_param, size); + if (vendor_errors.size) + acpi_os_unmap_memory(vendor_errors.data, vendor_errors.size); + } + einj_exec_ctx_init(&ctx); + apei_exec_post_unmap_gars(&ctx); + apei_resources_release(&einj_resources); + apei_resources_fini(&einj_resources); + debugfs_remove_recursive(einj_debug_dir); + kfree(syndrome_data); + acpi_put_table((struct acpi_table_header *)einj_tab); +} + +static struct faux_device *einj_dev; +static struct faux_device_ops einj_device_ops = { + .probe = einj_probe, + .remove = einj_remove, +}; + +static int __init einj_init(void) +{ + if (acpi_disabled) { + pr_debug("ACPI disabled.\n"); + return -ENODEV; + } + + einj_dev = faux_device_create("acpi-einj", NULL, &einj_device_ops); + + if (einj_dev) + einj_initialized = true; + + return 0; +} + +static void __exit einj_exit(void) +{ + faux_device_destroy(einj_dev); +} + +module_init(einj_init); +module_exit(einj_exit); + +MODULE_AUTHOR("Huang Ying"); +MODULE_DESCRIPTION("APEI Error INJection support"); +MODULE_LICENSE("GPL"); diff --git a/drivers/acpi/apei/einj-cxl.c b/drivers/acpi/apei/einj-cxl.c new file mode 100644 index 000000000000..e70a416ec925 --- /dev/null +++ b/drivers/acpi/apei/einj-cxl.c @@ -0,0 +1,113 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * CXL Error INJection support. Used by CXL core to inject + * protocol errors into CXL ports. + * + * Copyright (C) 2023 Advanced Micro Devices, Inc. 
+ * + * Author: Ben Cheatham <benjamin.cheatham@amd.com> + */ +#include <linux/seq_file.h> +#include <linux/pci.h> +#include <cxl/einj.h> + +#include "apei-internal.h" + +/* Defined in einj-core.c */ +extern bool einj_initialized; + +static struct { u32 mask; const char *str; } const einj_cxl_error_type_string[] = { + { ACPI_EINJ_CXL_CACHE_CORRECTABLE, "CXL.cache Protocol Correctable" }, + { ACPI_EINJ_CXL_CACHE_UNCORRECTABLE, "CXL.cache Protocol Uncorrectable non-fatal" }, + { ACPI_EINJ_CXL_CACHE_FATAL, "CXL.cache Protocol Uncorrectable fatal" }, + { ACPI_EINJ_CXL_MEM_CORRECTABLE, "CXL.mem Protocol Correctable" }, + { ACPI_EINJ_CXL_MEM_UNCORRECTABLE, "CXL.mem Protocol Uncorrectable non-fatal" }, + { ACPI_EINJ_CXL_MEM_FATAL, "CXL.mem Protocol Uncorrectable fatal" }, +}; + +int einj_cxl_available_error_type_show(struct seq_file *m, void *v) +{ + int cxl_err, rc; + u32 available_error_type = 0; + + rc = einj_get_available_error_type(&available_error_type, ACPI_EINJ_GET_ERROR_TYPE); + if (rc) + return rc; + + for (int pos = 0; pos < ARRAY_SIZE(einj_cxl_error_type_string); pos++) { + cxl_err = ACPI_EINJ_CXL_CACHE_CORRECTABLE << pos; + + if (available_error_type & cxl_err) + seq_printf(m, "0x%08x\t%s\n", + einj_cxl_error_type_string[pos].mask, + einj_cxl_error_type_string[pos].str); + } + + return 0; +} +EXPORT_SYMBOL_NS_GPL(einj_cxl_available_error_type_show, "CXL"); + +static int cxl_dport_get_sbdf(struct pci_dev *dport_dev, u64 *sbdf) +{ + struct pci_bus *pbus; + struct pci_host_bridge *bridge; + u64 seg = 0, bus; + + pbus = dport_dev->bus; + bridge = pci_find_host_bridge(pbus); + + if (!bridge) + return -ENODEV; + + if (bridge->domain_nr != PCI_DOMAIN_NR_NOT_SET) + seg = bridge->domain_nr; + + bus = pbus->number; + *sbdf = (seg << 24) | (bus << 16) | (dport_dev->devfn << 8); + + return 0; +} + +int einj_cxl_inject_rch_error(u64 rcrb, u64 type) +{ + int rc; + + /* Only CXL error types can be specified */ + if (!einj_is_cxl_error_type(type)) + return -EINVAL; + + rc = einj_validate_error_type(type); + if (rc) + return rc; + + return einj_cxl_rch_error_inject(type, 0x2, rcrb, GENMASK_ULL(63, 0), + 0, 0); +} +EXPORT_SYMBOL_NS_GPL(einj_cxl_inject_rch_error, "CXL"); + +int einj_cxl_inject_error(struct pci_dev *dport, u64 type) +{ + u64 param4 = 0; + int rc; + + /* Only CXL error types can be specified */ + if (!einj_is_cxl_error_type(type)) + return -EINVAL; + + rc = einj_validate_error_type(type); + if (rc) + return rc; + + rc = cxl_dport_get_sbdf(dport, ¶m4); + if (rc) + return rc; + + return einj_error_inject(type, 0x4, 0, 0, 0, param4); +} +EXPORT_SYMBOL_NS_GPL(einj_cxl_inject_error, "CXL"); + +bool einj_cxl_is_initialized(void) +{ + return einj_initialized; +} +EXPORT_SYMBOL_NS_GPL(einj_cxl_is_initialized, "CXL"); diff --git a/drivers/acpi/apei/einj.c b/drivers/acpi/apei/einj.c deleted file mode 100644 index fcccbfdbdd1a..000000000000 --- a/drivers/acpi/apei/einj.c +++ /dev/null @@ -1,841 +0,0 @@ -/* - * APEI Error INJection support - * - * EINJ provides a hardware error injection mechanism, this is useful - * for debugging and testing of other APEI and RAS features. - * - * For more information about EINJ, please refer to ACPI Specification - * version 4.0, section 17.5. - * - * Copyright 2009-2010 Intel Corp. - * Author: Huang Ying <ying.huang@intel.com> - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version - * 2 as published by the Free Software Foundation. 
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ - -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/init.h> -#include <linux/io.h> -#include <linux/debugfs.h> -#include <linux/seq_file.h> -#include <linux/nmi.h> -#include <linux/delay.h> -#include <linux/mm.h> -#include <asm/unaligned.h> - -#include "apei-internal.h" - -#undef pr_fmt -#define pr_fmt(fmt) "EINJ: " fmt - -#define SPIN_UNIT 100 /* 100ns */ -/* Firmware should respond within 1 milliseconds */ -#define FIRMWARE_TIMEOUT (1 * NSEC_PER_MSEC) -#define ACPI5_VENDOR_BIT BIT(31) -#define MEM_ERROR_MASK (ACPI_EINJ_MEMORY_CORRECTABLE | \ - ACPI_EINJ_MEMORY_UNCORRECTABLE | \ - ACPI_EINJ_MEMORY_FATAL) - -/* - * ACPI version 5 provides a SET_ERROR_TYPE_WITH_ADDRESS action. - */ -static int acpi5; - -struct set_error_type_with_address { - u32 type; - u32 vendor_extension; - u32 flags; - u32 apicid; - u64 memory_address; - u64 memory_address_range; - u32 pcie_sbdf; -}; -enum { - SETWA_FLAGS_APICID = 1, - SETWA_FLAGS_MEM = 2, - SETWA_FLAGS_PCIE_SBDF = 4, -}; - -/* - * Vendor extensions for platform specific operations - */ -struct vendor_error_type_extension { - u32 length; - u32 pcie_sbdf; - u16 vendor_id; - u16 device_id; - u8 rev_id; - u8 reserved[3]; -}; - -static u32 notrigger; - -static u32 vendor_flags; -static struct debugfs_blob_wrapper vendor_blob; -static char vendor_dev[64]; - -/* - * Some BIOSes allow parameters to the SET_ERROR_TYPE entries in the - * EINJ table through an unpublished extension. Use with caution as - * most will ignore the parameter and make their own choice of address - * for error injection. This extension is used only if - * param_extension module parameter is specified. - */ -struct einj_parameter { - u64 type; - u64 reserved1; - u64 reserved2; - u64 param1; - u64 param2; -}; - -#define EINJ_OP_BUSY 0x1 -#define EINJ_STATUS_SUCCESS 0x0 -#define EINJ_STATUS_FAIL 0x1 -#define EINJ_STATUS_INVAL 0x2 - -#define EINJ_TAB_ENTRY(tab) \ - ((struct acpi_whea_header *)((char *)(tab) + \ - sizeof(struct acpi_table_einj))) - -static bool param_extension; -module_param(param_extension, bool, 0); - -static struct acpi_table_einj *einj_tab; - -static struct apei_resources einj_resources; - -static struct apei_exec_ins_type einj_ins_type[] = { - [ACPI_EINJ_READ_REGISTER] = { - .flags = APEI_EXEC_INS_ACCESS_REGISTER, - .run = apei_exec_read_register, - }, - [ACPI_EINJ_READ_REGISTER_VALUE] = { - .flags = APEI_EXEC_INS_ACCESS_REGISTER, - .run = apei_exec_read_register_value, - }, - [ACPI_EINJ_WRITE_REGISTER] = { - .flags = APEI_EXEC_INS_ACCESS_REGISTER, - .run = apei_exec_write_register, - }, - [ACPI_EINJ_WRITE_REGISTER_VALUE] = { - .flags = APEI_EXEC_INS_ACCESS_REGISTER, - .run = apei_exec_write_register_value, - }, - [ACPI_EINJ_NOOP] = { - .flags = 0, - .run = apei_exec_noop, - }, -}; - -/* - * Prevent EINJ interpreter to run simultaneously, because the - * corresponding firmware implementation may not work properly when - * invoked simultaneously. 
- */ -static DEFINE_MUTEX(einj_mutex); - -static void *einj_param; - -static void einj_exec_ctx_init(struct apei_exec_context *ctx) -{ - apei_exec_ctx_init(ctx, einj_ins_type, ARRAY_SIZE(einj_ins_type), - EINJ_TAB_ENTRY(einj_tab), einj_tab->entries); -} - -static int __einj_get_available_error_type(u32 *type) -{ - struct apei_exec_context ctx; - int rc; - - einj_exec_ctx_init(&ctx); - rc = apei_exec_run(&ctx, ACPI_EINJ_GET_ERROR_TYPE); - if (rc) - return rc; - *type = apei_exec_ctx_get_output(&ctx); - - return 0; -} - -/* Get error injection capabilities of the platform */ -static int einj_get_available_error_type(u32 *type) -{ - int rc; - - mutex_lock(&einj_mutex); - rc = __einj_get_available_error_type(type); - mutex_unlock(&einj_mutex); - - return rc; -} - -static int einj_timedout(u64 *t) -{ - if ((s64)*t < SPIN_UNIT) { - pr_warning(FW_WARN "Firmware does not respond in time\n"); - return 1; - } - *t -= SPIN_UNIT; - ndelay(SPIN_UNIT); - touch_nmi_watchdog(); - return 0; -} - -static void check_vendor_extension(u64 paddr, - struct set_error_type_with_address *v5param) -{ - int offset = v5param->vendor_extension; - struct vendor_error_type_extension *v; - u32 sbdf; - - if (!offset) - return; - v = acpi_os_map_iomem(paddr + offset, sizeof(*v)); - if (!v) - return; - sbdf = v->pcie_sbdf; - sprintf(vendor_dev, "%x:%x:%x.%x vendor_id=%x device_id=%x rev_id=%x\n", - sbdf >> 24, (sbdf >> 16) & 0xff, - (sbdf >> 11) & 0x1f, (sbdf >> 8) & 0x7, - v->vendor_id, v->device_id, v->rev_id); - acpi_os_unmap_iomem(v, sizeof(*v)); -} - -static void *einj_get_parameter_address(void) -{ - int i; - u64 pa_v4 = 0, pa_v5 = 0; - struct acpi_whea_header *entry; - - entry = EINJ_TAB_ENTRY(einj_tab); - for (i = 0; i < einj_tab->entries; i++) { - if (entry->action == ACPI_EINJ_SET_ERROR_TYPE && - entry->instruction == ACPI_EINJ_WRITE_REGISTER && - entry->register_region.space_id == - ACPI_ADR_SPACE_SYSTEM_MEMORY) - pa_v4 = get_unaligned(&entry->register_region.address); - if (entry->action == ACPI_EINJ_SET_ERROR_TYPE_WITH_ADDRESS && - entry->instruction == ACPI_EINJ_WRITE_REGISTER && - entry->register_region.space_id == - ACPI_ADR_SPACE_SYSTEM_MEMORY) - pa_v5 = get_unaligned(&entry->register_region.address); - entry++; - } - if (pa_v5) { - struct set_error_type_with_address *v5param; - - v5param = acpi_os_map_iomem(pa_v5, sizeof(*v5param)); - if (v5param) { - acpi5 = 1; - check_vendor_extension(pa_v5, v5param); - return v5param; - } - } - if (param_extension && pa_v4) { - struct einj_parameter *v4param; - - v4param = acpi_os_map_iomem(pa_v4, sizeof(*v4param)); - if (!v4param) - return NULL; - if (v4param->reserved1 || v4param->reserved2) { - acpi_os_unmap_iomem(v4param, sizeof(*v4param)); - return NULL; - } - return v4param; - } - - return NULL; -} - -/* do sanity check to trigger table */ -static int einj_check_trigger_header(struct acpi_einj_trigger *trigger_tab) -{ - if (trigger_tab->header_size != sizeof(struct acpi_einj_trigger)) - return -EINVAL; - if (trigger_tab->table_size > PAGE_SIZE || - trigger_tab->table_size < trigger_tab->header_size) - return -EINVAL; - if (trigger_tab->entry_count != - (trigger_tab->table_size - trigger_tab->header_size) / - sizeof(struct acpi_einj_entry)) - return -EINVAL; - - return 0; -} - -static struct acpi_generic_address *einj_get_trigger_parameter_region( - struct acpi_einj_trigger *trigger_tab, u64 param1, u64 param2) -{ - int i; - struct acpi_whea_header *entry; - - entry = (struct acpi_whea_header *) - ((char *)trigger_tab + sizeof(struct acpi_einj_trigger)); - for (i = 
0; i < trigger_tab->entry_count; i++) { - if (entry->action == ACPI_EINJ_TRIGGER_ERROR && - entry->instruction <= ACPI_EINJ_WRITE_REGISTER_VALUE && - entry->register_region.space_id == - ACPI_ADR_SPACE_SYSTEM_MEMORY && - (entry->register_region.address & param2) == (param1 & param2)) - return &entry->register_region; - entry++; - } - - return NULL; -} -/* Execute instructions in trigger error action table */ -static int __einj_error_trigger(u64 trigger_paddr, u32 type, - u64 param1, u64 param2) -{ - struct acpi_einj_trigger *trigger_tab = NULL; - struct apei_exec_context trigger_ctx; - struct apei_resources trigger_resources; - struct acpi_whea_header *trigger_entry; - struct resource *r; - u32 table_size; - int rc = -EIO; - struct acpi_generic_address *trigger_param_region = NULL; - - r = request_mem_region(trigger_paddr, sizeof(*trigger_tab), - "APEI EINJ Trigger Table"); - if (!r) { - pr_err("Can not request [mem %#010llx-%#010llx] for Trigger table\n", - (unsigned long long)trigger_paddr, - (unsigned long long)trigger_paddr + - sizeof(*trigger_tab) - 1); - goto out; - } - trigger_tab = ioremap_cache(trigger_paddr, sizeof(*trigger_tab)); - if (!trigger_tab) { - pr_err("Failed to map trigger table!\n"); - goto out_rel_header; - } - rc = einj_check_trigger_header(trigger_tab); - if (rc) { - pr_warning(FW_BUG "Invalid trigger error action table.\n"); - goto out_rel_header; - } - - /* No action structures in the TRIGGER_ERROR table, nothing to do */ - if (!trigger_tab->entry_count) - goto out_rel_header; - - rc = -EIO; - table_size = trigger_tab->table_size; - r = request_mem_region(trigger_paddr + sizeof(*trigger_tab), - table_size - sizeof(*trigger_tab), - "APEI EINJ Trigger Table"); - if (!r) { - pr_err("Can not request [mem %#010llx-%#010llx] for Trigger Table Entry\n", - (unsigned long long)trigger_paddr + sizeof(*trigger_tab), - (unsigned long long)trigger_paddr + table_size - 1); - goto out_rel_header; - } - iounmap(trigger_tab); - trigger_tab = ioremap_cache(trigger_paddr, table_size); - if (!trigger_tab) { - pr_err("Failed to map trigger table!\n"); - goto out_rel_entry; - } - trigger_entry = (struct acpi_whea_header *) - ((char *)trigger_tab + sizeof(struct acpi_einj_trigger)); - apei_resources_init(&trigger_resources); - apei_exec_ctx_init(&trigger_ctx, einj_ins_type, - ARRAY_SIZE(einj_ins_type), - trigger_entry, trigger_tab->entry_count); - rc = apei_exec_collect_resources(&trigger_ctx, &trigger_resources); - if (rc) - goto out_fini; - rc = apei_resources_sub(&trigger_resources, &einj_resources); - if (rc) - goto out_fini; - /* - * Some firmware will access target address specified in - * param1 to trigger the error when injecting memory error. - * This will cause resource conflict with regular memory. So - * remove it from trigger table resources. 
- */ - if ((param_extension || acpi5) && (type & MEM_ERROR_MASK) && param2) { - struct apei_resources addr_resources; - apei_resources_init(&addr_resources); - trigger_param_region = einj_get_trigger_parameter_region( - trigger_tab, param1, param2); - if (trigger_param_region) { - rc = apei_resources_add(&addr_resources, - trigger_param_region->address, - trigger_param_region->bit_width/8, true); - if (rc) - goto out_fini; - rc = apei_resources_sub(&trigger_resources, - &addr_resources); - } - apei_resources_fini(&addr_resources); - if (rc) - goto out_fini; - } - rc = apei_resources_request(&trigger_resources, "APEI EINJ Trigger"); - if (rc) - goto out_fini; - rc = apei_exec_pre_map_gars(&trigger_ctx); - if (rc) - goto out_release; - - rc = apei_exec_run(&trigger_ctx, ACPI_EINJ_TRIGGER_ERROR); - - apei_exec_post_unmap_gars(&trigger_ctx); -out_release: - apei_resources_release(&trigger_resources); -out_fini: - apei_resources_fini(&trigger_resources); -out_rel_entry: - release_mem_region(trigger_paddr + sizeof(*trigger_tab), - table_size - sizeof(*trigger_tab)); -out_rel_header: - release_mem_region(trigger_paddr, sizeof(*trigger_tab)); -out: - if (trigger_tab) - iounmap(trigger_tab); - - return rc; -} - -static int __einj_error_inject(u32 type, u32 flags, u64 param1, u64 param2, - u64 param3, u64 param4) -{ - struct apei_exec_context ctx; - u64 val, trigger_paddr, timeout = FIRMWARE_TIMEOUT; - int rc; - - einj_exec_ctx_init(&ctx); - - rc = apei_exec_run_optional(&ctx, ACPI_EINJ_BEGIN_OPERATION); - if (rc) - return rc; - apei_exec_ctx_set_input(&ctx, type); - if (acpi5) { - struct set_error_type_with_address *v5param = einj_param; - - v5param->type = type; - if (type & ACPI5_VENDOR_BIT) { - switch (vendor_flags) { - case SETWA_FLAGS_APICID: - v5param->apicid = param1; - break; - case SETWA_FLAGS_MEM: - v5param->memory_address = param1; - v5param->memory_address_range = param2; - break; - case SETWA_FLAGS_PCIE_SBDF: - v5param->pcie_sbdf = param1; - break; - } - v5param->flags = vendor_flags; - } else if (flags) { - v5param->flags = flags; - v5param->memory_address = param1; - v5param->memory_address_range = param2; - v5param->apicid = param3; - v5param->pcie_sbdf = param4; - } else { - switch (type) { - case ACPI_EINJ_PROCESSOR_CORRECTABLE: - case ACPI_EINJ_PROCESSOR_UNCORRECTABLE: - case ACPI_EINJ_PROCESSOR_FATAL: - v5param->apicid = param1; - v5param->flags = SETWA_FLAGS_APICID; - break; - case ACPI_EINJ_MEMORY_CORRECTABLE: - case ACPI_EINJ_MEMORY_UNCORRECTABLE: - case ACPI_EINJ_MEMORY_FATAL: - v5param->memory_address = param1; - v5param->memory_address_range = param2; - v5param->flags = SETWA_FLAGS_MEM; - break; - case ACPI_EINJ_PCIX_CORRECTABLE: - case ACPI_EINJ_PCIX_UNCORRECTABLE: - case ACPI_EINJ_PCIX_FATAL: - v5param->pcie_sbdf = param1; - v5param->flags = SETWA_FLAGS_PCIE_SBDF; - break; - } - } - } else { - rc = apei_exec_run(&ctx, ACPI_EINJ_SET_ERROR_TYPE); - if (rc) - return rc; - if (einj_param) { - struct einj_parameter *v4param = einj_param; - v4param->param1 = param1; - v4param->param2 = param2; - } - } - rc = apei_exec_run(&ctx, ACPI_EINJ_EXECUTE_OPERATION); - if (rc) - return rc; - for (;;) { - rc = apei_exec_run(&ctx, ACPI_EINJ_CHECK_BUSY_STATUS); - if (rc) - return rc; - val = apei_exec_ctx_get_output(&ctx); - if (!(val & EINJ_OP_BUSY)) - break; - if (einj_timedout(&timeout)) - return -EIO; - } - rc = apei_exec_run(&ctx, ACPI_EINJ_GET_COMMAND_STATUS); - if (rc) - return rc; - val = apei_exec_ctx_get_output(&ctx); - if (val != EINJ_STATUS_SUCCESS) - return -EBUSY; - - rc = 
apei_exec_run(&ctx, ACPI_EINJ_GET_TRIGGER_TABLE); - if (rc) - return rc; - trigger_paddr = apei_exec_ctx_get_output(&ctx); - if (notrigger == 0) { - rc = __einj_error_trigger(trigger_paddr, type, param1, param2); - if (rc) - return rc; - } - rc = apei_exec_run_optional(&ctx, ACPI_EINJ_END_OPERATION); - - return rc; -} - -/* Inject the specified hardware error */ -static int einj_error_inject(u32 type, u32 flags, u64 param1, u64 param2, - u64 param3, u64 param4) -{ - int rc; - u64 base_addr, size; - - /* If user manually set "flags", make sure it is legal */ - if (flags && (flags & - ~(SETWA_FLAGS_APICID|SETWA_FLAGS_MEM|SETWA_FLAGS_PCIE_SBDF))) - return -EINVAL; - - /* - * We need extra sanity checks for memory errors. - * Other types leap directly to injection. - */ - - /* ensure param1/param2 existed */ - if (!(param_extension || acpi5)) - goto inject; - - /* ensure injection is memory related */ - if (type & ACPI5_VENDOR_BIT) { - if (vendor_flags != SETWA_FLAGS_MEM) - goto inject; - } else if (!(type & MEM_ERROR_MASK) && !(flags & SETWA_FLAGS_MEM)) - goto inject; - - /* - * Disallow crazy address masks that give BIOS leeway to pick - * injection address almost anywhere. Insist on page or - * better granularity and that target address is normal RAM or - * NVDIMM. - */ - base_addr = param1 & param2; - size = ~param2 + 1; - - if (((param2 & PAGE_MASK) != PAGE_MASK) || - ((region_intersects(base_addr, size, IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE) - != REGION_INTERSECTS) && - (region_intersects(base_addr, size, IORESOURCE_MEM, IORES_DESC_PERSISTENT_MEMORY) - != REGION_INTERSECTS))) - return -EINVAL; - -inject: - mutex_lock(&einj_mutex); - rc = __einj_error_inject(type, flags, param1, param2, param3, param4); - mutex_unlock(&einj_mutex); - - return rc; -} - -static u32 error_type; -static u32 error_flags; -static u64 error_param1; -static u64 error_param2; -static u64 error_param3; -static u64 error_param4; -static struct dentry *einj_debug_dir; - -static int available_error_type_show(struct seq_file *m, void *v) -{ - int rc; - u32 available_error_type = 0; - - rc = einj_get_available_error_type(&available_error_type); - if (rc) - return rc; - if (available_error_type & 0x0001) - seq_printf(m, "0x00000001\tProcessor Correctable\n"); - if (available_error_type & 0x0002) - seq_printf(m, "0x00000002\tProcessor Uncorrectable non-fatal\n"); - if (available_error_type & 0x0004) - seq_printf(m, "0x00000004\tProcessor Uncorrectable fatal\n"); - if (available_error_type & 0x0008) - seq_printf(m, "0x00000008\tMemory Correctable\n"); - if (available_error_type & 0x0010) - seq_printf(m, "0x00000010\tMemory Uncorrectable non-fatal\n"); - if (available_error_type & 0x0020) - seq_printf(m, "0x00000020\tMemory Uncorrectable fatal\n"); - if (available_error_type & 0x0040) - seq_printf(m, "0x00000040\tPCI Express Correctable\n"); - if (available_error_type & 0x0080) - seq_printf(m, "0x00000080\tPCI Express Uncorrectable non-fatal\n"); - if (available_error_type & 0x0100) - seq_printf(m, "0x00000100\tPCI Express Uncorrectable fatal\n"); - if (available_error_type & 0x0200) - seq_printf(m, "0x00000200\tPlatform Correctable\n"); - if (available_error_type & 0x0400) - seq_printf(m, "0x00000400\tPlatform Uncorrectable non-fatal\n"); - if (available_error_type & 0x0800) - seq_printf(m, "0x00000800\tPlatform Uncorrectable fatal\n"); - - return 0; -} - -DEFINE_SHOW_ATTRIBUTE(available_error_type); - -static int error_type_get(void *data, u64 *val) -{ - *val = error_type; - - return 0; -} - -static int 
error_type_set(void *data, u64 val) -{ - int rc; - u32 available_error_type = 0; - u32 tval, vendor; - - /* - * Vendor defined types have 0x80000000 bit set, and - * are not enumerated by ACPI_EINJ_GET_ERROR_TYPE - */ - vendor = val & ACPI5_VENDOR_BIT; - tval = val & 0x7fffffff; - - /* Only one error type can be specified */ - if (tval & (tval - 1)) - return -EINVAL; - if (!vendor) { - rc = einj_get_available_error_type(&available_error_type); - if (rc) - return rc; - if (!(val & available_error_type)) - return -EINVAL; - } - error_type = val; - - return 0; -} - -DEFINE_SIMPLE_ATTRIBUTE(error_type_fops, error_type_get, - error_type_set, "0x%llx\n"); - -static int error_inject_set(void *data, u64 val) -{ - if (!error_type) - return -EINVAL; - - return einj_error_inject(error_type, error_flags, error_param1, error_param2, - error_param3, error_param4); -} - -DEFINE_SIMPLE_ATTRIBUTE(error_inject_fops, NULL, - error_inject_set, "%llu\n"); - -static int einj_check_table(struct acpi_table_einj *einj_tab) -{ - if ((einj_tab->header_length != - (sizeof(struct acpi_table_einj) - sizeof(einj_tab->header))) - && (einj_tab->header_length != sizeof(struct acpi_table_einj))) - return -EINVAL; - if (einj_tab->header.length < sizeof(struct acpi_table_einj)) - return -EINVAL; - if (einj_tab->entries != - (einj_tab->header.length - sizeof(struct acpi_table_einj)) / - sizeof(struct acpi_einj_entry)) - return -EINVAL; - - return 0; -} - -static int __init einj_init(void) -{ - int rc; - acpi_status status; - struct dentry *fentry; - struct apei_exec_context ctx; - - if (acpi_disabled) { - pr_warn("ACPI disabled.\n"); - return -ENODEV; - } - - status = acpi_get_table(ACPI_SIG_EINJ, 0, - (struct acpi_table_header **)&einj_tab); - if (status == AE_NOT_FOUND) { - pr_warn("EINJ table not found.\n"); - return -ENODEV; - } - else if (ACPI_FAILURE(status)) { - pr_err("Failed to get EINJ table: %s\n", - acpi_format_exception(status)); - return -EINVAL; - } - - rc = einj_check_table(einj_tab); - if (rc) { - pr_warn(FW_BUG "Invalid EINJ table.\n"); - return -EINVAL; - } - - rc = -ENOMEM; - einj_debug_dir = debugfs_create_dir("einj", apei_get_debugfs_dir()); - if (!einj_debug_dir) { - pr_err("Error creating debugfs node.\n"); - goto err_cleanup; - } - - fentry = debugfs_create_file("available_error_type", S_IRUSR, - einj_debug_dir, NULL, - &available_error_type_fops); - if (!fentry) - goto err_cleanup; - - fentry = debugfs_create_file("error_type", S_IRUSR | S_IWUSR, - einj_debug_dir, NULL, &error_type_fops); - if (!fentry) - goto err_cleanup; - fentry = debugfs_create_file("error_inject", S_IWUSR, - einj_debug_dir, NULL, &error_inject_fops); - if (!fentry) - goto err_cleanup; - - apei_resources_init(&einj_resources); - einj_exec_ctx_init(&ctx); - rc = apei_exec_collect_resources(&ctx, &einj_resources); - if (rc) { - pr_err("Error collecting EINJ resources.\n"); - goto err_fini; - } - - rc = apei_resources_request(&einj_resources, "APEI EINJ"); - if (rc) { - pr_err("Error requesting memory/port resources.\n"); - goto err_fini; - } - - rc = apei_exec_pre_map_gars(&ctx); - if (rc) { - pr_err("Error pre-mapping GARs.\n"); - goto err_release; - } - - rc = -ENOMEM; - einj_param = einj_get_parameter_address(); - if ((param_extension || acpi5) && einj_param) { - fentry = debugfs_create_x32("flags", S_IRUSR | S_IWUSR, - einj_debug_dir, &error_flags); - if (!fentry) - goto err_unmap; - fentry = debugfs_create_x64("param1", S_IRUSR | S_IWUSR, - einj_debug_dir, &error_param1); - if (!fentry) - goto err_unmap; - fentry = 
debugfs_create_x64("param2", S_IRUSR | S_IWUSR, - einj_debug_dir, &error_param2); - if (!fentry) - goto err_unmap; - fentry = debugfs_create_x64("param3", S_IRUSR | S_IWUSR, - einj_debug_dir, &error_param3); - if (!fentry) - goto err_unmap; - fentry = debugfs_create_x64("param4", S_IRUSR | S_IWUSR, - einj_debug_dir, &error_param4); - if (!fentry) - goto err_unmap; - - fentry = debugfs_create_x32("notrigger", S_IRUSR | S_IWUSR, - einj_debug_dir, ¬rigger); - if (!fentry) - goto err_unmap; - } - - if (vendor_dev[0]) { - vendor_blob.data = vendor_dev; - vendor_blob.size = strlen(vendor_dev); - fentry = debugfs_create_blob("vendor", S_IRUSR, - einj_debug_dir, &vendor_blob); - if (!fentry) - goto err_unmap; - fentry = debugfs_create_x32("vendor_flags", S_IRUSR | S_IWUSR, - einj_debug_dir, &vendor_flags); - if (!fentry) - goto err_unmap; - } - - pr_info("Error INJection is initialized.\n"); - - return 0; - -err_unmap: - if (einj_param) { - acpi_size size = (acpi5) ? - sizeof(struct set_error_type_with_address) : - sizeof(struct einj_parameter); - - acpi_os_unmap_iomem(einj_param, size); - pr_err("Error creating param extension debugfs nodes.\n"); - } - apei_exec_post_unmap_gars(&ctx); -err_release: - apei_resources_release(&einj_resources); -err_fini: - apei_resources_fini(&einj_resources); -err_cleanup: - pr_err("Error creating primary debugfs nodes.\n"); - debugfs_remove_recursive(einj_debug_dir); - - return rc; -} - -static void __exit einj_exit(void) -{ - struct apei_exec_context ctx; - - if (einj_param) { - acpi_size size = (acpi5) ? - sizeof(struct set_error_type_with_address) : - sizeof(struct einj_parameter); - - acpi_os_unmap_iomem(einj_param, size); - } - einj_exec_ctx_init(&ctx); - apei_exec_post_unmap_gars(&ctx); - apei_resources_release(&einj_resources); - apei_resources_fini(&einj_resources); - debugfs_remove_recursive(einj_debug_dir); -} - -module_init(einj_init); -module_exit(einj_exit); - -MODULE_AUTHOR("Huang Ying"); -MODULE_DESCRIPTION("APEI Error INJection support"); -MODULE_LICENSE("GPL"); diff --git a/drivers/acpi/apei/erst-dbg.c b/drivers/acpi/apei/erst-dbg.c index 6330f557a2c8..ff0e8bf8e97a 100644 --- a/drivers/acpi/apei/erst-dbg.c +++ b/drivers/acpi/apei/erst-dbg.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * APEI Error Record Serialization Table debug support * @@ -8,15 +9,6 @@ * * Copyright 2010 Intel Corp. * Author: Huang Ying <ying.huang@intel.com> - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version - * 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
*/ #include <linux/kernel.h> @@ -68,9 +60,8 @@ static long erst_dbg_ioctl(struct file *f, unsigned int cmd, unsigned long arg) switch (cmd) { case APEI_ERST_CLEAR_RECORD: - rc = copy_from_user(&record_id, (void __user *)arg, - sizeof(record_id)); - if (rc) + if (copy_from_user(&record_id, (void __user *)arg, + sizeof(record_id))) return -EFAULT; return erst_clear(record_id); case APEI_ERST_GET_RECORD_COUNT: @@ -119,16 +110,16 @@ retry_next: goto out; } retry: - rc = len = erst_read(id, erst_dbg_buf, erst_dbg_buf_len); + rc = len = erst_read_record(id, erst_dbg_buf, erst_dbg_buf_len, + erst_dbg_buf_len, NULL); /* The record may be cleared by others, try read next record */ if (rc == -ENOENT) goto retry_next; if (rc < 0) goto out; if (len > ERST_DBG_RECORD_LEN_MAX) { - pr_warning(ERST_DBG_PFX - "Record (ID: 0x%llx) length is too long: %zd\n", - id, len); + pr_warn(ERST_DBG_PFX + "Record (ID: 0x%llx) length is too long: %zd\n", id, len); rc = -EIO; goto out; } @@ -183,8 +174,7 @@ static ssize_t erst_dbg_write(struct file *filp, const char __user *ubuf, erst_dbg_buf = p; erst_dbg_buf_len = usize; } - rc = copy_from_user(erst_dbg_buf, ubuf, usize); - if (rc) { + if (copy_from_user(erst_dbg_buf, ubuf, usize)) { rc = -EFAULT; goto out; } @@ -207,7 +197,6 @@ static const struct file_operations erst_dbg_ops = { .read = erst_dbg_read, .write = erst_dbg_write, .unlocked_ioctl = erst_dbg_ioctl, - .llseek = no_llseek, }; static struct miscdevice erst_dbg_dev = { diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c index 9953e50667ec..bf65e3461531 100644 --- a/drivers/acpi/apei/erst.c +++ b/drivers/acpi/apei/erst.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * APEI Error Record Serialization Table support * @@ -9,15 +10,6 @@ * * Copyright 2010 Intel Corp. * Author: Huang Ying <ying.huang@intel.com> - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version - * 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. */ #include <linux/kernel.h> @@ -62,11 +54,15 @@ EXPORT_SYMBOL_GPL(erst_disable); static struct acpi_table_erst *erst_tab; -/* ERST Error Log Address Range atrributes */ +/* ERST Error Log Address Range attributes */ #define ERST_RANGE_RESERVED 0x0001 #define ERST_RANGE_NVRAM 0x0002 #define ERST_RANGE_SLOW 0x0004 +/* ERST Exec max timings */ +#define ERST_EXEC_TIMING_MAX_MASK 0xFFFFFFFF00000000 +#define ERST_EXEC_TIMING_MAX_SHIFT 32 + /* * ERST Error Log Address Range, used as buffer for reading/writing * error records. 
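The two ERST_EXEC_TIMING_* constants added in the hunk above decode the output of the new ACPI_ERST_EXECUTE_TIMINGS action: the upper 32 bits carry the firmware's maximum expected command latency in milliseconds. The erst_get_timeout() helper in the next hunk turns that into a nanosecond timeout for ERST_RANGE_SLOW error log ranges, clamped so it never falls below the driver's FIRMWARE_TIMEOUT floor. Below is a minimal standalone sketch of that computation; the function name erst_timeout_ns(), the 1 ms FIRMWARE_TIMEOUT placeholder and the 250 ms sample value are illustrative, not taken from this patch.

#include <stdint.h>
#include <stdio.h>

/*
 * Constants mirrored from the hunk above.  FIRMWARE_TIMEOUT is a placeholder
 * value here (the driver defines its own); 250 ms below is just sample data.
 */
#define ERST_RANGE_SLOW			0x0004
#define ERST_EXEC_TIMING_MAX_MASK	0xFFFFFFFF00000000ULL
#define ERST_EXEC_TIMING_MAX_SHIFT	32
#define NSEC_PER_MSEC			1000000ULL
#define FIRMWARE_TIMEOUT		(1 * NSEC_PER_MSEC)

/* Upper 32 bits of the EXECUTE_TIMINGS output = worst-case latency in ms. */
static uint64_t erst_timeout_ns(uint32_t range_attr, uint64_t timings)
{
	uint64_t timeout = FIRMWARE_TIMEOUT;

	if (range_attr & ERST_RANGE_SLOW) {
		timeout = ((timings & ERST_EXEC_TIMING_MAX_MASK) >>
			   ERST_EXEC_TIMING_MAX_SHIFT) * NSEC_PER_MSEC;
		if (timeout < FIRMWARE_TIMEOUT)
			timeout = FIRMWARE_TIMEOUT;
	}
	return timeout;
}

int main(void)
{
	/* Firmware advertising a 250 ms worst case on a slow (NVRAM) range. */
	uint64_t timings = (uint64_t)250 << ERST_EXEC_TIMING_MAX_SHIFT;

	printf("timeout = %llu ns\n",
	       (unsigned long long)erst_timeout_ns(ERST_RANGE_SLOW, timings));
	return 0;
}

In the patch itself, the value computed this way replaces the fixed FIRMWARE_TIMEOUT in the busy-wait loops of __erst_write_to_storage(), __erst_read_from_storage() and __erst_clear_from_storage().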
@@ -76,6 +72,7 @@ static struct erst_erange { u64 size; void __iomem *vaddr; u32 attr; + u64 timings; } erst_erange; /* @@ -105,6 +102,19 @@ static inline int erst_errno(int command_status) } } +static inline u64 erst_get_timeout(void) +{ + u64 timeout = FIRMWARE_TIMEOUT; + + if (erst_erange.attr & ERST_RANGE_SLOW) { + timeout = ((erst_erange.timings & ERST_EXEC_TIMING_MAX_MASK) >> + ERST_EXEC_TIMING_MAX_SHIFT) * NSEC_PER_MSEC; + if (timeout < FIRMWARE_TIMEOUT) + timeout = FIRMWARE_TIMEOUT; + } + return timeout; +} + static int erst_timedout(u64 *t, u64 spin_unit) { if ((s64)*t < spin_unit) { @@ -199,9 +209,11 @@ static int erst_exec_stall_while_true(struct apei_exec_context *ctx, { int rc; u64 val; - u64 timeout = FIRMWARE_TIMEOUT; + u64 timeout; u64 stall_time; + timeout = erst_get_timeout(); + if (ctx->var1 > FIRMWARE_MAX_STALL) { if (!in_nmi()) pr_warn(FW_WARN @@ -397,6 +409,13 @@ static int erst_get_erange(struct erst_erange *range) if (rc) return rc; range->attr = apei_exec_ctx_get_output(&ctx); + rc = apei_exec_run(&ctx, ACPI_ERST_EXECUTE_TIMINGS); + if (rc == 0) + range->timings = apei_exec_ctx_get_output(&ctx); + else if (rc == -ENOENT) + range->timings = 0; + else + return rc; return 0; } @@ -629,10 +648,12 @@ EXPORT_SYMBOL_GPL(erst_get_record_id_end); static int __erst_write_to_storage(u64 offset) { struct apei_exec_context ctx; - u64 timeout = FIRMWARE_TIMEOUT; + u64 timeout; u64 val; int rc; + timeout = erst_get_timeout(); + erst_exec_ctx_init(&ctx); rc = apei_exec_run_optional(&ctx, ACPI_ERST_BEGIN_WRITE); if (rc) @@ -668,10 +689,12 @@ static int __erst_write_to_storage(u64 offset) static int __erst_read_from_storage(u64 record_id, u64 offset) { struct apei_exec_context ctx; - u64 timeout = FIRMWARE_TIMEOUT; + u64 timeout; u64 val; int rc; + timeout = erst_get_timeout(); + erst_exec_ctx_init(&ctx); rc = apei_exec_run_optional(&ctx, ACPI_ERST_BEGIN_READ); if (rc) @@ -696,7 +719,7 @@ static int __erst_read_from_storage(u64 record_id, u64 offset) break; if (erst_timedout(&timeout, SPIN_UNIT)) return -EIO; - }; + } rc = apei_exec_run(&ctx, ACPI_ERST_GET_COMMAND_STATUS); if (rc) return rc; @@ -711,10 +734,12 @@ static int __erst_read_from_storage(u64 record_id, u64 offset) static int __erst_clear_from_storage(u64 record_id) { struct apei_exec_context ctx; - u64 timeout = FIRMWARE_TIMEOUT; + u64 timeout; u64 val; int rc; + timeout = erst_get_timeout(); + erst_exec_ctx_init(&ctx); rc = apei_exec_run_optional(&ctx, ACPI_ERST_BEGIN_CLEAR); if (rc) @@ -864,6 +889,74 @@ ssize_t erst_read(u64 record_id, struct cper_record_header *record, } EXPORT_SYMBOL_GPL(erst_read); +static void erst_clear_cache(u64 record_id) +{ + int i; + u64 *entries; + + mutex_lock(&erst_record_id_cache.lock); + + entries = erst_record_id_cache.entries; + for (i = 0; i < erst_record_id_cache.len; i++) { + if (entries[i] == record_id) + entries[i] = APEI_ERST_INVALID_RECORD_ID; + } + __erst_record_id_cache_compact(); + + mutex_unlock(&erst_record_id_cache.lock); +} + +ssize_t erst_read_record(u64 record_id, struct cper_record_header *record, + size_t buflen, size_t recordlen, const guid_t *creatorid) +{ + ssize_t len; + + /* + * if creatorid is NULL, read any record for erst-dbg module + */ + if (creatorid == NULL) { + len = erst_read(record_id, record, buflen); + if (len == -ENOENT) + erst_clear_cache(record_id); + + return len; + } + + len = erst_read(record_id, record, buflen); + /* + * if erst_read return value is -ENOENT skip to next record_id, + * and clear the record_id cache. 
+ */ + if (len == -ENOENT) { + erst_clear_cache(record_id); + goto out; + } + + if (len < 0) + goto out; + + /* + * if erst_read return value is less than record head length, + * consider it as -EIO, and clear the record_id cache. + */ + if (len < recordlen) { + len = -EIO; + erst_clear_cache(record_id); + goto out; + } + + /* + * if creatorid is not wanted, consider it as not found, + * for skipping to next record_id. + */ + if (!guid_equal(&record->creator_id, creatorid)) + len = -ENOENT; + +out: + return len; +} +EXPORT_SYMBOL_GPL(erst_read_record); + int erst_clear(u64 record_id) { int rc, i; @@ -899,7 +992,7 @@ EXPORT_SYMBOL_GPL(erst_clear); static int __init setup_erst_disable(char *str) { erst_disable = 1; - return 0; + return 1; } __setup("erst_disable", setup_erst_disable); @@ -938,17 +1031,17 @@ static struct pstore_info erst_info = { }; #define CPER_CREATOR_PSTORE \ - UUID_LE(0x75a574e3, 0x5052, 0x4b29, 0x8a, 0x8e, 0xbe, 0x2c, \ - 0x64, 0x90, 0xb8, 0x9d) + GUID_INIT(0x75a574e3, 0x5052, 0x4b29, 0x8a, 0x8e, 0xbe, 0x2c, \ + 0x64, 0x90, 0xb8, 0x9d) #define CPER_SECTION_TYPE_DMESG \ - UUID_LE(0xc197e04e, 0xd545, 0x4a70, 0x9c, 0x17, 0xa5, 0x54, \ - 0x94, 0x19, 0xeb, 0x12) + GUID_INIT(0xc197e04e, 0xd545, 0x4a70, 0x9c, 0x17, 0xa5, 0x54, \ + 0x94, 0x19, 0xeb, 0x12) #define CPER_SECTION_TYPE_DMESG_Z \ - UUID_LE(0x4f118707, 0x04dd, 0x4055, 0xb5, 0xdd, 0x95, 0x6d, \ - 0x34, 0xdd, 0xfa, 0xc6) + GUID_INIT(0x4f118707, 0x04dd, 0x4055, 0xb5, 0xdd, 0x95, 0x6d, \ + 0x34, 0xdd, 0xfa, 0xc6) #define CPER_SECTION_TYPE_MCE \ - UUID_LE(0xfe08ffbe, 0x95e4, 0x4be7, 0xbc, 0x73, 0x40, 0x96, \ - 0x04, 0x4a, 0x38, 0xfc) + GUID_INIT(0xfe08ffbe, 0x95e4, 0x4be7, 0xbc, 0x73, 0x40, 0x96, \ + 0x04, 0x4a, 0x38, 0xfc) struct cper_pstore_record { struct cper_record_header hdr; @@ -960,14 +1053,10 @@ static int reader_pos; static int erst_open_pstore(struct pstore_info *psi) { - int rc; - if (erst_disable) return -ENODEV; - rc = erst_get_record_id_begin(&reader_pos); - - return rc; + return erst_get_record_id_begin(&reader_pos); } static int erst_close_pstore(struct pstore_info *psi) @@ -1004,16 +1093,13 @@ skip: goto out; } - len = erst_read(record_id, &rcd->hdr, rcd_len); + len = erst_read_record(record_id, &rcd->hdr, rcd_len, sizeof(*rcd), + &CPER_CREATOR_PSTORE); /* The record may be cleared by others, try read next record */ if (len == -ENOENT) goto skip; - else if (len < 0 || len < sizeof(*rcd)) { - rc = -EIO; + else if (len < 0) goto out; - } - if (uuid_le_cmp(rcd->hdr.creator_id, CPER_CREATOR_PSTORE) != 0) - goto skip; record->buf = kmalloc(len, GFP_KERNEL); if (record->buf == NULL) { @@ -1024,15 +1110,12 @@ skip: record->id = record_id; record->compressed = false; record->ecc_notice_size = 0; - if (uuid_le_cmp(rcd->sec_hdr.section_type, - CPER_SECTION_TYPE_DMESG_Z) == 0) { + if (guid_equal(&rcd->sec_hdr.section_type, &CPER_SECTION_TYPE_DMESG_Z)) { record->type = PSTORE_TYPE_DMESG; record->compressed = true; - } else if (uuid_le_cmp(rcd->sec_hdr.section_type, - CPER_SECTION_TYPE_DMESG) == 0) + } else if (guid_equal(&rcd->sec_hdr.section_type, &CPER_SECTION_TYPE_DMESG)) record->type = PSTORE_TYPE_DMESG; - else if (uuid_le_cmp(rcd->sec_hdr.section_type, - CPER_SECTION_TYPE_MCE) == 0) + else if (guid_equal(&rcd->sec_hdr.section_type, &CPER_SECTION_TYPE_MCE)) record->type = PSTORE_TYPE_MCE; else record->type = PSTORE_TYPE_MAX; @@ -1133,7 +1216,7 @@ static int __init erst_init(void) rc = erst_check_table(erst_tab); if (rc) { pr_err(FW_BUG "ERST table is invalid.\n"); - goto err; + goto err_put_erst_tab; } 
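The new erst_read_record() wrapper above centralizes record filtering for its two in-tree callers: erst-dbg passes a NULL creatorid and accepts any record, while the pstore backend passes CPER_CREATOR_PSTORE. A record that has vanished (-ENOENT) gets its ID dropped from the record-ID cache, a record shorter than the caller's minimum recordlen is turned into -EIO, and a record owned by another creator is reported as -ENOENT so the caller simply advances to the next ID. The sketch below is an illustrative consumer only, not code from this series; dump_records_for_creator(), the 4 KiB buffer and the pr_info() reporting are assumptions layered on the exported erst_get_record_id_begin()/_next()/_end() iterator.

#include <linux/cper.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/uuid.h>
#include <acpi/apei.h>

/* Walk the cached record IDs and fetch only records written by "creatorid". */
static int dump_records_for_creator(const guid_t *creatorid)
{
	struct cper_record_header *rcd;
	size_t buflen = 4096;		/* assumed big enough for our records */
	u64 record_id;
	ssize_t len;
	int rc, pos;

	rcd = kmalloc(buflen, GFP_KERNEL);
	if (!rcd)
		return -ENOMEM;

	rc = erst_get_record_id_begin(&pos);
	if (rc)
		goto out;

	while (!erst_get_record_id_next(&pos, &record_id) &&
	       record_id != APEI_ERST_INVALID_RECORD_ID) {
		len = erst_read_record(record_id, rcd, buflen, sizeof(*rcd),
				       creatorid);
		if (len == -ENOENT)	/* cleared meanwhile, or foreign creator */
			continue;
		if (len < 0) {		/* -EIO on truncation, or a real read error */
			rc = len;
			break;
		}
		pr_info("ERST record 0x%llx: %zd bytes\n",
			(unsigned long long)record_id, len);
	}

	erst_get_record_id_end();
out:
	kfree(rcd);
	return rc;
}

The pstore reader in this diff follows the same pattern in erst_reader(), passing &CPER_CREATOR_PSTORE and sizeof(*rcd) so truncated or foreign records are filtered out before the section type is inspected.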
apei_resources_init(&erst_resources); @@ -1207,6 +1290,8 @@ err_release: apei_resources_release(&erst_resources); err_fini: apei_resources_fini(&erst_resources); +err_put_erst_tab: + acpi_put_table((struct acpi_table_header *)erst_tab); err: erst_disable = 1; return rc; diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c index f008ba7c9ced..0dc767392a6c 100644 --- a/drivers/acpi/apei/ghes.c +++ b/drivers/acpi/apei/ghes.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * APEI Generic Hardware Error Source support * @@ -14,26 +15,19 @@ * * Copyright 2010,2011 Intel Corp. * Author: Huang Ying <ying.huang@intel.com> - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version - * 2 as published by the Free Software Foundation; - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. */ +#include <linux/arm_sdei.h> #include <linux/kernel.h> #include <linux/moduleparam.h> #include <linux/init.h> #include <linux/acpi.h> +#include <linux/bitfield.h> #include <linux/io.h> #include <linux/interrupt.h> #include <linux/timer.h> #include <linux/cper.h> -#include <linux/kdebug.h> +#include <linux/cleanup.h> #include <linux/platform_device.h> #include <linux/mutex.h> #include <linux/ratelimit.h> @@ -41,18 +35,23 @@ #include <linux/irq_work.h> #include <linux/llist.h> #include <linux/genalloc.h> +#include <linux/kfifo.h> #include <linux/pci.h> +#include <linux/pfn.h> #include <linux/aer.h> #include <linux/nmi.h> #include <linux/sched/clock.h> #include <linux/uuid.h> #include <linux/ras.h> +#include <linux/task_work.h> +#include <linux/vmcore_info.h> #include <acpi/actbl1.h> #include <acpi/ghes.h> #include <acpi/apei.h> #include <asm/fixmap.h> #include <asm/tlbflush.h> +#include <cxl/event.h> #include <ras/ras_event.h> #include "apei-internal.h" @@ -85,12 +84,43 @@ ((struct acpi_hest_generic_status *) \ ((struct ghes_estatus_node *)(estatus_node) + 1)) +#define GHES_VENDOR_ENTRY_LEN(gdata_len) \ + (sizeof(struct ghes_vendor_record_entry) + (gdata_len)) +#define GHES_GDATA_FROM_VENDOR_ENTRY(vendor_entry) \ + ((struct acpi_hest_generic_data *) \ + ((struct ghes_vendor_record_entry *)(vendor_entry) + 1)) + +/* + * NMI-like notifications vary by architecture, before the compiler can prune + * unused static functions it needs a value for these enums. + */ +#ifndef CONFIG_ARM_SDE_INTERFACE +#define FIX_APEI_GHES_SDEI_NORMAL __end_of_fixed_addresses +#define FIX_APEI_GHES_SDEI_CRITICAL __end_of_fixed_addresses +#endif + +static ATOMIC_NOTIFIER_HEAD(ghes_report_chain); + static inline bool is_hest_type_generic_v2(struct ghes *ghes) { return ghes->generic->header.type == ACPI_HEST_TYPE_GENERIC_ERROR_V2; } /* + * A platform may describe one error source for the handling of synchronous + * errors (e.g. MCE or SEA), or for handling asynchronous errors (e.g. SCI + * or External Interrupt). On x86, the HEST notifications are always + * asynchronous, so only SEA on ARM is delivered as a synchronous + * notification. 
+ */ +static inline bool is_hest_sync_notify(struct ghes *ghes) +{ + u8 notify_type = ghes->generic->notify.type; + + return notify_type == ACPI_HEST_NOTIFY_SEA; +} + +/* * This driver isn't really modular, however for the time being, * continuing to use module_param is the easiest way to remain * compatible with existing boot arg use cases. @@ -99,6 +129,13 @@ bool ghes_disable; module_param_named(disable, ghes_disable, bool, 0); /* + * "ghes.edac_force_enable" forcibly enables ghes_edac and skips the platform + * check. + */ +static bool ghes_edac_force_enable; +module_param_named(edac_force_enable, ghes_edac_force_enable, bool, 0); + +/* * All error sources notified with HED (Hardware Error Device) share a * single notifier callback, so they need to be linked and checked one * by one. This holds true for NMI too. @@ -110,102 +147,98 @@ static LIST_HEAD(ghes_hed); static DEFINE_MUTEX(ghes_list_mutex); /* + * A list of GHES devices which are given to the corresponding EDAC driver + * ghes_edac for further use. + */ +static LIST_HEAD(ghes_devs); +static DEFINE_MUTEX(ghes_devs_mutex); + +/* * Because the memory area used to transfer hardware error information * from BIOS to Linux can be determined only in NMI, IRQ or timer * handler, but general ioremap can not be used in atomic context, so * the fixmap is used instead. * - * These 2 spinlocks are used to prevent the fixmap entries from being used + * This spinlock is used to prevent the fixmap entry from being used * simultaneously. */ -static DEFINE_RAW_SPINLOCK(ghes_ioremap_lock_nmi); -static DEFINE_SPINLOCK(ghes_ioremap_lock_irq); +static DEFINE_SPINLOCK(ghes_notify_lock_irq); + +struct ghes_vendor_record_entry { + struct work_struct work; + int error_severity; + char vendor_record[]; +}; static struct gen_pool *ghes_estatus_pool; -static unsigned long ghes_estatus_pool_size_request; -static struct ghes_estatus_cache *ghes_estatus_caches[GHES_ESTATUS_CACHES_SIZE]; +static struct ghes_estatus_cache __rcu *ghes_estatus_caches[GHES_ESTATUS_CACHES_SIZE]; static atomic_t ghes_estatus_cache_alloced; -static int ghes_panic_timeout __read_mostly = 30; - -static void __iomem *ghes_ioremap_pfn_nmi(u64 pfn) +static void __iomem *ghes_map(u64 pfn, enum fixed_addresses fixmap_idx) { phys_addr_t paddr; pgprot_t prot; - paddr = pfn << PAGE_SHIFT; + paddr = PFN_PHYS(pfn); prot = arch_apei_get_mem_attribute(paddr); - __set_fixmap(FIX_APEI_GHES_NMI, paddr, prot); + __set_fixmap(fixmap_idx, paddr, prot); - return (void __iomem *) fix_to_virt(FIX_APEI_GHES_NMI); + return (void __iomem *) __fix_to_virt(fixmap_idx); } -static void __iomem *ghes_ioremap_pfn_irq(u64 pfn) +static void ghes_unmap(void __iomem *vaddr, enum fixed_addresses fixmap_idx) { - phys_addr_t paddr; - pgprot_t prot; - - paddr = pfn << PAGE_SHIFT; - prot = arch_apei_get_mem_attribute(paddr); - __set_fixmap(FIX_APEI_GHES_IRQ, paddr, prot); + int _idx = virt_to_fix((unsigned long)vaddr); - return (void __iomem *) fix_to_virt(FIX_APEI_GHES_IRQ); + WARN_ON_ONCE(fixmap_idx != _idx); + clear_fixmap(fixmap_idx); } -static void ghes_iounmap_nmi(void) +int ghes_estatus_pool_init(unsigned int num_ghes) { - clear_fixmap(FIX_APEI_GHES_NMI); -} - -static void ghes_iounmap_irq(void) -{ - clear_fixmap(FIX_APEI_GHES_IRQ); -} + unsigned long addr, len; + int rc; -static int ghes_estatus_pool_init(void) -{ ghes_estatus_pool = gen_pool_create(GHES_ESTATUS_POOL_MIN_ALLOC_ORDER, -1); if (!ghes_estatus_pool) return -ENOMEM; + + len = GHES_ESTATUS_CACHE_AVG_SIZE * GHES_ESTATUS_CACHE_ALLOCED_MAX; + len += 
(num_ghes * GHES_ESOURCE_PREALLOC_MAX_SIZE); + + addr = (unsigned long)vmalloc(PAGE_ALIGN(len)); + if (!addr) + goto err_pool_alloc; + + rc = gen_pool_add(ghes_estatus_pool, addr, PAGE_ALIGN(len), -1); + if (rc) + goto err_pool_add; + return 0; -} -static void ghes_estatus_pool_free_chunk_page(struct gen_pool *pool, - struct gen_pool_chunk *chunk, - void *data) -{ - free_page(chunk->start_addr); -} +err_pool_add: + vfree((void *)addr); -static void ghes_estatus_pool_exit(void) -{ - gen_pool_for_each_chunk(ghes_estatus_pool, - ghes_estatus_pool_free_chunk_page, NULL); +err_pool_alloc: gen_pool_destroy(ghes_estatus_pool); + + return -ENOMEM; } -static int ghes_estatus_pool_expand(unsigned long len) +/** + * ghes_estatus_pool_region_free - free previously allocated memory + * from the ghes_estatus_pool. + * @addr: address of memory to free. + * @size: size of memory to free. + * + * Returns none. + */ +void ghes_estatus_pool_region_free(unsigned long addr, u32 size) { - unsigned long i, pages, size, addr; - int ret; - - ghes_estatus_pool_size_request += PAGE_ALIGN(len); - size = gen_pool_size(ghes_estatus_pool); - if (size >= ghes_estatus_pool_size_request) - return 0; - pages = (ghes_estatus_pool_size_request - size) / PAGE_SIZE; - for (i = 0; i < pages; i++) { - addr = __get_free_page(GFP_KERNEL); - if (!addr) - return -ENOMEM; - ret = gen_pool_add(ghes_estatus_pool, addr, PAGE_SIZE, -1); - if (ret) - return ret; - } - - return 0; + gen_pool_free(ghes_estatus_pool, addr, size); } +EXPORT_SYMBOL_GPL(ghes_estatus_pool_region_free); static int map_gen_v2(struct ghes *ghes) { @@ -217,6 +250,21 @@ static void unmap_gen_v2(struct ghes *ghes) apei_unmap_generic_address(&ghes->generic_v2->read_ack_register); } +static void ghes_ack_error(struct acpi_hest_generic_v2 *gv2) +{ + int rc; + u64 val = 0; + + rc = apei_read(&val, &gv2->read_ack_register); + if (rc) + return; + + val &= gv2->read_ack_preserve << gv2->read_ack_register.bit_offset; + val |= gv2->read_ack_write << gv2->read_ack_register.bit_offset; + + apei_write(val, &gv2->read_ack_register); +} + static struct ghes *ghes_new(struct acpi_hest_generic *generic) { struct ghes *ghes; @@ -239,10 +287,10 @@ static struct ghes *ghes_new(struct acpi_hest_generic *generic) goto err_unmap_read_ack_addr; error_block_length = generic->error_block_length; if (error_block_length > GHES_ESTATUS_MAX_SIZE) { - pr_warning(FW_WARN GHES_PFX - "Error status block length is too long: %u for " - "generic hardware error source: %d.\n", - error_block_length, generic->header.source_id); + pr_warn(FW_WARN GHES_PFX + "Error status block length is too long: %u for " + "generic hardware error source: %d.\n", + error_block_length, generic->header.source_id); error_block_length = GHES_ESTATUS_MAX_SIZE; } ghes->estatus = kmalloc(error_block_length, GFP_KERNEL); @@ -289,23 +337,16 @@ static inline int ghes_severity(int severity) } static void ghes_copy_tofrom_phys(void *buffer, u64 paddr, u32 len, - int from_phys) + int from_phys, + enum fixed_addresses fixmap_idx) { void __iomem *vaddr; - unsigned long flags = 0; - int in_nmi = in_nmi(); u64 offset; u32 trunk; while (len > 0) { offset = paddr - (paddr & PAGE_MASK); - if (in_nmi) { - raw_spin_lock(&ghes_ioremap_lock_nmi); - vaddr = ghes_ioremap_pfn_nmi(paddr >> PAGE_SHIFT); - } else { - spin_lock_irqsave(&ghes_ioremap_lock_irq, flags); - vaddr = ghes_ioremap_pfn_irq(paddr >> PAGE_SHIFT); - } + vaddr = ghes_map(PHYS_PFN(paddr), fixmap_idx); trunk = PAGE_SIZE - offset; trunk = min(trunk, len); if (from_phys) @@ -315,103 
+356,243 @@ static void ghes_copy_tofrom_phys(void *buffer, u64 paddr, u32 len, len -= trunk; paddr += trunk; buffer += trunk; - if (in_nmi) { - ghes_iounmap_nmi(); - raw_spin_unlock(&ghes_ioremap_lock_nmi); - } else { - ghes_iounmap_irq(); - spin_unlock_irqrestore(&ghes_ioremap_lock_irq, flags); - } + ghes_unmap(vaddr, fixmap_idx); + } +} + +/* Check the top-level record header has an appropriate size. */ +static int __ghes_check_estatus(struct ghes *ghes, + struct acpi_hest_generic_status *estatus) +{ + u32 len = cper_estatus_len(estatus); + + if (len < sizeof(*estatus)) { + pr_warn_ratelimited(FW_WARN GHES_PFX "Truncated error status block!\n"); + return -EIO; + } + + if (len > ghes->generic->error_block_length) { + pr_warn_ratelimited(FW_WARN GHES_PFX "Invalid error status block length!\n"); + return -EIO; + } + + if (cper_estatus_check_header(estatus)) { + pr_warn_ratelimited(FW_WARN GHES_PFX "Invalid CPER header!\n"); + return -EIO; } + + return 0; } -static int ghes_read_estatus(struct ghes *ghes, int silent) +/* Read the CPER block, returning its address, and header in estatus. */ +static int __ghes_peek_estatus(struct ghes *ghes, + struct acpi_hest_generic_status *estatus, + u64 *buf_paddr, enum fixed_addresses fixmap_idx) { struct acpi_hest_generic *g = ghes->generic; - u64 buf_paddr; - u32 len; int rc; - rc = apei_read(&buf_paddr, &g->error_status_address); + rc = apei_read(buf_paddr, &g->error_status_address); if (rc) { - if (!silent && printk_ratelimit()) - pr_warning(FW_WARN GHES_PFX + *buf_paddr = 0; + pr_warn_ratelimited(FW_WARN GHES_PFX "Failed to read error status block address for hardware error source: %d.\n", g->header.source_id); return -EIO; } - if (!buf_paddr) + if (!*buf_paddr) return -ENOENT; - ghes_copy_tofrom_phys(ghes->estatus, buf_paddr, - sizeof(*ghes->estatus), 1); - if (!ghes->estatus->block_status) + ghes_copy_tofrom_phys(estatus, *buf_paddr, sizeof(*estatus), 1, + fixmap_idx); + if (!estatus->block_status) { + *buf_paddr = 0; return -ENOENT; + } + + return 0; +} - ghes->buffer_paddr = buf_paddr; - ghes->flags |= GHES_TO_CLEAR; +static int __ghes_read_estatus(struct acpi_hest_generic_status *estatus, + u64 buf_paddr, enum fixed_addresses fixmap_idx, + size_t buf_len) +{ + ghes_copy_tofrom_phys(estatus, buf_paddr, buf_len, 1, fixmap_idx); + if (cper_estatus_check(estatus)) { + pr_warn_ratelimited(FW_WARN GHES_PFX + "Failed to read error status block!\n"); + return -EIO; + } - rc = -EIO; - len = cper_estatus_len(ghes->estatus); - if (len < sizeof(*ghes->estatus)) - goto err_read_block; - if (len > ghes->generic->error_block_length) - goto err_read_block; - if (cper_estatus_check_header(ghes->estatus)) - goto err_read_block; - ghes_copy_tofrom_phys(ghes->estatus + 1, - buf_paddr + sizeof(*ghes->estatus), - len - sizeof(*ghes->estatus), 1); - if (cper_estatus_check(ghes->estatus)) - goto err_read_block; - rc = 0; - -err_read_block: - if (rc && !silent && printk_ratelimit()) - pr_warning(FW_WARN GHES_PFX - "Failed to read error status block!\n"); - return rc; + return 0; } -static void ghes_clear_estatus(struct ghes *ghes) +static int ghes_read_estatus(struct ghes *ghes, + struct acpi_hest_generic_status *estatus, + u64 *buf_paddr, enum fixed_addresses fixmap_idx) { - ghes->estatus->block_status = 0; - if (!(ghes->flags & GHES_TO_CLEAR)) + int rc; + + rc = __ghes_peek_estatus(ghes, estatus, buf_paddr, fixmap_idx); + if (rc) + return rc; + + rc = __ghes_check_estatus(ghes, estatus); + if (rc) + return rc; + + return __ghes_read_estatus(estatus, *buf_paddr, 
fixmap_idx, + cper_estatus_len(estatus)); +} + +static void ghes_clear_estatus(struct ghes *ghes, + struct acpi_hest_generic_status *estatus, + u64 buf_paddr, enum fixed_addresses fixmap_idx) +{ + estatus->block_status = 0; + + if (!buf_paddr) return; - ghes_copy_tofrom_phys(ghes->estatus, ghes->buffer_paddr, - sizeof(ghes->estatus->block_status), 0); - ghes->flags &= ~GHES_TO_CLEAR; + + ghes_copy_tofrom_phys(estatus, buf_paddr, + sizeof(estatus->block_status), 0, + fixmap_idx); + + /* + * GHESv2 type HEST entries introduce support for error acknowledgment, + * so only acknowledge the error if this support is present. + */ + if (is_hest_type_generic_v2(ghes)) + ghes_ack_error(ghes->generic_v2); } -static void ghes_handle_memory_failure(struct acpi_hest_generic_data *gdata, int sev) +/** + * struct ghes_task_work - for synchronous RAS event + * + * @twork: callback_head for task work + * @pfn: page frame number of corrupted page + * @flags: work control flags + * + * Structure to pass task work to be handled before + * returning to user-space via task_work_add(). + */ +struct ghes_task_work { + struct callback_head twork; + u64 pfn; + int flags; +}; + +static void memory_failure_cb(struct callback_head *twork) { -#ifdef CONFIG_ACPI_APEI_MEMORY_FAILURE + struct ghes_task_work *twcb = container_of(twork, struct ghes_task_work, twork); + int ret; + + ret = memory_failure(twcb->pfn, twcb->flags); + gen_pool_free(ghes_estatus_pool, (unsigned long)twcb, sizeof(*twcb)); + + if (!ret || ret == -EHWPOISON || ret == -EOPNOTSUPP) + return; + + pr_err("%#llx: Sending SIGBUS to %s:%d due to hardware memory corruption\n", + twcb->pfn, current->comm, task_pid_nr(current)); + force_sig(SIGBUS); +} + +static bool ghes_do_memory_failure(u64 physical_addr, int flags) +{ + struct ghes_task_work *twcb; unsigned long pfn; + + if (!IS_ENABLED(CONFIG_ACPI_APEI_MEMORY_FAILURE)) + return false; + + pfn = PHYS_PFN(physical_addr); + + if (flags == MF_ACTION_REQUIRED && current->mm) { + twcb = (void *)gen_pool_alloc(ghes_estatus_pool, sizeof(*twcb)); + if (!twcb) + return false; + + twcb->pfn = pfn; + twcb->flags = flags; + init_task_work(&twcb->twork, memory_failure_cb); + task_work_add(current, &twcb->twork, TWA_RESUME); + return true; + } + + memory_failure_queue(pfn, flags); + return true; +} + +static bool ghes_handle_memory_failure(struct acpi_hest_generic_data *gdata, + int sev, bool sync) +{ int flags = -1; int sec_sev = ghes_severity(gdata->error_severity); struct cper_sec_mem_err *mem_err = acpi_hest_get_payload(gdata); if (!(mem_err->validation_bits & CPER_MEM_VALID_PA)) - return; - - pfn = mem_err->physical_addr >> PAGE_SHIFT; - if (!pfn_valid(pfn)) { - pr_warn_ratelimited(FW_WARN GHES_PFX - "Invalid address in generic error data: %#llx\n", - mem_err->physical_addr); - return; - } + return false; /* iff following two events can be handled properly by now */ if (sec_sev == GHES_SEV_CORRECTED && (gdata->flags & CPER_SEC_ERROR_THRESHOLD_EXCEEDED)) flags = MF_SOFT_OFFLINE; if (sev == GHES_SEV_RECOVERABLE && sec_sev == GHES_SEV_RECOVERABLE) - flags = 0; + flags = sync ? MF_ACTION_REQUIRED : 0; if (flags != -1) - memory_failure_queue(pfn, flags); -#endif + return ghes_do_memory_failure(mem_err->physical_addr, flags); + + return false; +} + +static bool ghes_handle_arm_hw_error(struct acpi_hest_generic_data *gdata, + int sev, bool sync) +{ + struct cper_sec_proc_arm *err = acpi_hest_get_payload(gdata); + int flags = sync ? 
MF_ACTION_REQUIRED : 0; + char error_type[120]; + bool queued = false; + int sec_sev, i; + char *p; + + sec_sev = ghes_severity(gdata->error_severity); + log_arm_hw_error(err, sec_sev); + if (sev != GHES_SEV_RECOVERABLE || sec_sev != GHES_SEV_RECOVERABLE) + return false; + + p = (char *)(err + 1); + for (i = 0; i < err->err_info_num; i++) { + struct cper_arm_err_info *err_info = (struct cper_arm_err_info *)p; + bool is_cache = err_info->type & CPER_ARM_CACHE_ERROR; + bool has_pa = (err_info->validation_bits & CPER_ARM_INFO_VALID_PHYSICAL_ADDR); + + /* + * The field (err_info->error_info & BIT(26)) is fixed to set to + * 1 in some old firmware of HiSilicon Kunpeng920. We assume that + * firmware won't mix corrected errors in an uncorrected section, + * and don't filter out 'corrected' error here. + */ + if (is_cache && has_pa) { + queued = ghes_do_memory_failure(err_info->physical_fault_addr, flags); + p += err_info->length; + continue; + } + + cper_bits_to_str(error_type, sizeof(error_type), + FIELD_GET(CPER_ARM_ERR_TYPE_MASK, err_info->type), + cper_proc_error_type_strs, + ARRAY_SIZE(cper_proc_error_type_strs)); + + pr_warn_ratelimited(FW_WARN GHES_PFX + "Unhandled processor error type 0x%02x: %s%s\n", + err_info->type, error_type, + (err_info->type & ~CPER_ARM_ERR_TYPE_MASK) ? " with reserved bit(s)" : ""); + p += err_info->length; + } + + return queued; } /* @@ -437,6 +618,7 @@ static void ghes_handle_aer(struct acpi_hest_generic_data *gdata) pcie_err->validation_bits & CPER_PCIE_VALID_AER_INFO) { unsigned int devfn; int aer_severity; + u8 *aer_info; devfn = PCI_DEVFN(pcie_err->device_id.device, pcie_err->device_id.function); @@ -450,23 +632,283 @@ static void ghes_handle_aer(struct acpi_hest_generic_data *gdata) if (gdata->flags & CPER_SEC_RESET) aer_severity = AER_FATAL; + aer_info = (void *)gen_pool_alloc(ghes_estatus_pool, + sizeof(struct aer_capability_regs)); + if (!aer_info) + return; + memcpy(aer_info, pcie_err->aer_info, sizeof(struct aer_capability_regs)); + aer_recover_queue(pcie_err->device_id.segment, pcie_err->device_id.bus, devfn, aer_severity, (struct aer_capability_regs *) - pcie_err->aer_info); + aer_info); } #endif } +static BLOCKING_NOTIFIER_HEAD(vendor_record_notify_list); + +int ghes_register_vendor_record_notifier(struct notifier_block *nb) +{ + return blocking_notifier_chain_register(&vendor_record_notify_list, nb); +} +EXPORT_SYMBOL_GPL(ghes_register_vendor_record_notifier); + +void ghes_unregister_vendor_record_notifier(struct notifier_block *nb) +{ + blocking_notifier_chain_unregister(&vendor_record_notify_list, nb); +} +EXPORT_SYMBOL_GPL(ghes_unregister_vendor_record_notifier); + +static void ghes_vendor_record_work_func(struct work_struct *work) +{ + struct ghes_vendor_record_entry *entry; + struct acpi_hest_generic_data *gdata; + u32 len; + + entry = container_of(work, struct ghes_vendor_record_entry, work); + gdata = GHES_GDATA_FROM_VENDOR_ENTRY(entry); + + blocking_notifier_call_chain(&vendor_record_notify_list, + entry->error_severity, gdata); + + len = GHES_VENDOR_ENTRY_LEN(acpi_hest_get_record_size(gdata)); + gen_pool_free(ghes_estatus_pool, (unsigned long)entry, len); +} + +static void ghes_defer_non_standard_event(struct acpi_hest_generic_data *gdata, + int sev) +{ + struct acpi_hest_generic_data *copied_gdata; + struct ghes_vendor_record_entry *entry; + u32 len; + + len = GHES_VENDOR_ENTRY_LEN(acpi_hest_get_record_size(gdata)); + entry = (void *)gen_pool_alloc(ghes_estatus_pool, len); + if (!entry) + return; + + copied_gdata = 
GHES_GDATA_FROM_VENDOR_ENTRY(entry); + memcpy(copied_gdata, gdata, acpi_hest_get_record_size(gdata)); + entry->error_severity = sev; + + INIT_WORK(&entry->work, ghes_vendor_record_work_func); + schedule_work(&entry->work); +} + +/* Room for 8 entries */ +#define CXL_CPER_PROT_ERR_FIFO_DEPTH 8 +static DEFINE_KFIFO(cxl_cper_prot_err_fifo, struct cxl_cper_prot_err_work_data, + CXL_CPER_PROT_ERR_FIFO_DEPTH); + +/* Synchronize schedule_work() with cxl_cper_prot_err_work changes */ +static DEFINE_SPINLOCK(cxl_cper_prot_err_work_lock); +struct work_struct *cxl_cper_prot_err_work; + +static void cxl_cper_post_prot_err(struct cxl_cper_sec_prot_err *prot_err, + int severity) +{ +#ifdef CONFIG_ACPI_APEI_PCIEAER + struct cxl_cper_prot_err_work_data wd; + u8 *dvsec_start, *cap_start; + + if (!(prot_err->valid_bits & PROT_ERR_VALID_AGENT_ADDRESS)) { + pr_err_ratelimited("CXL CPER invalid agent type\n"); + return; + } + + if (!(prot_err->valid_bits & PROT_ERR_VALID_ERROR_LOG)) { + pr_err_ratelimited("CXL CPER invalid protocol error log\n"); + return; + } + + if (prot_err->err_len != sizeof(struct cxl_ras_capability_regs)) { + pr_err_ratelimited("CXL CPER invalid RAS Cap size (%u)\n", + prot_err->err_len); + return; + } + + if (!(prot_err->valid_bits & PROT_ERR_VALID_SERIAL_NUMBER)) + pr_warn(FW_WARN "CXL CPER no device serial number\n"); + + guard(spinlock_irqsave)(&cxl_cper_prot_err_work_lock); + + if (!cxl_cper_prot_err_work) + return; + + switch (prot_err->agent_type) { + case RCD: + case DEVICE: + case LD: + case FMLD: + case RP: + case DSP: + case USP: + memcpy(&wd.prot_err, prot_err, sizeof(wd.prot_err)); + + dvsec_start = (u8 *)(prot_err + 1); + cap_start = dvsec_start + prot_err->dvsec_len; + + memcpy(&wd.ras_cap, cap_start, sizeof(wd.ras_cap)); + wd.severity = cper_severity_to_aer(severity); + break; + default: + pr_err_ratelimited("CXL CPER invalid agent type: %d\n", + prot_err->agent_type); + return; + } + + if (!kfifo_put(&cxl_cper_prot_err_fifo, wd)) { + pr_err_ratelimited("CXL CPER kfifo overflow\n"); + return; + } + + schedule_work(cxl_cper_prot_err_work); +#endif +} + +int cxl_cper_register_prot_err_work(struct work_struct *work) +{ + if (cxl_cper_prot_err_work) + return -EINVAL; + + guard(spinlock)(&cxl_cper_prot_err_work_lock); + cxl_cper_prot_err_work = work; + return 0; +} +EXPORT_SYMBOL_NS_GPL(cxl_cper_register_prot_err_work, "CXL"); + +int cxl_cper_unregister_prot_err_work(struct work_struct *work) +{ + if (cxl_cper_prot_err_work != work) + return -EINVAL; + + guard(spinlock)(&cxl_cper_prot_err_work_lock); + cxl_cper_prot_err_work = NULL; + return 0; +} +EXPORT_SYMBOL_NS_GPL(cxl_cper_unregister_prot_err_work, "CXL"); + +int cxl_cper_prot_err_kfifo_get(struct cxl_cper_prot_err_work_data *wd) +{ + return kfifo_get(&cxl_cper_prot_err_fifo, wd); +} +EXPORT_SYMBOL_NS_GPL(cxl_cper_prot_err_kfifo_get, "CXL"); + +/* Room for 8 entries for each of the 4 event log queues */ +#define CXL_CPER_FIFO_DEPTH 32 +DEFINE_KFIFO(cxl_cper_fifo, struct cxl_cper_work_data, CXL_CPER_FIFO_DEPTH); + +/* Synchronize schedule_work() with cxl_cper_work changes */ +static DEFINE_SPINLOCK(cxl_cper_work_lock); +struct work_struct *cxl_cper_work; + +static void cxl_cper_post_event(enum cxl_event_type event_type, + struct cxl_cper_event_rec *rec) +{ + struct cxl_cper_work_data wd; + + if (rec->hdr.length <= sizeof(rec->hdr) || + rec->hdr.length > sizeof(*rec)) { + pr_err(FW_WARN "CXL CPER Invalid section length (%u)\n", + rec->hdr.length); + return; + } + + if (!(rec->hdr.validation_bits & 
CPER_CXL_COMP_EVENT_LOG_VALID)) { + pr_err(FW_WARN "CXL CPER invalid event\n"); + return; + } + + guard(spinlock_irqsave)(&cxl_cper_work_lock); + + if (!cxl_cper_work) + return; + + wd.event_type = event_type; + memcpy(&wd.rec, rec, sizeof(wd.rec)); + + if (!kfifo_put(&cxl_cper_fifo, wd)) { + pr_err_ratelimited("CXL CPER kfifo overflow\n"); + return; + } + + schedule_work(cxl_cper_work); +} + +int cxl_cper_register_work(struct work_struct *work) +{ + if (cxl_cper_work) + return -EINVAL; + + guard(spinlock)(&cxl_cper_work_lock); + cxl_cper_work = work; + return 0; +} +EXPORT_SYMBOL_NS_GPL(cxl_cper_register_work, "CXL"); + +int cxl_cper_unregister_work(struct work_struct *work) +{ + if (cxl_cper_work != work) + return -EINVAL; + + guard(spinlock)(&cxl_cper_work_lock); + cxl_cper_work = NULL; + return 0; +} +EXPORT_SYMBOL_NS_GPL(cxl_cper_unregister_work, "CXL"); + +int cxl_cper_kfifo_get(struct cxl_cper_work_data *wd) +{ + return kfifo_get(&cxl_cper_fifo, wd); +} +EXPORT_SYMBOL_NS_GPL(cxl_cper_kfifo_get, "CXL"); + +static void ghes_log_hwerr(int sev, guid_t *sec_type) +{ + if (sev != CPER_SEV_RECOVERABLE) + return; + + if (guid_equal(sec_type, &CPER_SEC_PROC_ARM) || + guid_equal(sec_type, &CPER_SEC_PROC_GENERIC) || + guid_equal(sec_type, &CPER_SEC_PROC_IA)) { + hwerr_log_error_type(HWERR_RECOV_CPU); + return; + } + + if (guid_equal(sec_type, &CPER_SEC_CXL_PROT_ERR) || + guid_equal(sec_type, &CPER_SEC_CXL_GEN_MEDIA_GUID) || + guid_equal(sec_type, &CPER_SEC_CXL_DRAM_GUID) || + guid_equal(sec_type, &CPER_SEC_CXL_MEM_MODULE_GUID)) { + hwerr_log_error_type(HWERR_RECOV_CXL); + return; + } + + if (guid_equal(sec_type, &CPER_SEC_PCIE) || + guid_equal(sec_type, &CPER_SEC_PCI_X_BUS)) { + hwerr_log_error_type(HWERR_RECOV_PCI); + return; + } + + if (guid_equal(sec_type, &CPER_SEC_PLATFORM_MEM)) { + hwerr_log_error_type(HWERR_RECOV_MEMORY); + return; + } + + hwerr_log_error_type(HWERR_RECOV_OTHERS); +} + static void ghes_do_proc(struct ghes *ghes, const struct acpi_hest_generic_status *estatus) { int sev, sec_sev; struct acpi_hest_generic_data *gdata; guid_t *sec_type; - guid_t *fru_id = &NULL_UUID_LE; + const guid_t *fru_id = &guid_null; char *fru_text = ""; + bool queued = false; + bool sync = is_hest_sync_notify(ghes); sev = ghes_severity(estatus->error_severity); apei_estatus_for_each_section(estatus, gdata) { @@ -478,29 +920,54 @@ static void ghes_do_proc(struct ghes *ghes, if (gdata->validation_bits & CPER_SEC_VALID_FRU_TEXT) fru_text = gdata->fru_text; + ghes_log_hwerr(sev, sec_type); if (guid_equal(sec_type, &CPER_SEC_PLATFORM_MEM)) { struct cper_sec_mem_err *mem_err = acpi_hest_get_payload(gdata); - ghes_edac_report_mem_error(sev, mem_err); + atomic_notifier_call_chain(&ghes_report_chain, sev, mem_err); arch_apei_report_mem_error(sev, mem_err); - ghes_handle_memory_failure(gdata, sev); - } - else if (guid_equal(sec_type, &CPER_SEC_PCIE)) { + queued = ghes_handle_memory_failure(gdata, sev, sync); + } else if (guid_equal(sec_type, &CPER_SEC_PCIE)) { ghes_handle_aer(gdata); - } - else if (guid_equal(sec_type, &CPER_SEC_PROC_ARM)) { - struct cper_sec_proc_arm *err = acpi_hest_get_payload(gdata); + } else if (guid_equal(sec_type, &CPER_SEC_PROC_ARM)) { + queued = ghes_handle_arm_hw_error(gdata, sev, sync); + } else if (guid_equal(sec_type, &CPER_SEC_CXL_PROT_ERR)) { + struct cxl_cper_sec_prot_err *prot_err = acpi_hest_get_payload(gdata); + + cxl_cper_post_prot_err(prot_err, gdata->error_severity); + } else if (guid_equal(sec_type, &CPER_SEC_CXL_GEN_MEDIA_GUID)) { + struct cxl_cper_event_rec *rec = 
acpi_hest_get_payload(gdata); + + cxl_cper_post_event(CXL_CPER_EVENT_GEN_MEDIA, rec); + } else if (guid_equal(sec_type, &CPER_SEC_CXL_DRAM_GUID)) { + struct cxl_cper_event_rec *rec = acpi_hest_get_payload(gdata); + + cxl_cper_post_event(CXL_CPER_EVENT_DRAM, rec); + } else if (guid_equal(sec_type, &CPER_SEC_CXL_MEM_MODULE_GUID)) { + struct cxl_cper_event_rec *rec = acpi_hest_get_payload(gdata); - log_arm_hw_error(err); + cxl_cper_post_event(CXL_CPER_EVENT_MEM_MODULE, rec); } else { void *err = acpi_hest_get_payload(gdata); + ghes_defer_non_standard_event(gdata, sev); log_non_standard_event(sec_type, fru_id, fru_text, sec_sev, err, gdata->error_data_length); } } + + /* + * If no memory failure work is queued for abnormal synchronous + * errors, do a force kill. + */ + if (sync && !queued) { + dev_err(ghes->dev, + HW_ERR GHES_PFX "%s:%d: synchronous unrecoverable error (SIGBUS)\n", + current->comm, task_pid_nr(current)); + force_sig(SIGBUS); + } } static void __ghes_print_estatus(const char *pfx, @@ -608,48 +1075,42 @@ static struct ghes_estatus_cache *ghes_estatus_cache_alloc( return cache; } -static void ghes_estatus_cache_free(struct ghes_estatus_cache *cache) +static void ghes_estatus_cache_rcu_free(struct rcu_head *head) { + struct ghes_estatus_cache *cache; u32 len; + cache = container_of(head, struct ghes_estatus_cache, rcu); len = cper_estatus_len(GHES_ESTATUS_FROM_CACHE(cache)); len = GHES_ESTATUS_CACHE_LEN(len); gen_pool_free(ghes_estatus_pool, (unsigned long)cache, len); atomic_dec(&ghes_estatus_cache_alloced); } -static void ghes_estatus_cache_rcu_free(struct rcu_head *head) -{ - struct ghes_estatus_cache *cache; - - cache = container_of(head, struct ghes_estatus_cache, rcu); - ghes_estatus_cache_free(cache); -} - -static void ghes_estatus_cache_add( - struct acpi_hest_generic *generic, - struct acpi_hest_generic_status *estatus) +static void +ghes_estatus_cache_add(struct acpi_hest_generic *generic, + struct acpi_hest_generic_status *estatus) { - int i, slot = -1, count; unsigned long long now, duration, period, max_period = 0; - struct ghes_estatus_cache *cache, *slot_cache = NULL, *new_cache; + struct ghes_estatus_cache *cache, *new_cache; + struct ghes_estatus_cache __rcu *victim; + int i, slot = -1, count; new_cache = ghes_estatus_cache_alloc(generic, estatus); - if (new_cache == NULL) + if (!new_cache) return; + rcu_read_lock(); now = sched_clock(); for (i = 0; i < GHES_ESTATUS_CACHES_SIZE; i++) { cache = rcu_dereference(ghes_estatus_caches[i]); if (cache == NULL) { slot = i; - slot_cache = NULL; break; } duration = now - cache->time_in; if (duration >= GHES_ESTATUS_IN_CACHE_MAX_NSEC) { slot = i; - slot_cache = cache; break; } count = atomic_read(&cache->count); @@ -658,77 +1119,71 @@ static void ghes_estatus_cache_add( if (period > max_period) { max_period = period; slot = i; - slot_cache = cache; } } - /* new_cache must be put into array after its contents are written */ - smp_wmb(); - if (slot != -1 && cmpxchg(ghes_estatus_caches + slot, - slot_cache, new_cache) == slot_cache) { - if (slot_cache) - call_rcu(&slot_cache->rcu, ghes_estatus_cache_rcu_free); - } else - ghes_estatus_cache_free(new_cache); rcu_read_unlock(); -} - -static int ghes_ack_error(struct acpi_hest_generic_v2 *gv2) -{ - int rc; - u64 val = 0; - - rc = apei_read(&val, &gv2->read_ack_register); - if (rc) - return rc; - val &= gv2->read_ack_preserve << gv2->read_ack_register.bit_offset; - val |= gv2->read_ack_write << gv2->read_ack_register.bit_offset; + if (slot != -1) { + /* + * Use release semantics to 
ensure that ghes_estatus_cached() + * running on another CPU will see the updated cache fields if + * it can see the new value of the pointer. + */ + victim = xchg_release(&ghes_estatus_caches[slot], + RCU_INITIALIZER(new_cache)); - return apei_write(val, &gv2->read_ack_register); + /* + * At this point, victim may point to a cached item different + * from the one based on which we selected the slot. Instead of + * going to the loop again to pick another slot, let's just + * drop the other item anyway: this may cause a false cache + * miss later on, but that won't cause any problems. + */ + if (victim) + call_rcu(&unrcu_pointer(victim)->rcu, + ghes_estatus_cache_rcu_free); + } } -static void __ghes_panic(struct ghes *ghes) +static void __ghes_panic(struct ghes *ghes, + struct acpi_hest_generic_status *estatus, + u64 buf_paddr, enum fixed_addresses fixmap_idx) { - __ghes_print_estatus(KERN_EMERG, ghes->generic, ghes->estatus); + const char *msg = GHES_PFX "Fatal hardware error"; + + __ghes_print_estatus(KERN_EMERG, ghes->generic, estatus); - ghes_clear_estatus(ghes); + add_taint(TAINT_MACHINE_CHECK, LOCKDEP_STILL_OK); + + ghes_clear_estatus(ghes, estatus, buf_paddr, fixmap_idx); - /* reboot to log the error! */ if (!panic_timeout) - panic_timeout = ghes_panic_timeout; - panic("Fatal hardware error!"); + pr_emerg("%s but panic disabled\n", msg); + + panic(msg); } static int ghes_proc(struct ghes *ghes) { + struct acpi_hest_generic_status *estatus = ghes->estatus; + u64 buf_paddr; int rc; - rc = ghes_read_estatus(ghes, 0); + rc = ghes_read_estatus(ghes, estatus, &buf_paddr, FIX_APEI_GHES_IRQ); if (rc) goto out; - if (ghes_severity(ghes->estatus->error_severity) >= GHES_SEV_PANIC) { - __ghes_panic(ghes); - } + if (ghes_severity(estatus->error_severity) >= GHES_SEV_PANIC) + __ghes_panic(ghes, estatus, buf_paddr, FIX_APEI_GHES_IRQ); - if (!ghes_estatus_cached(ghes->estatus)) { - if (ghes_print_estatus(NULL, ghes->generic, ghes->estatus)) - ghes_estatus_cache_add(ghes->generic, ghes->estatus); + if (!ghes_estatus_cached(estatus)) { + if (ghes_print_estatus(NULL, ghes->generic, estatus)) + ghes_estatus_cache_add(ghes->generic, estatus); } - ghes_do_proc(ghes, ghes->estatus); + ghes_do_proc(ghes, estatus); out: - ghes_clear_estatus(ghes); - - if (rc == -ENOENT) - return rc; - - /* - * GHESv2 type HEST entries introduce support for error acknowledgment, - * so only acknowledge the error if this support is present. 
- */ - if (is_hest_type_generic_v2(ghes)) - return ghes_ack_error(ghes->generic_v2); + ghes_clear_estatus(ghes, estatus, buf_paddr, FIX_APEI_GHES_IRQ); return rc; } @@ -739,8 +1194,8 @@ static void ghes_add_timer(struct ghes *ghes) unsigned long expire; if (!g->notify.poll_interval) { - pr_warning(FW_WARN GHES_PFX "Poll interval is 0 for generic hardware error source: %d, disabled.\n", - g->header.source_id); + pr_warn(FW_WARN GHES_PFX "Poll interval is 0 for generic hardware error source: %d, disabled.\n", + g->header.source_id); return; } expire = jiffies + msecs_to_jiffies(g->notify.poll_interval); @@ -750,9 +1205,12 @@ static void ghes_add_timer(struct ghes *ghes) static void ghes_poll_func(struct timer_list *t) { - struct ghes *ghes = from_timer(ghes, t, timer); + struct ghes *ghes = timer_container_of(ghes, t, timer); + unsigned long flags; + spin_lock_irqsave(&ghes_notify_lock_irq, flags); ghes_proc(ghes); + spin_unlock_irqrestore(&ghes_notify_lock_irq, flags); if (!(ghes->flags & GHES_EXITING)) ghes_add_timer(ghes); } @@ -760,9 +1218,12 @@ static void ghes_poll_func(struct timer_list *t) static irqreturn_t ghes_irq_func(int irq, void *data) { struct ghes *ghes = data; + unsigned long flags; int rc; + spin_lock_irqsave(&ghes_notify_lock_irq, flags); rc = ghes_proc(ghes); + spin_unlock_irqrestore(&ghes_notify_lock_irq, flags); if (rc) return IRQ_NONE; @@ -773,14 +1234,15 @@ static int ghes_notify_hed(struct notifier_block *this, unsigned long event, void *data) { struct ghes *ghes; + unsigned long flags; int ret = NOTIFY_DONE; - rcu_read_lock(); + spin_lock_irqsave(&ghes_notify_lock_irq, flags); list_for_each_entry_rcu(ghes, &ghes_hed, list) { if (!ghes_proc(ghes)) ret = NOTIFY_OK; } - rcu_read_unlock(); + spin_unlock_irqrestore(&ghes_notify_lock_irq, flags); return ret; } @@ -789,66 +1251,20 @@ static struct notifier_block ghes_notifier_hed = { .notifier_call = ghes_notify_hed, }; -#ifdef CONFIG_ACPI_APEI_SEA -static LIST_HEAD(ghes_sea); - /* - * Return 0 only if one of the SEA error sources successfully reported an error - * record sent from the firmware. - */ -int ghes_notify_sea(void) -{ - struct ghes *ghes; - int ret = -ENOENT; - - rcu_read_lock(); - list_for_each_entry_rcu(ghes, &ghes_sea, list) { - if (!ghes_proc(ghes)) - ret = 0; - } - rcu_read_unlock(); - return ret; -} - -static void ghes_sea_add(struct ghes *ghes) -{ - mutex_lock(&ghes_list_mutex); - list_add_rcu(&ghes->list, &ghes_sea); - mutex_unlock(&ghes_list_mutex); -} - -static void ghes_sea_remove(struct ghes *ghes) -{ - mutex_lock(&ghes_list_mutex); - list_del_rcu(&ghes->list); - mutex_unlock(&ghes_list_mutex); - synchronize_rcu(); -} -#else /* CONFIG_ACPI_APEI_SEA */ -static inline void ghes_sea_add(struct ghes *ghes) { } -static inline void ghes_sea_remove(struct ghes *ghes) { } -#endif /* CONFIG_ACPI_APEI_SEA */ - -#ifdef CONFIG_HAVE_ACPI_APEI_NMI -/* - * printk is not safe in NMI context. So in NMI handler, we allocate - * required memory from lock-less memory allocator - * (ghes_estatus_pool), save estatus into it, put them into lock-less - * list (ghes_estatus_llist), then delay printk into IRQ context via - * irq_work (ghes_proc_irq_work). ghes_estatus_size_request record - * required pool size by all NMI error source. + * Handlers for CPER records may not be NMI safe. For example, + * memory_failure_queue() takes spinlocks and calls schedule_work_on(). + * In any NMI-like handler, memory from ghes_estatus_pool is used to save + * estatus, and added to the ghes_estatus_llist. 
irq_work_queue() causes + * ghes_proc_in_irq() to run in IRQ context where each estatus in + * ghes_estatus_llist is processed. + * + * Memory from the ghes_estatus_pool is also used with the ghes_estatus_cache + * to suppress frequent messages. */ static struct llist_head ghes_estatus_llist; static struct irq_work ghes_proc_irq_work; -/* - * NMI may be triggered on any CPU, so ghes_in_nmi is used for - * having only one concurrent reader. - */ -static atomic_t ghes_in_nmi = ATOMIC_INIT(0); - -static LIST_HEAD(ghes_nmi); - static void ghes_proc_in_irq(struct irq_work *irq_work) { struct llist_node *llnode, *next; @@ -870,7 +1286,9 @@ static void ghes_proc_in_irq(struct irq_work *irq_work) estatus = GHES_ESTATUS_FROM_NODE(estatus_node); len = cper_estatus_len(estatus); node_len = GHES_ESTATUS_NODE_LEN(len); + ghes_do_proc(estatus_node->ghes, estatus); + if (!ghes_estatus_cached(estatus)) { generic = estatus_node->generic; if (ghes_print_estatus(NULL, generic, estatus)) @@ -878,6 +1296,7 @@ static void ghes_proc_in_irq(struct irq_work *irq_work) } gen_pool_free(ghes_estatus_pool, (unsigned long)estatus_node, node_len); + llnode = next; } } @@ -905,96 +1324,154 @@ static void ghes_print_queued_estatus(void) } } -/* Save estatus for further processing in IRQ context */ -static void __process_error(struct ghes *ghes) +static int ghes_in_nmi_queue_one_entry(struct ghes *ghes, + enum fixed_addresses fixmap_idx) { -#ifdef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG - u32 len, node_len; + struct acpi_hest_generic_status *estatus, tmp_header; struct ghes_estatus_node *estatus_node; - struct acpi_hest_generic_status *estatus; + u32 len, node_len; + u64 buf_paddr; + int sev, rc; - if (ghes_estatus_cached(ghes->estatus)) - return; + if (!IS_ENABLED(CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG)) + return -EOPNOTSUPP; - len = cper_estatus_len(ghes->estatus); - node_len = GHES_ESTATUS_NODE_LEN(len); + rc = __ghes_peek_estatus(ghes, &tmp_header, &buf_paddr, fixmap_idx); + if (rc) { + ghes_clear_estatus(ghes, &tmp_header, buf_paddr, fixmap_idx); + return rc; + } + + rc = __ghes_check_estatus(ghes, &tmp_header); + if (rc) { + ghes_clear_estatus(ghes, &tmp_header, buf_paddr, fixmap_idx); + return rc; + } + len = cper_estatus_len(&tmp_header); + node_len = GHES_ESTATUS_NODE_LEN(len); estatus_node = (void *)gen_pool_alloc(ghes_estatus_pool, node_len); if (!estatus_node) - return; + return -ENOMEM; estatus_node->ghes = ghes; estatus_node->generic = ghes->generic; estatus = GHES_ESTATUS_FROM_NODE(estatus_node); - memcpy(estatus, ghes->estatus, len); - llist_add(&estatus_node->llnode, &ghes_estatus_llist); -#endif -} -static int ghes_notify_nmi(unsigned int cmd, struct pt_regs *regs) -{ - struct ghes *ghes; - int sev, ret = NMI_DONE; + if (__ghes_read_estatus(estatus, buf_paddr, fixmap_idx, len)) { + ghes_clear_estatus(ghes, estatus, buf_paddr, fixmap_idx); + rc = -ENOENT; + goto no_work; + } - if (!atomic_add_unless(&ghes_in_nmi, 1, 1)) - return ret; + sev = ghes_severity(estatus->error_severity); + if (sev >= GHES_SEV_PANIC) { + ghes_print_queued_estatus(); + __ghes_panic(ghes, estatus, buf_paddr, fixmap_idx); + } - list_for_each_entry_rcu(ghes, &ghes_nmi, list) { - if (ghes_read_estatus(ghes, 1)) { - ghes_clear_estatus(ghes); - continue; - } else { - ret = NMI_HANDLED; - } + ghes_clear_estatus(ghes, &tmp_header, buf_paddr, fixmap_idx); - sev = ghes_severity(ghes->estatus->error_severity); - if (sev >= GHES_SEV_PANIC) { - oops_begin(); - ghes_print_queued_estatus(); - __ghes_panic(ghes); - } + /* This error has been reported before, 
don't process it again. */ + if (ghes_estatus_cached(estatus)) + goto no_work; - if (!(ghes->flags & GHES_TO_CLEAR)) - continue; + llist_add(&estatus_node->llnode, &ghes_estatus_llist); + + return rc; + +no_work: + gen_pool_free(ghes_estatus_pool, (unsigned long)estatus_node, + node_len); + + return rc; +} - __process_error(ghes); - ghes_clear_estatus(ghes); +static int ghes_in_nmi_spool_from_list(struct list_head *rcu_list, + enum fixed_addresses fixmap_idx) +{ + int ret = -ENOENT; + struct ghes *ghes; + + rcu_read_lock(); + list_for_each_entry_rcu(ghes, rcu_list, list) { + if (!ghes_in_nmi_queue_one_entry(ghes, fixmap_idx)) + ret = 0; } + rcu_read_unlock(); -#ifdef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG - if (ret == NMI_HANDLED) + if (IS_ENABLED(CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG) && !ret) irq_work_queue(&ghes_proc_irq_work); -#endif - atomic_dec(&ghes_in_nmi); + return ret; } -static unsigned long ghes_esource_prealloc_size( - const struct acpi_hest_generic *generic) +#ifdef CONFIG_ACPI_APEI_SEA +static LIST_HEAD(ghes_sea); + +/* + * Return 0 only if one of the SEA error sources successfully reported an error + * record sent from the firmware. + */ +int ghes_notify_sea(void) { - unsigned long block_length, prealloc_records, prealloc_size; + static DEFINE_RAW_SPINLOCK(ghes_notify_lock_sea); + int rv; - block_length = min_t(unsigned long, generic->error_block_length, - GHES_ESTATUS_MAX_SIZE); - prealloc_records = max_t(unsigned long, - generic->records_to_preallocate, 1); - prealloc_size = min_t(unsigned long, block_length * prealloc_records, - GHES_ESOURCE_PREALLOC_MAX_SIZE); + raw_spin_lock(&ghes_notify_lock_sea); + rv = ghes_in_nmi_spool_from_list(&ghes_sea, FIX_APEI_GHES_SEA); + raw_spin_unlock(&ghes_notify_lock_sea); - return prealloc_size; + return rv; } -static void ghes_estatus_pool_shrink(unsigned long len) +static void ghes_sea_add(struct ghes *ghes) { - ghes_estatus_pool_size_request -= PAGE_ALIGN(len); + mutex_lock(&ghes_list_mutex); + list_add_rcu(&ghes->list, &ghes_sea); + mutex_unlock(&ghes_list_mutex); } -static void ghes_nmi_add(struct ghes *ghes) +static void ghes_sea_remove(struct ghes *ghes) { - unsigned long len; + mutex_lock(&ghes_list_mutex); + list_del_rcu(&ghes->list); + mutex_unlock(&ghes_list_mutex); + synchronize_rcu(); +} +#else /* CONFIG_ACPI_APEI_SEA */ +static inline void ghes_sea_add(struct ghes *ghes) { } +static inline void ghes_sea_remove(struct ghes *ghes) { } +#endif /* CONFIG_ACPI_APEI_SEA */ - len = ghes_esource_prealloc_size(ghes->generic); - ghes_estatus_pool_expand(len); +#ifdef CONFIG_HAVE_ACPI_APEI_NMI +/* + * NMI may be triggered on any CPU, so ghes_in_nmi is used for + * having only one concurrent reader. 
+ */ +static atomic_t ghes_in_nmi = ATOMIC_INIT(0); + +static LIST_HEAD(ghes_nmi); + +static int ghes_notify_nmi(unsigned int cmd, struct pt_regs *regs) +{ + static DEFINE_RAW_SPINLOCK(ghes_notify_lock_nmi); + int ret = NMI_DONE; + + if (!atomic_add_unless(&ghes_in_nmi, 1, 1)) + return ret; + + raw_spin_lock(&ghes_notify_lock_nmi); + if (!ghes_in_nmi_spool_from_list(&ghes_nmi, FIX_APEI_GHES_NMI)) + ret = NMI_HANDLED; + raw_spin_unlock(&ghes_notify_lock_nmi); + + atomic_dec(&ghes_in_nmi); + return ret; +} + +static void ghes_nmi_add(struct ghes *ghes) +{ mutex_lock(&ghes_list_mutex); if (list_empty(&ghes_nmi)) register_nmi_handler(NMI_LOCAL, ghes_notify_nmi, 0, "ghes"); @@ -1004,8 +1481,6 @@ static void ghes_nmi_add(struct ghes *ghes) static void ghes_nmi_remove(struct ghes *ghes) { - unsigned long len; - mutex_lock(&ghes_list_mutex); list_del_rcu(&ghes->list); if (list_empty(&ghes_nmi)) @@ -1016,24 +1491,79 @@ static void ghes_nmi_remove(struct ghes *ghes) * freed after NMI handler finishes. */ synchronize_rcu(); - len = ghes_esource_prealloc_size(ghes->generic); - ghes_estatus_pool_shrink(len); } +#else /* CONFIG_HAVE_ACPI_APEI_NMI */ +static inline void ghes_nmi_add(struct ghes *ghes) { } +static inline void ghes_nmi_remove(struct ghes *ghes) { } +#endif /* CONFIG_HAVE_ACPI_APEI_NMI */ static void ghes_nmi_init_cxt(void) { init_irq_work(&ghes_proc_irq_work, ghes_proc_in_irq); } -#else /* CONFIG_HAVE_ACPI_APEI_NMI */ -static inline void ghes_nmi_add(struct ghes *ghes) { } -static inline void ghes_nmi_remove(struct ghes *ghes) { } -static inline void ghes_nmi_init_cxt(void) { } -#endif /* CONFIG_HAVE_ACPI_APEI_NMI */ + +static int __ghes_sdei_callback(struct ghes *ghes, + enum fixed_addresses fixmap_idx) +{ + if (!ghes_in_nmi_queue_one_entry(ghes, fixmap_idx)) { + irq_work_queue(&ghes_proc_irq_work); + + return 0; + } + + return -ENOENT; +} + +static int ghes_sdei_normal_callback(u32 event_num, struct pt_regs *regs, + void *arg) +{ + static DEFINE_RAW_SPINLOCK(ghes_notify_lock_sdei_normal); + struct ghes *ghes = arg; + int err; + + raw_spin_lock(&ghes_notify_lock_sdei_normal); + err = __ghes_sdei_callback(ghes, FIX_APEI_GHES_SDEI_NORMAL); + raw_spin_unlock(&ghes_notify_lock_sdei_normal); + + return err; +} + +static int ghes_sdei_critical_callback(u32 event_num, struct pt_regs *regs, + void *arg) +{ + static DEFINE_RAW_SPINLOCK(ghes_notify_lock_sdei_critical); + struct ghes *ghes = arg; + int err; + + raw_spin_lock(&ghes_notify_lock_sdei_critical); + err = __ghes_sdei_callback(ghes, FIX_APEI_GHES_SDEI_CRITICAL); + raw_spin_unlock(&ghes_notify_lock_sdei_critical); + + return err; +} + +static int apei_sdei_register_ghes(struct ghes *ghes) +{ + if (!IS_ENABLED(CONFIG_ARM_SDE_INTERFACE)) + return -EOPNOTSUPP; + + return sdei_register_ghes(ghes, ghes_sdei_normal_callback, + ghes_sdei_critical_callback); +} + +static int apei_sdei_unregister_ghes(struct ghes *ghes) +{ + if (!IS_ENABLED(CONFIG_ARM_SDE_INTERFACE)) + return -EOPNOTSUPP; + + return sdei_unregister_ghes(ghes); +} static int ghes_probe(struct platform_device *ghes_dev) { struct acpi_hest_generic *generic; struct ghes *ghes = NULL; + unsigned long flags; int rc = -EINVAL; @@ -1064,22 +1594,28 @@ static int ghes_probe(struct platform_device *ghes_dev) goto err; } break; + case ACPI_HEST_NOTIFY_SOFTWARE_DELEGATED: + if (!IS_ENABLED(CONFIG_ARM_SDE_INTERFACE)) { + pr_warn(GHES_PFX "Generic hardware error source: %d notified via SDE Interface is not supported!\n", + generic->header.source_id); + goto err; + } + break; case 
ACPI_HEST_NOTIFY_LOCAL: - pr_warning(GHES_PFX "Generic hardware error source: %d notified via local interrupt is not supported!\n", - generic->header.source_id); + pr_warn(GHES_PFX "Generic hardware error source: %d notified via local interrupt is not supported!\n", + generic->header.source_id); goto err; default: - pr_warning(FW_WARN GHES_PFX "Unknown notification type: %u for generic hardware error source: %d\n", - generic->notify.type, generic->header.source_id); + pr_warn(FW_WARN GHES_PFX "Unknown notification type: %u for generic hardware error source: %d\n", + generic->notify.type, generic->header.source_id); goto err; } rc = -EIO; if (generic->error_block_length < sizeof(struct acpi_hest_generic_status)) { - pr_warning(FW_BUG GHES_PFX "Invalid error block length: %u for generic hardware error source: %d\n", - generic->error_block_length, - generic->header.source_id); + pr_warn(FW_BUG GHES_PFX "Invalid error block length: %u for generic hardware error source: %d\n", + generic->error_block_length, generic->header.source_id); goto err; } ghes = ghes_new(generic); @@ -1091,7 +1627,7 @@ static int ghes_probe(struct platform_device *ghes_dev) switch (generic->notify.type) { case ACPI_HEST_NOTIFY_POLLED: - timer_setup(&ghes->timer, ghes_poll_func, TIMER_DEFERRABLE); + timer_setup(&ghes->timer, ghes_poll_func, 0); ghes_add_timer(ghes); break; case ACPI_HEST_NOTIFY_EXTERNAL: @@ -1127,16 +1663,27 @@ static int ghes_probe(struct platform_device *ghes_dev) case ACPI_HEST_NOTIFY_NMI: ghes_nmi_add(ghes); break; + case ACPI_HEST_NOTIFY_SOFTWARE_DELEGATED: + rc = apei_sdei_register_ghes(ghes); + if (rc) + goto err; + break; default: BUG(); } platform_set_drvdata(ghes_dev, ghes); - ghes_edac_register(ghes, &ghes_dev->dev); + ghes->dev = &ghes_dev->dev; + + mutex_lock(&ghes_devs_mutex); + list_add_tail(&ghes->elist, &ghes_devs); + mutex_unlock(&ghes_devs_mutex); /* Handle any pending errors right away */ + spin_lock_irqsave(&ghes_notify_lock_irq, flags); ghes_proc(ghes); + spin_unlock_irqrestore(&ghes_notify_lock_irq, flags); return 0; @@ -1148,8 +1695,9 @@ err: return rc; } -static int ghes_remove(struct platform_device *ghes_dev) +static void ghes_remove(struct platform_device *ghes_dev) { + int rc; struct ghes *ghes; struct acpi_hest_generic *generic; @@ -1159,7 +1707,7 @@ static int ghes_remove(struct platform_device *ghes_dev) ghes->flags |= GHES_EXITING; switch (generic->notify.type) { case ACPI_HEST_NOTIFY_POLLED: - del_timer_sync(&ghes->timer); + timer_shutdown_sync(&ghes->timer); break; case ACPI_HEST_NOTIFY_EXTERNAL: free_irq(ghes->irq, ghes); @@ -1182,6 +1730,18 @@ static int ghes_remove(struct platform_device *ghes_dev) case ACPI_HEST_NOTIFY_NMI: ghes_nmi_remove(ghes); break; + case ACPI_HEST_NOTIFY_SOFTWARE_DELEGATED: + rc = apei_sdei_unregister_ghes(ghes); + if (rc) { + /* + * Returning early results in a resource leak, but we're + * only here if stopping the hardware failed. 
+ */ + dev_err(&ghes_dev->dev, "Failed to unregister ghes (%pe)\n", + ERR_PTR(rc)); + return; + } + break; default: BUG(); break; @@ -1189,13 +1749,11 @@ static int ghes_remove(struct platform_device *ghes_dev) ghes_fini(ghes); - ghes_edac_unregister(ghes); + mutex_lock(&ghes_devs_mutex); + list_del(&ghes->elist); + mutex_unlock(&ghes_devs_mutex); kfree(ghes); - - platform_set_drvdata(ghes_dev, NULL); - - return 0; } static struct platform_driver ghes_platform_driver = { @@ -1206,42 +1764,35 @@ static struct platform_driver ghes_platform_driver = { .remove = ghes_remove, }; -static int __init ghes_init(void) +void __init acpi_ghes_init(void) { int rc; + acpi_sdei_init(); + if (acpi_disabled) - return -ENODEV; + return; switch (hest_disable) { case HEST_NOT_FOUND: - return -ENODEV; + return; case HEST_DISABLED: pr_info(GHES_PFX "HEST is not enabled!\n"); - return -EINVAL; + return; default: break; } if (ghes_disable) { pr_info(GHES_PFX "GHES is not enabled!\n"); - return -EINVAL; + return; } ghes_nmi_init_cxt(); - rc = ghes_estatus_pool_init(); - if (rc) - goto err; - - rc = ghes_estatus_pool_expand(GHES_ESTATUS_CACHE_AVG_SIZE * - GHES_ESTATUS_CACHE_ALLOCED_MAX); - if (rc) - goto err_pool_exit; - rc = platform_driver_register(&ghes_platform_driver); if (rc) - goto err_pool_exit; + return; rc = apei_osc_setup(); if (rc == 0 && osc_sb_apei_support_acked) @@ -1252,11 +1803,44 @@ static int __init ghes_init(void) pr_info(GHES_PFX "APEI firmware first mode is enabled by APEI bit.\n"); else pr_info(GHES_PFX "Failed to enable APEI firmware first mode.\n"); +} - return 0; -err_pool_exit: - ghes_estatus_pool_exit(); -err: - return rc; +/* + * Known x86 systems that prefer GHES error reporting: + */ +static struct acpi_platform_list plat_list[] = { + {"HPE ", "Server ", 0, ACPI_SIG_FADT, all_versions}, + { } /* End */ +}; + +struct list_head *ghes_get_devices(void) +{ + int idx = -1; + + if (IS_ENABLED(CONFIG_X86)) { + idx = acpi_match_platform_list(plat_list); + if (idx < 0) { + if (!ghes_edac_force_enable) + return NULL; + + pr_warn_once("Force-loading ghes_edac on an unsupported platform. You're on your own!\n"); + } + } else if (list_empty(&ghes_devs)) { + return NULL; + } + + return &ghes_devs; +} +EXPORT_SYMBOL_GPL(ghes_get_devices); + +void ghes_register_report_chain(struct notifier_block *nb) +{ + atomic_notifier_chain_register(&ghes_report_chain, nb); +} +EXPORT_SYMBOL_GPL(ghes_register_report_chain); + +void ghes_unregister_report_chain(struct notifier_block *nb) +{ + atomic_notifier_chain_unregister(&ghes_report_chain, nb); } -device_initcall(ghes_init); +EXPORT_SYMBOL_GPL(ghes_unregister_report_chain); diff --git a/drivers/acpi/apei/hest.c b/drivers/acpi/apei/hest.c index b1e9f81ebeea..20d757687e3d 100644 --- a/drivers/acpi/apei/hest.c +++ b/drivers/acpi/apei/hest.c @@ -1,5 +1,6 @@ +// SPDX-License-Identifier: GPL-2.0-only /* - * APEI Hardware Error Souce Table support + * APEI Hardware Error Source Table support * * HEST describes error sources in detail; communicates operational * parameters (i.e. severity levels, masking bits, and threshold @@ -12,15 +13,6 @@ * * Copyright 2009 Intel Corp. 
* Author: Huang Ying <ying.huang@intel.com> - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version - * 2 as published by the Free Software Foundation; - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. */ #include <linux/kernel.h> @@ -32,6 +24,7 @@ #include <linux/io.h> #include <linux/platform_device.h> #include <acpi/apei.h> +#include <acpi/ghes.h> #include "apei-internal.h" @@ -44,6 +37,20 @@ EXPORT_SYMBOL_GPL(hest_disable); static struct acpi_table_hest *__read_mostly hest_tab; +/* + * Since GHES_ASSIST is not supported, skip initialization of GHES_ASSIST + * structures for MCA. + * During HEST parsing, detected MCA error sources are cached from early + * table entries so that the Flags and Source Id fields from these cached + * values are then referred to in later table entries to determine if the + * encountered GHES_ASSIST structure should be initialized. + */ +static struct { + struct acpi_hest_ia_corrected *cmc; + struct acpi_hest_ia_machine_check *mc; + struct acpi_hest_ia_deferred_check *dmc; +} mces; + static const int hest_esrc_len_tab[ACPI_HEST_TYPE_RESERVED] = { [ACPI_HEST_TYPE_IA32_CHECK] = -1, /* need further calculation */ [ACPI_HEST_TYPE_IA32_CORRECTED_CHECK] = -1, @@ -53,8 +60,15 @@ static const int hest_esrc_len_tab[ACPI_HEST_TYPE_RESERVED] = { [ACPI_HEST_TYPE_AER_BRIDGE] = sizeof(struct acpi_hest_aer_bridge), [ACPI_HEST_TYPE_GENERIC_ERROR] = sizeof(struct acpi_hest_generic), [ACPI_HEST_TYPE_GENERIC_ERROR_V2] = sizeof(struct acpi_hest_generic_v2), + [ACPI_HEST_TYPE_IA32_DEFERRED_CHECK] = -1, }; +static inline bool is_generic_error(struct acpi_hest_header *hest_hdr) +{ + return hest_hdr->type == ACPI_HEST_TYPE_GENERIC_ERROR || + hest_hdr->type == ACPI_HEST_TYPE_GENERIC_ERROR_V2; +} + static int hest_esrc_len(struct acpi_hest_header *hest_hdr) { u16 hest_type = hest_hdr->type; @@ -70,18 +84,57 @@ static int hest_esrc_len(struct acpi_hest_header *hest_hdr) cmc = (struct acpi_hest_ia_corrected *)hest_hdr; len = sizeof(*cmc) + cmc->num_hardware_banks * sizeof(struct acpi_hest_ia_error_bank); + mces.cmc = cmc; } else if (hest_type == ACPI_HEST_TYPE_IA32_CHECK) { struct acpi_hest_ia_machine_check *mc; mc = (struct acpi_hest_ia_machine_check *)hest_hdr; len = sizeof(*mc) + mc->num_hardware_banks * sizeof(struct acpi_hest_ia_error_bank); + mces.mc = mc; + } else if (hest_type == ACPI_HEST_TYPE_IA32_DEFERRED_CHECK) { + struct acpi_hest_ia_deferred_check *mc; + mc = (struct acpi_hest_ia_deferred_check *)hest_hdr; + len = sizeof(*mc) + mc->num_hardware_banks * + sizeof(struct acpi_hest_ia_error_bank); + mces.dmc = mc; } BUG_ON(len == -1); return len; }; -int apei_hest_parse(apei_hest_func_t func, void *data) +/* + * GHES and GHESv2 structures share the same format, starting from + * Source Id and ending in Error Status Block Length (inclusive). 
+ */ +static bool is_ghes_assist_struct(struct acpi_hest_header *hest_hdr) +{ + struct acpi_hest_generic *ghes; + u16 related_source_id; + + if (hest_hdr->type != ACPI_HEST_TYPE_GENERIC_ERROR && + hest_hdr->type != ACPI_HEST_TYPE_GENERIC_ERROR_V2) + return false; + + ghes = (struct acpi_hest_generic *)hest_hdr; + related_source_id = ghes->related_source_id; + + if (mces.cmc && mces.cmc->flags & ACPI_HEST_GHES_ASSIST && + related_source_id == mces.cmc->header.source_id) + return true; + if (mces.mc && mces.mc->flags & ACPI_HEST_GHES_ASSIST && + related_source_id == mces.mc->header.source_id) + return true; + if (mces.dmc && mces.dmc->flags & ACPI_HEST_GHES_ASSIST && + related_source_id == mces.dmc->header.source_id) + return true; + + return false; +} + +typedef int (*apei_hest_func_t)(struct acpi_hest_header *hest_hdr, void *data); + +static int apei_hest_parse(apei_hest_func_t func, void *data) { struct acpi_hest_header *hest_hdr; int i, rc, len; @@ -93,20 +146,25 @@ int apei_hest_parse(apei_hest_func_t func, void *data) for (i = 0; i < hest_tab->error_source_count; i++) { len = hest_esrc_len(hest_hdr); if (!len) { - pr_warning(FW_WARN HEST_PFX - "Unknown or unused hardware error source " - "type: %d for hardware error source: %d.\n", - hest_hdr->type, hest_hdr->source_id); + pr_warn(FW_WARN HEST_PFX + "Unknown or unused hardware error source " + "type: %d for hardware error source: %d.\n", + hest_hdr->type, hest_hdr->source_id); return -EINVAL; } if ((void *)hest_hdr + len > (void *)hest_tab + hest_tab->header.length) { - pr_warning(FW_BUG HEST_PFX + pr_warn(FW_BUG HEST_PFX "Table contents overflow for hardware error source: %d.\n", hest_hdr->source_id); return -EINVAL; } + if (is_ghes_assist_struct(hest_hdr)) { + hest_hdr = (void *)hest_hdr + len; + continue; + } + rc = func(hest_hdr, data); if (rc) return rc; @@ -116,7 +174,6 @@ int apei_hest_parse(apei_hest_func_t func, void *data) return 0; } -EXPORT_SYMBOL_GPL(apei_hest_parse); /* * Check if firmware advertises firmware first mode. 
We need FF bit to be set @@ -142,8 +199,7 @@ static int __init hest_parse_ghes_count(struct acpi_hest_header *hest_hdr, void { int *count = data; - if (hest_hdr->type == ACPI_HEST_TYPE_GENERIC_ERROR || - hest_hdr->type == ACPI_HEST_TYPE_GENERIC_ERROR_V2) + if (is_generic_error(hest_hdr)) (*count)++; return 0; } @@ -154,8 +210,7 @@ static int __init hest_parse_ghes(struct acpi_hest_header *hest_hdr, void *data) struct ghes_arr *ghes_arr = data; int rc, i; - if (hest_hdr->type != ACPI_HEST_TYPE_GENERIC_ERROR && - hest_hdr->type != ACPI_HEST_TYPE_GENERIC_ERROR_V2) + if (!is_generic_error(hest_hdr)) return 0; if (!((struct acpi_hest_generic *)hest_hdr)->enabled) @@ -165,8 +220,8 @@ static int __init hest_parse_ghes(struct acpi_hest_header *hest_hdr, void *data) ghes_dev = ghes_arr->ghes_devs[i]; hdr = *(struct acpi_hest_header **)ghes_dev->dev.platform_data; if (hdr->source_id == hest_hdr->source_id) { - pr_warning(FW_WARN HEST_PFX "Duplicated hardware error source ID: %d.\n", - hdr->source_id); + pr_warn(FW_WARN HEST_PFX "Duplicated hardware error source ID: %d.\n", + hdr->source_id); return -EIO; } } @@ -203,6 +258,11 @@ static int __init hest_ghes_dev_register(unsigned int ghes_count) rc = apei_hest_parse(hest_parse_ghes, &ghes_arr); if (rc) goto err; + + rc = ghes_estatus_pool_init(ghes_count); + if (rc) + goto err; + out: kfree(ghes_arr.ghes_devs); return rc; @@ -215,7 +275,7 @@ err: static int __init setup_hest_disable(char *str) { hest_disable = HEST_DISABLED; - return 0; + return 1; } __setup("hest_disable", setup_hest_disable); @@ -223,7 +283,7 @@ __setup("hest_disable", setup_hest_disable); void __init acpi_hest_init(void) { acpi_status status; - int rc = -ENODEV; + int rc; unsigned int ghes_count = 0; if (hest_disable) { @@ -239,8 +299,8 @@ void __init acpi_hest_init(void) } else if (ACPI_FAILURE(status)) { const char *msg = acpi_format_exception(status); pr_err(HEST_PFX "Failed to get table, %s\n", msg); - rc = -EINVAL; - goto err; + hest_disable = HEST_DISABLED; + return; } rc = apei_hest_parse(hest_parse_cmc, NULL); @@ -251,7 +311,9 @@ void __init acpi_hest_init(void) rc = apei_hest_parse(hest_parse_ghes_count, &ghes_count); if (rc) goto err; - rc = hest_ghes_dev_register(ghes_count); + + if (ghes_count) + rc = hest_ghes_dev_register(ghes_count); if (rc) goto err; } @@ -260,4 +322,5 @@ void __init acpi_hest_init(void) return; err: hest_disable = HEST_DISABLED; + acpi_put_table((struct acpi_table_header *)hest_tab); } diff --git a/drivers/acpi/arm64/Kconfig b/drivers/acpi/arm64/Kconfig index 5a6f80fce0d6..f2fd79f22e7d 100644 --- a/drivers/acpi/arm64/Kconfig +++ b/drivers/acpi/arm64/Kconfig @@ -1,3 +1,4 @@ +# SPDX-License-Identifier: GPL-2.0-only # # ACPI Configuration for ARM64 # @@ -7,3 +8,19 @@ config ACPI_IORT config ACPI_GTDT bool + +config ACPI_AGDI + bool "Arm Generic Diagnostic Dump and Reset Device Interface" + depends on ARM_SDE_INTERFACE + help + Arm Generic Diagnostic Dump and Reset Device Interface (AGDI) is + a standard that enables issuing a non-maskable diagnostic dump and + reset command. + + If set, the kernel parses AGDI table and listens for the command. 
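As a rough illustration of what "listens for the command" means: the agdi.c driver added later in this patch reads the SDEI event number out of the AGDI table, registers a handler for it, and escalates the event straight to a panic. A condensed sketch using only the <linux/arm_sdei.h> calls that driver relies on (agdi_listen() and its argument are illustrative glue, not part of the patch):

	#include <linux/arm_sdei.h>
	#include <linux/kernel.h>

	/* Runs in NMI-like context when firmware raises the AGDI SDEI event. */
	static int agdi_handler(u32 event, struct pt_regs *regs, void *arg)
	{
		nmi_panic(regs, "AGDI diagnostic dump and reset requested");
		return 0;
	}

	/* @sdei_event: event number taken from the AGDI table's sdei_event field. */
	static int agdi_listen(u32 sdei_event)
	{
		int err;

		err = sdei_event_register(sdei_event, agdi_handler, NULL);
		if (err)
			return err;

		return sdei_event_enable(sdei_event);
	}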
+ +config ACPI_APMT + bool + +config ACPI_MPAM + bool diff --git a/drivers/acpi/arm64/Makefile b/drivers/acpi/arm64/Makefile index 1017def2ea12..9390b57cb564 100644 --- a/drivers/acpi/arm64/Makefile +++ b/drivers/acpi/arm64/Makefile @@ -1,2 +1,11 @@ -obj-$(CONFIG_ACPI_IORT) += iort.o +# SPDX-License-Identifier: GPL-2.0-only +obj-$(CONFIG_ACPI_AGDI) += agdi.o +obj-$(CONFIG_ACPI_APMT) += apmt.o +obj-$(CONFIG_ACPI_FFH) += ffh.o obj-$(CONFIG_ACPI_GTDT) += gtdt.o +obj-$(CONFIG_ACPI_IORT) += iort.o +obj-$(CONFIG_ACPI_MPAM) += mpam.o +obj-$(CONFIG_ACPI_PROCESSOR_IDLE) += cpuidle.o +obj-$(CONFIG_ARM_AMBA) += amba.o +obj-y += dma.o init.o +obj-y += thermal_cpufreq.o diff --git a/drivers/acpi/arm64/agdi.c b/drivers/acpi/arm64/agdi.c new file mode 100644 index 000000000000..e0df3daa4abf --- /dev/null +++ b/drivers/acpi/arm64/agdi.c @@ -0,0 +1,122 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * This file implements handling of + * Arm Generic Diagnostic Dump and Reset Interface table (AGDI) + * + * Copyright (c) 2022, Ampere Computing LLC + */ + +#define pr_fmt(fmt) "ACPI: AGDI: " fmt + +#include <linux/acpi.h> +#include <linux/arm_sdei.h> +#include <linux/io.h> +#include <linux/kernel.h> +#include <linux/platform_device.h> +#include "init.h" + +struct agdi_data { + int sdei_event; +}; + +static int agdi_sdei_handler(u32 sdei_event, struct pt_regs *regs, void *arg) +{ + nmi_panic(regs, "Arm Generic Diagnostic Dump and Reset SDEI event issued"); + return 0; +} + +static int agdi_sdei_probe(struct platform_device *pdev, + struct agdi_data *adata) +{ + int err; + + err = sdei_event_register(adata->sdei_event, agdi_sdei_handler, pdev); + if (err) { + dev_err(&pdev->dev, "Failed to register for SDEI event %d", + adata->sdei_event); + return err; + } + + err = sdei_event_enable(adata->sdei_event); + if (err) { + sdei_event_unregister(adata->sdei_event); + dev_err(&pdev->dev, "Failed to enable event %d\n", + adata->sdei_event); + return err; + } + + return 0; +} + +static int agdi_probe(struct platform_device *pdev) +{ + struct agdi_data *adata = dev_get_platdata(&pdev->dev); + + if (!adata) + return -EINVAL; + + return agdi_sdei_probe(pdev, adata); +} + +static void agdi_remove(struct platform_device *pdev) +{ + struct agdi_data *adata = dev_get_platdata(&pdev->dev); + int err, i; + + err = sdei_event_disable(adata->sdei_event); + if (err) { + dev_err(&pdev->dev, "Failed to disable sdei-event #%d (%pe)\n", + adata->sdei_event, ERR_PTR(err)); + return; + } + + for (i = 0; i < 3; i++) { + err = sdei_event_unregister(adata->sdei_event); + if (err != -EINPROGRESS) + break; + + schedule(); + } + + if (err) + dev_err(&pdev->dev, "Failed to unregister sdei-event #%d (%pe)\n", + adata->sdei_event, ERR_PTR(err)); +} + +static struct platform_driver agdi_driver = { + .driver = { + .name = "agdi", + }, + .probe = agdi_probe, + .remove = agdi_remove, +}; + +void __init acpi_agdi_init(void) +{ + struct acpi_table_agdi *agdi_table; + struct agdi_data pdata; + struct platform_device *pdev; + acpi_status status; + + status = acpi_get_table(ACPI_SIG_AGDI, 0, + (struct acpi_table_header **) &agdi_table); + if (ACPI_FAILURE(status)) + return; + + if (agdi_table->flags & ACPI_AGDI_SIGNALING_MODE) { + pr_warn("Interrupt signaling is not supported"); + goto err_put_table; + } + + pdata.sdei_event = agdi_table->sdei_event; + + pdev = platform_device_register_data(NULL, "agdi", 0, &pdata, sizeof(pdata)); + if (IS_ERR(pdev)) + goto err_put_table; + + if (platform_driver_register(&agdi_driver)) + 
platform_device_unregister(pdev); + +err_put_table: + acpi_put_table((struct acpi_table_header *)agdi_table); +} diff --git a/drivers/acpi/acpi_amba.c b/drivers/acpi/arm64/amba.c index 7f77c071709a..1350083bce5f 100644 --- a/drivers/acpi/acpi_amba.c +++ b/drivers/acpi/arm64/amba.c @@ -1,13 +1,10 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * ACPI support for platform bus type. * * Copyright (C) 2015, Linaro Ltd * Author: Graeme Gregory <graeme.gregory@linaro.org> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. */ #include <linux/acpi.h> @@ -20,20 +17,17 @@ #include <linux/kernel.h> #include <linux/module.h> -#include "internal.h" +#include "init.h" static const struct acpi_device_id amba_id_list[] = { {"ARMH0061", 0}, /* PL061 GPIO Device */ + {"ARMH0330", 0}, /* ARM DMA Controller DMA-330 */ {"", 0}, }; static void amba_register_dummy_clk(void) { - static struct clk *amba_dummy_clk; - - /* If clock already registered */ - if (amba_dummy_clk) - return; + struct clk *amba_dummy_clk; amba_dummy_clk = clk_register_fixed_rate(NULL, "apb_pclk", NULL, 0, 0); clk_register_clkdev(amba_dummy_clk, "apb_pclk", NULL); @@ -42,6 +36,7 @@ static void amba_register_dummy_clk(void) static int amba_handler_attach(struct acpi_device *adev, const struct acpi_device_id *id) { + struct acpi_device *parent = acpi_dev_parent(adev); struct amba_device *dev; struct resource_entry *rentry; struct list_head resource_list; @@ -70,6 +65,7 @@ static int amba_handler_attach(struct acpi_device *adev, case IORESOURCE_MEM: if (!address_found) { dev->res = *rentry->res; + dev->res.name = dev_name(&dev->dev); address_found = true; } break; @@ -90,10 +86,10 @@ static int amba_handler_attach(struct acpi_device *adev, * attached to it, that physical device should be the parent of * the amba device we are about to create. */ - if (adev->parent) - dev->dev.parent = acpi_get_first_physical_node(adev->parent); + if (parent) + dev->dev.parent = acpi_get_first_physical_node(parent); - ACPI_COMPANION_SET(&dev->dev, adev); + device_set_node(&dev->dev, acpi_fwnode_handle(adev)); ret = amba_device_add(dev, &iomem_resource); if (ret) { diff --git a/drivers/acpi/arm64/apmt.c b/drivers/acpi/arm64/apmt.c new file mode 100644 index 000000000000..bb010f6164e5 --- /dev/null +++ b/drivers/acpi/arm64/apmt.c @@ -0,0 +1,180 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * ARM APMT table support. + * Design document number: ARM DEN0117. + * + * Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. + * + */ + +#define pr_fmt(fmt) "ACPI: APMT: " fmt + +#include <linux/acpi.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/platform_device.h> +#include "init.h" + +#define DEV_NAME "arm-cs-arch-pmu" + +/* There can be up to 3 resources: page 0 and 1 address, and interrupt. 
*/ +#define DEV_MAX_RESOURCE_COUNT 3 + +/* Root pointer to the mapped APMT table */ +static struct acpi_table_header *apmt_table; + +static int __init apmt_init_resources(struct resource *res, + struct acpi_apmt_node *node) +{ + int irq, trigger; + int num_res = 0; + + res[num_res].start = node->base_address0; + res[num_res].end = node->base_address0 + SZ_4K - 1; + res[num_res].flags = IORESOURCE_MEM; + + num_res++; + + if (node->flags & ACPI_APMT_FLAGS_DUAL_PAGE) { + res[num_res].start = node->base_address1; + res[num_res].end = node->base_address1 + SZ_4K - 1; + res[num_res].flags = IORESOURCE_MEM; + + num_res++; + } + + if (node->ovflw_irq != 0) { + trigger = (node->ovflw_irq_flags & ACPI_APMT_OVFLW_IRQ_FLAGS_MODE); + trigger = (trigger == ACPI_APMT_OVFLW_IRQ_FLAGS_MODE_LEVEL) ? + ACPI_LEVEL_SENSITIVE : ACPI_EDGE_SENSITIVE; + irq = acpi_register_gsi(NULL, node->ovflw_irq, trigger, + ACPI_ACTIVE_HIGH); + + if (irq <= 0) { + pr_warn("APMT could not register gsi hwirq %d\n", irq); + return num_res; + } + + res[num_res].start = irq; + res[num_res].end = irq; + res[num_res].flags = IORESOURCE_IRQ; + + num_res++; + } + + return num_res; +} + +/** + * apmt_add_platform_device() - Allocate a platform device for APMT node + * @node: Pointer to device ACPI APMT node + * @fwnode: fwnode associated with the APMT node + * + * Returns: 0 on success, <0 failure + */ +static int __init apmt_add_platform_device(struct acpi_apmt_node *node, + struct fwnode_handle *fwnode) +{ + struct platform_device *pdev; + int ret, count; + struct resource res[DEV_MAX_RESOURCE_COUNT]; + + pdev = platform_device_alloc(DEV_NAME, PLATFORM_DEVID_AUTO); + if (!pdev) + return -ENOMEM; + + memset(res, 0, sizeof(res)); + + count = apmt_init_resources(res, node); + + ret = platform_device_add_resources(pdev, res, count); + if (ret) + goto dev_put; + + /* + * Add a copy of APMT node pointer to platform_data to be used to + * retrieve APMT data information. + */ + ret = platform_device_add_data(pdev, &node, sizeof(node)); + if (ret) + goto dev_put; + + pdev->dev.fwnode = fwnode; + + ret = platform_device_add(pdev); + + if (ret) + goto dev_put; + + return 0; + +dev_put: + platform_device_put(pdev); + + return ret; +} + +static int __init apmt_init_platform_devices(void) +{ + struct acpi_apmt_node *apmt_node; + struct acpi_table_apmt *apmt; + struct fwnode_handle *fwnode; + u64 offset, end; + int ret; + + /* + * apmt_table and apmt both point to the start of APMT table, but + * have different struct types + */ + apmt = (struct acpi_table_apmt *)apmt_table; + offset = sizeof(*apmt); + end = apmt->header.length; + + while (offset < end) { + apmt_node = ACPI_ADD_PTR(struct acpi_apmt_node, apmt, + offset); + + fwnode = acpi_alloc_fwnode_static(); + if (!fwnode) + return -ENOMEM; + + ret = apmt_add_platform_device(apmt_node, fwnode); + if (ret) { + acpi_free_fwnode_static(fwnode); + return ret; + } + + offset += apmt_node->length; + } + + return 0; +} + +void __init acpi_apmt_init(void) +{ + acpi_status status; + int ret; + + /** + * APMT table nodes will be used at runtime after the apmt init, + * so we don't need to call acpi_put_table() to release + * the APMT table mapping. 
+ */ + status = acpi_get_table(ACPI_SIG_APMT, 0, &apmt_table); + + if (ACPI_FAILURE(status)) { + if (status != AE_NOT_FOUND) { + const char *msg = acpi_format_exception(status); + + pr_err("Failed to get APMT table, %s\n", msg); + } + + return; + } + + ret = apmt_init_platform_devices(); + if (ret) { + pr_err("Failed to initialize APMT platform devices, ret: %d\n", ret); + acpi_put_table(apmt_table); + } +} diff --git a/drivers/acpi/arm64/cpuidle.c b/drivers/acpi/arm64/cpuidle.c new file mode 100644 index 000000000000..801f9c450142 --- /dev/null +++ b/drivers/acpi/arm64/cpuidle.c @@ -0,0 +1,70 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * ARM64 CPU idle arch support + * + * Copyright (C) 2014 ARM Ltd. + * Author: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com> + */ + +#include <linux/acpi.h> +#include <linux/cpuidle.h> +#include <linux/cpu_pm.h> +#include <linux/psci.h> +#include <acpi/processor.h> + +#define ARM64_LPI_IS_RETENTION_STATE(arch_flags) (!(arch_flags)) + +static int psci_acpi_cpu_init_idle(unsigned int cpu) +{ + int i, count; + struct acpi_lpi_state *lpi; + struct acpi_processor *pr = per_cpu(processors, cpu); + + if (unlikely(!pr || !pr->flags.has_lpi)) + return -EINVAL; + + /* + * If the PSCI cpu_suspend function hook has not been initialized + * idle states must not be enabled, so bail out + */ + if (!psci_ops.cpu_suspend) + return -EOPNOTSUPP; + + count = pr->power.count - 1; + if (count <= 0) + return -ENODEV; + + for (i = 0; i < count; i++) { + u32 state; + + lpi = &pr->power.lpi_states[i + 1]; + /* + * Only bits[31:0] represent a PSCI power_state while + * bits[63:32] must be 0x0 as per ARM ACPI FFH Specification + */ + state = lpi->address; + if (!psci_power_state_is_valid(state)) { + pr_warn("Invalid PSCI power state %#x\n", state); + return -EINVAL; + } + } + + return 0; +} + +int acpi_processor_ffh_lpi_probe(unsigned int cpu) +{ + return psci_acpi_cpu_init_idle(cpu); +} + +__cpuidle int acpi_processor_ffh_lpi_enter(struct acpi_lpi_state *lpi) +{ + u32 state = lpi->address; + + if (ARM64_LPI_IS_RETENTION_STATE(lpi->arch_flags)) + return CPU_PM_CPU_IDLE_ENTER_RETENTION_PARAM_RCU(psci_cpu_suspend_enter, + lpi->index, state); + else + return CPU_PM_CPU_IDLE_ENTER_PARAM_RCU(psci_cpu_suspend_enter, + lpi->index, state); +} diff --git a/drivers/acpi/arm64/dma.c b/drivers/acpi/arm64/dma.c new file mode 100644 index 000000000000..f30f138352b7 --- /dev/null +++ b/drivers/acpi/arm64/dma.c @@ -0,0 +1,52 @@ +// SPDX-License-Identifier: GPL-2.0-only +#include <linux/acpi.h> +#include <linux/acpi_iort.h> +#include <linux/device.h> +#include <linux/dma-direct.h> + +void acpi_arch_dma_setup(struct device *dev) +{ + int ret; + u64 end, mask; + const struct bus_dma_region *map = NULL; + + /* + * If @dev is expected to be DMA-capable then the bus code that created + * it should have initialised its dma_mask pointer by this point. For + * now, we'll continue the legacy behaviour of coercing it to the + * coherent mask if not, but we'll no longer do so quietly. 
+ */ + if (!dev->dma_mask) { + dev_warn(dev, "DMA mask not set\n"); + dev->dma_mask = &dev->coherent_dma_mask; + } + + if (dev->coherent_dma_mask) + end = dev->coherent_dma_mask; + else + end = (1ULL << 32) - 1; + + if (dev->dma_range_map) { + dev_dbg(dev, "dma_range_map already set\n"); + return; + } + + ret = acpi_dma_get_range(dev, &map); + if (!ret && map) { + end = dma_range_map_max(map); + dev->dma_range_map = map; + } + + if (ret == -ENODEV) + ret = iort_dma_get_ranges(dev, &end); + if (!ret) { + /* + * Limit coherent and dma mask based on size retrieved from + * firmware. + */ + mask = DMA_BIT_MASK(ilog2(end) + 1); + dev->bus_dma_limit = end; + dev->coherent_dma_mask = min(dev->coherent_dma_mask, mask); + *dev->dma_mask = min(*dev->dma_mask, mask); + } +} diff --git a/drivers/acpi/arm64/ffh.c b/drivers/acpi/arm64/ffh.c new file mode 100644 index 000000000000..877edc6557e9 --- /dev/null +++ b/drivers/acpi/arm64/ffh.c @@ -0,0 +1,107 @@ +// SPDX-License-Identifier: GPL-2.0-only +#include <linux/acpi.h> +#include <linux/arm-smccc.h> +#include <linux/slab.h> + +/* + * Implements ARM64 specific callbacks to support ACPI FFH Operation Region as + * specified in https://developer.arm.com/docs/den0048/latest + */ +struct acpi_ffh_data { + struct acpi_ffh_info info; + void (*invoke_ffh_fn)(unsigned long a0, unsigned long a1, + unsigned long a2, unsigned long a3, + unsigned long a4, unsigned long a5, + unsigned long a6, unsigned long a7, + struct arm_smccc_res *args, + struct arm_smccc_quirk *res); + void (*invoke_ffh64_fn)(const struct arm_smccc_1_2_regs *args, + struct arm_smccc_1_2_regs *res); +}; + +int acpi_ffh_address_space_arch_setup(void *handler_ctxt, void **region_ctxt) +{ + enum arm_smccc_conduit conduit; + struct acpi_ffh_data *ffh_ctxt; + + if (arm_smccc_get_version() < ARM_SMCCC_VERSION_1_2) + return -EOPNOTSUPP; + + conduit = arm_smccc_1_1_get_conduit(); + if (conduit == SMCCC_CONDUIT_NONE) { + pr_err("%s: invalid SMCCC conduit\n", __func__); + return -EOPNOTSUPP; + } + + ffh_ctxt = kzalloc(sizeof(*ffh_ctxt), GFP_KERNEL); + if (!ffh_ctxt) + return -ENOMEM; + + if (conduit == SMCCC_CONDUIT_SMC) { + ffh_ctxt->invoke_ffh_fn = __arm_smccc_smc; + ffh_ctxt->invoke_ffh64_fn = arm_smccc_1_2_smc; + } else { + ffh_ctxt->invoke_ffh_fn = __arm_smccc_hvc; + ffh_ctxt->invoke_ffh64_fn = arm_smccc_1_2_hvc; + } + + memcpy(ffh_ctxt, handler_ctxt, sizeof(ffh_ctxt->info)); + + *region_ctxt = ffh_ctxt; + return AE_OK; +} + +static bool acpi_ffh_smccc_owner_allowed(u32 fid) +{ + int owner = ARM_SMCCC_OWNER_NUM(fid); + + if (owner == ARM_SMCCC_OWNER_STANDARD || + owner == ARM_SMCCC_OWNER_SIP || owner == ARM_SMCCC_OWNER_OEM) + return true; + + return false; +} + +int acpi_ffh_address_space_arch_handler(acpi_integer *value, void *region_context) +{ + int ret = 0; + struct acpi_ffh_data *ffh_ctxt = region_context; + + if (ffh_ctxt->info.offset == 0) { + /* SMC/HVC 32bit call */ + struct arm_smccc_res res; + u32 a[8] = { 0 }, *ptr = (u32 *)value; + + if (!ARM_SMCCC_IS_FAST_CALL(*ptr) || ARM_SMCCC_IS_64(*ptr) || + !acpi_ffh_smccc_owner_allowed(*ptr) || + ffh_ctxt->info.length > 32) { + ret = AE_ERROR; + } else { + int idx, len = ffh_ctxt->info.length >> 2; + + for (idx = 0; idx < len; idx++) + a[idx] = *(ptr + idx); + + ffh_ctxt->invoke_ffh_fn(a[0], a[1], a[2], a[3], a[4], + a[5], a[6], a[7], &res, NULL); + memcpy(value, &res, sizeof(res)); + } + + } else if (ffh_ctxt->info.offset == 1) { + /* SMC/HVC 64bit call */ + struct arm_smccc_1_2_regs *r = (struct arm_smccc_1_2_regs *)value; + + if 
(!ARM_SMCCC_IS_FAST_CALL(r->a0) || !ARM_SMCCC_IS_64(r->a0) || + !acpi_ffh_smccc_owner_allowed(r->a0) || + ffh_ctxt->info.length > sizeof(*r)) { + ret = AE_ERROR; + } else { + ffh_ctxt->invoke_ffh64_fn(r, r); + memcpy(value, r, ffh_ctxt->info.length); + } + } else { + ret = AE_ERROR; + } + + return ret; +} diff --git a/drivers/acpi/arm64/gtdt.c b/drivers/acpi/arm64/gtdt.c index 92f9edf9d11e..ffc867bac2d6 100644 --- a/drivers/acpi/arm64/gtdt.c +++ b/drivers/acpi/arm64/gtdt.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * ARM Specific GTDT table Support * @@ -5,10 +6,6 @@ * Author: Daniel Lezcano <daniel.lezcano@linaro.org> * Fu Wei <fu.wei@linaro.org> * Hanjun Guo <hanjun.guo@linaro.org> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. */ #include <linux/acpi.h> @@ -39,19 +36,25 @@ struct acpi_gtdt_descriptor { static struct acpi_gtdt_descriptor acpi_gtdt_desc __initdata; -static inline void *next_platform_timer(void *platform_timer) +static __init bool platform_timer_valid(void *platform_timer) { struct acpi_gtdt_header *gh = platform_timer; - platform_timer += gh->length; - if (platform_timer < acpi_gtdt_desc.gtdt_end) - return platform_timer; + return (platform_timer >= (void *)(acpi_gtdt_desc.gtdt + 1) && + platform_timer < acpi_gtdt_desc.gtdt_end && + gh->length != 0 && + platform_timer + gh->length <= acpi_gtdt_desc.gtdt_end); +} + +static __init void *next_platform_timer(void *platform_timer) +{ + struct acpi_gtdt_header *gh = platform_timer; - return NULL; + return platform_timer + gh->length; } #define for_each_platform_timer(_g) \ - for (_g = acpi_gtdt_desc.platform_timer; _g; \ + for (_g = acpi_gtdt_desc.platform_timer; platform_timer_valid(_g);\ _g = next_platform_timer(_g)) static inline bool is_timer_block(void *platform_timer) @@ -160,6 +163,7 @@ int __init acpi_gtdt_init(struct acpi_table_header *table, { void *platform_timer; struct acpi_table_gtdt *gtdt; + u32 cnt = 0; gtdt = container_of(table, struct acpi_table_gtdt, header); acpi_gtdt_desc.gtdt = gtdt; @@ -179,14 +183,22 @@ int __init acpi_gtdt_init(struct acpi_table_header *table, return 0; } - platform_timer = (void *)gtdt + gtdt->platform_timer_offset; - if (platform_timer < (void *)table + sizeof(struct acpi_table_gtdt)) { - pr_err(FW_BUG "invalid timer data.\n"); - return -EINVAL; + acpi_gtdt_desc.platform_timer = (void *)gtdt + gtdt->platform_timer_offset; + for_each_platform_timer(platform_timer) + cnt++; + + if (cnt != gtdt->platform_timer_count) { + cnt = min(cnt, gtdt->platform_timer_count); + pr_err(FW_BUG "limiting Platform Timer count to %d\n", cnt); + } + + if (!cnt) { + acpi_gtdt_desc.platform_timer = NULL; + return 0; } - acpi_gtdt_desc.platform_timer = platform_timer; + if (platform_timer_count) - *platform_timer_count = gtdt->platform_timer_count; + *platform_timer_count = cnt; return 0; } @@ -286,45 +298,11 @@ error: if (frame->virt_irq > 0) acpi_unregister_gsi(gtdt_frame->virtual_timer_interrupt); frame->virt_irq = 0; - } while (i-- >= 0 && gtdt_frame--); + } while (i-- > 0 && gtdt_frame--); return -EINVAL; } -/** - * acpi_arch_timer_mem_init() - Get the info of all GT blocks in GTDT table. - * @timer_mem: The pointer to the array of struct arch_timer_mem for returning - * the result of parsing. The element number of this array should - * be platform_timer_count(the total number of platform timers). 
- * @timer_count: It points to a integer variable which is used for storing the - * number of GT blocks we have parsed. - * - * Return: 0 if success, -EINVAL/-ENODEV if error. - */ -int __init acpi_arch_timer_mem_init(struct arch_timer_mem *timer_mem, - int *timer_count) -{ - int ret; - void *platform_timer; - - *timer_count = 0; - for_each_platform_timer(platform_timer) { - if (is_timer_block(platform_timer)) { - ret = gtdt_parse_timer_block(platform_timer, timer_mem); - if (ret) - return ret; - timer_mem++; - (*timer_count)++; - } - } - - if (*timer_count) - pr_info("found %d memory-mapped timer block(s).\n", - *timer_count); - - return 0; -} - /* * Initialize a SBSA generic Watchdog platform device info from GTDT */ @@ -332,7 +310,7 @@ static int __init gtdt_import_sbsa_gwdt(struct acpi_gtdt_watchdog *wd, int index) { struct platform_device *pdev; - int irq = map_gt_gsi(wd->timer_interrupt, wd->timer_flags); + int irq; /* * According to SBSA specification the size of refresh and control @@ -341,7 +319,7 @@ static int __init gtdt_import_sbsa_gwdt(struct acpi_gtdt_watchdog *wd, struct resource res[] = { DEFINE_RES_MEM(wd->control_frame_address, SZ_4K), DEFINE_RES_MEM(wd->refresh_frame_address, SZ_4K), - DEFINE_RES_IRQ(irq), + {}, }; int nr_res = ARRAY_SIZE(res); @@ -351,10 +329,11 @@ static int __init gtdt_import_sbsa_gwdt(struct acpi_gtdt_watchdog *wd, if (!(wd->refresh_frame_address && wd->control_frame_address)) { pr_err(FW_BUG "failed to get the Watchdog base address.\n"); - acpi_unregister_gsi(wd->timer_interrupt); return -EINVAL; } + irq = map_gt_gsi(wd->timer_interrupt, wd->timer_flags); + res[2] = DEFINE_RES_IRQ(irq); if (irq <= 0) { pr_warn("failed to map the Watchdog interrupt.\n"); nr_res--; @@ -367,18 +346,19 @@ static int __init gtdt_import_sbsa_gwdt(struct acpi_gtdt_watchdog *wd, */ pdev = platform_device_register_simple("sbsa-gwdt", index, res, nr_res); if (IS_ERR(pdev)) { - acpi_unregister_gsi(wd->timer_interrupt); + if (irq > 0) + acpi_unregister_gsi(wd->timer_interrupt); return PTR_ERR(pdev); } return 0; } -static int __init gtdt_sbsa_gwdt_init(void) +static int __init gtdt_platform_timer_init(void) { void *platform_timer; struct acpi_table_header *table; - int ret, timer_count, gwdt_count = 0; + int ret, timer_count, gwdt_count = 0, mmio_timer_count = 0; if (acpi_disabled) return 0; @@ -397,21 +377,44 @@ static int __init gtdt_sbsa_gwdt_init(void) */ ret = acpi_gtdt_init(table, &timer_count); if (ret || !timer_count) - return ret; + goto out_put_gtdt; for_each_platform_timer(platform_timer) { + ret = 0; + if (is_non_secure_watchdog(platform_timer)) { ret = gtdt_import_sbsa_gwdt(platform_timer, gwdt_count); if (ret) - break; + continue; gwdt_count++; + } else if (is_timer_block(platform_timer)) { + struct arch_timer_mem atm = {}; + struct platform_device *pdev; + + ret = gtdt_parse_timer_block(platform_timer, &atm); + if (ret) + continue; + + pdev = platform_device_register_data(NULL, "gtdt-arm-mmio-timer", + mmio_timer_count, &atm, + sizeof(atm)); + if (IS_ERR(pdev)) { + pr_err("Can't register timer %d\n", mmio_timer_count); + continue; + } + + mmio_timer_count++; } } if (gwdt_count) pr_info("found %d SBSA generic Watchdog(s).\n", gwdt_count); + if (mmio_timer_count) + pr_info("found %d Generic MMIO timer(s).\n", mmio_timer_count); +out_put_gtdt: + acpi_put_table(table); return ret; } -device_initcall(gtdt_sbsa_gwdt_init); +device_initcall(gtdt_platform_timer_init); diff --git a/drivers/acpi/arm64/init.c b/drivers/acpi/arm64/init.c new file mode 100644 index 
000000000000..7a47d8095a7d --- /dev/null +++ b/drivers/acpi/arm64/init.c @@ -0,0 +1,15 @@ +// SPDX-License-Identifier: GPL-2.0-only +#include <linux/acpi.h> +#include "init.h" + +void __init acpi_arch_init(void) +{ + if (IS_ENABLED(CONFIG_ACPI_AGDI)) + acpi_agdi_init(); + if (IS_ENABLED(CONFIG_ACPI_APMT)) + acpi_apmt_init(); + if (IS_ENABLED(CONFIG_ACPI_IORT)) + acpi_iort_init(); + if (IS_ENABLED(CONFIG_ARM_AMBA)) + acpi_amba_init(); +} diff --git a/drivers/acpi/arm64/init.h b/drivers/acpi/arm64/init.h new file mode 100644 index 000000000000..dcc277977194 --- /dev/null +++ b/drivers/acpi/arm64/init.h @@ -0,0 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +#include <linux/init.h> + +void __init acpi_agdi_init(void); +void __init acpi_apmt_init(void); +void __init acpi_iort_init(void); +void __init acpi_amba_init(void); diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c index e48894e002ba..65f0f56ad753 100644 --- a/drivers/acpi/arm64/iort.c +++ b/drivers/acpi/arm64/iort.c @@ -1,16 +1,8 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2016, Semihalf * Author: Tomasz Nowicki <tn@semihalf.com> * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * * This file implements early detection/parsing of I/O mapping * reported to OS through firmware via I/O Remapping Table (IORT) * IORT document number: ARM DEN 0049A @@ -19,12 +11,15 @@ #define pr_fmt(fmt) "ACPI: IORT: " fmt #include <linux/acpi_iort.h> +#include <linux/bitfield.h> #include <linux/iommu.h> #include <linux/kernel.h> #include <linux/list.h> #include <linux/pci.h> #include <linux/platform_device.h> #include <linux/slab.h> +#include <linux/dma-map-ops.h> +#include "init.h" #define IORT_TYPE_MASK(type) (1 << (type)) #define IORT_MSI_TYPE (1 << ACPI_IORT_NODE_ITS_GROUP) @@ -50,7 +45,7 @@ static DEFINE_SPINLOCK(iort_fwnode_lock); * iort_set_fwnode() - Create iort_fwnode and use it to register * iommu data in the iort_fwnode_list * - * @node: IORT table node associated with the IOMMU + * @iort_node: IORT table node associated with the IOMMU * @fwnode: fwnode associated with the IORT node * * Returns: 0 on success @@ -271,15 +266,31 @@ static acpi_status iort_match_node_callback(struct acpi_iort_node *node, if (node->type == ACPI_IORT_NODE_NAMED_COMPONENT) { struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL }; - struct acpi_device *adev = to_acpi_device_node(dev->fwnode); + struct acpi_device *adev; struct acpi_iort_named_component *ncomp; + struct device *nc_dev = dev; + + /* + * Walk the device tree to find a device with an + * ACPI companion; there is no point in scanning + * IORT for a device matching a named component if + * the device does not have an ACPI companion to + * start with. 
+ */ + do { + adev = ACPI_COMPANION(nc_dev); + if (adev) + break; + + nc_dev = nc_dev->parent; + } while (nc_dev); if (!adev) goto out; status = acpi_get_name(adev->handle, ACPI_FULL_PATHNAME, &buf); if (ACPI_FAILURE(status)) { - dev_warn(dev, "Can't get device full path name\n"); + dev_warn(nc_dev, "Can't get device full path name\n"); goto out; } @@ -307,7 +318,7 @@ out: } static int iort_id_map(struct acpi_iort_id_mapping *map, u8 type, u32 rid_in, - u32 *rid_out) + u32 *rid_out, bool check_overlap) { /* Single mapping does not care for input id */ if (map->flags & ACPI_IORT_ID_SINGLE_MAPPING) { @@ -323,10 +334,36 @@ static int iort_id_map(struct acpi_iort_id_mapping *map, u8 type, u32 rid_in, } if (rid_in < map->input_base || - (rid_in >= map->input_base + map->id_count)) + (rid_in > map->input_base + map->id_count)) return -ENXIO; + if (check_overlap) { + /* + * We already found a mapping for this input ID at the end of + * another region. If it coincides with the start of this + * region, we assume the prior match was due to the off-by-1 + * issue mentioned below, and allow it to be superseded. + * Otherwise, things are *really* broken, and we just disregard + * duplicate matches entirely to retain compatibility. + */ + pr_err(FW_BUG "[map %p] conflicting mapping for input ID 0x%x\n", + map, rid_in); + if (rid_in != map->input_base) + return -ENXIO; + + pr_err(FW_BUG "applying workaround.\n"); + } + *rid_out = map->output_base + (rid_in - map->input_base); + + /* + * Due to confusion regarding the meaning of the id_count field (which + * carries the number of IDs *minus 1*), we may have to disregard this + * match if it is at the end of the range, and overlaps with the start + * of another one. + */ + if (map->id_count > 0 && rid_in == map->input_base + map->id_count) + return -EAGAIN; return 0; } @@ -356,7 +393,8 @@ static struct acpi_iort_node *iort_node_get_id(struct acpi_iort_node *node, if (map->flags & ACPI_IORT_ID_SINGLE_MAPPING) { if (node->type == ACPI_IORT_NODE_NAMED_COMPONENT || node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX || - node->type == ACPI_IORT_NODE_SMMU_V3) { + node->type == ACPI_IORT_NODE_SMMU_V3 || + node->type == ACPI_IORT_NODE_PMCG) { *id_out = map->output_base; return parent; } @@ -365,9 +403,14 @@ static struct acpi_iort_node *iort_node_get_id(struct acpi_iort_node *node, return NULL; } +#ifndef ACPI_IORT_SMMU_V3_DEVICEID_VALID +#define ACPI_IORT_SMMU_V3_DEVICEID_VALID (1 << 4) +#endif + static int iort_get_id_mapping_index(struct acpi_iort_node *node) { struct acpi_iort_smmu_v3 *smmu; + struct acpi_iort_pmcg *pmcg; switch (node->type) { case ACPI_IORT_NODE_SMMU_V3: @@ -380,12 +423,16 @@ static int iort_get_id_mapping_index(struct acpi_iort_node *node) smmu = (struct acpi_iort_smmu_v3 *)node->node_data; /* - * ID mapping index is only ignored if all interrupts are - * GSIV based + * Until IORT E.e (node rev. 5), the ID mapping index was + * defined to be valid unless all interrupts are GSIV-based. 
*/ - if (smmu->event_gsiv && smmu->pri_gsiv && smmu->gerr_gsiv - && smmu->sync_gsiv) + if (node->revision < 5) { + if (smmu->event_gsiv && smmu->pri_gsiv && + smmu->gerr_gsiv && smmu->sync_gsiv) + return -EINVAL; + } else if (!(smmu->flags & ACPI_IORT_SMMU_V3_DEVICEID_VALID)) { return -EINVAL; + } if (smmu->id_mapping_index >= node->mapping_count) { pr_err(FW_BUG "[node %p type %d] ID mapping index overflows valid mappings\n", @@ -394,6 +441,12 @@ static int iort_get_id_mapping_index(struct acpi_iort_node *node) } return smmu->id_mapping_index; + case ACPI_IORT_NODE_PMCG: + pmcg = (struct acpi_iort_pmcg *)node->node_data; + if (pmcg->overflow_gsiv || node->mapping_count == 0) + return -EINVAL; + + return 0; default: return -EINVAL; } @@ -408,7 +461,8 @@ static struct acpi_iort_node *iort_node_map_id(struct acpi_iort_node *node, /* Parse the ID mapping tree to find specified node type */ while (node) { struct acpi_iort_id_mapping *map; - int i, index; + int i, index, rc = 0; + u32 out_ref = 0, map_id = id; if (IORT_TYPE_MASK(node->type) & type_mask) { if (id_out) @@ -442,15 +496,18 @@ static struct acpi_iort_node *iort_node_map_id(struct acpi_iort_node *node, if (i == index) continue; - if (!iort_id_map(map, node->type, id, &id)) + rc = iort_id_map(map, node->type, map_id, &id, out_ref); + if (!rc) break; + if (rc == -EAGAIN) + out_ref = map->output_reference; } - if (i == node->mapping_count) + if (i == node->mapping_count && !out_ref) goto fail_map; node = ACPI_ADD_PTR(struct acpi_iort_node, iort_table, - map->output_reference); + rc ? out_ref : map->output_reference); } fail_map: @@ -503,7 +560,6 @@ static struct acpi_iort_node *iort_find_dev_node(struct device *dev) node = iort_get_iort_node(dev->fwnode); if (node) return node; - /* * if not, then it should be a platform device defined in * DSDT/SSDT (with Named Component node in IORT) @@ -512,32 +568,29 @@ static struct acpi_iort_node *iort_find_dev_node(struct device *dev) iort_match_node_callback, dev); } - /* Find a PCI root bus */ pbus = to_pci_dev(dev)->bus; - while (!pci_is_root_bus(pbus)) - pbus = pbus->parent; return iort_scan_node(ACPI_IORT_NODE_PCI_ROOT_COMPLEX, iort_match_node_callback, &pbus->dev); } /** - * iort_msi_map_rid() - Map a MSI requester ID for a device + * iort_msi_map_id() - Map a MSI input ID for a device * @dev: The device for which the mapping is to be done. - * @req_id: The device requester ID. + * @input_id: The device input ID. * - * Returns: mapped MSI RID on success, input requester ID otherwise + * Returns: mapped MSI ID on success, input ID otherwise */ -u32 iort_msi_map_rid(struct device *dev, u32 req_id) +u32 iort_msi_map_id(struct device *dev, u32 input_id) { struct acpi_iort_node *node; u32 dev_id; node = iort_find_dev_node(dev); if (!node) - return req_id; + return input_id; - iort_node_map_id(node, req_id, &dev_id, IORT_MSI_TYPE); + iort_node_map_id(node, input_id, &dev_id, IORT_MSI_TYPE); return dev_id; } @@ -594,13 +647,13 @@ static int __maybe_unused iort_find_its_base(u32 its_id, phys_addr_t *base) /** * iort_dev_find_its_id() - Find the ITS identifier for a device * @dev: The device. - * @req_id: Device's requester ID + * @id: Device's ID * @idx: Index of the ITS identifier list. * @its_id: ITS identifier. 
* * Returns: 0 on success, appropriate error value otherwise */ -static int iort_dev_find_its_id(struct device *dev, u32 req_id, +static int iort_dev_find_its_id(struct device *dev, u32 id, unsigned int idx, int *its_id) { struct acpi_iort_its_group *its; @@ -610,14 +663,14 @@ static int iort_dev_find_its_id(struct device *dev, u32 req_id, if (!node) return -ENXIO; - node = iort_node_map_id(node, req_id, NULL, IORT_MSI_TYPE); + node = iort_node_map_id(node, id, NULL, IORT_MSI_TYPE); if (!node) return -ENXIO; /* Move to ITS specific data */ its = (struct acpi_iort_its_group *)node->node_data; - if (idx > its->its_count) { - dev_err(dev, "requested ITS ID index [%d] is greater than available [%d]\n", + if (idx >= its->its_count) { + dev_err(dev, "requested ITS ID index [%d] overruns ITS entries [%d]\n", idx, its->its_count); return -ENXIO; } @@ -629,23 +682,25 @@ static int iort_dev_find_its_id(struct device *dev, u32 req_id, /** * iort_get_device_domain() - Find MSI domain related to a device * @dev: The device. - * @req_id: Requester ID for the device. + * @id: Requester ID for the device. + * @bus_token: irq domain bus token. * * Returns: the MSI domain for this device, NULL otherwise */ -struct irq_domain *iort_get_device_domain(struct device *dev, u32 req_id) +struct irq_domain *iort_get_device_domain(struct device *dev, u32 id, + enum irq_domain_bus_token bus_token) { struct fwnode_handle *handle; int its_id; - if (iort_dev_find_its_id(dev, req_id, 0, &its_id)) + if (iort_dev_find_its_id(dev, id, 0, &its_id)) return NULL; handle = iort_find_domain_token(its_id); if (!handle) return NULL; - return irq_find_matching_fwnode(handle, DOMAIN_BUS_PCI_MSI); + return irq_find_matching_fwnode(handle, bus_token); } static void iort_set_device_domain(struct device *dev, @@ -741,98 +796,328 @@ void acpi_configure_pmsi_domain(struct device *dev) dev_set_msi_domain(dev, msi_domain); } -static int __maybe_unused __get_pci_rid(struct pci_dev *pdev, u16 alias, - void *data) +#ifdef CONFIG_IOMMU_API +static void iort_rmr_free(struct device *dev, + struct iommu_resv_region *region) { - u32 *rid = data; + struct iommu_iort_rmr_data *rmr_data; - *rid = alias; - return 0; + rmr_data = container_of(region, struct iommu_iort_rmr_data, rr); + kfree(rmr_data->sids); + kfree(rmr_data); } -static int arm_smmu_iort_xlate(struct device *dev, u32 streamid, - struct fwnode_handle *fwnode, - const struct iommu_ops *ops) +static struct iommu_iort_rmr_data *iort_rmr_alloc( + struct acpi_iort_rmr_desc *rmr_desc, + int prot, enum iommu_resv_type type, + u32 *sids, u32 num_sids) { - int ret = iommu_fwspec_init(dev, fwnode, ops); + struct iommu_iort_rmr_data *rmr_data; + struct iommu_resv_region *region; + u32 *sids_copy; + u64 addr = rmr_desc->base_address, size = rmr_desc->length; - if (!ret) - ret = iommu_fwspec_add_ids(dev, &streamid, 1); + rmr_data = kmalloc(sizeof(*rmr_data), GFP_KERNEL); + if (!rmr_data) + return NULL; - return ret; + /* Create a copy of SIDs array to associate with this rmr_data */ + sids_copy = kmemdup_array(sids, num_sids, sizeof(*sids), GFP_KERNEL); + if (!sids_copy) { + kfree(rmr_data); + return NULL; + } + rmr_data->sids = sids_copy; + rmr_data->num_sids = num_sids; + + if (!IS_ALIGNED(addr, SZ_64K) || !IS_ALIGNED(size, SZ_64K)) { + /* PAGE align base addr and size */ + addr &= PAGE_MASK; + size = PAGE_ALIGN(size + offset_in_page(rmr_desc->base_address)); + + pr_err(FW_BUG "RMR descriptor[0x%llx - 0x%llx] not aligned to 64K, continue with [0x%llx - 0x%llx]\n", + rmr_desc->base_address, + 
rmr_desc->base_address + rmr_desc->length - 1, + addr, addr + size - 1); + } + + region = &rmr_data->rr; + INIT_LIST_HEAD(®ion->list); + region->start = addr; + region->length = size; + region->prot = prot; + region->type = type; + region->free = iort_rmr_free; + + return rmr_data; } -static inline bool iort_iommu_driver_enabled(u8 type) +static void iort_rmr_desc_check_overlap(struct acpi_iort_rmr_desc *desc, + u32 count) { - switch (type) { - case ACPI_IORT_NODE_SMMU_V3: - return IS_BUILTIN(CONFIG_ARM_SMMU_V3); - case ACPI_IORT_NODE_SMMU: - return IS_BUILTIN(CONFIG_ARM_SMMU); - default: - pr_warn("IORT node type %u does not describe an SMMU\n", type); - return false; + int i, j; + + for (i = 0; i < count; i++) { + u64 end, start = desc[i].base_address, length = desc[i].length; + + if (!length) { + pr_err(FW_BUG "RMR descriptor[0x%llx] with zero length, continue anyway\n", + start); + continue; + } + + end = start + length - 1; + + /* Check for address overlap */ + for (j = i + 1; j < count; j++) { + u64 e_start = desc[j].base_address; + u64 e_end = e_start + desc[j].length - 1; + + if (start <= e_end && end >= e_start) + pr_err(FW_BUG "RMR descriptor[0x%llx - 0x%llx] overlaps, continue anyway\n", + start, end); + } } } -#ifdef CONFIG_IOMMU_API -static struct acpi_iort_node *iort_get_msi_resv_iommu(struct device *dev) +/* + * Please note, we will keep the already allocated RMR reserve + * regions in case of a memory allocation failure. + */ +static void iort_get_rmrs(struct acpi_iort_node *node, + struct acpi_iort_node *smmu, + u32 *sids, u32 num_sids, + struct list_head *head) { - struct acpi_iort_node *iommu; - struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); + struct acpi_iort_rmr *rmr = (struct acpi_iort_rmr *)node->node_data; + struct acpi_iort_rmr_desc *rmr_desc; + int i; - iommu = iort_get_iort_node(fwspec->iommu_fwnode); + rmr_desc = ACPI_ADD_PTR(struct acpi_iort_rmr_desc, node, + rmr->rmr_offset); - if (iommu && (iommu->type == ACPI_IORT_NODE_SMMU_V3)) { - struct acpi_iort_smmu_v3 *smmu; + iort_rmr_desc_check_overlap(rmr_desc, rmr->rmr_count); - smmu = (struct acpi_iort_smmu_v3 *)iommu->node_data; - if (smmu->model == ACPI_IORT_SMMU_V3_HISILICON_HI161X) - return iommu; + for (i = 0; i < rmr->rmr_count; i++, rmr_desc++) { + struct iommu_iort_rmr_data *rmr_data; + enum iommu_resv_type type; + int prot = IOMMU_READ | IOMMU_WRITE; + + if (rmr->flags & ACPI_IORT_RMR_REMAP_PERMITTED) + type = IOMMU_RESV_DIRECT_RELAXABLE; + else + type = IOMMU_RESV_DIRECT; + + if (rmr->flags & ACPI_IORT_RMR_ACCESS_PRIVILEGE) + prot |= IOMMU_PRIV; + + /* Attributes 0x00 - 0x03 represents device memory */ + if (ACPI_IORT_RMR_ACCESS_ATTRIBUTES(rmr->flags) <= + ACPI_IORT_RMR_ATTR_DEVICE_GRE) + prot |= IOMMU_MMIO; + else if (ACPI_IORT_RMR_ACCESS_ATTRIBUTES(rmr->flags) == + ACPI_IORT_RMR_ATTR_NORMAL_IWB_OWB) + prot |= IOMMU_CACHE; + + rmr_data = iort_rmr_alloc(rmr_desc, prot, type, + sids, num_sids); + if (!rmr_data) + return; + + list_add_tail(&rmr_data->rr.list, head); } +} - return NULL; +static u32 *iort_rmr_alloc_sids(u32 *sids, u32 count, u32 id_start, + u32 new_count) +{ + u32 *new_sids; + u32 total_count = count + new_count; + int i; + + new_sids = krealloc_array(sids, count + new_count, + sizeof(*new_sids), GFP_KERNEL); + if (!new_sids) { + kfree(sids); + return NULL; + } + + for (i = count; i < total_count; i++) + new_sids[i] = id_start++; + + return new_sids; } -static inline const struct iommu_ops *iort_fwspec_iommu_ops(struct device *dev) +static bool iort_rmr_has_dev(struct device *dev, 
u32 id_start, + u32 id_count) { + int i; struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); - return (fwspec && fwspec->ops) ? fwspec->ops : NULL; + /* + * Make sure the kernel has preserved the boot firmware PCIe + * configuration. This is required to ensure that the RMR PCIe + * StreamIDs are still valid (Refer: ARM DEN 0049E.d Section 3.1.1.5). + */ + if (dev_is_pci(dev)) { + struct pci_dev *pdev = to_pci_dev(dev); + struct pci_host_bridge *host = pci_find_host_bridge(pdev->bus); + + if (!host->preserve_config) + return false; + } + + for (i = 0; i < fwspec->num_ids; i++) { + if (fwspec->ids[i] >= id_start && + fwspec->ids[i] <= id_start + id_count) + return true; + } + + return false; } -static inline int iort_add_device_replay(const struct iommu_ops *ops, - struct device *dev) +static void iort_node_get_rmr_info(struct acpi_iort_node *node, + struct acpi_iort_node *iommu, + struct device *dev, struct list_head *head) { - int err = 0; + struct acpi_iort_node *smmu = NULL; + struct acpi_iort_rmr *rmr; + struct acpi_iort_id_mapping *map; + u32 *sids = NULL; + u32 num_sids = 0; + int i; - if (dev->bus && !device_iommu_mapped(dev)) - err = iommu_probe_device(dev); + if (!node->mapping_offset || !node->mapping_count) { + pr_err(FW_BUG "Invalid ID mapping, skipping RMR node %p\n", + node); + return; + } - return err; + rmr = (struct acpi_iort_rmr *)node->node_data; + if (!rmr->rmr_offset || !rmr->rmr_count) + return; + + map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, node, + node->mapping_offset); + + /* + * Go through the ID mappings and see if we have a match for SMMU + * and dev(if !NULL). If found, get the sids for the Node. + * Please note, id_count is equal to the number of IDs in the + * range minus one. + */ + for (i = 0; i < node->mapping_count; i++, map++) { + struct acpi_iort_node *parent; + + parent = ACPI_ADD_PTR(struct acpi_iort_node, iort_table, + map->output_reference); + if (parent != iommu) + continue; + + /* If dev is valid, check RMR node corresponds to the dev SID */ + if (dev && !iort_rmr_has_dev(dev, map->output_base, + map->id_count)) + continue; + + /* Retrieve SIDs associated with the Node. */ + sids = iort_rmr_alloc_sids(sids, num_sids, map->output_base, + map->id_count + 1); + if (!sids) + return; + + num_sids += map->id_count + 1; + } + + if (!sids) + return; + + iort_get_rmrs(node, smmu, sids, num_sids, head); + kfree(sids); } -/** - * iort_iommu_msi_get_resv_regions - Reserved region driver helper - * @dev: Device from iommu_get_resv_regions() - * @head: Reserved region list from iommu_get_resv_regions() - * - * Returns: Number of msi reserved regions on success (0 if platform - * doesn't require the reservation or no associated msi regions), - * appropriate error value otherwise. The ITS interrupt translation - * spaces (ITS_base + SZ_64K, SZ_64K) associated with the device - * are the msi reserved regions. 
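[Editor's aside, not part of the patch: the reservation the old kerneldoc above describes can be sketched as below. The prot value and the single-ITS shape are assumptions for illustration; the real code walks every ITS in the group and obtains the base via iort_find_its_base(), defined earlier in this file.]

#include <linux/gfp.h>
#include <linux/iommu.h>
#include <linux/list.h>
#include <linux/sizes.h>
#include <linux/types.h>

/* Reserve one ITS translation window (ITS_base + SZ_64K, SZ_64K) as an
 * MSI region so the IOMMU layer keeps it mapped 1:1 for the device. */
static int example_reserve_its_window(u32 its_id, struct list_head *head)
{
	int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
	struct iommu_resv_region *region;
	phys_addr_t base;

	if (iort_find_its_base(its_id, &base))
		return -ENODEV;

	region = iommu_alloc_resv_region(base + SZ_64K, SZ_64K, prot,
					 IOMMU_RESV_MSI, GFP_KERNEL);
	if (!region)
		return -ENOMEM;

	list_add_tail(&region->list, head);
	return 0;
}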
+static void iort_find_rmrs(struct acpi_iort_node *iommu, struct device *dev, + struct list_head *head) +{ + struct acpi_table_iort *iort; + struct acpi_iort_node *iort_node, *iort_end; + int i; + + /* Only supports ARM DEN 0049E.d onwards */ + if (iort_table->revision < 5) + return; + + iort = (struct acpi_table_iort *)iort_table; + + iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort, + iort->node_offset); + iort_end = ACPI_ADD_PTR(struct acpi_iort_node, iort, + iort_table->length); + + for (i = 0; i < iort->node_count; i++) { + if (WARN_TAINT(iort_node >= iort_end, TAINT_FIRMWARE_WORKAROUND, + "IORT node pointer overflows, bad table!\n")) + return; + + if (iort_node->type == ACPI_IORT_NODE_RMR) + iort_node_get_rmr_info(iort_node, iommu, dev, head); + + iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort_node, + iort_node->length); + } +} + +/* + * Populate the RMR list associated with a given IOMMU and dev(if provided). + * If dev is NULL, the function populates all the RMRs associated with the + * given IOMMU. */ -int iort_iommu_msi_get_resv_regions(struct device *dev, struct list_head *head) +static void iort_iommu_rmr_get_resv_regions(struct fwnode_handle *iommu_fwnode, + struct device *dev, + struct list_head *head) +{ + struct acpi_iort_node *iommu; + + iommu = iort_get_iort_node(iommu_fwnode); + if (!iommu) + return; + + iort_find_rmrs(iommu, dev, head); +} + +static struct acpi_iort_node *iort_get_msi_resv_iommu(struct device *dev) +{ + struct acpi_iort_node *iommu; + struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); + + iommu = iort_get_iort_node(fwspec->iommu_fwnode); + + if (iommu && (iommu->type == ACPI_IORT_NODE_SMMU_V3)) { + struct acpi_iort_smmu_v3 *smmu; + + smmu = (struct acpi_iort_smmu_v3 *)iommu->node_data; + if (smmu->model == ACPI_IORT_SMMU_V3_HISILICON_HI161X) + return iommu; + } + + return NULL; +} + +/* + * Retrieve platform specific HW MSI reserve regions. + * The ITS interrupt translation spaces (ITS_base + SZ_64K, SZ_64K) + * associated with the device are the HW MSI reserved regions. + */ +static void iort_iommu_msi_get_resv_regions(struct device *dev, + struct list_head *head) { struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); struct acpi_iort_its_group *its; struct acpi_iort_node *iommu_node, *its_node = NULL; - int i, resv = 0; + int i; iommu_node = iort_get_msi_resv_iommu(dev); if (!iommu_node) - return 0; + return; /* * Current logic to reserve ITS regions relies on HW topologies @@ -852,7 +1137,7 @@ int iort_iommu_msi_get_resv_regions(struct device *dev, struct list_head *head) } if (!its_node) - return 0; + return; /* Move to ITS specific data */ its = (struct acpi_iort_its_group *)its_node->node_data; @@ -865,33 +1150,94 @@ int iort_iommu_msi_get_resv_regions(struct device *dev, struct list_head *head) struct iommu_resv_region *region; region = iommu_alloc_resv_region(base + SZ_64K, SZ_64K, - prot, IOMMU_RESV_MSI); - if (region) { + prot, IOMMU_RESV_MSI, + GFP_KERNEL); + if (region) list_add_tail(®ion->list, head); - resv++; - } } } +} + +/** + * iort_iommu_get_resv_regions - Generic helper to retrieve reserved regions. + * @dev: Device from iommu_get_resv_regions() + * @head: Reserved region list from iommu_get_resv_regions() + */ +void iort_iommu_get_resv_regions(struct device *dev, struct list_head *head) +{ + struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); - return (resv == its->its_count) ? 
resv : -ENODEV; + iort_iommu_msi_get_resv_regions(dev, head); + iort_iommu_rmr_get_resv_regions(fwspec->iommu_fwnode, dev, head); +} + +/** + * iort_get_rmr_sids - Retrieve IORT RMR node reserved regions with + * associated StreamIDs information. + * @iommu_fwnode: fwnode associated with IOMMU + * @head: Resereved region list + */ +void iort_get_rmr_sids(struct fwnode_handle *iommu_fwnode, + struct list_head *head) +{ + iort_iommu_rmr_get_resv_regions(iommu_fwnode, NULL, head); +} +EXPORT_SYMBOL_GPL(iort_get_rmr_sids); + +/** + * iort_put_rmr_sids - Free memory allocated for RMR reserved regions. + * @iommu_fwnode: fwnode associated with IOMMU + * @head: Resereved region list + */ +void iort_put_rmr_sids(struct fwnode_handle *iommu_fwnode, + struct list_head *head) +{ + struct iommu_resv_region *entry, *next; + + list_for_each_entry_safe(entry, next, head, list) + entry->free(NULL, entry); +} +EXPORT_SYMBOL_GPL(iort_put_rmr_sids); + +static inline bool iort_iommu_driver_enabled(u8 type) +{ + switch (type) { + case ACPI_IORT_NODE_SMMU_V3: + return IS_ENABLED(CONFIG_ARM_SMMU_V3); + case ACPI_IORT_NODE_SMMU: + return IS_ENABLED(CONFIG_ARM_SMMU); + default: + pr_warn("IORT node type %u does not describe an SMMU\n", type); + return false; + } +} + +static bool iort_pci_rc_supports_ats(struct acpi_iort_node *node) +{ + struct acpi_iort_root_complex *pci_rc; + + pci_rc = (struct acpi_iort_root_complex *)node->node_data; + return pci_rc->ats_attribute & ACPI_IORT_ATS_SUPPORTED; +} + +static bool iort_pci_rc_supports_canwbs(struct acpi_iort_node *node) +{ + struct acpi_iort_memory_access *memory_access; + struct acpi_iort_root_complex *pci_rc; + + pci_rc = (struct acpi_iort_root_complex *)node->node_data; + memory_access = + (struct acpi_iort_memory_access *)&pci_rc->memory_properties; + return memory_access->memory_flags & ACPI_IORT_MF_CANWBS; } -#else -static inline const struct iommu_ops *iort_fwspec_iommu_ops(struct device *dev) -{ return NULL; } -static inline int iort_add_device_replay(const struct iommu_ops *ops, - struct device *dev) -{ return 0; } -int iort_iommu_msi_get_resv_regions(struct device *dev, struct list_head *head) -{ return 0; } -#endif static int iort_iommu_xlate(struct device *dev, struct acpi_iort_node *node, u32 streamid) { - const struct iommu_ops *ops; struct fwnode_handle *iort_fwnode; - if (!node) + /* If there's no SMMU driver at all, give up now */ + if (!node || !iort_iommu_driver_enabled(node->type)) return -ENODEV; iort_fwnode = iort_get_fwnode(node); @@ -899,19 +1245,10 @@ static int iort_iommu_xlate(struct device *dev, struct acpi_iort_node *node, return -ENODEV; /* - * If the ops look-up fails, this means that either - * the SMMU drivers have not been probed yet or that - * the SMMU drivers are not built in the kernel; - * Depending on whether the SMMU drivers are built-in - * in the kernel or not, defer the IOMMU configuration - * or just abort it. + * If the SMMU drivers are enabled but not loaded/probed + * yet, this will defer. */ - ops = iommu_ops_from_fwnode(iort_fwnode); - if (!ops) - return iort_iommu_driver_enabled(node->type) ? 
- -EPROBE_DEFER : -ENODEV; - - return arm_smmu_iort_xlate(dev, streamid, iort_fwnode, ops); + return acpi_iommu_fwspec_init(dev, streamid, iort_fwnode); } struct iort_pci_alias_info { @@ -930,175 +1267,172 @@ static int iort_pci_iommu_init(struct pci_dev *pdev, u16 alias, void *data) return iort_iommu_xlate(info->dev, parent, streamid); } -static int nc_dma_get_range(struct device *dev, u64 *size) +static void iort_named_component_init(struct device *dev, + struct acpi_iort_node *node) { - struct acpi_iort_node *node; - struct acpi_iort_named_component *ncomp; - - node = iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT, - iort_match_node_callback, dev); - if (!node) - return -ENODEV; - - ncomp = (struct acpi_iort_named_component *)node->node_data; - - *size = ncomp->memory_address_limit >= 64 ? U64_MAX : - 1ULL<<ncomp->memory_address_limit; - - return 0; + struct property_entry props[3] = {}; + struct acpi_iort_named_component *nc; + + nc = (struct acpi_iort_named_component *)node->node_data; + props[0] = PROPERTY_ENTRY_U32("pasid-num-bits", + FIELD_GET(ACPI_IORT_NC_PASID_BITS, + nc->node_flags)); + if (nc->node_flags & ACPI_IORT_NC_STALL_SUPPORTED) + props[1] = PROPERTY_ENTRY_BOOL("dma-can-stall"); + + if (device_create_managed_software_node(dev, props, NULL)) + dev_warn(dev, "Could not add device properties\n"); } -static int rc_dma_get_range(struct device *dev, u64 *size) +static int iort_nc_iommu_map(struct device *dev, struct acpi_iort_node *node) { - struct acpi_iort_node *node; - struct acpi_iort_root_complex *rc; - struct pci_bus *pbus = to_pci_dev(dev)->bus; + struct acpi_iort_node *parent; + int err = -ENODEV, i = 0; + u32 streamid = 0; - node = iort_scan_node(ACPI_IORT_NODE_PCI_ROOT_COMPLEX, - iort_match_node_callback, &pbus->dev); - if (!node || node->revision < 1) - return -ENODEV; + do { - rc = (struct acpi_iort_root_complex *)node->node_data; + parent = iort_node_map_platform_id(node, &streamid, + IORT_IOMMU_TYPE, + i++); - *size = rc->memory_address_limit >= 64 ? U64_MAX : - 1ULL<<rc->memory_address_limit; + if (parent) + err = iort_iommu_xlate(dev, parent, streamid); + } while (parent && !err); - return 0; + return err; } -/** - * iort_dma_setup() - Set-up device DMA parameters. - * - * @dev: device to configure - * @dma_addr: device DMA address result pointer - * @size: DMA range size result pointer - */ -void iort_dma_setup(struct device *dev, u64 *dma_addr, u64 *dma_size) +static int iort_nc_iommu_map_id(struct device *dev, + struct acpi_iort_node *node, + const u32 *in_id) { - u64 mask, dmaaddr = 0, size = 0, offset = 0; - int ret, msb; - - /* - * If @dev is expected to be DMA-capable then the bus code that created - * it should have initialised its dma_mask pointer by this point. For - * now, we'll continue the legacy behaviour of coercing it to the - * coherent mask if not, but we'll no longer do so quietly. - */ - if (!dev->dma_mask) { - dev_warn(dev, "DMA mask not set\n"); - dev->dma_mask = &dev->coherent_dma_mask; - } - - if (dev->coherent_dma_mask) - size = max(dev->coherent_dma_mask, dev->coherent_dma_mask + 1); - else - size = 1ULL << 32; - - if (dev_is_pci(dev)) { - ret = acpi_dma_get_range(dev, &dmaaddr, &offset, &size); - if (ret == -ENODEV) - ret = rc_dma_get_range(dev, &size); - } else { - ret = nc_dma_get_range(dev, &size); - } - - if (!ret) { - msb = fls64(dmaaddr + size - 1); - /* - * Round-up to the power-of-two mask or set - * the mask to the whole 64-bit address space - * in case the DMA region covers the full - * memory window. 
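[Editor's aside, not part of the patch: a worked example of the legacy mask rounding removed here. With dmaaddr = 0 and size = 1ULL << 32, the highest address is 0xffffffff, fls64() returns 32, and the mask becomes (1ULL << 32) - 1.]

#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/types.h>

/* Round a [dmaaddr, dmaaddr + size) window up to a power-of-two DMA mask,
 * or use the whole 64-bit space when the window already covers it. */
static u64 example_legacy_dma_mask(u64 dmaaddr, u64 size)
{
	int msb = fls64(dmaaddr + size - 1);

	return (msb == 64) ? U64_MAX : (1ULL << msb) - 1;
}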
- */ - mask = msb == 64 ? U64_MAX : (1ULL << msb) - 1; - /* - * Limit coherent and dma mask based on size - * retrieved from firmware. - */ - dev->bus_dma_mask = mask; - dev->coherent_dma_mask = mask; - *dev->dma_mask = mask; - } + struct acpi_iort_node *parent; + u32 streamid; - *dma_addr = dmaaddr; - *dma_size = size; + parent = iort_node_map_id(node, *in_id, &streamid, IORT_IOMMU_TYPE); + if (parent) + return iort_iommu_xlate(dev, parent, streamid); - dev->dma_pfn_offset = PFN_DOWN(offset); - dev_dbg(dev, "dma_pfn_offset(%#08llx)\n", offset); + return -ENODEV; } + /** - * iort_iommu_configure - Set-up IOMMU configuration for a device. + * iort_iommu_configure_id - Set-up IOMMU configuration for a device. * * @dev: device to configure + * @id_in: optional input id const value pointer * - * Returns: iommu_ops pointer on configuration success - * NULL on configuration failure + * Returns: 0 on success, <0 on failure */ -const struct iommu_ops *iort_iommu_configure(struct device *dev) +int iort_iommu_configure_id(struct device *dev, const u32 *id_in) { - struct acpi_iort_node *node, *parent; - const struct iommu_ops *ops; - u32 streamid = 0; + struct acpi_iort_node *node; int err = -ENODEV; - /* - * If we already translated the fwspec there - * is nothing left to do, return the iommu_ops. - */ - ops = iort_fwspec_iommu_ops(dev); - if (ops) - return ops; - if (dev_is_pci(dev)) { + struct iommu_fwspec *fwspec; struct pci_bus *bus = to_pci_dev(dev)->bus; struct iort_pci_alias_info info = { .dev = dev }; node = iort_scan_node(ACPI_IORT_NODE_PCI_ROOT_COMPLEX, iort_match_node_callback, &bus->dev); if (!node) - return NULL; + return -ENODEV; info.node = node; err = pci_for_each_dma_alias(to_pci_dev(dev), iort_pci_iommu_init, &info); - } else { - int i = 0; + fwspec = dev_iommu_fwspec_get(dev); + if (fwspec && iort_pci_rc_supports_ats(node)) + fwspec->flags |= IOMMU_FWSPEC_PCI_RC_ATS; + if (fwspec && iort_pci_rc_supports_canwbs(node)) + fwspec->flags |= IOMMU_FWSPEC_PCI_RC_CANWBS; + } else { node = iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT, iort_match_node_callback, dev); if (!node) - return NULL; + return -ENODEV; - do { - parent = iort_node_map_platform_id(node, &streamid, - IORT_IOMMU_TYPE, - i++); + err = id_in ? iort_nc_iommu_map_id(dev, node, id_in) : + iort_nc_iommu_map(dev, node); - if (parent) - err = iort_iommu_xlate(dev, parent, streamid); - } while (parent && !err); + if (!err) + iort_named_component_init(dev, node); } - /* - * If we have reason to believe the IOMMU driver missed the initial - * add_device callback for dev, replay it to get things in order. 
- */ - if (!err) { - ops = iort_fwspec_iommu_ops(dev); - err = iort_add_device_replay(ops, dev); + return err; +} + +#else +void iort_iommu_get_resv_regions(struct device *dev, struct list_head *head) +{ } +int iort_iommu_configure_id(struct device *dev, const u32 *input_id) +{ return -ENODEV; } +#endif + +static int nc_dma_get_range(struct device *dev, u64 *limit) +{ + struct acpi_iort_node *node; + struct acpi_iort_named_component *ncomp; + + node = iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT, + iort_match_node_callback, dev); + if (!node) + return -ENODEV; + + ncomp = (struct acpi_iort_named_component *)node->node_data; + + if (!ncomp->memory_address_limit) { + pr_warn(FW_BUG "Named component missing memory address limit\n"); + return -EINVAL; } - /* Ignore all other errors apart from EPROBE_DEFER */ - if (err == -EPROBE_DEFER) { - ops = ERR_PTR(err); - } else if (err) { - dev_dbg(dev, "Adding to IOMMU failed: %d\n", err); - ops = NULL; + *limit = ncomp->memory_address_limit >= 64 ? U64_MAX : + (1ULL << ncomp->memory_address_limit) - 1; + + return 0; +} + +static int rc_dma_get_range(struct device *dev, u64 *limit) +{ + struct acpi_iort_node *node; + struct acpi_iort_root_complex *rc; + struct pci_bus *pbus = to_pci_dev(dev)->bus; + + node = iort_scan_node(ACPI_IORT_NODE_PCI_ROOT_COMPLEX, + iort_match_node_callback, &pbus->dev); + if (!node || node->revision < 1) + return -ENODEV; + + rc = (struct acpi_iort_root_complex *)node->node_data; + + if (!rc->memory_address_limit) { + pr_warn(FW_BUG "Root complex missing memory address limit\n"); + return -EINVAL; } - return ops; + *limit = rc->memory_address_limit >= 64 ? U64_MAX : + (1ULL << rc->memory_address_limit) - 1; + + return 0; +} + +/** + * iort_dma_get_ranges() - Look up DMA addressing limit for the device + * @dev: device to lookup + * @limit: DMA limit result pointer + * + * Return: 0 on success, an error otherwise. + */ +int iort_dma_get_ranges(struct device *dev, u64 *limit) +{ + if (dev_is_pci(dev)) + return rc_dma_get_range(dev, limit); + else + return nc_dma_get_range(dev, limit); } static void __init acpi_iort_register_irq(int hwirq, const char *name, @@ -1218,32 +1552,47 @@ static void __init arm_smmu_v3_init_resources(struct resource *res, } } -static bool __init arm_smmu_v3_is_coherent(struct acpi_iort_node *node) +static void __init arm_smmu_v3_dma_configure(struct device *dev, + struct acpi_iort_node *node) { struct acpi_iort_smmu_v3 *smmu; + enum dev_dma_attr attr; /* Retrieve SMMUv3 specific data */ smmu = (struct acpi_iort_smmu_v3 *)node->node_data; - return smmu->flags & ACPI_IORT_SMMU_V3_COHACC_OVERRIDE; + attr = (smmu->flags & ACPI_IORT_SMMU_V3_COHACC_OVERRIDE) ? 
+ DEV_DMA_COHERENT : DEV_DMA_NON_COHERENT; + + /* We expect the dma masks to be equivalent for all SMMUv3 set-ups */ + dev->dma_mask = &dev->coherent_dma_mask; + + /* Configure DMA for the page table walker */ + acpi_dma_configure(dev, attr); } #if defined(CONFIG_ACPI_NUMA) /* * set numa proximity domain for smmuv3 device */ -static void __init arm_smmu_v3_set_proximity(struct device *dev, +static int __init arm_smmu_v3_set_proximity(struct device *dev, struct acpi_iort_node *node) { struct acpi_iort_smmu_v3 *smmu; smmu = (struct acpi_iort_smmu_v3 *)node->node_data; if (smmu->flags & ACPI_IORT_SMMU_V3_PXM_VALID) { - set_dev_node(dev, acpi_map_pxm_to_node(smmu->pxm)); + int dev_node = pxm_to_node(smmu->pxm); + + if (dev_node != NUMA_NO_NODE && !node_online(dev_node)) + return -EINVAL; + + set_dev_node(dev, dev_node); pr_info("SMMU-v3[%llx] Mapped to Proximity domain %d\n", smmu->base_address, smmu->pxm); } + return 0; } #else #define arm_smmu_v3_set_proximity NULL @@ -1301,30 +1650,116 @@ static void __init arm_smmu_init_resources(struct resource *res, } } -static bool __init arm_smmu_is_coherent(struct acpi_iort_node *node) +static void __init arm_smmu_dma_configure(struct device *dev, + struct acpi_iort_node *node) { struct acpi_iort_smmu *smmu; + enum dev_dma_attr attr; /* Retrieve SMMU specific data */ smmu = (struct acpi_iort_smmu *)node->node_data; - return smmu->flags & ACPI_IORT_SMMU_COHERENT_WALK; + attr = (smmu->flags & ACPI_IORT_SMMU_COHERENT_WALK) ? + DEV_DMA_COHERENT : DEV_DMA_NON_COHERENT; + + /* We expect the dma masks to be equivalent for SMMU set-ups */ + dev->dma_mask = &dev->coherent_dma_mask; + + /* Configure DMA for the page table walker */ + acpi_dma_configure(dev, attr); +} + +static int __init arm_smmu_v3_pmcg_count_resources(struct acpi_iort_node *node) +{ + struct acpi_iort_pmcg *pmcg; + + /* Retrieve PMCG specific data */ + pmcg = (struct acpi_iort_pmcg *)node->node_data; + + /* + * There are always 2 memory resources. + * If the overflow_gsiv is present then add that for a total of 3. + */ + return pmcg->overflow_gsiv ? 3 : 2; +} + +static void __init arm_smmu_v3_pmcg_init_resources(struct resource *res, + struct acpi_iort_node *node) +{ + struct acpi_iort_pmcg *pmcg; + + /* Retrieve PMCG specific data */ + pmcg = (struct acpi_iort_pmcg *)node->node_data; + + res[0].start = pmcg->page0_base_address; + res[0].end = pmcg->page0_base_address + SZ_4K - 1; + res[0].flags = IORESOURCE_MEM; + /* + * The initial version in DEN0049C lacked a way to describe register + * page 1, which makes it broken for most PMCG implementations; in + * that case, just let the driver fail gracefully if it expects to + * find a second memory resource. 
+ */ + if (node->revision > 0) { + res[1].start = pmcg->page1_base_address; + res[1].end = pmcg->page1_base_address + SZ_4K - 1; + res[1].flags = IORESOURCE_MEM; + } + + if (pmcg->overflow_gsiv) + acpi_iort_register_irq(pmcg->overflow_gsiv, "overflow", + ACPI_EDGE_SENSITIVE, &res[2]); +} + +static struct acpi_platform_list pmcg_plat_info[] __initdata = { + /* HiSilicon Hip08 Platform */ + {"HISI ", "HIP08 ", 0, ACPI_SIG_IORT, greater_than_or_equal, + "Erratum #162001800, Erratum #162001900", IORT_SMMU_V3_PMCG_HISI_HIP08}, + /* HiSilicon Hip09 Platform */ + {"HISI ", "HIP09 ", 0, ACPI_SIG_IORT, greater_than_or_equal, + "Erratum #162001900", IORT_SMMU_V3_PMCG_HISI_HIP09}, + {"HISI ", "HIP09A ", 0, ACPI_SIG_IORT, greater_than_or_equal, + "Erratum #162001900", IORT_SMMU_V3_PMCG_HISI_HIP09}, + /* HiSilicon Hip10/11 Platform uses the same SMMU IP with Hip09 */ + {"HISI ", "HIP10 ", 0, ACPI_SIG_IORT, greater_than_or_equal, + "Erratum #162001900", IORT_SMMU_V3_PMCG_HISI_HIP09}, + {"HISI ", "HIP10C ", 0, ACPI_SIG_IORT, greater_than_or_equal, + "Erratum #162001900", IORT_SMMU_V3_PMCG_HISI_HIP09}, + {"HISI ", "HIP11 ", 0, ACPI_SIG_IORT, greater_than_or_equal, + "Erratum #162001900", IORT_SMMU_V3_PMCG_HISI_HIP09}, + { } +}; + +static int __init arm_smmu_v3_pmcg_add_platdata(struct platform_device *pdev) +{ + u32 model; + int idx; + + idx = acpi_match_platform_list(pmcg_plat_info); + if (idx >= 0) + model = pmcg_plat_info[idx].data; + else + model = IORT_SMMU_V3_PMCG_GENERIC; + + return platform_device_add_data(pdev, &model, sizeof(model)); } struct iort_dev_config { const char *name; int (*dev_init)(struct acpi_iort_node *node); - bool (*dev_is_coherent)(struct acpi_iort_node *node); + void (*dev_dma_configure)(struct device *dev, + struct acpi_iort_node *node); int (*dev_count_resources)(struct acpi_iort_node *node); void (*dev_init_resources)(struct resource *res, struct acpi_iort_node *node); - void (*dev_set_proximity)(struct device *dev, + int (*dev_set_proximity)(struct device *dev, struct acpi_iort_node *node); + int (*dev_add_platdata)(struct platform_device *pdev); }; static const struct iort_dev_config iort_arm_smmu_v3_cfg __initconst = { .name = "arm-smmu-v3", - .dev_is_coherent = arm_smmu_v3_is_coherent, + .dev_dma_configure = arm_smmu_v3_dma_configure, .dev_count_resources = arm_smmu_v3_count_resources, .dev_init_resources = arm_smmu_v3_init_resources, .dev_set_proximity = arm_smmu_v3_set_proximity, @@ -1332,9 +1767,16 @@ static const struct iort_dev_config iort_arm_smmu_v3_cfg __initconst = { static const struct iort_dev_config iort_arm_smmu_cfg __initconst = { .name = "arm-smmu", - .dev_is_coherent = arm_smmu_is_coherent, + .dev_dma_configure = arm_smmu_dma_configure, .dev_count_resources = arm_smmu_count_resources, - .dev_init_resources = arm_smmu_init_resources + .dev_init_resources = arm_smmu_init_resources, +}; + +static const struct iort_dev_config iort_arm_smmu_v3_pmcg_cfg __initconst = { + .name = "arm-smmu-v3-pmcg", + .dev_count_resources = arm_smmu_v3_pmcg_count_resources, + .dev_init_resources = arm_smmu_v3_pmcg_init_resources, + .dev_add_platdata = arm_smmu_v3_pmcg_add_platdata, }; static __init const struct iort_dev_config *iort_get_dev_cfg( @@ -1345,6 +1787,8 @@ static __init const struct iort_dev_config *iort_get_dev_cfg( return &iort_arm_smmu_v3_cfg; case ACPI_IORT_NODE_SMMU: return &iort_arm_smmu_cfg; + case ACPI_IORT_NODE_PMCG: + return &iort_arm_smmu_v3_pmcg_cfg; default: return NULL; } @@ -1353,6 +1797,7 @@ static __init const struct iort_dev_config *iort_get_dev_cfg( 
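[Editor's aside, not part of the patch: the PMCG config above passes only a u32 model value through platform_data. A consumer could read it back as sketched here; the in-tree consumer is the SMMUv3 PMCG perf driver and its exact code may differ, and the IORT_SMMU_V3_PMCG_* constants are assumed to come from <linux/acpi_iort.h>.]

#include <linux/acpi_iort.h>
#include <linux/device.h>
#include <linux/platform_device.h>

static u32 example_pmcg_model(struct platform_device *pdev)
{
	const u32 *model = dev_get_platdata(&pdev->dev);

	/* arm_smmu_v3_pmcg_add_platdata() attached a single u32 chosen via
	 * acpi_match_platform_list(); platform_device_add_data() copies it,
	 * so the platform device owns this memory. */
	return model ? *model : IORT_SMMU_V3_PMCG_GENERIC;
}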
/** * iort_add_platform_device() - Allocate a platform device for IORT node * @node: Pointer to device ACPI IORT node + * @ops: Pointer to IORT device config struct * * Returns: 0 on success, <0 failure */ @@ -1362,15 +1807,17 @@ static int __init iort_add_platform_device(struct acpi_iort_node *node, struct fwnode_handle *fwnode; struct platform_device *pdev; struct resource *r; - enum dev_dma_attr attr; int ret, count; pdev = platform_device_alloc(ops->name, PLATFORM_DEVID_AUTO); if (!pdev) return -ENOMEM; - if (ops->dev_set_proximity) - ops->dev_set_proximity(&pdev->dev, node); + if (ops->dev_set_proximity) { + ret = ops->dev_set_proximity(&pdev->dev, node); + if (ret) + goto dev_put; + } count = ops->dev_count_resources(node); @@ -1393,19 +1840,19 @@ static int __init iort_add_platform_device(struct acpi_iort_node *node, goto dev_put; /* - * Add a copy of IORT node pointer to platform_data to - * be used to retrieve IORT data information. + * Platform devices based on PMCG nodes uses platform_data to + * pass the hardware model info to the driver. For others, add + * a copy of IORT node pointer to platform_data to be used to + * retrieve IORT data information. */ - ret = platform_device_add_data(pdev, &node, sizeof(node)); + if (ops->dev_add_platdata) + ret = ops->dev_add_platdata(pdev); + else + ret = platform_device_add_data(pdev, &node, sizeof(node)); + if (ret) goto dev_put; - /* - * We expect the dma masks to be equivalent for - * all SMMUs set-ups - */ - pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask; - fwnode = iort_get_fwnode(node); if (!fwnode) { @@ -1415,11 +1862,8 @@ static int __init iort_add_platform_device(struct acpi_iort_node *node, pdev->dev.fwnode = fwnode; - attr = ops->dev_is_coherent && ops->dev_is_coherent(node) ? - DEV_DMA_COHERENT : DEV_DMA_NON_COHERENT; - - /* Configure DMA for the page table walker */ - acpi_dma_configure(&pdev->dev, attr); + if (ops->dev_dma_configure) + ops->dev_dma_configure(&pdev->dev, node); iort_set_device_domain(&pdev->dev, node); @@ -1529,6 +1973,10 @@ void __init acpi_iort_init(void) { acpi_status status; + /* iort_table will be used at runtime after the iort init, + * so we don't need to call acpi_put_table() to release + * the IORT table mapping. + */ status = acpi_get_table(ACPI_SIG_IORT, 0, &iort_table); if (ACPI_FAILURE(status)) { if (status != AE_NOT_FOUND) { @@ -1542,3 +1990,58 @@ void __init acpi_iort_init(void) iort_init_platform_devices(); } + +#ifdef CONFIG_ZONE_DMA +/* + * Extract the highest CPU physical address accessible to all DMA masters in + * the system. PHYS_ADDR_MAX is returned when no constrained device is found. 
+ */ +phys_addr_t __init acpi_iort_dma_get_max_cpu_address(void) +{ + phys_addr_t limit = PHYS_ADDR_MAX; + struct acpi_iort_node *node, *end; + struct acpi_table_iort *iort; + acpi_status status; + int i; + + if (acpi_disabled) + return limit; + + status = acpi_get_table(ACPI_SIG_IORT, 0, + (struct acpi_table_header **)&iort); + if (ACPI_FAILURE(status)) + return limit; + + node = ACPI_ADD_PTR(struct acpi_iort_node, iort, iort->node_offset); + end = ACPI_ADD_PTR(struct acpi_iort_node, iort, iort->header.length); + + for (i = 0; i < iort->node_count; i++) { + if (node >= end) + break; + + switch (node->type) { + struct acpi_iort_named_component *ncomp; + struct acpi_iort_root_complex *rc; + phys_addr_t local_limit; + + case ACPI_IORT_NODE_NAMED_COMPONENT: + ncomp = (struct acpi_iort_named_component *)node->node_data; + local_limit = DMA_BIT_MASK(ncomp->memory_address_limit); + limit = min_not_zero(limit, local_limit); + break; + + case ACPI_IORT_NODE_PCI_ROOT_COMPLEX: + if (node->revision < 1) + break; + + rc = (struct acpi_iort_root_complex *)node->node_data; + local_limit = DMA_BIT_MASK(rc->memory_address_limit); + limit = min_not_zero(limit, local_limit); + break; + } + node = ACPI_ADD_PTR(struct acpi_iort_node, node, node->length); + } + acpi_put_table(&iort->header); + return limit; +} +#endif diff --git a/drivers/acpi/arm64/mpam.c b/drivers/acpi/arm64/mpam.c new file mode 100644 index 000000000000..84963a20c3e7 --- /dev/null +++ b/drivers/acpi/arm64/mpam.c @@ -0,0 +1,411 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2025 Arm Ltd. + +/* Parse the MPAM ACPI table feeding the discovered nodes into the driver */ + +#define pr_fmt(fmt) "ACPI MPAM: " fmt + +#include <linux/acpi.h> +#include <linux/arm_mpam.h> +#include <linux/bits.h> +#include <linux/cpu.h> +#include <linux/cpumask.h> +#include <linux/platform_device.h> + +#include <acpi/processor.h> + +/* + * Flags for acpi_table_mpam_msc.*_interrupt_flags. + * See 2.1.1 Interrupt Flags, Table 5, of DEN0065B_MPAM_ACPI_3.0-bet. + */ +#define ACPI_MPAM_MSC_IRQ_MODE BIT(0) +#define ACPI_MPAM_MSC_IRQ_TYPE_MASK GENMASK(2, 1) +#define ACPI_MPAM_MSC_IRQ_TYPE_WIRED 0 +#define ACPI_MPAM_MSC_IRQ_AFFINITY_TYPE_MASK BIT(3) +#define ACPI_MPAM_MSC_IRQ_AFFINITY_TYPE_PROCESSOR 0 +#define ACPI_MPAM_MSC_IRQ_AFFINITY_TYPE_PROCESSOR_CONTAINER 1 +#define ACPI_MPAM_MSC_IRQ_AFFINITY_VALID BIT(4) + +/* + * Encodings for the MSC node body interface type field. + * See 2.1 MPAM MSC node, Table 4 of DEN0065B_MPAM_ACPI_3.0-bet. 
+ */ +#define ACPI_MPAM_MSC_IFACE_MMIO 0x00 +#define ACPI_MPAM_MSC_IFACE_PCC 0x0a + +static bool _is_ppi_partition(u32 flags) +{ + u32 aff_type, is_ppi; + bool ret; + + is_ppi = FIELD_GET(ACPI_MPAM_MSC_IRQ_AFFINITY_VALID, flags); + if (!is_ppi) + return false; + + aff_type = FIELD_GET(ACPI_MPAM_MSC_IRQ_AFFINITY_TYPE_MASK, flags); + ret = (aff_type == ACPI_MPAM_MSC_IRQ_AFFINITY_TYPE_PROCESSOR_CONTAINER); + if (ret) + pr_err_once("Partitioned interrupts not supported\n"); + + return ret; +} + +static int acpi_mpam_register_irq(struct platform_device *pdev, + u32 intid, u32 flags) +{ + int irq; + u32 int_type; + int trigger; + + if (!intid) + return -EINVAL; + + if (_is_ppi_partition(flags)) + return -EINVAL; + + trigger = FIELD_GET(ACPI_MPAM_MSC_IRQ_MODE, flags); + int_type = FIELD_GET(ACPI_MPAM_MSC_IRQ_TYPE_MASK, flags); + if (int_type != ACPI_MPAM_MSC_IRQ_TYPE_WIRED) + return -EINVAL; + + irq = acpi_register_gsi(&pdev->dev, intid, trigger, ACPI_ACTIVE_HIGH); + if (irq < 0) + pr_err_once("Failed to register interrupt 0x%x with ACPI\n", intid); + + return irq; +} + +static void acpi_mpam_parse_irqs(struct platform_device *pdev, + struct acpi_mpam_msc_node *tbl_msc, + struct resource *res, int *res_idx) +{ + u32 flags, intid; + int irq; + + intid = tbl_msc->overflow_interrupt; + flags = tbl_msc->overflow_interrupt_flags; + irq = acpi_mpam_register_irq(pdev, intid, flags); + if (irq > 0) + res[(*res_idx)++] = DEFINE_RES_IRQ_NAMED(irq, "overflow"); + + intid = tbl_msc->error_interrupt; + flags = tbl_msc->error_interrupt_flags; + irq = acpi_mpam_register_irq(pdev, intid, flags); + if (irq > 0) + res[(*res_idx)++] = DEFINE_RES_IRQ_NAMED(irq, "error"); +} + +static int acpi_mpam_parse_resource(struct mpam_msc *msc, + struct acpi_mpam_resource_node *res) +{ + int level, nid; + u32 cache_id; + + switch (res->locator_type) { + case ACPI_MPAM_LOCATION_TYPE_PROCESSOR_CACHE: + cache_id = res->locator.cache_locator.cache_reference; + level = find_acpi_cache_level_from_id(cache_id); + if (level <= 0) { + pr_err_once("Bad level (%d) for cache with id %u\n", level, cache_id); + return -EINVAL; + } + return mpam_ris_create(msc, res->ris_index, MPAM_CLASS_CACHE, + level, cache_id); + case ACPI_MPAM_LOCATION_TYPE_MEMORY: + nid = pxm_to_node(res->locator.memory_locator.proximity_domain); + if (nid == NUMA_NO_NODE) { + pr_debug("Bad proximity domain %lld, using node 0 instead\n", + res->locator.memory_locator.proximity_domain); + nid = 0; + } + return mpam_ris_create(msc, res->ris_index, MPAM_CLASS_MEMORY, + MPAM_CLASS_ID_DEFAULT, nid); + default: + /* These get discovered later and are treated as unknown */ + return 0; + } +} + +int acpi_mpam_parse_resources(struct mpam_msc *msc, + struct acpi_mpam_msc_node *tbl_msc) +{ + int i, err; + char *ptr, *table_end; + struct acpi_mpam_resource_node *resource; + + table_end = (char *)tbl_msc + tbl_msc->length; + ptr = (char *)(tbl_msc + 1); + for (i = 0; i < tbl_msc->num_resource_nodes; i++) { + u64 max_deps, remaining_table; + + if (ptr + sizeof(*resource) > table_end) + return -EINVAL; + + resource = (struct acpi_mpam_resource_node *)ptr; + + remaining_table = table_end - ptr; + max_deps = remaining_table / sizeof(struct acpi_mpam_func_deps); + if (resource->num_functional_deps > max_deps) { + pr_debug("MSC has impossible number of functional dependencies\n"); + return -EINVAL; + } + + err = acpi_mpam_parse_resource(msc, resource); + if (err) + return err; + + ptr += sizeof(*resource); + ptr += resource->num_functional_deps * sizeof(struct acpi_mpam_func_deps); + } + 
+ return 0; +} + +/* + * Creates the device power management link and returns true if the + * acpi id is valid and usable for cpu affinity. This is the case + * when the linked device is a processor or a processor container. + */ +static bool __init parse_msc_pm_link(struct acpi_mpam_msc_node *tbl_msc, + struct platform_device *pdev, + u32 *acpi_id) +{ + char hid[sizeof(tbl_msc->hardware_id_linked_device) + 1] = { 0 }; + bool acpi_id_valid = false; + struct acpi_device *buddy; + char uid[11]; + int len; + + memcpy(hid, &tbl_msc->hardware_id_linked_device, + sizeof(tbl_msc->hardware_id_linked_device)); + + if (!strcmp(hid, ACPI_PROCESSOR_CONTAINER_HID)) { + *acpi_id = tbl_msc->instance_id_linked_device; + acpi_id_valid = true; + } + + len = snprintf(uid, sizeof(uid), "%u", + tbl_msc->instance_id_linked_device); + if (len >= sizeof(uid)) { + pr_debug("Failed to convert uid of device for power management."); + return acpi_id_valid; + } + + buddy = acpi_dev_get_first_match_dev(hid, uid, -1); + if (buddy) { + device_link_add(&pdev->dev, &buddy->dev, DL_FLAG_STATELESS); + acpi_dev_put(buddy); + } + + return acpi_id_valid; +} + +static int decode_interface_type(struct acpi_mpam_msc_node *tbl_msc, + enum mpam_msc_iface *iface) +{ + switch (tbl_msc->interface_type) { + case ACPI_MPAM_MSC_IFACE_MMIO: + *iface = MPAM_IFACE_MMIO; + return 0; + case ACPI_MPAM_MSC_IFACE_PCC: + *iface = MPAM_IFACE_PCC; + return 0; + default: + return -EINVAL; + } +} + +static struct platform_device * __init acpi_mpam_parse_msc(struct acpi_mpam_msc_node *tbl_msc) +{ + struct platform_device *pdev __free(platform_device_put) = + platform_device_alloc("mpam_msc", tbl_msc->identifier); + int next_res = 0, next_prop = 0, err; + /* pcc, nrdy, affinity and a sentinel */ + struct property_entry props[4] = { 0 }; + /* mmio, 2xirq, no sentinel. */ + struct resource res[3] = { 0 }; + struct acpi_device *companion; + enum mpam_msc_iface iface; + char uid[16]; + u32 acpi_id; + + if (!pdev) + return ERR_PTR(-ENOMEM); + + /* Some power management is described in the namespace: */ + err = snprintf(uid, sizeof(uid), "%u", tbl_msc->identifier); + if (err > 0 && err < sizeof(uid)) { + companion = acpi_dev_get_first_match_dev("ARMHAA5C", uid, -1); + if (companion) { + ACPI_COMPANION_SET(&pdev->dev, companion); + acpi_dev_put(companion); + } else { + pr_debug("MSC.%u: missing namespace entry\n", tbl_msc->identifier); + } + } + + if (decode_interface_type(tbl_msc, &iface)) { + pr_debug("MSC.%u: unknown interface type\n", tbl_msc->identifier); + return ERR_PTR(-EINVAL); + } + + if (iface == MPAM_IFACE_MMIO) { + res[next_res++] = DEFINE_RES_MEM_NAMED(tbl_msc->base_address, + tbl_msc->mmio_size, + "MPAM:MSC"); + } else if (iface == MPAM_IFACE_PCC) { + props[next_prop++] = PROPERTY_ENTRY_U32("pcc-channel", + tbl_msc->base_address); + } + + acpi_mpam_parse_irqs(pdev, tbl_msc, res, &next_res); + + WARN_ON_ONCE(next_res > ARRAY_SIZE(res)); + err = platform_device_add_resources(pdev, res, next_res); + if (err) + return ERR_PTR(err); + + props[next_prop++] = PROPERTY_ENTRY_U32("arm,not-ready-us", + tbl_msc->max_nrdy_usec); + + /* + * The MSC's CPU affinity is described via its linked power + * management device, but only if it points at a Processor or + * Processor Container. 
+ */ + if (parse_msc_pm_link(tbl_msc, pdev, &acpi_id)) + props[next_prop++] = PROPERTY_ENTRY_U32("cpu_affinity", acpi_id); + + WARN_ON_ONCE(next_prop > ARRAY_SIZE(props) - 1); + err = device_create_managed_software_node(&pdev->dev, props, NULL); + if (err) + return ERR_PTR(err); + + /* + * Stash the table entry for acpi_mpam_parse_resources() to discover + * what this MSC controls. + */ + err = platform_device_add_data(pdev, tbl_msc, tbl_msc->length); + if (err) + return ERR_PTR(err); + + err = platform_device_add(pdev); + if (err) + return ERR_PTR(err); + + return_ptr(pdev); +} + +static int __init acpi_mpam_parse(void) +{ + char *table_end, *table_offset; + struct acpi_mpam_msc_node *tbl_msc; + struct platform_device *pdev; + + if (acpi_disabled || !system_supports_mpam()) + return 0; + + struct acpi_table_header *table __free(acpi_put_table) = + acpi_get_table_pointer(ACPI_SIG_MPAM, 0); + + if (IS_ERR(table)) + return 0; + + if (table->revision < 1) { + pr_debug("MPAM ACPI table revision %d not supported\n", table->revision); + return 0; + } + + table_offset = (char *)(table + 1); + table_end = (char *)table + table->length; + + while (table_offset < table_end) { + tbl_msc = (struct acpi_mpam_msc_node *)table_offset; + if (table_offset + sizeof(*tbl_msc) > table_end || + table_offset + tbl_msc->length > table_end) { + pr_err("MSC entry overlaps end of ACPI table\n"); + return -EINVAL; + } + table_offset += tbl_msc->length; + + /* + * If any of the reserved fields are set, make no attempt to + * parse the MSC structure. This MSC will still be counted by + * acpi_mpam_count_msc(), meaning the MPAM driver can't probe + * against all MSC, and will never be enabled. There is no way + * to enable it safely, because we cannot determine safe + * system-wide partid and pmg ranges in this situation. + */ + if (tbl_msc->reserved || tbl_msc->reserved1 || tbl_msc->reserved2) { + pr_err_once("Unrecognised MSC, MPAM not usable\n"); + pr_debug("MSC.%u: reserved field set\n", tbl_msc->identifier); + continue; + } + + if (!tbl_msc->mmio_size) { + pr_debug("MSC.%u: marked as disabled\n", tbl_msc->identifier); + continue; + } + + pdev = acpi_mpam_parse_msc(tbl_msc); + if (IS_ERR(pdev)) + return PTR_ERR(pdev); + } + + return 0; +} + +/** + * acpi_mpam_count_msc() - Count the number of MSC described by firmware. + * + * Returns the number of MSCs, or zero for an error. + * + * This can be called before or in parallel with acpi_mpam_parse(). + */ +int acpi_mpam_count_msc(void) +{ + char *table_end, *table_offset; + struct acpi_mpam_msc_node *tbl_msc; + int count = 0; + + if (acpi_disabled || !system_supports_mpam()) + return 0; + + struct acpi_table_header *table __free(acpi_put_table) = + acpi_get_table_pointer(ACPI_SIG_MPAM, 0); + + if (IS_ERR(table)) + return 0; + + if (table->revision < 1) + return 0; + + table_offset = (char *)(table + 1); + table_end = (char *)table + table->length; + + while (table_offset < table_end) { + tbl_msc = (struct acpi_mpam_msc_node *)table_offset; + + if (table_offset + sizeof(*tbl_msc) > table_end) + return -EINVAL; + if (tbl_msc->length < sizeof(*tbl_msc)) + return -EINVAL; + if (tbl_msc->length > table_end - table_offset) + return -EINVAL; + table_offset += tbl_msc->length; + + if (!tbl_msc->mmio_size) + continue; + + count++; + } + + return count; +} + +/* + * Call after ACPI devices have been created, which happens behind acpi_scan_init() + * called from subsys_initcall(). PCC requires the mailbox driver, which is + * initialised from postcore_initcall(). 
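[Editor's aside, not part of the patch: both MPAM entry points above rely on scope-based release of the mapped table. A minimal sketch of that pattern follows, assuming acpi_get_table_pointer() and the acpi_put_table cleanup class used by this code.]

#include <linux/acpi.h>
#include <linux/cleanup.h>
#include <linux/err.h>

static int example_peek_mpam_table(void)
{
	/* __free(acpi_put_table) drops the table reference when 'table'
	 * goes out of scope, so no explicit acpi_put_table() is needed. */
	struct acpi_table_header *table __free(acpi_put_table) =
		acpi_get_table_pointer(ACPI_SIG_MPAM, 0);

	if (IS_ERR(table))
		return PTR_ERR(table);

	pr_info("MPAM revision %u, %u bytes\n", table->revision, table->length);
	return 0;
}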
+ */ +subsys_initcall_sync(acpi_mpam_parse); diff --git a/drivers/acpi/arm64/thermal_cpufreq.c b/drivers/acpi/arm64/thermal_cpufreq.c new file mode 100644 index 000000000000..582854914c5c --- /dev/null +++ b/drivers/acpi/arm64/thermal_cpufreq.c @@ -0,0 +1,22 @@ +// SPDX-License-Identifier: GPL-2.0-only +#include <linux/acpi.h> +#include <linux/export.h> + +#include "../internal.h" + +#define SMCCC_SOC_ID_T241 0x036b0241 + +int acpi_arch_thermal_cpufreq_pctg(void) +{ + s32 soc_id = arm_smccc_get_soc_id_version(); + + /* + * Check JEP106 code for NVIDIA Tegra241 chip (036b:0241) and + * reduce the CPUFREQ Thermal reduction percentage to 5%. + */ + if (soc_id == SMCCC_SOC_ID_T241) + return 5; + + return 0; +} +EXPORT_SYMBOL_GPL(acpi_arch_thermal_cpufreq_pctg); diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c index cb97b6105f52..34181fa52e93 100644 --- a/drivers/acpi/battery.c +++ b/drivers/acpi/battery.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /* * battery.c - ACPI Battery Driver (Revision: 2.0) * @@ -5,25 +6,10 @@ * Copyright (C) 2004-2007 Vladimir Lebedev <vladimir.p.lebedev@intel.com> * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com> * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com> - * - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or (at - * your option) any later version. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. 
- * - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#define pr_fmt(fmt) "ACPI: battery: " fmt -#include <linux/async.h> #include <linux/delay.h> #include <linux/dmi.h> #include <linux/jiffies.h> @@ -35,83 +21,68 @@ #include <linux/suspend.h> #include <linux/types.h> -#include <asm/unaligned.h> - -#ifdef CONFIG_ACPI_PROCFS_POWER -#include <linux/proc_fs.h> -#include <linux/seq_file.h> -#include <linux/uaccess.h> -#endif +#include <linux/unaligned.h> #include <linux/acpi.h> #include <linux/power_supply.h> #include <acpi/battery.h> -#define PREFIX "ACPI: " - #define ACPI_BATTERY_VALUE_UNKNOWN 0xFFFFFFFF +#define ACPI_BATTERY_CAPACITY_VALID(capacity) \ + ((capacity) != 0 && (capacity) != ACPI_BATTERY_VALUE_UNKNOWN) #define ACPI_BATTERY_DEVICE_NAME "Battery" /* Battery power unit: 0 means mW, 1 means mA */ #define ACPI_BATTERY_POWER_UNIT_MA 1 -#define ACPI_BATTERY_STATE_DISCHARGING 0x1 -#define ACPI_BATTERY_STATE_CHARGING 0x2 -#define ACPI_BATTERY_STATE_CRITICAL 0x4 - -#define _COMPONENT ACPI_BATTERY_COMPONENT +#define ACPI_BATTERY_STATE_DISCHARGING 0x1 +#define ACPI_BATTERY_STATE_CHARGING 0x2 +#define ACPI_BATTERY_STATE_CRITICAL 0x4 +#define ACPI_BATTERY_STATE_CHARGE_LIMITING 0x8 -ACPI_MODULE_NAME("battery"); +#define MAX_STRING_LENGTH 64 MODULE_AUTHOR("Paul Diefenbaugh"); MODULE_AUTHOR("Alexey Starikovskiy <astarikovskiy@suse.de>"); MODULE_DESCRIPTION("ACPI Battery Driver"); MODULE_LICENSE("GPL"); -static async_cookie_t async_cookie; -static bool battery_driver_registered; static int battery_bix_broken_package; static int battery_notification_delay_ms; static int battery_ac_is_broken; -static int battery_check_pmic = 1; static unsigned int cache_time = 1000; module_param(cache_time, uint, 0644); MODULE_PARM_DESC(cache_time, "cache time in milliseconds"); -#ifdef CONFIG_ACPI_PROCFS_POWER -extern struct proc_dir_entry *acpi_lock_battery_dir(void); -extern void *acpi_unlock_battery_dir(struct proc_dir_entry *acpi_battery_dir); -#endif - static const struct acpi_device_id battery_device_ids[] = { {"PNP0C0A", 0}, + + /* Microsoft Surface Go 3 */ + {"MSHW0146", 0}, + {"", 0}, }; MODULE_DEVICE_TABLE(acpi, battery_device_ids); -/* Lists of PMIC ACPI HIDs with an (often better) native battery driver */ -static const char * const acpi_battery_blacklist[] = { - "INT33F4", /* X-Powers AXP288 PMIC */ -}; - enum { ACPI_BATTERY_ALARM_PRESENT, ACPI_BATTERY_XINFO_PRESENT, ACPI_BATTERY_QUIRK_PERCENTAGE_CAPACITY, /* On Lenovo Thinkpad models from 2010 and 2011, the power unit - switches between mWh and mAh depending on whether the system - is running on battery or not. When mAh is the unit, most - reported values are incorrect and need to be adjusted by - 10000/design_voltage. Verified on x201, t410, t410s, and x220. - Pre-2010 and 2012 models appear to always report in mWh and - are thus unaffected (tested with t42, t61, t500, x200, x300, - and x230). Also, in mid-2012 Lenovo issued a BIOS update for - the 2011 models that fixes the issue (tested on x220 with a - post-1.29 BIOS), but as of Nov. 2012, no such update is - available for the 2010 models. */ + * switches between mWh and mAh depending on whether the system + * is running on battery or not. When mAh is the unit, most + * reported values are incorrect and need to be adjusted by + * 10000/design_voltage. Verified on x201, t410, t410s, and x220. 
+ * Pre-2010 and 2012 models appear to always report in mWh and + * are thus unaffected (tested with t42, t61, t500, x200, x300, + * and x230). Also, in mid-2012 Lenovo issued a BIOS update for + * the 2011 models that fixes the issue (tested on x220 with a + * post-1.29 BIOS), but as of Nov. 2012, no such update is + * available for the 2010 models. + */ ACPI_BATTERY_QUIRK_THINKPAD_MAH, /* for batteries reporting current capacity with design capacity * on a full charge, but showing degradation in full charge cap. @@ -120,8 +91,7 @@ enum { }; struct acpi_battery { - struct mutex lock; - struct mutex sysfs_lock; + struct mutex update_lock; struct power_supply *bat; struct power_supply_desc bat_desc; struct acpi_device *device; @@ -147,10 +117,10 @@ struct acpi_battery { int capacity_granularity_1; int capacity_granularity_2; int alarm; - char model_number[32]; - char serial_number[32]; - char type[32]; - char oem_info[32]; + char model_number[MAX_STRING_LENGTH]; + char serial_number[MAX_STRING_LENGTH]; + char type[MAX_STRING_LENGTH]; + char oem_info[MAX_STRING_LENGTH]; int state; int power_unit; unsigned long flags; @@ -182,7 +152,7 @@ static int acpi_battery_get_state(struct acpi_battery *battery); static int acpi_battery_is_charged(struct acpi_battery *battery) { - /* charging, discharging or critical low */ + /* charging, discharging, critical low or charge limited */ if (battery->state != 0) return 0; @@ -196,7 +166,7 @@ static int acpi_battery_is_charged(struct acpi_battery *battery) return 1; /* fallback to using design values for broken batteries */ - if (battery->design_capacity == battery->capacity_now) + if (battery->design_capacity <= battery->capacity_now) return 1; /* we don't do any sort of metric based on percentages */ @@ -205,7 +175,8 @@ static int acpi_battery_is_charged(struct acpi_battery *battery) static bool acpi_battery_is_degraded(struct acpi_battery *battery) { - return battery->full_charge_capacity && battery->design_capacity && + return ACPI_BATTERY_CAPACITY_VALID(battery->full_charge_capacity) && + ACPI_BATTERY_CAPACITY_VALID(battery->design_capacity) && battery->full_charge_capacity < battery->design_capacity; } @@ -227,7 +198,7 @@ static int acpi_battery_get_property(struct power_supply *psy, enum power_supply_property psp, union power_supply_propval *val) { - int ret = 0; + int full_capacity = ACPI_BATTERY_VALUE_UNKNOWN, ret = 0; struct acpi_battery *battery = to_acpi_battery(psy); if (acpi_battery_present(battery)) { @@ -241,10 +212,12 @@ static int acpi_battery_get_property(struct power_supply *psy, val->intval = acpi_battery_handle_discharging(battery); else if (battery->state & ACPI_BATTERY_STATE_CHARGING) val->intval = POWER_SUPPLY_STATUS_CHARGING; + else if (battery->state & ACPI_BATTERY_STATE_CHARGE_LIMITING) + val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING; else if (acpi_battery_is_charged(battery)) val->intval = POWER_SUPPLY_STATUS_FULL; else - val->intval = POWER_SUPPLY_STATUS_UNKNOWN; + val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING; break; case POWER_SUPPLY_PROP_PRESENT: val->intval = acpi_battery_present(battery); @@ -276,14 +249,14 @@ static int acpi_battery_get_property(struct power_supply *psy, break; case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN: case POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN: - if (battery->design_capacity == ACPI_BATTERY_VALUE_UNKNOWN) + if (!ACPI_BATTERY_CAPACITY_VALID(battery->design_capacity)) ret = -ENODEV; else val->intval = battery->design_capacity * 1000; break; case POWER_SUPPLY_PROP_CHARGE_FULL: case 
POWER_SUPPLY_PROP_ENERGY_FULL: - if (battery->full_charge_capacity == ACPI_BATTERY_VALUE_UNKNOWN) + if (!ACPI_BATTERY_CAPACITY_VALID(battery->full_charge_capacity)) ret = -ENODEV; else val->intval = battery->full_charge_capacity * 1000; @@ -296,11 +269,17 @@ static int acpi_battery_get_property(struct power_supply *psy, val->intval = battery->capacity_now * 1000; break; case POWER_SUPPLY_PROP_CAPACITY: - if (battery->capacity_now && battery->full_charge_capacity) - val->intval = battery->capacity_now * 100/ - battery->full_charge_capacity; + if (ACPI_BATTERY_CAPACITY_VALID(battery->full_charge_capacity)) + full_capacity = battery->full_charge_capacity; + else if (ACPI_BATTERY_CAPACITY_VALID(battery->design_capacity)) + full_capacity = battery->design_capacity; + + if (battery->capacity_now == ACPI_BATTERY_VALUE_UNKNOWN || + full_capacity == ACPI_BATTERY_VALUE_UNKNOWN) + ret = -ENODEV; else - val->intval = 0; + val->intval = DIV_ROUND_CLOSEST_ULL(battery->capacity_now * 100ULL, + full_capacity); break; case POWER_SUPPLY_PROP_CAPACITY_LEVEL: if (battery->state & ACPI_BATTERY_STATE_CRITICAL) @@ -328,7 +307,7 @@ static int acpi_battery_get_property(struct power_supply *psy, return ret; } -static enum power_supply_property charge_battery_props[] = { +static const enum power_supply_property charge_battery_props[] = { POWER_SUPPLY_PROP_STATUS, POWER_SUPPLY_PROP_PRESENT, POWER_SUPPLY_PROP_TECHNOLOGY, @@ -346,7 +325,21 @@ static enum power_supply_property charge_battery_props[] = { POWER_SUPPLY_PROP_SERIAL_NUMBER, }; -static enum power_supply_property energy_battery_props[] = { +static const enum power_supply_property charge_battery_full_cap_broken_props[] = { + POWER_SUPPLY_PROP_STATUS, + POWER_SUPPLY_PROP_PRESENT, + POWER_SUPPLY_PROP_TECHNOLOGY, + POWER_SUPPLY_PROP_CYCLE_COUNT, + POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN, + POWER_SUPPLY_PROP_VOLTAGE_NOW, + POWER_SUPPLY_PROP_CURRENT_NOW, + POWER_SUPPLY_PROP_CHARGE_NOW, + POWER_SUPPLY_PROP_MODEL_NAME, + POWER_SUPPLY_PROP_MANUFACTURER, + POWER_SUPPLY_PROP_SERIAL_NUMBER, +}; + +static const enum power_supply_property energy_battery_props[] = { POWER_SUPPLY_PROP_STATUS, POWER_SUPPLY_PROP_PRESENT, POWER_SUPPLY_PROP_TECHNOLOGY, @@ -364,7 +357,7 @@ static enum power_supply_property energy_battery_props[] = { POWER_SUPPLY_PROP_SERIAL_NUMBER, }; -static enum power_supply_property energy_battery_full_cap_broken_props[] = { +static const enum power_supply_property energy_battery_full_cap_broken_props[] = { POWER_SUPPLY_PROP_STATUS, POWER_SUPPLY_PROP_PRESENT, POWER_SUPPLY_PROP_TECHNOLOGY, @@ -378,9 +371,7 @@ static enum power_supply_property energy_battery_full_cap_broken_props[] = { POWER_SUPPLY_PROP_SERIAL_NUMBER, }; -/* -------------------------------------------------------------------------- - Battery Management - -------------------------------------------------------------------------- */ +/* Battery Management */ struct acpi_offsets { size_t offset; /* offset inside struct acpi_sbs_battery */ u8 mode; /* int or string? 
*/ @@ -438,6 +429,7 @@ static int extract_package(struct acpi_battery *battery, { int i; union acpi_object *element; + if (package->type != ACPI_TYPE_PACKAGE) return -EFAULT; for (i = 0; i < num; ++i) { @@ -446,15 +438,25 @@ static int extract_package(struct acpi_battery *battery, element = &package->package.elements[i]; if (offsets[i].mode) { u8 *ptr = (u8 *)battery + offsets[i].offset; - if (element->type == ACPI_TYPE_STRING || - element->type == ACPI_TYPE_BUFFER) - strncpy(ptr, element->string.pointer, 32); - else if (element->type == ACPI_TYPE_INTEGER) { - strncpy(ptr, (u8 *)&element->integer.value, - sizeof(u64)); - ptr[sizeof(u64)] = 0; - } else + u32 len = MAX_STRING_LENGTH; + + switch (element->type) { + case ACPI_TYPE_BUFFER: + if (len > element->buffer.length + 1) + len = element->buffer.length + 1; + + fallthrough; + case ACPI_TYPE_STRING: + strscpy(ptr, element->string.pointer, len); + + break; + case ACPI_TYPE_INTEGER: + strscpy(ptr, (u8 *)&element->integer.value, sizeof(u64) + 1); + + break; + default: *ptr = 0; /* don't have value */ + } } else { int *x = (int *)((u8 *)battery + offsets[i].offset); *x = (element->type == ACPI_TYPE_INTEGER) ? @@ -467,7 +469,8 @@ static int extract_package(struct acpi_battery *battery, static int acpi_battery_get_status(struct acpi_battery *battery) { if (acpi_bus_get_status(battery->device)) { - ACPI_EXCEPTION((AE_INFO, AE_ERROR, "Evaluating _STA")); + acpi_handle_info(battery->device->handle, + "_STA evaluation failed\n"); return -ENODEV; } return 0; @@ -503,10 +506,12 @@ static int extract_battery_info(const int use_bix, battery->design_capacity_warning * 10000 / battery->design_voltage; /* Curiously, design_capacity_low, unlike the rest of them, - is correct. */ + * is correct. + */ /* capacity_granularity_* equal 1 on the systems tested, so - it's impossible to tell if they would need an adjustment - or not if their values were higher. */ + * it's impossible to tell if they would need an adjustment + * or not if their values were higher. + */ } if (test_bit(ACPI_BATTERY_QUIRK_DEGRADED_FULL_CHARGE, &battery->flags) && battery->capacity_now > battery->full_charge_capacity) @@ -529,15 +534,15 @@ static int acpi_battery_get_info(struct acpi_battery *battery) struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; acpi_status status = AE_ERROR; - mutex_lock(&battery->lock); status = acpi_evaluate_object(battery->device->handle, use_bix ? "_BIX":"_BIF", NULL, &buffer); - mutex_unlock(&battery->lock); if (ACPI_FAILURE(status)) { - ACPI_EXCEPTION((AE_INFO, status, "Evaluating %s", - use_bix ? "_BIX":"_BIF")); + acpi_handle_info(battery->device->handle, + "%s evaluation failed: %s\n", + use_bix ? 
"_BIX":"_BIF", + acpi_format_exception(status)); } else { result = extract_battery_info(use_bix, battery, @@ -568,13 +573,12 @@ static int acpi_battery_get_state(struct acpi_battery *battery) msecs_to_jiffies(cache_time))) return 0; - mutex_lock(&battery->lock); status = acpi_evaluate_object(battery->device->handle, "_BST", NULL, &buffer); - mutex_unlock(&battery->lock); - if (ACPI_FAILURE(status)) { - ACPI_EXCEPTION((AE_INFO, status, "Evaluating _BST")); + acpi_handle_info(battery->device->handle, + "_BST evaluation failed: %s", + acpi_format_exception(status)); return -ENODEV; } @@ -591,7 +595,7 @@ static int acpi_battery_get_state(struct acpi_battery *battery) battery->rate_now != ACPI_BATTERY_VALUE_UNKNOWN && (s16)(battery->rate_now) < 0) { battery->rate_now = abs((s16)battery->rate_now); - pr_warn_once(FW_BUG "battery: (dis)charge rate invalid.\n"); + pr_warn_once(FW_BUG "(dis)charge rate invalid.\n"); } if (test_bit(ACPI_BATTERY_QUIRK_PERCENTAGE_CAPACITY, &battery->flags) @@ -618,15 +622,14 @@ static int acpi_battery_set_alarm(struct acpi_battery *battery) !test_bit(ACPI_BATTERY_ALARM_PRESENT, &battery->flags)) return -ENODEV; - mutex_lock(&battery->lock); status = acpi_execute_simple_method(battery->device->handle, "_BTP", battery->alarm); - mutex_unlock(&battery->lock); - if (ACPI_FAILURE(status)) return -ENODEV; - ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Alarm set to %d\n", battery->alarm)); + acpi_handle_debug(battery->device->handle, "Alarm set to %d\n", + battery->alarm); + return 0; } @@ -648,7 +651,8 @@ static ssize_t acpi_battery_alarm_show(struct device *dev, char *buf) { struct acpi_battery *battery = to_acpi_battery(dev_get_drvdata(dev)); - return sprintf(buf, "%d\n", battery->alarm * 1000); + + return sysfs_emit(buf, "%d\n", battery->alarm * 1000); } static ssize_t acpi_battery_alarm_store(struct device *dev, @@ -657,6 +661,7 @@ static ssize_t acpi_battery_alarm_store(struct device *dev, { unsigned long x; struct acpi_battery *battery = to_acpi_battery(dev_get_drvdata(dev)); + if (sscanf(buf, "%lu\n", &x) == 1) battery->alarm = x/1000; if (acpi_battery_present(battery)) @@ -664,12 +669,18 @@ static ssize_t acpi_battery_alarm_store(struct device *dev, return count; } -static const struct device_attribute alarm_attr = { +static struct device_attribute alarm_attr = { .attr = {.name = "alarm", .mode = 0644}, .show = acpi_battery_alarm_show, .store = acpi_battery_alarm_store, }; +static struct attribute *acpi_battery_attrs[] = { + &alarm_attr.attr, + NULL +}; +ATTRIBUTE_GROUPS(acpi_battery); + /* * The Battery Hooking API * @@ -683,27 +694,35 @@ static LIST_HEAD(acpi_battery_list); static LIST_HEAD(battery_hook_list); static DEFINE_MUTEX(hook_mutex); -static void __battery_hook_unregister(struct acpi_battery_hook *hook, int lock) +static void battery_hook_unregister_unlocked(struct acpi_battery_hook *hook) { struct acpi_battery *battery; + /* * In order to remove a hook, we first need to * de-register all the batteries that are registered. 
*/ - if (lock) - mutex_lock(&hook_mutex); list_for_each_entry(battery, &acpi_battery_list, list) { - hook->remove_battery(battery->bat); + if (!hook->remove_battery(battery->bat, hook)) + power_supply_changed(battery->bat); } - list_del(&hook->list); - if (lock) - mutex_unlock(&hook_mutex); - pr_info("extension unregistered: %s\n", hook->name); + list_del_init(&hook->list); + + pr_info("hook unregistered: %s\n", hook->name); } void battery_hook_unregister(struct acpi_battery_hook *hook) { - __battery_hook_unregister(hook, 1); + mutex_lock(&hook_mutex); + /* + * Ignore already unregistered battery hooks. This might happen + * if a battery hook was previously unloaded due to an error when + * adding a new battery. + */ + if (!list_empty(&hook->list)) + battery_hook_unregister_unlocked(hook); + + mutex_unlock(&hook_mutex); } EXPORT_SYMBOL_GPL(battery_hook_unregister); @@ -712,7 +731,6 @@ void battery_hook_register(struct acpi_battery_hook *hook) struct acpi_battery *battery; mutex_lock(&hook_mutex); - INIT_LIST_HEAD(&hook->list); list_add(&hook->list, &battery_hook_list); /* * Now that the driver is registered, we need @@ -721,29 +739,46 @@ void battery_hook_register(struct acpi_battery_hook *hook) * its attributes. */ list_for_each_entry(battery, &acpi_battery_list, list) { - if (hook->add_battery(battery->bat)) { + if (hook->add_battery(battery->bat, hook)) { /* * If a add-battery returns non-zero, - * the registration of the extension has failed, + * the registration of the hook has failed, * and we will not add it to the list of loaded * hooks. */ - pr_err("extension failed to load: %s", hook->name); - __battery_hook_unregister(hook, 0); + pr_err("hook failed to load: %s", hook->name); + battery_hook_unregister_unlocked(hook); goto end; } + + power_supply_changed(battery->bat); } - pr_info("new extension: %s\n", hook->name); + pr_info("new hook: %s\n", hook->name); end: mutex_unlock(&hook_mutex); } EXPORT_SYMBOL_GPL(battery_hook_register); +static void devm_battery_hook_unregister(void *data) +{ + struct acpi_battery_hook *hook = data; + + battery_hook_unregister(hook); +} + +int devm_battery_hook_register(struct device *dev, struct acpi_battery_hook *hook) +{ + battery_hook_register(hook); + + return devm_add_action_or_reset(dev, devm_battery_hook_unregister, hook); +} +EXPORT_SYMBOL_GPL(devm_battery_hook_register); + /* * This function gets called right after the battery sysfs * attributes have been added, so that the drivers that * define custom sysfs attributes can add their own. -*/ + */ static void battery_hook_add_battery(struct acpi_battery *battery) { struct acpi_battery_hook *hook_node, *tmp; @@ -759,14 +794,14 @@ static void battery_hook_add_battery(struct acpi_battery *battery) * during the battery module initialization. */ list_for_each_entry_safe(hook_node, tmp, &battery_hook_list, list) { - if (hook_node->add_battery(battery->bat)) { + if (hook_node->add_battery(battery->bat, hook_node)) { /* - * The notification of the extensions has failed, to - * prevent further errors we will unload the extension. + * The notification of the hook has failed, to + * prevent further errors we will unload the hook. */ - pr_err("error in extension, unloading: %s", + pr_err("error in hook, unloading: %s", hook_node->name); - __battery_hook_unregister(hook_node, 0); + battery_hook_unregister_unlocked(hook_node); } } mutex_unlock(&hook_mutex); @@ -782,7 +817,7 @@ static void battery_hook_remove_battery(struct acpi_battery *battery) * custom attributes from the battery. 
*/ list_for_each_entry(hook, &battery_hook_list, list) { - hook->remove_battery(battery->bat); + hook->remove_battery(battery->bat, hook); } /* Then, just remove the battery from the list */ list_del(&battery->list); @@ -799,35 +834,53 @@ static void __exit battery_hook_exit(void) * need to remove the hooks. */ list_for_each_entry_safe(hook, ptr, &battery_hook_list, list) { - __battery_hook_unregister(hook, 1); + battery_hook_unregister(hook); } mutex_destroy(&hook_mutex); } static int sysfs_add_battery(struct acpi_battery *battery) { - struct power_supply_config psy_cfg = { .drv_data = battery, }; + struct power_supply_config psy_cfg = { + .drv_data = battery, + .attr_grp = acpi_battery_groups, + .no_wakeup_source = true, + }; + bool full_cap_broken = false; + + if (!ACPI_BATTERY_CAPACITY_VALID(battery->full_charge_capacity) && + !ACPI_BATTERY_CAPACITY_VALID(battery->design_capacity)) + full_cap_broken = true; if (battery->power_unit == ACPI_BATTERY_POWER_UNIT_MA) { - battery->bat_desc.properties = charge_battery_props; - battery->bat_desc.num_properties = - ARRAY_SIZE(charge_battery_props); - } else if (battery->full_charge_capacity == 0) { - battery->bat_desc.properties = - energy_battery_full_cap_broken_props; - battery->bat_desc.num_properties = - ARRAY_SIZE(energy_battery_full_cap_broken_props); + if (full_cap_broken) { + battery->bat_desc.properties = + charge_battery_full_cap_broken_props; + battery->bat_desc.num_properties = + ARRAY_SIZE(charge_battery_full_cap_broken_props); + } else { + battery->bat_desc.properties = charge_battery_props; + battery->bat_desc.num_properties = + ARRAY_SIZE(charge_battery_props); + } } else { - battery->bat_desc.properties = energy_battery_props; - battery->bat_desc.num_properties = - ARRAY_SIZE(energy_battery_props); + if (full_cap_broken) { + battery->bat_desc.properties = + energy_battery_full_cap_broken_props; + battery->bat_desc.num_properties = + ARRAY_SIZE(energy_battery_full_cap_broken_props); + } else { + battery->bat_desc.properties = energy_battery_props; + battery->bat_desc.num_properties = + ARRAY_SIZE(energy_battery_props); + } } battery->bat_desc.name = acpi_device_bid(battery->device); battery->bat_desc.type = POWER_SUPPLY_TYPE_BATTERY; battery->bat_desc.get_property = acpi_battery_get_property; - battery->bat = power_supply_register_no_ws(&battery->device->dev, + battery->bat = power_supply_register(&battery->device->dev, &battery->bat_desc, &psy_cfg); if (IS_ERR(battery->bat)) { @@ -837,31 +890,29 @@ static int sysfs_add_battery(struct acpi_battery *battery) return result; } battery_hook_add_battery(battery); - return device_create_file(&battery->bat->dev, &alarm_attr); + return 0; } static void sysfs_remove_battery(struct acpi_battery *battery) { - mutex_lock(&battery->sysfs_lock); - if (!battery->bat) { - mutex_unlock(&battery->sysfs_lock); + if (!battery->bat) return; - } + battery_hook_remove_battery(battery); - device_remove_file(&battery->bat->dev, &alarm_attr); power_supply_unregister(battery->bat); battery->bat = NULL; - mutex_unlock(&battery->sysfs_lock); } static void find_battery(const struct dmi_header *dm, void *private) { struct acpi_battery *battery = (struct acpi_battery *)private; /* Note: the hardcoded offsets below have been extracted from - the source code of dmidecode. */ + * the source code of dmidecode. 
+ */ if (dm->type == DMI_ENTRY_PORTABLE_BATTERY && dm->length >= 8) { const u8 *dmi_data = (const u8 *)(dm + 1); int dmi_capacity = get_unaligned((const u16 *)(dmi_data + 6)); + if (dm->length >= 18) dmi_capacity *= dmi_data[17]; if (battery->design_capacity * battery->design_voltage / 1000 @@ -903,6 +954,7 @@ static void acpi_battery_quirks(struct acpi_battery *battery) if (battery->power_unit && dmi_name_in_vendors("LENOVO")) { const char *s; + s = dmi_get_system_info(DMI_PRODUCT_VERSION); if (s && !strncasecmp(s, "ThinkPad", 8)) { dmi_walk(find_battery, battery); @@ -974,7 +1026,7 @@ static int acpi_battery_update(struct acpi_battery *battery, bool resume) */ if ((battery->state & ACPI_BATTERY_STATE_CRITICAL) || (test_bit(ACPI_BATTERY_ALARM_PRESENT, &battery->flags) && - (battery->capacity_now <= battery->alarm))) + (battery->capacity_now <= battery->alarm))) acpi_pm_wakeup_event(&battery->device->dev); return result; @@ -999,245 +1051,25 @@ static void acpi_battery_refresh(struct acpi_battery *battery) sysfs_add_battery(battery); } -/* -------------------------------------------------------------------------- - FS Interface (/proc) - -------------------------------------------------------------------------- */ - -#ifdef CONFIG_ACPI_PROCFS_POWER -static struct proc_dir_entry *acpi_battery_dir; - -static const char *acpi_battery_units(const struct acpi_battery *battery) -{ - return (battery->power_unit == ACPI_BATTERY_POWER_UNIT_MA) ? - "mA" : "mW"; -} - -static int acpi_battery_info_proc_show(struct seq_file *seq, void *offset) -{ - struct acpi_battery *battery = seq->private; - int result = acpi_battery_update(battery, false); - - if (result) - goto end; - - seq_printf(seq, "present: %s\n", - acpi_battery_present(battery) ? "yes" : "no"); - if (!acpi_battery_present(battery)) - goto end; - if (battery->design_capacity == ACPI_BATTERY_VALUE_UNKNOWN) - seq_printf(seq, "design capacity: unknown\n"); - else - seq_printf(seq, "design capacity: %d %sh\n", - battery->design_capacity, - acpi_battery_units(battery)); - - if (battery->full_charge_capacity == ACPI_BATTERY_VALUE_UNKNOWN) - seq_printf(seq, "last full capacity: unknown\n"); - else - seq_printf(seq, "last full capacity: %d %sh\n", - battery->full_charge_capacity, - acpi_battery_units(battery)); - - seq_printf(seq, "battery technology: %srechargeable\n", - battery->technology ? 
"" : "non-"); - - if (battery->design_voltage == ACPI_BATTERY_VALUE_UNKNOWN) - seq_printf(seq, "design voltage: unknown\n"); - else - seq_printf(seq, "design voltage: %d mV\n", - battery->design_voltage); - seq_printf(seq, "design capacity warning: %d %sh\n", - battery->design_capacity_warning, - acpi_battery_units(battery)); - seq_printf(seq, "design capacity low: %d %sh\n", - battery->design_capacity_low, - acpi_battery_units(battery)); - seq_printf(seq, "cycle count: %i\n", battery->cycle_count); - seq_printf(seq, "capacity granularity 1: %d %sh\n", - battery->capacity_granularity_1, - acpi_battery_units(battery)); - seq_printf(seq, "capacity granularity 2: %d %sh\n", - battery->capacity_granularity_2, - acpi_battery_units(battery)); - seq_printf(seq, "model number: %s\n", battery->model_number); - seq_printf(seq, "serial number: %s\n", battery->serial_number); - seq_printf(seq, "battery type: %s\n", battery->type); - seq_printf(seq, "OEM info: %s\n", battery->oem_info); - end: - if (result) - seq_printf(seq, "ERROR: Unable to read battery info\n"); - return result; -} - -static int acpi_battery_state_proc_show(struct seq_file *seq, void *offset) -{ - struct acpi_battery *battery = seq->private; - int result = acpi_battery_update(battery, false); - - if (result) - goto end; - - seq_printf(seq, "present: %s\n", - acpi_battery_present(battery) ? "yes" : "no"); - if (!acpi_battery_present(battery)) - goto end; - - seq_printf(seq, "capacity state: %s\n", - (battery->state & 0x04) ? "critical" : "ok"); - if ((battery->state & 0x01) && (battery->state & 0x02)) - seq_printf(seq, - "charging state: charging/discharging\n"); - else if (battery->state & 0x01) - seq_printf(seq, "charging state: discharging\n"); - else if (battery->state & 0x02) - seq_printf(seq, "charging state: charging\n"); - else - seq_printf(seq, "charging state: charged\n"); - - if (battery->rate_now == ACPI_BATTERY_VALUE_UNKNOWN) - seq_printf(seq, "present rate: unknown\n"); - else - seq_printf(seq, "present rate: %d %s\n", - battery->rate_now, acpi_battery_units(battery)); - - if (battery->capacity_now == ACPI_BATTERY_VALUE_UNKNOWN) - seq_printf(seq, "remaining capacity: unknown\n"); - else - seq_printf(seq, "remaining capacity: %d %sh\n", - battery->capacity_now, acpi_battery_units(battery)); - if (battery->voltage_now == ACPI_BATTERY_VALUE_UNKNOWN) - seq_printf(seq, "present voltage: unknown\n"); - else - seq_printf(seq, "present voltage: %d mV\n", - battery->voltage_now); - end: - if (result) - seq_printf(seq, "ERROR: Unable to read battery state\n"); - - return result; -} - -static int acpi_battery_alarm_proc_show(struct seq_file *seq, void *offset) -{ - struct acpi_battery *battery = seq->private; - int result = acpi_battery_update(battery, false); - - if (result) - goto end; - - if (!acpi_battery_present(battery)) { - seq_printf(seq, "present: no\n"); - goto end; - } - seq_printf(seq, "alarm: "); - if (battery->alarm) { - seq_printf(seq, "%u %sh\n", battery->alarm, - acpi_battery_units(battery)); - } else { - seq_printf(seq, "unsupported\n"); - } - end: - if (result) - seq_printf(seq, "ERROR: Unable to read battery alarm\n"); - return result; -} - -static ssize_t acpi_battery_write_alarm(struct file *file, - const char __user * buffer, - size_t count, loff_t * ppos) -{ - int result = 0; - char alarm_string[12] = { '\0' }; - struct seq_file *m = file->private_data; - struct acpi_battery *battery = m->private; - - if (!battery || (count > sizeof(alarm_string) - 1)) - return -EINVAL; - if (!acpi_battery_present(battery)) 
{ - result = -ENODEV; - goto end; - } - if (copy_from_user(alarm_string, buffer, count)) { - result = -EFAULT; - goto end; - } - alarm_string[count] = '\0'; - if (kstrtoint(alarm_string, 0, &battery->alarm)) { - result = -EINVAL; - goto end; - } - result = acpi_battery_set_alarm(battery); - end: - if (result) - return result; - return count; -} - -static int acpi_battery_alarm_proc_open(struct inode *inode, struct file *file) -{ - return single_open(file, acpi_battery_alarm_proc_show, PDE_DATA(inode)); -} - -static const struct file_operations acpi_battery_alarm_fops = { - .owner = THIS_MODULE, - .open = acpi_battery_alarm_proc_open, - .read = seq_read, - .write = acpi_battery_write_alarm, - .llseek = seq_lseek, - .release = single_release, -}; - -static int acpi_battery_add_fs(struct acpi_device *device) -{ - pr_warning(PREFIX "Deprecated procfs I/F for battery is loaded, please retry with CONFIG_ACPI_PROCFS_POWER cleared\n"); - if (!acpi_device_dir(device)) { - acpi_device_dir(device) = proc_mkdir(acpi_device_bid(device), - acpi_battery_dir); - if (!acpi_device_dir(device)) - return -ENODEV; - } - - if (!proc_create_single_data("info", S_IRUGO, acpi_device_dir(device), - acpi_battery_info_proc_show, acpi_driver_data(device))) - return -ENODEV; - if (!proc_create_single_data("state", S_IRUGO, acpi_device_dir(device), - acpi_battery_state_proc_show, acpi_driver_data(device))) - return -ENODEV; - if (!proc_create_data("alarm", S_IFREG | S_IRUGO | S_IWUSR, - acpi_device_dir(device), &acpi_battery_alarm_fops, - acpi_driver_data(device))) - return -ENODEV; - return 0; -} - -static void acpi_battery_remove_fs(struct acpi_device *device) -{ - if (!acpi_device_dir(device)) - return; - remove_proc_subtree(acpi_device_bid(device), acpi_battery_dir); - acpi_device_dir(device) = NULL; -} - -#endif - -/* -------------------------------------------------------------------------- - Driver Interface - -------------------------------------------------------------------------- */ - -static void acpi_battery_notify(struct acpi_device *device, u32 event) +/* Driver Interface */ +static void acpi_battery_notify(acpi_handle handle, u32 event, void *data) { + struct acpi_device *device = data; struct acpi_battery *battery = acpi_driver_data(device); struct power_supply *old; if (!battery) return; + + guard(mutex)(&battery->update_lock); + old = battery->bat; /* - * On Acer Aspire V5-573G notifications are sometimes triggered too - * early. For example, when AC is unplugged and notification is - * triggered, battery state is still reported as "Full", and changes to - * "Discharging" only after short delay, without any notification. - */ + * On Acer Aspire V5-573G notifications are sometimes triggered too + * early. For example, when AC is unplugged and notification is + * triggered, battery state is still reported as "Full", and changes to + * "Discharging" only after short delay, without any notification. 
+ */ if (battery_notification_delay_ms > 0) msleep(battery_notification_delay_ms); if (event == ACPI_BATTERY_NOTIFY_INFO) @@ -1253,21 +1085,22 @@ static void acpi_battery_notify(struct acpi_device *device, u32 event) } static int battery_notify(struct notifier_block *nb, - unsigned long mode, void *_unused) + unsigned long mode, void *_unused) { struct acpi_battery *battery = container_of(nb, struct acpi_battery, pm_nb); - int result; - switch (mode) { - case PM_POST_HIBERNATION: - case PM_POST_SUSPEND: + if (mode == PM_POST_SUSPEND || mode == PM_POST_HIBERNATION) { + guard(mutex)(&battery->update_lock); + if (!acpi_battery_present(battery)) return 0; if (battery->bat) { acpi_battery_refresh(battery); } else { + int result; + result = acpi_battery_get_info(battery); if (result) return result; @@ -1279,7 +1112,6 @@ static int battery_notify(struct notifier_block *nb, acpi_battery_init_alarm(battery); acpi_battery_get_state(battery); - break; } return 0; @@ -1306,13 +1138,6 @@ battery_ac_is_broken_quirk(const struct dmi_system_id *d) return 0; } -static int __init -battery_do_not_check_pmic_quirk(const struct dmi_system_id *d) -{ - battery_check_pmic = 0; - return 0; -} - static const struct dmi_system_id bat_dmi_table[] __initconst = { { /* NEC LZ750/LS */ @@ -1342,19 +1167,11 @@ static const struct dmi_system_id bat_dmi_table[] __initconst = { }, }, { - /* ECS EF20EA */ - .callback = battery_do_not_check_pmic_quirk, - .matches = { - DMI_MATCH(DMI_PRODUCT_NAME, "EF20EA"), - }, - }, - { - /* Lenovo Ideapad Miix 320 */ - .callback = battery_do_not_check_pmic_quirk, + /* Microsoft Surface Go 3 */ + .callback = battery_notification_delay_quirk, .matches = { - DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"), - DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "80XF"), - DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Lenovo MIIX 320-10ICR"), + DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"), + DMI_MATCH(DMI_PRODUCT_NAME, "Surface Go 3"), }, }, {}, @@ -1372,6 +1189,8 @@ static int acpi_battery_update_retry(struct acpi_battery *battery) { int retry, ret; + guard(mutex)(&battery->update_lock); + for (retry = 5; retry; retry--) { ret = acpi_battery_update(battery, false); if (!ret) @@ -1382,10 +1201,17 @@ static int acpi_battery_update_retry(struct acpi_battery *battery) return ret; } +static void sysfs_battery_cleanup(struct acpi_battery *battery) +{ + guard(mutex)(&battery->update_lock); + + sysfs_remove_battery(battery); +} + static int acpi_battery_add(struct acpi_device *device) { int result = 0; - struct acpi_battery *battery = NULL; + struct acpi_battery *battery; if (!device) return -EINVAL; @@ -1393,15 +1219,18 @@ static int acpi_battery_add(struct acpi_device *device) if (device->dep_unmet) return -EPROBE_DEFER; - battery = kzalloc(sizeof(struct acpi_battery), GFP_KERNEL); + battery = devm_kzalloc(&device->dev, sizeof(*battery), GFP_KERNEL); if (!battery) return -ENOMEM; battery->device = device; - strcpy(acpi_device_name(device), ACPI_BATTERY_DEVICE_NAME); - strcpy(acpi_device_class(device), ACPI_BATTERY_CLASS); + strscpy(acpi_device_name(device), ACPI_BATTERY_DEVICE_NAME); + strscpy(acpi_device_class(device), ACPI_BATTERY_CLASS); device->driver_data = battery; - mutex_init(&battery->lock); - mutex_init(&battery->sysfs_lock); + + result = devm_mutex_init(&device->dev, &battery->update_lock); + if (result) + return result; + if (acpi_has_method(battery->device->handle, "_BIX")) set_bit(ACPI_BATTERY_XINFO_PRESENT, &battery->flags); @@ -1409,53 +1238,52 @@ static int acpi_battery_add(struct acpi_device *device) if 
(result) goto fail; -#ifdef CONFIG_ACPI_PROCFS_POWER - result = acpi_battery_add_fs(device); - if (result) { - acpi_battery_remove_fs(device); - goto fail; - } -#endif - - pr_info(PREFIX "%s Slot [%s] (battery %s)\n", - ACPI_BATTERY_DEVICE_NAME, acpi_device_bid(device), + pr_info("Slot [%s] (battery %s)\n", acpi_device_bid(device), device->status.battery_present ? "present" : "absent"); battery->pm_nb.notifier_call = battery_notify; - register_pm_notifier(&battery->pm_nb); + result = register_pm_notifier(&battery->pm_nb); + if (result) + goto fail; device_init_wakeup(&device->dev, 1); - return result; + result = acpi_dev_install_notify_handler(device, ACPI_ALL_NOTIFY, + acpi_battery_notify, device); + if (result) + goto fail_pm; + + return 0; +fail_pm: + device_init_wakeup(&device->dev, 0); + unregister_pm_notifier(&battery->pm_nb); fail: - sysfs_remove_battery(battery); - mutex_destroy(&battery->lock); - mutex_destroy(&battery->sysfs_lock); - kfree(battery); + sysfs_battery_cleanup(battery); + return result; } -static int acpi_battery_remove(struct acpi_device *device) +static void acpi_battery_remove(struct acpi_device *device) { - struct acpi_battery *battery = NULL; + struct acpi_battery *battery; if (!device || !acpi_driver_data(device)) - return -EINVAL; - device_init_wakeup(&device->dev, 0); + return; + battery = acpi_driver_data(device); + + acpi_dev_remove_notify_handler(device, ACPI_ALL_NOTIFY, + acpi_battery_notify); + + device_init_wakeup(&device->dev, 0); unregister_pm_notifier(&battery->pm_nb); -#ifdef CONFIG_ACPI_PROCFS_POWER - acpi_battery_remove_fs(device); -#endif + + guard(mutex)(&battery->update_lock); + sysfs_remove_battery(battery); - mutex_destroy(&battery->lock); - mutex_destroy(&battery->sysfs_lock); - kfree(battery); - return 0; } -#ifdef CONFIG_PM_SLEEP /* this is needed to learn about changes made in suspended state */ static int acpi_battery_resume(struct device *dev) { @@ -1469,78 +1297,41 @@ static int acpi_battery_resume(struct device *dev) return -EINVAL; battery->update_time = 0; + + guard(mutex)(&battery->update_lock); + acpi_battery_update(battery, true); return 0; } -#else -#define acpi_battery_resume NULL -#endif -static SIMPLE_DEV_PM_OPS(acpi_battery_pm, NULL, acpi_battery_resume); +static DEFINE_SIMPLE_DEV_PM_OPS(acpi_battery_pm, NULL, acpi_battery_resume); static struct acpi_driver acpi_battery_driver = { .name = "battery", .class = ACPI_BATTERY_CLASS, .ids = battery_device_ids, - .flags = ACPI_DRIVER_ALL_NOTIFY_EVENTS, .ops = { .add = acpi_battery_add, .remove = acpi_battery_remove, - .notify = acpi_battery_notify, }, - .drv.pm = &acpi_battery_pm, + .drv.pm = pm_sleep_ptr(&acpi_battery_pm), + .drv.probe_type = PROBE_PREFER_ASYNCHRONOUS, }; -static void __init acpi_battery_init_async(void *unused, async_cookie_t cookie) -{ - unsigned int i; - int result; - - dmi_check_system(bat_dmi_table); - - if (battery_check_pmic) { - for (i = 0; i < ARRAY_SIZE(acpi_battery_blacklist); i++) - if (acpi_dev_present(acpi_battery_blacklist[i], "1", -1)) { - pr_info(PREFIX ACPI_BATTERY_DEVICE_NAME - ": found native %s PMIC, not loading\n", - acpi_battery_blacklist[i]); - return; - } - } - -#ifdef CONFIG_ACPI_PROCFS_POWER - acpi_battery_dir = acpi_lock_battery_dir(); - if (!acpi_battery_dir) - return; -#endif - result = acpi_bus_register_driver(&acpi_battery_driver); -#ifdef CONFIG_ACPI_PROCFS_POWER - if (result < 0) - acpi_unlock_battery_dir(acpi_battery_dir); -#endif - battery_driver_registered = (result == 0); -} - static int __init acpi_battery_init(void) { - if 
(acpi_disabled) + if (acpi_disabled || acpi_quirk_skip_acpi_ac_and_battery()) return -ENODEV; - async_cookie = async_schedule(acpi_battery_init_async, NULL); - return 0; + dmi_check_system(bat_dmi_table); + + return acpi_bus_register_driver(&acpi_battery_driver); } static void __exit acpi_battery_exit(void) { - async_synchronize_cookie(async_cookie + 1); - if (battery_driver_registered) { - acpi_bus_unregister_driver(&acpi_battery_driver); - battery_hook_exit(); - } -#ifdef CONFIG_ACPI_PROCFS_POWER - if (acpi_battery_dir) - acpi_unlock_battery_dir(acpi_battery_dir); -#endif + acpi_bus_unregister_driver(&acpi_battery_driver); + battery_hook_exit(); } module_init(acpi_battery_init); diff --git a/drivers/acpi/bgrt.c b/drivers/acpi/bgrt.c index 75af78361ce5..0fdd581ef96f 100644 --- a/drivers/acpi/bgrt.c +++ b/drivers/acpi/bgrt.c @@ -1,12 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * BGRT boot graphic support * Authors: Matthew Garrett, Josh Triplett <josh@joshtriplett.org> * Copyright 2012 Red Hat, Inc <mjg@redhat.com> * Copyright 2012 Intel Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. */ #include <linux/kernel.h> @@ -18,60 +15,32 @@ static void *bgrt_image; static struct kobject *bgrt_kobj; -static ssize_t show_version(struct device *dev, - struct device_attribute *attr, char *buf) -{ - return snprintf(buf, PAGE_SIZE, "%d\n", bgrt_tab.version); -} -static DEVICE_ATTR(version, S_IRUGO, show_version, NULL); - -static ssize_t show_status(struct device *dev, - struct device_attribute *attr, char *buf) -{ - return snprintf(buf, PAGE_SIZE, "%d\n", bgrt_tab.status); -} -static DEVICE_ATTR(status, S_IRUGO, show_status, NULL); - -static ssize_t show_type(struct device *dev, - struct device_attribute *attr, char *buf) -{ - return snprintf(buf, PAGE_SIZE, "%d\n", bgrt_tab.image_type); -} -static DEVICE_ATTR(type, S_IRUGO, show_type, NULL); +#define BGRT_SHOW(_name, _member) \ + static ssize_t _name##_show(struct kobject *kobj, \ + struct kobj_attribute *attr, char *buf) \ + { \ + return sysfs_emit(buf, "%d\n", bgrt_tab._member); \ + } \ + static struct kobj_attribute bgrt_attr_##_name = __ATTR_RO(_name) -static ssize_t show_xoffset(struct device *dev, - struct device_attribute *attr, char *buf) -{ - return snprintf(buf, PAGE_SIZE, "%d\n", bgrt_tab.image_offset_x); -} -static DEVICE_ATTR(xoffset, S_IRUGO, show_xoffset, NULL); - -static ssize_t show_yoffset(struct device *dev, - struct device_attribute *attr, char *buf) -{ - return snprintf(buf, PAGE_SIZE, "%d\n", bgrt_tab.image_offset_y); -} -static DEVICE_ATTR(yoffset, S_IRUGO, show_yoffset, NULL); - -static ssize_t image_read(struct file *file, struct kobject *kobj, - struct bin_attribute *attr, char *buf, loff_t off, size_t count) -{ - memcpy(buf, attr->private + off, count); - return count; -} +BGRT_SHOW(version, version); +BGRT_SHOW(status, status); +BGRT_SHOW(type, image_type); +BGRT_SHOW(xoffset, image_offset_x); +BGRT_SHOW(yoffset, image_offset_y); -static BIN_ATTR_RO(image, 0); /* size gets filled in later */ +static __ro_after_init BIN_ATTR_SIMPLE_RO(image); static struct attribute *bgrt_attributes[] = { - &dev_attr_version.attr, - &dev_attr_status.attr, - &dev_attr_type.attr, - &dev_attr_xoffset.attr, - &dev_attr_yoffset.attr, + &bgrt_attr_version.attr, + &bgrt_attr_status.attr, + &bgrt_attr_type.attr, + &bgrt_attr_xoffset.attr, + &bgrt_attr_yoffset.attr, NULL, }; -static 
struct bin_attribute *bgrt_bin_attributes[] = { +static const struct bin_attribute *const bgrt_bin_attributes[] = { &bin_attr_image, NULL, }; diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c index 5c093ce01bcd..a984ccd4a2a0 100644 --- a/drivers/acpi/bus.c +++ b/drivers/acpi/bus.c @@ -1,23 +1,12 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /* * acpi_bus.c - ACPI Bus Driver ($Revision: 80 $) * * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com> - * - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or (at - * your option) any later version. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ +#define pr_fmt(fmt) "ACPI: " fmt + #include <linux/module.h> #include <linux/init.h> #include <linux/ioport.h> @@ -37,16 +26,14 @@ #include <asm/mpspec.h> #include <linux/dmi.h> #endif -#include <linux/acpi_iort.h> +#include <linux/acpi_viot.h> #include <linux/pci.h> #include <acpi/apei.h> #include <linux/suspend.h> +#include <linux/prmt.h> #include "internal.h" -#define _COMPONENT ACPI_BUS_COMPONENT -ACPI_MODULE_NAME("bus"); - struct acpi_device *acpi_root; struct proc_dir_entry *acpi_root_dir; EXPORT_SYMBOL(acpi_root_dir); @@ -60,8 +47,7 @@ static inline int set_copy_dsdt(const struct dmi_system_id *id) #else static int set_copy_dsdt(const struct dmi_system_id *id) { - printk(KERN_NOTICE "%s detected - " - "force copy of DSDT to local memory\n", id->ident); + pr_notice("%s detected - force copy of DSDT to local memory\n", id->ident); acpi_gbl_copy_dsdt_locally = 1; return 0; } @@ -111,8 +97,8 @@ int acpi_bus_get_status(struct acpi_device *device) acpi_status status; unsigned long long sta; - if (acpi_device_always_present(device)) { - acpi_set_device_status(device, ACPI_STA_DEFAULT); + if (acpi_device_override_status(device, &sta)) { + acpi_set_device_status(device, sta); return 0; } @@ -126,16 +112,25 @@ int acpi_bus_get_status(struct acpi_device *device) if (ACPI_FAILURE(status)) return -ENODEV; + if (!device->status.present && device->status.enabled) { + pr_info(FW_BUG "Device [%s] status [%08x]: not present and enabled\n", + device->pnp.bus_id, (u32)sta); + device->status.enabled = 0; + /* + * The status is clearly invalid, so clear the functional bit as + * well to avoid attempting to use the device. 
+ */ + device->status.functional = 0; + } + acpi_set_device_status(device, sta); if (device->status.functional && !device->status.present) { - ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device [%s] status [%08x]: " - "functional but not present;\n", - device->pnp.bus_id, (u32)sta)); + pr_debug("Device [%s] status [%08x]: functional but not present\n", + device->pnp.bus_id, (u32)sta); } - ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device [%s] status [%08x]\n", - device->pnp.bus_id, (u32)sta)); + pr_debug("Device [%s] status [%08x]\n", device->pnp.bus_id, (u32)sta); return 0; } EXPORT_SYMBOL(acpi_bus_get_status); @@ -166,7 +161,7 @@ int acpi_bus_get_private_data(acpi_handle handle, void **data) { acpi_status status; - if (!*data) + if (!data) return -EINVAL; status = acpi_get_data(handle, acpi_bus_private_data_handler, data); @@ -279,8 +274,6 @@ out_success: out_kfree: kfree(output.pointer); - if (status != AE_OK) - context->ret.pointer = NULL; return status; } EXPORT_SYMBOL(acpi_run_osc); @@ -294,10 +287,32 @@ bool osc_sb_apei_support_acked; bool osc_pc_lpi_support_confirmed; EXPORT_SYMBOL_GPL(osc_pc_lpi_support_confirmed); +/* + * ACPI 6.2 Section 6.2.11.2 'Platform-Wide OSPM Capabilities': + * Starting with ACPI Specification 6.2, all _CPC registers can be in + * PCC, System Memory, System IO, or Functional Fixed Hardware address + * spaces. OSPM support for this more flexible register space scheme is + * indicated by the “Flexible Address Space for CPPC Registers” _OSC bit. + * + * Otherwise (cf ACPI 6.1, s8.4.7.1.1.X), _CPC registers must be in: + * - PCC or Functional Fixed Hardware address space if defined + * - SystemMemory address space (NULL register) if not defined + */ +bool osc_cpc_flexible_adr_space_confirmed; +EXPORT_SYMBOL_GPL(osc_cpc_flexible_adr_space_confirmed); + +/* + * ACPI 6.4 Operating System Capabilities for USB. 
+ */ +bool osc_sb_native_usb4_support_confirmed; +EXPORT_SYMBOL_GPL(osc_sb_native_usb4_support_confirmed); + +bool osc_sb_cppc2_support_acked; + static u8 sb_uuid_str[] = "0811B06E-4A27-44F9-8D60-3CBBC22E7B48"; -static void acpi_bus_osc_support(void) +static void acpi_bus_osc_negotiate_platform_control(void) { - u32 capbuf[2]; + u32 capbuf[2], *capbuf_ret; struct acpi_osc_context context = { .uuid_str = sb_uuid_str, .rev = 1, @@ -312,35 +327,173 @@ static void acpi_bus_osc_support(void) capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_PAD_SUPPORT; if (IS_ENABLED(CONFIG_ACPI_PROCESSOR)) capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_PPC_OST_SUPPORT; + if (IS_ENABLED(CONFIG_ACPI_THERMAL)) + capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_FAST_THERMAL_SAMPLING_SUPPORT; + if (IS_ENABLED(CONFIG_ACPI_BATTERY)) + capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_BATTERY_CHARGE_LIMITING_SUPPORT; capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_HOTPLUG_OST_SUPPORT; capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_PCLPI_SUPPORT; - + capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_OVER_16_PSTATES_SUPPORT; + capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_GED_SUPPORT; + capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_IRQ_RESOURCE_SOURCE_SUPPORT; + if (IS_ENABLED(CONFIG_ACPI_PRMT)) + capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_PRM_SUPPORT; + if (IS_ENABLED(CONFIG_ACPI_FFH)) + capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_FFH_OPR_SUPPORT; + +#ifdef CONFIG_ARM64 + capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_GENERIC_INITIATOR_SUPPORT; +#endif #ifdef CONFIG_X86 - if (boot_cpu_has(X86_FEATURE_HWP)) { - capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_CPC_SUPPORT; - capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_CPCV2_SUPPORT; - } + capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_GENERIC_INITIATOR_SUPPORT; #endif +#ifdef CONFIG_ACPI_CPPC_LIB + capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_CPC_SUPPORT; + capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_CPCV2_SUPPORT; +#endif + + capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_CPC_FLEXIBLE_ADR_SPACE; + if (IS_ENABLED(CONFIG_SCHED_MC_PRIO)) capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_CPC_DIVERSE_HIGH_SUPPORT; + if (IS_ENABLED(CONFIG_USB4)) + capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_NATIVE_USB4_SUPPORT; + if (!ghes_disable) capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_APEI_SUPPORT; if (ACPI_FAILURE(acpi_get_handle(NULL, "\\_SB", &handle))) return; - if (ACPI_SUCCESS(acpi_run_osc(handle, &context))) { - u32 *capbuf_ret = context.ret.pointer; - if (context.ret.length > OSC_SUPPORT_DWORD) { - osc_sb_apei_support_acked = - capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_APEI_SUPPORT; - osc_pc_lpi_support_confirmed = - capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_PCLPI_SUPPORT; - } + + if (ACPI_FAILURE(acpi_run_osc(handle, &context))) + return; + + capbuf_ret = context.ret.pointer; + if (context.ret.length <= OSC_SUPPORT_DWORD) { kfree(context.ret.pointer); + return; + } + + /* + * Now run _OSC again with query flag clear and with the caps + * supported by both the OS and the platform. 
+ */ + capbuf[OSC_QUERY_DWORD] = 0; + capbuf[OSC_SUPPORT_DWORD] = capbuf_ret[OSC_SUPPORT_DWORD]; + kfree(context.ret.pointer); + + if (ACPI_FAILURE(acpi_run_osc(handle, &context))) + return; + + capbuf_ret = context.ret.pointer; + if (context.ret.length > OSC_SUPPORT_DWORD) { +#ifdef CONFIG_ACPI_CPPC_LIB + osc_sb_cppc2_support_acked = capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_CPCV2_SUPPORT; +#endif + + osc_sb_apei_support_acked = + capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_APEI_SUPPORT; + osc_pc_lpi_support_confirmed = + capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_PCLPI_SUPPORT; + osc_sb_native_usb4_support_confirmed = + capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_NATIVE_USB4_SUPPORT; + osc_cpc_flexible_adr_space_confirmed = + capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_CPC_FLEXIBLE_ADR_SPACE; + } + + kfree(context.ret.pointer); +} + +/* + * Native control of USB4 capabilities. If any of the tunneling bits is + * set it means OS is in control and we use software based connection + * manager. + */ +u32 osc_sb_native_usb4_control; +EXPORT_SYMBOL_GPL(osc_sb_native_usb4_control); + +static void acpi_bus_decode_usb_osc(const char *msg, u32 bits) +{ + pr_info("%s USB3%c DisplayPort%c PCIe%c XDomain%c\n", msg, + (bits & OSC_USB_USB3_TUNNELING) ? '+' : '-', + (bits & OSC_USB_DP_TUNNELING) ? '+' : '-', + (bits & OSC_USB_PCIE_TUNNELING) ? '+' : '-', + (bits & OSC_USB_XDOMAIN) ? '+' : '-'); +} + +static u8 sb_usb_uuid_str[] = "23A0D13A-26AB-486C-9C5F-0FFA525A575A"; +static void acpi_bus_osc_negotiate_usb_control(void) +{ + u32 capbuf[3], *capbuf_ret; + struct acpi_osc_context context = { + .uuid_str = sb_usb_uuid_str, + .rev = 1, + .cap.length = sizeof(capbuf), + .cap.pointer = capbuf, + }; + acpi_handle handle; + acpi_status status; + u32 control; + + if (!osc_sb_native_usb4_support_confirmed) + return; + + if (ACPI_FAILURE(acpi_get_handle(NULL, "\\_SB", &handle))) + return; + + control = OSC_USB_USB3_TUNNELING | OSC_USB_DP_TUNNELING | + OSC_USB_PCIE_TUNNELING | OSC_USB_XDOMAIN; + + /* + * Run _OSC first with query bit set, trying to get control over + * all tunneling. The platform can then clear out bits in the + * control dword that it does not want to grant to the OS. + */ + capbuf[OSC_QUERY_DWORD] = OSC_QUERY_ENABLE; + capbuf[OSC_SUPPORT_DWORD] = 0; + capbuf[OSC_CONTROL_DWORD] = control; + + status = acpi_run_osc(handle, &context); + if (ACPI_FAILURE(status)) + return; + + if (context.ret.length != sizeof(capbuf)) { + pr_info("USB4 _OSC: returned invalid length buffer\n"); + goto out_free; } - /* do we need to check other returned cap? Sounds no */ + + /* + * Run _OSC again now with query bit clear and the control dword + * matching what the platform granted (which may not have all + * the control bits set). 
+ */ + capbuf_ret = context.ret.pointer; + + capbuf[OSC_QUERY_DWORD] = 0; + capbuf[OSC_CONTROL_DWORD] = capbuf_ret[OSC_CONTROL_DWORD]; + + kfree(context.ret.pointer); + + status = acpi_run_osc(handle, &context); + if (ACPI_FAILURE(status)) + return; + + if (context.ret.length != sizeof(capbuf)) { + pr_info("USB4 _OSC: returned invalid length buffer\n"); + goto out_free; + } + + osc_sb_native_usb4_control = + control & acpi_osc_ctx_get_pci_control(&context); + + acpi_bus_decode_usb_osc("USB4 _OSC: OS supports", control); + acpi_bus_decode_usb_osc("USB4 _OSC: OS controls", + osc_sb_native_usb4_control); + +out_free: + kfree(context.ret.pointer); } /* -------------------------------------------------------------------------- @@ -348,142 +501,126 @@ static void acpi_bus_osc_support(void) -------------------------------------------------------------------------- */ /** - * acpi_bus_notify - * --------------- - * Callback for all 'system-level' device notifications (values 0x00-0x7F). + * acpi_bus_notify - Global system-level (0x00-0x7F) notifications handler + * @handle: Target ACPI object. + * @type: Notification type. + * @data: Ignored. + * + * This only handles notifications related to device hotplug. */ static void acpi_bus_notify(acpi_handle handle, u32 type, void *data) { struct acpi_device *adev; - struct acpi_driver *driver; - u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE; - bool hotplug_event = false; switch (type) { case ACPI_NOTIFY_BUS_CHECK: acpi_handle_debug(handle, "ACPI_NOTIFY_BUS_CHECK event\n"); - hotplug_event = true; break; case ACPI_NOTIFY_DEVICE_CHECK: acpi_handle_debug(handle, "ACPI_NOTIFY_DEVICE_CHECK event\n"); - hotplug_event = true; break; case ACPI_NOTIFY_DEVICE_WAKE: acpi_handle_debug(handle, "ACPI_NOTIFY_DEVICE_WAKE event\n"); - break; + return; case ACPI_NOTIFY_EJECT_REQUEST: acpi_handle_debug(handle, "ACPI_NOTIFY_EJECT_REQUEST event\n"); - hotplug_event = true; break; case ACPI_NOTIFY_DEVICE_CHECK_LIGHT: acpi_handle_debug(handle, "ACPI_NOTIFY_DEVICE_CHECK_LIGHT event\n"); /* TBD: Exactly what does 'light' mean? 
*/ - break; + return; case ACPI_NOTIFY_FREQUENCY_MISMATCH: acpi_handle_err(handle, "Device cannot be configured due " "to a frequency mismatch\n"); - break; + return; case ACPI_NOTIFY_BUS_MODE_MISMATCH: acpi_handle_err(handle, "Device cannot be configured due " "to a bus mode mismatch\n"); - break; + return; case ACPI_NOTIFY_POWER_FAULT: acpi_handle_err(handle, "Device has suffered a power fault\n"); - break; + return; default: acpi_handle_debug(handle, "Unknown event type 0x%x\n", type); - break; - } - - adev = acpi_bus_get_acpi_device(handle); - if (!adev) - goto err; - - driver = adev->driver; - if (driver && driver->ops.notify && - (driver->flags & ACPI_DRIVER_ALL_NOTIFY_EVENTS)) - driver->ops.notify(adev, type); - - if (!hotplug_event) { - acpi_bus_put_acpi_device(adev); return; } - if (ACPI_SUCCESS(acpi_hotplug_schedule(adev, type))) + adev = acpi_get_acpi_dev(handle); + + if (adev && ACPI_SUCCESS(acpi_hotplug_schedule(adev, type))) return; - acpi_bus_put_acpi_device(adev); + acpi_put_acpi_dev(adev); - err: - acpi_evaluate_ost(handle, type, ost_code, NULL); + acpi_evaluate_ost(handle, type, ACPI_OST_SC_NON_SPECIFIC_FAILURE, NULL); } -static void acpi_device_notify(acpi_handle handle, u32 event, void *data) +static void acpi_notify_device(acpi_handle handle, u32 event, void *data) { struct acpi_device *device = data; + struct acpi_driver *acpi_drv = to_acpi_driver(device->dev.driver); - device->driver->ops.notify(device, event); + acpi_drv->ops.notify(device, event); } -static void acpi_device_notify_fixed(void *data) +static int acpi_device_install_notify_handler(struct acpi_device *device, + struct acpi_driver *acpi_drv) { - struct acpi_device *device = data; + u32 type = acpi_drv->flags & ACPI_DRIVER_ALL_NOTIFY_EVENTS ? + ACPI_ALL_NOTIFY : ACPI_DEVICE_NOTIFY; + acpi_status status; - /* Fixed hardware devices have no handles */ - acpi_device_notify(NULL, ACPI_FIXED_HARDWARE_EVENT, device); + status = acpi_install_notify_handler(device->handle, type, + acpi_notify_device, device); + if (ACPI_FAILURE(status)) + return -EINVAL; + + return 0; } -static u32 acpi_device_fixed_event(void *data) +static void acpi_device_remove_notify_handler(struct acpi_device *device, + struct acpi_driver *acpi_drv) { - acpi_os_execute(OSL_NOTIFY_HANDLER, acpi_device_notify_fixed, data); - return ACPI_INTERRUPT_HANDLED; + u32 type = acpi_drv->flags & ACPI_DRIVER_ALL_NOTIFY_EVENTS ? 
+ ACPI_ALL_NOTIFY : ACPI_DEVICE_NOTIFY; + + acpi_remove_notify_handler(device->handle, type, + acpi_notify_device); + + acpi_os_wait_events_complete(); } -static int acpi_device_install_notify_handler(struct acpi_device *device) +int acpi_dev_install_notify_handler(struct acpi_device *adev, + u32 handler_type, + acpi_notify_handler handler, void *context) { acpi_status status; - if (device->device_type == ACPI_BUS_TYPE_POWER_BUTTON) - status = - acpi_install_fixed_event_handler(ACPI_EVENT_POWER_BUTTON, - acpi_device_fixed_event, - device); - else if (device->device_type == ACPI_BUS_TYPE_SLEEP_BUTTON) - status = - acpi_install_fixed_event_handler(ACPI_EVENT_SLEEP_BUTTON, - acpi_device_fixed_event, - device); - else - status = acpi_install_notify_handler(device->handle, - ACPI_DEVICE_NOTIFY, - acpi_device_notify, - device); - + status = acpi_install_notify_handler(adev->handle, handler_type, + handler, context); if (ACPI_FAILURE(status)) - return -EINVAL; + return -ENODEV; + return 0; } +EXPORT_SYMBOL_GPL(acpi_dev_install_notify_handler); -static void acpi_device_remove_notify_handler(struct acpi_device *device) +void acpi_dev_remove_notify_handler(struct acpi_device *adev, + u32 handler_type, + acpi_notify_handler handler) { - if (device->device_type == ACPI_BUS_TYPE_POWER_BUTTON) - acpi_remove_fixed_event_handler(ACPI_EVENT_POWER_BUTTON, - acpi_device_fixed_event); - else if (device->device_type == ACPI_BUS_TYPE_SLEEP_BUTTON) - acpi_remove_fixed_event_handler(ACPI_EVENT_SLEEP_BUTTON, - acpi_device_fixed_event); - else - acpi_remove_notify_handler(device->handle, ACPI_DEVICE_NOTIFY, - acpi_device_notify); + acpi_remove_notify_handler(adev->handle, handler_type, handler); + acpi_os_wait_events_complete(); } +EXPORT_SYMBOL_GPL(acpi_dev_remove_notify_handler); /* Handle events targeting \_SB device (at present only graceful shutdown) */ @@ -517,8 +654,9 @@ static void acpi_sb_notify(acpi_handle handle, u32 event, void *data) if (event == ACPI_SB_NOTIFY_SHUTDOWN_REQUEST) { if (!work_busy(&acpi_sb_work)) schedule_work(&acpi_sb_work); - } else + } else { pr_warn("event %x is not supported by \\_SB device\n", event); + } } static int __init acpi_setup_sb_notify_handler(void) @@ -564,6 +702,7 @@ struct device *acpi_get_first_physical_node(struct acpi_device *adev) mutex_unlock(physical_node_lock); return phys_dev; } +EXPORT_SYMBOL_GPL(acpi_get_first_physical_node); static struct acpi_device *acpi_primary_dev_companion(struct acpi_device *adev, const struct device *dev) @@ -611,7 +750,7 @@ bool acpi_device_is_first_physical_node(struct acpi_device *adev, * resources available from it but they will be matched normally using functions * provided by their bus types (and analogously for their modalias). */ -struct acpi_device *acpi_companion_match(const struct device *dev) +const struct acpi_device *acpi_companion_match(const struct device *dev) { struct acpi_device *adev; @@ -635,7 +774,7 @@ struct acpi_device *acpi_companion_match(const struct device *dev) * identifiers and a _DSD object with the "compatible" property, use that * property to match against the given list of identifiers. */ -static bool acpi_of_match_device(struct acpi_device *adev, +static bool acpi_of_match_device(const struct acpi_device *adev, const struct of_device_id *of_match_table, const struct of_device_id **of_id) { @@ -689,7 +828,7 @@ static bool acpi_of_modalias(struct acpi_device *adev, str = obj->string.pointer; chr = strchr(str, ','); - strlcpy(modalias, chr ? chr + 1 : str, len); + strscpy(modalias, chr ? 
chr + 1 : str, len); return true; } @@ -701,15 +840,16 @@ static bool acpi_of_modalias(struct acpi_device *adev, * @modalias: Pointer to buffer that modalias value will be copied into * @len: Length of modalias buffer * - * This is a counterpart of of_modalias_node() for struct acpi_device objects. - * If there is a compatible string for @adev, it will be copied to @modalias - * with the vendor prefix stripped; otherwise, @default_id will be used. + * This is a counterpart of of_alias_from_compatible() for struct acpi_device + * objects. If there is a compatible string for @adev, it will be copied to + * @modalias with the vendor prefix stripped; otherwise, @default_id will be + * used. */ void acpi_set_modalias(struct acpi_device *adev, const char *default_id, char *modalias, size_t len) { if (!acpi_of_modalias(adev, modalias, len)) - strlcpy(modalias, default_id, len); + strscpy(modalias, default_id, len); } EXPORT_SYMBOL_GPL(acpi_set_modalias); @@ -736,7 +876,7 @@ static bool __acpi_match_device_cls(const struct acpi_device_id *id, return true; } -static bool __acpi_match_device(struct acpi_device *device, +static bool __acpi_match_device(const struct acpi_device *device, const struct acpi_device_id *acpi_ids, const struct of_device_id *of_ids, const struct acpi_device_id **acpi_id, @@ -779,6 +919,26 @@ out_acpi_match: } /** + * acpi_match_acpi_device - Match an ACPI device against a given list of ACPI IDs + * @ids: Array of struct acpi_device_id objects to match against. + * @adev: The ACPI device pointer to match. + * + * Match the ACPI device @adev against a given list of ACPI IDs @ids. + * + * Return: + * a pointer to the first matching ACPI ID on success or %NULL on failure. + */ +const struct acpi_device_id *acpi_match_acpi_device(const struct acpi_device_id *ids, + const struct acpi_device *adev) +{ + const struct acpi_device_id *id = NULL; + + __acpi_match_device(adev, ids, NULL, &id, NULL); + return id; +} +EXPORT_SYMBOL_GPL(acpi_match_acpi_device); + +/** * acpi_match_device - Match a struct device against a given list of ACPI IDs * @ids: Array of struct acpi_device_id object to match against. * @dev: The device structure to match. 
@@ -792,18 +952,30 @@ out_acpi_match: const struct acpi_device_id *acpi_match_device(const struct acpi_device_id *ids, const struct device *dev) { - const struct acpi_device_id *id = NULL; - - __acpi_match_device(acpi_companion_match(dev), ids, NULL, &id, NULL); - return id; + return acpi_match_acpi_device(ids, acpi_companion_match(dev)); } EXPORT_SYMBOL_GPL(acpi_match_device); +static const void *acpi_of_device_get_match_data(const struct device *dev) +{ + struct acpi_device *adev = ACPI_COMPANION(dev); + const struct of_device_id *match = NULL; + + if (!acpi_of_match_device(adev, dev->driver->of_match_table, &match)) + return NULL; + + return match->data; +} + const void *acpi_device_get_match_data(const struct device *dev) { + const struct acpi_device_id *acpi_ids = dev->driver->acpi_match_table; const struct acpi_device_id *match; - match = acpi_match_device(dev->driver->acpi_match_table, dev); + if (!acpi_ids) + return acpi_of_device_get_match_data(dev); + + match = acpi_match_device(acpi_ids, dev); if (!match) return NULL; @@ -821,14 +993,13 @@ EXPORT_SYMBOL(acpi_match_device_ids); bool acpi_driver_match_device(struct device *dev, const struct device_driver *drv) { - if (!drv->acpi_match_table) - return acpi_of_match_device(ACPI_COMPANION(dev), - drv->of_match_table, - NULL); - - return __acpi_match_device(acpi_companion_match(dev), - drv->acpi_match_table, drv->of_match_table, - NULL, NULL); + const struct acpi_device_id *acpi_ids = drv->acpi_match_table; + const struct of_device_id *of_ids = drv->of_match_table; + + if (!acpi_ids) + return acpi_of_match_device(ACPI_COMPANION(dev), of_ids, NULL); + + return __acpi_match_device(acpi_companion_match(dev), acpi_ids, of_ids, NULL, NULL); } EXPORT_SYMBOL_GPL(acpi_driver_match_device); @@ -837,28 +1008,26 @@ EXPORT_SYMBOL_GPL(acpi_driver_match_device); -------------------------------------------------------------------------- */ /** - * acpi_bus_register_driver - register a driver with the ACPI bus + * __acpi_bus_register_driver - register a driver with the ACPI bus * @driver: driver being registered + * @owner: owning module/driver * * Registers a driver with the ACPI bus. Searches the namespace for all * devices that match the driver's criteria and binds. Returns zero for * success or a negative error status for failure. 
*/ -int acpi_bus_register_driver(struct acpi_driver *driver) +int __acpi_bus_register_driver(struct acpi_driver *driver, struct module *owner) { - int ret; - if (acpi_disabled) return -ENODEV; driver->drv.name = driver->name; driver->drv.bus = &acpi_bus_type; - driver->drv.owner = driver->owner; + driver->drv.owner = owner; - ret = driver_register(&driver->drv); - return ret; + return driver_register(&driver->drv); } -EXPORT_SYMBOL(acpi_bus_register_driver); +EXPORT_SYMBOL(__acpi_bus_register_driver); /** * acpi_bus_unregister_driver - unregisters a driver with the ACPI bus @@ -878,16 +1047,16 @@ EXPORT_SYMBOL(acpi_bus_unregister_driver); ACPI Bus operations -------------------------------------------------------------------------- */ -static int acpi_bus_match(struct device *dev, struct device_driver *drv) +static int acpi_bus_match(struct device *dev, const struct device_driver *drv) { struct acpi_device *acpi_dev = to_acpi_device(dev); - struct acpi_driver *acpi_drv = to_acpi_driver(drv); + const struct acpi_driver *acpi_drv = to_acpi_driver(drv); return acpi_dev->flags.match_driver && !acpi_match_device_ids(acpi_dev, acpi_drv->ids); } -static int acpi_device_uevent(struct device *dev, struct kobj_uevent_env *env) +static int acpi_device_uevent(const struct device *dev, struct kobj_uevent_env *env) { return __acpi_device_uevent_modalias(to_acpi_device(dev), env); } @@ -905,51 +1074,49 @@ static int acpi_device_probe(struct device *dev) return -ENOSYS; ret = acpi_drv->ops.add(acpi_dev); - if (ret) + if (ret) { + acpi_dev->driver_data = NULL; return ret; + } - acpi_dev->driver = acpi_drv; - ACPI_DEBUG_PRINT((ACPI_DB_INFO, - "Driver [%s] successfully bound to device [%s]\n", - acpi_drv->name, acpi_dev->pnp.bus_id)); + pr_debug("Driver [%s] successfully bound to device [%s]\n", + acpi_drv->name, acpi_dev->pnp.bus_id); if (acpi_drv->ops.notify) { - ret = acpi_device_install_notify_handler(acpi_dev); + ret = acpi_device_install_notify_handler(acpi_dev, acpi_drv); if (ret) { if (acpi_drv->ops.remove) acpi_drv->ops.remove(acpi_dev); - acpi_dev->driver = NULL; acpi_dev->driver_data = NULL; return ret; } } - ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found driver [%s] for device [%s]\n", - acpi_drv->name, acpi_dev->pnp.bus_id)); + pr_debug("Found driver [%s] for device [%s]\n", acpi_drv->name, + acpi_dev->pnp.bus_id); + get_device(dev); return 0; } -static int acpi_device_remove(struct device *dev) +static void acpi_device_remove(struct device *dev) { struct acpi_device *acpi_dev = to_acpi_device(dev); - struct acpi_driver *acpi_drv = acpi_dev->driver; + struct acpi_driver *acpi_drv = to_acpi_driver(dev->driver); + + if (acpi_drv->ops.notify) + acpi_device_remove_notify_handler(acpi_dev, acpi_drv); + + if (acpi_drv->ops.remove) + acpi_drv->ops.remove(acpi_dev); - if (acpi_drv) { - if (acpi_drv->ops.notify) - acpi_device_remove_notify_handler(acpi_dev); - if (acpi_drv->ops.remove) - acpi_drv->ops.remove(acpi_dev); - } - acpi_dev->driver = NULL; acpi_dev->driver_data = NULL; put_device(dev); - return 0; } -struct bus_type acpi_bus_type = { +const struct bus_type acpi_bus_type = { .name = "acpi", .match = acpi_bus_match, .probe = acpi_device_probe, @@ -957,6 +1124,51 @@ struct bus_type acpi_bus_type = { .uevent = acpi_device_uevent, }; +int acpi_bus_for_each_dev(int (*fn)(struct device *, void *), void *data) +{ + return bus_for_each_dev(&acpi_bus_type, NULL, data, fn); +} +EXPORT_SYMBOL_GPL(acpi_bus_for_each_dev); + +struct acpi_dev_walk_context { + int (*fn)(struct acpi_device *, void *); + void *data; +}; 
+ +static int acpi_dev_for_one_check(struct device *dev, void *context) +{ + struct acpi_dev_walk_context *adwc = context; + + if (dev->bus != &acpi_bus_type) + return 0; + + return adwc->fn(to_acpi_device(dev), adwc->data); +} +EXPORT_SYMBOL_GPL(acpi_dev_for_each_child); + +int acpi_dev_for_each_child(struct acpi_device *adev, + int (*fn)(struct acpi_device *, void *), void *data) +{ + struct acpi_dev_walk_context adwc = { + .fn = fn, + .data = data, + }; + + return device_for_each_child(&adev->dev, &adwc, acpi_dev_for_one_check); +} + +int acpi_dev_for_each_child_reverse(struct acpi_device *adev, + int (*fn)(struct acpi_device *, void *), + void *data) +{ + struct acpi_dev_walk_context adwc = { + .fn = fn, + .data = data, + }; + + return device_for_each_child_reverse(&adev->dev, &adwc, acpi_dev_for_one_check); +} + /* -------------------------------------------------------------------------- Initialization/Cleanup -------------------------------------------------------------------------- */ @@ -988,16 +1200,22 @@ static int __init acpi_bus_init_irq(void) case ACPI_IRQ_MODEL_PLATFORM: message = "platform specific model"; break; + case ACPI_IRQ_MODEL_LPIC: + message = "LPIC"; + break; + case ACPI_IRQ_MODEL_RINTC: + message = "RINTC"; + break; default: - printk(KERN_WARNING PREFIX "Unknown interrupt routing model\n"); + pr_info("Unknown interrupt routing model\n"); return -ENODEV; } - printk(KERN_INFO PREFIX "Using %s for interrupt routing\n", message); + pr_info("Using %s for interrupt routing\n", message); status = acpi_execute_simple_method(NULL, "\\_PIC", acpi_irq_model); if (ACPI_FAILURE(status) && (status != AE_NOT_FOUND)) { - ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PIC")); + pr_info("_PIC evaluation failed: %s\n", acpi_format_exception(status)); return -ENODEV; } @@ -1021,7 +1239,7 @@ void __init acpi_early_init(void) if (acpi_disabled) return; - printk(KERN_INFO PREFIX "Core revision %08x\n", ACPI_CA_VERSION); + pr_info("Core revision %08x\n", ACPI_CA_VERSION); /* enable workarounds, unless strict ACPI spec. 
compliance */ if (!acpi_strict) @@ -1042,15 +1260,13 @@ void __init acpi_early_init(void) status = acpi_reallocate_root_table(); if (ACPI_FAILURE(status)) { - printk(KERN_ERR PREFIX - "Unable to reallocate ACPI tables\n"); + pr_err("Unable to reallocate ACPI tables\n"); goto error0; } status = acpi_initialize_subsystem(); if (ACPI_FAILURE(status)) { - printk(KERN_ERR PREFIX - "Unable to initialize the ACPI Interpreter\n"); + pr_err("Unable to initialize the ACPI Interpreter\n"); goto error0; } @@ -1096,7 +1312,7 @@ void __init acpi_subsystem_init(void) status = acpi_enable_subsystem(~ACPI_NO_ACPI_ENABLE); if (ACPI_FAILURE(status)) { - printk(KERN_ERR PREFIX "Unable to enable ACPI\n"); + pr_err("Unable to enable ACPI\n"); disable_acpi(); } else { /* @@ -1111,7 +1327,8 @@ void __init acpi_subsystem_init(void) static acpi_status acpi_bus_table_handler(u32 event, void *table, void *context) { - acpi_scan_table_handler(event, table, context); + if (event == ACPI_TABLE_EVENT_LOAD) + acpi_scan_table_notify(); return acpi_sysfs_table_handler(event, table, context); } @@ -1125,8 +1342,7 @@ static int __init acpi_bus_init(void) status = acpi_load_tables(); if (ACPI_FAILURE(status)) { - printk(KERN_ERR PREFIX - "Unable to load the System Description Tables\n"); + pr_err("Unable to load the System Description Tables\n"); goto error1; } @@ -1144,25 +1360,22 @@ static int __init acpi_bus_init(void) status = acpi_enable_subsystem(ACPI_NO_ACPI_ENABLE); if (ACPI_FAILURE(status)) { - printk(KERN_ERR PREFIX - "Unable to start the ACPI Interpreter\n"); + pr_err("Unable to start the ACPI Interpreter\n"); goto error1; } status = acpi_initialize_objects(ACPI_FULL_INITIALIZATION); if (ACPI_FAILURE(status)) { - printk(KERN_ERR PREFIX "Unable to initialize ACPI objects\n"); + pr_err("Unable to initialize ACPI objects\n"); goto error1; } - /* Set capability bits for _OSC under processor scope */ - acpi_early_processor_osc(); - /* * _OSC method may exist in module level code, * so it must be run after ACPI_FULL_INITIALIZATION */ - acpi_bus_osc_support(); + acpi_bus_osc_negotiate_platform_control(); + acpi_bus_osc_negotiate_usb_control(); /* * _PDC control method may load dynamic SSDT tables, @@ -1172,7 +1385,7 @@ static int __init acpi_bus_init(void) acpi_sysfs_init(); - acpi_early_processor_set_pdc(); + acpi_early_processor_control_setup(); /* * Maybe EC region is required at bus_scan/acpi_get_devices. So it @@ -1180,7 +1393,7 @@ static int __init acpi_bus_init(void) */ acpi_ec_dsdt_probe(); - printk(KERN_INFO PREFIX "Interpreter enabled\n"); + pr_info("Interpreter enabled\n"); /* Initialize sleep structures */ acpi_sleep_init(); @@ -1193,14 +1406,13 @@ static int __init acpi_bus_init(void) goto error1; /* - * Register the for all standard device notifications. + * Register for all standard device notifications. 
*/ status = acpi_install_notify_handler(ACPI_ROOT_OBJECT, ACPI_SYSTEM_NOTIFY, &acpi_bus_notify, NULL); if (ACPI_FAILURE(status)) { - printk(KERN_ERR PREFIX - "Unable to register for device notifications\n"); + pr_err("Unable to register for system notifications\n"); goto error1; } @@ -1222,29 +1434,38 @@ static int __init acpi_bus_init(void) struct kobject *acpi_kobj; EXPORT_SYMBOL_GPL(acpi_kobj); +void __weak __init acpi_arch_init(void) { } + static int __init acpi_init(void) { int result; if (acpi_disabled) { - printk(KERN_INFO PREFIX "Interpreter disabled.\n"); + pr_info("Interpreter disabled.\n"); return -ENODEV; } acpi_kobj = kobject_create_and_add("acpi", firmware_kobj); if (!acpi_kobj) { - printk(KERN_WARNING "%s: kset create error\n", __func__); - acpi_kobj = NULL; + pr_err("Failed to register kobject\n"); + return -ENOMEM; } + init_prmt(); + acpi_init_pcc(); result = acpi_bus_init(); if (result) { + kobject_put(acpi_kobj); disable_acpi(); return result; } + acpi_init_ffh(); pci_mmcfg_late_init(); - acpi_iort_init(); + acpi_viot_early_init(); + acpi_hest_init(); + acpi_ghes_init(); + acpi_arch_init(); acpi_scan_init(); acpi_ec_init(); acpi_debugfs_init(); @@ -1252,6 +1473,7 @@ static int __init acpi_init(void) acpi_wakeup_device_init(); acpi_debugger_init(); acpi_setup_sb_notify_handler(); + acpi_viot_init(); return 0; } diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c index a19ff3977ac4..3c6dd9b4ba0a 100644 --- a/drivers/acpi/button.c +++ b/drivers/acpi/button.c @@ -1,22 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /* * button.c - ACPI Button Driver * * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com> * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com> - * - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or (at - * your option) any later version. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. 
- * - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ #define pr_fmt(fmt) "ACPI: button: " fmt @@ -34,35 +21,37 @@ #include <linux/dmi.h> #include <acpi/button.h> -#define PREFIX "ACPI: " - #define ACPI_BUTTON_CLASS "button" -#define ACPI_BUTTON_FILE_INFO "info" #define ACPI_BUTTON_FILE_STATE "state" #define ACPI_BUTTON_TYPE_UNKNOWN 0x00 +#define ACPI_BUTTON_NOTIFY_WAKE 0x02 #define ACPI_BUTTON_NOTIFY_STATUS 0x80 #define ACPI_BUTTON_SUBCLASS_POWER "power" -#define ACPI_BUTTON_HID_POWER "PNP0C0C" #define ACPI_BUTTON_DEVICE_NAME_POWER "Power Button" #define ACPI_BUTTON_TYPE_POWER 0x01 #define ACPI_BUTTON_SUBCLASS_SLEEP "sleep" -#define ACPI_BUTTON_HID_SLEEP "PNP0C0E" #define ACPI_BUTTON_DEVICE_NAME_SLEEP "Sleep Button" #define ACPI_BUTTON_TYPE_SLEEP 0x03 #define ACPI_BUTTON_SUBCLASS_LID "lid" -#define ACPI_BUTTON_HID_LID "PNP0C0D" #define ACPI_BUTTON_DEVICE_NAME_LID "Lid Switch" #define ACPI_BUTTON_TYPE_LID 0x05 -#define ACPI_BUTTON_LID_INIT_IGNORE 0x00 -#define ACPI_BUTTON_LID_INIT_OPEN 0x01 -#define ACPI_BUTTON_LID_INIT_METHOD 0x02 +enum { + ACPI_BUTTON_LID_INIT_IGNORE, + ACPI_BUTTON_LID_INIT_OPEN, + ACPI_BUTTON_LID_INIT_METHOD, + ACPI_BUTTON_LID_INIT_DISABLED, +}; -#define _COMPONENT ACPI_BUTTON_COMPONENT -ACPI_MODULE_NAME("button"); +static const char * const lid_init_state_str[] = { + [ACPI_BUTTON_LID_INIT_IGNORE] = "ignore", + [ACPI_BUTTON_LID_INIT_OPEN] = "open", + [ACPI_BUTTON_LID_INIT_METHOD] = "method", + [ACPI_BUTTON_LID_INIT_DISABLED] = "disabled", +}; MODULE_AUTHOR("Paul Diefenbaugh"); MODULE_DESCRIPTION("ACPI Button Driver"); @@ -78,25 +67,86 @@ static const struct acpi_device_id button_device_ids[] = { }; MODULE_DEVICE_TABLE(acpi, button_device_ids); -/* - * Some devices which don't even have a lid in anyway have a broken _LID - * method (e.g. pointing to a floating gpio pin) causing spurious LID events. - */ -static const struct dmi_system_id lid_blacklst[] = { +/* Please keep this list sorted alphabetically by vendor and model */ +static const struct dmi_system_id dmi_lid_quirks[] = { { - /* GP-electronic T701 */ + /* GP-electronic T701, _LID method points to a floating GPIO */ .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Insyde"), DMI_MATCH(DMI_PRODUCT_NAME, "T701"), DMI_MATCH(DMI_BIOS_VERSION, "BYT70A.YNCHENG.WIN.007"), }, + .driver_data = (void *)(long)ACPI_BUTTON_LID_INIT_DISABLED, + }, + { + /* Nextbook Ares 8A tablet, _LID device always reports lid closed */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Insyde"), + DMI_MATCH(DMI_PRODUCT_NAME, "CherryTrail"), + DMI_MATCH(DMI_BIOS_VERSION, "M882"), + }, + .driver_data = (void *)(long)ACPI_BUTTON_LID_INIT_DISABLED, + }, + { + /* + * Lenovo Yoga 9 14ITL5, initial notification of the LID device + * never happens. + */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "82BG"), + }, + .driver_data = (void *)(long)ACPI_BUTTON_LID_INIT_OPEN, + }, + { + /* + * Medion Akoya E2215T, notification of the LID device only + * happens on close, not on open and _LID always returns closed. + */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "MEDION"), + DMI_MATCH(DMI_PRODUCT_NAME, "E2215T"), + }, + .driver_data = (void *)(long)ACPI_BUTTON_LID_INIT_OPEN, + }, + { + /* + * Medion Akoya E2228T, notification of the LID device only + * happens on close, not on open and _LID always returns closed. 
+ */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "MEDION"), + DMI_MATCH(DMI_PRODUCT_NAME, "E2228T"), + }, + .driver_data = (void *)(long)ACPI_BUTTON_LID_INIT_OPEN, + }, + { + /* + * Razer Blade Stealth 13 late 2019, notification of the LID device + * only happens on close, not on open and _LID always returns closed. + */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Razer"), + DMI_MATCH(DMI_PRODUCT_NAME, "Razer Blade Stealth 13 Late 2019"), + }, + .driver_data = (void *)(long)ACPI_BUTTON_LID_INIT_OPEN, + }, + { + /* + * Samsung galaxybook2 ,initial _LID device notification returns + * lid closed. + */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."), + DMI_MATCH(DMI_PRODUCT_NAME, "750XED"), + }, + .driver_data = (void *)(long)ACPI_BUTTON_LID_INIT_OPEN, }, {} }; static int acpi_button_add(struct acpi_device *device); -static int acpi_button_remove(struct acpi_device *device); -static void acpi_button_notify(struct acpi_device *device, u32 event); +static void acpi_button_remove(struct acpi_device *device); #ifdef CONFIG_PM_SLEEP static int acpi_button_suspend(struct device *dev); @@ -114,7 +164,6 @@ static struct acpi_driver acpi_button_driver = { .ops = { .add = acpi_button_add, .remove = acpi_button_remove, - .notify = acpi_button_notify, }, .drv.pm = &acpi_button_pm, }; @@ -127,20 +176,17 @@ struct acpi_button { int last_state; ktime_t last_time; bool suspended; + bool lid_state_initialized; }; -static BLOCKING_NOTIFIER_HEAD(acpi_lid_notifier); static struct acpi_device *lid_device; -static u8 lid_init_state = ACPI_BUTTON_LID_INIT_METHOD; +static long lid_init_state = -1; static unsigned long lid_report_interval __read_mostly = 500; module_param(lid_report_interval, ulong, 0644); MODULE_PARM_DESC(lid_report_interval, "Interval (ms) between lid key events"); -/* -------------------------------------------------------------------------- - FS Interface (/proc) - -------------------------------------------------------------------------- */ - +/* FS Interface (/proc) */ static struct proc_dir_entry *acpi_button_dir; static struct proc_dir_entry *acpi_lid_dir; @@ -159,7 +205,6 @@ static int acpi_lid_evaluate_state(struct acpi_device *device) static int acpi_lid_notify_state(struct acpi_device *device, int state) { struct acpi_button *button = acpi_driver_data(device); - int ret; ktime_t next_report; bool do_update; @@ -236,18 +281,7 @@ static int acpi_lid_notify_state(struct acpi_device *device, int state) button->last_time = ktime_get(); } - ret = blocking_notifier_call_chain(&acpi_lid_notifier, state, device); - if (ret == NOTIFY_DONE) - ret = blocking_notifier_call_chain(&acpi_lid_notifier, state, - device); - if (ret == NOTIFY_DONE || ret == NOTIFY_OK) { - /* - * It is also regarded as success if the notifier_chain - * returns NOTIFY_OK or NOTIFY_DONE. 
- */ - ret = 0; - } - return ret; + return 0; } static int __maybe_unused acpi_button_state_seq_show(struct seq_file *seq, @@ -273,7 +307,7 @@ static int acpi_button_add_fs(struct acpi_device *device) return 0; if (acpi_button_dir || acpi_lid_dir) { - printk(KERN_ERR PREFIX "More than one Lid device found!\n"); + pr_info("More than one Lid device found!\n"); return -EEXIST; } @@ -341,21 +375,7 @@ static int acpi_button_remove_fs(struct acpi_device *device) return 0; } -/* -------------------------------------------------------------------------- - Driver Interface - -------------------------------------------------------------------------- */ -int acpi_lid_notifier_register(struct notifier_block *nb) -{ - return blocking_notifier_chain_register(&acpi_lid_notifier, nb); -} -EXPORT_SYMBOL(acpi_lid_notifier_register); - -int acpi_lid_notifier_unregister(struct notifier_block *nb) -{ - return blocking_notifier_chain_unregister(&acpi_lid_notifier, nb); -} -EXPORT_SYMBOL(acpi_lid_notifier_unregister); - +/* Driver Interface */ int acpi_lid_open(void) { if (!lid_device) @@ -382,6 +402,8 @@ static int acpi_lid_update_state(struct acpi_device *device, static void acpi_lid_initialize_state(struct acpi_device *device) { + struct acpi_button *button = acpi_driver_data(device); + switch (lid_init_state) { case ACPI_BUTTON_LID_INIT_OPEN: (void)acpi_lid_notify_state(device, 1); @@ -393,51 +415,74 @@ static void acpi_lid_initialize_state(struct acpi_device *device) default: break; } + + button->lid_state_initialized = true; } -static void acpi_button_notify(struct acpi_device *device, u32 event) +static void acpi_lid_notify(acpi_handle handle, u32 event, void *data) { - struct acpi_button *button = acpi_driver_data(device); + struct acpi_device *device = data; + struct acpi_button *button; + + if (event != ACPI_BUTTON_NOTIFY_STATUS) { + acpi_handle_debug(device->handle, "Unsupported event [0x%x]\n", + event); + return; + } + + button = acpi_driver_data(device); + if (!button->lid_state_initialized) + return; + + acpi_lid_update_state(device, true); +} + +static void acpi_button_notify(acpi_handle handle, u32 event, void *data) +{ + struct acpi_device *device = data; + struct acpi_button *button; struct input_dev *input; - int users; + int keycode; switch (event) { - case ACPI_FIXED_HARDWARE_EVENT: - event = ACPI_BUTTON_NOTIFY_STATUS; - /* fall through */ case ACPI_BUTTON_NOTIFY_STATUS: - input = button->input; - if (button->type == ACPI_BUTTON_TYPE_LID) { - mutex_lock(&button->input->mutex); - users = button->input->users; - mutex_unlock(&button->input->mutex); - if (users) - acpi_lid_update_state(device, true); - } else { - int keycode; - - acpi_pm_wakeup_event(&device->dev); - if (button->suspended) - break; - - keycode = test_bit(KEY_SLEEP, input->keybit) ? - KEY_SLEEP : KEY_POWER; - input_report_key(input, keycode, 1); - input_sync(input); - input_report_key(input, keycode, 0); - input_sync(input); - - acpi_bus_generate_netlink_event( - device->pnp.device_class, - dev_name(&device->dev), - event, ++button->pushed); - } break; - default: - ACPI_DEBUG_PRINT((ACPI_DB_INFO, - "Unsupported event [0x%x]\n", event)); + case ACPI_BUTTON_NOTIFY_WAKE: break; + default: + acpi_handle_debug(device->handle, "Unsupported event [0x%x]\n", + event); + return; } + + acpi_pm_wakeup_event(&device->dev); + + button = acpi_driver_data(device); + if (button->suspended || event == ACPI_BUTTON_NOTIFY_WAKE) + return; + + input = button->input; + keycode = test_bit(KEY_SLEEP, input->keybit) ? 
KEY_SLEEP : KEY_POWER; + + input_report_key(input, keycode, 1); + input_sync(input); + input_report_key(input, keycode, 0); + input_sync(input); + + acpi_bus_generate_netlink_event(device->pnp.device_class, + dev_name(&device->dev), + event, ++button->pushed); +} + +static void acpi_button_notify_run(void *data) +{ + acpi_button_notify(NULL, ACPI_BUTTON_NOTIFY_STATUS, data); +} + +static u32 acpi_button_event(void *data) +{ + acpi_os_execute(OSL_NOTIFY_HANDLER, acpi_button_notify_run, data); + return ACPI_INTERRUPT_HANDLED; } #ifdef CONFIG_PM_SLEEP @@ -452,12 +497,24 @@ static int acpi_button_suspend(struct device *dev) static int acpi_button_resume(struct device *dev) { + struct input_dev *input; struct acpi_device *device = to_acpi_device(dev); struct acpi_button *button = acpi_driver_data(device); button->suspended = false; - if (button->type == ACPI_BUTTON_TYPE_LID && button->input->users) + if (button->type == ACPI_BUTTON_TYPE_LID) { + button->last_state = !!acpi_lid_evaluate_state(device); + button->last_time = ktime_get(); acpi_lid_initialize_state(device); + } + + if (button->type == ACPI_BUTTON_TYPE_POWER) { + input = button->input; + input_report_key(input, KEY_WAKEUP, 1); + input_sync(input); + input_report_key(input, KEY_WAKEUP, 0); + input_sync(input); + } return 0; } #endif @@ -476,13 +533,16 @@ static int acpi_lid_input_open(struct input_dev *input) static int acpi_button_add(struct acpi_device *device) { + acpi_notify_handler handler; struct acpi_button *button; struct input_dev *input; const char *hid = acpi_device_hid(device); + acpi_status status; char *name, *class; - int error; + int error = 0; - if (!strcmp(hid, ACPI_BUTTON_HID_LID) && dmi_check_system(lid_blacklst)) + if (!strcmp(hid, ACPI_BUTTON_HID_LID) && + lid_init_state == ACPI_BUTTON_LID_INIT_DISABLED) return -ENODEV; button = kzalloc(sizeof(struct acpi_button), GFP_KERNEL); @@ -503,30 +563,36 @@ static int acpi_button_add(struct acpi_device *device) if (!strcmp(hid, ACPI_BUTTON_HID_POWER) || !strcmp(hid, ACPI_BUTTON_HID_POWERF)) { button->type = ACPI_BUTTON_TYPE_POWER; - strcpy(name, ACPI_BUTTON_DEVICE_NAME_POWER); + handler = acpi_button_notify; + strscpy(name, ACPI_BUTTON_DEVICE_NAME_POWER, MAX_ACPI_DEVICE_NAME_LEN); sprintf(class, "%s/%s", ACPI_BUTTON_CLASS, ACPI_BUTTON_SUBCLASS_POWER); } else if (!strcmp(hid, ACPI_BUTTON_HID_SLEEP) || !strcmp(hid, ACPI_BUTTON_HID_SLEEPF)) { button->type = ACPI_BUTTON_TYPE_SLEEP; - strcpy(name, ACPI_BUTTON_DEVICE_NAME_SLEEP); + handler = acpi_button_notify; + strscpy(name, ACPI_BUTTON_DEVICE_NAME_SLEEP, MAX_ACPI_DEVICE_NAME_LEN); sprintf(class, "%s/%s", ACPI_BUTTON_CLASS, ACPI_BUTTON_SUBCLASS_SLEEP); } else if (!strcmp(hid, ACPI_BUTTON_HID_LID)) { button->type = ACPI_BUTTON_TYPE_LID; - strcpy(name, ACPI_BUTTON_DEVICE_NAME_LID); + handler = acpi_lid_notify; + strscpy(name, ACPI_BUTTON_DEVICE_NAME_LID, MAX_ACPI_DEVICE_NAME_LEN); sprintf(class, "%s/%s", ACPI_BUTTON_CLASS, ACPI_BUTTON_SUBCLASS_LID); input->open = acpi_lid_input_open; } else { - printk(KERN_ERR PREFIX "Unsupported hid [%s]\n", hid); + pr_info("Unsupported hid [%s]\n", hid); error = -ENODEV; - goto err_free_input; } - error = acpi_button_add_fs(device); - if (error) - goto err_free_input; + if (!error) + error = acpi_button_add_fs(device); + + if (error) { + input_free_device(input); + goto err_free_button; + } snprintf(button->phys, sizeof(button->phys), "%s/button/input0", hid); @@ -539,6 +605,7 @@ static int acpi_button_add(struct acpi_device *device) switch (button->type) { case ACPI_BUTTON_TYPE_POWER: 
input_set_capability(input, EV_KEY, KEY_POWER); + input_set_capability(input, EV_KEY, KEY_WAKEUP); break; case ACPI_BUTTON_TYPE_SLEEP: @@ -552,8 +619,33 @@ static int acpi_button_add(struct acpi_device *device) input_set_drvdata(input, device); error = input_register_device(input); - if (error) + if (error) { + input_free_device(input); goto err_remove_fs; + } + + switch (device->device_type) { + case ACPI_BUS_TYPE_POWER_BUTTON: + status = acpi_install_fixed_event_handler(ACPI_EVENT_POWER_BUTTON, + acpi_button_event, + device); + break; + case ACPI_BUS_TYPE_SLEEP_BUTTON: + status = acpi_install_fixed_event_handler(ACPI_EVENT_SLEEP_BUTTON, + acpi_button_event, + device); + break; + default: + status = acpi_install_notify_handler(device->handle, + ACPI_ALL_NOTIFY, handler, + device); + break; + } + if (ACPI_FAILURE(status)) { + error = -ENODEV; + goto err_input_unregister; + } + if (button->type == ACPI_BUTTON_TYPE_LID) { /* * This assumes there's only one lid device, or if there are @@ -563,61 +655,72 @@ static int acpi_button_add(struct acpi_device *device) } device_init_wakeup(&device->dev, true); - printk(KERN_INFO PREFIX "%s [%s]\n", name, acpi_device_bid(device)); + pr_info("%s [%s]\n", name, acpi_device_bid(device)); return 0; - err_remove_fs: +err_input_unregister: + input_unregister_device(input); +err_remove_fs: acpi_button_remove_fs(device); - err_free_input: - input_free_device(input); - err_free_button: +err_free_button: kfree(button); return error; } -static int acpi_button_remove(struct acpi_device *device) +static void acpi_button_remove(struct acpi_device *device) { struct acpi_button *button = acpi_driver_data(device); + switch (device->device_type) { + case ACPI_BUS_TYPE_POWER_BUTTON: + acpi_remove_fixed_event_handler(ACPI_EVENT_POWER_BUTTON, + acpi_button_event); + break; + case ACPI_BUS_TYPE_SLEEP_BUTTON: + acpi_remove_fixed_event_handler(ACPI_EVENT_SLEEP_BUTTON, + acpi_button_event); + break; + default: + acpi_remove_notify_handler(device->handle, ACPI_DEVICE_NOTIFY, + button->type == ACPI_BUTTON_TYPE_LID ? 
+ acpi_lid_notify : + acpi_button_notify); + break; + } + acpi_os_wait_events_complete(); + acpi_button_remove_fs(device); input_unregister_device(button->input); kfree(button); - return 0; } static int param_set_lid_init_state(const char *val, const struct kernel_param *kp) { - int result = 0; - - if (!strncmp(val, "open", sizeof("open") - 1)) { - lid_init_state = ACPI_BUTTON_LID_INIT_OPEN; - pr_info("Notify initial lid state as open\n"); - } else if (!strncmp(val, "method", sizeof("method") - 1)) { - lid_init_state = ACPI_BUTTON_LID_INIT_METHOD; - pr_info("Notify initial lid state with _LID return value\n"); - } else if (!strncmp(val, "ignore", sizeof("ignore") - 1)) { - lid_init_state = ACPI_BUTTON_LID_INIT_IGNORE; - pr_info("Do not notify initial lid state\n"); - } else - result = -EINVAL; - return result; + int i; + + i = sysfs_match_string(lid_init_state_str, val); + if (i < 0) + return i; + + lid_init_state = i; + pr_info("Initial lid state set to '%s'\n", lid_init_state_str[i]); + return 0; } -static int param_get_lid_init_state(char *buffer, - const struct kernel_param *kp) +static int param_get_lid_init_state(char *buf, const struct kernel_param *kp) { - switch (lid_init_state) { - case ACPI_BUTTON_LID_INIT_OPEN: - return sprintf(buffer, "open"); - case ACPI_BUTTON_LID_INIT_METHOD: - return sprintf(buffer, "method"); - case ACPI_BUTTON_LID_INIT_IGNORE: - return sprintf(buffer, "ignore"); - default: - return sprintf(buffer, "invalid"); - } - return 0; + int i, c = 0; + + for (i = 0; i < ARRAY_SIZE(lid_init_state_str); i++) + if (i == lid_init_state) + c += sprintf(buf + c, "[%s] ", lid_init_state_str[i]); + else + c += sprintf(buf + c, "%s ", lid_init_state_str[i]); + + buf[c - 1] = '\n'; /* Replace the final space with a newline */ + + return c; } module_param_call(lid_init_state, @@ -627,6 +730,16 @@ MODULE_PARM_DESC(lid_init_state, "Behavior for reporting LID initial state"); static int acpi_button_register_driver(struct acpi_driver *driver) { + const struct dmi_system_id *dmi_id; + + if (lid_init_state == -1) { + dmi_id = dmi_first_match(dmi_lid_quirks); + if (dmi_id) + lid_init_state = (long)dmi_id->driver_data; + else + lid_init_state = ACPI_BUTTON_LID_INIT_METHOD; + } + /* * Modules such as nouveau.ko and i915.ko have a link time dependency * on acpi_lid_open(), and would therefore not be loadable on ACPI diff --git a/drivers/acpi/cm_sbs.c b/drivers/acpi/cm_sbs.c deleted file mode 100644 index d0918d421f90..000000000000 --- a/drivers/acpi/cm_sbs.c +++ /dev/null @@ -1,101 +0,0 @@ -/* - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or (at - * your option) any later version. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. 
- * - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - */ - -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/init.h> -#include <linux/acpi.h> -#include <linux/types.h> -#include <linux/proc_fs.h> -#include <linux/seq_file.h> -#include <acpi/acpi_bus.h> -#include <acpi/acpi_drivers.h> - -#define PREFIX "ACPI: " - -ACPI_MODULE_NAME("cm_sbs"); -#define ACPI_AC_CLASS "ac_adapter" -#define ACPI_BATTERY_CLASS "battery" -#define _COMPONENT ACPI_SBS_COMPONENT -static struct proc_dir_entry *acpi_ac_dir; -static struct proc_dir_entry *acpi_battery_dir; - -static DEFINE_MUTEX(cm_sbs_mutex); - -static int lock_ac_dir_cnt; -static int lock_battery_dir_cnt; - -struct proc_dir_entry *acpi_lock_ac_dir(void) -{ - mutex_lock(&cm_sbs_mutex); - if (!acpi_ac_dir) - acpi_ac_dir = proc_mkdir(ACPI_AC_CLASS, acpi_root_dir); - if (acpi_ac_dir) { - lock_ac_dir_cnt++; - } else { - printk(KERN_ERR PREFIX - "Cannot create %s\n", ACPI_AC_CLASS); - } - mutex_unlock(&cm_sbs_mutex); - return acpi_ac_dir; -} -EXPORT_SYMBOL(acpi_lock_ac_dir); - -void acpi_unlock_ac_dir(struct proc_dir_entry *acpi_ac_dir_param) -{ - mutex_lock(&cm_sbs_mutex); - if (acpi_ac_dir_param) - lock_ac_dir_cnt--; - if (lock_ac_dir_cnt == 0 && acpi_ac_dir_param && acpi_ac_dir) { - remove_proc_entry(ACPI_AC_CLASS, acpi_root_dir); - acpi_ac_dir = NULL; - } - mutex_unlock(&cm_sbs_mutex); -} -EXPORT_SYMBOL(acpi_unlock_ac_dir); - -struct proc_dir_entry *acpi_lock_battery_dir(void) -{ - mutex_lock(&cm_sbs_mutex); - if (!acpi_battery_dir) { - acpi_battery_dir = - proc_mkdir(ACPI_BATTERY_CLASS, acpi_root_dir); - } - if (acpi_battery_dir) { - lock_battery_dir_cnt++; - } else { - printk(KERN_ERR PREFIX - "Cannot create %s\n", ACPI_BATTERY_CLASS); - } - mutex_unlock(&cm_sbs_mutex); - return acpi_battery_dir; -} -EXPORT_SYMBOL(acpi_lock_battery_dir); - -void acpi_unlock_battery_dir(struct proc_dir_entry *acpi_battery_dir_param) -{ - mutex_lock(&cm_sbs_mutex); - if (acpi_battery_dir_param) - lock_battery_dir_cnt--; - if (lock_battery_dir_cnt == 0 && acpi_battery_dir_param - && acpi_battery_dir) { - remove_proc_entry(ACPI_BATTERY_CLASS, acpi_root_dir); - acpi_battery_dir = NULL; - } - mutex_unlock(&cm_sbs_mutex); - return; -} -EXPORT_SYMBOL(acpi_unlock_battery_dir); diff --git a/drivers/acpi/container.c b/drivers/acpi/container.c index 12c240903c18..5b7e3b9ae370 100644 --- a/drivers/acpi/container.c +++ b/drivers/acpi/container.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /* * container.c - ACPI Generic Container Driver * @@ -7,29 +8,12 @@ * Copyright (C) 2004 FUJITSU LIMITED * Copyright (C) 2004, 2013 Intel Corp. * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com> - * - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or (at - * your option) any later version. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. 
- * - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ #include <linux/acpi.h> #include <linux/container.h> #include "internal.h" -#define _COMPONENT ACPI_CONTAINER_COMPONENT -ACPI_MODULE_NAME("container"); - static const struct acpi_device_id container_device_ids[] = { {"ACPI0004", 0}, {"PNP0A05", 0}, @@ -39,17 +23,18 @@ static const struct acpi_device_id container_device_ids[] = { #ifdef CONFIG_ACPI_CONTAINER -static int acpi_container_offline(struct container_dev *cdev) +static int check_offline(struct acpi_device *adev, void *not_used) { - struct acpi_device *adev = ACPI_COMPANION(&cdev->dev); - struct acpi_device *child; + if (acpi_scan_is_offline(adev, false)) + return 0; - /* Check all of the dependent devices' physical companions. */ - list_for_each_entry(child, &adev->children, node) - if (!acpi_scan_is_offline(child, false)) - return -EBUSY; + return -EBUSY; +} - return 0; +static int acpi_container_offline(struct container_dev *cdev) +{ + /* Check all of the dependent devices' physical companions. */ + return acpi_dev_for_each_child(ACPI_COMPANION(&cdev->dev), check_offline, NULL); } static void acpi_container_release(struct device *dev) diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c index 217a782c3e55..3bdeeee3414e 100644 --- a/drivers/acpi/cppc_acpi.c +++ b/drivers/acpi/cppc_acpi.c @@ -1,14 +1,10 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * CPPC (Collaborative Processor Performance Control) methods used by CPUfreq drivers. * * (C) Copyright 2014, 2015 Linaro Ltd. * Author: Ashwin Chaugule <ashwin.chaugule@linaro.org> * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; version 2 - * of the License. - * * CPPC describes a few methods for controlling CPU performance using * information from a per CPU table called CPC. This table is described in * the ACPI v5.0+ specification. 
The table consists of a list of @@ -37,18 +33,20 @@ #define pr_fmt(fmt) "ACPI CPPC: " fmt -#include <linux/cpufreq.h> #include <linux/delay.h> #include <linux/iopoll.h> #include <linux/ktime.h> #include <linux/rwsem.h> #include <linux/wait.h> +#include <linux/topology.h> +#include <linux/dmi.h> +#include <linux/units.h> +#include <linux/unaligned.h> #include <acpi/cppc_acpi.h> struct cppc_pcc_data { - struct mbox_chan *pcc_channel; - void __iomem *pcc_comm_addr; + struct pcc_mbox_chan *pcc_channel; bool pcc_channel_acquired; unsigned int deadline_us; unsigned int pcc_mpar, pcc_mrtt, pcc_nominal; @@ -81,9 +79,9 @@ struct cppc_pcc_data { int refcount; }; -/* Array to represent the PCC channel per subspace id */ +/* Array to represent the PCC channel per subspace ID */ static struct cppc_pcc_data *pcc_data[MAX_PCC_SUBSPACES]; -/* The cpu_pcc_subspace_idx containsper CPU subspace id */ +/* The cpu_pcc_subspace_idx contains per CPU subspace ID */ static DEFINE_PER_CPU(int, cpu_pcc_subspace_idx); /* @@ -96,7 +94,7 @@ static DEFINE_PER_CPU(int, cpu_pcc_subspace_idx); static DEFINE_PER_CPU(struct cpc_desc *, cpc_desc_ptr); /* pcc mapped address + header size + offset within PCC subspace */ -#define GET_PCC_VADDR(offs, pcc_ss_id) (pcc_data[pcc_ss_id]->pcc_comm_addr + \ +#define GET_PCC_VADDR(offs, pcc_ss_id) (pcc_data[pcc_ss_id]->pcc_channel->shmem + \ 0x8 + (offs)) /* Check if a CPC register is in PCC */ @@ -104,17 +102,46 @@ static DEFINE_PER_CPU(struct cpc_desc *, cpc_desc_ptr); (cpc)->cpc_entry.reg.space_id == \ ACPI_ADR_SPACE_PLATFORM_COMM) -/* Evalutes to True if reg is a NULL register descriptor */ +/* Check if a CPC register is in FFH */ +#define CPC_IN_FFH(cpc) ((cpc)->type == ACPI_TYPE_BUFFER && \ + (cpc)->cpc_entry.reg.space_id == \ + ACPI_ADR_SPACE_FIXED_HARDWARE) + +/* Check if a CPC register is in SystemMemory */ +#define CPC_IN_SYSTEM_MEMORY(cpc) ((cpc)->type == ACPI_TYPE_BUFFER && \ + (cpc)->cpc_entry.reg.space_id == \ + ACPI_ADR_SPACE_SYSTEM_MEMORY) + +/* Check if a CPC register is in SystemIo */ +#define CPC_IN_SYSTEM_IO(cpc) ((cpc)->type == ACPI_TYPE_BUFFER && \ + (cpc)->cpc_entry.reg.space_id == \ + ACPI_ADR_SPACE_SYSTEM_IO) + +/* Evaluates to True if reg is a NULL register descriptor */ #define IS_NULL_REG(reg) ((reg)->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY && \ (reg)->address == 0 && \ (reg)->bit_width == 0 && \ (reg)->bit_offset == 0 && \ (reg)->access_width == 0) -/* Evalutes to True if an optional cpc field is supported */ +/* Evaluates to True if an optional cpc field is supported */ #define CPC_SUPPORTED(cpc) ((cpc)->type == ACPI_TYPE_INTEGER ? \ !!(cpc)->cpc_entry.int_value : \ !IS_NULL_REG(&(cpc)->cpc_entry.reg)) + +/* + * Each bit indicates the optionality of the register in per-cpu + * cpc_regs[] with the corresponding index. 0 means mandatory and 1 + * means optional. + */ +#define REG_OPTIONAL (0x1FC7D0) + +/* + * Use the index of the register in per-cpu cpc_regs[] to check if + * it's an optional one. + */ +#define IS_OPTIONAL_CPC_REG(reg_idx) (REG_OPTIONAL & (1U << (reg_idx))) + /* * Arbitrary Retries in case the remote processor is slow to respond * to PCC commands. 
Keeping it high enough to cover emulators where @@ -122,23 +149,17 @@ static DEFINE_PER_CPU(struct cpc_desc *, cpc_desc_ptr); */ #define NUM_RETRIES 500ULL -struct cppc_attr { - struct attribute attr; - ssize_t (*show)(struct kobject *kobj, - struct attribute *attr, char *buf); - ssize_t (*store)(struct kobject *kobj, - struct attribute *attr, const char *c, ssize_t count); -}; +#define OVER_16BTS_MASK ~0xFFFFULL #define define_one_cppc_ro(_name) \ -static struct cppc_attr _name = \ +static struct kobj_attribute _name = \ __ATTR(_name, 0444, show_##_name, NULL) #define to_cpc_desc(a) container_of(a, struct cpc_desc, kobj) #define show_cppc_data(access_fn, struct_name, member_name) \ static ssize_t show_##member_name(struct kobject *kobj, \ - struct attribute *attr, char *buf) \ + struct kobj_attribute *attr, char *buf) \ { \ struct cpc_desc *cpc_ptr = to_cpc_desc(kobj); \ struct struct_name st_name = {0}; \ @@ -148,7 +169,7 @@ __ATTR(_name, 0444, show_##_name, NULL) if (ret) \ return ret; \ \ - return scnprintf(buf, PAGE_SIZE, "%llu\n", \ + return sysfs_emit(buf, "%llu\n", \ (u64)st_name.member_name); \ } \ define_one_cppc_ro(member_name) @@ -157,14 +178,25 @@ show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, highest_perf); show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_perf); show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, nominal_perf); show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_nonlinear_perf); +show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, guaranteed_perf); show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_freq); show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, nominal_freq); show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, reference_perf); show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, wraparound_time); +/* Check for valid access_width, otherwise, fallback to using bit_width */ +#define GET_BIT_WIDTH(reg) ((reg)->access_width ? 
(8 << ((reg)->access_width - 1)) : (reg)->bit_width) + +/* Shift and apply the mask for CPC reads/writes */ +#define MASK_VAL_READ(reg, val) (((val) >> (reg)->bit_offset) & \ + GENMASK(((reg)->bit_width) - 1, 0)) +#define MASK_VAL_WRITE(reg, prev_val, val) \ + ((((val) & GENMASK(((reg)->bit_width) - 1, 0)) << (reg)->bit_offset) | \ + ((prev_val) & ~(GENMASK(((reg)->bit_width) - 1, 0) << (reg)->bit_offset))) \ + static ssize_t show_feedback_ctrs(struct kobject *kobj, - struct attribute *attr, char *buf) + struct kobj_attribute *attr, char *buf) { struct cpc_desc *cpc_ptr = to_cpc_desc(kobj); struct cppc_perf_fb_ctrs fb_ctrs = {0}; @@ -174,7 +206,7 @@ static ssize_t show_feedback_ctrs(struct kobject *kobj, if (ret) return ret; - return scnprintf(buf, PAGE_SIZE, "ref:%llu del:%llu\n", + return sysfs_emit(buf, "ref:%llu del:%llu\n", fb_ctrs.reference, fb_ctrs.delivered); } define_one_cppc_ro(feedback_ctrs); @@ -186,15 +218,17 @@ static struct attribute *cppc_attrs[] = { &highest_perf.attr, &lowest_perf.attr, &lowest_nonlinear_perf.attr, + &guaranteed_perf.attr, &nominal_perf.attr, &nominal_freq.attr, &lowest_freq.attr, NULL }; +ATTRIBUTE_GROUPS(cppc); -static struct kobj_type cppc_ktype = { +static const struct kobj_type cppc_ktype = { .sysfs_ops = &kobj_sysfs_ops, - .default_attrs = cppc_attrs, + .default_groups = cppc_groups, }; static int check_pcc_chan(int pcc_ss_id, bool chk_err_bit) @@ -202,7 +236,7 @@ static int check_pcc_chan(int pcc_ss_id, bool chk_err_bit) int ret, status; struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id]; struct acpi_pcct_shared_memory __iomem *generic_comm_base = - pcc_ss_data->pcc_comm_addr; + pcc_ss_data->pcc_channel->shmem; if (!pcc_ss_data->platform_owns_pcc) return 0; @@ -236,8 +270,8 @@ static int send_pcc_cmd(int pcc_ss_id, u16 cmd) { int ret = -EIO, i; struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id]; - struct acpi_pcct_shared_memory *generic_comm_base = - (struct acpi_pcct_shared_memory *)pcc_ss_data->pcc_comm_addr; + struct acpi_pcct_shared_memory __iomem *generic_comm_base = + pcc_ss_data->pcc_channel->shmem; unsigned int time_delta; /* @@ -307,29 +341,30 @@ static int send_pcc_cmd(int pcc_ss_id, u16 cmd) pcc_ss_data->platform_owns_pcc = true; /* Ring doorbell */ - ret = mbox_send_message(pcc_ss_data->pcc_channel, &cmd); + ret = mbox_send_message(pcc_ss_data->pcc_channel->mchan, &cmd); if (ret < 0) { pr_err("Err sending PCC mbox message. 
ss: %d cmd:%d, ret:%d\n", pcc_ss_id, cmd, ret); goto end; } - /* wait for completion and check for PCC errro bit */ + /* wait for completion and check for PCC error bit */ ret = check_pcc_chan(pcc_ss_id, true); if (pcc_ss_data->pcc_mrtt) pcc_ss_data->last_cmd_cmpl_time = ktime_get(); - if (pcc_ss_data->pcc_channel->mbox->txdone_irq) - mbox_chan_txdone(pcc_ss_data->pcc_channel, ret); + if (pcc_ss_data->pcc_channel->mchan->mbox->txdone_irq) + mbox_chan_txdone(pcc_ss_data->pcc_channel->mchan, ret); else - mbox_client_txdone(pcc_ss_data->pcc_channel, ret); + mbox_client_txdone(pcc_ss_data->pcc_channel->mchan, ret); end: if (cmd == CMD_WRITE) { if (unlikely(ret)) { for_each_possible_cpu(i) { struct cpc_desc *desc = per_cpu(cpc_desc_ptr, i); + if (!desc) continue; @@ -354,7 +389,7 @@ static void cppc_chan_tx_done(struct mbox_client *cl, void *msg, int ret) *(u16 *)msg, ret); } -struct mbox_client cppc_mbox_cl = { +static struct mbox_client cppc_mbox_cl = { .tx_done = cppc_chan_tx_done, .knows_txdone = true, }; @@ -369,8 +404,10 @@ static int acpi_get_psd(struct cpc_desc *cpc_ptr, acpi_handle handle) union acpi_object *psd = NULL; struct acpi_psd_package *pdomain; - status = acpi_evaluate_object_typed(handle, "_PSD", NULL, &buffer, - ACPI_TYPE_PACKAGE); + status = acpi_evaluate_object_typed(handle, "_PSD", NULL, + &buffer, ACPI_TYPE_PACKAGE); + if (status == AE_NOT_FOUND) /* _PSD is optional */ + return 0; if (ACPI_FAILURE(status)) return -ENODEV; @@ -415,180 +452,139 @@ end: return result; } +bool acpi_cpc_valid(void) +{ + struct cpc_desc *cpc_ptr; + int cpu; + + if (acpi_disabled) + return false; + + for_each_online_cpu(cpu) { + cpc_ptr = per_cpu(cpc_desc_ptr, cpu); + if (!cpc_ptr) + return false; + } + + return true; +} +EXPORT_SYMBOL_GPL(acpi_cpc_valid); + +bool cppc_allow_fast_switch(void) +{ + struct cpc_register_resource *desired_reg; + struct cpc_desc *cpc_ptr; + int cpu; + + for_each_online_cpu(cpu) { + cpc_ptr = per_cpu(cpc_desc_ptr, cpu); + desired_reg = &cpc_ptr->cpc_regs[DESIRED_PERF]; + if (!CPC_IN_SYSTEM_MEMORY(desired_reg) && + !CPC_IN_SYSTEM_IO(desired_reg)) + return false; + } + + return true; +} +EXPORT_SYMBOL_GPL(cppc_allow_fast_switch); + /** - * acpi_get_psd_map - Map the CPUs in a common freq domain. - * @all_cpu_data: Ptrs to CPU specific CPPC data including PSD info. + * acpi_get_psd_map - Map the CPUs in the freq domain of a given cpu + * @cpu: Find all CPUs that share a domain with cpu. + * @cpu_data: Pointer to CPU specific CPPC data including PSD info. * * Return: 0 for success or negative value for err. */ -int acpi_get_psd_map(struct cppc_cpudata **all_cpu_data) +int acpi_get_psd_map(unsigned int cpu, struct cppc_cpudata *cpu_data) { - int count_target; - int retval = 0; - unsigned int i, j; - cpumask_var_t covered_cpus; - struct cppc_cpudata *pr, *match_pr; - struct acpi_psd_package *pdomain; - struct acpi_psd_package *match_pdomain; struct cpc_desc *cpc_ptr, *match_cpc_ptr; - - if (!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL)) - return -ENOMEM; + struct acpi_psd_package *match_pdomain; + struct acpi_psd_package *pdomain; + int count_target, i; /* - * Now that we have _PSD data from all CPUs, lets setup P-state + * Now that we have _PSD data from all CPUs, let's setup P-state * domain info. 
*/ - for_each_possible_cpu(i) { - pr = all_cpu_data[i]; - if (!pr) - continue; + cpc_ptr = per_cpu(cpc_desc_ptr, cpu); + if (!cpc_ptr) + return -EFAULT; - if (cpumask_test_cpu(i, covered_cpus)) - continue; + pdomain = &(cpc_ptr->domain_info); + cpumask_set_cpu(cpu, cpu_data->shared_cpu_map); + if (pdomain->num_processors <= 1) + return 0; - cpc_ptr = per_cpu(cpc_desc_ptr, i); - if (!cpc_ptr) { - retval = -EFAULT; - goto err_ret; - } + /* Validate the Domain info */ + count_target = pdomain->num_processors; + if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ALL) + cpu_data->shared_type = CPUFREQ_SHARED_TYPE_ALL; + else if (pdomain->coord_type == DOMAIN_COORD_TYPE_HW_ALL) + cpu_data->shared_type = CPUFREQ_SHARED_TYPE_HW; + else if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ANY) + cpu_data->shared_type = CPUFREQ_SHARED_TYPE_ANY; - pdomain = &(cpc_ptr->domain_info); - cpumask_set_cpu(i, pr->shared_cpu_map); - cpumask_set_cpu(i, covered_cpus); - if (pdomain->num_processors <= 1) + for_each_possible_cpu(i) { + if (i == cpu) continue; - /* Validate the Domain info */ - count_target = pdomain->num_processors; - if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ALL) - pr->shared_type = CPUFREQ_SHARED_TYPE_ALL; - else if (pdomain->coord_type == DOMAIN_COORD_TYPE_HW_ALL) - pr->shared_type = CPUFREQ_SHARED_TYPE_HW; - else if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ANY) - pr->shared_type = CPUFREQ_SHARED_TYPE_ANY; - - for_each_possible_cpu(j) { - if (i == j) - continue; - - match_cpc_ptr = per_cpu(cpc_desc_ptr, j); - if (!match_cpc_ptr) { - retval = -EFAULT; - goto err_ret; - } - - match_pdomain = &(match_cpc_ptr->domain_info); - if (match_pdomain->domain != pdomain->domain) - continue; + match_cpc_ptr = per_cpu(cpc_desc_ptr, i); + if (!match_cpc_ptr) + goto err_fault; - /* Here i and j are in the same domain */ - if (match_pdomain->num_processors != count_target) { - retval = -EFAULT; - goto err_ret; - } - - if (pdomain->coord_type != match_pdomain->coord_type) { - retval = -EFAULT; - goto err_ret; - } - - cpumask_set_cpu(j, covered_cpus); - cpumask_set_cpu(j, pr->shared_cpu_map); - } - - for_each_possible_cpu(j) { - if (i == j) - continue; - - match_pr = all_cpu_data[j]; - if (!match_pr) - continue; + match_pdomain = &(match_cpc_ptr->domain_info); + if (match_pdomain->domain != pdomain->domain) + continue; - match_cpc_ptr = per_cpu(cpc_desc_ptr, j); - if (!match_cpc_ptr) { - retval = -EFAULT; - goto err_ret; - } + /* Here i and cpu are in the same domain */ + if (match_pdomain->num_processors != count_target) + goto err_fault; - match_pdomain = &(match_cpc_ptr->domain_info); - if (match_pdomain->domain != pdomain->domain) - continue; + if (pdomain->coord_type != match_pdomain->coord_type) + goto err_fault; - match_pr->shared_type = pr->shared_type; - cpumask_copy(match_pr->shared_cpu_map, - pr->shared_cpu_map); - } + cpumask_set_cpu(i, cpu_data->shared_cpu_map); } -err_ret: - for_each_possible_cpu(i) { - pr = all_cpu_data[i]; - if (!pr) - continue; + return 0; - /* Assume no coordination on any error parsing domain info */ - if (retval) { - cpumask_clear(pr->shared_cpu_map); - cpumask_set_cpu(i, pr->shared_cpu_map); - pr->shared_type = CPUFREQ_SHARED_TYPE_ALL; - } - } +err_fault: + /* Assume no coordination on any error parsing domain info */ + cpumask_clear(cpu_data->shared_cpu_map); + cpumask_set_cpu(cpu, cpu_data->shared_cpu_map); + cpu_data->shared_type = CPUFREQ_SHARED_TYPE_NONE; - free_cpumask_var(covered_cpus); - return retval; + return -EFAULT; } EXPORT_SYMBOL_GPL(acpi_get_psd_map); 
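With the rework above, acpi_get_psd_map() no longer walks every possible CPU up front; the caller asks for the frequency domain of a single CPU and gets back that domain's shared_cpu_map and shared_type. A minimal caller sketch, loosely modeled on how a CPPC-based cpufreq driver would consume it, is shown below; the function name and the exact error handling are illustrative assumptions rather than code from this patch.

/*
 * Illustrative sketch only: build the _PSD frequency domain for one CPU
 * using the reworked per-CPU acpi_get_psd_map().
 */
#include <linux/cpumask.h>
#include <acpi/cppc_acpi.h>

static int init_freq_domain(unsigned int cpu, struct cppc_cpudata *cpu_data)
{
	int ret;

	if (!zalloc_cpumask_var(&cpu_data->shared_cpu_map, GFP_KERNEL))
		return -ENOMEM;

	ret = acpi_get_psd_map(cpu, cpu_data);
	if (ret) {
		/* _PSD parsing failed; give up on this CPU's domain info. */
		free_cpumask_var(cpu_data->shared_cpu_map);
		return ret;
	}

	/*
	 * On success, shared_cpu_map contains every CPU in the same _PSD
	 * domain and shared_type carries the coordination type (SW_ALL,
	 * SW_ANY or HW_ALL mapped to the CPUFREQ_SHARED_TYPE_* values).
	 */
	return 0;
}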
static int register_pcc_channel(int pcc_ss_idx) { - struct acpi_pcct_hw_reduced *cppc_ss; + struct pcc_mbox_chan *pcc_chan; u64 usecs_lat; if (pcc_ss_idx >= 0) { - pcc_data[pcc_ss_idx]->pcc_channel = - pcc_mbox_request_channel(&cppc_mbox_cl, pcc_ss_idx); + pcc_chan = pcc_mbox_request_channel(&cppc_mbox_cl, pcc_ss_idx); - if (IS_ERR(pcc_data[pcc_ss_idx]->pcc_channel)) { + if (IS_ERR(pcc_chan)) { pr_err("Failed to find PCC channel for subspace %d\n", pcc_ss_idx); return -ENODEV; } - /* - * The PCC mailbox controller driver should - * have parsed the PCCT (global table of all - * PCC channels) and stored pointers to the - * subspace communication region in con_priv. - */ - cppc_ss = (pcc_data[pcc_ss_idx]->pcc_channel)->con_priv; - - if (!cppc_ss) { - pr_err("No PCC subspace found for %d CPPC\n", - pcc_ss_idx); - return -ENODEV; - } - + pcc_data[pcc_ss_idx]->pcc_channel = pcc_chan; /* * cppc_ss->latency is just a Nominal value. In reality * the remote processor could be much slower to reply. * So add an arbitrary amount of wait on top of Nominal. */ - usecs_lat = NUM_RETRIES * cppc_ss->latency; + usecs_lat = NUM_RETRIES * pcc_chan->latency; pcc_data[pcc_ss_idx]->deadline_us = usecs_lat; - pcc_data[pcc_ss_idx]->pcc_mrtt = cppc_ss->min_turnaround_time; - pcc_data[pcc_ss_idx]->pcc_mpar = cppc_ss->max_access_rate; - pcc_data[pcc_ss_idx]->pcc_nominal = cppc_ss->latency; - - pcc_data[pcc_ss_idx]->pcc_comm_addr = - acpi_os_ioremap(cppc_ss->base_address, cppc_ss->length); - if (!pcc_data[pcc_ss_idx]->pcc_comm_addr) { - pr_err("Failed to ioremap PCC comm region mem for %d\n", - pcc_ss_idx); - return -ENOMEM; - } + pcc_data[pcc_ss_idx]->pcc_mrtt = pcc_chan->min_turnaround_time; + pcc_data[pcc_ss_idx]->pcc_mpar = pcc_chan->max_access_rate; + pcc_data[pcc_ss_idx]->pcc_nominal = pcc_chan->latency; - /* Set flag so that we dont come here for each CPU. */ + /* Set flag so that we don't come here for each CPU. */ pcc_data[pcc_ss_idx]->pcc_channel_acquired = true; } @@ -609,16 +605,30 @@ bool __weak cpc_ffh_supported(void) } /** + * cpc_supported_by_cpu() - check if CPPC is supported by CPU + * + * Check if the architectural support for CPPC is present even + * if the _OSC hasn't prescribed it + * + * Return: true for supported, false for not supported + */ +bool __weak cpc_supported_by_cpu(void) +{ + return false; +} + +/** * pcc_data_alloc() - Allocate the pcc_data memory for pcc subspace + * @pcc_ss_id: PCC Subspace index as in the PCC client ACPI package. * * Check and allocate the cppc_pcc_data memory. * In some processor configurations it is possible that same subspace - * is shared between multiple CPU's. This is seen especially in CPU's + * is shared between multiple CPUs. This is seen especially in CPUs * with hardware multi-threading support. * * Return: 0 for success, errno for failure */ -int pcc_data_alloc(int pcc_ss_id) +static int pcc_data_alloc(int pcc_ss_id) { if (pcc_ss_id < 0 || pcc_ss_id >= MAX_PCC_SUBSPACES) return -EINVAL; @@ -636,82 +646,38 @@ int pcc_data_alloc(int pcc_ss_id) return 0; } -/* Check if CPPC revision + num_ent combination is supported */ -static bool is_cppc_supported(int revision, int num_ent) -{ - int expected_num_ent; - - switch (revision) { - case CPPC_V2_REV: - expected_num_ent = CPPC_V2_NUM_ENT; - break; - case CPPC_V3_REV: - expected_num_ent = CPPC_V3_NUM_ENT; - break; - default: - pr_debug("Firmware exports unsupported CPPC revision: %d\n", - revision); - return false; - } - - if (expected_num_ent != num_ent) { - pr_debug("Firmware exports %d entries. 
Expected: %d for CPPC rev:%d\n", - num_ent, expected_num_ent, revision); - return false; - } - - return true; -} - /* * An example CPC table looks like the following. * - * Name(_CPC, Package() - * { - * 17, - * NumEntries - * 1, - * // Revision - * ResourceTemplate(){Register(PCC, 32, 0, 0x120, 2)}, - * // Highest Performance - * ResourceTemplate(){Register(PCC, 32, 0, 0x124, 2)}, - * // Nominal Performance - * ResourceTemplate(){Register(PCC, 32, 0, 0x128, 2)}, - * // Lowest Nonlinear Performance - * ResourceTemplate(){Register(PCC, 32, 0, 0x12C, 2)}, - * // Lowest Performance - * ResourceTemplate(){Register(PCC, 32, 0, 0x130, 2)}, - * // Guaranteed Performance Register - * ResourceTemplate(){Register(PCC, 32, 0, 0x110, 2)}, - * // Desired Performance Register - * ResourceTemplate(){Register(SystemMemory, 0, 0, 0, 0)}, - * .. - * .. - * .. - * - * } + * Name (_CPC, Package() { + * 17, // NumEntries + * 1, // Revision + * ResourceTemplate() {Register(PCC, 32, 0, 0x120, 2)}, // Highest Performance + * ResourceTemplate() {Register(PCC, 32, 0, 0x124, 2)}, // Nominal Performance + * ResourceTemplate() {Register(PCC, 32, 0, 0x128, 2)}, // Lowest Nonlinear Performance + * ResourceTemplate() {Register(PCC, 32, 0, 0x12C, 2)}, // Lowest Performance + * ResourceTemplate() {Register(PCC, 32, 0, 0x130, 2)}, // Guaranteed Performance Register + * ResourceTemplate() {Register(PCC, 32, 0, 0x110, 2)}, // Desired Performance Register + * ResourceTemplate() {Register(SystemMemory, 0, 0, 0, 0)}, + * ... + * ... + * ... + * } * Each Register() encodes how to access that specific register. * e.g. a sample PCC entry has the following encoding: * - * Register ( - * PCC, - * AddressSpaceKeyword - * 8, - * //RegisterBitWidth - * 8, - * //RegisterBitOffset - * 0x30, - * //RegisterAddress - * 9 - * //AccessSize (subspace ID) - * 0 - * ) - * } + * Register ( + * PCC, // AddressSpaceKeyword + * 8, // RegisterBitWidth + * 8, // RegisterBitOffset + * 0x30, // RegisterAddress + * 9, // AccessSize (subspace ID) + * ) */ /** * acpi_cppc_processor_probe - Search for per CPU _CPC objects. - * @pr: Ptr to acpi_processor containing this CPUs logical Id. + * @pr: Ptr to acpi_processor containing this CPU's logical ID. * * Return: 0 for success or negative value for err. */ @@ -726,9 +692,17 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr) unsigned int num_ent, i, cpc_rev; int pcc_subspace_id = -1; acpi_status status; - int ret = -EFAULT; + int ret = -ENODATA; + + if (!osc_sb_cppc2_support_acked) { + pr_debug("CPPC v2 _OSC not acked\n"); + if (!cpc_supported_by_cpu()) { + pr_debug("CPPC is not supported by the CPU\n"); + return -ENODEV; + } + } - /* Parse the ACPI _CPC table for this cpu. */ + /* Parse the ACPI _CPC table for this CPU. */ status = acpi_evaluate_object_typed(handle, "_CPC", NULL, &output, ACPI_TYPE_PACKAGE); if (ACPI_FAILURE(status)) { @@ -748,26 +722,52 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr) cpc_obj = &out_obj->package.elements[0]; if (cpc_obj->type == ACPI_TYPE_INTEGER) { num_ent = cpc_obj->integer.value; + if (num_ent <= 1) { + pr_debug("Unexpected _CPC NumEntries value (%d) for CPU:%d\n", + num_ent, pr->id); + goto out_free; + } } else { - pr_debug("Unexpected entry type(%d) for NumEntries\n", - cpc_obj->type); + pr_debug("Unexpected _CPC NumEntries entry type (%d) for CPU:%d\n", + cpc_obj->type, pr->id); goto out_free; } - cpc_ptr->num_entries = num_ent; /* Second entry should be revision. 
*/ cpc_obj = &out_obj->package.elements[1]; if (cpc_obj->type == ACPI_TYPE_INTEGER) { cpc_rev = cpc_obj->integer.value; } else { - pr_debug("Unexpected entry type(%d) for Revision\n", - cpc_obj->type); + pr_debug("Unexpected _CPC Revision entry type (%d) for CPU:%d\n", + cpc_obj->type, pr->id); + goto out_free; + } + + if (cpc_rev < CPPC_V2_REV) { + pr_debug("Unsupported _CPC Revision (%d) for CPU:%d\n", cpc_rev, + pr->id); goto out_free; } - cpc_ptr->version = cpc_rev; - if (!is_cppc_supported(cpc_rev, num_ent)) + /* + * Disregard _CPC if the number of entries in the return package is not + * as expected, but support future revisions being proper supersets of + * the v3 and only causing more entries to be returned by _CPC. + */ + if ((cpc_rev == CPPC_V2_REV && num_ent != CPPC_V2_NUM_ENT) || + (cpc_rev == CPPC_V3_REV && num_ent != CPPC_V3_NUM_ENT) || + (cpc_rev > CPPC_V3_REV && num_ent <= CPPC_V3_NUM_ENT)) { + pr_debug("Unexpected number of _CPC return package entries (%d) for CPU:%d\n", + num_ent, pr->id); goto out_free; + } + if (cpc_rev > CPPC_V3_REV) { + num_ent = CPPC_V3_NUM_ENT; + cpc_rev = CPPC_V3_REV; + } + + cpc_ptr->num_entries = num_ent; + cpc_ptr->version = cpc_rev; /* Iterate through remaining entries in _CPC */ for (i = 2; i < num_ent; i++) { @@ -792,22 +792,54 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr) if (pcc_data_alloc(pcc_subspace_id)) goto out_free; } else if (pcc_subspace_id != gas_t->access_width) { - pr_debug("Mismatched PCC ids.\n"); + pr_debug("Mismatched PCC ids in _CPC for CPU:%d\n", + pr->id); goto out_free; } } else if (gas_t->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) { if (gas_t->address) { void __iomem *addr; + size_t access_width; - addr = ioremap(gas_t->address, gas_t->bit_width/8); + if (!osc_cpc_flexible_adr_space_confirmed) { + pr_debug("Flexible address space capability not supported\n"); + if (!cpc_supported_by_cpu()) + goto out_free; + } + + access_width = GET_BIT_WIDTH(gas_t) / 8; + addr = ioremap(gas_t->address, access_width); if (!addr) goto out_free; cpc_ptr->cpc_regs[i-2].sys_mem_vaddr = addr; } + } else if (gas_t->space_id == ACPI_ADR_SPACE_SYSTEM_IO) { + if (gas_t->access_width < 1 || gas_t->access_width > 3) { + /* + * 1 = 8-bit, 2 = 16-bit, and 3 = 32-bit. + * SystemIO doesn't implement 64-bit + * registers. + */ + pr_debug("Invalid access width %d for SystemIO register in _CPC\n", + gas_t->access_width); + goto out_free; + } + if (gas_t->address & OVER_16BTS_MASK) { + /* SystemIO registers use 16-bit integer addresses */ + pr_debug("Invalid IO port %llu for SystemIO register in _CPC\n", + gas_t->address); + goto out_free; + } + if (!osc_cpc_flexible_adr_space_confirmed) { + pr_debug("Flexible address space capability not supported\n"); + if (!cpc_supported_by_cpu()) + goto out_free; + } } else { if (gas_t->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE || !cpc_ffh_supported()) { - /* Support only PCC ,SYS MEM and FFH type regs */ - pr_debug("Unsupported register type: %d\n", gas_t->space_id); + /* Support only PCC, SystemMemory, SystemIO, and FFH type regs. 
*/ + pr_debug("Unsupported register type (%d) in _CPC\n", + gas_t->space_id); goto out_free; } } @@ -815,7 +847,8 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr) cpc_ptr->cpc_regs[i-2].type = ACPI_TYPE_BUFFER; memcpy(&cpc_ptr->cpc_regs[i-2].cpc_entry.reg, gas_t, sizeof(*gas_t)); } else { - pr_debug("Err in entry:%d in CPC table of CPU:%d \n", i, pr->id); + pr_debug("Invalid entry type (%d) in _CPC for CPU:%d\n", + i, pr->id); goto out_free; } } @@ -834,13 +867,14 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr) /* Store CPU Logical ID */ cpc_ptr->cpu_id = pr->id; + raw_spin_lock_init(&cpc_ptr->rmw_lock); /* Parse PSD data for this CPU */ ret = acpi_get_psd(cpc_ptr, handle); if (ret) goto out_free; - /* Register PCC channel once for all PCC subspace id. */ + /* Register PCC channel once for all PCC subspace ID. */ if (pcc_subspace_id >= 0 && !pcc_data[pcc_subspace_id]->pcc_channel_acquired) { ret = register_pcc_channel(pcc_subspace_id); if (ret) @@ -860,13 +894,14 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr) goto out_free; } - /* Plug PSD data into this CPUs CPC descriptor. */ + /* Plug PSD data into this CPU's CPC descriptor. */ per_cpu(cpc_desc_ptr, pr->id) = cpc_ptr; ret = kobject_init_and_add(&cpc_ptr->kobj, &cppc_ktype, &cpu_dev->kobj, "acpi_cppc"); if (ret) { per_cpu(cpc_desc_ptr, pr->id) = NULL; + kobject_put(&cpc_ptr->kobj); goto out_free; } @@ -891,7 +926,7 @@ EXPORT_SYMBOL_GPL(acpi_cppc_processor_probe); /** * acpi_cppc_processor_exit - Cleanup CPC structs. - * @pr: Ptr to acpi_processor containing this CPUs logical Id. + * @pr: Ptr to acpi_processor containing this CPU's logical ID. * * Return: Void */ @@ -902,13 +937,13 @@ void acpi_cppc_processor_exit(struct acpi_processor *pr) void __iomem *addr; int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, pr->id); - if (pcc_ss_id >=0 && pcc_data[pcc_ss_id]) { + if (pcc_ss_id >= 0 && pcc_data[pcc_ss_id]) { if (pcc_data[pcc_ss_id]->pcc_channel_acquired) { pcc_data[pcc_ss_id]->refcount--; if (!pcc_data[pcc_ss_id]->refcount) { pcc_mbox_free_channel(pcc_data[pcc_ss_id]->pcc_channel); - pcc_data[pcc_ss_id]->pcc_channel_acquired = 0; kfree(pcc_data[pcc_ss_id]); + pcc_data[pcc_ss_id] = NULL; } } } @@ -931,7 +966,7 @@ EXPORT_SYMBOL_GPL(acpi_cppc_processor_exit); /** * cpc_read_ffh() - Read FFH register - * @cpunum: cpu number to read + * @cpunum: CPU number to read * @reg: cppc register information * @val: place holder for return value * @@ -946,7 +981,7 @@ int __weak cpc_read_ffh(int cpunum, struct cpc_reg *reg, u64 *val) /** * cpc_write_ffh() - Write FFH register - * @cpunum: cpu number to write + * @cpunum: CPU number to write * @reg: cppc register information * @val: value to write * @@ -967,91 +1002,338 @@ int __weak cpc_write_ffh(int cpunum, struct cpc_reg *reg, u64 val) static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val) { - int ret_val = 0; - void __iomem *vaddr = 0; + void __iomem *vaddr = NULL; + int size; int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu); struct cpc_reg *reg = ®_res->cpc_entry.reg; if (reg_res->type == ACPI_TYPE_INTEGER) { *val = reg_res->cpc_entry.int_value; - return ret_val; + return 0; } *val = 0; - if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0) + size = GET_BIT_WIDTH(reg); + + if (IS_ENABLED(CONFIG_HAS_IOPORT) && + reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) { + u32 val_u32; + acpi_status status; + + status = acpi_os_read_port((acpi_io_address)reg->address, + &val_u32, size); + if (ACPI_FAILURE(status)) { + pr_debug("Error: Failed to 
read SystemIO port %llx\n", + reg->address); + return -EFAULT; + } + + *val = val_u32; + return 0; + } else if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0) { + /* + * For registers in PCC space, the register size is determined + * by the bit width field; the access size is used to indicate + * the PCC subspace id. + */ + size = reg->bit_width; vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id); + } else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) vaddr = reg_res->sys_mem_vaddr; else if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) return cpc_read_ffh(cpu, reg, val); else return acpi_os_read_memory((acpi_physical_address)reg->address, - val, reg->bit_width); + val, size); - switch (reg->bit_width) { - case 8: - *val = readb_relaxed(vaddr); - break; - case 16: - *val = readw_relaxed(vaddr); - break; - case 32: - *val = readl_relaxed(vaddr); - break; - case 64: - *val = readq_relaxed(vaddr); - break; - default: + switch (size) { + case 8: + *val = readb_relaxed(vaddr); + break; + case 16: + *val = readw_relaxed(vaddr); + break; + case 32: + *val = readl_relaxed(vaddr); + break; + case 64: + *val = readq_relaxed(vaddr); + break; + default: + if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) { + pr_debug("Error: Cannot read %u bit width from system memory: 0x%llx\n", + size, reg->address); + } else if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) { pr_debug("Error: Cannot read %u bit width from PCC for ss: %d\n", - reg->bit_width, pcc_ss_id); - ret_val = -EFAULT; + size, pcc_ss_id); + } + return -EFAULT; } - return ret_val; + if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) + *val = MASK_VAL_READ(reg, *val); + + return 0; } static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val) { int ret_val = 0; - void __iomem *vaddr = 0; + int size; + u64 prev_val; + void __iomem *vaddr = NULL; int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu); struct cpc_reg *reg = ®_res->cpc_entry.reg; + struct cpc_desc *cpc_desc; + unsigned long flags; + + size = GET_BIT_WIDTH(reg); + + if (IS_ENABLED(CONFIG_HAS_IOPORT) && + reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) { + acpi_status status; - if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0) + status = acpi_os_write_port((acpi_io_address)reg->address, + (u32)val, size); + if (ACPI_FAILURE(status)) { + pr_debug("Error: Failed to write SystemIO port %llx\n", + reg->address); + return -EFAULT; + } + + return 0; + } else if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0) { + /* + * For registers in PCC space, the register size is determined + * by the bit width field; the access size is used to indicate + * the PCC subspace id. 
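For SystemMemory registers the read path above narrows the raw value with MASK_VAL_READ(), and the write path performs a read-modify-write under the new per-CPU rmw_lock so that neighbouring fields sharing the same access width are preserved. The macro bodies are not part of this hunk; the stand-alone sketch below shows one plausible mask/shift formulation of the same idea, with hypothetical bit_offset/bit_width values.

#include <stdint.h>
#include <stdio.h>

struct field {
	unsigned int bit_offset;
	unsigned int bit_width;
};

static uint64_t field_mask(const struct field *f)
{
	/* Avoid shifting by 64, which would be undefined behaviour. */
	uint64_t bits = f->bit_width == 64 ? ~0ULL : (1ULL << f->bit_width) - 1;

	return bits << f->bit_offset;
}

/* Extract the field from a raw register value (read side). */
static uint64_t masked_read(const struct field *f, uint64_t raw)
{
	return (raw & field_mask(f)) >> f->bit_offset;
}

/* Merge a new field value into the previously read raw value (write side). */
static uint64_t masked_write(const struct field *f, uint64_t prev, uint64_t val)
{
	return (prev & ~field_mask(f)) | ((val << f->bit_offset) & field_mask(f));
}

int main(void)
{
	struct field f = { .bit_offset = 8, .bit_width = 8 }; /* hypothetical */
	uint64_t raw = 0xABCD;

	printf("read:  0x%llx\n", (unsigned long long)masked_read(&f, raw));
	printf("write: 0x%llx\n", (unsigned long long)masked_write(&f, raw, 0x42));
	return 0;
}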
+ */ + size = reg->bit_width; vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id); + } else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) vaddr = reg_res->sys_mem_vaddr; else if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) return cpc_write_ffh(cpu, reg, val); else return acpi_os_write_memory((acpi_physical_address)reg->address, - val, reg->bit_width); + val, size); - switch (reg->bit_width) { + if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) { + cpc_desc = per_cpu(cpc_desc_ptr, cpu); + if (!cpc_desc) { + pr_debug("No CPC descriptor for CPU:%d\n", cpu); + return -ENODEV; + } + + raw_spin_lock_irqsave(&cpc_desc->rmw_lock, flags); + switch (size) { case 8: - writeb_relaxed(val, vaddr); + prev_val = readb_relaxed(vaddr); break; case 16: - writew_relaxed(val, vaddr); + prev_val = readw_relaxed(vaddr); break; case 32: - writel_relaxed(val, vaddr); + prev_val = readl_relaxed(vaddr); break; case 64: - writeq_relaxed(val, vaddr); + prev_val = readq_relaxed(vaddr); break; default: + raw_spin_unlock_irqrestore(&cpc_desc->rmw_lock, flags); + return -EFAULT; + } + val = MASK_VAL_WRITE(reg, prev_val, val); + } + + switch (size) { + case 8: + writeb_relaxed(val, vaddr); + break; + case 16: + writew_relaxed(val, vaddr); + break; + case 32: + writel_relaxed(val, vaddr); + break; + case 64: + writeq_relaxed(val, vaddr); + break; + default: + if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) { + pr_debug("Error: Cannot write %u bit width to system memory: 0x%llx\n", + size, reg->address); + } else if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) { pr_debug("Error: Cannot write %u bit width to PCC for ss: %d\n", - reg->bit_width, pcc_ss_id); - ret_val = -EFAULT; - break; + size, pcc_ss_id); + } + ret_val = -EFAULT; + break; } + if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) + raw_spin_unlock_irqrestore(&cpc_desc->rmw_lock, flags); + return ret_val; } +static int cppc_get_reg_val_in_pcc(int cpu, struct cpc_register_resource *reg, u64 *val) +{ + int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu); + struct cppc_pcc_data *pcc_ss_data = NULL; + int ret; + + if (pcc_ss_id < 0) { + pr_debug("Invalid pcc_ss_id\n"); + return -ENODEV; + } + + pcc_ss_data = pcc_data[pcc_ss_id]; + + down_write(&pcc_ss_data->pcc_lock); + + if (send_pcc_cmd(pcc_ss_id, CMD_READ) >= 0) + ret = cpc_read(cpu, reg, val); + else + ret = -EIO; + + up_write(&pcc_ss_data->pcc_lock); + + return ret; +} + +static int cppc_get_reg_val(int cpu, enum cppc_regs reg_idx, u64 *val) +{ + struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu); + struct cpc_register_resource *reg; + + if (val == NULL) + return -EINVAL; + + if (!cpc_desc) { + pr_debug("No CPC descriptor for CPU:%d\n", cpu); + return -ENODEV; + } + + reg = &cpc_desc->cpc_regs[reg_idx]; + + if ((reg->type == ACPI_TYPE_INTEGER && IS_OPTIONAL_CPC_REG(reg_idx) && + !reg->cpc_entry.int_value) || (reg->type != ACPI_TYPE_INTEGER && + IS_NULL_REG(®->cpc_entry.reg))) { + pr_debug("CPC register is not supported\n"); + return -EOPNOTSUPP; + } + + if (CPC_IN_PCC(reg)) + return cppc_get_reg_val_in_pcc(cpu, reg, val); + + return cpc_read(cpu, reg, val); +} + +static int cppc_set_reg_val_in_pcc(int cpu, struct cpc_register_resource *reg, u64 val) +{ + int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu); + struct cppc_pcc_data *pcc_ss_data = NULL; + int ret; + + if (pcc_ss_id < 0) { + pr_debug("Invalid pcc_ss_id\n"); + return -ENODEV; + } + + ret = cpc_write(cpu, reg, val); + if (ret) + return ret; + + pcc_ss_data = pcc_data[pcc_ss_id]; + + down_write(&pcc_ss_data->pcc_lock); + /* after writing 
CPC, transfer the ownership of PCC to platform */ + ret = send_pcc_cmd(pcc_ss_id, CMD_WRITE); + up_write(&pcc_ss_data->pcc_lock); + + return ret; +} + +static int cppc_set_reg_val(int cpu, enum cppc_regs reg_idx, u64 val) +{ + struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu); + struct cpc_register_resource *reg; + + if (!cpc_desc) { + pr_debug("No CPC descriptor for CPU:%d\n", cpu); + return -ENODEV; + } + + reg = &cpc_desc->cpc_regs[reg_idx]; + + /* if a register is writeable, it must be a buffer and not null */ + if ((reg->type != ACPI_TYPE_BUFFER) || IS_NULL_REG(®->cpc_entry.reg)) { + pr_debug("CPC register is not supported\n"); + return -EOPNOTSUPP; + } + + if (CPC_IN_PCC(reg)) + return cppc_set_reg_val_in_pcc(cpu, reg, val); + + return cpc_write(cpu, reg, val); +} + +/** + * cppc_get_desired_perf - Get the desired performance register value. + * @cpunum: CPU from which to get desired performance. + * @desired_perf: Return address. + * + * Return: 0 for success, -EIO otherwise. + */ +int cppc_get_desired_perf(int cpunum, u64 *desired_perf) +{ + return cppc_get_reg_val(cpunum, DESIRED_PERF, desired_perf); +} +EXPORT_SYMBOL_GPL(cppc_get_desired_perf); + +/** + * cppc_get_nominal_perf - Get the nominal performance register value. + * @cpunum: CPU from which to get nominal performance. + * @nominal_perf: Return address. + * + * Return: 0 for success, -EIO otherwise. + */ +int cppc_get_nominal_perf(int cpunum, u64 *nominal_perf) +{ + return cppc_get_reg_val(cpunum, NOMINAL_PERF, nominal_perf); +} + +/** + * cppc_get_highest_perf - Get the highest performance register value. + * @cpunum: CPU from which to get highest performance. + * @highest_perf: Return address. + * + * Return: 0 for success, -EIO otherwise. + */ +int cppc_get_highest_perf(int cpunum, u64 *highest_perf) +{ + return cppc_get_reg_val(cpunum, HIGHEST_PERF, highest_perf); +} +EXPORT_SYMBOL_GPL(cppc_get_highest_perf); + +/** + * cppc_get_epp_perf - Get the epp register value. + * @cpunum: CPU from which to get epp preference value. + * @epp_perf: Return address. + * + * Return: 0 for success, -EIO otherwise. + */ +int cppc_get_epp_perf(int cpunum, u64 *epp_perf) +{ + return cppc_get_reg_val(cpunum, ENERGY_PERF, epp_perf); +} +EXPORT_SYMBOL_GPL(cppc_get_epp_perf); + /** - * cppc_get_perf_caps - Get a CPUs performance capabilities. + * cppc_get_perf_caps - Get a CPU's performance capabilities. * @cpunum: CPU from which to get capabilities info. * @perf_caps: ptr to cppc_perf_caps. See cppc_acpi.h * @@ -1108,8 +1390,13 @@ int cppc_get_perf_caps(int cpunum, struct cppc_perf_caps *perf_caps) cpc_read(cpunum, nominal_reg, &nom); perf_caps->nominal_perf = nom; - cpc_read(cpunum, guaranteed_reg, &guaranteed); - perf_caps->guaranteed_perf = guaranteed; + if (guaranteed_reg->type != ACPI_TYPE_BUFFER || + IS_NULL_REG(&guaranteed_reg->cpc_entry.reg)) { + perf_caps->guaranteed_perf = 0; + } else { + cpc_read(cpunum, guaranteed_reg, &guaranteed); + perf_caps->guaranteed_perf = guaranteed; + } cpc_read(cpunum, lowest_non_linear_reg, &min_nonlinear); perf_caps->lowest_nonlinear_perf = min_nonlinear; @@ -1136,7 +1423,49 @@ out_err: EXPORT_SYMBOL_GPL(cppc_get_perf_caps); /** - * cppc_get_perf_ctrs - Read a CPUs performance feedback counters. + * cppc_perf_ctrs_in_pcc - Check if any perf counters are in a PCC region. + * + * CPPC has flexibility about how CPU performance counters are accessed. + * One of the choices is PCC regions, which can have a high access latency. 
This + * routine allows callers of cppc_get_perf_ctrs() to know this ahead of time. + * + * Return: true if any of the counters are in PCC regions, false otherwise + */ +bool cppc_perf_ctrs_in_pcc(void) +{ + int cpu; + + for_each_online_cpu(cpu) { + struct cpc_register_resource *ref_perf_reg; + struct cpc_desc *cpc_desc; + + cpc_desc = per_cpu(cpc_desc_ptr, cpu); + + if (CPC_IN_PCC(&cpc_desc->cpc_regs[DELIVERED_CTR]) || + CPC_IN_PCC(&cpc_desc->cpc_regs[REFERENCE_CTR]) || + CPC_IN_PCC(&cpc_desc->cpc_regs[CTR_WRAP_TIME])) + return true; + + + ref_perf_reg = &cpc_desc->cpc_regs[REFERENCE_PERF]; + + /* + * If reference perf register is not supported then we should + * use the nominal perf value + */ + if (!CPC_SUPPORTED(ref_perf_reg)) + ref_perf_reg = &cpc_desc->cpc_regs[NOMINAL_PERF]; + + if (CPC_IN_PCC(ref_perf_reg)) + return true; + } + + return false; +} +EXPORT_SYMBOL_GPL(cppc_perf_ctrs_in_pcc); + +/** + * cppc_get_perf_ctrs - Read a CPU's performance feedback counters. * @cpunum: CPU from which to read counters. * @perf_fb_ctrs: ptr to cppc_perf_fb_ctrs. See cppc_acpi.h * @@ -1163,7 +1492,7 @@ int cppc_get_perf_ctrs(int cpunum, struct cppc_perf_fb_ctrs *perf_fb_ctrs) ctr_wrap_reg = &cpc_desc->cpc_regs[CTR_WRAP_TIME]; /* - * If refernce perf register is not supported then we should + * If reference perf register is not supported then we should * use the nominal perf value */ if (!CPC_SUPPORTED(ref_perf_reg)) @@ -1215,8 +1544,196 @@ out_err: } EXPORT_SYMBOL_GPL(cppc_get_perf_ctrs); +/* + * Set Energy Performance Preference Register value through + * Performance Controls Interface + */ +int cppc_set_epp_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls, bool enable) +{ + int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu); + struct cpc_register_resource *epp_set_reg; + struct cpc_register_resource *auto_sel_reg; + struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu); + struct cppc_pcc_data *pcc_ss_data = NULL; + int ret; + + if (!cpc_desc) { + pr_debug("No CPC descriptor for CPU:%d\n", cpu); + return -ENODEV; + } + + auto_sel_reg = &cpc_desc->cpc_regs[AUTO_SEL_ENABLE]; + epp_set_reg = &cpc_desc->cpc_regs[ENERGY_PERF]; + + if (CPC_IN_PCC(epp_set_reg) || CPC_IN_PCC(auto_sel_reg)) { + if (pcc_ss_id < 0) { + pr_debug("Invalid pcc_ss_id for CPU:%d\n", cpu); + return -ENODEV; + } + + if (CPC_SUPPORTED(auto_sel_reg)) { + ret = cpc_write(cpu, auto_sel_reg, enable); + if (ret) + return ret; + } + + if (CPC_SUPPORTED(epp_set_reg)) { + ret = cpc_write(cpu, epp_set_reg, perf_ctrls->energy_perf); + if (ret) + return ret; + } + + pcc_ss_data = pcc_data[pcc_ss_id]; + + down_write(&pcc_ss_data->pcc_lock); + /* after writing CPC, transfer the ownership of PCC to platform */ + ret = send_pcc_cmd(pcc_ss_id, CMD_WRITE); + up_write(&pcc_ss_data->pcc_lock); + } else if (osc_cpc_flexible_adr_space_confirmed && + CPC_SUPPORTED(epp_set_reg) && CPC_IN_FFH(epp_set_reg)) { + ret = cpc_write(cpu, epp_set_reg, perf_ctrls->energy_perf); + } else { + ret = -ENOTSUPP; + pr_debug("_CPC in PCC and _CPC in FFH are not supported\n"); + } + + return ret; +} +EXPORT_SYMBOL_GPL(cppc_set_epp_perf); + +/** + * cppc_set_epp() - Write the EPP register. + * @cpu: CPU on which to write register. + * @epp_val: Value to write to the EPP register. + */ +int cppc_set_epp(int cpu, u64 epp_val) +{ + if (epp_val > CPPC_ENERGY_PERF_MAX) + return -EINVAL; + + return cppc_set_reg_val(cpu, ENERGY_PERF, epp_val); +} +EXPORT_SYMBOL_GPL(cppc_set_epp); + +/** + * cppc_get_auto_act_window() - Read autonomous activity window register. 
+ * @cpu: CPU from which to read register. + * @auto_act_window: Return address. + * + * According to ACPI 6.5, s8.4.6.1.6, the value read from the autonomous + * activity window register consists of two parts: a 7 bits value indicate + * significand and a 3 bits value indicate exponent. + */ +int cppc_get_auto_act_window(int cpu, u64 *auto_act_window) +{ + unsigned int exp; + u64 val, sig; + int ret; + + if (auto_act_window == NULL) + return -EINVAL; + + ret = cppc_get_reg_val(cpu, AUTO_ACT_WINDOW, &val); + if (ret) + return ret; + + sig = val & CPPC_AUTO_ACT_WINDOW_MAX_SIG; + exp = (val >> CPPC_AUTO_ACT_WINDOW_SIG_BIT_SIZE) & CPPC_AUTO_ACT_WINDOW_MAX_EXP; + *auto_act_window = sig * int_pow(10, exp); + + return 0; +} +EXPORT_SYMBOL_GPL(cppc_get_auto_act_window); + +/** + * cppc_set_auto_act_window() - Write autonomous activity window register. + * @cpu: CPU on which to write register. + * @auto_act_window: usec value to write to the autonomous activity window register. + * + * According to ACPI 6.5, s8.4.6.1.6, the value to write to the autonomous + * activity window register consists of two parts: a 7 bits value indicate + * significand and a 3 bits value indicate exponent. + */ +int cppc_set_auto_act_window(int cpu, u64 auto_act_window) +{ + /* The max value to store is 1270000000 */ + u64 max_val = CPPC_AUTO_ACT_WINDOW_MAX_SIG * int_pow(10, CPPC_AUTO_ACT_WINDOW_MAX_EXP); + int exp = 0; + u64 val; + + if (auto_act_window > max_val) + return -EINVAL; + + /* + * The max significand is 127, when auto_act_window is larger than + * 129, discard the precision of the last digit and increase the + * exponent by 1. + */ + while (auto_act_window > CPPC_AUTO_ACT_WINDOW_SIG_CARRY_THRESH) { + auto_act_window /= 10; + exp += 1; + } + + /* For 128 and 129, cut it to 127. */ + if (auto_act_window > CPPC_AUTO_ACT_WINDOW_MAX_SIG) + auto_act_window = CPPC_AUTO_ACT_WINDOW_MAX_SIG; + + val = (exp << CPPC_AUTO_ACT_WINDOW_SIG_BIT_SIZE) + auto_act_window; + + return cppc_set_reg_val(cpu, AUTO_ACT_WINDOW, val); +} +EXPORT_SYMBOL_GPL(cppc_set_auto_act_window); + +/** + * cppc_get_auto_sel() - Read autonomous selection register. + * @cpu: CPU from which to read register. + * @enable: Return address. + */ +int cppc_get_auto_sel(int cpu, bool *enable) +{ + u64 auto_sel; + int ret; + + if (enable == NULL) + return -EINVAL; + + ret = cppc_get_reg_val(cpu, AUTO_SEL_ENABLE, &auto_sel); + if (ret) + return ret; + + *enable = (bool)auto_sel; + + return 0; +} +EXPORT_SYMBOL_GPL(cppc_get_auto_sel); + /** - * cppc_set_perf - Set a CPUs performance controls. + * cppc_set_auto_sel - Write autonomous selection register. + * @cpu : CPU to which to write register. + * @enable : the desired value of autonomous selection resiter to be updated. + */ +int cppc_set_auto_sel(int cpu, bool enable) +{ + return cppc_set_reg_val(cpu, AUTO_SEL_ENABLE, enable); +} +EXPORT_SYMBOL_GPL(cppc_set_auto_sel); + +/** + * cppc_set_enable - Set to enable CPPC on the processor by writing the + * Continuous Performance Control package EnableRegister field. + * @cpu: CPU for which to enable CPPC register. + * @enable: 0 - disable, 1 - enable CPPC feature on the processor. + * + * Return: 0 for success, -ERRNO or -EIO otherwise. + */ +int cppc_set_enable(int cpu, bool enable) +{ + return cppc_set_reg_val(cpu, ENABLE, enable); +} +EXPORT_SYMBOL_GPL(cppc_set_enable); + +/** + * cppc_set_perf - Set a CPU's performance controls. * @cpu: CPU for which to set performance controls. * @perf_ctrls: ptr to cppc_perf_ctrls. 
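The activity-window encoding handled by cppc_get_auto_act_window() and cppc_set_auto_act_window() above packs a 7-bit significand and a 3-bit base-10 exponent into one register value, so the window in microseconds is significand * 10^exponent. The sketch below replays the same encode/decode arithmetic outside the kernel; the constants are the values implied by the comments above (max significand 127, max exponent 7, carry threshold 129), not the kernel's CPPC_AUTO_ACT_WINDOW_* macros.

#include <stdint.h>
#include <stdio.h>

#define ACT_WINDOW_MAX_SIG	127	/* 7-bit significand */
#define ACT_WINDOW_MAX_EXP	7	/* 3-bit exponent    */
#define ACT_WINDOW_SIG_BITS	7
#define ACT_WINDOW_CARRY_THRESH	129

static uint64_t pow10u(unsigned int e)
{
	uint64_t v = 1;

	while (e--)
		v *= 10;
	return v;
}

/* Encode a microsecond value into (exponent << 7) | significand. */
static uint64_t act_window_encode(uint64_t usec)
{
	uint64_t max = (uint64_t)ACT_WINDOW_MAX_SIG * pow10u(ACT_WINDOW_MAX_EXP);
	unsigned int exp = 0;

	if (usec > max)
		usec = max;	/* the kernel rejects this with -EINVAL instead */

	/* Drop one decimal digit at a time until the significand fits. */
	while (usec > ACT_WINDOW_CARRY_THRESH) {
		usec /= 10;
		exp++;
	}
	/* 128 and 129 are clamped to the 7-bit maximum of 127. */
	if (usec > ACT_WINDOW_MAX_SIG)
		usec = ACT_WINDOW_MAX_SIG;

	return ((uint64_t)exp << ACT_WINDOW_SIG_BITS) | usec;
}

/* Decode the register value back into microseconds. */
static uint64_t act_window_decode(uint64_t reg)
{
	uint64_t sig = reg & ACT_WINDOW_MAX_SIG;
	unsigned int exp = (reg >> ACT_WINDOW_SIG_BITS) & ACT_WINDOW_MAX_EXP;

	return sig * pow10u(exp);
}

int main(void)
{
	uint64_t reg = act_window_encode(2500);	/* 2.5 ms, hypothetical */

	printf("encoded 0x%llx -> %llu usec\n", (unsigned long long)reg,
	       (unsigned long long)act_window_decode(reg));
	return 0;
}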
See cppc_acpi.h * @@ -1225,7 +1742,7 @@ EXPORT_SYMBOL_GPL(cppc_get_perf_ctrs); int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls) { struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu); - struct cpc_register_resource *desired_reg; + struct cpc_register_resource *desired_reg, *min_perf_reg, *max_perf_reg; int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu); struct cppc_pcc_data *pcc_ss_data = NULL; int ret = 0; @@ -1236,6 +1753,8 @@ int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls) } desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF]; + min_perf_reg = &cpc_desc->cpc_regs[MIN_PERF]; + max_perf_reg = &cpc_desc->cpc_regs[MAX_PERF]; /* * This is Phase-I where we want to write to CPC registers @@ -1244,7 +1763,7 @@ int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls) * Since read_lock can be acquired by multiple CPUs simultaneously we * achieve that goal here */ - if (CPC_IN_PCC(desired_reg)) { + if (CPC_IN_PCC(desired_reg) || CPC_IN_PCC(min_perf_reg) || CPC_IN_PCC(max_perf_reg)) { if (pcc_ss_id < 0) { pr_debug("Invalid pcc_ss_id\n"); return -ENODEV; @@ -1267,13 +1786,19 @@ int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls) cpc_desc->write_cmd_status = 0; } + cpc_write(cpu, desired_reg, perf_ctrls->desired_perf); + /* - * Skip writing MIN/MAX until Linux knows how to come up with - * useful values. + * Only write if min_perf and max_perf not zero. Some drivers pass zero + * value to min and max perf, but they don't mean to set the zero value, + * they just don't want to write to those registers. */ - cpc_write(cpu, desired_reg, perf_ctrls->desired_perf); + if (perf_ctrls->min_perf) + cpc_write(cpu, min_perf_reg, perf_ctrls->min_perf); + if (perf_ctrls->max_perf) + cpc_write(cpu, max_perf_reg, perf_ctrls->max_perf); - if (CPC_IN_PCC(desired_reg)) + if (CPC_IN_PCC(desired_reg) || CPC_IN_PCC(min_perf_reg) || CPC_IN_PCC(max_perf_reg)) up_read(&pcc_ss_data->pcc_lock); /* END Phase-I */ /* * This is Phase-II where we transfer the ownership of PCC to Platform @@ -1297,7 +1822,7 @@ int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls) * executing the Phase-II. * 2. Some other CPU has beaten this CPU to successfully execute the * write_trylock and has already acquired the write_lock. We know for a - * fact it(other CPU acquiring the write_lock) couldn't have happened + * fact it (other CPU acquiring the write_lock) couldn't have happened * before this CPU's Phase-I as we held the read_lock. * 3. Some other CPU executing pcc CMD_READ has stolen the * down_write, in which case, send_pcc_cmd will check for pending @@ -1317,11 +1842,11 @@ int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls) * is still with OSPM. * pending_pcc_write_cmd can also be cleared by a different CPU, if * there was a pcc CMD_READ waiting on down_write and it steals the lock - * before the pcc CMD_WRITE is completed. pcc_send_cmd checks for this + * before the pcc CMD_WRITE is completed. 
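The Phase-I/Phase-II split above lets any number of CPUs stage their desired/min/max performance writes under the shared read lock, while a single winner of the write trylock later flushes everything that is pending with one CMD_WRITE. The stand-alone sketch below illustrates that batching pattern with a POSIX rwlock; the names and the single staged value are illustrative, not the kernel's.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_rwlock_t pcc_lock = PTHREAD_RWLOCK_INITIALIZER;
static bool pending_write;
static int staged_value;	/* stands in for the PCC comm region */
static int platform_value;	/* stands in for the platform's view */

/* Phase-I: every caller stages its request under the shared read lock. */
static void stage_request(int val)
{
	pthread_rwlock_rdlock(&pcc_lock);
	staged_value = val;	/* analogous to cpc_write() */
	pending_write = true;
	pthread_rwlock_unlock(&pcc_lock);
}

/* Phase-II: whoever wins the write trylock flushes once for everybody. */
static void flush_requests(void)
{
	if (pthread_rwlock_trywrlock(&pcc_lock))
		return;		/* some other caller will flush */

	if (pending_write) {
		platform_value = staged_value;	/* analogous to CMD_WRITE */
		pending_write = false;
	}
	pthread_rwlock_unlock(&pcc_lock);
}

int main(void)
{
	stage_request(42);
	flush_requests();
	printf("platform saw %d\n", platform_value);
	return 0;
}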
send_pcc_cmd checks for this * case during a CMD_READ and if there are pending writes it delivers * the write command before servicing the read command */ - if (CPC_IN_PCC(desired_reg)) { + if (CPC_IN_PCC(desired_reg) || CPC_IN_PCC(min_perf_reg) || CPC_IN_PCC(max_perf_reg)) { if (down_write_trylock(&pcc_ss_data->pcc_lock)) {/* BEGIN Phase-II */ /* Update only if there are pending write commands */ if (pcc_ss_data->pending_pcc_write_cmd) @@ -1341,13 +1866,17 @@ EXPORT_SYMBOL_GPL(cppc_set_perf); /** * cppc_get_transition_latency - returns frequency transition latency in ns + * @cpu_num: CPU number for per_cpu(). * - * ACPI CPPC does not explicitly specifiy how a platform can specify the - * transition latency for perfromance change requests. The closest we have + * ACPI CPPC does not explicitly specify how a platform can specify the + * transition latency for performance change requests. The closest we have * is the timing information from the PCCT tables which provides the info * on the number and frequency of PCC commands the platform can handle. + * + * If desired_reg is in the SystemMemory or SystemIo ACPI address space, + * then assume there is no latency. */ -unsigned int cppc_get_transition_latency(int cpu_num) +int cppc_get_transition_latency(int cpu_num) { /* * Expected transition latency is based on the PCCT timing values @@ -1360,30 +1889,143 @@ unsigned int cppc_get_transition_latency(int cpu_num) * completion of a command before issuing the next command, * in microseconds. */ - unsigned int latency_ns = 0; struct cpc_desc *cpc_desc; struct cpc_register_resource *desired_reg; int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu_num); struct cppc_pcc_data *pcc_ss_data; + int latency_ns = 0; cpc_desc = per_cpu(cpc_desc_ptr, cpu_num); if (!cpc_desc) - return CPUFREQ_ETERNAL; + return -ENODATA; desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF]; - if (!CPC_IN_PCC(desired_reg)) - return CPUFREQ_ETERNAL; + if (CPC_IN_SYSTEM_MEMORY(desired_reg) || CPC_IN_SYSTEM_IO(desired_reg)) + return 0; - if (pcc_ss_id < 0) - return CPUFREQ_ETERNAL; + if (!CPC_IN_PCC(desired_reg) || pcc_ss_id < 0) + return -ENODATA; pcc_ss_data = pcc_data[pcc_ss_id]; if (pcc_ss_data->pcc_mpar) latency_ns = 60 * (1000 * 1000 * 1000 / pcc_ss_data->pcc_mpar); - latency_ns = max(latency_ns, pcc_ss_data->pcc_nominal * 1000); - latency_ns = max(latency_ns, pcc_ss_data->pcc_mrtt * 1000); + latency_ns = max_t(int, latency_ns, pcc_ss_data->pcc_nominal * 1000); + latency_ns = max_t(int, latency_ns, pcc_ss_data->pcc_mrtt * 1000); return latency_ns; } EXPORT_SYMBOL_GPL(cppc_get_transition_latency); + +/* Minimum struct length needed for the DMI processor entry we want */ +#define DMI_ENTRY_PROCESSOR_MIN_LENGTH 48 + +/* Offset in the DMI processor structure for the max frequency */ +#define DMI_PROCESSOR_MAX_SPEED 0x14 + +/* Callback function used to retrieve the max frequency from DMI */ +static void cppc_find_dmi_mhz(const struct dmi_header *dm, void *private) +{ + const u8 *dmi_data = (const u8 *)dm; + u16 *mhz = (u16 *)private; + + if (dm->type == DMI_ENTRY_PROCESSOR && + dm->length >= DMI_ENTRY_PROCESSOR_MIN_LENGTH) { + u16 val = (u16)get_unaligned((const u16 *) + (dmi_data + DMI_PROCESSOR_MAX_SPEED)); + *mhz = umax(val, *mhz); + } +} + +/* Look up the max frequency in DMI */ +static u64 cppc_get_dmi_max_khz(void) +{ + u16 mhz = 0; + + dmi_walk(cppc_find_dmi_mhz, &mhz); + + /* + * Real stupid fallback value, just in case there is no + * actual value set. + */ + mhz = mhz ? 
mhz : 1; + + return KHZ_PER_MHZ * mhz; +} + +/* + * If CPPC lowest_freq and nominal_freq registers are exposed then we can + * use them to convert perf to freq and vice versa. The conversion is + * extrapolated as an affine function passing by the 2 points: + * - (Low perf, Low freq) + * - (Nominal perf, Nominal freq) + */ +unsigned int cppc_perf_to_khz(struct cppc_perf_caps *caps, unsigned int perf) +{ + s64 retval, offset = 0; + static u64 max_khz; + u64 mul, div; + + if (caps->lowest_freq && caps->nominal_freq) { + /* Avoid special case when nominal_freq is equal to lowest_freq */ + if (caps->lowest_freq == caps->nominal_freq) { + mul = caps->nominal_freq; + div = caps->nominal_perf; + } else { + mul = caps->nominal_freq - caps->lowest_freq; + div = caps->nominal_perf - caps->lowest_perf; + } + mul *= KHZ_PER_MHZ; + offset = caps->nominal_freq * KHZ_PER_MHZ - + div64_u64(caps->nominal_perf * mul, div); + } else { + if (!max_khz) + max_khz = cppc_get_dmi_max_khz(); + mul = max_khz; + div = caps->highest_perf; + } + + retval = offset + div64_u64(perf * mul, div); + if (retval >= 0) + return retval; + return 0; +} +EXPORT_SYMBOL_GPL(cppc_perf_to_khz); + +unsigned int cppc_khz_to_perf(struct cppc_perf_caps *caps, unsigned int freq) +{ + s64 retval, offset = 0; + static u64 max_khz; + u64 mul, div; + + if (caps->lowest_freq && caps->nominal_freq) { + /* Avoid special case when nominal_freq is equal to lowest_freq */ + if (caps->lowest_freq == caps->nominal_freq) { + mul = caps->nominal_perf; + div = caps->nominal_freq; + } else { + mul = caps->nominal_perf - caps->lowest_perf; + div = caps->nominal_freq - caps->lowest_freq; + } + /* + * We don't need to convert to kHz for computing offset and can + * directly use nominal_freq and lowest_freq as the div64_u64 + * will remove the frequency unit. + */ + offset = caps->nominal_perf - + div64_u64(caps->nominal_freq * mul, div); + /* But we need it for computing the perf level. 
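cppc_perf_to_khz() and cppc_khz_to_perf() treat the abstract performance scale and the frequency scale as related by an affine function through the points (lowest_perf, lowest_freq) and (nominal_perf, nominal_freq), falling back to the DMI maximum speed when those optional registers are absent. The sketch below repeats the same arithmetic in user space with hypothetical capability values; it omits the lowest == nominal special case and the DMI fallback handled above.

#include <stdint.h>
#include <stdio.h>

#define KHZ_PER_MHZ	1000ULL

struct caps {
	uint64_t lowest_perf, nominal_perf;
	uint64_t lowest_freq, nominal_freq;	/* MHz */
};

/* Affine map through (lowest_perf, lowest_freq) and (nominal_perf, nominal_freq). */
static uint64_t perf_to_khz(const struct caps *c, uint64_t perf)
{
	uint64_t mul = (c->nominal_freq - c->lowest_freq) * KHZ_PER_MHZ;
	uint64_t div = c->nominal_perf - c->lowest_perf;
	int64_t offset = (int64_t)(c->nominal_freq * KHZ_PER_MHZ) -
			 (int64_t)(c->nominal_perf * mul / div);

	return offset + perf * mul / div;
}

static uint64_t khz_to_perf(const struct caps *c, uint64_t khz)
{
	uint64_t mul = c->nominal_perf - c->lowest_perf;
	uint64_t div = c->nominal_freq - c->lowest_freq;
	int64_t offset = (int64_t)c->nominal_perf -
			 (int64_t)(c->nominal_freq * mul / div);

	/* Scale the divisor to kHz only for the final conversion. */
	return offset + khz * mul / (div * KHZ_PER_MHZ);
}

int main(void)
{
	/* Hypothetical capability values, not taken from any real _CPC. */
	struct caps c = {
		.lowest_perf = 10, .nominal_perf = 30,
		.lowest_freq = 800, .nominal_freq = 2400,
	};

	printf("perf 20 -> %llu kHz\n", (unsigned long long)perf_to_khz(&c, 20));
	printf("1600000 kHz -> perf %llu\n",
	       (unsigned long long)khz_to_perf(&c, 1600000));
	return 0;
}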
*/ + div *= KHZ_PER_MHZ; + } else { + if (!max_khz) + max_khz = cppc_get_dmi_max_khz(); + mul = caps->highest_perf; + div = max_khz; + } + + retval = offset + div64_u64(freq * mul, div); + if (retval >= 0) + return retval; + return 0; +} +EXPORT_SYMBOL_GPL(cppc_khz_to_perf); diff --git a/drivers/acpi/custom_method.c b/drivers/acpi/custom_method.c deleted file mode 100644 index 4451877f83b6..000000000000 --- a/drivers/acpi/custom_method.c +++ /dev/null @@ -1,99 +0,0 @@ -/* - * custom_method.c - debugfs interface for customizing ACPI control method - */ - -#include <linux/init.h> -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/uaccess.h> -#include <linux/debugfs.h> -#include <linux/acpi.h> - -#include "internal.h" - -#define _COMPONENT ACPI_SYSTEM_COMPONENT -ACPI_MODULE_NAME("custom_method"); -MODULE_LICENSE("GPL"); - -static struct dentry *cm_dentry; - -/* /sys/kernel/debug/acpi/custom_method */ - -static ssize_t cm_write(struct file *file, const char __user * user_buf, - size_t count, loff_t *ppos) -{ - static char *buf; - static u32 max_size; - static u32 uncopied_bytes; - - struct acpi_table_header table; - acpi_status status; - - if (!(*ppos)) { - /* parse the table header to get the table length */ - if (count <= sizeof(struct acpi_table_header)) - return -EINVAL; - if (copy_from_user(&table, user_buf, - sizeof(struct acpi_table_header))) - return -EFAULT; - uncopied_bytes = max_size = table.length; - buf = kzalloc(max_size, GFP_KERNEL); - if (!buf) - return -ENOMEM; - } - - if (buf == NULL) - return -EINVAL; - - if ((*ppos > max_size) || - (*ppos + count > max_size) || - (*ppos + count < count) || - (count > uncopied_bytes)) - return -EINVAL; - - if (copy_from_user(buf + (*ppos), user_buf, count)) { - kfree(buf); - buf = NULL; - return -EFAULT; - } - - uncopied_bytes -= count; - *ppos += count; - - if (!uncopied_bytes) { - status = acpi_install_method(buf); - kfree(buf); - buf = NULL; - if (ACPI_FAILURE(status)) - return -EINVAL; - add_taint(TAINT_OVERRIDDEN_ACPI_TABLE, LOCKDEP_NOW_UNRELIABLE); - } - - return count; -} - -static const struct file_operations cm_fops = { - .write = cm_write, - .llseek = default_llseek, -}; - -static int __init acpi_custom_method_init(void) -{ - if (acpi_debugfs_dir == NULL) - return -ENOENT; - - cm_dentry = debugfs_create_file("custom_method", S_IWUSR, - acpi_debugfs_dir, NULL, &cm_fops); - if (cm_dentry == NULL) - return -ENODEV; - - return 0; -} - -static void __exit acpi_custom_method_exit(void) -{ - debugfs_remove(cm_dentry); -} - -module_init(acpi_custom_method_init); -module_exit(acpi_custom_method_exit); diff --git a/drivers/acpi/debugfs.c b/drivers/acpi/debugfs.c index 68bb305b977f..074eb98d213e 100644 --- a/drivers/acpi/debugfs.c +++ b/drivers/acpi/debugfs.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * debugfs.c - ACPI debugfs interface to userspace. */ @@ -9,9 +10,6 @@ #include "internal.h" -#define _COMPONENT ACPI_SYSTEM_COMPONENT -ACPI_MODULE_NAME("debugfs"); - struct dentry *acpi_debugfs_dir; EXPORT_SYMBOL_GPL(acpi_debugfs_dir); diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c index 824ae985ad93..4e0583274b8f 100644 --- a/drivers/acpi/device_pm.c +++ b/drivers/acpi/device_pm.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * drivers/acpi/device_pm.c - ACPI device power management routines. 
* @@ -6,18 +7,11 @@ * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ +#define pr_fmt(fmt) "PM: " fmt + #include <linux/acpi.h> #include <linux/export.h> #include <linux/mutex.h> @@ -26,11 +20,9 @@ #include <linux/pm_runtime.h> #include <linux/suspend.h> +#include "fan.h" #include "internal.h" -#define _COMPONENT ACPI_POWER_COMPONENT -ACPI_MODULE_NAME("device_pm"); - /** * acpi_power_state_string - String representation of ACPI device power state. * @state: ACPI device power state to return the string representation of. @@ -53,6 +45,19 @@ const char *acpi_power_state_string(int state) } } +static int acpi_dev_pm_explicit_get(struct acpi_device *device, int *state) +{ + unsigned long long psc; + acpi_status status; + + status = acpi_evaluate_integer(device->handle, "_PSC", NULL, &psc); + if (ACPI_FAILURE(status)) + return -ENODEV; + + *state = psc; + return 0; +} + /** * acpi_device_get_power - Get power state of an ACPI device. * @device: Device to get the power state of. @@ -61,18 +66,26 @@ const char *acpi_power_state_string(int state) * This function does not update the device's power.state field, but it may * update its parent's power.state field (when the parent's power state is * unknown and the device's power state turns out to be D0). + * + * Also, it does not update power resource reference counters to ensure that + * the power state returned by it will be persistent and it may return a power + * state shallower than previously set by acpi_device_set_power() for @device + * (if that power state depends on any power resources). */ int acpi_device_get_power(struct acpi_device *device, int *state) { int result = ACPI_STATE_UNKNOWN; + struct acpi_device *parent; + int error; if (!device || !state) return -EINVAL; + parent = acpi_dev_parent(device); + if (!device->flags.power_manageable) { /* TBD: Non-recursive algorithm for walking up hierarchy. */ - *state = device->parent ? - device->parent->power.state : ACPI_STATE_D0; + *state = parent ? parent->power.state : ACPI_STATE_D0; goto out; } @@ -81,18 +94,16 @@ int acpi_device_get_power(struct acpi_device *device, int *state) * if available. */ if (device->power.flags.power_resources) { - int error = acpi_power_get_inferred_state(device, &result); + error = acpi_power_get_inferred_state(device, &result); if (error) return error; } if (device->power.flags.explicit_get) { - acpi_handle handle = device->handle; - unsigned long long psc; - acpi_status status; + int psc; - status = acpi_evaluate_integer(handle, "_PSC", NULL, &psc); - if (ACPI_FAILURE(status)) - return -ENODEV; + error = acpi_dev_pm_explicit_get(device, &psc); + if (error) + return error; /* * The power resources settings may indicate a power state @@ -113,20 +124,19 @@ int acpi_device_get_power(struct acpi_device *device, int *state) * point, the fact that the device is in D0 implies that the parent has * to be in D0 too, except if ignore_parent is set. 
*/ - if (!device->power.flags.ignore_parent && device->parent - && device->parent->power.state == ACPI_STATE_UNKNOWN - && result == ACPI_STATE_D0) - device->parent->power.state = ACPI_STATE_D0; + if (!device->power.flags.ignore_parent && parent && + parent->power.state == ACPI_STATE_UNKNOWN && + result == ACPI_STATE_D0) + parent->power.state = ACPI_STATE_D0; *state = result; out: - ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device [%s] power state is %s\n", - device->pnp.bus_id, acpi_power_state_string(*state))); + acpi_handle_debug(device->handle, "Power state: %s\n", + acpi_power_state_string(*state)); return 0; } -EXPORT_SYMBOL(acpi_device_get_power); static int acpi_dev_pm_explicit_set(struct acpi_device *adev, int state) { @@ -158,14 +168,15 @@ int acpi_device_set_power(struct acpi_device *device, int state) || (state < ACPI_STATE_D0) || (state > ACPI_STATE_D3_COLD)) return -EINVAL; + acpi_handle_debug(device->handle, "Power state change: %s -> %s\n", + acpi_power_state_string(device->power.state), + acpi_power_state_string(state)); + /* Make sure this is a valid target state */ - if (state == device->power.state) { - ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device [%s] already in %s\n", - device->pnp.bus_id, - acpi_power_state_string(state))); - return 0; - } + /* There is a special case for D0 addressed below. */ + if (state > ACPI_STATE_D0 && state == device->power.state) + goto no_change; if (state == ACPI_STATE_D3_COLD) { /* @@ -173,22 +184,26 @@ int acpi_device_set_power(struct acpi_device *device, int state) * possibly drop references to the power resources in use. */ state = ACPI_STATE_D3_HOT; - /* If _PR3 is not available, use D3hot as the target state. */ + /* If D3cold is not supported, use D3hot as the target state. */ if (!device->power.states[ACPI_STATE_D3_COLD].flags.valid) target_state = state; } else if (!device->power.states[state].flags.valid) { - dev_warn(&device->dev, "Power state %s not supported\n", - acpi_power_state_string(state)); + acpi_handle_debug(device->handle, "Power state %s not supported\n", + acpi_power_state_string(state)); return -ENODEV; } - if (!device->power.flags.ignore_parent && - device->parent && (state < device->parent->power.state)) { - dev_warn(&device->dev, - "Cannot transition to power state %s for parent in %s\n", - acpi_power_state_string(state), - acpi_power_state_string(device->parent->power.state)); - return -ENODEV; + if (!device->power.flags.ignore_parent) { + struct acpi_device *parent; + + parent = acpi_dev_parent(device); + if (parent && state < parent->power.state) { + acpi_handle_debug(device->handle, + "Cannot transition to %s for parent in %s\n", + acpi_power_state_string(state), + acpi_power_state_string(parent->power.state)); + return -ENODEV; + } } /* @@ -204,53 +219,88 @@ int acpi_device_set_power(struct acpi_device *device, int state) * (deeper) states to higher-power (shallower) states. */ if (state < device->power.state) { - dev_warn(&device->dev, "Cannot transition from %s to %s\n", - acpi_power_state_string(device->power.state), - acpi_power_state_string(state)); + acpi_handle_debug(device->handle, + "Cannot transition from %s to %s\n", + acpi_power_state_string(device->power.state), + acpi_power_state_string(state)); return -ENODEV; } - result = acpi_dev_pm_explicit_set(device, state); - if (result) - goto end; + /* + * If the device goes from D3hot to D3cold, _PS3 has been + * evaluated for it already, so skip it in that case. 
+ */ + if (device->power.state < ACPI_STATE_D3_HOT) { + result = acpi_dev_pm_explicit_set(device, state); + if (result) + goto end; + } if (device->power.flags.power_resources) result = acpi_power_transition(device, target_state); } else { + int cur_state = device->power.state; + if (device->power.flags.power_resources) { result = acpi_power_transition(device, ACPI_STATE_D0); if (result) goto end; } + + if (cur_state == ACPI_STATE_D0) { + int psc; + + /* Nothing to do here if _PSC is not present. */ + if (!device->power.flags.explicit_get) + goto no_change; + + /* + * The power state of the device was set to D0 last + * time, but that might have happened before a + * system-wide transition involving the platform + * firmware, so it may be necessary to evaluate _PS0 + * for the device here. However, use extra care here + * and evaluate _PSC to check the device's current power + * state, and only invoke _PS0 if the evaluation of _PSC + * is successful and it returns a power state different + * from D0. + */ + result = acpi_dev_pm_explicit_get(device, &psc); + if (result || psc == ACPI_STATE_D0) + goto no_change; + } + result = acpi_dev_pm_explicit_set(device, ACPI_STATE_D0); } - end: +end: if (result) { - dev_warn(&device->dev, "Failed to change power state to %s\n", - acpi_power_state_string(state)); + acpi_handle_debug(device->handle, + "Failed to change power state to %s\n", + acpi_power_state_string(target_state)); } else { device->power.state = target_state; - ACPI_DEBUG_PRINT((ACPI_DB_INFO, - "Device [%s] transitioned to %s\n", - device->pnp.bus_id, - acpi_power_state_string(state))); + acpi_handle_debug(device->handle, "Power state changed to %s\n", + acpi_power_state_string(target_state)); } return result; + +no_change: + acpi_handle_debug(device->handle, "Already in %s\n", + acpi_power_state_string(state)); + return 0; } EXPORT_SYMBOL(acpi_device_set_power); int acpi_bus_set_power(acpi_handle handle, int state) { - struct acpi_device *device; - int result; + struct acpi_device *device = acpi_fetch_acpi_dev(handle); - result = acpi_bus_get_device(handle, &device); - if (result) - return result; + if (device) + return acpi_device_set_power(device, state); - return acpi_device_set_power(device, state); + return -ENODEV; } EXPORT_SYMBOL(acpi_bus_set_power); @@ -325,6 +375,41 @@ int acpi_device_fix_up_power(struct acpi_device *device) } EXPORT_SYMBOL_GPL(acpi_device_fix_up_power); +static int fix_up_power_if_applicable(struct acpi_device *adev, void *not_used) +{ + if (adev->status.present && adev->status.enabled) + acpi_device_fix_up_power(adev); + + return 0; +} + +/** + * acpi_device_fix_up_power_extended - Force device and its children into D0. + * @adev: Parent device object whose power state is to be fixed up. + * + * Call acpi_device_fix_up_power() for @adev and its children so long as they + * are reported as present and enabled. + */ +void acpi_device_fix_up_power_extended(struct acpi_device *adev) +{ + acpi_device_fix_up_power(adev); + acpi_dev_for_each_child(adev, fix_up_power_if_applicable, NULL); +} +EXPORT_SYMBOL_GPL(acpi_device_fix_up_power_extended); + +/** + * acpi_device_fix_up_power_children - Force a device's children into D0. + * @adev: Parent device object whose children's power state is to be fixed up. + * + * Call acpi_device_fix_up_power() for @adev's children so long as they + * are reported as present and enabled. 
+ */ +void acpi_device_fix_up_power_children(struct acpi_device *adev) +{ + acpi_dev_for_each_child(adev, fix_up_power_if_applicable, NULL); +} +EXPORT_SYMBOL_GPL(acpi_device_fix_up_power_children); + int acpi_device_update_power(struct acpi_device *device, int *state_p) { int state; @@ -368,24 +453,69 @@ EXPORT_SYMBOL_GPL(acpi_device_update_power); int acpi_bus_update_power(acpi_handle handle, int *state_p) { - struct acpi_device *device; - int result; + struct acpi_device *device = acpi_fetch_acpi_dev(handle); + + if (device) + return acpi_device_update_power(device, state_p); - result = acpi_bus_get_device(handle, &device); - return result ? result : acpi_device_update_power(device, state_p); + return -ENODEV; } EXPORT_SYMBOL_GPL(acpi_bus_update_power); bool acpi_bus_power_manageable(acpi_handle handle) { - struct acpi_device *device; - int result; + struct acpi_device *device = acpi_fetch_acpi_dev(handle); - result = acpi_bus_get_device(handle, &device); - return result ? false : device->flags.power_manageable; + return device && device->flags.power_manageable; } EXPORT_SYMBOL(acpi_bus_power_manageable); +static int acpi_power_up_if_adr_present(struct acpi_device *adev, void *not_used) +{ + if (!(adev->flags.power_manageable && adev->pnp.type.bus_address)) + return 0; + + acpi_handle_debug(adev->handle, "Power state: %s\n", + acpi_power_state_string(adev->power.state)); + + if (adev->power.state == ACPI_STATE_D3_COLD) + return acpi_device_set_power(adev, ACPI_STATE_D0); + + return 0; +} + +/** + * acpi_dev_power_up_children_with_adr - Power up childres with valid _ADR + * @adev: Parent ACPI device object. + * + * Change the power states of the direct children of @adev that are in D3cold + * and hold valid _ADR objects to D0 in order to allow bus (e.g. PCI) + * enumeration code to access them. + */ +void acpi_dev_power_up_children_with_adr(struct acpi_device *adev) +{ + acpi_dev_for_each_child(adev, acpi_power_up_if_adr_present, NULL); +} + +/** + * acpi_dev_power_state_for_wake - Deepest power state for wakeup signaling + * @adev: ACPI companion of the target device. + * + * Evaluate _S0W for @adev and return the value produced by it or return + * ACPI_STATE_UNKNOWN on errors (including _S0W not present). 
+ */ +u8 acpi_dev_power_state_for_wake(struct acpi_device *adev) +{ + unsigned long long state; + acpi_status status; + + status = acpi_evaluate_integer(adev->handle, "_S0W", NULL, &state); + if (ACPI_FAILURE(status)) + return ACPI_STATE_UNKNOWN; + + return state; +} + #ifdef CONFIG_PM static DEFINE_MUTEX(acpi_pm_notifier_lock); static DEFINE_MUTEX(acpi_pm_notifier_install_lock); @@ -405,7 +535,7 @@ static void acpi_pm_notify_handler(acpi_handle handle, u32 val, void *not_used) acpi_handle_debug(handle, "Wake notify\n"); - adev = acpi_bus_get_acpi_device(handle); + adev = acpi_get_acpi_dev(handle); if (!adev) return; @@ -414,7 +544,7 @@ static void acpi_pm_notify_handler(acpi_handle handle, u32 val, void *not_used) if (adev->wakeup.flags.notifier_present) { pm_wakeup_ws_event(adev->wakeup.ws, 0, acpi_s2idle_wakeup()); if (adev->wakeup.context.func) { - acpi_handle_debug(handle, "Running %pF for %s\n", + acpi_handle_debug(handle, "Running %pS for %s\n", adev->wakeup.context.func, dev_name(adev->wakeup.context.dev)); adev->wakeup.context.func(&adev->wakeup.context); @@ -423,7 +553,7 @@ static void acpi_pm_notify_handler(acpi_handle handle, u32 val, void *not_used) mutex_unlock(&acpi_pm_notifier_lock); - acpi_bus_put_acpi_device(adev); + acpi_put_acpi_dev(adev); } /** @@ -456,7 +586,8 @@ acpi_status acpi_add_pm_notifier(struct acpi_device *adev, struct device *dev, goto out; mutex_lock(&acpi_pm_notifier_lock); - adev->wakeup.ws = wakeup_source_register(dev_name(&adev->dev)); + adev->wakeup.ws = wakeup_source_register(&adev->dev, + dev_name(&adev->dev)); adev->wakeup.context.dev = dev; adev->wakeup.context.func = func; adev->wakeup.flags.notifier_present = true; @@ -500,11 +631,9 @@ acpi_status acpi_remove_pm_notifier(struct acpi_device *adev) bool acpi_bus_can_wakeup(acpi_handle handle) { - struct acpi_device *device; - int result; + struct acpi_device *device = acpi_fetch_acpi_dev(handle); - result = acpi_bus_get_device(handle, &device); - return result ? false : device->wakeup.flags.valid; + return device && device->wakeup.flags.valid; } EXPORT_SYMBOL(acpi_bus_can_wakeup); @@ -590,7 +719,22 @@ static int acpi_dev_pm_get_state(struct device *dev, struct acpi_device *adev, d_min = ret; wakeup = device_may_wakeup(dev) && adev->wakeup.flags.valid && adev->wakeup.sleep_state >= target_state; + } else if (device_may_wakeup(dev) && dev->power.wakeirq) { + /* + * The ACPI subsystem doesn't manage the wake bit for IRQs + * defined with ExclusiveAndWake and SharedAndWake. Instead we + * expect them to be managed via the PM subsystem. Drivers + * should call dev_pm_set_wake_irq to register an IRQ as a wake + * source. + * + * If a device has a wake IRQ attached we need to check the + * _S0W method to get the correct wake D-state. Otherwise we + * end up putting the device into D3Cold which will more than + * likely disable wake functionality. + */ + wakeup = true; } else { + /* ACPI GPE is specified in _PRW. 
*/ wakeup = adev->wakeup.flags.valid; } @@ -703,7 +847,7 @@ static void acpi_pm_notify_work_func(struct acpi_device_wakeup_context *context) static DEFINE_MUTEX(acpi_wakeup_lock); static int __acpi_device_wakeup_enable(struct acpi_device *adev, - u32 target_state, int max_count) + u32 target_state) { struct acpi_device_wakeup *wakeup = &adev->wakeup; acpi_status status; @@ -711,15 +855,26 @@ static int __acpi_device_wakeup_enable(struct acpi_device *adev, mutex_lock(&acpi_wakeup_lock); - if (wakeup->enable_count >= max_count) - goto out; - + /* + * If the device wakeup power is already enabled, disable it and enable + * it again in case it depends on the configuration of subordinate + * devices and the conditions have changed since it was enabled last + * time. + */ if (wakeup->enable_count > 0) - goto inc; + acpi_disable_wakeup_device_power(adev); error = acpi_enable_wakeup_device_power(adev, target_state); - if (error) + if (error) { + if (wakeup->enable_count > 0) { + acpi_disable_gpe(wakeup->gpe_device, wakeup->gpe_number); + wakeup->enable_count = 0; + } goto out; + } + + if (wakeup->enable_count > 0) + goto inc; status = acpi_enable_gpe(wakeup->gpe_device, wakeup->gpe_number); if (ACPI_FAILURE(status)) { @@ -728,8 +883,14 @@ static int __acpi_device_wakeup_enable(struct acpi_device *adev, goto out; } + acpi_handle_debug(adev->handle, "GPE%2X enabled for wakeup\n", + (unsigned int)wakeup->gpe_number); + inc: - wakeup->enable_count++; + if (wakeup->enable_count < INT_MAX) + wakeup->enable_count++; + else + acpi_handle_info(adev->handle, "Wakeup enable count out of bounds!\n"); out: mutex_unlock(&acpi_wakeup_lock); @@ -750,7 +911,7 @@ out: */ static int acpi_device_wakeup_enable(struct acpi_device *adev, u32 target_state) { - return __acpi_device_wakeup_enable(adev, target_state, 1); + return __acpi_device_wakeup_enable(adev, target_state); } /** @@ -780,8 +941,12 @@ out: mutex_unlock(&acpi_wakeup_lock); } -static int __acpi_pm_set_device_wakeup(struct device *dev, bool enable, - int max_count) +/** + * acpi_pm_set_device_wakeup - Enable/disable remote wakeup for given device. + * @dev: Device to enable/disable to generate wakeup events. + * @enable: Whether to enable or disable the wakeup functionality. + */ +int acpi_pm_set_device_wakeup(struct device *dev, bool enable) { struct acpi_device *adev; int error; @@ -801,37 +966,15 @@ static int __acpi_pm_set_device_wakeup(struct device *dev, bool enable, return 0; } - error = __acpi_device_wakeup_enable(adev, acpi_target_system_state(), - max_count); + error = __acpi_device_wakeup_enable(adev, acpi_target_system_state()); if (!error) dev_dbg(dev, "Wakeup enabled by ACPI\n"); return error; } - -/** - * acpi_pm_set_device_wakeup - Enable/disable remote wakeup for given device. - * @dev: Device to enable/disable to generate wakeup events. - * @enable: Whether to enable or disable the wakeup functionality. - */ -int acpi_pm_set_device_wakeup(struct device *dev, bool enable) -{ - return __acpi_pm_set_device_wakeup(dev, enable, 1); -} EXPORT_SYMBOL_GPL(acpi_pm_set_device_wakeup); /** - * acpi_pm_set_bridge_wakeup - Enable/disable remote wakeup for given bridge. - * @dev: Bridge device to enable/disable to generate wakeup events. - * @enable: Whether to enable or disable the wakeup functionality. 
- */ -int acpi_pm_set_bridge_wakeup(struct device *dev, bool enable) -{ - return __acpi_pm_set_device_wakeup(dev, enable, INT_MAX); -} -EXPORT_SYMBOL_GPL(acpi_pm_set_bridge_wakeup); - -/** * acpi_dev_pm_low_power - Put ACPI device into a low-power state. * @dev: Device to put into a low-power state. * @adev: ACPI device node corresponding to @dev. @@ -925,6 +1068,7 @@ EXPORT_SYMBOL_GPL(acpi_dev_resume); int acpi_subsys_runtime_suspend(struct device *dev) { int ret = pm_generic_runtime_suspend(dev); + return ret ? ret : acpi_dev_suspend(dev, true); } EXPORT_SYMBOL_GPL(acpi_subsys_runtime_suspend); @@ -939,6 +1083,7 @@ EXPORT_SYMBOL_GPL(acpi_subsys_runtime_suspend); int acpi_subsys_runtime_resume(struct device *dev) { int ret = acpi_dev_resume(dev); + return ret ? ret : pm_generic_runtime_resume(dev); } EXPORT_SYMBOL_GPL(acpi_subsys_runtime_resume); @@ -949,8 +1094,8 @@ static bool acpi_dev_needs_resume(struct device *dev, struct acpi_device *adev) u32 sys_target = acpi_target_system_state(); int ret, state; - if (!pm_runtime_suspended(dev) || !adev || - device_may_wakeup(dev) != !!adev->wakeup.prepare_count) + if (!pm_runtime_suspended(dev) || !adev || (adev->wakeup.flags.valid && + device_may_wakeup(dev) != !!adev->wakeup.prepare_count)) return true; if (sys_target == ACPI_STATE_S0) @@ -974,6 +1119,8 @@ int acpi_subsys_prepare(struct device *dev) { struct acpi_device *adev = ACPI_COMPANION(dev); + dev_pm_set_strict_midlayer(dev, true); + if (dev->driver && dev->driver->pm && dev->driver->pm->prepare) { int ret = dev->driver->pm->prepare(dev); @@ -1002,6 +1149,8 @@ void acpi_subsys_complete(struct device *dev) */ if (pm_runtime_suspended(dev) && pm_resume_via_firmware()) pm_request_resume(dev); + + dev_pm_set_strict_midlayer(dev, false); } EXPORT_SYMBOL_GPL(acpi_subsys_complete); @@ -1016,7 +1165,7 @@ EXPORT_SYMBOL_GPL(acpi_subsys_complete); */ int acpi_subsys_suspend(struct device *dev) { - if (!dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) || + if (!dev_pm_smart_suspend(dev) || acpi_dev_needs_resume(dev, ACPI_COMPANION(dev))) pm_runtime_resume(dev); @@ -1035,7 +1184,7 @@ int acpi_subsys_suspend_late(struct device *dev) { int ret; - if (dev_pm_smart_suspend_and_suspended(dev)) + if (dev_pm_skip_suspend(dev)) return 0; ret = pm_generic_suspend_late(dev); @@ -1051,10 +1200,8 @@ int acpi_subsys_suspend_noirq(struct device *dev) { int ret; - if (dev_pm_smart_suspend_and_suspended(dev)) { - dev->power.may_skip_resume = true; + if (dev_pm_skip_suspend(dev)) return 0; - } ret = pm_generic_suspend_noirq(dev); if (ret) @@ -1067,8 +1214,8 @@ int acpi_subsys_suspend_noirq(struct device *dev) * acpi_subsys_complete() to take care of fixing up the device's state * anyway, if need be. */ - dev->power.may_skip_resume = device_may_wakeup(dev) || - !device_can_wakeup(dev); + if (device_can_wakeup(dev) && !device_may_wakeup(dev)) + dev->power.may_skip_resume = false; return 0; } @@ -1078,22 +1225,13 @@ EXPORT_SYMBOL_GPL(acpi_subsys_suspend_noirq); * acpi_subsys_resume_noirq - Run the device driver's "noirq" resume callback. * @dev: Device to handle. */ -int acpi_subsys_resume_noirq(struct device *dev) +static int acpi_subsys_resume_noirq(struct device *dev) { - if (dev_pm_may_skip_resume(dev)) + if (dev_pm_skip_resume(dev)) return 0; - /* - * Devices with DPM_FLAG_SMART_SUSPEND may be left in runtime suspend - * during system suspend, so update their runtime PM status to "active" - * as they will be put into D0 going forward. 
- */ - if (dev_pm_smart_suspend_and_suspended(dev)) - pm_runtime_set_active(dev); - return pm_generic_resume_noirq(dev); } -EXPORT_SYMBOL_GPL(acpi_subsys_resume_noirq); /** * acpi_subsys_resume_early - Resume device using ACPI. @@ -1101,14 +1239,47 @@ EXPORT_SYMBOL_GPL(acpi_subsys_resume_noirq); * * Use ACPI to put the given device into the full-power state and carry out the * generic early resume procedure for it during system transition into the - * working state. + * working state, but only do that if device either defines early resume + * handler, or does not define power operations at all. Otherwise powering up + * of the device is postponed to the normal resume phase. */ -int acpi_subsys_resume_early(struct device *dev) +static int acpi_subsys_resume_early(struct device *dev) { - int ret = acpi_dev_resume(dev); + const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; + int ret; + + if (dev_pm_skip_resume(dev)) + return 0; + + if (pm && !pm->resume_early) { + dev_dbg(dev, "postponing D0 transition to normal resume stage\n"); + return 0; + } + + ret = acpi_dev_resume(dev); return ret ? ret : pm_generic_resume_early(dev); } -EXPORT_SYMBOL_GPL(acpi_subsys_resume_early); + +/** + * acpi_subsys_resume - Resume device using ACPI. + * @dev: Device to Resume. + * + * Use ACPI to put the given device into the full-power state if it has not been + * powered up during early resume phase, and carry out the generic resume + * procedure for it during system transition into the working state. + */ +static int acpi_subsys_resume(struct device *dev) +{ + const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; + int ret = 0; + + if (!dev_pm_skip_resume(dev) && pm && !pm->resume_early) { + dev_dbg(dev, "executing postponed D0 transition\n"); + ret = acpi_dev_resume(dev); + } + + return ret ? ret : pm_generic_resume(dev); +} /** * acpi_subsys_freeze - Run the device driver's freeze callback. @@ -1117,67 +1288,86 @@ EXPORT_SYMBOL_GPL(acpi_subsys_resume_early); int acpi_subsys_freeze(struct device *dev) { /* - * This used to be done in acpi_subsys_prepare() for all devices and - * some drivers may depend on it, so do it here. Ideally, however, - * runtime-suspended devices should not be touched during freeze/thaw - * transitions. + * Resume all runtime-suspended devices before creating a snapshot + * image of system memory, because the restore kernel generally cannot + * be expected to always handle them consistently and they need to be + * put into the runtime-active metastate during system resume anyway, + * so it is better to ensure that the state saved in the image will be + * always consistent with that. */ - if (!dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND)) - pm_runtime_resume(dev); + pm_runtime_resume(dev); return pm_generic_freeze(dev); } EXPORT_SYMBOL_GPL(acpi_subsys_freeze); /** - * acpi_subsys_freeze_late - Run the device driver's "late" freeze callback. - * @dev: Device to handle. + * acpi_subsys_restore_early - Restore device using ACPI. + * @dev: Device to restore. */ -int acpi_subsys_freeze_late(struct device *dev) +int acpi_subsys_restore_early(struct device *dev) { + int ret = acpi_dev_resume(dev); - if (dev_pm_smart_suspend_and_suspended(dev)) - return 0; + return ret ? ret : pm_generic_restore_early(dev); +} +EXPORT_SYMBOL_GPL(acpi_subsys_restore_early); - return pm_generic_freeze_late(dev); +/** + * acpi_subsys_poweroff - Run the device driver's poweroff callback. + * @dev: Device to handle. 
+ * + * Follow PCI and resume devices from runtime suspend before running their + * system poweroff callbacks, unless the driver can cope with runtime-suspended + * devices during system suspend and there are no ACPI-specific reasons for + * resuming them. + */ +int acpi_subsys_poweroff(struct device *dev) +{ + if (!dev_pm_smart_suspend(dev) || + acpi_dev_needs_resume(dev, ACPI_COMPANION(dev))) + pm_runtime_resume(dev); + + return pm_generic_poweroff(dev); } -EXPORT_SYMBOL_GPL(acpi_subsys_freeze_late); +EXPORT_SYMBOL_GPL(acpi_subsys_poweroff); /** - * acpi_subsys_freeze_noirq - Run the device driver's "noirq" freeze callback. + * acpi_subsys_poweroff_late - Run the device driver's poweroff callback. * @dev: Device to handle. + * + * Carry out the generic late poweroff procedure for @dev and use ACPI to put + * it into a low-power state during system transition into a sleep state. */ -int acpi_subsys_freeze_noirq(struct device *dev) +static int acpi_subsys_poweroff_late(struct device *dev) { + int ret; - if (dev_pm_smart_suspend_and_suspended(dev)) + if (dev_pm_skip_suspend(dev)) return 0; - return pm_generic_freeze_noirq(dev); + ret = pm_generic_poweroff_late(dev); + if (ret) + return ret; + + return acpi_dev_suspend(dev, device_may_wakeup(dev)); } -EXPORT_SYMBOL_GPL(acpi_subsys_freeze_noirq); /** - * acpi_subsys_thaw_noirq - Run the device driver's "noirq" thaw callback. - * @dev: Device to handle. + * acpi_subsys_poweroff_noirq - Run the driver's "noirq" poweroff callback. + * @dev: Device to suspend. */ -int acpi_subsys_thaw_noirq(struct device *dev) +static int acpi_subsys_poweroff_noirq(struct device *dev) { - /* - * If the device is in runtime suspend, the "thaw" code may not work - * correctly with it, so skip the driver callback and make the PM core - * skip all of the subsequent "thaw" callbacks for the device. 
- */ - if (dev_pm_smart_suspend_and_suspended(dev)) { - dev_pm_skip_next_resume_phases(dev); + if (dev_pm_skip_suspend(dev)) return 0; - } - return pm_generic_thaw_noirq(dev); + return pm_generic_poweroff_noirq(dev); } -EXPORT_SYMBOL_GPL(acpi_subsys_thaw_noirq); #endif /* CONFIG_PM_SLEEP */ +static void acpi_dev_pm_detach(struct device *dev, bool power_off); + static struct dev_pm_domain acpi_general_pm_domain = { .ops = { .runtime_suspend = acpi_subsys_runtime_suspend, @@ -1186,21 +1376,19 @@ static struct dev_pm_domain acpi_general_pm_domain = { .prepare = acpi_subsys_prepare, .complete = acpi_subsys_complete, .suspend = acpi_subsys_suspend, + .resume = acpi_subsys_resume, .suspend_late = acpi_subsys_suspend_late, .suspend_noirq = acpi_subsys_suspend_noirq, .resume_noirq = acpi_subsys_resume_noirq, .resume_early = acpi_subsys_resume_early, .freeze = acpi_subsys_freeze, - .freeze_late = acpi_subsys_freeze_late, - .freeze_noirq = acpi_subsys_freeze_noirq, - .thaw_noirq = acpi_subsys_thaw_noirq, - .poweroff = acpi_subsys_suspend, - .poweroff_late = acpi_subsys_suspend_late, - .poweroff_noirq = acpi_subsys_suspend_noirq, - .restore_noirq = acpi_subsys_resume_noirq, - .restore_early = acpi_subsys_resume_early, + .poweroff = acpi_subsys_poweroff, + .poweroff_late = acpi_subsys_poweroff_late, + .poweroff_noirq = acpi_subsys_poweroff_noirq, + .restore_early = acpi_subsys_restore_early, #endif }, + .detach = acpi_dev_pm_detach, }; /** @@ -1255,9 +1443,18 @@ static void acpi_dev_pm_detach(struct device *dev, bool power_off) */ int acpi_dev_pm_attach(struct device *dev, bool power_on) { + /* + * Skip devices whose ACPI companions match the device IDs below, + * because they require special power management handling incompatible + * with the generic ACPI PM domain. + */ + static const struct acpi_device_id special_pm_ids[] = { + ACPI_FAN_DEVICE_IDS, + {} + }; struct acpi_device *adev = ACPI_COMPANION(dev); - if (!adev) + if (!adev || !acpi_match_device_ids(adev, special_pm_ids)) return 0; /* @@ -1275,8 +1472,65 @@ int acpi_dev_pm_attach(struct device *dev, bool power_on) acpi_device_wakeup_disable(adev); } - dev->pm_domain->detach = acpi_dev_pm_detach; return 1; } EXPORT_SYMBOL_GPL(acpi_dev_pm_attach); + +/** + * acpi_storage_d3 - Check if D3 should be used in the suspend path + * @dev: Device to check + * + * Return %true if the platform firmware wants @dev to be programmed + * into D3hot or D3cold (if supported) in the suspend path, or %false + * when there is no specific preference. On some platforms, if this + * hint is ignored, @dev may remain unresponsive after suspending the + * platform as a whole. + * + * Although the property has storage in the name it actually is + * applied to the PCIe slot and plugging in a non-storage device the + * same platform restrictions will likely apply. + */ +bool acpi_storage_d3(struct device *dev) +{ + struct acpi_device *adev = ACPI_COMPANION(dev); + u8 val; + + if (force_storage_d3()) + return true; + + if (!adev) + return false; + if (fwnode_property_read_u8(acpi_fwnode_handle(adev), "StorageD3Enable", + &val)) + return false; + return val == 1; +} +EXPORT_SYMBOL_GPL(acpi_storage_d3); + +/** + * acpi_dev_state_d0 - Tell if the device is in D0 power state + * @dev: Physical device the ACPI power state of which to check + * + * On a system without ACPI, return true. On a system with ACPI, return true if + * the current ACPI power state of the device is D0, or false otherwise. 
+ * + * Note that the power state of a device is not well-defined after it has been + * passed to acpi_device_set_power() and before that function returns, so it is + * not valid to ask for the ACPI power state of the device in that time frame. + * + * This function is intended to be used in a driver's probe or remove + * function. See Documentation/firmware-guide/acpi/non-d0-probe.rst for + * more information. + */ +bool acpi_dev_state_d0(struct device *dev) +{ + struct acpi_device *adev = ACPI_COMPANION(dev); + + if (!adev) + return true; + + return adev->power.state == ACPI_STATE_D0; +} +EXPORT_SYMBOL_GPL(acpi_dev_state_d0); + #endif /* CONFIG_PM */ diff --git a/drivers/acpi/device_sysfs.c b/drivers/acpi/device_sysfs.c index 545e91420cde..cd199fbe4dc9 100644 --- a/drivers/acpi/device_sysfs.c +++ b/drivers/acpi/device_sysfs.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * drivers/acpi/device_sysfs.c - ACPI device sysfs attributes and modalias. * @@ -7,15 +8,6 @@ * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ @@ -61,6 +53,7 @@ static struct attribute *acpi_data_node_default_attrs[] = { &data_node_path.attr, NULL }; +ATTRIBUTE_GROUPS(acpi_data_node_default); #define to_data_node(k) container_of(k, struct acpi_data_node, kobj) #define to_attr(a) container_of(a, struct acpi_data_node_attr, attr) @@ -81,12 +74,13 @@ static const struct sysfs_ops acpi_data_node_sysfs_ops = { static void acpi_data_node_release(struct kobject *kobj) { struct acpi_data_node *dn = to_data_node(kobj); + complete(&dn->kobj_done); } -static struct kobj_type acpi_data_node_ktype = { +static const struct kobj_type acpi_data_node_ktype = { .sysfs_ops = &acpi_data_node_sysfs_ops, - .default_attrs = acpi_data_node_default_attrs, + .default_groups = acpi_data_node_default_groups, .release = acpi_data_node_release, }; @@ -138,8 +132,8 @@ static void acpi_hide_nondev_subnodes(struct acpi_device_data *data) * Return: 0: no _HID and no _CID * -EINVAL: output error * -ENOMEM: output is truncated -*/ -static int create_pnp_modalias(struct acpi_device *acpi_dev, char *modalias, + */ +static int create_pnp_modalias(const struct acpi_device *acpi_dev, char *modalias, int size) { int len; @@ -164,8 +158,8 @@ static int create_pnp_modalias(struct acpi_device *acpi_dev, char *modalias, return 0; len = snprintf(modalias, size, "acpi:"); - if (len <= 0) - return len; + if (len >= size) + return -ENOMEM; size -= len; @@ -174,8 +168,6 @@ static int create_pnp_modalias(struct acpi_device *acpi_dev, char *modalias, continue; count = snprintf(&modalias[len], size, "%s:", id->id); - if (count < 0) - return -EINVAL; if (count >= size) return -ENOMEM; @@ -183,7 +175,7 @@ static int create_pnp_modalias(struct acpi_device *acpi_dev, char *modalias, len += count; size -= count; } - modalias[len] = '\0'; + return len; } @@ -197,16 +189,20 @@ static int create_pnp_modalias(struct acpi_device *acpi_dev, char *modalias, * only be called for devices having ACPI_DT_NAMESPACE_HID in 
their list of * ACPI/PNP IDs. */ -static int create_of_modalias(struct acpi_device *acpi_dev, char *modalias, +static int create_of_modalias(const struct acpi_device *acpi_dev, char *modalias, int size) { struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER }; const union acpi_object *of_compatible, *obj; + acpi_status status; int len, count; int i, nval; char *c; - acpi_get_name(acpi_dev->handle, ACPI_SINGLE_NAME, &buf); + status = acpi_get_name(acpi_dev->handle, ACPI_SINGLE_NAME, &buf); + if (ACPI_FAILURE(status)) + return -ENODEV; + /* DT strings are all in lower case */ for (c = buf.pointer; *c != '\0'; c++) *c = tolower(*c); @@ -214,8 +210,10 @@ static int create_of_modalias(struct acpi_device *acpi_dev, char *modalias, len = snprintf(modalias, size, "of:N%sT", (char *)buf.pointer); ACPI_FREE(buf.pointer); - if (len <= 0) - return len; + if (len >= size) + return -ENOMEM; + + size -= len; of_compatible = acpi_dev->data.of_compatible; if (of_compatible->type == ACPI_TYPE_PACKAGE) { @@ -228,8 +226,6 @@ static int create_of_modalias(struct acpi_device *acpi_dev, char *modalias, for (i = 0; i < nval; i++, obj++) { count = snprintf(&modalias[len], size, "C%s", obj->string.pointer); - if (count < 0) - return -EINVAL; if (count >= size) return -ENOMEM; @@ -237,11 +233,11 @@ static int create_of_modalias(struct acpi_device *acpi_dev, char *modalias, len += count; size -= count; } - modalias[len] = '\0'; + return len; } -int __acpi_device_uevent_modalias(struct acpi_device *adev, +int __acpi_device_uevent_modalias(const struct acpi_device *adev, struct kobj_uevent_env *env) { int len; @@ -255,20 +251,12 @@ int __acpi_device_uevent_modalias(struct acpi_device *adev, if (add_uevent_var(env, "MODALIAS=")) return -ENOMEM; - len = create_pnp_modalias(adev, &env->buf[env->buflen - 1], - sizeof(env->buf) - env->buflen); - if (len < 0) - return len; - - env->buflen += len; - if (!adev->data.of_compatible) - return 0; - - if (len > 0 && add_uevent_var(env, "MODALIAS=")) - return -ENOMEM; - - len = create_of_modalias(adev, &env->buf[env->buflen - 1], - sizeof(env->buf) - env->buflen); + if (adev->data.of_compatible) + len = create_of_modalias(adev, &env->buf[env->buflen - 1], + sizeof(env->buf) - env->buflen); + else + len = create_pnp_modalias(adev, &env->buf[env->buflen - 1], + sizeof(env->buf) - env->buflen); if (len < 0) return len; @@ -279,19 +267,21 @@ int __acpi_device_uevent_modalias(struct acpi_device *adev, /** * acpi_device_uevent_modalias - uevent modalias for ACPI-enumerated devices. + * @dev: Struct device to get ACPI device node. + * @env: Environment variables of the kobject uevent. * * Create the uevent modalias field for ACPI-enumerated devices. * * Because other buses do not support ACPI HIDs & CIDs, e.g. for a device with * hid:IBM0001 and cid:ACPI0001 you get: "acpi:IBM0001:ACPI0001". */ -int acpi_device_uevent_modalias(struct device *dev, struct kobj_uevent_env *env) +int acpi_device_uevent_modalias(const struct device *dev, struct kobj_uevent_env *env) { return __acpi_device_uevent_modalias(acpi_companion_match(dev), env); } EXPORT_SYMBOL_GPL(acpi_device_uevent_modalias); -static int __acpi_device_modalias(struct acpi_device *adev, char *buf, int size) +static int __acpi_device_modalias(const struct acpi_device *adev, char *buf, int size) { int len, count; @@ -324,6 +314,9 @@ static int __acpi_device_modalias(struct acpi_device *adev, char *buf, int size) /** * acpi_device_modalias - modalias sysfs attribute for ACPI-enumerated devices. + * @dev: Struct device to get ACPI device node. 
+ * @buf: The buffer to save pnp_modalias and of_modalias. + * @size: Size of buffer. * * Create the modalias sysfs attribute for ACPI-enumerated devices. * @@ -337,11 +330,11 @@ int acpi_device_modalias(struct device *dev, char *buf, int size) EXPORT_SYMBOL_GPL(acpi_device_modalias); static ssize_t -acpi_device_modalias_show(struct device *dev, struct device_attribute *attr, char *buf) +modalias_show(struct device *dev, struct device_attribute *attr, char *buf) { return __acpi_device_modalias(to_acpi_device(dev), buf, 1024); } -static DEVICE_ATTR(modalias, 0444, acpi_device_modalias_show, NULL); +static DEVICE_ATTR_RO(modalias); static ssize_t real_power_state_show(struct device *dev, struct device_attribute *attr, char *buf) @@ -370,8 +363,8 @@ static ssize_t power_state_show(struct device *dev, static DEVICE_ATTR_RO(power_state); static ssize_t -acpi_eject_store(struct device *d, struct device_attribute *attr, - const char *buf, size_t count) +eject_store(struct device *d, struct device_attribute *attr, + const char *buf, size_t count) { struct acpi_device *acpi_device = to_acpi_device(d); acpi_object_type not_used; @@ -381,92 +374,106 @@ acpi_eject_store(struct device *d, struct device_attribute *attr, return -EINVAL; if ((!acpi_device->handler || !acpi_device->handler->hotplug.enabled) - && !acpi_device->driver) + && !d->driver) return -ENODEV; status = acpi_get_type(acpi_device->handle, ¬_used); if (ACPI_FAILURE(status) || !acpi_device->flags.ejectable) return -ENODEV; - get_device(&acpi_device->dev); + acpi_dev_get(acpi_device); status = acpi_hotplug_schedule(acpi_device, ACPI_OST_EC_OSPM_EJECT); if (ACPI_SUCCESS(status)) return count; - put_device(&acpi_device->dev); + acpi_dev_put(acpi_device); acpi_evaluate_ost(acpi_device->handle, ACPI_OST_EC_OSPM_EJECT, ACPI_OST_SC_NON_SPECIFIC_FAILURE, NULL); return status == AE_NO_MEMORY ? 
-ENOMEM : -EAGAIN; } -static DEVICE_ATTR(eject, 0200, NULL, acpi_eject_store); +static DEVICE_ATTR_WO(eject); static ssize_t -acpi_device_hid_show(struct device *dev, struct device_attribute *attr, char *buf) +hid_show(struct device *dev, struct device_attribute *attr, char *buf) { struct acpi_device *acpi_dev = to_acpi_device(dev); return sprintf(buf, "%s\n", acpi_device_hid(acpi_dev)); } -static DEVICE_ATTR(hid, 0444, acpi_device_hid_show, NULL); +static DEVICE_ATTR_RO(hid); -static ssize_t acpi_device_uid_show(struct device *dev, - struct device_attribute *attr, char *buf) +static ssize_t uid_show(struct device *dev, + struct device_attribute *attr, char *buf) { struct acpi_device *acpi_dev = to_acpi_device(dev); - return sprintf(buf, "%s\n", acpi_dev->pnp.unique_id); + return sprintf(buf, "%s\n", acpi_device_uid(acpi_dev)); } -static DEVICE_ATTR(uid, 0444, acpi_device_uid_show, NULL); +static DEVICE_ATTR_RO(uid); -static ssize_t acpi_device_adr_show(struct device *dev, - struct device_attribute *attr, char *buf) +static ssize_t adr_show(struct device *dev, + struct device_attribute *attr, char *buf) { struct acpi_device *acpi_dev = to_acpi_device(dev); - return sprintf(buf, "0x%08x\n", - (unsigned int)(acpi_dev->pnp.bus_address)); + if (acpi_dev->pnp.bus_address > U32_MAX) + return sprintf(buf, "0x%016llx\n", acpi_dev->pnp.bus_address); + else + return sprintf(buf, "0x%08llx\n", acpi_dev->pnp.bus_address); } -static DEVICE_ATTR(adr, 0444, acpi_device_adr_show, NULL); +static DEVICE_ATTR_RO(adr); -static ssize_t acpi_device_path_show(struct device *dev, - struct device_attribute *attr, char *buf) +static ssize_t path_show(struct device *dev, + struct device_attribute *attr, char *buf) { struct acpi_device *acpi_dev = to_acpi_device(dev); return acpi_object_path(acpi_dev->handle, buf); } -static DEVICE_ATTR(path, 0444, acpi_device_path_show, NULL); +static DEVICE_ATTR_RO(path); /* sysfs file that shows description text from the ACPI _STR method */ static ssize_t description_show(struct device *dev, struct device_attribute *attr, - char *buf) { + char *buf) +{ struct acpi_device *acpi_dev = to_acpi_device(dev); + struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL}; + union acpi_object *str_obj; + acpi_status status; int result; - if (acpi_dev->pnp.str_obj == NULL) - return 0; + status = acpi_evaluate_object_typed(acpi_dev->handle, "_STR", + NULL, &buffer, + ACPI_TYPE_BUFFER); + if (ACPI_FAILURE(status)) + return -EIO; + + str_obj = buffer.pointer; /* * The _STR object contains a Unicode identifier for a device. * We need to convert to utf-8 so it can be displayed. 
*/ result = utf16s_to_utf8s( - (wchar_t *)acpi_dev->pnp.str_obj->buffer.pointer, - acpi_dev->pnp.str_obj->buffer.length, + (wchar_t *)str_obj->buffer.pointer, + str_obj->buffer.length, UTF16_LITTLE_ENDIAN, buf, - PAGE_SIZE); + PAGE_SIZE - 1); buf[result++] = '\n'; + ACPI_FREE(str_obj); + return result; } static DEVICE_ATTR_RO(description); static ssize_t -acpi_device_sun_show(struct device *dev, struct device_attribute *attr, - char *buf) { +sun_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ struct acpi_device *acpi_dev = to_acpi_device(dev); acpi_status status; unsigned long long sun; @@ -477,11 +484,12 @@ acpi_device_sun_show(struct device *dev, struct device_attribute *attr, return sprintf(buf, "%llu\n", sun); } -static DEVICE_ATTR(sun, 0444, acpi_device_sun_show, NULL); +static DEVICE_ATTR_RO(sun); static ssize_t -acpi_device_hrv_show(struct device *dev, struct device_attribute *attr, - char *buf) { +hrv_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ struct acpi_device *acpi_dev = to_acpi_device(dev); acpi_status status; unsigned long long hrv; @@ -492,10 +500,11 @@ acpi_device_hrv_show(struct device *dev, struct device_attribute *attr, return sprintf(buf, "%llu\n", hrv); } -static DEVICE_ATTR(hrv, 0444, acpi_device_hrv_show, NULL); +static DEVICE_ATTR_RO(hrv); static ssize_t status_show(struct device *dev, struct device_attribute *attr, - char *buf) { + char *buf) +{ struct acpi_device *acpi_dev = to_acpi_device(dev); acpi_status status; unsigned long long sta; @@ -508,96 +517,97 @@ static ssize_t status_show(struct device *dev, struct device_attribute *attr, } static DEVICE_ATTR_RO(status); -/** - * acpi_device_setup_files - Create sysfs attributes of an ACPI device. - * @dev: ACPI device object. 
- */ -int acpi_device_setup_files(struct acpi_device *dev) -{ - struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL}; - acpi_status status; - int result = 0; +static struct attribute *acpi_attrs[] = { + &dev_attr_path.attr, + &dev_attr_hid.attr, + &dev_attr_modalias.attr, + &dev_attr_description.attr, + &dev_attr_adr.attr, + &dev_attr_uid.attr, + &dev_attr_sun.attr, + &dev_attr_hrv.attr, + &dev_attr_status.attr, + &dev_attr_eject.attr, + &dev_attr_power_state.attr, + &dev_attr_real_power_state.attr, + NULL +}; +static bool acpi_show_attr(struct acpi_device *dev, const struct device_attribute *attr) +{ /* * Devices gotten from FADT don't have a "path" attribute */ - if (dev->handle) { - result = device_create_file(&dev->dev, &dev_attr_path); - if (result) - goto end; - } + if (attr == &dev_attr_path) + return dev->handle; - if (!list_empty(&dev->pnp.ids)) { - result = device_create_file(&dev->dev, &dev_attr_hid); - if (result) - goto end; + if (attr == &dev_attr_hid || attr == &dev_attr_modalias) + return !list_empty(&dev->pnp.ids); - result = device_create_file(&dev->dev, &dev_attr_modalias); - if (result) - goto end; - } + if (attr == &dev_attr_description) + return acpi_has_method(dev->handle, "_STR"); - /* - * If device has _STR, 'description' file is created - */ - if (acpi_has_method(dev->handle, "_STR")) { - status = acpi_evaluate_object(dev->handle, "_STR", - NULL, &buffer); - if (ACPI_FAILURE(status)) - buffer.pointer = NULL; - dev->pnp.str_obj = buffer.pointer; - result = device_create_file(&dev->dev, &dev_attr_description); - if (result) - goto end; - } + if (attr == &dev_attr_adr) + return dev->pnp.type.bus_address; - if (dev->pnp.type.bus_address) - result = device_create_file(&dev->dev, &dev_attr_adr); - if (dev->pnp.unique_id) - result = device_create_file(&dev->dev, &dev_attr_uid); + if (attr == &dev_attr_uid) + return acpi_device_uid(dev); - if (acpi_has_method(dev->handle, "_SUN")) { - result = device_create_file(&dev->dev, &dev_attr_sun); - if (result) - goto end; - } + if (attr == &dev_attr_sun) + return acpi_has_method(dev->handle, "_SUN"); - if (acpi_has_method(dev->handle, "_HRV")) { - result = device_create_file(&dev->dev, &dev_attr_hrv); - if (result) - goto end; - } + if (attr == &dev_attr_hrv) + return acpi_has_method(dev->handle, "_HRV"); - if (acpi_has_method(dev->handle, "_STA")) { - result = device_create_file(&dev->dev, &dev_attr_status); - if (result) - goto end; - } + if (attr == &dev_attr_status) + return acpi_has_method(dev->handle, "_STA"); /* * If device has _EJ0, 'eject' file is created that is used to trigger * hot-removal function from userland. 
*/ - if (acpi_has_method(dev->handle, "_EJ0")) { - result = device_create_file(&dev->dev, &dev_attr_eject); - if (result) - return result; - } + if (attr == &dev_attr_eject) + return acpi_has_method(dev->handle, "_EJ0"); - if (dev->flags.power_manageable) { - result = device_create_file(&dev->dev, &dev_attr_power_state); - if (result) - return result; + if (attr == &dev_attr_power_state) + return dev->flags.power_manageable; - if (dev->power.flags.power_resources) - result = device_create_file(&dev->dev, - &dev_attr_real_power_state); - } + if (attr == &dev_attr_real_power_state) + return dev->flags.power_manageable && dev->power.flags.power_resources; - acpi_expose_nondev_subnodes(&dev->dev.kobj, &dev->data); + dev_warn_once(&dev->dev, "Unexpected attribute: %s\n", attr->attr.name); + return false; +} -end: - return result; +static umode_t acpi_attr_is_visible(struct kobject *kobj, + struct attribute *attr, + int attrno) +{ + struct acpi_device *dev = to_acpi_device(kobj_to_dev(kobj)); + + if (acpi_show_attr(dev, container_of(attr, struct device_attribute, attr))) + return attr->mode; + else + return 0; +} + +static const struct attribute_group acpi_group = { + .attrs = acpi_attrs, + .is_visible = acpi_attr_is_visible, +}; + +const struct attribute_group *acpi_groups[] = { + &acpi_group, + NULL +}; + +/** + * acpi_device_setup_files - Create sysfs attributes of an ACPI device. + * @dev: ACPI device object. + */ +void acpi_device_setup_files(struct acpi_device *dev) +{ + acpi_expose_nondev_subnodes(&dev->dev.kobj, &dev->data); } /** @@ -607,41 +617,4 @@ end: void acpi_device_remove_files(struct acpi_device *dev) { acpi_hide_nondev_subnodes(&dev->data); - - if (dev->flags.power_manageable) { - device_remove_file(&dev->dev, &dev_attr_power_state); - if (dev->power.flags.power_resources) - device_remove_file(&dev->dev, - &dev_attr_real_power_state); - } - - /* - * If device has _STR, remove 'description' file - */ - if (acpi_has_method(dev->handle, "_STR")) { - kfree(dev->pnp.str_obj); - device_remove_file(&dev->dev, &dev_attr_description); - } - /* - * If device has _EJ0, remove 'eject' file. - */ - if (acpi_has_method(dev->handle, "_EJ0")) - device_remove_file(&dev->dev, &dev_attr_eject); - - if (acpi_has_method(dev->handle, "_SUN")) - device_remove_file(&dev->dev, &dev_attr_sun); - - if (acpi_has_method(dev->handle, "_HRV")) - device_remove_file(&dev->dev, &dev_attr_hrv); - - if (dev->pnp.unique_id) - device_remove_file(&dev->dev, &dev_attr_uid); - if (dev->pnp.type.bus_address) - device_remove_file(&dev->dev, &dev_attr_adr); - device_remove_file(&dev->dev, &dev_attr_modalias); - device_remove_file(&dev->dev, &dev_attr_hid); - if (acpi_has_method(dev->handle, "_STA")) - device_remove_file(&dev->dev, &dev_attr_status); - if (dev->handle) - device_remove_file(&dev->dev, &dev_attr_path); } diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c index e3fc1f045e1c..34affbda295e 100644 --- a/drivers/acpi/dock.c +++ b/drivers/acpi/dock.c @@ -1,23 +1,10 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /* * dock.c - ACPI dock station driver * * Copyright (C) 2006, 2014, Intel Corp. * Author: Kristen Carlson Accardi <kristen.c.accardi@intel.com> * Rafael J. 
Wysocki <rafael.j.wysocki@intel.com> - * - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or (at - * your option) any later version. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ #include <linux/kernel.h> @@ -33,8 +20,6 @@ #include "internal.h" -ACPI_MODULE_NAME("dock"); - static bool immediate_undock = 1; module_param(immediate_undock, bool, 0644); MODULE_PARM_DESC(immediate_undock, "1 (default) will cause the driver to " @@ -103,43 +88,29 @@ static void dock_hotplug_event(struct dock_dependent_device *dd, u32 event, enum dock_callback_type cb_type) { struct acpi_device *adev = dd->adev; + acpi_hp_fixup fixup = NULL; + acpi_hp_uevent uevent = NULL; + acpi_hp_notify notify = NULL; acpi_lock_hp_context(); - if (!adev->hp) - goto out; - - if (cb_type == DOCK_CALL_FIXUP) { - void (*fixup)(struct acpi_device *); - - fixup = adev->hp->fixup; - if (fixup) { - acpi_unlock_hp_context(); - fixup(adev); - return; - } - } else if (cb_type == DOCK_CALL_UEVENT) { - void (*uevent)(struct acpi_device *, u32); - - uevent = adev->hp->uevent; - if (uevent) { - acpi_unlock_hp_context(); - uevent(adev, event); - return; - } - } else { - int (*notify)(struct acpi_device *, u32); - - notify = adev->hp->notify; - if (notify) { - acpi_unlock_hp_context(); - notify(adev, event); - return; - } + if (adev->hp) { + if (cb_type == DOCK_CALL_FIXUP) + fixup = adev->hp->fixup; + else if (cb_type == DOCK_CALL_UEVENT) + uevent = adev->hp->uevent; + else + notify = adev->hp->notify; } - out: acpi_unlock_hp_context(); + + if (fixup) + fixup(adev); + else if (uevent) + uevent(adev, event); + else if (notify) + notify(adev, event); } static struct dock_station *find_dock_station(acpi_handle handle) @@ -246,7 +217,8 @@ static void hot_remove_dock_devices(struct dock_station *ds) * between them). */ list_for_each_entry_reverse(dd, &ds->dependent_devices, list) - dock_hotplug_event(dd, ACPI_NOTIFY_EJECT_REQUEST, false); + dock_hotplug_event(dd, ACPI_NOTIFY_EJECT_REQUEST, + DOCK_CALL_HANDLER); list_for_each_entry_reverse(dd, &ds->dependent_devices, list) acpi_bus_trim(dd->adev); @@ -285,6 +257,7 @@ static void hotplug_dock_devices(struct dock_station *ds, u32 event) if (!acpi_device_enumerated(adev)) { int ret = acpi_bus_scan(adev->handle); + if (ret) dev_dbg(&adev->dev, "scan error %d\n", -ret); } @@ -393,6 +366,8 @@ static int dock_in_progress(struct dock_station *ds) /** * handle_eject_request - handle an undock request checking for error conditions + * @ds: The dock station to undock. + * @event: The ACPI event number associated with the undock request. * * Check to make sure the dock device is still present, then undock and * hotremove all the devices that may need removing. 
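The dock.c hunk above reworks dock_hotplug_event(): instead of unlocking the hotplug context and calling the handler inside each branch, it now snapshots the fixup/uevent/notify pointer while acpi_lock_hp_context() is held and invokes whichever pointer is non-NULL only after the lock has been dropped. The following is a minimal user-space sketch of that "snapshot under lock, call outside the lock" idiom, using a pthread mutex as a stand-in; the names (ctx_lock, on_event, handle_event) are illustrative only and are not part of the kernel API.

	#include <pthread.h>
	#include <stdio.h>

	/* Illustrative stand-ins for the hotplug context and its lock. */
	typedef void (*event_cb)(int event);

	static pthread_mutex_t ctx_lock = PTHREAD_MUTEX_INITIALIZER;
	static event_cb on_event;	/* may be updated by other threads */

	static void handle_event(int event)
	{
		event_cb cb;

		/* Hold the lock only long enough to snapshot the pointer. */
		pthread_mutex_lock(&ctx_lock);
		cb = on_event;
		pthread_mutex_unlock(&ctx_lock);

		/*
		 * Invoke the callback without holding the lock, as the
		 * reworked dock_hotplug_event() does, so the callback can
		 * sleep or take other locks without risking a deadlock.
		 */
		if (cb)
			cb(event);
	}

	static void print_event(int event)
	{
		printf("event 0x%x\n", event);
	}

	int main(void)
	{
		on_event = print_event;
		handle_event(0x03);
		return 0;
	}

The design choice mirrors the patch: the lock protects only the pointer lookup, never the (potentially sleeping) handler call itself.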
@@ -482,7 +457,7 @@ int dock_notify(struct acpi_device *adev, u32 event) surprise_removal = 1; event = ACPI_NOTIFY_EJECT_REQUEST; /* Fall back */ - /* fall through */ + fallthrough; case ACPI_NOTIFY_EJECT_REQUEST: begin_undock(ds); if ((immediate_undock && !(ds->flags & DOCK_IS_ATA)) @@ -498,34 +473,34 @@ int dock_notify(struct acpi_device *adev, u32 event) /* * show_docked - read method for "docked" file in sysfs */ -static ssize_t show_docked(struct device *dev, +static ssize_t docked_show(struct device *dev, struct device_attribute *attr, char *buf) { struct dock_station *dock_station = dev->platform_data; - struct acpi_device *adev = NULL; + struct acpi_device *adev = acpi_fetch_acpi_dev(dock_station->handle); - acpi_bus_get_device(dock_station->handle, &adev); - return snprintf(buf, PAGE_SIZE, "%u\n", acpi_device_enumerated(adev)); + return sysfs_emit(buf, "%u\n", acpi_device_enumerated(adev)); } -static DEVICE_ATTR(docked, S_IRUGO, show_docked, NULL); +static DEVICE_ATTR_RO(docked); /* * show_flags - read method for flags file in sysfs */ -static ssize_t show_flags(struct device *dev, +static ssize_t flags_show(struct device *dev, struct device_attribute *attr, char *buf) { struct dock_station *dock_station = dev->platform_data; - return snprintf(buf, PAGE_SIZE, "%d\n", dock_station->flags); + + return sysfs_emit(buf, "%d\n", dock_station->flags); } -static DEVICE_ATTR(flags, S_IRUGO, show_flags, NULL); +static DEVICE_ATTR_RO(flags); /* * write_undock - write method for "undock" file in sysfs */ -static ssize_t write_undock(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) +static ssize_t undock_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) { int ret; struct dock_station *dock_station = dev->platform_data; @@ -537,29 +512,30 @@ static ssize_t write_undock(struct device *dev, struct device_attribute *attr, begin_undock(dock_station); ret = handle_eject_request(dock_station, ACPI_NOTIFY_EJECT_REQUEST); acpi_scan_lock_release(); - return ret ? ret: count; + return ret ? 
ret : count; } -static DEVICE_ATTR(undock, S_IWUSR, NULL, write_undock); +static DEVICE_ATTR_WO(undock); /* * show_dock_uid - read method for "uid" file in sysfs */ -static ssize_t show_dock_uid(struct device *dev, - struct device_attribute *attr, char *buf) +static ssize_t uid_show(struct device *dev, + struct device_attribute *attr, char *buf) { unsigned long long lbuf; struct dock_station *dock_station = dev->platform_data; + acpi_status status = acpi_evaluate_integer(dock_station->handle, "_UID", NULL, &lbuf); if (ACPI_FAILURE(status)) - return 0; + return 0; - return snprintf(buf, PAGE_SIZE, "%llx\n", lbuf); + return sysfs_emit(buf, "%llx\n", lbuf); } -static DEVICE_ATTR(uid, S_IRUGO, show_dock_uid, NULL); +static DEVICE_ATTR_RO(uid); -static ssize_t show_dock_type(struct device *dev, - struct device_attribute *attr, char *buf) +static ssize_t type_show(struct device *dev, + struct device_attribute *attr, char *buf) { struct dock_station *dock_station = dev->platform_data; char *type; @@ -573,9 +549,9 @@ static ssize_t show_dock_type(struct device *dev, else type = "unknown"; - return snprintf(buf, PAGE_SIZE, "%s\n", type); + return sysfs_emit(buf, "%s\n", type); } -static DEVICE_ATTR(type, S_IRUGO, show_dock_type, NULL); +static DEVICE_ATTR_RO(type); static struct attribute *dock_attributes[] = { &dev_attr_docked.attr, diff --git a/drivers/acpi/dptf/Kconfig b/drivers/acpi/dptf/Kconfig index 90a2fd979282..4b3fdc03e4ed 100644 --- a/drivers/acpi/dptf/Kconfig +++ b/drivers/acpi/dptf/Kconfig @@ -1,8 +1,22 @@ # SPDX-License-Identifier: GPL-2.0 -config DPTF_POWER - tristate "DPTF Platform Power Participant" + +menuconfig ACPI_DPTF + bool "Intel DPTF (Dynamic Platform and Thermal Framework) Support" depends on X86 help + Intel Dynamic Platform and Thermal Framework (DPTF) is a platform + level hardware/software solution for power and thermal management. + + As a container for multiple power/thermal technologies, DPTF provides + a coordinated approach for different policies to effect the hardware + state of a system. + +if ACPI_DPTF + +config DPTF_POWER + tristate "Platform Power DPTF Participant" + default m + help This driver adds support for Dynamic Platform and Thermal Framework (DPTF) Platform Power Participant device (INT3407) support. This participant is responsible for exposing platform telemetry: @@ -14,3 +28,19 @@ config DPTF_POWER To compile this driver as a module, choose M here: the module will be called dptf_power. + +config DPTF_PCH_FIVR + tristate "PCH FIVR DPTF Participant" + default m + help + This driver adds support for Dynamic Platform and Thermal Framework + (DPTF) PCH FIVR Participant device support. This driver allows to + switch the PCH FIVR (Fully Integrated Voltage Regulator) frequency. + This participant is responsible for exposing: + freq_mhz_low_clock + freq_mhz_high_clock + + To compile this driver as a module, choose M here: + the module will be called dptf_pch_fivr. 
+ +endif diff --git a/drivers/acpi/dptf/Makefile b/drivers/acpi/dptf/Makefile index 06ea8809583d..e912a3be1d28 100644 --- a/drivers/acpi/dptf/Makefile +++ b/drivers/acpi/dptf/Makefile @@ -1,4 +1,3 @@ -obj-$(CONFIG_ACPI) += int340x_thermal.o +# SPDX-License-Identifier: GPL-2.0-only obj-$(CONFIG_DPTF_POWER) += dptf_power.o - -ccflags-y += -Idrivers/acpi +obj-$(CONFIG_DPTF_PCH_FIVR) += dptf_pch_fivr.o diff --git a/drivers/acpi/dptf/dptf_pch_fivr.c b/drivers/acpi/dptf/dptf_pch_fivr.c new file mode 100644 index 000000000000..8d7e555929d3 --- /dev/null +++ b/drivers/acpi/dptf/dptf_pch_fivr.c @@ -0,0 +1,173 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * dptf_pch_fivr: DPTF PCH FIVR Participant driver + * Copyright (c) 2020, Intel Corporation. + */ + +#include <linux/acpi.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/platform_device.h> + +struct pch_fivr_resp { + u64 status; + u64 result; +}; + +static int pch_fivr_read(acpi_handle handle, char *method, struct pch_fivr_resp *fivr_resp) +{ + struct acpi_buffer resp = { sizeof(struct pch_fivr_resp), fivr_resp}; + struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; + struct acpi_buffer format = { sizeof("NN"), "NN" }; + union acpi_object *obj; + acpi_status status; + int ret = -EFAULT; + + status = acpi_evaluate_object(handle, method, NULL, &buffer); + if (ACPI_FAILURE(status)) + return ret; + + obj = buffer.pointer; + if (!obj || obj->type != ACPI_TYPE_PACKAGE) + goto release_buffer; + + status = acpi_extract_package(obj, &format, &resp); + if (ACPI_FAILURE(status)) + goto release_buffer; + + if (fivr_resp->status) + goto release_buffer; + + ret = 0; + +release_buffer: + ACPI_FREE(buffer.pointer); + return ret; +} + +/* + * Presentation of attributes which are defined for INTC10xx + * They are: + * freq_mhz_low_clock : Set PCH FIVR switching freq for + * FIVR clock 19.2MHz and 24MHz + * freq_mhz_high_clock : Set PCH FIVR switching freq for + * FIVR clock 38.4MHz + */ +#define PCH_FIVR_SHOW(name, method) \ +static ssize_t name##_show(struct device *dev,\ + struct device_attribute *attr,\ + char *buf)\ +{\ + struct acpi_device *acpi_dev = dev_get_drvdata(dev);\ + struct pch_fivr_resp fivr_resp;\ + int status;\ +\ + status = pch_fivr_read(acpi_dev->handle, #method, &fivr_resp);\ + if (status)\ + return status;\ +\ + return sprintf(buf, "%llu\n", fivr_resp.result);\ +} + +#define PCH_FIVR_STORE(name, method) \ +static ssize_t name##_store(struct device *dev,\ + struct device_attribute *attr,\ + const char *buf, size_t count)\ +{\ + struct acpi_device *acpi_dev = dev_get_drvdata(dev);\ + acpi_status status;\ + u32 val;\ +\ + if (kstrtouint(buf, 0, &val) < 0)\ + return -EINVAL;\ +\ + status = acpi_execute_simple_method(acpi_dev->handle, #method, val);\ + if (ACPI_SUCCESS(status))\ + return count;\ +\ + return -EINVAL;\ +} + +PCH_FIVR_SHOW(freq_mhz_low_clock, GFC0) +PCH_FIVR_SHOW(freq_mhz_high_clock, GFC1) +PCH_FIVR_SHOW(ssc_clock_info, GEMI) +PCH_FIVR_SHOW(fivr_switching_freq_mhz, GFCS) +PCH_FIVR_SHOW(fivr_switching_fault_status, GFFS) +PCH_FIVR_STORE(freq_mhz_low_clock, RFC0) +PCH_FIVR_STORE(freq_mhz_high_clock, RFC1) + +static DEVICE_ATTR_RW(freq_mhz_low_clock); +static DEVICE_ATTR_RW(freq_mhz_high_clock); +static DEVICE_ATTR_RO(ssc_clock_info); +static DEVICE_ATTR_RO(fivr_switching_freq_mhz); +static DEVICE_ATTR_RO(fivr_switching_fault_status); + +static struct attribute *fivr_attrs[] = { + &dev_attr_freq_mhz_low_clock.attr, + &dev_attr_freq_mhz_high_clock.attr, + &dev_attr_ssc_clock_info.attr, + 
&dev_attr_fivr_switching_freq_mhz.attr, + &dev_attr_fivr_switching_fault_status.attr, + NULL +}; + +static const struct attribute_group pch_fivr_attribute_group = { + .attrs = fivr_attrs, + .name = "pch_fivr_switch_frequency" +}; + +static int pch_fivr_add(struct platform_device *pdev) +{ + struct acpi_device *acpi_dev; + unsigned long long ptype; + acpi_status status; + int result; + + acpi_dev = ACPI_COMPANION(&(pdev->dev)); + if (!acpi_dev) + return -ENODEV; + + status = acpi_evaluate_integer(acpi_dev->handle, "PTYP", NULL, &ptype); + if (ACPI_FAILURE(status) || ptype != 0x05) + return -ENODEV; + + result = sysfs_create_group(&pdev->dev.kobj, + &pch_fivr_attribute_group); + if (result) + return result; + + platform_set_drvdata(pdev, acpi_dev); + + return 0; +} + +static void pch_fivr_remove(struct platform_device *pdev) +{ + sysfs_remove_group(&pdev->dev.kobj, &pch_fivr_attribute_group); +} + +static const struct acpi_device_id pch_fivr_device_ids[] = { + {"INTC1045", 0}, + {"INTC1049", 0}, + {"INTC1064", 0}, + {"INTC106B", 0}, + {"INTC10A3", 0}, + {"INTC10D7", 0}, + {"", 0}, +}; +MODULE_DEVICE_TABLE(acpi, pch_fivr_device_ids); + +static struct platform_driver pch_fivr_driver = { + .probe = pch_fivr_add, + .remove = pch_fivr_remove, + .driver = { + .name = "dptf_pch_fivr", + .acpi_match_table = pch_fivr_device_ids, + }, +}; + +module_platform_driver(pch_fivr_driver); + +MODULE_AUTHOR("Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>"); +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("ACPI DPTF PCH FIVR driver"); diff --git a/drivers/acpi/dptf/dptf_power.c b/drivers/acpi/dptf/dptf_power.c index e1c242568341..55ccbb8ddbe3 100644 --- a/drivers/acpi/dptf/dptf_power.c +++ b/drivers/acpi/dptf/dptf_power.c @@ -1,16 +1,7 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * dptf_power: DPTF platform power driver * Copyright (c) 2016, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * */ #include <linux/kernel.h> @@ -19,20 +10,24 @@ #include <linux/platform_device.h> /* - * Presentation of attributes which are defined for INT3407. They are: - * PMAX : Maximum platform powe + * Presentation of attributes which are defined for INT3407 and INT3532. 
+ * They are: + * PMAX : Maximum platform power * PSRC : Platform power source * ARTG : Adapter rating * CTYP : Charger type - * PBSS : Battery steady power + * PROP : Rest of worst case platform Power + * PBSS : Power Battery Steady State + * RBHF : High Frequency Impedance + * VBNL : Instantaneous No-Load Voltage + * CMPP : Current Discharge Capability */ #define DPTF_POWER_SHOW(name, object) \ static ssize_t name##_show(struct device *dev,\ struct device_attribute *attr,\ char *buf)\ {\ - struct platform_device *pdev = to_platform_device(dev);\ - struct acpi_device *acpi_dev = platform_get_drvdata(pdev);\ + struct acpi_device *acpi_dev = dev_get_drvdata(dev);\ unsigned long long val;\ acpi_status status;\ \ @@ -49,12 +44,42 @@ DPTF_POWER_SHOW(platform_power_source, PSRC) DPTF_POWER_SHOW(adapter_rating_mw, ARTG) DPTF_POWER_SHOW(battery_steady_power_mw, PBSS) DPTF_POWER_SHOW(charger_type, CTYP) +DPTF_POWER_SHOW(rest_of_platform_power_mw, PROP) +DPTF_POWER_SHOW(max_steady_state_power_mw, PBSS) +DPTF_POWER_SHOW(high_freq_impedance_mohm, RBHF) +DPTF_POWER_SHOW(no_load_voltage_mv, VBNL) +DPTF_POWER_SHOW(current_discharge_capbility_ma, CMPP); static DEVICE_ATTR_RO(max_platform_power_mw); static DEVICE_ATTR_RO(platform_power_source); static DEVICE_ATTR_RO(adapter_rating_mw); static DEVICE_ATTR_RO(battery_steady_power_mw); static DEVICE_ATTR_RO(charger_type); +static DEVICE_ATTR_RO(rest_of_platform_power_mw); +static DEVICE_ATTR_RO(max_steady_state_power_mw); +static DEVICE_ATTR_RO(high_freq_impedance_mohm); +static DEVICE_ATTR_RO(no_load_voltage_mv); +static DEVICE_ATTR_RO(current_discharge_capbility_ma); + +static ssize_t prochot_confirm_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct acpi_device *acpi_dev = dev_get_drvdata(dev); + acpi_status status; + int seq_no; + + if (kstrtouint(buf, 0, &seq_no) < 0) + return -EINVAL; + + status = acpi_execute_simple_method(acpi_dev->handle, "PBOK", seq_no); + if (ACPI_SUCCESS(status)) + return count; + + return -EINVAL; +} + +static DEVICE_ATTR_WO(prochot_confirm); static struct attribute *dptf_power_attrs[] = { &dev_attr_max_platform_power_mw.attr, @@ -62,6 +87,8 @@ static struct attribute *dptf_power_attrs[] = { &dev_attr_adapter_rating_mw.attr, &dev_attr_battery_steady_power_mw.attr, &dev_attr_charger_type.attr, + &dev_attr_rest_of_platform_power_mw.attr, + &dev_attr_prochot_confirm.attr, NULL }; @@ -70,10 +97,82 @@ static const struct attribute_group dptf_power_attribute_group = { .name = "dptf_power" }; +static struct attribute *dptf_battery_attrs[] = { + &dev_attr_max_platform_power_mw.attr, + &dev_attr_max_steady_state_power_mw.attr, + &dev_attr_high_freq_impedance_mohm.attr, + &dev_attr_no_load_voltage_mv.attr, + &dev_attr_current_discharge_capbility_ma.attr, + NULL +}; + +static const struct attribute_group dptf_battery_attribute_group = { + .attrs = dptf_battery_attrs, + .name = "dptf_battery" +}; + +#define MAX_POWER_CHANGED 0x80 +#define POWER_STATE_CHANGED 0x81 +#define STEADY_STATE_POWER_CHANGED 0x83 +#define POWER_PROP_CHANGE_EVENT 0x84 +#define IMPEDANCE_CHANGED 0x85 +#define VOLTAGE_CURRENT_CHANGED 0x86 + +static long long dptf_participant_type(acpi_handle handle) +{ + unsigned long long ptype; + acpi_status status; + + status = acpi_evaluate_integer(handle, "PTYP", NULL, &ptype); + if (ACPI_FAILURE(status)) + return -ENODEV; + + return ptype; +} + +static void dptf_power_notify(acpi_handle handle, u32 event, void *data) +{ + struct platform_device *pdev = data; + char *attr; + + 
switch (event) { + case POWER_STATE_CHANGED: + attr = "platform_power_source"; + break; + case POWER_PROP_CHANGE_EVENT: + attr = "rest_of_platform_power_mw"; + break; + case MAX_POWER_CHANGED: + attr = "max_platform_power_mw"; + break; + case STEADY_STATE_POWER_CHANGED: + attr = "max_steady_state_power_mw"; + break; + case IMPEDANCE_CHANGED: + attr = "high_freq_impedance_mohm"; + break; + case VOLTAGE_CURRENT_CHANGED: + attr = "no_load_voltage_mv"; + break; + default: + dev_err(&pdev->dev, "Unsupported event [0x%x]\n", event); + return; + } + + /* + * Notify that an attribute is changed, so that user space can read + * again. + */ + if (dptf_participant_type(handle) == 0x0CULL) + sysfs_notify(&pdev->dev.kobj, "dptf_battery", attr); + else + sysfs_notify(&pdev->dev.kobj, "dptf_power", attr); +} + static int dptf_power_add(struct platform_device *pdev) { + const struct attribute_group *attr_group; struct acpi_device *acpi_dev; - acpi_status status; unsigned long long ptype; int result; @@ -81,33 +180,68 @@ static int dptf_power_add(struct platform_device *pdev) if (!acpi_dev) return -ENODEV; - status = acpi_evaluate_integer(acpi_dev->handle, "PTYP", NULL, &ptype); - if (ACPI_FAILURE(status)) + ptype = dptf_participant_type(acpi_dev->handle); + if (ptype == 0x11) + attr_group = &dptf_power_attribute_group; + else if (ptype == 0x0C) + attr_group = &dptf_battery_attribute_group; + else return -ENODEV; - if (ptype != 0x11) - return -ENODEV; + result = acpi_install_notify_handler(acpi_dev->handle, + ACPI_DEVICE_NOTIFY, + dptf_power_notify, + (void *)pdev); + if (result) + return result; result = sysfs_create_group(&pdev->dev.kobj, - &dptf_power_attribute_group); - if (result) + attr_group); + if (result) { + acpi_remove_notify_handler(acpi_dev->handle, + ACPI_DEVICE_NOTIFY, + dptf_power_notify); return result; + } platform_set_drvdata(pdev, acpi_dev); return 0; } -static int dptf_power_remove(struct platform_device *pdev) +static void dptf_power_remove(struct platform_device *pdev) { + struct acpi_device *acpi_dev = platform_get_drvdata(pdev); - sysfs_remove_group(&pdev->dev.kobj, &dptf_power_attribute_group); + acpi_remove_notify_handler(acpi_dev->handle, + ACPI_DEVICE_NOTIFY, + dptf_power_notify); - return 0; + if (dptf_participant_type(acpi_dev->handle) == 0x0CULL) + sysfs_remove_group(&pdev->dev.kobj, &dptf_battery_attribute_group); + else + sysfs_remove_group(&pdev->dev.kobj, &dptf_power_attribute_group); } static const struct acpi_device_id int3407_device_ids[] = { {"INT3407", 0}, + {"INT3532", 0}, + {"INTC1047", 0}, + {"INTC1050", 0}, + {"INTC1060", 0}, + {"INTC1061", 0}, + {"INTC1065", 0}, + {"INTC1066", 0}, + {"INTC106C", 0}, + {"INTC106D", 0}, + {"INTC10A4", 0}, + {"INTC10A5", 0}, + {"INTC10D8", 0}, + {"INTC10D9", 0}, + {"INTC1100", 0}, + {"INTC1101", 0}, + {"INTC10F7", 0}, + {"INTC10F8", 0}, {"", 0}, }; MODULE_DEVICE_TABLE(acpi, int3407_device_ids); @@ -116,7 +250,7 @@ static struct platform_driver dptf_power_driver = { .probe = dptf_power_add, .remove = dptf_power_remove, .driver = { - .name = "DPTF Platform Power", + .name = "dptf_power", .acpi_match_table = int3407_device_ids, }, }; diff --git a/drivers/acpi/dptf/int340x_thermal.c b/drivers/acpi/dptf/int340x_thermal.c deleted file mode 100644 index 86364097e236..000000000000 --- a/drivers/acpi/dptf/int340x_thermal.c +++ /dev/null @@ -1,53 +0,0 @@ -/* - * ACPI support for int340x thermal drivers - * - * Copyright (C) 2014, Intel Corporation - * Authors: Zhang Rui <rui.zhang@intel.com> - * - * This program is free software; you can 
redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -#include <linux/acpi.h> -#include <linux/module.h> - -#include "internal.h" - -#define INT3401_DEVICE 0X01 -static const struct acpi_device_id int340x_thermal_device_ids[] = { - {"INT3400"}, - {"INT3401", INT3401_DEVICE}, - {"INT3402"}, - {"INT3403"}, - {"INT3404"}, - {"INT3406"}, - {"INT3407"}, - {"INT3408"}, - {"INT3409"}, - {"INT340A"}, - {"INT340B"}, - {""}, -}; - -static int int340x_thermal_handler_attach(struct acpi_device *adev, - const struct acpi_device_id *id) -{ - if (IS_ENABLED(CONFIG_INT340X_THERMAL)) - acpi_create_platform_device(adev, NULL); - /* Intel SoC DTS thermal driver needs INT3401 to set IRQ descriptor */ - else if (IS_ENABLED(CONFIG_INTEL_SOC_DTS_THERMAL) && - id->driver_data == INT3401_DEVICE) - acpi_create_platform_device(adev, NULL); - return 1; -} - -static struct acpi_scan_handler int340x_thermal_handler = { - .ids = int340x_thermal_device_ids, - .attach = int340x_thermal_handler_attach, -}; - -void __init acpi_int340x_thermal_init(void) -{ - acpi_scan_add_handler(&int340x_thermal_handler); -} diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c index 9d66a47d32fb..59b3d50ff01e 100644 --- a/drivers/acpi/ec.c +++ b/drivers/acpi/ec.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /* * ec.c - ACPI Embedded Controller Driver (v3) * @@ -9,20 +10,6 @@ * 2001, 2002 Andy Grover <andrew.grover@intel.com> * 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com> * Copyright (C) 2008 Alexey Starikovskiy <astarikovskiy@suse.de> - * - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or (at - * your option) any later version. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. 
- * - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ /* Uncomment next line to get verbose printout */ @@ -36,8 +23,11 @@ #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/list.h> +#include <linux/printk.h> #include <linux/spinlock.h> #include <linux/slab.h> +#include <linux/string.h> +#include <linux/suspend.h> #include <linux/acpi.h> #include <linux/dmi.h> #include <asm/io.h> @@ -46,7 +36,6 @@ #define ACPI_EC_CLASS "embedded_controller" #define ACPI_EC_DEVICE_NAME "Embedded Controller" -#define ACPI_EC_FILE_INFO "info" /* EC status register */ #define ACPI_EC_FLAG_OBF 0x01 /* Output buffer full */ @@ -105,14 +94,13 @@ enum ec_command { enum { EC_FLAGS_QUERY_ENABLED, /* Query is enabled */ - EC_FLAGS_QUERY_PENDING, /* Query is pending */ - EC_FLAGS_QUERY_GUARDING, /* Guard for SCI_EVT check */ - EC_FLAGS_GPE_HANDLER_INSTALLED, /* GPE handler installed */ + EC_FLAGS_EVENT_HANDLER_INSTALLED, /* Event handler installed */ EC_FLAGS_EC_HANDLER_INSTALLED, /* OpReg handler installed */ - EC_FLAGS_EVT_HANDLER_INSTALLED, /* _Qxx handlers installed */ + EC_FLAGS_EC_REG_CALLED, /* OpReg ACPI _REG method called */ + EC_FLAGS_QUERY_METHODS_INSTALLED, /* _Qxx handlers installed */ EC_FLAGS_STARTED, /* Driver is started */ EC_FLAGS_STOPPED, /* Driver is stopped */ - EC_FLAGS_GPE_MASKED, /* GPE masked */ + EC_FLAGS_EVENTS_MASKED, /* Events masked */ }; #define ACPI_EC_COMMAND_POLL 0x01 /* Available for command byte */ @@ -146,7 +134,7 @@ static unsigned int ec_storm_threshold __read_mostly = 8; module_param(ec_storm_threshold, uint, 0644); MODULE_PARM_DESC(ec_storm_threshold, "Maxim false GPE numbers not considered as GPE storm"); -static bool ec_freeze_events __read_mostly = false; +static bool ec_freeze_events __read_mostly; module_param(ec_freeze_events, bool, 0644); MODULE_PARM_DESC(ec_freeze_events, "Disabling event handling during suspend/resume"); @@ -179,21 +167,24 @@ struct acpi_ec_query { struct transaction transaction; struct work_struct work; struct acpi_ec_query_handler *handler; + struct acpi_ec *ec; }; -static int acpi_ec_query(struct acpi_ec *ec, u8 *data); -static void advance_transaction(struct acpi_ec *ec); +static int acpi_ec_submit_query(struct acpi_ec *ec); +static void advance_transaction(struct acpi_ec *ec, bool interrupt); static void acpi_ec_event_handler(struct work_struct *work); -static void acpi_ec_event_processor(struct work_struct *work); -struct acpi_ec *boot_ec, *first_ec; +struct acpi_ec *first_ec; EXPORT_SYMBOL(first_ec); -static bool boot_ec_is_ecdt = false; + +static struct acpi_ec *boot_ec; +static bool boot_ec_is_ecdt; +static struct workqueue_struct *ec_wq; static struct workqueue_struct *ec_query_wq; -static int EC_FLAGS_QUERY_HANDSHAKE; /* Needs QR_EC issued when SCI_EVT set */ static int EC_FLAGS_CORRECT_ECDT; /* Needs ECDT port address correction */ -static int EC_FLAGS_IGNORE_DSDT_GPE; /* Needs ECDT GPE as correction setting */ +static int EC_FLAGS_TRUST_DSDT_GPE; /* Needs DSDT GPE as correction setting */ +static int EC_FLAGS_CLEAR_ON_RESUME; /* Needs acpi_ec_clear() on boot/resume */ /* -------------------------------------------------------------------------- * Logging/Debugging @@ -345,12 +336,12 @@ static const char *acpi_ec_cmd_string(u8 cmd) * GPE Registers * -------------------------------------------------------------------------- */ -static inline bool acpi_ec_is_gpe_raised(struct acpi_ec *ec) +static inline bool acpi_ec_gpe_status_set(struct acpi_ec *ec) { acpi_event_status gpe_status = 0; 
(void)acpi_get_gpe_status(NULL, ec->gpe, &gpe_status); - return (gpe_status & ACPI_EVENT_FLAG_STATUS_SET) ? true : false; + return !!(gpe_status & ACPI_EVENT_FLAG_STATUS_SET); } static inline void acpi_ec_enable_gpe(struct acpi_ec *ec, bool open) @@ -361,14 +352,14 @@ static inline void acpi_ec_enable_gpe(struct acpi_ec *ec, bool open) BUG_ON(ec->reference_count < 1); acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_ENABLE); } - if (acpi_ec_is_gpe_raised(ec)) { + if (acpi_ec_gpe_status_set(ec)) { /* * On some platforms, EN=1 writes cannot trigger GPE. So * software need to manually trigger a pseudo GPE event on * EN=1 writes. */ ec_dbg_raw("Polling quirk"); - advance_transaction(ec); + advance_transaction(ec, false); } } @@ -382,23 +373,6 @@ static inline void acpi_ec_disable_gpe(struct acpi_ec *ec, bool close) } } -static inline void acpi_ec_clear_gpe(struct acpi_ec *ec) -{ - /* - * GPE STS is a W1C register, which means: - * 1. Software can clear it without worrying about clearing other - * GPEs' STS bits when the hardware sets them in parallel. - * 2. As long as software can ensure only clearing it when it is - * set, hardware won't set it in parallel. - * So software can clear GPE in any contexts. - * Warning: do not move the check into advance_transaction() as the - * EC commands will be sent without GPE raised. - */ - if (!acpi_ec_is_gpe_raised(ec)) - return; - acpi_clear_gpe(NULL, ec->gpe); -} - /* -------------------------------------------------------------------------- * Transaction Management * -------------------------------------------------------------------------- */ @@ -406,8 +380,8 @@ static inline void acpi_ec_clear_gpe(struct acpi_ec *ec) static void acpi_ec_submit_request(struct acpi_ec *ec) { ec->reference_count++; - if (test_bit(EC_FLAGS_GPE_HANDLER_INSTALLED, &ec->flags) && - ec->reference_count == 1) + if (test_bit(EC_FLAGS_EVENT_HANDLER_INSTALLED, &ec->flags) && + ec->gpe >= 0 && ec->reference_count == 1) acpi_ec_enable_gpe(ec, true); } @@ -416,28 +390,36 @@ static void acpi_ec_complete_request(struct acpi_ec *ec) bool flushed = false; ec->reference_count--; - if (test_bit(EC_FLAGS_GPE_HANDLER_INSTALLED, &ec->flags) && - ec->reference_count == 0) + if (test_bit(EC_FLAGS_EVENT_HANDLER_INSTALLED, &ec->flags) && + ec->gpe >= 0 && ec->reference_count == 0) acpi_ec_disable_gpe(ec, true); flushed = acpi_ec_flushed(ec); if (flushed) wake_up(&ec->wait); } -static void acpi_ec_mask_gpe(struct acpi_ec *ec) +static void acpi_ec_mask_events(struct acpi_ec *ec) { - if (!test_bit(EC_FLAGS_GPE_MASKED, &ec->flags)) { - acpi_ec_disable_gpe(ec, false); + if (!test_bit(EC_FLAGS_EVENTS_MASKED, &ec->flags)) { + if (ec->gpe >= 0) + acpi_ec_disable_gpe(ec, false); + else + disable_irq_nosync(ec->irq); + ec_dbg_drv("Polling enabled"); - set_bit(EC_FLAGS_GPE_MASKED, &ec->flags); + set_bit(EC_FLAGS_EVENTS_MASKED, &ec->flags); } } -static void acpi_ec_unmask_gpe(struct acpi_ec *ec) +static void acpi_ec_unmask_events(struct acpi_ec *ec) { - if (test_bit(EC_FLAGS_GPE_MASKED, &ec->flags)) { - clear_bit(EC_FLAGS_GPE_MASKED, &ec->flags); - acpi_ec_enable_gpe(ec, false); + if (test_bit(EC_FLAGS_EVENTS_MASKED, &ec->flags)) { + clear_bit(EC_FLAGS_EVENTS_MASKED, &ec->flags); + if (ec->gpe >= 0) + acpi_ec_enable_gpe(ec, false); + else + enable_irq(ec->irq); + ec_dbg_drv("Polling disabled"); } } @@ -461,25 +443,51 @@ static bool acpi_ec_submit_flushable_request(struct acpi_ec *ec) return true; } -static void acpi_ec_submit_query(struct acpi_ec *ec) +static void acpi_ec_submit_event(struct acpi_ec *ec) { - 
acpi_ec_mask_gpe(ec); + /* + * It is safe to mask the events here, because acpi_ec_close_event() + * will run at least once after this. + */ + acpi_ec_mask_events(ec); if (!acpi_ec_event_enabled(ec)) return; - if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags)) { - ec_dbg_evt("Command(%s) submitted/blocked", - acpi_ec_cmd_string(ACPI_EC_COMMAND_QUERY)); - ec->nr_pending_queries++; - schedule_work(&ec->work); - } + + if (ec->event_state != EC_EVENT_READY) + return; + + ec_dbg_evt("Command(%s) submitted/blocked", + acpi_ec_cmd_string(ACPI_EC_COMMAND_QUERY)); + + ec->event_state = EC_EVENT_IN_PROGRESS; + /* + * If events_to_process is greater than 0 at this point, the while () + * loop in acpi_ec_event_handler() is still running and incrementing + * events_to_process will cause it to invoke acpi_ec_submit_query() once + * more, so it is not necessary to queue up the event work to start the + * same loop again. + */ + if (ec->events_to_process++ > 0) + return; + + ec->events_in_progress++; + queue_work(ec_wq, &ec->work); } -static void acpi_ec_complete_query(struct acpi_ec *ec) +static void acpi_ec_complete_event(struct acpi_ec *ec) { - if (test_and_clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags)) + if (ec->event_state == EC_EVENT_IN_PROGRESS) + ec->event_state = EC_EVENT_COMPLETE; +} + +static void acpi_ec_close_event(struct acpi_ec *ec) +{ + if (ec->event_state != EC_EVENT_READY) ec_dbg_evt("Command(%s) unblocked", acpi_ec_cmd_string(ACPI_EC_COMMAND_QUERY)); - acpi_ec_unmask_gpe(ec); + + ec->event_state = EC_EVENT_READY; + acpi_ec_unmask_events(ec); } static inline void __acpi_ec_enable_event(struct acpi_ec *ec) @@ -490,7 +498,7 @@ static inline void __acpi_ec_enable_event(struct acpi_ec *ec) * Unconditionally invoke this once after enabling the event * handling mechanism to detect the pending events. */ - advance_transaction(ec); + advance_transaction(ec, false); } static inline void __acpi_ec_disable_event(struct acpi_ec *ec) @@ -499,37 +507,43 @@ static inline void __acpi_ec_disable_event(struct acpi_ec *ec) ec_log_drv("event blocked"); } -static void acpi_ec_enable_event(struct acpi_ec *ec) +/* + * Process _Q events that might have accumulated in the EC. + * Run with locked ec mutex. + */ +static void acpi_ec_clear(struct acpi_ec *ec) { - unsigned long flags; + int i; - spin_lock_irqsave(&ec->lock, flags); - if (acpi_ec_started(ec)) - __acpi_ec_enable_event(ec); - spin_unlock_irqrestore(&ec->lock, flags); + for (i = 0; i < ACPI_EC_CLEAR_MAX; i++) { + if (acpi_ec_submit_query(ec)) + break; + } + if (unlikely(i == ACPI_EC_CLEAR_MAX)) + pr_warn("Warning: Maximum of %d stale EC events cleared\n", i); + else + pr_info("%d stale EC events cleared\n", i); } -#ifdef CONFIG_PM_SLEEP -static bool acpi_ec_query_flushed(struct acpi_ec *ec) +static void acpi_ec_enable_event(struct acpi_ec *ec) { - bool flushed; unsigned long flags; spin_lock_irqsave(&ec->lock, flags); - flushed = !ec->nr_pending_queries; + if (acpi_ec_started(ec)) + __acpi_ec_enable_event(ec); spin_unlock_irqrestore(&ec->lock, flags); - return flushed; + + /* Drain additional events if hardware requires that */ + if (EC_FLAGS_CLEAR_ON_RESUME) + acpi_ec_clear(ec); } -static void __acpi_ec_flush_event(struct acpi_ec *ec) +#ifdef CONFIG_PM_SLEEP +static void __acpi_ec_flush_work(void) { - /* - * When ec_freeze_events is true, we need to flush events in - * the proper position before entering the noirq stage. 
- */ - wait_event(ec->wait, acpi_ec_query_flushed(ec)); - if (ec_query_wq) - flush_workqueue(ec_query_wq); + flush_workqueue(ec_wq); /* flush ec->work */ + flush_workqueue(ec_query_wq); /* flush queries */ } static void acpi_ec_disable_event(struct acpi_ec *ec) @@ -539,22 +553,28 @@ static void acpi_ec_disable_event(struct acpi_ec *ec) spin_lock_irqsave(&ec->lock, flags); __acpi_ec_disable_event(ec); spin_unlock_irqrestore(&ec->lock, flags); - __acpi_ec_flush_event(ec); + + /* + * When ec_freeze_events is true, we need to flush events in + * the proper position before entering the noirq stage. + */ + __acpi_ec_flush_work(); } void acpi_ec_flush_work(void) { - if (first_ec) - __acpi_ec_flush_event(first_ec); + /* Without ec_wq there is nothing to flush. */ + if (!ec_wq) + return; - flush_scheduled_work(); + __acpi_ec_flush_work(); } #endif /* CONFIG_PM_SLEEP */ static bool acpi_ec_guard_event(struct acpi_ec *ec) { - bool guarded = true; unsigned long flags; + bool guarded; spin_lock_irqsave(&ec->lock, flags); /* @@ -563,19 +583,15 @@ static bool acpi_ec_guard_event(struct acpi_ec *ec) * evaluating _Qxx, so we need to re-check SCI_EVT after waiting an * acceptable period. * - * The guarding period begins when EC_FLAGS_QUERY_PENDING is - * flagged, which means SCI_EVT check has just been performed. - * But if the current transaction is ACPI_EC_COMMAND_QUERY, the - * guarding should have already been performed (via - * EC_FLAGS_QUERY_GUARDING) and should not be applied so that the - * ACPI_EC_COMMAND_QUERY transaction can be transitioned into - * ACPI_EC_COMMAND_POLL state immediately. + * The guarding period is applicable if the event state is not + * EC_EVENT_READY, but otherwise if the current transaction is of the + * ACPI_EC_COMMAND_QUERY type, the guarding should have elapsed already + * and it should not be applied to let the transaction transition into + * the ACPI_EC_COMMAND_POLL state immediately. 
*/ - if (ec_event_clearing == ACPI_EC_EVT_TIMING_STATUS || - ec_event_clearing == ACPI_EC_EVT_TIMING_QUERY || - !test_bit(EC_FLAGS_QUERY_PENDING, &ec->flags) || - (ec->curr && ec->curr->command == ACPI_EC_COMMAND_QUERY)) - guarded = false; + guarded = ec_event_clearing == ACPI_EC_EVT_TIMING_EVENT && + ec->event_state != EC_EVENT_READY && + (!ec->curr || ec->curr->command != ACPI_EC_COMMAND_QUERY); spin_unlock_irqrestore(&ec->lock, flags); return guarded; } @@ -607,108 +623,95 @@ static int ec_transaction_completed(struct acpi_ec *ec) static inline void ec_transaction_transition(struct acpi_ec *ec, unsigned long flag) { ec->curr->flags |= flag; - if (ec->curr->command == ACPI_EC_COMMAND_QUERY) { - if (ec_event_clearing == ACPI_EC_EVT_TIMING_STATUS && - flag == ACPI_EC_COMMAND_POLL) - acpi_ec_complete_query(ec); - if (ec_event_clearing == ACPI_EC_EVT_TIMING_QUERY && - flag == ACPI_EC_COMMAND_COMPLETE) - acpi_ec_complete_query(ec); - if (ec_event_clearing == ACPI_EC_EVT_TIMING_EVENT && - flag == ACPI_EC_COMMAND_COMPLETE) - set_bit(EC_FLAGS_QUERY_GUARDING, &ec->flags); + + if (ec->curr->command != ACPI_EC_COMMAND_QUERY) + return; + + switch (ec_event_clearing) { + case ACPI_EC_EVT_TIMING_STATUS: + if (flag == ACPI_EC_COMMAND_POLL) + acpi_ec_close_event(ec); + + return; + + case ACPI_EC_EVT_TIMING_QUERY: + if (flag == ACPI_EC_COMMAND_COMPLETE) + acpi_ec_close_event(ec); + + return; + + case ACPI_EC_EVT_TIMING_EVENT: + if (flag == ACPI_EC_COMMAND_COMPLETE) + acpi_ec_complete_event(ec); } } -static void advance_transaction(struct acpi_ec *ec) +static void acpi_ec_spurious_interrupt(struct acpi_ec *ec, struct transaction *t) { - struct transaction *t; - u8 status; + if (t->irq_count < ec_storm_threshold) + ++t->irq_count; + + /* Trigger if the threshold is 0 too. */ + if (t->irq_count == ec_storm_threshold) + acpi_ec_mask_events(ec); +} + +static void advance_transaction(struct acpi_ec *ec, bool interrupt) +{ + struct transaction *t = ec->curr; bool wakeup = false; + u8 status; + + ec_dbg_stm("%s (%d)", interrupt ? "IRQ" : "TASK", smp_processor_id()); - ec_dbg_stm("%s (%d)", in_interrupt() ? "IRQ" : "TASK", - smp_processor_id()); - /* - * By always clearing STS before handling all indications, we can - * ensure a hardware STS 0->1 change after this clearing can always - * trigger a GPE interrupt. - */ - acpi_ec_clear_gpe(ec); status = acpi_ec_read_status(ec); - t = ec->curr; + /* * Another IRQ or a guarded polling mode advancement is detected, * the next QR_EC submission is then allowed. 
*/ if (!t || !(t->flags & ACPI_EC_COMMAND_POLL)) { if (ec_event_clearing == ACPI_EC_EVT_TIMING_EVENT && - (!ec->nr_pending_queries || - test_bit(EC_FLAGS_QUERY_GUARDING, &ec->flags))) { - clear_bit(EC_FLAGS_QUERY_GUARDING, &ec->flags); - acpi_ec_complete_query(ec); - } + ec->event_state == EC_EVENT_COMPLETE) + acpi_ec_close_event(ec); + + if (!t) + goto out; } - if (!t) - goto err; + if (t->flags & ACPI_EC_COMMAND_POLL) { if (t->wlen > t->wi) { - if ((status & ACPI_EC_FLAG_IBF) == 0) + if (!(status & ACPI_EC_FLAG_IBF)) acpi_ec_write_data(ec, t->wdata[t->wi++]); - else - goto err; + else if (interrupt && !(status & ACPI_EC_FLAG_SCI)) + acpi_ec_spurious_interrupt(ec, t); } else if (t->rlen > t->ri) { - if ((status & ACPI_EC_FLAG_OBF) == 1) { + if (status & ACPI_EC_FLAG_OBF) { t->rdata[t->ri++] = acpi_ec_read_data(ec); if (t->rlen == t->ri) { ec_transaction_transition(ec, ACPI_EC_COMMAND_COMPLETE); + wakeup = true; if (t->command == ACPI_EC_COMMAND_QUERY) ec_dbg_evt("Command(%s) completed by hardware", acpi_ec_cmd_string(ACPI_EC_COMMAND_QUERY)); - wakeup = true; } - } else - goto err; - } else if (t->wlen == t->wi && - (status & ACPI_EC_FLAG_IBF) == 0) { - ec_transaction_transition(ec, ACPI_EC_COMMAND_COMPLETE); - wakeup = true; - } - goto out; - } else { - if (EC_FLAGS_QUERY_HANDSHAKE && - !(status & ACPI_EC_FLAG_SCI) && - (t->command == ACPI_EC_COMMAND_QUERY)) { - ec_transaction_transition(ec, ACPI_EC_COMMAND_POLL); - t->rdata[t->ri++] = 0x00; + } else if (interrupt && !(status & ACPI_EC_FLAG_SCI)) { + acpi_ec_spurious_interrupt(ec, t); + } + } else if (t->wlen == t->wi && !(status & ACPI_EC_FLAG_IBF)) { ec_transaction_transition(ec, ACPI_EC_COMMAND_COMPLETE); - ec_dbg_evt("Command(%s) completed by software", - acpi_ec_cmd_string(ACPI_EC_COMMAND_QUERY)); wakeup = true; - } else if ((status & ACPI_EC_FLAG_IBF) == 0) { - acpi_ec_write_cmd(ec, t->command); - ec_transaction_transition(ec, ACPI_EC_COMMAND_POLL); - } else - goto err; - goto out; - } -err: - /* - * If SCI bit is set, then don't think it's a false IRQ - * otherwise will take a not handled IRQ as a false one. 
- */ - if (!(status & ACPI_EC_FLAG_SCI)) { - if (in_interrupt() && t) { - if (t->irq_count < ec_storm_threshold) - ++t->irq_count; - /* Allow triggering on 0 threshold */ - if (t->irq_count == ec_storm_threshold) - acpi_ec_mask_gpe(ec); } + } else if (!(status & ACPI_EC_FLAG_IBF)) { + acpi_ec_write_cmd(ec, t->command); + ec_transaction_transition(ec, ACPI_EC_COMMAND_POLL); } + out: if (status & ACPI_EC_FLAG_SCI) - acpi_ec_submit_query(ec); - if (wakeup && in_interrupt()) + acpi_ec_submit_event(ec); + + if (wakeup && interrupt) wake_up(&ec->wait); } @@ -765,7 +768,7 @@ static int ec_poll(struct acpi_ec *ec) if (!ec_guard(ec)) return 0; spin_lock_irqsave(&ec->lock, flags); - advance_transaction(ec); + advance_transaction(ec, false); spin_unlock_irqrestore(&ec->lock, flags); } while (time_before(jiffies, delay)); pr_debug("controller reset, restart transaction\n"); @@ -782,6 +785,9 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec, unsigned long tmp; int ret = 0; + if (t->rdata) + memset(t->rdata, 0, t->rlen); + /* start transaction */ spin_lock_irqsave(&ec->lock, tmp); /* Enable GPE for command processing (IBF=0/OBF=1) */ @@ -800,7 +806,7 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec, spin_lock_irqsave(&ec->lock, tmp); if (t->irq_count == ec_storm_threshold) - acpi_ec_unmask_gpe(ec); + acpi_ec_unmask_events(ec); ec_dbg_req("Command(%s) stopped", acpi_ec_cmd_string(t->command)); ec->curr = NULL; /* Disable GPE for command processing (IBF=0/OBF=1) */ @@ -818,8 +824,6 @@ static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t) if (!ec || (!t) || (t->wlen && !t->wdata) || (t->rlen && !t->rdata)) return -EINVAL; - if (t->rdata) - memset(t->rdata, 0, t->rlen); mutex_lock(&ec->mutex); if (ec->global_lock) { @@ -846,7 +850,7 @@ static int acpi_ec_burst_enable(struct acpi_ec *ec) .wdata = NULL, .rdata = &d, .wlen = 0, .rlen = 1}; - return acpi_ec_transaction(ec, &t); + return acpi_ec_transaction_unlocked(ec, &t); } static int acpi_ec_burst_disable(struct acpi_ec *ec) @@ -856,7 +860,7 @@ static int acpi_ec_burst_disable(struct acpi_ec *ec) .wlen = 0, .rlen = 0}; return (acpi_ec_read_status(ec) & ACPI_EC_FLAG_BURST) ? 
- acpi_ec_transaction(ec, &t) : 0; + acpi_ec_transaction_unlocked(ec, &t) : 0; } static int acpi_ec_read(struct acpi_ec *ec, u8 address, u8 *data) @@ -872,6 +876,19 @@ static int acpi_ec_read(struct acpi_ec *ec, u8 address, u8 *data) return result; } +static int acpi_ec_read_unlocked(struct acpi_ec *ec, u8 address, u8 *data) +{ + int result; + u8 d; + struct transaction t = {.command = ACPI_EC_COMMAND_READ, + .wdata = &address, .rdata = &d, + .wlen = 1, .rlen = 1}; + + result = acpi_ec_transaction_unlocked(ec, &t); + *data = d; + return result; +} + static int acpi_ec_write(struct acpi_ec *ec, u8 address, u8 data) { u8 wdata[2] = { address, data }; @@ -882,6 +899,16 @@ static int acpi_ec_write(struct acpi_ec *ec, u8 address, u8 data) return acpi_ec_transaction(ec, &t); } +static int acpi_ec_write_unlocked(struct acpi_ec *ec, u8 address, u8 data) +{ + u8 wdata[2] = { address, data }; + struct transaction t = {.command = ACPI_EC_COMMAND_WRITE, + .wdata = wdata, .rdata = NULL, + .wlen = 2, .rlen = 0}; + + return acpi_ec_transaction_unlocked(ec, &t); +} + int ec_read(u8 addr, u8 *val) { int err; @@ -902,14 +929,10 @@ EXPORT_SYMBOL(ec_read); int ec_write(u8 addr, u8 val) { - int err; - if (!first_ec) return -ENODEV; - err = acpi_ec_write(first_ec, addr, val); - - return err; + return acpi_ec_write(first_ec, addr, val); } EXPORT_SYMBOL(ec_write); @@ -1034,50 +1057,24 @@ void acpi_ec_unblock_transactions(void) acpi_ec_start(first_ec, true); } -void acpi_ec_mark_gpe_for_wake(void) -{ - if (first_ec && !ec_no_wakeup) - acpi_mark_gpe_for_wake(NULL, first_ec->gpe); -} - -void acpi_ec_set_gpe_wake_mask(u8 action) -{ - if (first_ec && !ec_no_wakeup) - acpi_set_gpe_wake_mask(NULL, first_ec->gpe, action); -} - -void acpi_ec_dispatch_gpe(void) -{ - if (first_ec) - acpi_dispatch_gpe(NULL, first_ec->gpe); -} - /* -------------------------------------------------------------------------- Event Management -------------------------------------------------------------------------- */ static struct acpi_ec_query_handler * -acpi_ec_get_query_handler(struct acpi_ec_query_handler *handler) -{ - if (handler) - kref_get(&handler->kref); - return handler; -} - -static struct acpi_ec_query_handler * acpi_ec_get_query_handler_by_value(struct acpi_ec *ec, u8 value) { struct acpi_ec_query_handler *handler; - bool found = false; mutex_lock(&ec->mutex); list_for_each_entry(handler, &ec->list, node) { if (value == handler->query_bit) { - found = true; - break; + kref_get(&handler->kref); + mutex_unlock(&ec->mutex); + return handler; } } mutex_unlock(&ec->mutex); - return found ? 
acpi_ec_get_query_handler(handler) : NULL; + return NULL; } static void acpi_ec_query_handler_release(struct kref *kref) @@ -1097,9 +1094,12 @@ int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit, acpi_handle handle, acpi_ec_query_func func, void *data) { - struct acpi_ec_query_handler *handler = - kzalloc(sizeof(struct acpi_ec_query_handler), GFP_KERNEL); + struct acpi_ec_query_handler *handler; + if (!handle && !func) + return -EINVAL; + + handler = kzalloc(sizeof(*handler), GFP_KERNEL); if (!handler) return -ENOMEM; @@ -1111,6 +1111,7 @@ int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit, kref_init(&handler->kref); list_add(&handler->node, &ec->list); mutex_unlock(&ec->mutex); + return 0; } EXPORT_SYMBOL_GPL(acpi_ec_add_query_handler); @@ -1123,9 +1124,16 @@ static void acpi_ec_remove_query_handlers(struct acpi_ec *ec, mutex_lock(&ec->mutex); list_for_each_entry_safe(handler, tmp, &ec->list, node) { - if (remove_all || query_bit == handler->query_bit) { + /* + * When remove_all is false, only remove custom query handlers + * which have handler->func set. This is done to preserve query + * handlers discovered thru ACPI, as they should continue handling + * EC queries. + */ + if (remove_all || (handler->func && handler->query_bit == query_bit)) { list_del_init(&handler->node); list_add(&handler->node, &free_list); + } } mutex_unlock(&ec->mutex); @@ -1136,10 +1144,34 @@ static void acpi_ec_remove_query_handlers(struct acpi_ec *ec, void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit) { acpi_ec_remove_query_handlers(ec, false, query_bit); + flush_workqueue(ec_query_wq); } EXPORT_SYMBOL_GPL(acpi_ec_remove_query_handler); -static struct acpi_ec_query *acpi_ec_create_query(u8 *pval) +static void acpi_ec_event_processor(struct work_struct *work) +{ + struct acpi_ec_query *q = container_of(work, struct acpi_ec_query, work); + struct acpi_ec_query_handler *handler = q->handler; + struct acpi_ec *ec = q->ec; + + ec_dbg_evt("Query(0x%02x) started", handler->query_bit); + + if (handler->func) + handler->func(handler->data); + else if (handler->handle) + acpi_evaluate_object(handler->handle, NULL, NULL, NULL); + + ec_dbg_evt("Query(0x%02x) stopped", handler->query_bit); + + spin_lock_irq(&ec->lock); + ec->queries_in_progress--; + spin_unlock_irq(&ec->lock); + + acpi_ec_put_query_handler(handler); + kfree(q); +} + +static struct acpi_ec_query *acpi_ec_create_query(struct acpi_ec *ec, u8 *pval) { struct acpi_ec_query *q; struct transaction *t; @@ -1147,44 +1179,23 @@ static struct acpi_ec_query *acpi_ec_create_query(u8 *pval) q = kzalloc(sizeof (struct acpi_ec_query), GFP_KERNEL); if (!q) return NULL; + INIT_WORK(&q->work, acpi_ec_event_processor); t = &q->transaction; t->command = ACPI_EC_COMMAND_QUERY; t->rdata = pval; t->rlen = 1; + q->ec = ec; return q; } -static void acpi_ec_delete_query(struct acpi_ec_query *q) -{ - if (q) { - if (q->handler) - acpi_ec_put_query_handler(q->handler); - kfree(q); - } -} - -static void acpi_ec_event_processor(struct work_struct *work) -{ - struct acpi_ec_query *q = container_of(work, struct acpi_ec_query, work); - struct acpi_ec_query_handler *handler = q->handler; - - ec_dbg_evt("Query(0x%02x) started", handler->query_bit); - if (handler->func) - handler->func(handler->data); - else if (handler->handle) - acpi_evaluate_object(handler->handle, NULL, NULL, NULL); - ec_dbg_evt("Query(0x%02x) stopped", handler->query_bit); - acpi_ec_delete_query(q); -} - -static int acpi_ec_query(struct acpi_ec *ec, u8 *data) +static int 
acpi_ec_submit_query(struct acpi_ec *ec) { + struct acpi_ec_query *q; u8 value = 0; int result; - struct acpi_ec_query *q; - q = acpi_ec_create_query(&value); + q = acpi_ec_create_query(ec, &value); if (!q) return -ENOMEM; @@ -1194,11 +1205,14 @@ static int acpi_ec_query(struct acpi_ec *ec, u8 *data) * bit to be cleared (and thus clearing the interrupt source). */ result = acpi_ec_transaction(ec, &q->transaction); - if (!value) - result = -ENODATA; if (result) goto err_exit; + if (!value) { + result = -ENODATA; + goto err_exit; + } + q->handler = acpi_ec_get_query_handler_by_value(ec, value); if (!q->handler) { result = -ENODATA; @@ -1206,90 +1220,123 @@ static int acpi_ec_query(struct acpi_ec *ec, u8 *data) } /* - * It is reported that _Qxx are evaluated in a parallel way on - * Windows: + * It is reported that _Qxx are evaluated in a parallel way on Windows: * https://bugzilla.kernel.org/show_bug.cgi?id=94411 * - * Put this log entry before schedule_work() in order to make - * it appearing before any other log entries occurred during the - * work queue execution. + * Put this log entry before queue_work() to make it appear in the log + * before any other messages emitted during workqueue handling. */ ec_dbg_evt("Query(0x%02x) scheduled", value); - if (!queue_work(ec_query_wq, &q->work)) { - ec_dbg_evt("Query(0x%02x) overlapped", value); - result = -EBUSY; - } -err_exit: - if (result) - acpi_ec_delete_query(q); - if (data) - *data = value; - return result; -} + spin_lock_irq(&ec->lock); -static void acpi_ec_check_event(struct acpi_ec *ec) -{ - unsigned long flags; + ec->queries_in_progress++; + queue_work(ec_query_wq, &q->work); - if (ec_event_clearing == ACPI_EC_EVT_TIMING_EVENT) { - if (ec_guard(ec)) { - spin_lock_irqsave(&ec->lock, flags); - /* - * Take care of the SCI_EVT unless no one else is - * taking care of it. - */ - if (!ec->curr) - advance_transaction(ec); - spin_unlock_irqrestore(&ec->lock, flags); - } - } + spin_unlock_irq(&ec->lock); + + return 0; + +err_exit: + kfree(q); + + return result; } static void acpi_ec_event_handler(struct work_struct *work) { - unsigned long flags; struct acpi_ec *ec = container_of(work, struct acpi_ec, work); ec_dbg_evt("Event started"); - spin_lock_irqsave(&ec->lock, flags); - while (ec->nr_pending_queries) { - spin_unlock_irqrestore(&ec->lock, flags); - (void)acpi_ec_query(ec, NULL); - spin_lock_irqsave(&ec->lock, flags); - ec->nr_pending_queries--; - /* - * Before exit, make sure that this work item can be - * scheduled again. There might be QR_EC failures, leaving - * EC_FLAGS_QUERY_PENDING uncleared and preventing this work - * item from being scheduled again. - */ - if (!ec->nr_pending_queries) { - if (ec_event_clearing == ACPI_EC_EVT_TIMING_STATUS || - ec_event_clearing == ACPI_EC_EVT_TIMING_QUERY) - acpi_ec_complete_query(ec); - } + spin_lock_irq(&ec->lock); + + while (ec->events_to_process) { + spin_unlock_irq(&ec->lock); + + acpi_ec_submit_query(ec); + + spin_lock_irq(&ec->lock); + + ec->events_to_process--; } - spin_unlock_irqrestore(&ec->lock, flags); - ec_dbg_evt("Event stopped"); + /* + * Before exit, make sure that the it will be possible to queue up the + * event handling work again regardless of whether or not the query + * queued up above is processed successfully. 
+ */ + if (ec_event_clearing == ACPI_EC_EVT_TIMING_EVENT) { + bool guard_timeout; + + acpi_ec_complete_event(ec); + + ec_dbg_evt("Event stopped"); - acpi_ec_check_event(ec); + spin_unlock_irq(&ec->lock); + + guard_timeout = !!ec_guard(ec); + + spin_lock_irq(&ec->lock); + + /* Take care of SCI_EVT unless someone else is doing that. */ + if (guard_timeout && !ec->curr) + advance_transaction(ec, false); + } else { + acpi_ec_close_event(ec); + + ec_dbg_evt("Event stopped"); + } + + ec->events_in_progress--; + + spin_unlock_irq(&ec->lock); } -static u32 acpi_ec_gpe_handler(acpi_handle gpe_device, - u32 gpe_number, void *data) +static void clear_gpe_and_advance_transaction(struct acpi_ec *ec, bool interrupt) +{ + /* + * Clear GPE_STS upfront to allow subsequent hardware GPE_STS 0->1 + * changes to always trigger a GPE interrupt. + * + * GPE STS is a W1C register, which means: + * + * 1. Software can clear it without worrying about clearing the other + * GPEs' STS bits when the hardware sets them in parallel. + * + * 2. As long as software can ensure only clearing it when it is set, + * hardware won't set it in parallel. + */ + if (ec->gpe >= 0 && acpi_ec_gpe_status_set(ec)) + acpi_clear_gpe(NULL, ec->gpe); + + advance_transaction(ec, true); +} + +static void acpi_ec_handle_interrupt(struct acpi_ec *ec) { unsigned long flags; - struct acpi_ec *ec = data; spin_lock_irqsave(&ec->lock, flags); - advance_transaction(ec); + + clear_gpe_and_advance_transaction(ec, true); + spin_unlock_irqrestore(&ec->lock, flags); +} + +static u32 acpi_ec_gpe_handler(acpi_handle gpe_device, + u32 gpe_number, void *data) +{ + acpi_ec_handle_interrupt(data); return ACPI_INTERRUPT_HANDLED; } +static irqreturn_t acpi_ec_irq_handler(int irq, void *data) +{ + acpi_ec_handle_interrupt(data); + return IRQ_HANDLED; +} + /* -------------------------------------------------------------------------- * Address Space Management * -------------------------------------------------------------------------- */ @@ -1302,6 +1349,7 @@ acpi_ec_space_handler(u32 function, acpi_physical_address address, struct acpi_ec *ec = handler_context; int result = 0, i, bytes = bits / 8; u8 *value = (u8 *)value64; + u32 glk; if ((address > 0xFF) || !value || !handler_context) return AE_BAD_PARAMETER; @@ -1309,17 +1357,38 @@ acpi_ec_space_handler(u32 function, acpi_physical_address address, if (function != ACPI_READ && function != ACPI_WRITE) return AE_BAD_PARAMETER; + mutex_lock(&ec->mutex); + + if (ec->global_lock) { + acpi_status status; + + status = acpi_acquire_global_lock(ACPI_EC_UDELAY_GLK, &glk); + if (ACPI_FAILURE(status)) { + result = -ENODEV; + goto unlock; + } + } + if (ec->busy_polling || bits > 8) acpi_ec_burst_enable(ec); - for (i = 0; i < bytes; ++i, ++address, ++value) + for (i = 0; i < bytes; ++i, ++address, ++value) { result = (function == ACPI_READ) ? 
- acpi_ec_read(ec, address, value) : - acpi_ec_write(ec, address, *value); + acpi_ec_read_unlocked(ec, address, value) : + acpi_ec_write_unlocked(ec, address, *value); + if (result < 0) + break; + } if (ec->busy_polling || bits > 8) acpi_ec_burst_disable(ec); + if (ec->global_lock) + acpi_release_global_lock(glk); + +unlock: + mutex_unlock(&ec->mutex); + switch (result) { case -EINVAL: return AE_BAD_PARAMETER; @@ -1327,8 +1396,10 @@ acpi_ec_space_handler(u32 function, acpi_physical_address address, return AE_NOT_FOUND; case -ETIME: return AE_TIME; - default: + case 0: return AE_OK; + default: + return AE_ERROR; } } @@ -1362,6 +1433,8 @@ static struct acpi_ec *acpi_ec_alloc(void) ec->timestamp = jiffies; ec->busy_polling = true; ec->polling_guard = 0; + ec->gpe = -1; + ec->irq = -1; return ec; } @@ -1399,20 +1472,16 @@ ec_parse_device(acpi_handle handle, u32 Level, void *context, void **retval) if (ec->data_addr == 0 || ec->command_addr == 0) return AE_OK; - if (boot_ec && boot_ec_is_ecdt && EC_FLAGS_IGNORE_DSDT_GPE) { - /* - * Always inherit the GPE number setting from the ECDT - * EC. - */ - ec->gpe = boot_ec->gpe; - } else { - /* Get GPE bit assignment (EC events). */ - /* TODO: Add support for _GPE returning a package */ - status = acpi_evaluate_integer(handle, "_GPE", NULL, &tmp); - if (ACPI_FAILURE(status)) - return status; + /* Get GPE bit assignment (EC events). */ + /* TODO: Add support for _GPE returning a package */ + status = acpi_evaluate_integer(handle, "_GPE", NULL, &tmp); + if (ACPI_SUCCESS(status)) ec->gpe = tmp; - } + /* + * Errors are non-fatal, allowing for ACPI Reduced Hardware + * platforms which use GpioInt instead of GPE. + */ + /* Use the global lock for all EC transactions? */ tmp = 0; acpi_evaluate_integer(handle, "_GLK", NULL, &tmp); @@ -1421,63 +1490,111 @@ ec_parse_device(acpi_handle handle, u32 Level, void *context, void **retval) return AE_CTRL_TERMINATE; } -/* - * Note: This function returns an error code only when the address space - * handler is not installed, which means "not able to handle - * transactions". +static bool install_gpe_event_handler(struct acpi_ec *ec) +{ + acpi_status status; + + status = acpi_install_gpe_raw_handler(NULL, ec->gpe, + ACPI_GPE_EDGE_TRIGGERED, + &acpi_ec_gpe_handler, ec); + if (ACPI_FAILURE(status)) + return false; + + if (test_bit(EC_FLAGS_STARTED, &ec->flags) && ec->reference_count >= 1) + acpi_ec_enable_gpe(ec, true); + + return true; +} + +static bool install_gpio_irq_event_handler(struct acpi_ec *ec) +{ + return request_threaded_irq(ec->irq, NULL, acpi_ec_irq_handler, + IRQF_SHARED | IRQF_ONESHOT, "ACPI EC", ec) >= 0; +} + +/** + * ec_install_handlers - Install service callbacks and register query methods. + * @ec: Target EC. + * @device: ACPI device object corresponding to @ec. + * @call_reg: If _REG should be called to notify OpRegion availability + * + * Install a handler for the EC address space type unless it has been installed + * already. If @device is not NULL, also look for EC query methods in the + * namespace and register them, and install an event (either GPE or GPIO IRQ) + * handler for the EC, if possible. + * + * Return: + * -ENODEV if the address space handler cannot be installed, which means + * "unable to handle transactions", + * -EPROBE_DEFER if GPIO IRQ acquisition needs to be deferred, + * or 0 (success) otherwise. 
*/ -static int ec_install_handlers(struct acpi_ec *ec, bool handle_events) +static int ec_install_handlers(struct acpi_ec *ec, struct acpi_device *device, + bool call_reg) { acpi_status status; acpi_ec_start(ec, false); if (!test_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags)) { + acpi_handle scope_handle = ec == first_ec ? ACPI_ROOT_OBJECT : ec->handle; + acpi_ec_enter_noirq(ec); - status = acpi_install_address_space_handler(ec->handle, - ACPI_ADR_SPACE_EC, - &acpi_ec_space_handler, - NULL, ec); + status = acpi_install_address_space_handler_no_reg(scope_handle, + ACPI_ADR_SPACE_EC, + &acpi_ec_space_handler, + NULL, ec); if (ACPI_FAILURE(status)) { - if (status == AE_NOT_FOUND) { - /* - * Maybe OS fails in evaluating the _REG - * object. The AE_NOT_FOUND error will be - * ignored and OS * continue to initialize - * EC. - */ - pr_err("Fail in evaluating the _REG object" - " of EC device. Broken bios is suspected.\n"); - } else { - acpi_ec_stop(ec, false); - return -ENODEV; - } + acpi_ec_stop(ec, false); + return -ENODEV; } set_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags); } - if (!handle_events) + if (call_reg && !test_bit(EC_FLAGS_EC_REG_CALLED, &ec->flags)) { + acpi_execute_reg_methods(ec->handle, ACPI_UINT32_MAX, ACPI_ADR_SPACE_EC); + set_bit(EC_FLAGS_EC_REG_CALLED, &ec->flags); + } + + if (!device) return 0; - if (!test_bit(EC_FLAGS_EVT_HANDLER_INSTALLED, &ec->flags)) { + if (ec->gpe < 0) { + /* ACPI reduced hardware platforms use a GpioInt from _CRS. */ + int irq = acpi_dev_gpio_irq_get(device, 0); + /* + * Bail out right away for deferred probing or complete the + * initialization regardless of any other errors. + */ + if (irq == -EPROBE_DEFER) + return -EPROBE_DEFER; + else if (irq >= 0) + ec->irq = irq; + } + + if (!test_bit(EC_FLAGS_QUERY_METHODS_INSTALLED, &ec->flags)) { /* Find and register all query methods */ acpi_walk_namespace(ACPI_TYPE_METHOD, ec->handle, 1, acpi_ec_register_query_methods, NULL, ec, NULL); - set_bit(EC_FLAGS_EVT_HANDLER_INSTALLED, &ec->flags); + set_bit(EC_FLAGS_QUERY_METHODS_INSTALLED, &ec->flags); } - if (!test_bit(EC_FLAGS_GPE_HANDLER_INSTALLED, &ec->flags)) { - status = acpi_install_gpe_raw_handler(NULL, ec->gpe, - ACPI_GPE_EDGE_TRIGGERED, - &acpi_ec_gpe_handler, ec); - /* This is not fatal as we can poll EC events */ - if (ACPI_SUCCESS(status)) { - set_bit(EC_FLAGS_GPE_HANDLER_INSTALLED, &ec->flags); + if (!test_bit(EC_FLAGS_EVENT_HANDLER_INSTALLED, &ec->flags)) { + bool ready = false; + + if (ec->gpe >= 0) + ready = install_gpe_event_handler(ec); + else if (ec->irq >= 0) + ready = install_gpio_irq_event_handler(ec); + + if (ready) { + set_bit(EC_FLAGS_EVENT_HANDLER_INSTALLED, &ec->flags); acpi_ec_leave_noirq(ec); - if (test_bit(EC_FLAGS_STARTED, &ec->flags) && - ec->reference_count >= 1) - acpi_ec_enable_gpe(ec, true); } + /* + * Failures to install an event handler are not fatal, because + * the EC can be polled for events. + */ } /* EC is fully operational, allow queries */ acpi_ec_enable_event(ec); @@ -1487,9 +1604,13 @@ static int ec_install_handlers(struct acpi_ec *ec, bool handle_events) static void ec_remove_handlers(struct acpi_ec *ec) { + acpi_handle scope_handle = ec == first_ec ? 
ACPI_ROOT_OBJECT : ec->handle; + if (test_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags)) { - if (ACPI_FAILURE(acpi_remove_address_space_handler(ec->handle, - ACPI_ADR_SPACE_EC, &acpi_ec_space_handler))) + if (ACPI_FAILURE(acpi_remove_address_space_handler( + scope_handle, + ACPI_ADR_SPACE_EC, + &acpi_ec_space_handler))) pr_err("failed to remove space handler\n"); clear_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags); } @@ -1507,153 +1628,107 @@ static void ec_remove_handlers(struct acpi_ec *ec) */ acpi_ec_stop(ec, false); - if (test_bit(EC_FLAGS_GPE_HANDLER_INSTALLED, &ec->flags)) { - if (ACPI_FAILURE(acpi_remove_gpe_handler(NULL, ec->gpe, - &acpi_ec_gpe_handler))) + if (test_bit(EC_FLAGS_EVENT_HANDLER_INSTALLED, &ec->flags)) { + if (ec->gpe >= 0 && + ACPI_FAILURE(acpi_remove_gpe_handler(NULL, ec->gpe, + &acpi_ec_gpe_handler))) pr_err("failed to remove gpe handler\n"); - clear_bit(EC_FLAGS_GPE_HANDLER_INSTALLED, &ec->flags); + + if (ec->irq >= 0) + free_irq(ec->irq, ec); + + clear_bit(EC_FLAGS_EVENT_HANDLER_INSTALLED, &ec->flags); } - if (test_bit(EC_FLAGS_EVT_HANDLER_INSTALLED, &ec->flags)) { + if (test_bit(EC_FLAGS_QUERY_METHODS_INSTALLED, &ec->flags)) { acpi_ec_remove_query_handlers(ec, true, 0); - clear_bit(EC_FLAGS_EVT_HANDLER_INSTALLED, &ec->flags); + clear_bit(EC_FLAGS_QUERY_METHODS_INSTALLED, &ec->flags); } } -static int acpi_ec_setup(struct acpi_ec *ec, bool handle_events) +static int acpi_ec_setup(struct acpi_ec *ec, struct acpi_device *device, bool call_reg) { int ret; - ret = ec_install_handlers(ec, handle_events); - if (ret) - return ret; - /* First EC capable of handling transactions */ - if (!first_ec) { + if (!first_ec) first_ec = ec; - acpi_handle_info(first_ec->handle, "Used as first EC\n"); - } - - acpi_handle_info(ec->handle, - "GPE=0x%x, EC_CMD/EC_SC=0x%lx, EC_DATA=0x%lx\n", - ec->gpe, ec->command_addr, ec->data_addr); - return ret; -} -static int acpi_config_boot_ec(struct acpi_ec *ec, acpi_handle handle, - bool handle_events, bool is_ecdt) -{ - int ret; - - /* - * Changing the ACPI handle results in a re-configuration of the - * boot EC. And if it happens after the namespace initialization, - * it causes _REG evaluations. - */ - if (boot_ec && boot_ec->handle != handle) - ec_remove_handlers(boot_ec); + ret = ec_install_handlers(ec, device, call_reg); + if (ret) { + if (ec == first_ec) + first_ec = NULL; - /* Unset old boot EC */ - if (boot_ec != ec) - acpi_ec_free(boot_ec); - - /* - * ECDT device creation is split into acpi_ec_ecdt_probe() and - * acpi_ec_ecdt_start(). This function takes care of completing the - * ECDT parsing logic as the handle update should be performed - * between the installation/uninstallation of the handlers. - */ - if (ec->handle != handle) - ec->handle = handle; - - ret = acpi_ec_setup(ec, handle_events); - if (ret) return ret; - - /* Set new boot EC */ - if (!boot_ec) { - boot_ec = ec; - boot_ec_is_ecdt = is_ecdt; } - acpi_handle_info(boot_ec->handle, - "Used as boot %s EC to handle transactions%s\n", - is_ecdt ? "ECDT" : "DSDT", - handle_events ? 
" and events" : ""); - return ret; -} - -static bool acpi_ec_ecdt_get_handle(acpi_handle *phandle) -{ - struct acpi_table_ecdt *ecdt_ptr; - acpi_status status; - acpi_handle handle; - - status = acpi_get_table(ACPI_SIG_ECDT, 1, - (struct acpi_table_header **)&ecdt_ptr); - if (ACPI_FAILURE(status)) - return false; + pr_info("EC_CMD/EC_SC=0x%lx, EC_DATA=0x%lx\n", ec->command_addr, + ec->data_addr); - status = acpi_get_handle(NULL, ecdt_ptr->id, &handle); - if (ACPI_FAILURE(status)) - return false; - - *phandle = handle; - return true; -} + if (test_bit(EC_FLAGS_EVENT_HANDLER_INSTALLED, &ec->flags)) { + if (ec->gpe >= 0) + pr_info("GPE=0x%x\n", ec->gpe); + else + pr_info("IRQ=%d\n", ec->irq); + } -static bool acpi_is_boot_ec(struct acpi_ec *ec) -{ - if (!boot_ec) - return false; - if (ec->command_addr == boot_ec->command_addr && - ec->data_addr == boot_ec->data_addr) - return true; - return false; + return ret; } static int acpi_ec_add(struct acpi_device *device) { - struct acpi_ec *ec = NULL; + struct acpi_ec *ec; int ret; - bool is_ecdt = false; - acpi_status status; - strcpy(acpi_device_name(device), ACPI_EC_DEVICE_NAME); - strcpy(acpi_device_class(device), ACPI_EC_CLASS); + strscpy(acpi_device_name(device), ACPI_EC_DEVICE_NAME); + strscpy(acpi_device_class(device), ACPI_EC_CLASS); - if (!strcmp(acpi_device_hid(device), ACPI_ECDT_HID)) { - is_ecdt = true; + if (boot_ec && (boot_ec->handle == device->handle || + !strcmp(acpi_device_hid(device), ACPI_ECDT_HID))) { + /* Fast path: this device corresponds to the boot EC. */ ec = boot_ec; } else { + acpi_status status; + ec = acpi_ec_alloc(); if (!ec) return -ENOMEM; + status = ec_parse_device(device->handle, 0, ec, NULL); if (status != AE_CTRL_TERMINATE) { ret = -EINVAL; - goto err_alloc; + goto err; } - } - if (acpi_is_boot_ec(ec)) { - boot_ec_is_ecdt = is_ecdt; - if (!is_ecdt) { + if (boot_ec && ec->command_addr == boot_ec->command_addr && + ec->data_addr == boot_ec->data_addr) { /* - * Trust PNP0C09 namespace location rather than - * ECDT ID. But trust ECDT GPE rather than _GPE - * because of ASUS quirks, so do not change - * boot_ec->gpe to ec->gpe. + * Trust PNP0C09 namespace location rather than ECDT ID. + * But trust ECDT GPE rather than _GPE because of ASUS + * quirks. So do not change boot_ec->gpe to ec->gpe, + * except when the TRUST_DSDT_GPE quirk is set. */ boot_ec->handle = ec->handle; + + if (EC_FLAGS_TRUST_DSDT_GPE) + boot_ec->gpe = ec->gpe; + acpi_handle_debug(ec->handle, "duplicated.\n"); acpi_ec_free(ec); ec = boot_ec; } - ret = acpi_config_boot_ec(ec, ec->handle, true, is_ecdt); - } else - ret = acpi_ec_setup(ec, true); + } + + ret = acpi_ec_setup(ec, device, true); if (ret) - goto err_query; + goto err; + + if (ec == boot_ec) + acpi_handle_info(boot_ec->handle, + "Boot %s EC initialization complete\n", + boot_ec_is_ecdt ? 
"ECDT" : "DSDT"); + + acpi_handle_info(ec->handle, + "EC: Used to handle transactions and events\n"); device->driver_data = ec; @@ -1662,28 +1737,25 @@ static int acpi_ec_add(struct acpi_device *device) ret = !!request_region(ec->command_addr, 1, "EC cmd"); WARN(!ret, "Could not request EC cmd io port 0x%lx", ec->command_addr); - if (!is_ecdt) { - /* Reprobe devices depending on the EC */ - acpi_walk_dep_device_list(ec->handle); - } + /* Reprobe devices depending on the EC */ + acpi_dev_clear_dependencies(device); + acpi_handle_debug(ec->handle, "enumerated.\n"); return 0; -err_query: - if (ec != boot_ec) - acpi_ec_remove_query_handlers(ec, true, 0); -err_alloc: +err: if (ec != boot_ec) acpi_ec_free(ec); + return ret; } -static int acpi_ec_remove(struct acpi_device *device) +static void acpi_ec_remove(struct acpi_device *device) { struct acpi_ec *ec; if (!device) - return -EINVAL; + return; ec = acpi_driver_data(device); release_region(ec->data_addr, 1); @@ -1693,7 +1765,12 @@ static int acpi_ec_remove(struct acpi_device *device) ec_remove_handlers(ec); acpi_ec_free(ec); } - return 0; +} + +void acpi_ec_register_opregions(struct acpi_device *adev) +{ + if (first_ec && first_ec->handle != adev->handle) + acpi_execute_reg_methods(adev->handle, 1, ACPI_ADR_SPACE_EC); } static acpi_status @@ -1730,10 +1807,10 @@ static const struct acpi_device_id ec_device_ids[] = { * namespace EC before the main ACPI device enumeration process. It is * retained for historical reason and will be deprecated in the future. */ -int __init acpi_ec_dsdt_probe(void) +void __init acpi_ec_dsdt_probe(void) { - acpi_status status; struct acpi_ec *ec; + acpi_status status; int ret; /* @@ -1743,21 +1820,22 @@ int __init acpi_ec_dsdt_probe(void) * picking up an invalid EC device. */ if (boot_ec) - return -ENODEV; + return; ec = acpi_ec_alloc(); if (!ec) - return -ENOMEM; + return; + /* * At this point, the namespace is initialized, so start to find * the namespace objects. */ - status = acpi_get_devices(ec_device_ids[0].id, - ec_parse_device, ec, NULL); + status = acpi_get_devices(ec_device_ids[0].id, ec_parse_device, ec, NULL); if (ACPI_FAILURE(status) || !ec->handle) { - ret = -ENODEV; - goto error; + acpi_ec_free(ec); + return; } + /* * When the DSDT EC is available, always re-configure boot EC to * have _REG evaluated. _REG can only be evaluated after the @@ -1765,60 +1843,82 @@ int __init acpi_ec_dsdt_probe(void) * At this point, the GPE is not fully initialized, so do not to * handle the events. */ - ret = acpi_config_boot_ec(ec, ec->handle, false, false); -error: - if (ret) + ret = acpi_ec_setup(ec, NULL, true); + if (ret) { acpi_ec_free(ec); - return ret; + return; + } + + boot_ec = ec; + + acpi_handle_info(ec->handle, + "Boot DSDT EC used to handle transactions\n"); } /* - * If the DSDT EC is not functioning, we still need to prepare a fully - * functioning ECDT EC first in order to handle the events. - * https://bugzilla.kernel.org/show_bug.cgi?id=115021 + * acpi_ec_ecdt_start - Finalize the boot ECDT EC initialization. + * + * First, look for an ACPI handle for the boot ECDT EC if acpi_ec_add() has not + * found a matching object in the namespace. + * + * Next, in case the DSDT EC is not functioning, it is still necessary to + * provide a functional ECDT EC to handle events, so add an extra device object + * to represent it (see https://bugzilla.kernel.org/show_bug.cgi?id=115021). 
+ * + * This is useful on platforms with valid ECDT and invalid DSDT EC settings, + * like ASUS X550ZE (see https://bugzilla.kernel.org/show_bug.cgi?id=196847). */ -static int __init acpi_ec_ecdt_start(void) +static void __init acpi_ec_ecdt_start(void) { + struct acpi_table_ecdt *ecdt_ptr; acpi_handle handle; + acpi_status status; - if (!boot_ec) - return -ENODEV; - /* In case acpi_ec_ecdt_start() is called after acpi_ec_add() */ - if (!boot_ec_is_ecdt) - return -ENODEV; + /* Bail out if a matching EC has been found in the namespace. */ + if (!boot_ec || boot_ec->handle != ACPI_ROOT_OBJECT) + return; - /* - * At this point, the namespace and the GPE is initialized, so - * start to find the namespace objects and handle the events. - * - * Note: ec->handle can be valid if this function is called after - * acpi_ec_add(), hence the fast path. - */ - if (boot_ec->handle == ACPI_ROOT_OBJECT) { - if (!acpi_ec_ecdt_get_handle(&handle)) - return -ENODEV; + /* Look up the object pointed to from the ECDT in the namespace. */ + status = acpi_get_table(ACPI_SIG_ECDT, 1, + (struct acpi_table_header **)&ecdt_ptr); + if (ACPI_FAILURE(status)) + return; + + status = acpi_get_handle(NULL, ecdt_ptr->id, &handle); + if (ACPI_SUCCESS(status)) { boot_ec->handle = handle; + + /* Add a special ACPI device object to represent the boot EC. */ + acpi_bus_register_early_device(ACPI_BUS_TYPE_ECDT_EC); } - /* Register to ACPI bus with PM ops attached */ - return acpi_bus_register_early_device(ACPI_BUS_TYPE_ECDT_EC); + acpi_put_table((struct acpi_table_header *)ecdt_ptr); } -#if 0 /* - * Some EC firmware variations refuses to respond QR_EC when SCI_EVT is not - * set, for which case, we complete the QR_EC without issuing it to the - * firmware. - * https://bugzilla.kernel.org/show_bug.cgi?id=82611 - * https://bugzilla.kernel.org/show_bug.cgi?id=97381 + * On some hardware it is necessary to clear events accumulated by the EC during + * sleep. These ECs stop reporting GPEs until they are manually polled, if too + * many events are accumulated. (e.g. Samsung Series 5/9 notebooks) + * + * https://bugzilla.kernel.org/show_bug.cgi?id=44161 + * + * Ideally, the EC should also be instructed NOT to accumulate events during + * sleep (which Windows seems to do somehow), but the interface to control this + * behaviour is not known at this time. + * + * Models known to be affected are Samsung 530Uxx/535Uxx/540Uxx/550Pxx/900Xxx, + * however it is very likely that other Samsung models are affected. + * + * On systems which don't accumulate _Q events during sleep, this extra check + * should be harmless. */ -static int ec_flag_query_handshake(const struct dmi_system_id *id) +static int ec_clear_on_resume(const struct dmi_system_id *id) { - pr_debug("Detected the EC firmware requiring QR_EC issued when SCI_EVT set\n"); - EC_FLAGS_QUERY_HANDSHAKE = 1; + pr_debug("Detected system needing EC poll on resume.\n"); + EC_FLAGS_CLEAR_ON_RESUME = 1; + ec_event_clearing = ACPI_EC_EVT_TIMING_STATUS; return 0; } -#endif /* * Some ECDTs contain wrong register addresses. @@ -1833,75 +1933,129 @@ static int ec_correct_ecdt(const struct dmi_system_id *id) } /* - * Some DSDTs contain wrong GPE setting. - * Asus FX502VD/VE, GL702VMK, X550VXK, X580VD - * https://bugzilla.kernel.org/show_bug.cgi?id=195651 + * Some ECDTs contain wrong GPE setting, but they share the same port addresses + * with DSDT EC, don't duplicate the DSDT EC with ECDT EC in this case. 
+ * https://bugzilla.kernel.org/show_bug.cgi?id=209989 */ -static int ec_honor_ecdt_gpe(const struct dmi_system_id *id) +static int ec_honor_dsdt_gpe(const struct dmi_system_id *id) { - pr_debug("Detected system needing ignore DSDT GPE setting.\n"); - EC_FLAGS_IGNORE_DSDT_GPE = 1; + pr_debug("Detected system needing DSDT GPE setting.\n"); + EC_FLAGS_TRUST_DSDT_GPE = 1; return 0; } static const struct dmi_system_id ec_dmi_table[] __initconst = { { - ec_correct_ecdt, "MSI MS-171F", { - DMI_MATCH(DMI_SYS_VENDOR, "Micro-Star"), - DMI_MATCH(DMI_PRODUCT_NAME, "MS-171F"),}, NULL}, + /* + * MSI MS-171F + * https://bugzilla.kernel.org/show_bug.cgi?id=12461 + */ + .callback = ec_correct_ecdt, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Micro-Star"), + DMI_MATCH(DMI_PRODUCT_NAME, "MS-171F"), + }, + }, { - ec_honor_ecdt_gpe, "ASUS FX502VD", { - DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), - DMI_MATCH(DMI_PRODUCT_NAME, "FX502VD"),}, NULL}, + /* + * HP Pavilion Gaming Laptop 15-cx0xxx + * https://bugzilla.kernel.org/show_bug.cgi?id=209989 + */ + .callback = ec_honor_dsdt_gpe, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "HP"), + DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion Gaming Laptop 15-cx0xxx"), + }, + }, { - ec_honor_ecdt_gpe, "ASUS FX502VE", { - DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), - DMI_MATCH(DMI_PRODUCT_NAME, "FX502VE"),}, NULL}, + /* + * HP Pavilion Gaming Laptop 15-cx0041ur + */ + .callback = ec_honor_dsdt_gpe, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "HP"), + DMI_MATCH(DMI_PRODUCT_NAME, "HP 15-cx0041ur"), + }, + }, { - ec_honor_ecdt_gpe, "ASUS GL702VMK", { - DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), - DMI_MATCH(DMI_PRODUCT_NAME, "GL702VMK"),}, NULL}, + /* + * HP Pavilion Gaming Laptop 15-dk1xxx + * https://github.com/systemd/systemd/issues/28942 + */ + .callback = ec_honor_dsdt_gpe, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "HP"), + DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion Gaming Laptop 15-dk1xxx"), + }, + }, { - ec_honor_ecdt_gpe, "ASUS X550VXK", { - DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), - DMI_MATCH(DMI_PRODUCT_NAME, "X550VXK"),}, NULL}, + /* + * HP 250 G7 Notebook PC + */ + .callback = ec_honor_dsdt_gpe, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "HP"), + DMI_MATCH(DMI_PRODUCT_NAME, "HP 250 G7 Notebook PC"), + }, + }, { - ec_honor_ecdt_gpe, "ASUS X580VD", { - DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), - DMI_MATCH(DMI_PRODUCT_NAME, "X580VD"),}, NULL}, - {}, + /* + * Samsung hardware + * https://bugzilla.kernel.org/show_bug.cgi?id=44161 + */ + .callback = ec_clear_on_resume, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."), + }, + }, + {} }; -int __init acpi_ec_ecdt_probe(void) +void __init acpi_ec_ecdt_probe(void) { - int ret; - acpi_status status; struct acpi_table_ecdt *ecdt_ptr; struct acpi_ec *ec; + acpi_status status; + int ret; - ec = acpi_ec_alloc(); - if (!ec) - return -ENOMEM; - /* - * Generate a boot ec context - */ + /* Generate a boot ec context. */ dmi_check_system(ec_dmi_table); status = acpi_get_table(ACPI_SIG_ECDT, 1, (struct acpi_table_header **)&ecdt_ptr); - if (ACPI_FAILURE(status)) { - ret = -ENODEV; - goto error; - } + if (ACPI_FAILURE(status)) + return; if (!ecdt_ptr->control.address || !ecdt_ptr->data.address) { /* * Asus X50GL: * https://bugzilla.kernel.org/show_bug.cgi?id=11880 */ - ret = -ENODEV; - goto error; + goto out; + } + + if (!strlen(ecdt_ptr->id)) { + /* + * The ECDT table on some MSI notebooks contains invalid data, together + * with an empty ID string (""). 
+ * + * Section 5.2.15 of the ACPI specification requires the ID string to be + * a "fully qualified reference to the (...) embedded controller device", + * so this string always has to start with a backslash. + * + * However some ThinkBook machines have a ECDT table with a valid EC + * description but an invalid ID string ("_SB.PC00.LPCB.EC0"). + * + * Because of this we only check if the ID string is empty in order to + * avoid the obvious cases. + */ + pr_err(FW_BUG "Ignoring ECDT due to empty ID string\n"); + goto out; } + ec = acpi_ec_alloc(); + if (!ec) + goto out; + if (EC_FLAGS_CORRECT_ECDT) { ec->command_addr = ecdt_ptr->data.address; ec->data_addr = ecdt_ptr->control.address; @@ -1909,17 +2063,33 @@ int __init acpi_ec_ecdt_probe(void) ec->command_addr = ecdt_ptr->control.address; ec->data_addr = ecdt_ptr->data.address; } - ec->gpe = ecdt_ptr->gpe; + + /* + * Ignore the GPE value on Reduced Hardware platforms. + * Some products have this set to an erroneous value. + */ + if (!acpi_gbl_reduced_hardware) + ec->gpe = ecdt_ptr->gpe; + + ec->handle = ACPI_ROOT_OBJECT; /* * At this point, the namespace is not initialized, so do not find * the namespace objects, or handle the events. */ - ret = acpi_config_boot_ec(ec, ACPI_ROOT_OBJECT, false, true); -error: - if (ret) + ret = acpi_ec_setup(ec, NULL, false); + if (ret) { acpi_ec_free(ec); - return ret; + goto out; + } + + boot_ec = ec; + boot_ec_is_ecdt = true; + + pr_info("Boot ECDT EC used to handle transactions\n"); + +out: + acpi_put_table((struct acpi_table_header *)ecdt_ptr); } #ifdef CONFIG_PM_SLEEP @@ -1928,7 +2098,7 @@ static int acpi_ec_suspend(struct device *dev) struct acpi_ec *ec = acpi_driver_data(to_acpi_device(dev)); - if (acpi_sleep_no_ec_events() && ec_freeze_events) + if (!pm_suspend_no_platform() && ec_freeze_events) acpi_ec_disable_event(ec); return 0; } @@ -1942,11 +2112,10 @@ static int acpi_ec_suspend_noirq(struct device *dev) * masked at the low level without side effects. */ if (ec_no_wakeup && test_bit(EC_FLAGS_STARTED, &ec->flags) && - ec->reference_count >= 1) + ec->gpe >= 0 && ec->reference_count >= 1) acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_DISABLE); - if (acpi_sleep_no_ec_events()) - acpi_ec_enter_noirq(ec); + acpi_ec_enter_noirq(ec); return 0; } @@ -1955,11 +2124,10 @@ static int acpi_ec_resume_noirq(struct device *dev) { struct acpi_ec *ec = acpi_driver_data(to_acpi_device(dev)); - if (acpi_sleep_no_ec_events()) - acpi_ec_leave_noirq(ec); + acpi_ec_leave_noirq(ec); if (ec_no_wakeup && test_bit(EC_FLAGS_STARTED, &ec->flags) && - ec->reference_count >= 1) + ec->gpe >= 0 && ec->reference_count >= 1) acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_ENABLE); return 0; @@ -1973,7 +2141,85 @@ static int acpi_ec_resume(struct device *dev) acpi_ec_enable_event(ec); return 0; } -#endif + +void acpi_ec_mark_gpe_for_wake(void) +{ + if (first_ec && !ec_no_wakeup) + acpi_mark_gpe_for_wake(NULL, first_ec->gpe); +} +EXPORT_SYMBOL_GPL(acpi_ec_mark_gpe_for_wake); + +void acpi_ec_set_gpe_wake_mask(u8 action) +{ + if (pm_suspend_no_platform() && first_ec && !ec_no_wakeup) + acpi_set_gpe_wake_mask(NULL, first_ec->gpe, action); +} + +static bool acpi_ec_work_in_progress(struct acpi_ec *ec) +{ + return ec->events_in_progress + ec->queries_in_progress > 0; +} + +bool acpi_ec_dispatch_gpe(void) +{ + bool work_in_progress = false; + + if (!first_ec) + return acpi_any_gpe_status_set(U32_MAX); + + /* + * Report wakeup if the status bit is set for any enabled GPE other + * than the EC one. 
+ */ + if (acpi_any_gpe_status_set(first_ec->gpe)) + return true; + + /* + * Cancel the SCI wakeup and process all pending events in case there + * are any wakeup ones in there. + * + * Note that if any non-EC GPEs are active at this point, the SCI will + * retrigger after the rearming in acpi_s2idle_wake(), so no events + * should be missed by canceling the wakeup here. + */ + pm_system_cancel_wakeup(); + + /* + * Dispatch the EC GPE in-band, but do not report wakeup in any case + * to allow the caller to process events properly after that. + */ + spin_lock_irq(&first_ec->lock); + + if (acpi_ec_gpe_status_set(first_ec)) { + pm_pr_dbg("ACPI EC GPE status set\n"); + + clear_gpe_and_advance_transaction(first_ec, false); + work_in_progress = acpi_ec_work_in_progress(first_ec); + } + + spin_unlock_irq(&first_ec->lock); + + if (!work_in_progress) + return false; + + pm_pr_dbg("ACPI EC GPE dispatched\n"); + + /* Drain EC work. */ + do { + acpi_ec_flush_work(); + + pm_pr_dbg("ACPI EC work flushed\n"); + + spin_lock_irq(&first_ec->lock); + + work_in_progress = acpi_ec_work_in_progress(first_ec); + + spin_unlock_irq(&first_ec->lock); + } while (work_in_progress && !pm_wakeup_pending()); + + return false; +} +#endif /* CONFIG_PM_SLEEP */ static const struct dev_pm_ops acpi_ec_pm = { SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(acpi_ec_suspend_noirq, acpi_ec_resume_noirq) @@ -2004,13 +2250,13 @@ static int param_get_event_clearing(char *buffer, { switch (ec_event_clearing) { case ACPI_EC_EVT_TIMING_STATUS: - return sprintf(buffer, "status"); + return sprintf(buffer, "status\n"); case ACPI_EC_EVT_TIMING_QUERY: - return sprintf(buffer, "query"); + return sprintf(buffer, "query\n"); case ACPI_EC_EVT_TIMING_EVENT: - return sprintf(buffer, "event"); + return sprintf(buffer, "event\n"); default: - return sprintf(buffer, "invalid"); + return sprintf(buffer, "invalid\n"); } return 0; } @@ -2030,59 +2276,97 @@ static struct acpi_driver acpi_ec_driver = { .drv.pm = &acpi_ec_pm, }; -static inline int acpi_ec_query_init(void) +static void acpi_ec_destroy_workqueues(void) { - if (!ec_query_wq) { - ec_query_wq = alloc_workqueue("kec_query", 0, - ec_max_queries); - if (!ec_query_wq) - return -ENODEV; + if (ec_wq) { + destroy_workqueue(ec_wq); + ec_wq = NULL; } - return 0; -} - -static inline void acpi_ec_query_exit(void) -{ if (ec_query_wq) { destroy_workqueue(ec_query_wq); ec_query_wq = NULL; } } +static int acpi_ec_init_workqueues(void) +{ + if (!ec_wq) + ec_wq = alloc_ordered_workqueue("kec", 0); + + if (!ec_query_wq) + ec_query_wq = alloc_workqueue("kec_query", WQ_PERCPU, + ec_max_queries); + + if (!ec_wq || !ec_query_wq) { + acpi_ec_destroy_workqueues(); + return -ENODEV; + } + return 0; +} + static const struct dmi_system_id acpi_ec_no_wakeup[] = { { - .ident = "Thinkpad X1 Carbon 6th", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), DMI_MATCH(DMI_PRODUCT_FAMILY, "Thinkpad X1 Carbon 6th"), }, }, { - .ident = "ThinkPad X1 Carbon 6th", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), - DMI_MATCH(DMI_PRODUCT_FAMILY, "ThinkPad X1 Carbon 6th"), + DMI_MATCH(DMI_PRODUCT_FAMILY, "ThinkPad X1 Yoga 3rd"), }, }, { - .ident = "ThinkPad X1 Yoga 3rd", .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), - DMI_MATCH(DMI_PRODUCT_FAMILY, "ThinkPad X1 Yoga 3rd"), + DMI_MATCH(DMI_SYS_VENDOR, "HP"), + DMI_MATCH(DMI_PRODUCT_FAMILY, "103C_5336AN HP ZHAN 66 Pro"), + }, + }, + /* + * Lenovo Legion Go S; touchscreen blocks HW sleep when woken up from EC + * https://gitlab.freedesktop.org/drm/amd/-/issues/3929 + */ + { + .matches = { + 
DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "83L3"), + } + }, + { + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "83N6"), + } + }, + { + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "83Q2"), + } + }, + { + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "83Q3"), + } + }, + { + // TUXEDO InfinityBook Pro AMD Gen9 + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "GXxHRXx"), }, }, { }, }; -int __init acpi_ec_init(void) +void __init acpi_ec_init(void) { int result; - int ecdt_fail, dsdt_fail; - /* register workqueue for _Qxx evaluations */ - result = acpi_ec_query_init(); + result = acpi_ec_init_workqueues(); if (result) - return result; + return; /* * Disable EC wakeup on following systems to prevent periodic @@ -2093,16 +2377,10 @@ int __init acpi_ec_init(void) pr_debug("Disabling EC wakeup on suspend-to-idle\n"); } - /* Drivers must be started after acpi_ec_query_init() */ - dsdt_fail = acpi_bus_register_driver(&acpi_ec_driver); - /* - * Register ECDT to ACPI bus only when PNP0C09 probe fails. This is - * useful for platforms (confirmed on ASUS X550ZE) with valid ECDT - * settings but invalid DSDT settings. - * https://bugzilla.kernel.org/show_bug.cgi?id=196847 - */ - ecdt_fail = acpi_ec_ecdt_start(); - return ecdt_fail && dsdt_fail ? -ENODEV : 0; + /* Driver must be registered after acpi_ec_init_workqueues(). */ + acpi_bus_register_driver(&acpi_ec_driver); + + acpi_ec_ecdt_start(); } /* EC driver currently not unloadable */ @@ -2111,6 +2389,6 @@ static void __exit acpi_ec_exit(void) { acpi_bus_unregister_driver(&acpi_ec_driver); - acpi_ec_query_exit(); + acpi_ec_destroy_workqueues(); } #endif /* 0 */ diff --git a/drivers/acpi/ec_sys.c b/drivers/acpi/ec_sys.c index dd70d6c2bca0..c074a0fae059 100644 --- a/drivers/acpi/ec_sys.c +++ b/drivers/acpi/ec_sys.c @@ -1,11 +1,10 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * ec_sys.c * * Copyright (C) 2010 SUSE Products GmbH/Novell * Author: * Thomas Renninger <trenn@suse.de> - * - * This work is licensed under the terms of the GNU GPL, version 2. 
*/ #include <linux/kernel.h> @@ -20,7 +19,7 @@ MODULE_DESCRIPTION("ACPI EC sysfs access driver"); MODULE_LICENSE("GPL"); static bool write_support; -module_param(write_support, bool, 0644); +module_param_hw(write_support, bool, other, 0644); MODULE_PARM_DESC(write_support, "Dangerous, reboot and removal of battery may " "be needed."); @@ -108,52 +107,32 @@ static const struct file_operations acpi_ec_io_ops = { .llseek = default_llseek, }; -static int acpi_ec_add_debugfs(struct acpi_ec *ec, unsigned int ec_device_count) +static void acpi_ec_add_debugfs(struct acpi_ec *ec, unsigned int ec_device_count) { struct dentry *dev_dir; char name[64]; umode_t mode = 0400; - if (ec_device_count == 0) { + if (ec_device_count == 0) acpi_ec_debugfs_dir = debugfs_create_dir("ec", NULL); - if (!acpi_ec_debugfs_dir) - return -ENOMEM; - } sprintf(name, "ec%u", ec_device_count); dev_dir = debugfs_create_dir(name, acpi_ec_debugfs_dir); - if (!dev_dir) { - if (ec_device_count != 0) - goto error; - return -ENOMEM; - } - if (!debugfs_create_x32("gpe", 0444, dev_dir, &first_ec->gpe)) - goto error; - if (!debugfs_create_bool("use_global_lock", 0444, dev_dir, - &first_ec->global_lock)) - goto error; + debugfs_create_x32("gpe", 0444, dev_dir, &first_ec->gpe); + debugfs_create_bool("use_global_lock", 0444, dev_dir, + &first_ec->global_lock); if (write_support) mode = 0600; - if (!debugfs_create_file("io", mode, dev_dir, ec, &acpi_ec_io_ops)) - goto error; - - return 0; - -error: - debugfs_remove_recursive(acpi_ec_debugfs_dir); - return -ENOMEM; + debugfs_create_file("io", mode, dev_dir, ec, &acpi_ec_io_ops); } static int __init acpi_ec_sys_init(void) { - int err = 0; if (first_ec) - err = acpi_ec_add_debugfs(first_ec, 0); - else - err = -ENODEV; - return err; + acpi_ec_add_debugfs(first_ec, 0); + return 0; } static void __exit acpi_ec_sys_exit(void) diff --git a/drivers/acpi/event.c b/drivers/acpi/event.c index 5a127f3f2d5c..96a9aaaaf9f7 100644 --- a/drivers/acpi/event.c +++ b/drivers/acpi/event.c @@ -7,6 +7,8 @@ * */ +#define pr_fmt(fmt) "ACPI: " fmt + #include <linux/spinlock.h> #include <linux/export.h> #include <linux/proc_fs.h> @@ -19,9 +21,6 @@ #include "internal.h" -#define _COMPONENT ACPI_SYSTEM_COMPONENT -ACPI_MODULE_NAME("event"); - /* ACPI notifier chain */ static BLOCKING_NOTIFIER_HEAD(acpi_chain_head); @@ -29,12 +28,12 @@ int acpi_notifier_call_chain(struct acpi_device *dev, u32 type, u32 data) { struct acpi_bus_event event; - strcpy(event.device_class, dev->pnp.device_class); - strcpy(event.bus_id, dev->pnp.bus_id); + strscpy(event.device_class, dev->pnp.device_class); + strscpy(event.bus_id, dev->pnp.bus_id); event.type = type; event.data = data; return (blocking_notifier_call_chain(&acpi_chain_head, 0, (void *)&event) - == NOTIFY_BAD) ? -EINVAL : 0; + == NOTIFY_BAD) ? 
-EINVAL : 0; } EXPORT_SYMBOL(acpi_notifier_call_chain); @@ -131,8 +130,8 @@ int acpi_bus_generate_netlink_event(const char *device_class, event = nla_data(attr); memset(event, 0, sizeof(struct acpi_genl_event)); - strcpy(event->device_class, device_class); - strcpy(event->bus_id, bus_id); + strscpy(event->device_class, device_class, sizeof(event->device_class)); + strscpy(event->bus_id, bus_id, sizeof(event->bus_id)); event->type = type; event->data = data; @@ -168,7 +167,7 @@ static int acpi_event_genetlink_init(void) static int __init acpi_event_init(void) { - int error = 0; + int error; if (acpi_disabled) return 0; @@ -176,8 +175,8 @@ static int __init acpi_event_init(void) /* create genetlink for acpi event */ error = acpi_event_genetlink_init(); if (error) - printk(KERN_WARNING PREFIX - "Failed to create genetlink family for ACPI event\n"); + pr_warn("Failed to create genetlink family for ACPI event\n"); + return 0; } diff --git a/drivers/acpi/evged.c b/drivers/acpi/evged.c index f13ba2c07667..5c35cbc7f6ff 100644 --- a/drivers/acpi/evged.c +++ b/drivers/acpi/evged.c @@ -1,17 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * Generic Event Device for ACPI. * * Copyright (c) 2016, The Linux Foundation. All rights reserved. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * * Generic Event Device allows platforms to handle interrupts in ACPI * ASL statements. It follows very similar to _EVT method approach * from GPIO events. All interrupts are listed in _CRS and the handler @@ -37,7 +29,6 @@ * } * } * } - * */ #include <linux/err.h> @@ -88,6 +79,8 @@ static acpi_status acpi_ged_request_interrupt(struct acpi_resource *ares, struct resource r; struct acpi_resource_irq *p = &ares->data.irq; struct acpi_resource_extended_irq *pext = &ares->data.extended_irq; + char ev_name[5]; + u8 trigger; if (ares->type == ACPI_RESOURCE_TYPE_END_TAG) return AE_OK; @@ -96,14 +89,28 @@ static acpi_status acpi_ged_request_interrupt(struct acpi_resource *ares, dev_err(dev, "unable to parse IRQ resource\n"); return AE_ERROR; } - if (ares->type == ACPI_RESOURCE_TYPE_IRQ) + if (ares->type == ACPI_RESOURCE_TYPE_IRQ) { gsi = p->interrupts[0]; - else + trigger = p->triggering; + } else { gsi = pext->interrupts[0]; + trigger = pext->triggering; + } irq = r.start; - if (ACPI_FAILURE(acpi_get_handle(handle, "_EVT", &evt_handle))) { + switch (gsi) { + case 0 ... 255: + sprintf(ev_name, "_%c%02X", + trigger == ACPI_EDGE_SENSITIVE ? 
'E' : 'L', gsi); + + if (ACPI_SUCCESS(acpi_get_handle(handle, ev_name, &evt_handle))) + break; + fallthrough; + default: + if (ACPI_SUCCESS(acpi_get_handle(handle, "_EVT", &evt_handle))) + break; + dev_err(dev, "cannot locate _EVT method\n"); return AE_ERROR; } @@ -166,10 +173,9 @@ static void ged_shutdown(struct platform_device *pdev) } } -static int ged_remove(struct platform_device *pdev) +static void ged_remove(struct platform_device *pdev) { ged_shutdown(pdev); - return 0; } static const struct acpi_device_id ged_acpi_ids[] = { diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan.c deleted file mode 100644 index fe0183d48dcd..000000000000 --- a/drivers/acpi/fan.c +++ /dev/null @@ -1,427 +0,0 @@ -/* - * acpi_fan.c - ACPI Fan Driver ($Revision: 29 $) - * - * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com> - * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com> - * - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or (at - * your option) any later version. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - */ - -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/init.h> -#include <linux/types.h> -#include <linux/uaccess.h> -#include <linux/thermal.h> -#include <linux/acpi.h> -#include <linux/platform_device.h> -#include <linux/sort.h> - -MODULE_AUTHOR("Paul Diefenbaugh"); -MODULE_DESCRIPTION("ACPI Fan Driver"); -MODULE_LICENSE("GPL"); - -static int acpi_fan_probe(struct platform_device *pdev); -static int acpi_fan_remove(struct platform_device *pdev); - -static const struct acpi_device_id fan_device_ids[] = { - {"PNP0C0B", 0}, - {"INT3404", 0}, - {"", 0}, -}; -MODULE_DEVICE_TABLE(acpi, fan_device_ids); - -#ifdef CONFIG_PM_SLEEP -static int acpi_fan_suspend(struct device *dev); -static int acpi_fan_resume(struct device *dev); -static const struct dev_pm_ops acpi_fan_pm = { - .resume = acpi_fan_resume, - .freeze = acpi_fan_suspend, - .thaw = acpi_fan_resume, - .restore = acpi_fan_resume, -}; -#define FAN_PM_OPS_PTR (&acpi_fan_pm) -#else -#define FAN_PM_OPS_PTR NULL -#endif - -struct acpi_fan_fps { - u64 control; - u64 trip_point; - u64 speed; - u64 noise_level; - u64 power; -}; - -struct acpi_fan_fif { - u64 revision; - u64 fine_grain_ctrl; - u64 step_size; - u64 low_speed_notification; -}; - -struct acpi_fan { - bool acpi4; - struct acpi_fan_fif fif; - struct acpi_fan_fps *fps; - int fps_count; - struct thermal_cooling_device *cdev; -}; - -static struct platform_driver acpi_fan_driver = { - .probe = acpi_fan_probe, - .remove = acpi_fan_remove, - .driver = { - .name = "acpi-fan", - .acpi_match_table = fan_device_ids, - .pm = FAN_PM_OPS_PTR, - }, -}; - -/* thermal cooling device callbacks */ -static int fan_get_max_state(struct thermal_cooling_device *cdev, unsigned long - *state) -{ - struct acpi_device *device = cdev->devdata; - struct acpi_fan *fan = acpi_driver_data(device); - - if (fan->acpi4) - *state = fan->fps_count - 1; - else - *state = 1; - return 0; -} - -static int fan_get_state_acpi4(struct acpi_device 
*device, unsigned long *state) -{ - struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; - struct acpi_fan *fan = acpi_driver_data(device); - union acpi_object *obj; - acpi_status status; - int control, i; - - status = acpi_evaluate_object(device->handle, "_FST", NULL, &buffer); - if (ACPI_FAILURE(status)) { - dev_err(&device->dev, "Get fan state failed\n"); - return status; - } - - obj = buffer.pointer; - if (!obj || obj->type != ACPI_TYPE_PACKAGE || - obj->package.count != 3 || - obj->package.elements[1].type != ACPI_TYPE_INTEGER) { - dev_err(&device->dev, "Invalid _FST data\n"); - status = -EINVAL; - goto err; - } - - control = obj->package.elements[1].integer.value; - for (i = 0; i < fan->fps_count; i++) { - /* - * When Fine Grain Control is set, return the state - * corresponding to maximum fan->fps[i].control - * value compared to the current speed. Here the - * fan->fps[] is sorted array with increasing speed. - */ - if (fan->fif.fine_grain_ctrl && control < fan->fps[i].control) { - i = (i > 0) ? i - 1 : 0; - break; - } else if (control == fan->fps[i].control) { - break; - } - } - if (i == fan->fps_count) { - dev_dbg(&device->dev, "Invalid control value returned\n"); - status = -EINVAL; - goto err; - } - - *state = i; - -err: - kfree(obj); - return status; -} - -static int fan_get_state(struct acpi_device *device, unsigned long *state) -{ - int result; - int acpi_state = ACPI_STATE_D0; - - result = acpi_device_update_power(device, &acpi_state); - if (result) - return result; - - *state = acpi_state == ACPI_STATE_D3_COLD - || acpi_state == ACPI_STATE_D3_HOT ? - 0 : (acpi_state == ACPI_STATE_D0 ? 1 : -1); - return 0; -} - -static int fan_get_cur_state(struct thermal_cooling_device *cdev, unsigned long - *state) -{ - struct acpi_device *device = cdev->devdata; - struct acpi_fan *fan = acpi_driver_data(device); - - if (fan->acpi4) - return fan_get_state_acpi4(device, state); - else - return fan_get_state(device, state); -} - -static int fan_set_state(struct acpi_device *device, unsigned long state) -{ - if (state != 0 && state != 1) - return -EINVAL; - - return acpi_device_set_power(device, - state ? 
ACPI_STATE_D0 : ACPI_STATE_D3_COLD); -} - -static int fan_set_state_acpi4(struct acpi_device *device, unsigned long state) -{ - struct acpi_fan *fan = acpi_driver_data(device); - acpi_status status; - - if (state >= fan->fps_count) - return -EINVAL; - - status = acpi_execute_simple_method(device->handle, "_FSL", - fan->fps[state].control); - if (ACPI_FAILURE(status)) { - dev_dbg(&device->dev, "Failed to set state by _FSL\n"); - return status; - } - - return 0; -} - -static int -fan_set_cur_state(struct thermal_cooling_device *cdev, unsigned long state) -{ - struct acpi_device *device = cdev->devdata; - struct acpi_fan *fan = acpi_driver_data(device); - - if (fan->acpi4) - return fan_set_state_acpi4(device, state); - else - return fan_set_state(device, state); -} - -static const struct thermal_cooling_device_ops fan_cooling_ops = { - .get_max_state = fan_get_max_state, - .get_cur_state = fan_get_cur_state, - .set_cur_state = fan_set_cur_state, -}; - -/* -------------------------------------------------------------------------- - * Driver Interface - * -------------------------------------------------------------------------- -*/ - -static bool acpi_fan_is_acpi4(struct acpi_device *device) -{ - return acpi_has_method(device->handle, "_FIF") && - acpi_has_method(device->handle, "_FPS") && - acpi_has_method(device->handle, "_FSL") && - acpi_has_method(device->handle, "_FST"); -} - -static int acpi_fan_get_fif(struct acpi_device *device) -{ - struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; - struct acpi_fan *fan = acpi_driver_data(device); - struct acpi_buffer format = { sizeof("NNNN"), "NNNN" }; - struct acpi_buffer fif = { sizeof(fan->fif), &fan->fif }; - union acpi_object *obj; - acpi_status status; - - status = acpi_evaluate_object(device->handle, "_FIF", NULL, &buffer); - if (ACPI_FAILURE(status)) - return status; - - obj = buffer.pointer; - if (!obj || obj->type != ACPI_TYPE_PACKAGE) { - dev_err(&device->dev, "Invalid _FIF data\n"); - status = -EINVAL; - goto err; - } - - status = acpi_extract_package(obj, &format, &fif); - if (ACPI_FAILURE(status)) { - dev_err(&device->dev, "Invalid _FIF element\n"); - status = -EINVAL; - } - -err: - kfree(obj); - return status; -} - -static int acpi_fan_speed_cmp(const void *a, const void *b) -{ - const struct acpi_fan_fps *fps1 = a; - const struct acpi_fan_fps *fps2 = b; - return fps1->speed - fps2->speed; -} - -static int acpi_fan_get_fps(struct acpi_device *device) -{ - struct acpi_fan *fan = acpi_driver_data(device); - struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; - union acpi_object *obj; - acpi_status status; - int i; - - status = acpi_evaluate_object(device->handle, "_FPS", NULL, &buffer); - if (ACPI_FAILURE(status)) - return status; - - obj = buffer.pointer; - if (!obj || obj->type != ACPI_TYPE_PACKAGE || obj->package.count < 2) { - dev_err(&device->dev, "Invalid _FPS data\n"); - status = -EINVAL; - goto err; - } - - fan->fps_count = obj->package.count - 1; /* minus revision field */ - fan->fps = devm_kcalloc(&device->dev, - fan->fps_count, sizeof(struct acpi_fan_fps), - GFP_KERNEL); - if (!fan->fps) { - dev_err(&device->dev, "Not enough memory\n"); - status = -ENOMEM; - goto err; - } - for (i = 0; i < fan->fps_count; i++) { - struct acpi_buffer format = { sizeof("NNNNN"), "NNNNN" }; - struct acpi_buffer fps = { sizeof(fan->fps[i]), &fan->fps[i] }; - status = acpi_extract_package(&obj->package.elements[i + 1], - &format, &fps); - if (ACPI_FAILURE(status)) { - dev_err(&device->dev, "Invalid _FPS element\n"); - break; - } 
- } - - /* sort the state array according to fan speed in increase order */ - sort(fan->fps, fan->fps_count, sizeof(*fan->fps), - acpi_fan_speed_cmp, NULL); - -err: - kfree(obj); - return status; -} - -static int acpi_fan_probe(struct platform_device *pdev) -{ - int result = 0; - struct thermal_cooling_device *cdev; - struct acpi_fan *fan; - struct acpi_device *device = ACPI_COMPANION(&pdev->dev); - char *name; - - fan = devm_kzalloc(&pdev->dev, sizeof(*fan), GFP_KERNEL); - if (!fan) { - dev_err(&device->dev, "No memory for fan\n"); - return -ENOMEM; - } - device->driver_data = fan; - platform_set_drvdata(pdev, fan); - - if (acpi_fan_is_acpi4(device)) { - if (acpi_fan_get_fif(device) || acpi_fan_get_fps(device)) - goto end; - fan->acpi4 = true; - } else { - result = acpi_device_update_power(device, NULL); - if (result) { - dev_err(&device->dev, "Failed to set initial power state\n"); - goto end; - } - } - - if (!strncmp(pdev->name, "PNP0C0B", strlen("PNP0C0B"))) - name = "Fan"; - else - name = acpi_device_bid(device); - - cdev = thermal_cooling_device_register(name, device, - &fan_cooling_ops); - if (IS_ERR(cdev)) { - result = PTR_ERR(cdev); - goto end; - } - - dev_dbg(&pdev->dev, "registered as cooling_device%d\n", cdev->id); - - fan->cdev = cdev; - result = sysfs_create_link(&pdev->dev.kobj, - &cdev->device.kobj, - "thermal_cooling"); - if (result) - dev_err(&pdev->dev, "Failed to create sysfs link 'thermal_cooling'\n"); - - result = sysfs_create_link(&cdev->device.kobj, - &pdev->dev.kobj, - "device"); - if (result) - dev_err(&pdev->dev, "Failed to create sysfs link 'device'\n"); - -end: - return result; -} - -static int acpi_fan_remove(struct platform_device *pdev) -{ - struct acpi_fan *fan = platform_get_drvdata(pdev); - - sysfs_remove_link(&pdev->dev.kobj, "thermal_cooling"); - sysfs_remove_link(&fan->cdev->device.kobj, "device"); - thermal_cooling_device_unregister(fan->cdev); - - return 0; -} - -#ifdef CONFIG_PM_SLEEP -static int acpi_fan_suspend(struct device *dev) -{ - struct acpi_fan *fan = dev_get_drvdata(dev); - if (fan->acpi4) - return 0; - - acpi_device_set_power(ACPI_COMPANION(dev), ACPI_STATE_D0); - - return AE_OK; -} - -static int acpi_fan_resume(struct device *dev) -{ - int result; - struct acpi_fan *fan = dev_get_drvdata(dev); - - if (fan->acpi4) - return 0; - - result = acpi_device_update_power(ACPI_COMPANION(dev), NULL); - if (result) - dev_err(dev, "Error updating fan power state\n"); - - return result; -} -#endif - -module_platform_driver(acpi_fan_driver); diff --git a/drivers/acpi/fan.h b/drivers/acpi/fan.h new file mode 100644 index 000000000000..97ce3212edf3 --- /dev/null +++ b/drivers/acpi/fan.h @@ -0,0 +1,114 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +/* + * ACPI fan device IDs are shared between the fan driver and the device power + * management code. + * + * Add new device IDs before the generic ACPI fan one. 
+ */ + +#ifndef _ACPI_FAN_H_ +#define _ACPI_FAN_H_ + +#include <linux/kconfig.h> +#include <linux/limits.h> + +#define ACPI_FAN_DEVICE_IDS \ + {"INT3404", }, /* Fan */ \ + {"INTC1044", }, /* Fan for Tiger Lake generation */ \ + {"INTC1048", }, /* Fan for Alder Lake generation */ \ + {"INTC1063", }, /* Fan for Meteor Lake generation */ \ + {"INTC106A", }, /* Fan for Lunar Lake generation */ \ + {"INTC10A2", }, /* Fan for Raptor Lake generation */ \ + {"INTC10D6", }, /* Fan for Panther Lake generation */ \ + {"INTC10FE", }, /* Fan for Wildcat Lake generation */ \ + {"INTC10F5", }, /* Fan for Nova Lake generation */ \ + {"PNP0C0B", } /* Generic ACPI fan */ + +#define ACPI_FPS_NAME_LEN 20 + +struct acpi_fan_fps { + u64 control; + u64 trip_point; + u64 speed; + u64 noise_level; + u64 power; + char name[ACPI_FPS_NAME_LEN]; + struct device_attribute dev_attr; +}; + +struct acpi_fan_fif { + u8 revision; + u8 fine_grain_ctrl; + u8 step_size; + u8 low_speed_notification; +}; + +struct acpi_fan_fst { + u64 revision; + u64 control; + u64 speed; +}; + +struct acpi_fan { + acpi_handle handle; + bool acpi4; + bool has_fst; + struct acpi_fan_fif fif; + struct acpi_fan_fps *fps; + int fps_count; + /* A value of 0 means that trip-point-related functions are not supported */ + u32 fan_trip_granularity; +#if IS_REACHABLE(CONFIG_HWMON) + struct device *hdev; +#endif + struct thermal_cooling_device *cdev; + struct device_attribute fst_speed; + struct device_attribute fine_grain_control; +}; + +/** + * acpi_fan_speed_valid - Check if fan speed value is valid + * @speed: Speed value returned by the ACPI firmware + * + * Check if the fan speed value returned by the ACPI firmware is valid. This function is + * necessary as ACPI firmware implementations can return 0xFFFFFFFF to signal that the + * ACPI fan does not support speed reporting. Additionally, some buggy ACPI firmware + * implementations return a value larger than the 32-bit integer value defined by + * the ACPI specification when using placeholder values. Such invalid values are also + * detected by this function. + * + * Returns: True if the fan speed value is valid, false otherwise. 
+ */ +static inline bool acpi_fan_power_valid(u64 power) +{ + return power < U32_MAX; +} + +int acpi_fan_get_fst(acpi_handle handle, struct acpi_fan_fst *fst); +int acpi_fan_create_attributes(struct acpi_device *device); +void acpi_fan_delete_attributes(struct acpi_device *device); + +#if IS_REACHABLE(CONFIG_HWMON) +int devm_acpi_fan_create_hwmon(struct device *dev); +void acpi_fan_notify_hwmon(struct device *dev); +#else +static inline int devm_acpi_fan_create_hwmon(struct device *dev) { return 0; }; +static inline void acpi_fan_notify_hwmon(struct device *dev) { }; +#endif + +#endif diff --git a/drivers/acpi/fan_attr.c b/drivers/acpi/fan_attr.c new file mode 100644 index 000000000000..9b7fa52f3c2a --- /dev/null +++ b/drivers/acpi/fan_attr.c @@ -0,0 +1,144 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * fan_attr.c - Create extra attributes for ACPI Fan driver + * + * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com> + * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com> + * Copyright (C) 2022 Intel Corporation. All rights reserved. + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/acpi.h> + +#include "fan.h" + +MODULE_LICENSE("GPL"); + +static ssize_t show_state(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct acpi_fan_fps *fps = container_of(attr, struct acpi_fan_fps, dev_attr); + int count; + + if (fps->control == 0xFFFFFFFF || fps->control > 100) + count = sysfs_emit(buf, "not-defined:"); + else + count = sysfs_emit(buf, "%lld:", fps->control); + + if (fps->trip_point == 0xFFFFFFFF || fps->trip_point > 9) + count += sysfs_emit_at(buf, count, "not-defined:"); + else + count += sysfs_emit_at(buf, count, "%lld:", fps->trip_point); + + if (fps->speed == 0xFFFFFFFF) + count += sysfs_emit_at(buf, count, "not-defined:"); + else + count += sysfs_emit_at(buf, count, "%lld:", fps->speed); + + if (fps->noise_level == 0xFFFFFFFF) + count += sysfs_emit_at(buf, count, "not-defined:"); + else + count += sysfs_emit_at(buf, count, "%lld:", fps->noise_level * 100); + + if (fps->power == 0xFFFFFFFF) + count += sysfs_emit_at(buf, count, "not-defined\n"); + else + count += sysfs_emit_at(buf, count, "%lld\n", fps->power); + + return count; +} + +static ssize_t show_fan_speed(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct acpi_device *acpi_dev = container_of(dev, struct acpi_device, dev); + struct acpi_fan_fst fst; + int status; + + status = acpi_fan_get_fst(acpi_dev->handle, &fst); + if (status) + return status; + + return sysfs_emit(buf, "%lld\n", fst.speed); +} + +static ssize_t show_fine_grain_control(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct acpi_device *acpi_dev = container_of(dev, struct acpi_device, dev); + struct acpi_fan *fan = acpi_driver_data(acpi_dev); + + return sysfs_emit(buf, "%d\n", fan->fif.fine_grain_ctrl); +} + +int acpi_fan_create_attributes(struct acpi_device *device) +{ + struct acpi_fan *fan = acpi_driver_data(device); + int i, status; + + /* _FST is present if we are here */ + sysfs_attr_init(&fan->fst_speed.attr); + fan->fst_speed.show = show_fan_speed; + fan->fst_speed.store = NULL; + fan->fst_speed.attr.name = "fan_speed_rpm"; + fan->fst_speed.attr.mode = 0444; + status = sysfs_create_file(&device->dev.kobj, &fan->fst_speed.attr); + if (status) + return status; + + if (!fan->acpi4) + return 0; + + sysfs_attr_init(&fan->fine_grain_control.attr); + fan->fine_grain_control.show = show_fine_grain_control; + 
fan->fine_grain_control.store = NULL; + fan->fine_grain_control.attr.name = "fine_grain_control"; + fan->fine_grain_control.attr.mode = 0444; + status = sysfs_create_file(&device->dev.kobj, &fan->fine_grain_control.attr); + if (status) + goto rem_fst_attr; + + for (i = 0; i < fan->fps_count; ++i) { + struct acpi_fan_fps *fps = &fan->fps[i]; + + snprintf(fps->name, ACPI_FPS_NAME_LEN, "state%d", i); + sysfs_attr_init(&fps->dev_attr.attr); + fps->dev_attr.show = show_state; + fps->dev_attr.store = NULL; + fps->dev_attr.attr.name = fps->name; + fps->dev_attr.attr.mode = 0444; + status = sysfs_create_file(&device->dev.kobj, &fps->dev_attr.attr); + if (status) { + int j; + + for (j = 0; j < i; ++j) + sysfs_remove_file(&device->dev.kobj, &fan->fps[j].dev_attr.attr); + goto rem_fine_grain_attr; + } + } + + return 0; + +rem_fine_grain_attr: + sysfs_remove_file(&device->dev.kobj, &fan->fine_grain_control.attr); + +rem_fst_attr: + sysfs_remove_file(&device->dev.kobj, &fan->fst_speed.attr); + + return status; +} + +void acpi_fan_delete_attributes(struct acpi_device *device) +{ + struct acpi_fan *fan = acpi_driver_data(device); + int i; + + sysfs_remove_file(&device->dev.kobj, &fan->fst_speed.attr); + + if (!fan->acpi4) + return; + + for (i = 0; i < fan->fps_count; ++i) + sysfs_remove_file(&device->dev.kobj, &fan->fps[i].dev_attr.attr); + + sysfs_remove_file(&device->dev.kobj, &fan->fine_grain_control.attr); +} diff --git a/drivers/acpi/fan_core.c b/drivers/acpi/fan_core.c new file mode 100644 index 000000000000..fb08b8549ed7 --- /dev/null +++ b/drivers/acpi/fan_core.c @@ -0,0 +1,701 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * fan_core.c - ACPI Fan core Driver + * + * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com> + * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com> + * Copyright (C) 2022 Intel Corporation. All rights reserved. + */ + +#include <linux/bits.h> +#include <linux/kernel.h> +#include <linux/limits.h> +#include <linux/math.h> +#include <linux/math64.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/types.h> +#include <linux/uaccess.h> +#include <linux/uuid.h> +#include <linux/thermal.h> +#include <linux/acpi.h> +#include <linux/platform_device.h> +#include <linux/sort.h> + +#include "fan.h" + +#define ACPI_FAN_NOTIFY_STATE_CHANGED 0x80 + +/* + * Defined inside the "Fan Noise Signal" section at + * https://learn.microsoft.com/en-us/windows-hardware/design/device-experiences/design-guide. + */ +static const guid_t acpi_fan_microsoft_guid = GUID_INIT(0xA7611840, 0x99FE, 0x41AE, 0xA4, 0x88, + 0x35, 0xC7, 0x59, 0x26, 0xC8, 0xEB); +#define ACPI_FAN_DSM_GET_TRIP_POINT_GRANULARITY 1 +#define ACPI_FAN_DSM_SET_TRIP_POINTS 2 +#define ACPI_FAN_DSM_GET_OPERATING_RANGES 3 + +/* + * Ensures that fans with a very low trip point granularity + * do not send too many notifications. 
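+ * + * The value is in RPM and is applied on both sides of the current fan speed + * when new trip points are programmed (see acpi_fan_dsm_update_trips_points()).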
+ */ +static uint min_trip_distance = 100; +module_param(min_trip_distance, uint, 0); +MODULE_PARM_DESC(min_trip_distance, "Minimum distance between fan speed trip points in RPM"); + +static const struct acpi_device_id fan_device_ids[] = { + ACPI_FAN_DEVICE_IDS, + {"", 0}, +}; +MODULE_DEVICE_TABLE(acpi, fan_device_ids); + +/* thermal cooling device callbacks */ +static int fan_get_max_state(struct thermal_cooling_device *cdev, unsigned long + *state) +{ + struct acpi_device *device = cdev->devdata; + struct acpi_fan *fan = acpi_driver_data(device); + + if (fan->acpi4) { + if (fan->fif.fine_grain_ctrl) + *state = 100 / fan->fif.step_size; + else + *state = fan->fps_count - 1; + } else { + *state = 1; + } + + return 0; +} + +int acpi_fan_get_fst(acpi_handle handle, struct acpi_fan_fst *fst) +{ + struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; + union acpi_object *obj; + acpi_status status; + int ret = 0; + + status = acpi_evaluate_object(handle, "_FST", NULL, &buffer); + if (ACPI_FAILURE(status)) + return -EIO; + + obj = buffer.pointer; + if (!obj) + return -ENODATA; + + if (obj->type != ACPI_TYPE_PACKAGE || obj->package.count != 3) { + ret = -EPROTO; + goto err; + } + + if (obj->package.elements[0].type != ACPI_TYPE_INTEGER || + obj->package.elements[1].type != ACPI_TYPE_INTEGER || + obj->package.elements[2].type != ACPI_TYPE_INTEGER) { + ret = -EPROTO; + goto err; + } + + fst->revision = obj->package.elements[0].integer.value; + fst->control = obj->package.elements[1].integer.value; + fst->speed = obj->package.elements[2].integer.value; + +err: + kfree(obj); + return ret; +} + +static int fan_get_state_acpi4(struct acpi_device *device, unsigned long *state) +{ + struct acpi_fan *fan = acpi_driver_data(device); + struct acpi_fan_fst fst; + int status, i; + + status = acpi_fan_get_fst(device->handle, &fst); + if (status) + return status; + + if (fan->fif.fine_grain_ctrl) { + /* Per the spec, this control value should match what was set via _FSL */ + if (fst.control > 100) { + dev_dbg(&device->dev, "Invalid control value returned\n"); + goto match_fps; + } + + *state = (int) fst.control / fan->fif.step_size; + return 0; + } + +match_fps: + for (i = 0; i < fan->fps_count; i++) { + if (fst.control == fan->fps[i].control) + break; + } + if (i == fan->fps_count) { + dev_dbg(&device->dev, "No matching fps control value\n"); + return -EINVAL; + } + + *state = i; + + return status; +} + +static int fan_get_state(struct acpi_device *device, unsigned long *state) +{ + int result; + int acpi_state = ACPI_STATE_D0; + + result = acpi_device_update_power(device, &acpi_state); + if (result) + return result; + + *state = acpi_state == ACPI_STATE_D3_COLD + || acpi_state == ACPI_STATE_D3_HOT ? + 0 : (acpi_state == ACPI_STATE_D0 ? 1 : -1); + return 0; +} + +static int fan_get_cur_state(struct thermal_cooling_device *cdev, unsigned long + *state) +{ + struct acpi_device *device = cdev->devdata; + struct acpi_fan *fan = acpi_driver_data(device); + + if (fan->acpi4) + return fan_get_state_acpi4(device, state); + else + return fan_get_state(device, state); +} + +static int fan_set_state(struct acpi_device *device, unsigned long state) +{ + if (state != 0 && state != 1) + return -EINVAL; + + return acpi_device_set_power(device, + state ? 
ACPI_STATE_D0 : ACPI_STATE_D3_COLD); +} + +static int fan_set_state_acpi4(struct acpi_device *device, unsigned long state) +{ + struct acpi_fan *fan = acpi_driver_data(device); + acpi_status status; + u64 value = state; + int max_state; + + if (fan->fif.fine_grain_ctrl) + max_state = 100 / fan->fif.step_size; + else + max_state = fan->fps_count - 1; + + if (state > max_state) + return -EINVAL; + + if (fan->fif.fine_grain_ctrl) { + value *= fan->fif.step_size; + /* The spec allows rounding up to 100 for the last step only */ + if (value + fan->fif.step_size > 100) + value = 100; + } else { + value = fan->fps[state].control; + } + + status = acpi_execute_simple_method(device->handle, "_FSL", value); + if (ACPI_FAILURE(status)) { + dev_dbg(&device->dev, "Failed to set state by _FSL\n"); + return -ENODEV; + } + + return 0; +} + +static int +fan_set_cur_state(struct thermal_cooling_device *cdev, unsigned long state) +{ + struct acpi_device *device = cdev->devdata; + struct acpi_fan *fan = acpi_driver_data(device); + + if (fan->acpi4) + return fan_set_state_acpi4(device, state); + else + return fan_set_state(device, state); +} + +static const struct thermal_cooling_device_ops fan_cooling_ops = { + .get_max_state = fan_get_max_state, + .get_cur_state = fan_get_cur_state, + .set_cur_state = fan_set_cur_state, +}; + +/* -------------------------------------------------------------------------- + * Driver Interface + * -------------------------------------------------------------------------- +*/ + +static int acpi_fan_get_fif(struct acpi_device *device) +{ + struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; + struct acpi_fan *fan = acpi_driver_data(device); + struct acpi_buffer format = { sizeof("NNNN"), "NNNN" }; + u64 fields[4]; + struct acpi_buffer fif = { sizeof(fields), fields }; + union acpi_object *obj; + acpi_status status; + + status = acpi_evaluate_object(device->handle, "_FIF", NULL, &buffer); + if (ACPI_FAILURE(status)) + return status; + + obj = buffer.pointer; + if (!obj || obj->type != ACPI_TYPE_PACKAGE) { + dev_err(&device->dev, "Invalid _FIF data\n"); + status = -EINVAL; + goto err; + } + + status = acpi_extract_package(obj, &format, &fif); + if (ACPI_FAILURE(status)) { + dev_err(&device->dev, "Invalid _FIF element\n"); + status = -EINVAL; + goto err; + } + + fan->fif.revision = fields[0]; + fan->fif.fine_grain_ctrl = fields[1]; + fan->fif.step_size = fields[2]; + fan->fif.low_speed_notification = fields[3]; + + /* If the firmware erroneously reports a step size of 0, change it to 1 */ + if (!fan->fif.step_size) + fan->fif.step_size = 1; + /* If the step size is > 9, clamp it to 9 (the spec only allows values 1-9) */ + else if (fan->fif.step_size > 9) + fan->fif.step_size = 9; +err: + kfree(obj); + return status; +} + +static int acpi_fan_speed_cmp(const void *a, const void *b) +{ + const struct acpi_fan_fps *fps1 = a; + const struct acpi_fan_fps *fps2 = b; + return fps1->speed - fps2->speed; +} + +static int acpi_fan_get_fps(struct acpi_device *device) +{ + struct acpi_fan *fan = acpi_driver_data(device); + struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; + union acpi_object *obj; + acpi_status status; + int i; + + status = acpi_evaluate_object(device->handle, "_FPS", NULL, &buffer); + if (ACPI_FAILURE(status)) + return status; + + obj = buffer.pointer; + if (!obj || obj->type != ACPI_TYPE_PACKAGE || obj->package.count < 2) { + dev_err(&device->dev, "Invalid _FPS data\n"); + status = -EINVAL; + goto err; + } + + fan->fps_count = obj->package.count - 1; /* minus revision field */ + fan->fps = 
devm_kcalloc(&device->dev, + fan->fps_count, sizeof(struct acpi_fan_fps), + GFP_KERNEL); + if (!fan->fps) { + dev_err(&device->dev, "Not enough memory\n"); + status = -ENOMEM; + goto err; + } + for (i = 0; i < fan->fps_count; i++) { + struct acpi_buffer format = { sizeof("NNNNN"), "NNNNN" }; + struct acpi_buffer fps = { offsetof(struct acpi_fan_fps, name), + &fan->fps[i] }; + status = acpi_extract_package(&obj->package.elements[i + 1], + &format, &fps); + if (ACPI_FAILURE(status)) { + dev_err(&device->dev, "Invalid _FPS element\n"); + goto err; + } + } + + /* sort the state array by fan speed, in increasing order */ + sort(fan->fps, fan->fps_count, sizeof(*fan->fps), + acpi_fan_speed_cmp, NULL); + +err: + kfree(obj); + return status; +} + +static int acpi_fan_dsm_init(struct device *dev) +{ + union acpi_object dummy = { + .package = { + .type = ACPI_TYPE_PACKAGE, + .count = 0, + .elements = NULL, + }, + }; + struct acpi_fan *fan = dev_get_drvdata(dev); + union acpi_object *obj; + int ret = 0; + + if (!acpi_check_dsm(fan->handle, &acpi_fan_microsoft_guid, 0, + BIT(ACPI_FAN_DSM_GET_TRIP_POINT_GRANULARITY) | + BIT(ACPI_FAN_DSM_SET_TRIP_POINTS))) + return 0; + + dev_info(dev, "Using Microsoft fan extensions\n"); + + obj = acpi_evaluate_dsm_typed(fan->handle, &acpi_fan_microsoft_guid, 0, + ACPI_FAN_DSM_GET_TRIP_POINT_GRANULARITY, &dummy, + ACPI_TYPE_INTEGER); + if (!obj) + return -EIO; + + if (obj->integer.value > U32_MAX) + ret = -EOVERFLOW; + else + fan->fan_trip_granularity = obj->integer.value; + + kfree(obj); + + return ret; +} + +static int acpi_fan_dsm_set_trip_points(struct device *dev, u64 upper, u64 lower) +{ + union acpi_object args[2] = { + { + .integer = { + .type = ACPI_TYPE_INTEGER, + .value = lower, + }, + }, + { + .integer = { + .type = ACPI_TYPE_INTEGER, + .value = upper, + }, + }, + }; + struct acpi_fan *fan = dev_get_drvdata(dev); + union acpi_object in = { + .package = { + .type = ACPI_TYPE_PACKAGE, + .count = ARRAY_SIZE(args), + .elements = args, + }, + }; + union acpi_object *obj; + + obj = acpi_evaluate_dsm(fan->handle, &acpi_fan_microsoft_guid, 0, + ACPI_FAN_DSM_SET_TRIP_POINTS, &in); + kfree(obj); + + return 0; +} + +static int acpi_fan_dsm_start(struct device *dev) +{ + struct acpi_fan *fan = dev_get_drvdata(dev); + int ret; + + if (!fan->fan_trip_granularity) + return 0; + + /* + * Some firmware implementations only update the values returned by the + * _FST control method when a notification is received. This usually + * works with Microsoft Windows as setting up trip points will keep + * triggering said notifications, but will cause issues when using _FST + * without the Microsoft-specific trip point extension. + * + * Because of this, an initial notification needs to be triggered to + * start the cycle of trip point updates. This is achieved by setting + * the trip points sequentially to two separate ranges. Per the + * Microsoft specification, the firmware should trigger a notification + * immediately if the fan speed is outside the trip point range. This + * _should_ result in at least one notification, as the two ranges do + * not overlap, meaning that the current fan speed must lie outside at + * least one of them. 
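+ * + * For example, with a trip point granularity of 100 RPM the two calls below + * would program the ranges [0, 100] and [200, 300]; since these ranges do + * not overlap, any fan speed lies outside at least one of them, so the + * firmware is expected to send at least one notification.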
+ */ + ret = acpi_fan_dsm_set_trip_points(dev, fan->fan_trip_granularity, 0); + if (ret < 0) + return ret; + + return acpi_fan_dsm_set_trip_points(dev, fan->fan_trip_granularity * 3, + fan->fan_trip_granularity * 2); +} + +static int acpi_fan_dsm_update_trips_points(struct device *dev, struct acpi_fan_fst *fst) +{ + struct acpi_fan *fan = dev_get_drvdata(dev); + u64 upper, lower; + + if (!fan->fan_trip_granularity) + return 0; + + if (!acpi_fan_speed_valid(fst->speed)) + return -EINVAL; + + upper = roundup_u64(fst->speed + min_trip_distance, fan->fan_trip_granularity); + if (fst->speed <= min_trip_distance) { + lower = 0; + } else { + /* + * Valid fan speed values cannot be larger than 32 bit, so + * we can safely assume that no overflow will happen here. + */ + lower = rounddown((u32)fst->speed - min_trip_distance, fan->fan_trip_granularity); + } + + return acpi_fan_dsm_set_trip_points(dev, upper, lower); +} + +static void acpi_fan_notify_handler(acpi_handle handle, u32 event, void *context) +{ + struct device *dev = context; + struct acpi_fan_fst fst; + int ret; + + switch (event) { + case ACPI_FAN_NOTIFY_STATE_CHANGED: + /* + * The ACPI specification says that we must evaluate _FST when we + * receive an ACPI event indicating that the fan state has changed. + */ + ret = acpi_fan_get_fst(handle, &fst); + if (ret < 0) { + dev_err(dev, "Error retrieving current fan status: %d\n", ret); + } else { + ret = acpi_fan_dsm_update_trips_points(dev, &fst); + if (ret < 0) + dev_err(dev, "Failed to update trip points: %d\n", ret); + } + + acpi_fan_notify_hwmon(dev); + acpi_bus_generate_netlink_event("fan", dev_name(dev), event, 0); + break; + default: + dev_dbg(dev, "Unsupported ACPI notification 0x%x\n", event); + break; + } +} + +static void acpi_fan_notify_remove(void *data) +{ + struct acpi_fan *fan = data; + + acpi_remove_notify_handler(fan->handle, ACPI_DEVICE_NOTIFY, acpi_fan_notify_handler); +} + +static int devm_acpi_fan_notify_init(struct device *dev) +{ + struct acpi_fan *fan = dev_get_drvdata(dev); + acpi_status status; + + status = acpi_install_notify_handler(fan->handle, ACPI_DEVICE_NOTIFY, + acpi_fan_notify_handler, dev); + if (ACPI_FAILURE(status)) + return -EIO; + + return devm_add_action_or_reset(dev, acpi_fan_notify_remove, fan); +} + +static int acpi_fan_probe(struct platform_device *pdev) +{ + int result = 0; + struct thermal_cooling_device *cdev; + struct acpi_fan *fan; + struct acpi_device *device = ACPI_COMPANION(&pdev->dev); + char *name; + + if (!device) + return -ENODEV; + + fan = devm_kzalloc(&pdev->dev, sizeof(*fan), GFP_KERNEL); + if (!fan) { + dev_err(&device->dev, "No memory for fan\n"); + return -ENOMEM; + } + + fan->handle = device->handle; + device->driver_data = fan; + platform_set_drvdata(pdev, fan); + + if (acpi_has_method(device->handle, "_FST")) { + fan->has_fst = true; + fan->acpi4 = acpi_has_method(device->handle, "_FIF") && + acpi_has_method(device->handle, "_FPS") && + acpi_has_method(device->handle, "_FSL"); + } + + if (fan->acpi4) { + result = acpi_fan_get_fif(device); + if (result) + return result; + + result = acpi_fan_get_fps(device); + if (result) + return result; + } + + if (fan->has_fst) { + result = acpi_fan_dsm_init(&pdev->dev); + if (result) + return result; + + result = devm_acpi_fan_create_hwmon(&pdev->dev); + if (result) + return result; + + result = devm_acpi_fan_notify_init(&pdev->dev); + if (result) + return result; + + result = acpi_fan_dsm_start(&pdev->dev); + if (result) { + dev_err(&pdev->dev, "Failed to start Microsoft fan 
extensions\n"); + return result; + } + + result = acpi_fan_create_attributes(device); + if (result) + return result; + } + + if (!fan->acpi4) { + result = acpi_device_update_power(device, NULL); + if (result) { + dev_err(&device->dev, "Failed to set initial power state\n"); + goto err_end; + } + } + + if (!strncmp(pdev->name, "PNP0C0B", strlen("PNP0C0B"))) + name = "Fan"; + else + name = acpi_device_bid(device); + + cdev = thermal_cooling_device_register(name, device, + &fan_cooling_ops); + if (IS_ERR(cdev)) { + result = PTR_ERR(cdev); + goto err_end; + } + + dev_dbg(&pdev->dev, "registered as cooling_device%d\n", cdev->id); + + fan->cdev = cdev; + result = sysfs_create_link(&pdev->dev.kobj, + &cdev->device.kobj, + "thermal_cooling"); + if (result) { + dev_err(&pdev->dev, "Failed to create sysfs link 'thermal_cooling'\n"); + goto err_unregister; + } + + result = sysfs_create_link(&cdev->device.kobj, + &pdev->dev.kobj, + "device"); + if (result) { + dev_err(&pdev->dev, "Failed to create sysfs link 'device'\n"); + goto err_remove_link; + } + + return 0; + +err_remove_link: + sysfs_remove_link(&pdev->dev.kobj, "thermal_cooling"); +err_unregister: + thermal_cooling_device_unregister(cdev); +err_end: + if (fan->has_fst) + acpi_fan_delete_attributes(device); + + return result; +} + +static void acpi_fan_remove(struct platform_device *pdev) +{ + struct acpi_fan *fan = platform_get_drvdata(pdev); + + if (fan->has_fst) { + struct acpi_device *device = ACPI_COMPANION(&pdev->dev); + + acpi_fan_delete_attributes(device); + } + sysfs_remove_link(&pdev->dev.kobj, "thermal_cooling"); + sysfs_remove_link(&fan->cdev->device.kobj, "device"); + thermal_cooling_device_unregister(fan->cdev); +} + +#ifdef CONFIG_PM_SLEEP +static int acpi_fan_suspend(struct device *dev) +{ + struct acpi_fan *fan = dev_get_drvdata(dev); + if (fan->acpi4) + return 0; + + acpi_device_set_power(ACPI_COMPANION(dev), ACPI_STATE_D0); + + return AE_OK; +} + +static int acpi_fan_resume(struct device *dev) +{ + struct acpi_fan *fan = dev_get_drvdata(dev); + int result; + + if (fan->has_fst) { + result = acpi_fan_dsm_start(dev); + if (result) + dev_err(dev, "Failed to start Microsoft fan extensions: %d\n", result); + } + + if (fan->acpi4) + return 0; + + result = acpi_device_update_power(ACPI_COMPANION(dev), NULL); + if (result) + dev_err(dev, "Error updating fan power state\n"); + + return result; +} + +static const struct dev_pm_ops acpi_fan_pm = { + .resume = acpi_fan_resume, + .freeze = acpi_fan_suspend, + .thaw = acpi_fan_resume, + .restore = acpi_fan_resume, +}; +#define FAN_PM_OPS_PTR (&acpi_fan_pm) + +#else + +#define FAN_PM_OPS_PTR NULL + +#endif + +static struct platform_driver acpi_fan_driver = { + .probe = acpi_fan_probe, + .remove = acpi_fan_remove, + .driver = { + .name = "acpi-fan", + .acpi_match_table = fan_device_ids, + .pm = FAN_PM_OPS_PTR, + }, +}; + +module_platform_driver(acpi_fan_driver); + +MODULE_AUTHOR("Paul Diefenbaugh"); +MODULE_DESCRIPTION("ACPI Fan Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/acpi/fan_hwmon.c b/drivers/acpi/fan_hwmon.c new file mode 100644 index 000000000000..d3374f8f524b --- /dev/null +++ b/drivers/acpi/fan_hwmon.c @@ -0,0 +1,180 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * hwmon interface for the ACPI Fan driver. 
+ * + * Copyright (C) 2024 Armin Wolf <W_Armin@gmx.de> + */ + +#include <linux/acpi.h> +#include <linux/device.h> +#include <linux/err.h> +#include <linux/hwmon.h> +#include <linux/limits.h> +#include <linux/types.h> +#include <linux/units.h> + +#include "fan.h" + +static struct acpi_fan_fps *acpi_fan_get_current_fps(struct acpi_fan *fan, u64 control) +{ + unsigned int i; + + for (i = 0; i < fan->fps_count; i++) { + if (fan->fps[i].control == control) + return &fan->fps[i]; + } + + return NULL; +} + +static umode_t acpi_fan_hwmon_is_visible(const void *drvdata, enum hwmon_sensor_types type, + u32 attr, int channel) +{ + const struct acpi_fan *fan = drvdata; + unsigned int i; + + switch (type) { + case hwmon_fan: + switch (attr) { + case hwmon_fan_input: + return 0444; + case hwmon_fan_target: + /* Only acpi4 fans support fan control. */ + if (!fan->acpi4) + return 0; + + /* + * When in fine grain control mode, not every fan control value + * has an associated fan performance state. + */ + if (fan->fif.fine_grain_ctrl) + return 0; + + return 0444; + default: + return 0; + } + case hwmon_power: + switch (attr) { + case hwmon_power_input: + /* Only acpi4 fans support fan control. */ + if (!fan->acpi4) + return 0; + + /* + * When in fine grain control mode, not every fan control value + * has an associated fan performance state. + */ + if (fan->fif.fine_grain_ctrl) + return 0; + + /* + * When all fan performance states contain no valid power data, + * the associated attribute should not be created. + */ + for (i = 0; i < fan->fps_count; i++) { + if (acpi_fan_power_valid(fan->fps[i].power)) + return 0444; + } + + return 0; + default: + return 0; + } + default: + return 0; + } +} + +static int acpi_fan_hwmon_read(struct device *dev, enum hwmon_sensor_types type, u32 attr, + int channel, long *val) +{ + struct acpi_fan *fan = dev_get_drvdata(dev); + struct acpi_fan_fps *fps; + struct acpi_fan_fst fst; + int ret; + + ret = acpi_fan_get_fst(fan->handle, &fst); + if (ret < 0) + return ret; + + switch (type) { + case hwmon_fan: + switch (attr) { + case hwmon_fan_input: + if (!acpi_fan_speed_valid(fst.speed)) + return -ENODEV; + + if (fst.speed > LONG_MAX) + return -EOVERFLOW; + + *val = fst.speed; + return 0; + case hwmon_fan_target: + fps = acpi_fan_get_current_fps(fan, fst.control); + if (!fps) + return -EIO; + + if (fps->speed > LONG_MAX) + return -EOVERFLOW; + + *val = fps->speed; + return 0; + default: + return -EOPNOTSUPP; + } + case hwmon_power: + switch (attr) { + case hwmon_power_input: + fps = acpi_fan_get_current_fps(fan, fst.control); + if (!fps) + return -EIO; + + if (!acpi_fan_power_valid(fps->power)) + return -ENODEV; + + if (fps->power > LONG_MAX / MICROWATT_PER_MILLIWATT) + return -EOVERFLOW; + + *val = fps->power * MICROWATT_PER_MILLIWATT; + return 0; + default: + return -EOPNOTSUPP; + } + default: + return -EOPNOTSUPP; + } +} + +static const struct hwmon_ops acpi_fan_hwmon_ops = { + .is_visible = acpi_fan_hwmon_is_visible, + .read = acpi_fan_hwmon_read, +}; + +static const struct hwmon_channel_info * const acpi_fan_hwmon_info[] = { + HWMON_CHANNEL_INFO(fan, HWMON_F_INPUT | HWMON_F_TARGET), + HWMON_CHANNEL_INFO(power, HWMON_P_INPUT), + NULL +}; + +static const struct hwmon_chip_info acpi_fan_hwmon_chip_info = { + .ops = &acpi_fan_hwmon_ops, + .info = acpi_fan_hwmon_info, +}; + +void acpi_fan_notify_hwmon(struct device *dev) +{ + struct acpi_fan *fan = dev_get_drvdata(dev); + + hwmon_notify_event(fan->hdev, hwmon_fan, hwmon_fan_input, 0); +} + +int 
devm_acpi_fan_create_hwmon(struct device *dev) +{ + struct acpi_fan *fan = dev_get_drvdata(dev); + + fan->hdev = devm_hwmon_device_register_with_info(dev, "acpi_fan", fan, + &acpi_fan_hwmon_chip_info, NULL); + + return PTR_ERR_OR_ZERO(fan->hdev); +} diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c index edd10b3c7ec8..a194f30876c5 100644 --- a/drivers/acpi/glue.c +++ b/drivers/acpi/glue.c @@ -1,12 +1,13 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * Link physical devices with ACPI devices support * * Copyright (c) 2005 David Shaohua Li <shaohua.li@intel.com> * Copyright (c) 2005 Intel Corp. - * - * This file is released under the GPLv2. */ +#define pr_fmt(fmt) "ACPI: " fmt + #include <linux/acpi_iort.h> #include <linux/export.h> #include <linux/init.h> @@ -16,21 +17,12 @@ #include <linux/rwsem.h> #include <linux/acpi.h> #include <linux/dma-mapping.h> +#include <linux/pci.h> +#include <linux/pci-acpi.h> #include <linux/platform_device.h> #include "internal.h" -#define ACPI_GLUE_DEBUG 0 -#if ACPI_GLUE_DEBUG -#define DBG(fmt, ...) \ - printk(KERN_DEBUG PREFIX fmt, ##__VA_ARGS__) -#else -#define DBG(fmt, ...) \ -do { \ - if (0) \ - printk(KERN_DEBUG PREFIX fmt, ##__VA_ARGS__); \ -} while (0) -#endif static LIST_HEAD(bus_type_list); static DECLARE_RWSEM(bus_type_sem); @@ -45,7 +37,7 @@ int register_acpi_bus_type(struct acpi_bus_type *type) down_write(&bus_type_sem); list_add_tail(&type->list, &bus_type_list); up_write(&bus_type_sem); - printk(KERN_INFO PREFIX "bus type %s registered\n", type->name); + pr_info("bus type %s registered\n", type->name); return 0; } return -ENODEV; @@ -60,8 +52,7 @@ int unregister_acpi_bus_type(struct acpi_bus_type *type) down_write(&bus_type_sem); list_del_init(&type->list); up_write(&bus_type_sem); - printk(KERN_INFO PREFIX "bus type %s unregistered\n", - type->name); + pr_info("bus type %s unregistered\n", type->name); return 0; } return -ENODEV; @@ -84,21 +75,41 @@ static struct acpi_bus_type *acpi_get_bus_type(struct device *dev) } #define FIND_CHILD_MIN_SCORE 1 -#define FIND_CHILD_MAX_SCORE 2 +#define FIND_CHILD_MID_SCORE 2 +#define FIND_CHILD_MAX_SCORE 3 + +static int match_any(struct acpi_device *adev, void *not_used) +{ + return 1; +} + +static bool acpi_dev_has_children(struct acpi_device *adev) +{ + return acpi_dev_for_each_child(adev, match_any, NULL) > 0; +} static int find_child_checks(struct acpi_device *adev, bool check_children) { - bool sta_present = true; unsigned long long sta; acpi_status status; - status = acpi_evaluate_integer(adev->handle, "_STA", NULL, &sta); - if (status == AE_NOT_FOUND) - sta_present = false; - else if (ACPI_FAILURE(status) || !(sta & ACPI_STA_DEVICE_ENABLED)) + if (check_children && !acpi_dev_has_children(adev)) return -ENODEV; - if (check_children && list_empty(&adev->children)) + status = acpi_evaluate_integer(adev->handle, "_STA", NULL, &sta); + if (status == AE_NOT_FOUND) { + /* + * Special case: backlight device objects without _STA are + * preferred to other objects with the same _ADR value, because + * it is more likely that they are actually useful. + */ + if (adev->pnp.type.backlight) + return FIND_CHILD_MID_SCORE; + + return FIND_CHILD_MIN_SCORE; + } + + if (ACPI_FAILURE(status) || !(sta & ACPI_STA_DEVICE_ENABLED)) return -ENODEV; /* @@ -108,61 +119,103 @@ static int find_child_checks(struct acpi_device *adev, bool check_children) * matched going forward. [This means a second spec violation in a row, * so whatever we do here is best effort anyway.] */ - return sta_present && !adev->pnp.type.platform_id ? 
- FIND_CHILD_MAX_SCORE : FIND_CHILD_MIN_SCORE; + if (adev->pnp.type.platform_id) + return FIND_CHILD_MIN_SCORE; + + return FIND_CHILD_MAX_SCORE; } -struct acpi_device *acpi_find_child_device(struct acpi_device *parent, - u64 address, bool check_children) +struct find_child_walk_data { + struct acpi_device *adev; + u64 address; + int score; + bool check_sta; + bool check_children; +}; + +static int check_one_child(struct acpi_device *adev, void *data) { - struct acpi_device *adev, *ret = NULL; - int ret_score = 0; - - if (!parent) - return NULL; - - list_for_each_entry(adev, &parent->children, node) { - unsigned long long addr; - acpi_status status; - int score; - - status = acpi_evaluate_integer(adev->handle, METHOD_NAME__ADR, - NULL, &addr); - if (ACPI_FAILURE(status) || addr != address) - continue; - - if (!ret) { - /* This is the first matching object. Save it. */ - ret = adev; - continue; - } + struct find_child_walk_data *wd = data; + int score; + + if (!adev->pnp.type.bus_address || acpi_device_adr(adev) != wd->address) + return 0; + + if (!wd->adev) { /* - * There is more than one matching device object with the same - * _ADR value. That really is unexpected, so we are kind of - * beyond the scope of the spec here. We have to choose which - * one to return, though. - * - * First, check if the previously found object is good enough - * and return it if so. Second, do the same for the object that - * we've just found. + * This is the first matching object, so save it. If it is not + * necessary to look for any other matching objects, stop the + * search. */ - if (!ret_score) { - ret_score = find_child_checks(ret, check_children); - if (ret_score == FIND_CHILD_MAX_SCORE) - return ret; - } - score = find_child_checks(adev, check_children); - if (score == FIND_CHILD_MAX_SCORE) { - return adev; - } else if (score > ret_score) { - ret = adev; - ret_score = score; - } + wd->adev = adev; + return !(wd->check_sta || wd->check_children); } - return ret; + + /* + * There is more than one matching device object with the same _ADR + * value. That really is unexpected, so we are kind of beyond the scope + * of the spec here. We have to choose which one to return, though. + * + * First, get the score for the previously found object and terminate + * the walk if it is maximum. + */ + if (!wd->score) { + score = find_child_checks(wd->adev, wd->check_children); + if (score == FIND_CHILD_MAX_SCORE) + return 1; + + wd->score = score; + } + /* + * Second, if the object that has just been found has a better score, + * replace the previously found one with it and terminate the walk if + * the new score is maximum. + */ + score = find_child_checks(adev, wd->check_children); + if (score > wd->score) { + wd->adev = adev; + if (score == FIND_CHILD_MAX_SCORE) + return 1; + + wd->score = score; + } + + /* Continue, because there may be better matches. 
*/ + return 0; +} + +static struct acpi_device *acpi_find_child(struct acpi_device *parent, + u64 address, bool check_children, + bool check_sta) +{ + struct find_child_walk_data wd = { + .address = address, + .check_children = check_children, + .check_sta = check_sta, + .adev = NULL, + .score = 0, + }; + + if (parent) + acpi_dev_for_each_child(parent, check_one_child, &wd); + + return wd.adev; +} + +struct acpi_device *acpi_find_child_device(struct acpi_device *parent, + u64 address, bool check_children) +{ + return acpi_find_child(parent, address, check_children, true); } EXPORT_SYMBOL_GPL(acpi_find_child_device); +struct acpi_device *acpi_find_child_by_adr(struct acpi_device *adev, + acpi_bus_address adr) +{ + return acpi_find_child(adev, adr, false, false); +} +EXPORT_SYMBOL_GPL(acpi_find_child_by_adr); + static void acpi_physnode_link_name(char *buf, unsigned int node_id) { if (node_id > 0) @@ -191,7 +244,7 @@ int acpi_bind_one(struct device *dev, struct acpi_device *acpi_dev) if (!acpi_dev) return -EINVAL; - get_device(&acpi_dev->dev); + acpi_dev_get(acpi_dev); get_device(dev); physical_node = kzalloc(sizeof(*physical_node), GFP_KERNEL); if (!physical_node) { @@ -218,7 +271,7 @@ int acpi_bind_one(struct device *dev, struct acpi_device *acpi_dev) goto err; put_device(dev); - put_device(&acpi_dev->dev); + acpi_dev_put(acpi_dev); return 0; } if (pn->node_id == node_id) { @@ -258,7 +311,7 @@ int acpi_bind_one(struct device *dev, struct acpi_device *acpi_dev) err: ACPI_COMPANION_SET(dev, NULL); put_device(dev); - put_device(&acpi_dev->dev); + acpi_dev_put(acpi_dev); return retval; } EXPORT_SYMBOL_GPL(acpi_bind_one); @@ -286,7 +339,7 @@ int acpi_unbind_one(struct device *dev) ACPI_COMPANION_SET(dev, NULL); /* Drop references taken by acpi_bind_one(). 
*/ put_device(dev); - put_device(&acpi_dev->dev); + acpi_dev_put(acpi_dev); kfree(entry); break; } @@ -296,82 +349,66 @@ int acpi_unbind_one(struct device *dev) } EXPORT_SYMBOL_GPL(acpi_unbind_one); -static int acpi_device_notify(struct device *dev) +void acpi_device_notify(struct device *dev) { - struct acpi_bus_type *type = acpi_get_bus_type(dev); struct acpi_device *adev; int ret; ret = acpi_bind_one(dev, NULL); - if (ret && type) { - struct acpi_device *adev; + if (ret) { + struct acpi_bus_type *type = acpi_get_bus_type(dev); + + if (!type) + goto err; adev = type->find_companion(dev); if (!adev) { - DBG("Unable to get handle for %s\n", dev_name(dev)); - ret = -ENODEV; - goto out; + dev_dbg(dev, "ACPI companion not found\n"); + goto err; } ret = acpi_bind_one(dev, adev); if (ret) - goto out; - } - adev = ACPI_COMPANION(dev); - if (!adev) - goto out; + goto err; - if (dev_is_platform(dev)) - acpi_configure_pmsi_domain(dev); + if (type->setup) { + type->setup(dev); + goto done; + } + } else { + adev = ACPI_COMPANION(dev); + + if (dev_is_pci(dev)) { + pci_acpi_setup(dev, adev); + goto done; + } else if (dev_is_platform(dev)) { + acpi_configure_pmsi_domain(dev); + } + } - if (type && type->setup) - type->setup(dev); - else if (adev->handler && adev->handler->bind) + if (adev->handler && adev->handler->bind) adev->handler->bind(dev); - out: -#if ACPI_GLUE_DEBUG - if (!ret) { - struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; +done: + acpi_handle_debug(ACPI_HANDLE(dev), "Bound to device %s\n", + dev_name(dev)); - acpi_get_name(ACPI_HANDLE(dev), ACPI_FULL_PATHNAME, &buffer); - DBG("Device %s -> %s\n", dev_name(dev), (char *)buffer.pointer); - kfree(buffer.pointer); - } else - DBG("Device %s -> No ACPI support\n", dev_name(dev)); -#endif + return; - return ret; +err: + dev_dbg(dev, "No ACPI support\n"); } -static int acpi_device_notify_remove(struct device *dev) +void acpi_device_notify_remove(struct device *dev) { struct acpi_device *adev = ACPI_COMPANION(dev); - struct acpi_bus_type *type; if (!adev) - return 0; + return; - type = acpi_get_bus_type(dev); - if (type && type->cleanup) - type->cleanup(dev); + if (dev_is_pci(dev)) + pci_acpi_cleanup(dev, adev); else if (adev->handler && adev->handler->unbind) adev->handler->unbind(dev); acpi_unbind_one(dev); - return 0; -} - -int acpi_platform_notify(struct device *dev, enum kobject_action action) -{ - switch (action) { - case KOBJ_ADD: - acpi_device_notify(dev); - break; - case KOBJ_REMOVE: - acpi_device_notify_remove(dev); - break; - default: - break; - } - return 0; } diff --git a/drivers/acpi/hed.c b/drivers/acpi/hed.c index 5c67a6d8f803..3499f86c411e 100644 --- a/drivers/acpi/hed.c +++ b/drivers/acpi/hed.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * ACPI Hardware Error Device (PNP0C33) Driver * @@ -6,15 +7,6 @@ * * ACPI Hardware Error Device is used to report some hardware errors * notified via SCI, mainly the corrected errors. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version - * 2 as published by the Free Software Foundation; - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
*/ #include <linux/kernel.h> @@ -50,24 +42,33 @@ EXPORT_SYMBOL_GPL(unregister_acpi_hed_notifier); * it is used by HEST Generic Hardware Error Source with notify type * SCI. */ -static void acpi_hed_notify(struct acpi_device *device, u32 event) +static void acpi_hed_notify(acpi_handle handle, u32 event, void *data) { blocking_notifier_call_chain(&acpi_hed_notify_list, 0, NULL); } static int acpi_hed_add(struct acpi_device *device) { + int err; + /* Only one hardware error device */ if (hed_handle) return -EINVAL; hed_handle = device->handle; - return 0; + + err = acpi_dev_install_notify_handler(device, ACPI_DEVICE_NOTIFY, + acpi_hed_notify, device); + if (err) + hed_handle = NULL; + + return err; } -static int acpi_hed_remove(struct acpi_device *device) +static void acpi_hed_remove(struct acpi_device *device) { + acpi_dev_remove_notify_handler(device, ACPI_DEVICE_NOTIFY, + acpi_hed_notify); hed_handle = NULL; - return 0; } static struct acpi_driver acpi_hed_driver = { @@ -77,12 +78,15 @@ static struct acpi_driver acpi_hed_driver = { .ops = { .add = acpi_hed_add, .remove = acpi_hed_remove, - .notify = acpi_hed_notify, }, }; -module_acpi_driver(acpi_hed_driver); -ACPI_MODULE_NAME("hed"); +static int __init acpi_hed_driver_init(void) +{ + return acpi_bus_register_driver(&acpi_hed_driver); +} +subsys_initcall(acpi_hed_driver_init); + MODULE_AUTHOR("Huang Ying"); MODULE_DESCRIPTION("ACPI Hardware Error Device Driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h index 6a9e1fb8913a..40f875b265a9 100644 --- a/drivers/acpi/internal.h +++ b/drivers/acpi/internal.h @@ -1,29 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ /* * acpi/internal.h * For use by Linux/ACPI infrastructure, not drivers * * Copyright (c) 2009, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- * */ #ifndef _ACPI_INTERNAL_H_ #define _ACPI_INTERNAL_H_ -#define PREFIX "ACPI: " +#include <linux/idr.h> + +extern struct acpi_device *acpi_root; int early_acpi_osi_init(void); int acpi_osi_init(void); acpi_status acpi_os_initialize1(void); -int acpi_scan_init(void); +void acpi_scan_init(void); #ifdef CONFIG_PCI void acpi_pci_root_init(void); void acpi_pci_link_init(void); @@ -34,12 +27,6 @@ static inline void acpi_pci_link_init(void) {} void acpi_processor_init(void); void acpi_platform_init(void); void acpi_pnp_init(void); -void acpi_int340x_thermal_init(void); -#ifdef CONFIG_ARM_AMBA -void acpi_amba_init(void); -#else -static inline void acpi_amba_init(void) {} -#endif int acpi_sysfs_init(void); void acpi_gpe_apply_masked_gpes(void); void acpi_container_init(void); @@ -81,7 +68,8 @@ void acpi_debugfs_init(void); #else static inline void acpi_debugfs_init(void) { return; } #endif -#ifdef CONFIG_PCI + +#if defined(CONFIG_X86) && defined(CONFIG_PCI) void acpi_lpss_init(void); #else static inline void acpi_lpss_init(void) {} @@ -95,7 +83,21 @@ void acpi_device_hotplug(struct acpi_device *adev, u32 src); bool acpi_scan_is_offline(struct acpi_device *adev, bool uevent); acpi_status acpi_sysfs_table_handler(u32 event, void *table, void *context); -void acpi_scan_table_handler(u32 event, void *table, void *context); +void acpi_scan_table_notify(void); + +int acpi_active_trip_temp(struct acpi_device *adev, int id, int *ret_temp); +int acpi_passive_trip_temp(struct acpi_device *adev, int *ret_temp); +int acpi_hot_trip_temp(struct acpi_device *adev, int *ret_temp); +int acpi_critical_trip_temp(struct acpi_device *adev, int *ret_temp); + +#ifdef CONFIG_ARM64 +int acpi_arch_thermal_cpufreq_pctg(void); +#else +static inline int acpi_arch_thermal_cpufreq_pctg(void) +{ + return 0; +} +#endif /* -------------------------------------------------------------------------- Device Node Initialization / Removal @@ -106,19 +108,21 @@ void acpi_scan_table_handler(u32 event, void *table, void *context); extern struct list_head acpi_bus_id_list; struct acpi_device_bus_id { - char bus_id[15]; - unsigned int instance_no; + const char *bus_id; + struct ida instance_ida; struct list_head node; }; -int acpi_device_add(struct acpi_device *device, - void (*release)(struct device *)); void acpi_init_device_object(struct acpi_device *device, acpi_handle handle, - int type, unsigned long long sta); -int acpi_device_setup_files(struct acpi_device *dev); + int type, void (*release)(struct device *)); +int acpi_tie_acpi_dev(struct acpi_device *adev); +int acpi_device_add(struct acpi_device *device); +void acpi_device_setup_files(struct acpi_device *dev); void acpi_device_remove_files(struct acpi_device *dev); +extern const struct attribute_group *acpi_groups[]; void acpi_device_add_finalize(struct acpi_device *device); void acpi_free_pnp_ids(struct acpi_device_pnp *pnp); +bool acpi_device_is_enabled(const struct acpi_device *adev); bool acpi_device_is_present(const struct acpi_device *adev); bool acpi_device_is_battery(struct acpi_device *adev); bool acpi_device_is_first_physical_node(struct acpi_device *adev, @@ -128,46 +132,69 @@ int acpi_bus_register_early_device(int type); /* -------------------------------------------------------------------------- Device Matching and Notification -------------------------------------------------------------------------- */ -struct acpi_device *acpi_companion_match(const struct device *dev); -int __acpi_device_uevent_modalias(struct acpi_device *adev, +const struct acpi_device 
*acpi_companion_match(const struct device *dev); +int __acpi_device_uevent_modalias(const struct acpi_device *adev, struct kobj_uevent_env *env); /* -------------------------------------------------------------------------- Power Resource -------------------------------------------------------------------------- */ -int acpi_power_init(void); +void acpi_power_resources_init(void); void acpi_power_resources_list_free(struct list_head *list); int acpi_extract_power_resources(union acpi_object *package, unsigned int start, struct list_head *list); -int acpi_add_power_resource(acpi_handle handle); +struct acpi_device *acpi_add_power_resource(acpi_handle handle); void acpi_power_add_remove_device(struct acpi_device *adev, bool add); int acpi_power_wakeup_list_init(struct list_head *list, int *system_level); int acpi_device_sleep_wake(struct acpi_device *dev, - int enable, int sleep_state, int dev_state); + int enable, int sleep_state, int dev_state); int acpi_power_get_inferred_state(struct acpi_device *device, int *state); int acpi_power_on_resources(struct acpi_device *device, int state); int acpi_power_transition(struct acpi_device *device, int state); +void acpi_turn_off_unused_power_resources(void); +/* -------------------------------------------------------------------------- + Device Power Management + -------------------------------------------------------------------------- */ +int acpi_device_get_power(struct acpi_device *device, int *state); int acpi_wakeup_device_init(void); +/* -------------------------------------------------------------------------- + Processor + -------------------------------------------------------------------------- */ #ifdef CONFIG_ARCH_MIGHT_HAVE_ACPI_PDC +void acpi_early_processor_control_setup(void); void acpi_early_processor_set_pdc(void); +#ifdef CONFIG_X86 +void acpi_proc_quirk_mwait_check(void); #else -static inline void acpi_early_processor_set_pdc(void) {} +static inline void acpi_proc_quirk_mwait_check(void) {} +#endif +bool processor_physically_present(acpi_handle handle); +#else +static inline void acpi_early_processor_control_setup(void) {} #endif -#ifdef CONFIG_X86 -void acpi_early_processor_osc(void); +#ifdef CONFIG_ACPI_PROCESSOR_CSTATE +void acpi_idle_rescan_dead_smt_siblings(void); #else -static inline void acpi_early_processor_osc(void) {} +static inline void acpi_idle_rescan_dead_smt_siblings(void) {} #endif /* -------------------------------------------------------------------------- Embedded Controller -------------------------------------------------------------------------- */ + +enum acpi_ec_event_state { + EC_EVENT_READY = 0, /* Event work can be submitted */ + EC_EVENT_IN_PROGRESS, /* Event work is pending or being processed */ + EC_EVENT_COMPLETE, /* Event work processing has completed */ +}; + struct acpi_ec { acpi_handle handle; - u32 gpe; + int gpe; + int irq; unsigned long command_addr; unsigned long data_addr; bool global_lock; @@ -180,7 +207,10 @@ struct acpi_ec { spinlock_t lock; struct work_struct work; unsigned long timestamp; - unsigned long nr_pending_queries; + enum acpi_ec_event_state event_state; + unsigned int events_to_process; + unsigned int events_in_progress; + unsigned int queries_in_progress; bool busy_polling; unsigned int polling_guard; }; @@ -191,34 +221,56 @@ extern struct acpi_ec *first_ec; /* External interfaces use first EC only, so remember */ typedef int (*acpi_ec_query_func) (void *data); -int acpi_ec_init(void); -int acpi_ec_ecdt_probe(void); -int acpi_ec_dsdt_probe(void); +#ifdef CONFIG_ACPI_EC + 
+void acpi_ec_init(void); +void acpi_ec_ecdt_probe(void); +void acpi_ec_dsdt_probe(void); void acpi_ec_block_transactions(void); void acpi_ec_unblock_transactions(void); -void acpi_ec_mark_gpe_for_wake(void); -void acpi_ec_set_gpe_wake_mask(u8 action); -void acpi_ec_dispatch_gpe(void); int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit, acpi_handle handle, acpi_ec_query_func func, void *data); void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit); +void acpi_ec_register_opregions(struct acpi_device *adev); #ifdef CONFIG_PM_SLEEP void acpi_ec_flush_work(void); +bool acpi_ec_dispatch_gpe(void); #endif +#else + +static inline void acpi_ec_init(void) {} +static inline void acpi_ec_ecdt_probe(void) {} +static inline void acpi_ec_dsdt_probe(void) {} +static inline void acpi_ec_block_transactions(void) {} +static inline void acpi_ec_unblock_transactions(void) {} +static inline int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit, + acpi_handle handle, acpi_ec_query_func func, + void *data) +{ + return -ENXIO; +} +static inline void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit) {} +static inline void acpi_ec_register_opregions(struct acpi_device *adev) {} + +static inline void acpi_ec_flush_work(void) {} +static inline bool acpi_ec_dispatch_gpe(void) +{ + return false; +} + +#endif /*-------------------------------------------------------------------------- Suspend/Resume -------------------------------------------------------------------------- */ #ifdef CONFIG_ACPI_SYSTEM_POWER_STATES_SUPPORT extern bool acpi_s2idle_wakeup(void); -extern bool acpi_sleep_no_ec_events(void); extern int acpi_sleep_init(void); #else static inline bool acpi_s2idle_wakeup(void) { return false; } -static inline bool acpi_sleep_no_ec_events(void) { return true; } static inline int acpi_sleep_init(void) { return -ENXIO; } #endif @@ -236,6 +288,15 @@ static inline int suspend_nvs_save(void) { return 0; } static inline void suspend_nvs_restore(void) {} #endif +#ifdef CONFIG_X86 +bool force_storage_d3(void); +#else +static inline bool force_storage_d3(void) +{ + return false; +} +#endif + /*-------------------------------------------------------------------------- Device properties -------------------------------------------------------------------------- */ @@ -266,4 +327,18 @@ void acpi_init_lpit(void); static inline void acpi_init_lpit(void) { } #endif +/*-------------------------------------------------------------------------- + ACPI _CRS CSI-2 and MIPI DisCo for Imaging + -------------------------------------------------------------------------- */ + +void acpi_mipi_check_crs_csi2(acpi_handle handle); +void acpi_mipi_scan_crs_csi2(void); +void acpi_mipi_init_crs_csi2_swnodes(void); +void acpi_mipi_crs_csi2_cleanup(void); +#ifdef CONFIG_X86 +bool acpi_graph_ignore_port(acpi_handle handle); +#else +static inline bool acpi_graph_ignore_port(acpi_handle handle) { return false; } +#endif + #endif /* _ACPI_INTERNAL_H_ */ diff --git a/drivers/acpi/ioapic.c b/drivers/acpi/ioapic.c index 3595aa9c7c18..6677955b4a8e 100644 --- a/drivers/acpi/ioapic.c +++ b/drivers/acpi/ioapic.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * IOAPIC/IOxAPIC/IOSAPIC driver * @@ -6,10 +7,6 @@ * * Copyright (C) 2014 Intel Corporation * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- * * Based on original drivers/pci/ioapic.c * Yinghai Lu <yinghai@kernel.org> * Jiang Liu <jiang.liu@intel.com> @@ -27,6 +24,7 @@ #include <linux/acpi.h> #include <linux/pci.h> #include <acpi/acpi.h> +#include "internal.h" struct acpi_pci_ioapic { acpi_handle root_handle; diff --git a/drivers/acpi/irq.c b/drivers/acpi/irq.c index 7c352cba0528..d1595156c86a 100644 --- a/drivers/acpi/irq.c +++ b/drivers/acpi/irq.c @@ -1,12 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * ACPI GSI IRQ layer * * Copyright (C) 2015 ARM Ltd. * Author: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. */ #include <linux/acpi.h> #include <linux/irq.h> @@ -15,7 +12,8 @@ enum acpi_irq_model_id acpi_irq_model; -static struct fwnode_handle *acpi_gsi_domain_id; +static acpi_gsi_domain_disp_fn acpi_get_gsi_domain_id; +static u32 (*acpi_gsi_to_irq_fallback)(u32 gsi); /** * acpi_gsi_to_irq() - Retrieve the linux irq number for a given GSI @@ -29,14 +27,18 @@ static struct fwnode_handle *acpi_gsi_domain_id; */ int acpi_gsi_to_irq(u32 gsi, unsigned int *irq) { - struct irq_domain *d = irq_find_matching_fwnode(acpi_gsi_domain_id, - DOMAIN_BUS_ANY); + struct irq_domain *d; + d = irq_find_matching_fwnode(acpi_get_gsi_domain_id(gsi), + DOMAIN_BUS_ANY); *irq = irq_find_mapping(d, gsi); /* - * *irq == 0 means no mapping, that should - * be reported as a failure + * *irq == 0 means no mapping, that should be reported as a + * failure, unless there is an arch-specific fallback handler. */ + if (!*irq && acpi_gsi_to_irq_fallback) + *irq = acpi_gsi_to_irq_fallback(gsi); + return (*irq > 0) ? 0 : -EINVAL; } EXPORT_SYMBOL_GPL(acpi_gsi_to_irq); @@ -55,18 +57,23 @@ int acpi_register_gsi(struct device *dev, u32 gsi, int trigger, int polarity) { struct irq_fwspec fwspec; + unsigned int irq; - if (WARN_ON(!acpi_gsi_domain_id)) { + fwspec.fwnode = acpi_get_gsi_domain_id(gsi); + if (WARN_ON(!fwspec.fwnode)) { pr_warn("GSI: No registered irqchip, giving up\n"); return -EINVAL; } - fwspec.fwnode = acpi_gsi_domain_id; fwspec.param[0] = gsi; fwspec.param[1] = acpi_dev_get_irq_type(trigger, polarity); fwspec.param_count = 2; - return irq_create_fwspec_mapping(&fwspec); + irq = irq_create_fwspec_mapping(&fwspec); + if (!irq) + return -EINVAL; + + return irq; } EXPORT_SYMBOL_GPL(acpi_register_gsi); @@ -76,10 +83,15 @@ EXPORT_SYMBOL_GPL(acpi_register_gsi); */ void acpi_unregister_gsi(u32 gsi) { - struct irq_domain *d = irq_find_matching_fwnode(acpi_gsi_domain_id, - DOMAIN_BUS_ANY); - int irq = irq_find_mapping(d, gsi); + struct irq_domain *d; + int irq; + + if (WARN_ON(acpi_irq_model == ACPI_IRQ_MODEL_GIC && gsi < 16)) + return; + d = irq_find_matching_fwnode(acpi_get_gsi_domain_id(gsi), + DOMAIN_BUS_ANY); + irq = irq_find_mapping(d, gsi); irq_dispose_mapping(irq); } EXPORT_SYMBOL_GPL(acpi_unregister_gsi); @@ -87,6 +99,7 @@ EXPORT_SYMBOL_GPL(acpi_unregister_gsi); /** * acpi_get_irq_source_fwhandle() - Retrieve fwhandle from IRQ resource source. * @source: acpi_resource_source to use for the lookup. 
+ * @gsi: GSI IRQ number * * Description: * Retrieve the fwhandle of the device referenced by the given IRQ resource @@ -96,7 +109,8 @@ EXPORT_SYMBOL_GPL(acpi_unregister_gsi); * The referenced device fwhandle or NULL on failure */ static struct fwnode_handle * -acpi_get_irq_source_fwhandle(const struct acpi_resource_source *source) +acpi_get_irq_source_fwhandle(const struct acpi_resource_source *source, + u32 gsi) { struct fwnode_handle *result; struct acpi_device *device; @@ -104,18 +118,18 @@ acpi_get_irq_source_fwhandle(const struct acpi_resource_source *source) acpi_status status; if (!source->string_length) - return acpi_gsi_domain_id; + return acpi_get_gsi_domain_id(gsi); status = acpi_get_handle(NULL, source->string_ptr, &handle); if (WARN_ON(ACPI_FAILURE(status))) return NULL; - device = acpi_bus_get_acpi_device(handle); + device = acpi_get_acpi_dev(handle); if (WARN_ON(!device)) return NULL; result = &device->fwnode; - acpi_bus_put_acpi_device(device); + acpi_put_acpi_dev(device); return result; } @@ -139,6 +153,7 @@ struct acpi_irq_parse_one_ctx { * @polarity: polarity attributes of hwirq * @polarity: polarity attributes of hwirq * @shareable: shareable attributes of hwirq + * @wake_capable: wake capable attribute of hwirq * @ctx: acpi_irq_parse_one_ctx updated by this function * * Description: @@ -148,12 +163,13 @@ struct acpi_irq_parse_one_ctx { static inline void acpi_irq_parse_one_match(struct fwnode_handle *fwnode, u32 hwirq, u8 triggering, u8 polarity, u8 shareable, + u8 wake_capable, struct acpi_irq_parse_one_ctx *ctx) { if (!fwnode) return; ctx->rc = 0; - *ctx->res_flags = acpi_dev_irq_flags(triggering, polarity, shareable); + *ctx->res_flags = acpi_dev_irq_flags(triggering, polarity, shareable, wake_capable); ctx->fwspec->fwnode = fwnode; ctx->fwspec->param[0] = hwirq; ctx->fwspec->param[1] = acpi_dev_get_irq_type(triggering, polarity); @@ -193,10 +209,10 @@ static acpi_status acpi_irq_parse_one_cb(struct acpi_resource *ares, ctx->index -= irq->interrupt_count; return AE_OK; } - fwnode = acpi_gsi_domain_id; + fwnode = acpi_get_gsi_domain_id(irq->interrupts[ctx->index]); acpi_irq_parse_one_match(fwnode, irq->interrupts[ctx->index], irq->triggering, irq->polarity, - irq->sharable, ctx); + irq->shareable, irq->wake_capable, ctx); return AE_CTRL_TERMINATE; case ACPI_RESOURCE_TYPE_EXTENDED_IRQ: eirq = &ares->data.extended_irq; @@ -206,10 +222,11 @@ static acpi_status acpi_irq_parse_one_cb(struct acpi_resource *ares, ctx->index -= eirq->interrupt_count; return AE_OK; } - fwnode = acpi_get_irq_source_fwhandle(&eirq->resource_source); + fwnode = acpi_get_irq_source_fwhandle(&eirq->resource_source, + eirq->interrupts[ctx->index]); acpi_irq_parse_one_match(fwnode, eirq->interrupts[ctx->index], eirq->triggering, eirq->polarity, - eirq->sharable, ctx); + eirq->shareable, eirq->wake_capable, ctx); return AE_CTRL_TERMINATE; } @@ -283,15 +300,88 @@ int acpi_irq_get(acpi_handle handle, unsigned int index, struct resource *res) } EXPORT_SYMBOL_GPL(acpi_irq_get); +const struct cpumask *acpi_irq_get_affinity(acpi_handle handle, + unsigned int index) +{ + struct irq_fwspec_info info; + struct irq_fwspec fwspec; + unsigned long flags; + + if (acpi_irq_parse_one(handle, index, &fwspec, &flags)) + return NULL; + + if (irq_populate_fwspec_info(&fwspec, &info)) + return NULL; + + if (!(info.flags & IRQ_FWSPEC_INFO_AFFINITY_VALID)) + return NULL; + + return info.affinity; +} + /** * acpi_set_irq_model - Setup the GSI irqdomain information * @model: the value assigned to acpi_irq_model - * 
@fwnode: the irq_domain identifier for mapping and looking up - * GSI interrupts + * @fn: a dispatcher function that will return the domain fwnode + * for a given GSI */ void __init acpi_set_irq_model(enum acpi_irq_model_id model, - struct fwnode_handle *fwnode) + acpi_gsi_domain_disp_fn fn) { acpi_irq_model = model; - acpi_gsi_domain_id = fwnode; + acpi_get_gsi_domain_id = fn; +} + +/* + * acpi_get_gsi_dispatcher() - Get the GSI dispatcher function + * + * Return the dispatcher function that computes the domain fwnode for + * a given GSI. + */ +acpi_gsi_domain_disp_fn acpi_get_gsi_dispatcher(void) +{ + return acpi_get_gsi_domain_id; +} +EXPORT_SYMBOL_GPL(acpi_get_gsi_dispatcher); + +/** + * acpi_set_gsi_to_irq_fallback - Register a GSI transfer + * callback to fallback to arch specified implementation. + * @fn: arch-specific fallback handler + */ +void __init acpi_set_gsi_to_irq_fallback(u32 (*fn)(u32)) +{ + acpi_gsi_to_irq_fallback = fn; +} + +/** + * acpi_irq_create_hierarchy - Create a hierarchical IRQ domain with the default + * GSI domain as its parent. + * @flags: Irq domain flags associated with the domain + * @size: Size of the domain. + * @fwnode: Optional fwnode of the interrupt controller + * @ops: Pointer to the interrupt domain callbacks + * @host_data: Controller private data pointer + */ +struct irq_domain *acpi_irq_create_hierarchy(unsigned int flags, + unsigned int size, + struct fwnode_handle *fwnode, + const struct irq_domain_ops *ops, + void *host_data) +{ + struct irq_domain *d; + + /* This only works for the GIC model... */ + if (acpi_irq_model != ACPI_IRQ_MODEL_GIC) + return NULL; + + d = irq_find_matching_fwnode(acpi_get_gsi_domain_id(0), + DOMAIN_BUS_ANY); + + if (!d) + return NULL; + + return irq_domain_create_hierarchy(d, flags, size, fwnode, ops, + host_data); } +EXPORT_SYMBOL_GPL(acpi_irq_create_hierarchy); diff --git a/drivers/acpi/mipi-disco-img.c b/drivers/acpi/mipi-disco-img.c new file mode 100644 index 000000000000..5b85989f96be --- /dev/null +++ b/drivers/acpi/mipi-disco-img.c @@ -0,0 +1,805 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * MIPI DisCo for Imaging support. + * + * Copyright (C) 2023 Intel Corporation + * + * Support MIPI DisCo for Imaging by parsing ACPI _CRS CSI-2 records defined in + * Section 6.4.3.8.2.4 "Camera Serial Interface (CSI-2) Connection Resource + * Descriptor" of ACPI 6.5 and using device properties defined by the MIPI DisCo + * for Imaging specification. + * + * The implementation looks for the information in the ACPI namespace (CSI-2 + * resource descriptors in _CRS) and constructs software nodes compatible with + * Documentation/firmware-guide/acpi/dsd/graph.rst to represent the CSI-2 + * connection graph. The software nodes are then populated with the data + * extracted from the _CRS CSI-2 resource descriptors and the MIPI DisCo + * for Imaging device properties present in _DSD for the ACPI device objects + * with CSI-2 connections. + */ + +#include <linux/acpi.h> +#include <linux/dmi.h> +#include <linux/limits.h> +#include <linux/list.h> +#include <linux/module.h> +#include <linux/overflow.h> +#include <linux/types.h> +#include <linux/slab.h> +#include <linux/string.h> + +#include <media/v4l2-fwnode.h> + +#include "internal.h" + +static LIST_HEAD(acpi_mipi_crs_csi2_list); + +static void acpi_mipi_data_tag(acpi_handle handle, void *context) +{ +} + +/* Connection data extracted from one _CRS CSI-2 resource descriptor. 
*/ +struct crs_csi2_connection { + struct list_head entry; + struct acpi_resource_csi2_serialbus csi2_data; + acpi_handle remote_handle; + char remote_name[]; +}; + +/* Data extracted from _CRS CSI-2 resource descriptors for one device. */ +struct crs_csi2 { + struct list_head entry; + acpi_handle handle; + struct acpi_device_software_nodes *swnodes; + struct list_head connections; + u32 port_count; +}; + +struct csi2_resources_walk_data { + acpi_handle handle; + struct list_head connections; +}; + +static acpi_status parse_csi2_resource(struct acpi_resource *res, void *context) +{ + struct csi2_resources_walk_data *crwd = context; + struct acpi_resource_csi2_serialbus *csi2_res; + struct acpi_resource_source *csi2_res_src; + u16 csi2_res_src_length; + struct crs_csi2_connection *conn; + acpi_handle remote_handle; + + if (res->type != ACPI_RESOURCE_TYPE_SERIAL_BUS) + return AE_OK; + + csi2_res = &res->data.csi2_serial_bus; + + if (csi2_res->type != ACPI_RESOURCE_SERIAL_TYPE_CSI2) + return AE_OK; + + csi2_res_src = &csi2_res->resource_source; + if (ACPI_FAILURE(acpi_get_handle(NULL, csi2_res_src->string_ptr, + &remote_handle))) { + acpi_handle_debug(crwd->handle, + "unable to find resource source\n"); + return AE_OK; + } + csi2_res_src_length = csi2_res_src->string_length; + if (!csi2_res_src_length) { + acpi_handle_debug(crwd->handle, + "invalid resource source string length\n"); + return AE_OK; + } + + conn = kmalloc(struct_size(conn, remote_name, csi2_res_src_length + 1), + GFP_KERNEL); + if (!conn) + return AE_OK; + + conn->csi2_data = *csi2_res; + strscpy(conn->remote_name, csi2_res_src->string_ptr, csi2_res_src_length); + conn->csi2_data.resource_source.string_ptr = conn->remote_name; + conn->remote_handle = remote_handle; + + list_add(&conn->entry, &crwd->connections); + + return AE_OK; +} + +static struct crs_csi2 *acpi_mipi_add_crs_csi2(acpi_handle handle, + struct list_head *list) +{ + struct crs_csi2 *csi2; + + csi2 = kzalloc(sizeof(*csi2), GFP_KERNEL); + if (!csi2) + return NULL; + + csi2->handle = handle; + INIT_LIST_HEAD(&csi2->connections); + csi2->port_count = 1; + + if (ACPI_FAILURE(acpi_attach_data(handle, acpi_mipi_data_tag, csi2))) { + kfree(csi2); + return NULL; + } + + list_add(&csi2->entry, list); + + return csi2; +} + +static struct crs_csi2 *acpi_mipi_get_crs_csi2(acpi_handle handle) +{ + struct crs_csi2 *csi2; + + if (ACPI_FAILURE(acpi_get_data_full(handle, acpi_mipi_data_tag, + (void **)&csi2, NULL))) + return NULL; + + return csi2; +} + +static void csi_csr2_release_connections(struct list_head *list) +{ + struct crs_csi2_connection *conn, *conn_tmp; + + list_for_each_entry_safe(conn, conn_tmp, list, entry) { + list_del(&conn->entry); + kfree(conn); + } +} + +static void acpi_mipi_del_crs_csi2(struct crs_csi2 *csi2) +{ + list_del(&csi2->entry); + acpi_detach_data(csi2->handle, acpi_mipi_data_tag); + kfree(csi2->swnodes); + csi_csr2_release_connections(&csi2->connections); + kfree(csi2); +} + +/** + * acpi_mipi_check_crs_csi2 - Look for CSI-2 resources in _CRS + * @handle: Device object handle to evaluate _CRS for. + * + * Find all CSI-2 resource descriptors in the given device's _CRS + * and collect them into a list. + */ +void acpi_mipi_check_crs_csi2(acpi_handle handle) +{ + struct csi2_resources_walk_data crwd = { + .handle = handle, + .connections = LIST_HEAD_INIT(crwd.connections), + }; + struct crs_csi2 *csi2; + + /* + * Avoid allocating _CRS CSI-2 objects for devices without any CSI-2 + * resource descriptions in _CRS to reduce overhead. 
+ */ + acpi_walk_resources(handle, METHOD_NAME__CRS, parse_csi2_resource, &crwd); + if (list_empty(&crwd.connections)) + return; + + /* + * Create a _CRS CSI-2 entry to store the extracted connection + * information and add it to the global list. + */ + csi2 = acpi_mipi_add_crs_csi2(handle, &acpi_mipi_crs_csi2_list); + if (!csi2) { + csi_csr2_release_connections(&crwd.connections); + return; /* Nothing really can be done about this. */ + } + + list_replace(&crwd.connections, &csi2->connections); +} + +#define NO_CSI2_PORT (UINT_MAX - 1) + +static void alloc_crs_csi2_swnodes(struct crs_csi2 *csi2) +{ + size_t port_count = csi2->port_count; + struct acpi_device_software_nodes *swnodes; + size_t alloc_size; + unsigned int i; + + /* + * Allocate memory for ports, node pointers (number of nodes + + * 1 (guardian), nodes (root + number of ports * 2 (because for + * every port there is an endpoint)). + */ + if (check_mul_overflow(sizeof(*swnodes->ports) + + sizeof(*swnodes->nodes) * 2 + + sizeof(*swnodes->nodeptrs) * 2, + port_count, &alloc_size) || + check_add_overflow(sizeof(*swnodes) + + sizeof(*swnodes->nodes) + + sizeof(*swnodes->nodeptrs) * 2, + alloc_size, &alloc_size)) { + acpi_handle_info(csi2->handle, + "too many _CRS CSI-2 resource handles (%zu)", + port_count); + return; + } + + swnodes = kmalloc(alloc_size, GFP_KERNEL); + if (!swnodes) + return; + + swnodes->ports = (struct acpi_device_software_node_port *)(swnodes + 1); + swnodes->nodes = (struct software_node *)(swnodes->ports + port_count); + swnodes->nodeptrs = (const struct software_node **)(swnodes->nodes + 1 + + 2 * port_count); + swnodes->num_ports = port_count; + + for (i = 0; i < 2 * port_count + 1; i++) + swnodes->nodeptrs[i] = &swnodes->nodes[i]; + + swnodes->nodeptrs[i] = NULL; + + for (i = 0; i < port_count; i++) + swnodes->ports[i].port_nr = NO_CSI2_PORT; + + csi2->swnodes = swnodes; +} + +#define ACPI_CRS_CSI2_PHY_TYPE_C 0 +#define ACPI_CRS_CSI2_PHY_TYPE_D 1 + +static unsigned int next_csi2_port_index(struct acpi_device_software_nodes *swnodes, + unsigned int port_nr) +{ + unsigned int i; + + for (i = 0; i < swnodes->num_ports; i++) { + struct acpi_device_software_node_port *port = &swnodes->ports[i]; + + if (port->port_nr == port_nr) + return i; + + if (port->port_nr == NO_CSI2_PORT) { + port->port_nr = port_nr; + return i; + } + } + + return NO_CSI2_PORT; +} + +/* Print graph port name into a buffer, return non-zero on failure. */ +#define GRAPH_PORT_NAME(var, num) \ + (snprintf((var), sizeof(var), SWNODE_GRAPH_PORT_NAME_FMT, (num)) >= \ + sizeof(var)) + +static void extract_crs_csi2_conn_info(acpi_handle local_handle, + struct acpi_device_software_nodes *local_swnodes, + struct crs_csi2_connection *conn) +{ + struct crs_csi2 *remote_csi2 = acpi_mipi_get_crs_csi2(conn->remote_handle); + struct acpi_device_software_nodes *remote_swnodes; + struct acpi_device_software_node_port *local_port, *remote_port; + struct software_node *local_node, *remote_node; + unsigned int local_index, remote_index; + unsigned int bus_type; + + /* + * If the previous steps have failed to make room for a _CRS CSI-2 + * representation for the remote end of the given connection, skip it. 
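
As a worked illustration of the single allocation in alloc_crs_csi2_swnodes() above, assume a device whose _CRS describes two CSI-2 ports (port_count == 2); the numbers below follow directly from the overflow-checked size computation and the node-pointer loop:

    ports:    2 entries
    nodes:    1 root + 2 ports + 2 endpoints  = 5 entries
    nodeptrs: 5 pointers + 1 NULL terminator  = 6 entries

so alloc_size covers sizeof(*swnodes) + 2 * sizeof(*swnodes->ports) + 5 * sizeof(*swnodes->nodes) + 6 * sizeof(*swnodes->nodeptrs), and the check_mul_overflow()/check_add_overflow() pair rejects any port count for which that sum would wrap before kmalloc() is attempted.
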
+ */ + if (!remote_csi2) + return; + + remote_swnodes = remote_csi2->swnodes; + if (!remote_swnodes) + return; + + switch (conn->csi2_data.phy_type) { + case ACPI_CRS_CSI2_PHY_TYPE_C: + bus_type = V4L2_FWNODE_BUS_TYPE_CSI2_CPHY; + break; + + case ACPI_CRS_CSI2_PHY_TYPE_D: + bus_type = V4L2_FWNODE_BUS_TYPE_CSI2_DPHY; + break; + + default: + acpi_handle_info(local_handle, "unknown CSI-2 PHY type %u\n", + conn->csi2_data.phy_type); + return; + } + + local_index = next_csi2_port_index(local_swnodes, + conn->csi2_data.local_port_instance); + if (WARN_ON_ONCE(local_index >= local_swnodes->num_ports)) + return; + + remote_index = next_csi2_port_index(remote_swnodes, + conn->csi2_data.resource_source.index); + if (WARN_ON_ONCE(remote_index >= remote_swnodes->num_ports)) + return; + + local_port = &local_swnodes->ports[local_index]; + local_node = &local_swnodes->nodes[ACPI_DEVICE_SWNODE_EP(local_index)]; + local_port->crs_csi2_local = true; + + remote_port = &remote_swnodes->ports[remote_index]; + remote_node = &remote_swnodes->nodes[ACPI_DEVICE_SWNODE_EP(remote_index)]; + + local_port->remote_ep[0] = SOFTWARE_NODE_REFERENCE(remote_node); + remote_port->remote_ep[0] = SOFTWARE_NODE_REFERENCE(local_node); + + local_port->ep_props[ACPI_DEVICE_SWNODE_EP_REMOTE_EP] = + PROPERTY_ENTRY_REF_ARRAY("remote-endpoint", + local_port->remote_ep); + + local_port->ep_props[ACPI_DEVICE_SWNODE_EP_BUS_TYPE] = + PROPERTY_ENTRY_U32("bus-type", bus_type); + + local_port->ep_props[ACPI_DEVICE_SWNODE_EP_REG] = + PROPERTY_ENTRY_U32("reg", 0); + + local_port->port_props[ACPI_DEVICE_SWNODE_PORT_REG] = + PROPERTY_ENTRY_U32("reg", conn->csi2_data.local_port_instance); + + if (GRAPH_PORT_NAME(local_port->port_name, + conn->csi2_data.local_port_instance)) + acpi_handle_info(local_handle, "local port %u name too long", + conn->csi2_data.local_port_instance); + + remote_port->ep_props[ACPI_DEVICE_SWNODE_EP_REMOTE_EP] = + PROPERTY_ENTRY_REF_ARRAY("remote-endpoint", + remote_port->remote_ep); + + remote_port->ep_props[ACPI_DEVICE_SWNODE_EP_BUS_TYPE] = + PROPERTY_ENTRY_U32("bus-type", bus_type); + + remote_port->ep_props[ACPI_DEVICE_SWNODE_EP_REG] = + PROPERTY_ENTRY_U32("reg", 0); + + remote_port->port_props[ACPI_DEVICE_SWNODE_PORT_REG] = + PROPERTY_ENTRY_U32("reg", conn->csi2_data.resource_source.index); + + if (GRAPH_PORT_NAME(remote_port->port_name, + conn->csi2_data.resource_source.index)) + acpi_handle_info(local_handle, "remote port %u name too long", + conn->csi2_data.resource_source.index); +} + +static void prepare_crs_csi2_swnodes(struct crs_csi2 *csi2) +{ + struct acpi_device_software_nodes *local_swnodes = csi2->swnodes; + acpi_handle local_handle = csi2->handle; + struct crs_csi2_connection *conn; + + /* Bail out if the allocation of swnodes has failed. */ + if (!local_swnodes) + return; + + list_for_each_entry(conn, &csi2->connections, entry) + extract_crs_csi2_conn_info(local_handle, local_swnodes, conn); +} + +/** + * acpi_mipi_scan_crs_csi2 - Create ACPI _CRS CSI-2 software nodes + * + * Note that this function must be called before any struct acpi_device objects + * are bound to any ACPI drivers or scan handlers, so it cannot assume the + * existence of struct acpi_device objects for every device present in the ACPI + * namespace. + * + * acpi_scan_lock in scan.c must be held when calling this function. + */ +void acpi_mipi_scan_crs_csi2(void) +{ + struct crs_csi2 *csi2; + LIST_HEAD(aux_list); + + /* Count references to each ACPI handle in the CSI-2 connection graph. 
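
To visualize what extract_crs_csi2_conn_info() above assembles for one _CRS CSI-2 connection, here is a rough sketch of the resulting software-node graph; the "port@<n>" names assume the SWNODE_GRAPH_PORT_NAME_FMT convention and the bracketed values are placeholders, not output from a real system:

    local device swnode                     remote device swnode
      port@<local_port_instance>             port@<resource_source.index>
        reg = <local_port_instance>            reg = <resource_source.index>
        endpoint@0                              endpoint@0
          reg = 0                                 reg = 0
          bus-type = <C-PHY or D-PHY>             bus-type = <C-PHY or D-PHY>
          remote-endpoint = <remote endpoint>     remote-endpoint = <local endpoint>
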
*/ + list_for_each_entry(csi2, &acpi_mipi_crs_csi2_list, entry) { + struct crs_csi2_connection *conn; + + list_for_each_entry(conn, &csi2->connections, entry) { + struct crs_csi2 *remote_csi2; + + csi2->port_count++; + + remote_csi2 = acpi_mipi_get_crs_csi2(conn->remote_handle); + if (remote_csi2) { + remote_csi2->port_count++; + continue; + } + /* + * The remote endpoint has no _CRS CSI-2 list entry yet, + * so create one for it and add it to the list. + */ + acpi_mipi_add_crs_csi2(conn->remote_handle, &aux_list); + } + } + list_splice(&aux_list, &acpi_mipi_crs_csi2_list); + + /* + * Allocate software nodes for representing the CSI-2 information. + * + * This needs to be done for all of the list entries in one go, because + * they may point to each other without restrictions and the next step + * relies on the availability of swnodes memory for each list entry. + */ + list_for_each_entry(csi2, &acpi_mipi_crs_csi2_list, entry) + alloc_crs_csi2_swnodes(csi2); + + /* + * Set up software node properties using data from _CRS CSI-2 resource + * descriptors. + */ + list_for_each_entry(csi2, &acpi_mipi_crs_csi2_list, entry) + prepare_crs_csi2_swnodes(csi2); +} + +/* + * Get the index of the next property in the property array, with a given + * maximum value. + */ +#define NEXT_PROPERTY(index, max) \ + (WARN_ON((index) > ACPI_DEVICE_SWNODE_##max) ? \ + ACPI_DEVICE_SWNODE_##max : (index)++) + +static void init_csi2_port_local(struct acpi_device *adev, + struct acpi_device_software_node_port *port, + struct fwnode_handle *port_fwnode, + unsigned int index) +{ + acpi_handle handle = acpi_device_handle(adev); + unsigned int num_link_freqs; + int ret; + + ret = fwnode_property_count_u64(port_fwnode, "mipi-img-link-frequencies"); + if (ret <= 0) + return; + + num_link_freqs = ret; + if (num_link_freqs > ACPI_DEVICE_CSI2_DATA_LANES) { + acpi_handle_info(handle, "Too many link frequencies: %u\n", + num_link_freqs); + num_link_freqs = ACPI_DEVICE_CSI2_DATA_LANES; + } + + ret = fwnode_property_read_u64_array(port_fwnode, + "mipi-img-link-frequencies", + port->link_frequencies, + num_link_freqs); + if (ret) { + acpi_handle_info(handle, "Unable to get link frequencies (%d)\n", + ret); + return; + } + + port->ep_props[NEXT_PROPERTY(index, EP_LINK_FREQUENCIES)] = + PROPERTY_ENTRY_U64_ARRAY_LEN("link-frequencies", + port->link_frequencies, + num_link_freqs); +} + +static void init_csi2_port(struct acpi_device *adev, + struct acpi_device_software_nodes *swnodes, + struct acpi_device_software_node_port *port, + struct fwnode_handle *port_fwnode, + unsigned int port_index) +{ + unsigned int ep_prop_index = ACPI_DEVICE_SWNODE_EP_CLOCK_LANES; + acpi_handle handle = acpi_device_handle(adev); + u8 val[ACPI_DEVICE_CSI2_DATA_LANES]; + int num_lanes = 0; + int ret; + + if (GRAPH_PORT_NAME(port->port_name, port->port_nr)) + return; + + swnodes->nodes[ACPI_DEVICE_SWNODE_PORT(port_index)] = + SOFTWARE_NODE(port->port_name, port->port_props, + &swnodes->nodes[ACPI_DEVICE_SWNODE_ROOT]); + + ret = fwnode_property_read_u8(port_fwnode, "mipi-img-clock-lane", val); + if (!ret) + port->ep_props[NEXT_PROPERTY(ep_prop_index, EP_CLOCK_LANES)] = + PROPERTY_ENTRY_U32("clock-lanes", val[0]); + + ret = fwnode_property_count_u8(port_fwnode, "mipi-img-data-lanes"); + if (ret > 0) { + num_lanes = ret; + + if (num_lanes > ACPI_DEVICE_CSI2_DATA_LANES) { + acpi_handle_info(handle, "Too many data lanes: %u\n", + num_lanes); + num_lanes = ACPI_DEVICE_CSI2_DATA_LANES; + } + + ret = fwnode_property_read_u8_array(port_fwnode, + 
"mipi-img-data-lanes", + val, num_lanes); + if (!ret) { + unsigned int i; + + for (i = 0; i < num_lanes; i++) + port->data_lanes[i] = val[i]; + + port->ep_props[NEXT_PROPERTY(ep_prop_index, EP_DATA_LANES)] = + PROPERTY_ENTRY_U32_ARRAY_LEN("data-lanes", + port->data_lanes, + num_lanes); + } + } + + ret = fwnode_property_count_u8(port_fwnode, "mipi-img-lane-polarities"); + if (ret < 0) { + acpi_handle_debug(handle, "Lane polarity bytes missing\n"); + } else if (ret * BITS_PER_TYPE(u8) < num_lanes + 1) { + acpi_handle_info(handle, "Too few lane polarity bits (%zu vs. %d)\n", + ret * BITS_PER_TYPE(u8), num_lanes + 1); + } else { + unsigned long mask = 0; + int byte_count = ret; + unsigned int i; + + /* + * The total number of lanes is ACPI_DEVICE_CSI2_DATA_LANES + 1 + * (data lanes + clock lane). It is not expected to ever be + * greater than the number of bits in an unsigned long + * variable, but ensure that this is the case. + */ + BUILD_BUG_ON(BITS_PER_TYPE(unsigned long) <= ACPI_DEVICE_CSI2_DATA_LANES); + + if (byte_count > sizeof(mask)) { + acpi_handle_info(handle, "Too many lane polarities: %d\n", + byte_count); + byte_count = sizeof(mask); + } + fwnode_property_read_u8_array(port_fwnode, "mipi-img-lane-polarities", + val, byte_count); + + for (i = 0; i < byte_count; i++) + mask |= (unsigned long)val[i] << BITS_PER_TYPE(u8) * i; + + for (i = 0; i <= num_lanes; i++) + port->lane_polarities[i] = test_bit(i, &mask); + + port->ep_props[NEXT_PROPERTY(ep_prop_index, EP_LANE_POLARITIES)] = + PROPERTY_ENTRY_U32_ARRAY_LEN("lane-polarities", + port->lane_polarities, + num_lanes + 1); + } + + swnodes->nodes[ACPI_DEVICE_SWNODE_EP(port_index)] = + SOFTWARE_NODE("endpoint@0", swnodes->ports[port_index].ep_props, + &swnodes->nodes[ACPI_DEVICE_SWNODE_PORT(port_index)]); + + if (port->crs_csi2_local) + init_csi2_port_local(adev, port, port_fwnode, ep_prop_index); +} + +#define MIPI_IMG_PORT_PREFIX "mipi-img-port-" + +static struct fwnode_handle *get_mipi_port_handle(struct fwnode_handle *adev_fwnode, + unsigned int port_nr) +{ + char port_name[sizeof(MIPI_IMG_PORT_PREFIX) + 2]; + + if (snprintf(port_name, sizeof(port_name), "%s%u", + MIPI_IMG_PORT_PREFIX, port_nr) >= sizeof(port_name)) + return NULL; + + return fwnode_get_named_child_node(adev_fwnode, port_name); +} + +static void init_crs_csi2_swnodes(struct crs_csi2 *csi2) +{ + struct acpi_buffer buffer = { .length = ACPI_ALLOCATE_BUFFER }; + struct acpi_device_software_nodes *swnodes = csi2->swnodes; + acpi_handle handle = csi2->handle; + unsigned int prop_index = 0; + struct fwnode_handle *adev_fwnode; + struct acpi_device *adev; + acpi_status status; + unsigned int i; + u32 val; + int ret; + + /* + * Bail out if the swnodes are not available (either they have not been + * allocated or they have been assigned to the device already). + */ + if (!swnodes) + return; + + adev = acpi_fetch_acpi_dev(handle); + if (!adev) + return; + + adev_fwnode = acpi_fwnode_handle(adev); + + /* + * If the "rotation" property is not present, but _PLD is there, + * evaluate it to get the "rotation" value. 
+ */ + if (!fwnode_property_present(adev_fwnode, "rotation")) { + struct acpi_pld_info *pld; + + if (acpi_get_physical_device_location(handle, &pld)) { + swnodes->dev_props[NEXT_PROPERTY(prop_index, DEV_ROTATION)] = + PROPERTY_ENTRY_U32("rotation", + pld->rotation * 45U); + kfree(pld); + } + } + + if (!fwnode_property_read_u32(adev_fwnode, "mipi-img-clock-frequency", &val)) + swnodes->dev_props[NEXT_PROPERTY(prop_index, DEV_CLOCK_FREQUENCY)] = + PROPERTY_ENTRY_U32("clock-frequency", val); + + if (!fwnode_property_read_u32(adev_fwnode, "mipi-img-led-max-current", &val)) + swnodes->dev_props[NEXT_PROPERTY(prop_index, DEV_LED_MAX_MICROAMP)] = + PROPERTY_ENTRY_U32("led-max-microamp", val); + + if (!fwnode_property_read_u32(adev_fwnode, "mipi-img-flash-max-current", &val)) + swnodes->dev_props[NEXT_PROPERTY(prop_index, DEV_FLASH_MAX_MICROAMP)] = + PROPERTY_ENTRY_U32("flash-max-microamp", val); + + if (!fwnode_property_read_u32(adev_fwnode, "mipi-img-flash-max-timeout-us", &val)) + swnodes->dev_props[NEXT_PROPERTY(prop_index, DEV_FLASH_MAX_TIMEOUT_US)] = + PROPERTY_ENTRY_U32("flash-max-timeout-us", val); + + status = acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer); + if (ACPI_FAILURE(status)) { + acpi_handle_info(handle, "Unable to get the path name\n"); + return; + } + + swnodes->nodes[ACPI_DEVICE_SWNODE_ROOT] = + SOFTWARE_NODE(buffer.pointer, swnodes->dev_props, NULL); + + for (i = 0; i < swnodes->num_ports; i++) { + struct acpi_device_software_node_port *port = &swnodes->ports[i]; + struct fwnode_handle *port_fwnode; + + /* + * The MIPI DisCo for Imaging specification defines _DSD device + * properties for providing CSI-2 port parameters that can be + * accessed through the generic device properties framework. To + * access them, it is first necessary to find the data node + * representing the port under the given ACPI device object. + */ + port_fwnode = get_mipi_port_handle(adev_fwnode, port->port_nr); + if (!port_fwnode) { + acpi_handle_info(handle, + "MIPI port name too long for port %u\n", + port->port_nr); + continue; + } + + init_csi2_port(adev, swnodes, port, port_fwnode, i); + + fwnode_handle_put(port_fwnode); + } + + ret = software_node_register_node_group(swnodes->nodeptrs); + if (ret < 0) { + acpi_handle_info(handle, + "Unable to register software nodes (%d)\n", ret); + return; + } + + adev->swnodes = swnodes; + adev_fwnode->secondary = software_node_fwnode(swnodes->nodes); + + /* + * Prevents the swnodes from this csi2 entry from being assigned again + * or freed prematurely. + */ + csi2->swnodes = NULL; +} + +/** + * acpi_mipi_init_crs_csi2_swnodes - Initialize _CRS CSI-2 software nodes + * + * Use MIPI DisCo for Imaging device properties to finalize the initialization + * of CSI-2 software nodes for all ACPI device objects that have been already + * enumerated. 
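
For orientation, a minimal sketch of the consumer side once these software nodes have been registered and hung off the device fwnode as its secondary; demo_parse_csi2_link() is a hypothetical helper, the error handling is abbreviated, and only generic fwnode graph calls are used:

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/property.h>

static int demo_parse_csi2_link(struct device *dev)
{
	struct fwnode_handle *ep, *remote;
	u32 bus_type = 0;

	/* The graph walk falls back to the secondary fwnode, so the synthesized ports are visible. */
	ep = fwnode_graph_get_next_endpoint(dev_fwnode(dev), NULL);
	if (!ep)
		return -ENODEV;

	/* "bus-type" and "remote-endpoint" were filled in from the _CRS CSI-2 data. */
	fwnode_property_read_u32(ep, "bus-type", &bus_type);
	remote = fwnode_graph_get_remote_port_parent(ep);

	fwnode_handle_put(remote);
	fwnode_handle_put(ep);
	return 0;
}
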
+ */ +void acpi_mipi_init_crs_csi2_swnodes(void) +{ + struct crs_csi2 *csi2, *csi2_tmp; + + list_for_each_entry_safe(csi2, csi2_tmp, &acpi_mipi_crs_csi2_list, entry) + init_crs_csi2_swnodes(csi2); +} + +/** + * acpi_mipi_crs_csi2_cleanup - Free _CRS CSI-2 temporary data + */ +void acpi_mipi_crs_csi2_cleanup(void) +{ + struct crs_csi2 *csi2, *csi2_tmp; + + list_for_each_entry_safe(csi2, csi2_tmp, &acpi_mipi_crs_csi2_list, entry) + acpi_mipi_del_crs_csi2(csi2); +} + +#ifdef CONFIG_X86 +#include <asm/cpu_device_id.h> +#include <asm/intel-family.h> + +/* CPU matches for Dell generations with broken ACPI MIPI DISCO info */ +static const struct x86_cpu_id dell_broken_mipi_disco_cpu_gens[] = { + X86_MATCH_VFM(INTEL_TIGERLAKE, NULL), + X86_MATCH_VFM(INTEL_TIGERLAKE_L, NULL), + X86_MATCH_VFM(INTEL_ALDERLAKE, NULL), + X86_MATCH_VFM(INTEL_ALDERLAKE_L, NULL), + X86_MATCH_VFM(INTEL_RAPTORLAKE, NULL), + X86_MATCH_VFM(INTEL_RAPTORLAKE_P, NULL), + X86_MATCH_VFM(INTEL_RAPTORLAKE_S, NULL), + {} +}; + +static const char *strnext(const char *s1, const char *s2) +{ + s1 = strstr(s1, s2); + + if (!s1) + return NULL; + + return s1 + strlen(s2); +} + +/** + * acpi_graph_ignore_port - Tell whether a port node should be ignored + * @handle: The ACPI handle of the node (which may be a port node) + * + * Return: true if a port node should be ignored and the data to that should + * come from other sources instead (Windows ACPI definitions and + * ipu-bridge). This is currently used to ignore bad port nodes related to IPU6 + * ("IPU?") and camera sensor devices ("LNK?") in certain Dell systems with + * Intel VSC. + */ +bool acpi_graph_ignore_port(acpi_handle handle) +{ + const char *path = NULL, *orig_path; + static bool dmi_tested, ignore_port; + + if (!dmi_tested) { + if (dmi_name_in_vendors("Dell Inc.") && + x86_match_cpu(dell_broken_mipi_disco_cpu_gens)) + ignore_port = true; + + dmi_tested = true; + } + + if (!ignore_port) + return false; + + /* Check if the device is either "IPU" or "LNK" (sensor). */ + orig_path = acpi_handle_path(handle); + if (!orig_path) + return false; + path = strnext(orig_path, "IPU"); + if (!path) + path = strnext(orig_path, "LNK"); + if (!path) + goto out_free; + + if (!(isdigit(path[0]) && path[1] == '.')) + goto out_free; + + /* Check if the node has a "PRT" prefix. */ + path = strnext(path, "PRT"); + if (path && isdigit(path[0]) && !path[1]) { + acpi_handle_debug(handle, "ignoring data node\n"); + + kfree(orig_path); + return true; + } + +out_free: + kfree(orig_path); + return false; +} +#endif diff --git a/drivers/acpi/nfit/Makefile b/drivers/acpi/nfit/Makefile index 751081c47886..07f53c4e85fe 100644 --- a/drivers/acpi/nfit/Makefile +++ b/drivers/acpi/nfit/Makefile @@ -1,3 +1,4 @@ +# SPDX-License-Identifier: GPL-2.0-only obj-$(CONFIG_ACPI_NFIT) := nfit.o nfit-y := core.o nfit-y += intel.o diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c index 5143e11e3b0f..3eb56b77cb6d 100644 --- a/drivers/acpi/nfit/core.c +++ b/drivers/acpi/nfit/core.c @@ -1,18 +1,11 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * Copyright(c) 2013-2015 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
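
Looping back to the acpi_graph_ignore_port() quirk above, a worked illustration of the strnext() matching; the ACPI paths are hypothetical examples, not taken from a specific Dell DSDT:

    "\_SB_.PC00.IPU0.PRT0"  ->  "IPU" + digit + '.'  then  "PRT" + digit + end  ->  ignored
    "\_SB_.PC00.LNK1.PRT2"  ->  "LNK" + digit + '.'  then  "PRT" + digit + end  ->  ignored
    "\_SB_.PC00.IPU0"       ->  no trailing "PRT" + digit                       ->  kept

The quirk only arms itself on Dell machines whose CPU matches one of the Tiger Lake through Raptor Lake entries in dell_broken_mipi_disco_cpu_gens[], and the DMI/CPU test is cached after the first call.
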
See the GNU - * General Public License for more details. */ #include <linux/list_sort.h> #include <linux/libnvdimm.h> #include <linux/module.h> +#include <linux/nospec.h> #include <linux/mutex.h> #include <linux/ndctl.h> #include <linux/sysfs.h> @@ -55,6 +48,10 @@ static bool no_init_ars; module_param(no_init_ars, bool, 0644); MODULE_PARM_DESC(no_init_ars, "Skip ARS run at nfit init time"); +static bool force_labels; +module_param(force_labels, bool, 0444); +MODULE_PARM_DESC(force_labels, "Opt-in to labels despite missing methods"); + LIST_HEAD(acpi_descs); DEFINE_MUTEX(acpi_desc_lock); @@ -77,6 +74,18 @@ const guid_t *to_nfit_uuid(enum nfit_uuids id) } EXPORT_SYMBOL(to_nfit_uuid); +static const guid_t *to_nfit_bus_uuid(int family) +{ + if (WARN_ONCE(family == NVDIMM_BUS_FAMILY_NFIT, + "only secondary bus families can be translated\n")) + return NULL; + /* + * The index of bus UUIDs starts immediately following the last + * NVDIMM/leaf family. + */ + return to_nfit_uuid(family + NVDIMM_FAMILY_MAX); +} + static struct acpi_device *to_acpi_dev(struct acpi_nfit_desc *acpi_desc) { struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc; @@ -274,18 +283,19 @@ err: static union acpi_object *int_to_buf(union acpi_object *integer) { - union acpi_object *buf = ACPI_ALLOCATE(sizeof(*buf) + 4); + union acpi_object *buf = NULL; void *dst = NULL; - if (!buf) - goto err; - if (integer->type != ACPI_TYPE_INTEGER) { WARN_ONCE(1, "BIOS bug, unexpected element type: %d\n", integer->type); goto err; } + buf = ACPI_ALLOCATE(sizeof(*buf) + 4); + if (!buf) + goto err; + dst = buf + 1; buf->type = ACPI_TYPE_BUFFER; buf->buffer.length = 4; @@ -364,33 +374,17 @@ static union acpi_object *acpi_label_info(acpi_handle handle) static u8 nfit_dsm_revid(unsigned family, unsigned func) { - static const u8 revid_table[NVDIMM_FAMILY_MAX+1][32] = { + static const u8 revid_table[NVDIMM_FAMILY_MAX+1][NVDIMM_CMD_MAX+1] = { [NVDIMM_FAMILY_INTEL] = { - [NVDIMM_INTEL_GET_MODES] = 2, - [NVDIMM_INTEL_GET_FWINFO] = 2, - [NVDIMM_INTEL_START_FWUPDATE] = 2, - [NVDIMM_INTEL_SEND_FWUPDATE] = 2, - [NVDIMM_INTEL_FINISH_FWUPDATE] = 2, - [NVDIMM_INTEL_QUERY_FWUPDATE] = 2, - [NVDIMM_INTEL_SET_THRESHOLD] = 2, - [NVDIMM_INTEL_INJECT_ERROR] = 2, - [NVDIMM_INTEL_GET_SECURITY_STATE] = 2, - [NVDIMM_INTEL_SET_PASSPHRASE] = 2, - [NVDIMM_INTEL_DISABLE_PASSPHRASE] = 2, - [NVDIMM_INTEL_UNLOCK_UNIT] = 2, - [NVDIMM_INTEL_FREEZE_LOCK] = 2, - [NVDIMM_INTEL_SECURE_ERASE] = 2, - [NVDIMM_INTEL_OVERWRITE] = 2, - [NVDIMM_INTEL_QUERY_OVERWRITE] = 2, - [NVDIMM_INTEL_SET_MASTER_PASSPHRASE] = 2, - [NVDIMM_INTEL_MASTER_SECURE_ERASE] = 2, + [NVDIMM_INTEL_GET_MODES ... 
+ NVDIMM_INTEL_FW_ACTIVATE_ARM] = 2, }, }; u8 id; if (family > NVDIMM_FAMILY_MAX) return 0; - if (func > 31) + if (func > NVDIMM_CMD_MAX) return 0; id = revid_table[family][func]; if (id == 0) @@ -409,6 +403,37 @@ static bool payload_dumpable(struct nvdimm *nvdimm, unsigned int func) return true; } +static int cmd_to_func(struct nfit_mem *nfit_mem, unsigned int cmd, + struct nd_cmd_pkg *call_pkg, int *family) +{ + if (call_pkg) { + int i; + + if (nfit_mem && nfit_mem->family != call_pkg->nd_family) + return -ENOTTY; + + for (i = 0; i < ARRAY_SIZE(call_pkg->nd_reserved2); i++) + if (call_pkg->nd_reserved2[i]) + return -EINVAL; + *family = call_pkg->nd_family; + return call_pkg->nd_command; + } + + /* In the !call_pkg case, bus commands == bus functions */ + if (!nfit_mem) + return cmd; + + /* Linux ND commands == NVDIMM_FAMILY_INTEL function numbers */ + if (nfit_mem->family == NVDIMM_FAMILY_INTEL) + return cmd; + + /* + * Force function number validation to fail since 0 is never + * published as a valid function in dsm_mask. + */ + return 0; +} + int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm, unsigned int cmd, void *buf, unsigned int buf_len, int *cmd_rc) { @@ -422,29 +447,29 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm, unsigned long cmd_mask, dsm_mask; u32 offset, fw_status = 0; acpi_handle handle; - unsigned int func; const guid_t *guid; - int rc, i; + int func, rc, i; + int family = 0; if (cmd_rc) *cmd_rc = -EINVAL; - func = cmd; + if (cmd == ND_CMD_CALL) { - call_pkg = buf; - func = call_pkg->nd_command; + if (!buf || buf_len < sizeof(*call_pkg)) + return -EINVAL; - for (i = 0; i < ARRAY_SIZE(call_pkg->nd_reserved2); i++) - if (call_pkg->nd_reserved2[i]) - return -EINVAL; + call_pkg = buf; } + func = cmd_to_func(nfit_mem, cmd, call_pkg, &family); + if (func < 0) + return func; + if (nvdimm) { struct acpi_device *adev = nfit_mem->adev; if (!adev) return -ENOTTY; - if (call_pkg && nfit_mem->family != call_pkg->nd_family) - return -ENOTTY; dimm_name = nvdimm_name(nvdimm); cmd_name = nvdimm_cmd_name(cmd); @@ -458,11 +483,20 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm, cmd_name = nvdimm_bus_cmd_name(cmd); cmd_mask = nd_desc->cmd_mask; - dsm_mask = cmd_mask; - if (cmd == ND_CMD_CALL) - dsm_mask = nd_desc->bus_dsm_mask; + if (cmd == ND_CMD_CALL && call_pkg->nd_family) { + family = call_pkg->nd_family; + if (call_pkg->nd_family > NVDIMM_BUS_FAMILY_MAX || + !test_bit(family, &nd_desc->bus_family_mask)) + return -EINVAL; + family = array_index_nospec(family, + NVDIMM_BUS_FAMILY_MAX + 1); + dsm_mask = acpi_desc->family_dsm_mask[family]; + guid = to_nfit_bus_uuid(family); + } else { + dsm_mask = acpi_desc->bus_dsm_mask; + guid = to_nfit_uuid(NFIT_DEV_BUS); + } desc = nd_cmd_bus_desc(cmd); - guid = to_nfit_uuid(NFIT_DEV_BUS); handle = adev->handle; dimm_name = "bus"; } @@ -470,7 +504,14 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm, if (!desc || (cmd && (desc->out_num + desc->in_num == 0))) return -ENOTTY; - if (!test_bit(cmd, &cmd_mask) || !test_bit(func, &dsm_mask)) + /* + * Check for a valid command. For ND_CMD_CALL, we also have to + * make sure that the DSM function is supported. 
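
To show the caller's side of this family and function routing, here is a minimal sketch that packages a vendor DSM as ND_CMD_CALL; demo_call_intel_dsm() is a hypothetical helper and it assumes the struct nd_cmd_pkg layout from include/uapi/linux/ndctl.h:

#include <linux/libnvdimm.h>
#include <linux/ndctl.h>
#include <linux/slab.h>
#include <linux/string.h>

static int demo_call_intel_dsm(struct nvdimm_bus_descriptor *nd_desc,
			       struct nvdimm *nvdimm, unsigned int func,
			       void *in, unsigned int in_len,
			       unsigned int out_len)
{
	struct nd_cmd_pkg *pkg;
	size_t len = sizeof(*pkg) + in_len + out_len;
	int cmd_rc, rc;

	pkg = kzalloc(len, GFP_KERNEL);
	if (!pkg)
		return -ENOMEM;

	pkg->nd_family = NVDIMM_FAMILY_INTEL;	/* selects the DSM GUID */
	pkg->nd_command = func;			/* validated against dsm_mask */
	pkg->nd_size_in = in_len;
	pkg->nd_size_out = out_len;
	if (in_len)
		memcpy(pkg->nd_payload, in, in_len);

	rc = acpi_nfit_ctl(nd_desc, nvdimm, ND_CMD_CALL, pkg, len, &cmd_rc);
	if (rc == 0)
		rc = cmd_rc;

	kfree(pkg);
	return rc;
}

Since function 0 is reserved for DSM discovery and is cleared from dsm_mask during dimm setup, passing func == 0 through this path is rejected with -ENOTTY.
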
+ */ + if (cmd == ND_CMD_CALL && + (func > NVDIMM_CMD_MAX || !test_bit(func, &dsm_mask))) + return -ENOTTY; + else if (!test_bit(cmd, &cmd_mask)) return -ENOTTY; in_obj.type = ACPI_TYPE_PACKAGE; @@ -491,8 +532,8 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm, in_buf.buffer.length = call_pkg->nd_size_in; } - dev_dbg(dev, "%s cmd: %d: func: %d input length: %d\n", - dimm_name, cmd, func, in_buf.buffer.length); + dev_dbg(dev, "%s cmd: %d: family: %d func: %d input length: %d\n", + dimm_name, cmd, family, func, in_buf.buffer.length); if (payload_dumpable(nvdimm, func)) print_hex_dump_debug("nvdimm in ", DUMP_PREFIX_OFFSET, 4, 4, in_buf.buffer.pointer, @@ -528,6 +569,19 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm, return -EINVAL; } + if (out_obj->type != ACPI_TYPE_BUFFER) { + dev_dbg(dev, "%s unexpected output object type cmd: %s type: %d\n", + dimm_name, cmd_name, out_obj->type); + rc = -EINVAL; + goto out; + } + + dev_dbg(dev, "%s cmd: %s output length: %d\n", dimm_name, + cmd_name, out_obj->buffer.length); + print_hex_dump_debug(cmd_name, DUMP_PREFIX_OFFSET, 4, 4, + out_obj->buffer.pointer, + min_t(u32, 128, out_obj->buffer.length), true); + if (call_pkg) { call_pkg->nd_fw_size = out_obj->buffer.length; memcpy(call_pkg->nd_payload + call_pkg->nd_size_in, @@ -546,19 +600,6 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm, return 0; } - if (out_obj->package.type != ACPI_TYPE_BUFFER) { - dev_dbg(dev, "%s unexpected output object type cmd: %s type: %d\n", - dimm_name, cmd_name, out_obj->type); - rc = -EINVAL; - goto out; - } - - dev_dbg(dev, "%s cmd: %s output length: %d\n", dimm_name, - cmd_name, out_obj->buffer.length); - print_hex_dump_debug(cmd_name, DUMP_PREFIX_OFFSET, 4, 4, - out_obj->buffer.pointer, - min_t(u32, 128, out_obj->buffer.length), true); - for (i = 0, offset = 0; i < desc->out_num; i++) { u32 out_size = nd_cmd_out_size(nvdimm, cmd, desc, i, buf, (u32 *) out_obj->buffer.pointer, @@ -642,14 +683,23 @@ static const char *spa_type_name(u16 type) int nfit_spa_type(struct acpi_nfit_system_address *spa) { + guid_t guid; int i; + import_guid(&guid, spa->range_guid); for (i = 0; i < NFIT_UUID_MAX; i++) - if (guid_equal(to_nfit_uuid(i), (guid_t *)&spa->range_guid)) + if (guid_equal(to_nfit_uuid(i), &guid)) return i; return -1; } +static size_t sizeof_spa(struct acpi_nfit_system_address *spa) +{ + if (spa->flags & ACPI_NFIT_LOCATION_COOKIE_VALID) + return sizeof(*spa); + return sizeof(*spa) - 8; +} + static bool add_spa(struct acpi_nfit_desc *acpi_desc, struct nfit_table_prev *prev, struct acpi_nfit_system_address *spa) @@ -657,22 +707,22 @@ static bool add_spa(struct acpi_nfit_desc *acpi_desc, struct device *dev = acpi_desc->dev; struct nfit_spa *nfit_spa; - if (spa->header.length != sizeof(*spa)) + if (spa->header.length != sizeof_spa(spa)) return false; list_for_each_entry(nfit_spa, &prev->spas, list) { - if (memcmp(nfit_spa->spa, spa, sizeof(*spa)) == 0) { + if (memcmp(nfit_spa->spa, spa, sizeof_spa(spa)) == 0) { list_move_tail(&nfit_spa->list, &acpi_desc->spas); return true; } } - nfit_spa = devm_kzalloc(dev, sizeof(*nfit_spa) + sizeof(*spa), + nfit_spa = devm_kzalloc(dev, sizeof(*nfit_spa) + sizeof_spa(spa), GFP_KERNEL); if (!nfit_spa) return false; INIT_LIST_HEAD(&nfit_spa->list); - memcpy(nfit_spa->spa, spa, sizeof(*spa)); + memcpy(nfit_spa->spa, spa, sizeof_spa(spa)); list_add_tail(&nfit_spa->list, &acpi_desc->spas); dev_dbg(dev, "spa index: %d type: %s\n", spa->range_index, 
@@ -810,7 +860,7 @@ static size_t sizeof_idt(struct acpi_nfit_interleave *idt) { if (idt->header.length < sizeof(*idt)) return 0; - return sizeof(*idt) + sizeof(u32) * (idt->line_count - 1); + return sizeof(*idt) + sizeof(u32) * idt->line_count; } static bool add_idt(struct acpi_nfit_desc *acpi_desc, @@ -849,7 +899,7 @@ static size_t sizeof_flush(struct acpi_nfit_flush_address *flush) { if (flush->header.length < sizeof(*flush)) return 0; - return sizeof(*flush) + sizeof(u64) * (flush->hint_count - 1); + return struct_size(flush, hint_address, flush->hint_count); } static bool add_flush(struct acpi_nfit_desc *acpi_desc, @@ -954,80 +1004,6 @@ static void *add_table(struct acpi_nfit_desc *acpi_desc, return table + hdr->length; } -static void nfit_mem_find_spa_bdw(struct acpi_nfit_desc *acpi_desc, - struct nfit_mem *nfit_mem) -{ - u32 device_handle = __to_nfit_memdev(nfit_mem)->device_handle; - u16 dcr = nfit_mem->dcr->region_index; - struct nfit_spa *nfit_spa; - - list_for_each_entry(nfit_spa, &acpi_desc->spas, list) { - u16 range_index = nfit_spa->spa->range_index; - int type = nfit_spa_type(nfit_spa->spa); - struct nfit_memdev *nfit_memdev; - - if (type != NFIT_SPA_BDW) - continue; - - list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) { - if (nfit_memdev->memdev->range_index != range_index) - continue; - if (nfit_memdev->memdev->device_handle != device_handle) - continue; - if (nfit_memdev->memdev->region_index != dcr) - continue; - - nfit_mem->spa_bdw = nfit_spa->spa; - return; - } - } - - dev_dbg(acpi_desc->dev, "SPA-BDW not found for SPA-DCR %d\n", - nfit_mem->spa_dcr->range_index); - nfit_mem->bdw = NULL; -} - -static void nfit_mem_init_bdw(struct acpi_nfit_desc *acpi_desc, - struct nfit_mem *nfit_mem, struct acpi_nfit_system_address *spa) -{ - u16 dcr = __to_nfit_memdev(nfit_mem)->region_index; - struct nfit_memdev *nfit_memdev; - struct nfit_bdw *nfit_bdw; - struct nfit_idt *nfit_idt; - u16 idt_idx, range_index; - - list_for_each_entry(nfit_bdw, &acpi_desc->bdws, list) { - if (nfit_bdw->bdw->region_index != dcr) - continue; - nfit_mem->bdw = nfit_bdw->bdw; - break; - } - - if (!nfit_mem->bdw) - return; - - nfit_mem_find_spa_bdw(acpi_desc, nfit_mem); - - if (!nfit_mem->spa_bdw) - return; - - range_index = nfit_mem->spa_bdw->range_index; - list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) { - if (nfit_memdev->memdev->range_index != range_index || - nfit_memdev->memdev->region_index != dcr) - continue; - nfit_mem->memdev_bdw = nfit_memdev->memdev; - idt_idx = nfit_memdev->memdev->interleave_index; - list_for_each_entry(nfit_idt, &acpi_desc->idts, list) { - if (nfit_idt->idt->interleave_index != idt_idx) - continue; - nfit_mem->idt_bdw = nfit_idt->idt; - break; - } - break; - } -} - static int __nfit_mem_init(struct acpi_nfit_desc *acpi_desc, struct acpi_nfit_system_address *spa) { @@ -1144,7 +1120,6 @@ static int __nfit_mem_init(struct acpi_nfit_desc *acpi_desc, nfit_mem->idt_dcr = nfit_idt->idt; break; } - nfit_mem_init_bdw(acpi_desc, nfit_mem, spa); } else if (type == NFIT_SPA_PM) { /* * A single dimm may belong to multiple SPA-PM @@ -1159,7 +1134,8 @@ static int __nfit_mem_init(struct acpi_nfit_desc *acpi_desc, return 0; } -static int nfit_mem_cmp(void *priv, struct list_head *_a, struct list_head *_b) +static int nfit_mem_cmp(void *priv, const struct list_head *_a, + const struct list_head *_b) { struct nfit_mem *a = container_of(_a, typeof(*a), list); struct nfit_mem *b = container_of(_b, typeof(*b), list); @@ -1213,8 +1189,9 @@ static ssize_t 
bus_dsm_mask_show(struct device *dev, { struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev); struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus); + struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); - return sprintf(buf, "%#lx\n", nd_desc->bus_dsm_mask); + return sysfs_emit(buf, "%#lx\n", acpi_desc->bus_dsm_mask); } static struct device_attribute dev_attr_bus_dsm_mask = __ATTR(dsm_mask, 0444, bus_dsm_mask_show, NULL); @@ -1226,7 +1203,7 @@ static ssize_t revision_show(struct device *dev, struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus); struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); - return sprintf(buf, "%d\n", acpi_desc->acpi_header.revision); + return sysfs_emit(buf, "%d\n", acpi_desc->acpi_header.revision); } static DEVICE_ATTR_RO(revision); @@ -1237,7 +1214,7 @@ static ssize_t hw_error_scrub_show(struct device *dev, struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus); struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); - return sprintf(buf, "%d\n", acpi_desc->scrub_mode); + return sysfs_emit(buf, "%d\n", acpi_desc->scrub_mode); } /* @@ -1291,19 +1268,30 @@ static ssize_t scrub_show(struct device *dev, struct device_attribute *attr, char *buf) { struct nvdimm_bus_descriptor *nd_desc; + struct acpi_nfit_desc *acpi_desc; ssize_t rc = -ENXIO; + bool busy; device_lock(dev); nd_desc = dev_get_drvdata(dev); - if (nd_desc) { - struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); + if (!nd_desc) { + device_unlock(dev); + return rc; + } + acpi_desc = to_acpi_desc(nd_desc); - mutex_lock(&acpi_desc->init_mutex); - rc = sprintf(buf, "%d%s", acpi_desc->scrub_count, - acpi_desc->scrub_busy - && !acpi_desc->cancel ? "+\n" : "\n"); - mutex_unlock(&acpi_desc->init_mutex); + mutex_lock(&acpi_desc->init_mutex); + busy = test_bit(ARS_BUSY, &acpi_desc->scrub_flags) + && !test_bit(ARS_CANCEL, &acpi_desc->scrub_flags); + rc = sysfs_emit(buf, "%d%s", acpi_desc->scrub_count, busy ? "+\n" : "\n"); + /* Allow an admin to poll the busy state at a higher rate */ + if (busy && capable(CAP_SYS_RAWIO) && !test_and_set_bit(ARS_POLL, + &acpi_desc->scrub_flags)) { + acpi_desc->scrub_tmo = 1; + mod_delayed_work(nfit_wq, &acpi_desc->dwork, HZ); } + + mutex_unlock(&acpi_desc->init_mutex); device_unlock(dev); return rc; } @@ -1346,11 +1334,15 @@ static bool ars_supported(struct nvdimm_bus *nvdimm_bus) static umode_t nfit_visible(struct kobject *kobj, struct attribute *a, int n) { - struct device *dev = container_of(kobj, struct device, kobj); + struct device *dev = kobj_to_dev(kobj); struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev); - if (a == &dev_attr_scrub.attr && !ars_supported(nvdimm_bus)) - return 0; + if (a == &dev_attr_scrub.attr) + return ars_supported(nvdimm_bus) ? a->mode : 0; + + if (a == &dev_attr_firmware_activate_noidle.attr) + return intel_fwa_supported(nvdimm_bus) ? 
a->mode : 0; + return a->mode; } @@ -1359,6 +1351,7 @@ static struct attribute *acpi_nfit_attributes[] = { &dev_attr_scrub.attr, &dev_attr_hw_error_scrub.attr, &dev_attr_bus_dsm_mask.attr, + &dev_attr_firmware_activate_noidle.attr, NULL, }; @@ -1369,7 +1362,6 @@ static const struct attribute_group acpi_nfit_attribute_group = { }; static const struct attribute_group *acpi_nfit_attribute_groups[] = { - &nvdimm_bus_attribute_group, &acpi_nfit_attribute_group, NULL, }; @@ -1395,7 +1387,7 @@ static ssize_t handle_show(struct device *dev, { struct acpi_nfit_memory_map *memdev = to_nfit_memdev(dev); - return sprintf(buf, "%#x\n", memdev->device_handle); + return sysfs_emit(buf, "%#x\n", memdev->device_handle); } static DEVICE_ATTR_RO(handle); @@ -1404,7 +1396,7 @@ static ssize_t phys_id_show(struct device *dev, { struct acpi_nfit_memory_map *memdev = to_nfit_memdev(dev); - return sprintf(buf, "%#x\n", memdev->physical_id); + return sysfs_emit(buf, "%#x\n", memdev->physical_id); } static DEVICE_ATTR_RO(phys_id); @@ -1413,7 +1405,7 @@ static ssize_t vendor_show(struct device *dev, { struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); - return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->vendor_id)); + return sysfs_emit(buf, "0x%04x\n", be16_to_cpu(dcr->vendor_id)); } static DEVICE_ATTR_RO(vendor); @@ -1422,7 +1414,7 @@ static ssize_t rev_id_show(struct device *dev, { struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); - return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->revision_id)); + return sysfs_emit(buf, "0x%04x\n", be16_to_cpu(dcr->revision_id)); } static DEVICE_ATTR_RO(rev_id); @@ -1431,7 +1423,7 @@ static ssize_t device_show(struct device *dev, { struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); - return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->device_id)); + return sysfs_emit(buf, "0x%04x\n", be16_to_cpu(dcr->device_id)); } static DEVICE_ATTR_RO(device); @@ -1440,7 +1432,7 @@ static ssize_t subsystem_vendor_show(struct device *dev, { struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); - return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->subsystem_vendor_id)); + return sysfs_emit(buf, "0x%04x\n", be16_to_cpu(dcr->subsystem_vendor_id)); } static DEVICE_ATTR_RO(subsystem_vendor); @@ -1449,7 +1441,7 @@ static ssize_t subsystem_rev_id_show(struct device *dev, { struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); - return sprintf(buf, "0x%04x\n", + return sysfs_emit(buf, "0x%04x\n", be16_to_cpu(dcr->subsystem_revision_id)); } static DEVICE_ATTR_RO(subsystem_rev_id); @@ -1459,7 +1451,7 @@ static ssize_t subsystem_device_show(struct device *dev, { struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); - return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->subsystem_device_id)); + return sysfs_emit(buf, "0x%04x\n", be16_to_cpu(dcr->subsystem_device_id)); } static DEVICE_ATTR_RO(subsystem_device); @@ -1470,8 +1462,6 @@ static int num_nvdimm_formats(struct nvdimm *nvdimm) if (nfit_mem->memdev_pmem) formats++; - if (nfit_mem->memdev_bdw) - formats++; return formats; } @@ -1480,7 +1470,7 @@ static ssize_t format_show(struct device *dev, { struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); - return sprintf(buf, "0x%04x\n", le16_to_cpu(dcr->code)); + return sysfs_emit(buf, "0x%04x\n", le16_to_cpu(dcr->code)); } static DEVICE_ATTR_RO(format); @@ -1513,11 +1503,11 @@ static ssize_t format1_show(struct device *dev, continue; if (nfit_dcr->dcr->code == dcr->code) continue; - rc = sprintf(buf, "0x%04x\n", + rc = sysfs_emit(buf, "0x%04x\n", le16_to_cpu(nfit_dcr->dcr->code)); break; } - 
if (rc != ENXIO) + if (rc != -ENXIO) break; } mutex_unlock(&acpi_desc->init_mutex); @@ -1530,7 +1520,7 @@ static ssize_t formats_show(struct device *dev, { struct nvdimm *nvdimm = to_nvdimm(dev); - return sprintf(buf, "%d\n", num_nvdimm_formats(nvdimm)); + return sysfs_emit(buf, "%d\n", num_nvdimm_formats(nvdimm)); } static DEVICE_ATTR_RO(formats); @@ -1539,7 +1529,7 @@ static ssize_t serial_show(struct device *dev, { struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); - return sprintf(buf, "0x%08x\n", be32_to_cpu(dcr->serial_number)); + return sysfs_emit(buf, "0x%08x\n", be32_to_cpu(dcr->serial_number)); } static DEVICE_ATTR_RO(serial); @@ -1551,7 +1541,7 @@ static ssize_t family_show(struct device *dev, if (nfit_mem->family < 0) return -ENXIO; - return sprintf(buf, "%d\n", nfit_mem->family); + return sysfs_emit(buf, "%d\n", nfit_mem->family); } static DEVICE_ATTR_RO(family); @@ -1563,7 +1553,7 @@ static ssize_t dsm_mask_show(struct device *dev, if (nfit_mem->family < 0) return -ENXIO; - return sprintf(buf, "%#lx\n", nfit_mem->dsm_mask); + return sysfs_emit(buf, "%#lx\n", nfit_mem->dsm_mask); } static DEVICE_ATTR_RO(dsm_mask); @@ -1577,7 +1567,7 @@ static ssize_t flags_show(struct device *dev, if (test_bit(NFIT_MEM_DIRTY, &nfit_mem->flags)) flags |= ACPI_NFIT_MEM_FLUSH_FAILED; - return sprintf(buf, "%s%s%s%s%s%s%s\n", + return sysfs_emit(buf, "%s%s%s%s%s%s%s\n", flags & ACPI_NFIT_MEM_SAVE_FAILED ? "save_fail " : "", flags & ACPI_NFIT_MEM_RESTORE_FAILED ? "restore_fail " : "", flags & ACPI_NFIT_MEM_FLUSH_FAILED ? "flush_fail " : "", @@ -1594,7 +1584,7 @@ static ssize_t id_show(struct device *dev, struct nvdimm *nvdimm = to_nvdimm(dev); struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); - return sprintf(buf, "%s\n", nfit_mem->id); + return sysfs_emit(buf, "%s\n", nfit_mem->id); } static DEVICE_ATTR_RO(id); @@ -1604,7 +1594,7 @@ static ssize_t dirty_shutdown_show(struct device *dev, struct nvdimm *nvdimm = to_nvdimm(dev); struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); - return sprintf(buf, "%d\n", nfit_mem->dirty_shutdown); + return sysfs_emit(buf, "%d\n", nfit_mem->dirty_shutdown); } static DEVICE_ATTR_RO(dirty_shutdown); @@ -1632,7 +1622,7 @@ static struct attribute *acpi_nfit_dimm_attributes[] = { static umode_t acpi_nfit_dimm_attr_visible(struct kobject *kobj, struct attribute *a, int n) { - struct device *dev = container_of(kobj, struct device, kobj); + struct device *dev = kobj_to_dev(kobj); struct nvdimm *nvdimm = to_nvdimm(dev); struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); @@ -1663,8 +1653,6 @@ static const struct attribute_group acpi_nfit_dimm_attribute_group = { }; static const struct attribute_group *acpi_nfit_dimm_attribute_groups[] = { - &nvdimm_attribute_group, - &nd_device_attribute_group, &acpi_nfit_dimm_attribute_group, NULL, }; @@ -1733,14 +1721,14 @@ static bool acpi_nvdimm_has_method(struct acpi_device *adev, char *method) __weak void nfit_intel_shutdown_status(struct nfit_mem *nfit_mem) { + struct device *dev = &nfit_mem->adev->dev; struct nd_intel_smart smart = { 0 }; union acpi_object in_buf = { - .type = ACPI_TYPE_BUFFER, - .buffer.pointer = (char *) &smart, - .buffer.length = sizeof(smart), + .buffer.type = ACPI_TYPE_BUFFER, + .buffer.length = 0, }; union acpi_object in_obj = { - .type = ACPI_TYPE_PACKAGE, + .package.type = ACPI_TYPE_PACKAGE, .package.count = 1, .package.elements = &in_buf, }; @@ -1754,9 +1742,15 @@ __weak void nfit_intel_shutdown_status(struct nfit_mem *nfit_mem) if ((nfit_mem->dsm_mask & (1 << func)) == 0) 
return; - out_obj = acpi_evaluate_dsm(handle, guid, revid, func, &in_obj); - if (!out_obj) + out_obj = acpi_evaluate_dsm_typed(handle, guid, revid, func, &in_obj, ACPI_TYPE_BUFFER); + if (!out_obj || out_obj->buffer.length < sizeof(smart)) { + dev_dbg(dev->parent, "%s: failed to retrieve initial health\n", + dev_name(dev)); + ACPI_FREE(out_obj); return; + } + memcpy(&smart, out_obj->buffer.pointer, sizeof(smart)); + ACPI_FREE(out_obj); if (smart.flags & ND_INTEL_SMART_SHUTDOWN_VALID) { if (smart.shutdown_state) @@ -1767,7 +1761,6 @@ __weak void nfit_intel_shutdown_status(struct nfit_mem *nfit_mem) set_bit(NFIT_MEM_DIRTY_COUNT, &nfit_mem->flags); nfit_mem->dirty_shutdown = smart.shutdown_count; } - ACPI_FREE(out_obj); } static void populate_shutdown_status(struct nfit_mem *nfit_mem) @@ -1784,6 +1777,7 @@ static void populate_shutdown_status(struct nfit_mem *nfit_mem) static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc, struct nfit_mem *nfit_mem, u32 device_handle) { + struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc; struct acpi_device *adev, *adev_dimm; struct device *dev = acpi_desc->dev; unsigned long dsm_mask, label_mask; @@ -1795,6 +1789,7 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc, /* nfit test assumes 1:1 relationship between commands and dsms */ nfit_mem->dsm_mask = acpi_desc->dimm_cmd_force_en; nfit_mem->family = NVDIMM_FAMILY_INTEL; + set_bit(NVDIMM_FAMILY_INTEL, &nd_desc->dimm_family_mask); if (dcr->valid_fields & ACPI_NFIT_CONTROL_MFG_INFO_VALID) sprintf(nfit_mem->id, "%04x-%02x-%04x-%08x", @@ -1835,14 +1830,25 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc, dev_set_drvdata(&adev_dimm->dev, nfit_mem); /* - * Until standardization materializes we need to consider 4 - * different command sets. Note, that checking for function0 (bit0) - * tells us if any commands are reachable through this GUID. + * There are 4 "legacy" NVDIMM command sets + * (NVDIMM_FAMILY_{INTEL,MSFT,HPE1,HPE2}) that were created before + * an EFI working group was established to constrain this + * proliferation. The nfit driver probes for the supported command + * set by GUID. Note, if you're a platform developer looking to add + * a new command set to this probe, consider using an existing set, + * or otherwise seek approval to publish the command set at + * http://www.uefi.org/RFIC_LIST. + * + * Note, that checking for function0 (bit0) tells us if any commands + * are reachable through this GUID. */ + clear_bit(NVDIMM_FAMILY_INTEL, &nd_desc->dimm_family_mask); for (i = 0; i <= NVDIMM_FAMILY_MAX; i++) - if (acpi_check_dsm(adev_dimm->handle, to_nfit_uuid(i), 1, 1)) + if (acpi_check_dsm(adev_dimm->handle, to_nfit_uuid(i), 1, 1)) { + set_bit(i, &nd_desc->dimm_family_mask); if (family < 0 || i == default_dsm_family) family = i; + } /* limit the supported commands to those that are publicly documented */ nfit_mem->family = family; @@ -1860,6 +1866,8 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc, dsm_mask &= ~(1 << 8); } else if (nfit_mem->family == NVDIMM_FAMILY_MSFT) { dsm_mask = 0xffffffff; + } else if (nfit_mem->family == NVDIMM_FAMILY_HYPERV) { + dsm_mask = 0x1f; } else { dev_dbg(dev, "unknown dimm command family\n"); nfit_mem->family = -1; @@ -1867,6 +1875,13 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc, return 0; } + /* + * Function 0 is the command interrogation function, don't + * export it to potential userspace use, and enable it to be + * used as an error value in acpi_nfit_ctl(). 
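
Two related changes meet in this hunk: acpi_evaluate_dsm() becomes acpi_evaluate_dsm_typed(..., ACPI_TYPE_BUFFER), so the caller only has to validate the buffer length before copying the payload out, and function 0 is masked out of dsm_mask because it is the _DSM interrogation function whose reply is a bitmask advertising the other supported functions. A hedged sketch of that call pattern (the revision, GUID handling and length check are placeholders, not the driver's exact values):

#include <linux/acpi.h>
#include <linux/string.h>

/* Sketch: read a buffer-typed _DSM payload after probing support via function 0. */
static int example_read_dsm(acpi_handle handle, const guid_t *guid,
			    u64 func, void *out, size_t len)
{
	union acpi_object *obj;

	/* acpi_check_dsm() evaluates function 0 and tests the returned bitmask. */
	if (!acpi_check_dsm(handle, guid, 1, 1ULL << func))
		return -ENOTTY;

	obj = acpi_evaluate_dsm_typed(handle, guid, 1, func, NULL,
				      ACPI_TYPE_BUFFER);
	if (!obj)
		return -EIO;
	if (obj->buffer.length < len) {
		ACPI_FREE(obj);
		return -ENXIO;
	}
	memcpy(out, obj->buffer.pointer, len);
	ACPI_FREE(obj);
	return 0;
}
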
+ */ + dsm_mask &= ~1UL; + guid = to_nfit_uuid(nfit_mem->family); for_each_set_bit(i, &dsm_mask, BITS_PER_LONG) if (acpi_check_dsm(adev_dimm->handle, guid, @@ -1882,18 +1897,32 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc, | 1 << ND_CMD_SET_CONFIG_DATA; if (family == NVDIMM_FAMILY_INTEL && (dsm_mask & label_mask) == label_mask) - return 0; + /* skip _LS{I,R,W} enabling */; + else { + if (acpi_nvdimm_has_method(adev_dimm, "_LSI") + && acpi_nvdimm_has_method(adev_dimm, "_LSR")) { + dev_dbg(dev, "%s: has _LSR\n", dev_name(&adev_dimm->dev)); + set_bit(NFIT_MEM_LSR, &nfit_mem->flags); + } - if (acpi_nvdimm_has_method(adev_dimm, "_LSI") - && acpi_nvdimm_has_method(adev_dimm, "_LSR")) { - dev_dbg(dev, "%s: has _LSR\n", dev_name(&adev_dimm->dev)); - set_bit(NFIT_MEM_LSR, &nfit_mem->flags); - } + if (test_bit(NFIT_MEM_LSR, &nfit_mem->flags) + && acpi_nvdimm_has_method(adev_dimm, "_LSW")) { + dev_dbg(dev, "%s: has _LSW\n", dev_name(&adev_dimm->dev)); + set_bit(NFIT_MEM_LSW, &nfit_mem->flags); + } - if (test_bit(NFIT_MEM_LSR, &nfit_mem->flags) - && acpi_nvdimm_has_method(adev_dimm, "_LSW")) { - dev_dbg(dev, "%s: has _LSW\n", dev_name(&adev_dimm->dev)); - set_bit(NFIT_MEM_LSW, &nfit_mem->flags); + /* + * Quirk read-only label configurations to preserve + * access to label-less namespaces by default. + */ + if (!test_bit(NFIT_MEM_LSW, &nfit_mem->flags) + && !force_labels) { + dev_dbg(dev, "%s: No _LSW, disable labels\n", + dev_name(&adev_dimm->dev)); + clear_bit(NFIT_MEM_LSR, &nfit_mem->flags); + } else + dev_dbg(dev, "%s: Force enable labels\n", + dev_name(&adev_dimm->dev)); } populate_shutdown_status(nfit_mem); @@ -1937,6 +1966,26 @@ static const struct nvdimm_security_ops *acpi_nfit_get_security_ops(int family) } } +static const struct nvdimm_fw_ops *acpi_nfit_get_fw_ops( + struct nfit_mem *nfit_mem) +{ + unsigned long mask; + struct acpi_nfit_desc *acpi_desc = nfit_mem->acpi_desc; + struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc; + + if (!nd_desc->fw_ops) + return NULL; + + if (nfit_mem->family != NVDIMM_FAMILY_INTEL) + return NULL; + + mask = nfit_mem->dsm_mask & NVDIMM_INTEL_FW_ACTIVATE_CMDMASK; + if (mask != NVDIMM_INTEL_FW_ACTIVATE_CMDMASK) + return NULL; + + return intel_fw_ops; +} + static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc) { struct nfit_mem *nfit_mem; @@ -1957,9 +2006,6 @@ static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc) continue; } - if (nfit_mem->bdw && nfit_mem->memdev_pmem) - set_bit(NDD_ALIASING, &flags); - /* collate flags across all memdevs for this dimm */ list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) { struct acpi_nfit_memory_map *dimm_memdev; @@ -2007,7 +2053,8 @@ static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc) acpi_nfit_dimm_attribute_groups, flags, cmd_mask, flush ? flush->hint_count : 0, nfit_mem->flush_wpq, &nfit_mem->id[0], - acpi_nfit_get_security_ops(nfit_mem->family)); + acpi_nfit_get_security_ops(nfit_mem->family), + acpi_nfit_get_fw_ops(nfit_mem)); if (!nvdimm) return -ENOMEM; @@ -2017,7 +2064,7 @@ static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc) if ((mem_flags & ACPI_NFIT_MEM_FAILED_MASK) == 0) continue; - dev_info(acpi_desc->dev, "%s flags:%s%s%s%s%s\n", + dev_err(acpi_desc->dev, "Error found in NVDIMM %s flags:%s%s%s%s%s\n", nvdimm_name(nvdimm), mem_flags & ACPI_NFIT_MEM_SAVE_FAILED ? " save_fail" : "", mem_flags & ACPI_NFIT_MEM_RESTORE_FAILED ? 
" restore_fail":"", @@ -2042,11 +2089,6 @@ static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc) if (!nvdimm) continue; - rc = nvdimm_security_setup_events(nvdimm); - if (rc < 0) - dev_warn(acpi_desc->dev, - "security event setup failed: %d\n", rc); - nfit_kernfs = sysfs_get_dirent(nvdimm_kobj(nvdimm)->sd, "nfit"); if (nfit_kernfs) nfit_mem->flags_attr = sysfs_get_dirent(nfit_kernfs, @@ -2066,22 +2108,33 @@ static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc) * these commands. */ enum nfit_aux_cmds { - NFIT_CMD_TRANSLATE_SPA = 5, - NFIT_CMD_ARS_INJECT_SET = 7, - NFIT_CMD_ARS_INJECT_CLEAR = 8, - NFIT_CMD_ARS_INJECT_GET = 9, + NFIT_CMD_TRANSLATE_SPA = 5, + NFIT_CMD_ARS_INJECT_SET = 7, + NFIT_CMD_ARS_INJECT_CLEAR = 8, + NFIT_CMD_ARS_INJECT_GET = 9, }; static void acpi_nfit_init_dsms(struct acpi_nfit_desc *acpi_desc) { struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc; const guid_t *guid = to_nfit_uuid(NFIT_DEV_BUS); + unsigned long dsm_mask, *mask; struct acpi_device *adev; - unsigned long dsm_mask; int i; - nd_desc->cmd_mask = acpi_desc->bus_cmd_force_en; - nd_desc->bus_dsm_mask = acpi_desc->bus_nfit_cmd_force_en; + set_bit(ND_CMD_CALL, &nd_desc->cmd_mask); + set_bit(NVDIMM_BUS_FAMILY_NFIT, &nd_desc->bus_family_mask); + + /* enable nfit_test to inject bus command emulation */ + if (acpi_desc->bus_cmd_force_en) { + nd_desc->cmd_mask = acpi_desc->bus_cmd_force_en; + mask = &nd_desc->bus_family_mask; + if (acpi_desc->family_dsm_mask[NVDIMM_BUS_FAMILY_INTEL]) { + set_bit(NVDIMM_BUS_FAMILY_INTEL, mask); + nd_desc->fw_ops = intel_bus_fw_ops; + } + } + adev = to_acpi_dev(acpi_desc); if (!adev) return; @@ -2089,7 +2142,6 @@ static void acpi_nfit_init_dsms(struct acpi_nfit_desc *acpi_desc) for (i = ND_CMD_ARS_CAP; i <= ND_CMD_CLEAR_ERROR; i++) if (acpi_check_dsm(adev->handle, guid, 1, 1ULL << i)) set_bit(i, &nd_desc->cmd_mask); - set_bit(ND_CMD_CALL, &nd_desc->cmd_mask); dsm_mask = (1 << ND_CMD_ARS_CAP) | @@ -2102,7 +2154,20 @@ static void acpi_nfit_init_dsms(struct acpi_nfit_desc *acpi_desc) (1 << NFIT_CMD_ARS_INJECT_GET); for_each_set_bit(i, &dsm_mask, BITS_PER_LONG) if (acpi_check_dsm(adev->handle, guid, 1, 1ULL << i)) - set_bit(i, &nd_desc->bus_dsm_mask); + set_bit(i, &acpi_desc->bus_dsm_mask); + + /* Enumerate allowed NVDIMM_BUS_FAMILY_INTEL commands */ + dsm_mask = NVDIMM_BUS_INTEL_FW_ACTIVATE_CMDMASK; + guid = to_nfit_bus_uuid(NVDIMM_BUS_FAMILY_INTEL); + mask = &acpi_desc->family_dsm_mask[NVDIMM_BUS_FAMILY_INTEL]; + for_each_set_bit(i, &dsm_mask, BITS_PER_LONG) + if (acpi_check_dsm(adev->handle, guid, 1, 1ULL << i)) + set_bit(i, mask); + + if (*mask == dsm_mask) { + set_bit(NVDIMM_BUS_FAMILY_INTEL, &nd_desc->bus_family_mask); + nd_desc->fw_ops = intel_bus_fw_ops; + } } static ssize_t range_index_show(struct device *dev, @@ -2111,7 +2176,7 @@ static ssize_t range_index_show(struct device *dev, struct nd_region *nd_region = to_nd_region(dev); struct nfit_spa *nfit_spa = nd_region_provider_data(nd_region); - return sprintf(buf, "%d\n", nfit_spa->spa->range_index); + return sysfs_emit(buf, "%d\n", nfit_spa->spa->range_index); } static DEVICE_ATTR_RO(range_index); @@ -2126,50 +2191,30 @@ static const struct attribute_group acpi_nfit_region_attribute_group = { }; static const struct attribute_group *acpi_nfit_region_attribute_groups[] = { - &nd_region_attribute_group, - &nd_mapping_attribute_group, - &nd_device_attribute_group, - &nd_numa_attribute_group, &acpi_nfit_region_attribute_group, NULL, }; /* enough info to uniquely specify an interleave set */ 
struct nfit_set_info { - struct nfit_set_info_map { - u64 region_offset; - u32 serial_number; - u32 pad; - } mapping[0]; + u64 region_offset; + u32 serial_number; + u32 pad; }; struct nfit_set_info2 { - struct nfit_set_info_map2 { - u64 region_offset; - u32 serial_number; - u16 vendor_id; - u16 manufacturing_date; - u8 manufacturing_location; - u8 reserved[31]; - } mapping[0]; + u64 region_offset; + u32 serial_number; + u16 vendor_id; + u16 manufacturing_date; + u8 manufacturing_location; + u8 reserved[31]; }; -static size_t sizeof_nfit_set_info(int num_mappings) -{ - return sizeof(struct nfit_set_info) - + num_mappings * sizeof(struct nfit_set_info_map); -} - -static size_t sizeof_nfit_set_info2(int num_mappings) -{ - return sizeof(struct nfit_set_info2) - + num_mappings * sizeof(struct nfit_set_info_map2); -} - static int cmp_map_compat(const void *m0, const void *m1) { - const struct nfit_set_info_map *map0 = m0; - const struct nfit_set_info_map *map1 = m1; + const struct nfit_set_info *map0 = m0; + const struct nfit_set_info *map1 = m1; return memcmp(&map0->region_offset, &map1->region_offset, sizeof(u64)); @@ -2177,8 +2222,8 @@ static int cmp_map_compat(const void *m0, const void *m1) static int cmp_map(const void *m0, const void *m1) { - const struct nfit_set_info_map *map0 = m0; - const struct nfit_set_info_map *map1 = m1; + const struct nfit_set_info *map0 = m0; + const struct nfit_set_info *map1 = m1; if (map0->region_offset < map1->region_offset) return -1; @@ -2189,8 +2234,8 @@ static int cmp_map(const void *m0, const void *m1) static int cmp_map2(const void *m0, const void *m1) { - const struct nfit_set_info_map2 *map0 = m0; - const struct nfit_set_info_map2 *map1 = m1; + const struct nfit_set_info2 *map0 = m0; + const struct nfit_set_info2 *map1 = m1; if (map0->region_offset < map1->region_offset) return -1; @@ -2216,34 +2261,31 @@ static int acpi_nfit_init_interleave_set(struct acpi_nfit_desc *acpi_desc, struct nd_region_desc *ndr_desc, struct acpi_nfit_system_address *spa) { + u16 nr = ndr_desc->num_mappings; + struct nfit_set_info2 *info2 __free(kfree) = + kcalloc(nr, sizeof(*info2), GFP_KERNEL); + struct nfit_set_info *info __free(kfree) = + kcalloc(nr, sizeof(*info), GFP_KERNEL); struct device *dev = acpi_desc->dev; struct nd_interleave_set *nd_set; - u16 nr = ndr_desc->num_mappings; - struct nfit_set_info2 *info2; - struct nfit_set_info *info; int i; - nd_set = devm_kzalloc(dev, sizeof(*nd_set), GFP_KERNEL); - if (!nd_set) - return -ENOMEM; - guid_copy(&nd_set->type_guid, (guid_t *) spa->range_guid); - - info = devm_kzalloc(dev, sizeof_nfit_set_info(nr), GFP_KERNEL); - if (!info) + if (!info || !info2) return -ENOMEM; - info2 = devm_kzalloc(dev, sizeof_nfit_set_info2(nr), GFP_KERNEL); - if (!info2) + nd_set = devm_kzalloc(dev, sizeof(*nd_set), GFP_KERNEL); + if (!nd_set) return -ENOMEM; + import_guid(&nd_set->type_guid, spa->range_guid); for (i = 0; i < nr; i++) { struct nd_mapping_desc *mapping = &ndr_desc->mapping[i]; - struct nfit_set_info_map *map = &info->mapping[i]; - struct nfit_set_info_map2 *map2 = &info2->mapping[i]; struct nvdimm *nvdimm = mapping->nvdimm; struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); - struct acpi_nfit_memory_map *memdev = memdev_from_spa(acpi_desc, - spa->range_index, i); + struct nfit_set_info *map = &info[i]; + struct nfit_set_info2 *map2 = &info2[i]; + struct acpi_nfit_memory_map *memdev = + memdev_from_spa(acpi_desc, spa->range_index, i); struct acpi_nfit_control_region *dcr = nfit_mem->dcr; if (!memdev || !nfit_mem->dcr) { 
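
The nfit_set_info/nfit_set_info2 definitions above drop their nested mapping[0] flexible arrays and become the per-mapping element type themselves, which is why acpi_nfit_init_interleave_set() can now allocate them with kcalloc(nr, sizeof(*info), ...), hand them straight to sort(), and lean on __free(kfree) scope-based cleanup instead of the removed sizeof_nfit_set_info() helpers and devm allocations. A short sketch of that allocation/sort idiom with an illustrative element type (not the driver's struct):

#include <linux/cleanup.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/types.h>

struct example_map {			/* stand-in for nfit_set_info */
	u64 region_offset;
	u32 serial_number;
};

static int cmp_example(const void *a, const void *b)
{
	const struct example_map *m0 = a, *m1 = b;

	if (m0->region_offset < m1->region_offset)
		return -1;
	if (m0->region_offset > m1->region_offset)
		return 1;
	return 0;
}

static int example_build_set(unsigned int nr)
{
	/* Freed automatically when 'info' goes out of scope, even on error. */
	struct example_map *info __free(kfree) =
		kcalloc(nr, sizeof(*info), GFP_KERNEL);

	if (!info)
		return -ENOMEM;

	/* ... fill info[0..nr-1] from the mappings ... */
	sort(info, nr, sizeof(*info), cmp_example, NULL);
	return 0;
}
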
@@ -2262,23 +2304,20 @@ static int acpi_nfit_init_interleave_set(struct acpi_nfit_desc *acpi_desc, } /* v1.1 namespaces */ - sort(&info->mapping[0], nr, sizeof(struct nfit_set_info_map), - cmp_map, NULL); - nd_set->cookie1 = nd_fletcher64(info, sizeof_nfit_set_info(nr), 0); + sort(info, nr, sizeof(*info), cmp_map, NULL); + nd_set->cookie1 = nd_fletcher64(info, sizeof(*info) * nr, 0); /* v1.2 namespaces */ - sort(&info2->mapping[0], nr, sizeof(struct nfit_set_info_map2), - cmp_map2, NULL); - nd_set->cookie2 = nd_fletcher64(info2, sizeof_nfit_set_info2(nr), 0); + sort(info2, nr, sizeof(*info2), cmp_map2, NULL); + nd_set->cookie2 = nd_fletcher64(info2, sizeof(*info2) * nr, 0); /* support v1.1 namespaces created with the wrong sort order */ - sort(&info->mapping[0], nr, sizeof(struct nfit_set_info_map), - cmp_map_compat, NULL); - nd_set->altcookie = nd_fletcher64(info, sizeof_nfit_set_info(nr), 0); + sort(info, nr, sizeof(*info), cmp_map_compat, NULL); + nd_set->altcookie = nd_fletcher64(info, sizeof(*info) * nr, 0); /* record the result of the sort for the mapping position */ for (i = 0; i < nr; i++) { - struct nfit_set_info_map2 *map2 = &info2->mapping[i]; + struct nfit_set_info2 *map2 = &info2[i]; int j; for (j = 0; j < nr; j++) { @@ -2299,274 +2338,6 @@ static int acpi_nfit_init_interleave_set(struct acpi_nfit_desc *acpi_desc, } ndr_desc->nd_set = nd_set; - devm_kfree(dev, info); - devm_kfree(dev, info2); - - return 0; -} - -static u64 to_interleave_offset(u64 offset, struct nfit_blk_mmio *mmio) -{ - struct acpi_nfit_interleave *idt = mmio->idt; - u32 sub_line_offset, line_index, line_offset; - u64 line_no, table_skip_count, table_offset; - - line_no = div_u64_rem(offset, mmio->line_size, &sub_line_offset); - table_skip_count = div_u64_rem(line_no, mmio->num_lines, &line_index); - line_offset = idt->line_offset[line_index] - * mmio->line_size; - table_offset = table_skip_count * mmio->table_size; - - return mmio->base_offset + line_offset + table_offset + sub_line_offset; -} - -static u32 read_blk_stat(struct nfit_blk *nfit_blk, unsigned int bw) -{ - struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR]; - u64 offset = nfit_blk->stat_offset + mmio->size * bw; - const u32 STATUS_MASK = 0x80000037; - - if (mmio->num_lines) - offset = to_interleave_offset(offset, mmio); - - return readl(mmio->addr.base + offset) & STATUS_MASK; -} - -static void write_blk_ctl(struct nfit_blk *nfit_blk, unsigned int bw, - resource_size_t dpa, unsigned int len, unsigned int write) -{ - u64 cmd, offset; - struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR]; - - enum { - BCW_OFFSET_MASK = (1ULL << 48)-1, - BCW_LEN_SHIFT = 48, - BCW_LEN_MASK = (1ULL << 8) - 1, - BCW_CMD_SHIFT = 56, - }; - - cmd = (dpa >> L1_CACHE_SHIFT) & BCW_OFFSET_MASK; - len = len >> L1_CACHE_SHIFT; - cmd |= ((u64) len & BCW_LEN_MASK) << BCW_LEN_SHIFT; - cmd |= ((u64) write) << BCW_CMD_SHIFT; - - offset = nfit_blk->cmd_offset + mmio->size * bw; - if (mmio->num_lines) - offset = to_interleave_offset(offset, mmio); - - writeq(cmd, mmio->addr.base + offset); - nvdimm_flush(nfit_blk->nd_region); - - if (nfit_blk->dimm_flags & NFIT_BLK_DCR_LATCH) - readq(mmio->addr.base + offset); -} - -static int acpi_nfit_blk_single_io(struct nfit_blk *nfit_blk, - resource_size_t dpa, void *iobuf, size_t len, int rw, - unsigned int lane) -{ - struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW]; - unsigned int copied = 0; - u64 base_offset; - int rc; - - base_offset = nfit_blk->bdw_offset + dpa % L1_CACHE_BYTES - + lane * mmio->size; - write_blk_ctl(nfit_blk, lane, dpa, 
len, rw); - while (len) { - unsigned int c; - u64 offset; - - if (mmio->num_lines) { - u32 line_offset; - - offset = to_interleave_offset(base_offset + copied, - mmio); - div_u64_rem(offset, mmio->line_size, &line_offset); - c = min_t(size_t, len, mmio->line_size - line_offset); - } else { - offset = base_offset + nfit_blk->bdw_offset; - c = len; - } - - if (rw) - memcpy_flushcache(mmio->addr.aperture + offset, iobuf + copied, c); - else { - if (nfit_blk->dimm_flags & NFIT_BLK_READ_FLUSH) - arch_invalidate_pmem((void __force *) - mmio->addr.aperture + offset, c); - - memcpy(iobuf + copied, mmio->addr.aperture + offset, c); - } - - copied += c; - len -= c; - } - - if (rw) - nvdimm_flush(nfit_blk->nd_region); - - rc = read_blk_stat(nfit_blk, lane) ? -EIO : 0; - return rc; -} - -static int acpi_nfit_blk_region_do_io(struct nd_blk_region *ndbr, - resource_size_t dpa, void *iobuf, u64 len, int rw) -{ - struct nfit_blk *nfit_blk = nd_blk_region_provider_data(ndbr); - struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW]; - struct nd_region *nd_region = nfit_blk->nd_region; - unsigned int lane, copied = 0; - int rc = 0; - - lane = nd_region_acquire_lane(nd_region); - while (len) { - u64 c = min(len, mmio->size); - - rc = acpi_nfit_blk_single_io(nfit_blk, dpa + copied, - iobuf + copied, c, rw, lane); - if (rc) - break; - - copied += c; - len -= c; - } - nd_region_release_lane(nd_region, lane); - - return rc; -} - -static int nfit_blk_init_interleave(struct nfit_blk_mmio *mmio, - struct acpi_nfit_interleave *idt, u16 interleave_ways) -{ - if (idt) { - mmio->num_lines = idt->line_count; - mmio->line_size = idt->line_size; - if (interleave_ways == 0) - return -ENXIO; - mmio->table_size = mmio->num_lines * interleave_ways - * mmio->line_size; - } - - return 0; -} - -static int acpi_nfit_blk_get_flags(struct nvdimm_bus_descriptor *nd_desc, - struct nvdimm *nvdimm, struct nfit_blk *nfit_blk) -{ - struct nd_cmd_dimm_flags flags; - int rc; - - memset(&flags, 0, sizeof(flags)); - rc = nd_desc->ndctl(nd_desc, nvdimm, ND_CMD_DIMM_FLAGS, &flags, - sizeof(flags), NULL); - - if (rc >= 0 && flags.status == 0) - nfit_blk->dimm_flags = flags.flags; - else if (rc == -ENOTTY) { - /* fall back to a conservative default */ - nfit_blk->dimm_flags = NFIT_BLK_DCR_LATCH | NFIT_BLK_READ_FLUSH; - rc = 0; - } else - rc = -ENXIO; - - return rc; -} - -static int acpi_nfit_blk_region_enable(struct nvdimm_bus *nvdimm_bus, - struct device *dev) -{ - struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus); - struct nd_blk_region *ndbr = to_nd_blk_region(dev); - struct nfit_blk_mmio *mmio; - struct nfit_blk *nfit_blk; - struct nfit_mem *nfit_mem; - struct nvdimm *nvdimm; - int rc; - - nvdimm = nd_blk_region_to_dimm(ndbr); - nfit_mem = nvdimm_provider_data(nvdimm); - if (!nfit_mem || !nfit_mem->dcr || !nfit_mem->bdw) { - dev_dbg(dev, "missing%s%s%s\n", - nfit_mem ? "" : " nfit_mem", - (nfit_mem && nfit_mem->dcr) ? "" : " dcr", - (nfit_mem && nfit_mem->bdw) ? 
"" : " bdw"); - return -ENXIO; - } - - nfit_blk = devm_kzalloc(dev, sizeof(*nfit_blk), GFP_KERNEL); - if (!nfit_blk) - return -ENOMEM; - nd_blk_region_set_provider_data(ndbr, nfit_blk); - nfit_blk->nd_region = to_nd_region(dev); - - /* map block aperture memory */ - nfit_blk->bdw_offset = nfit_mem->bdw->offset; - mmio = &nfit_blk->mmio[BDW]; - mmio->addr.base = devm_nvdimm_memremap(dev, nfit_mem->spa_bdw->address, - nfit_mem->spa_bdw->length, nd_blk_memremap_flags(ndbr)); - if (!mmio->addr.base) { - dev_dbg(dev, "%s failed to map bdw\n", - nvdimm_name(nvdimm)); - return -ENOMEM; - } - mmio->size = nfit_mem->bdw->size; - mmio->base_offset = nfit_mem->memdev_bdw->region_offset; - mmio->idt = nfit_mem->idt_bdw; - mmio->spa = nfit_mem->spa_bdw; - rc = nfit_blk_init_interleave(mmio, nfit_mem->idt_bdw, - nfit_mem->memdev_bdw->interleave_ways); - if (rc) { - dev_dbg(dev, "%s failed to init bdw interleave\n", - nvdimm_name(nvdimm)); - return rc; - } - - /* map block control memory */ - nfit_blk->cmd_offset = nfit_mem->dcr->command_offset; - nfit_blk->stat_offset = nfit_mem->dcr->status_offset; - mmio = &nfit_blk->mmio[DCR]; - mmio->addr.base = devm_nvdimm_ioremap(dev, nfit_mem->spa_dcr->address, - nfit_mem->spa_dcr->length); - if (!mmio->addr.base) { - dev_dbg(dev, "%s failed to map dcr\n", - nvdimm_name(nvdimm)); - return -ENOMEM; - } - mmio->size = nfit_mem->dcr->window_size; - mmio->base_offset = nfit_mem->memdev_dcr->region_offset; - mmio->idt = nfit_mem->idt_dcr; - mmio->spa = nfit_mem->spa_dcr; - rc = nfit_blk_init_interleave(mmio, nfit_mem->idt_dcr, - nfit_mem->memdev_dcr->interleave_ways); - if (rc) { - dev_dbg(dev, "%s failed to init dcr interleave\n", - nvdimm_name(nvdimm)); - return rc; - } - - rc = acpi_nfit_blk_get_flags(nd_desc, nvdimm, nfit_blk); - if (rc < 0) { - dev_dbg(dev, "%s failed get DIMM flags\n", - nvdimm_name(nvdimm)); - return rc; - } - - if (nvdimm_has_flush(nfit_blk->nd_region) < 0) - dev_warn(dev, "unable to guarantee persistence of writes\n"); - - if (mmio->line_size == 0) - return 0; - - if ((u32) nfit_blk->cmd_offset % mmio->line_size - + 8 > mmio->line_size) { - dev_dbg(dev, "cmd_offset crosses interleave boundary\n"); - return -ENXIO; - } else if ((u32) nfit_blk->stat_offset % mmio->line_size - + 8 > mmio->line_size) { - dev_dbg(dev, "stat_offset crosses interleave boundary\n"); - return -ENXIO; - } return 0; } @@ -2613,7 +2384,10 @@ static int ars_start(struct acpi_nfit_desc *acpi_desc, if (rc < 0) return rc; - return cmd_rc; + if (cmd_rc < 0) + return cmd_rc; + set_bit(ARS_VALID, &acpi_desc->scrub_flags); + return 0; } static int ars_continue(struct acpi_nfit_desc *acpi_desc) @@ -2623,11 +2397,11 @@ static int ars_continue(struct acpi_nfit_desc *acpi_desc) struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc; struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status; - memset(&ars_start, 0, sizeof(ars_start)); - ars_start.address = ars_status->restart_address; - ars_start.length = ars_status->restart_length; - ars_start.type = ars_status->type; - ars_start.flags = acpi_desc->ars_start_flags; + ars_start = (struct nd_cmd_ars_start) { + .address = ars_status->restart_address, + .length = ars_status->restart_length, + .type = ars_status->type, + }; rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_START, &ars_start, sizeof(ars_start), &cmd_rc); if (rc < 0) @@ -2706,6 +2480,17 @@ static int ars_status_process_records(struct acpi_nfit_desc *acpi_desc) */ if (ars_status->out_length < 44) return 0; + + /* + * Ignore potentially stale results that are only 
refreshed + * after a start-ARS event. + */ + if (!test_and_clear_bit(ARS_VALID, &acpi_desc->scrub_flags)) { + dev_dbg(acpi_desc->dev, "skip %d stale records\n", + ars_status->num_records); + return 0; + } + for (i = 0; i < ars_status->num_records; i++) { /* only process full records */ if (ars_status->out_length @@ -2773,9 +2558,6 @@ static int acpi_nfit_init_mapping(struct acpi_nfit_desc *acpi_desc, struct nvdimm *nvdimm = acpi_nfit_dimm_by_handle(acpi_desc, memdev->device_handle); struct acpi_nfit_system_address *spa = nfit_spa->spa; - struct nd_blk_region_desc *ndbr_desc; - struct nfit_mem *nfit_mem; - int rc; if (!nvdimm) { dev_err(acpi_desc->dev, "spa%d dimm: %#x not found\n", @@ -2790,30 +2572,6 @@ static int acpi_nfit_init_mapping(struct acpi_nfit_desc *acpi_desc, mapping->start = memdev->address; mapping->size = memdev->region_size; break; - case NFIT_SPA_DCR: - nfit_mem = nvdimm_provider_data(nvdimm); - if (!nfit_mem || !nfit_mem->bdw) { - dev_dbg(acpi_desc->dev, "spa%d %s missing bdw\n", - spa->range_index, nvdimm_name(nvdimm)); - break; - } - - mapping->size = nfit_mem->bdw->capacity; - mapping->start = nfit_mem->bdw->start_address; - ndr_desc->num_lanes = nfit_mem->bdw->windows; - ndr_desc->mapping = mapping; - ndr_desc->num_mappings = 1; - ndbr_desc = to_blk_region_desc(ndr_desc); - ndbr_desc->enable = acpi_nfit_blk_region_enable; - ndbr_desc->do_io = acpi_desc->blk_do_io; - rc = acpi_nfit_init_interleave_set(acpi_desc, ndr_desc, spa); - if (rc) - return rc; - nfit_spa->nd_region = nvdimm_blk_region_create(acpi_desc->nvdimm_bus, - ndr_desc); - if (!nfit_spa->nd_region) - return -ENOMEM; - break; } return 0; @@ -2839,8 +2597,7 @@ static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc, { static struct nd_mapping_desc mappings[ND_MAX_MAPPINGS]; struct acpi_nfit_system_address *spa = nfit_spa->spa; - struct nd_blk_region_desc ndbr_desc; - struct nd_region_desc *ndr_desc; + struct nd_region_desc *ndr_desc, _ndr_desc; struct nfit_memdev *nfit_memdev; struct nvdimm_bus *nvdimm_bus; struct resource res; @@ -2856,18 +2613,32 @@ static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc, memset(&res, 0, sizeof(res)); memset(&mappings, 0, sizeof(mappings)); - memset(&ndbr_desc, 0, sizeof(ndbr_desc)); + memset(&_ndr_desc, 0, sizeof(_ndr_desc)); res.start = spa->address; res.end = res.start + spa->length - 1; - ndr_desc = &ndbr_desc.ndr_desc; + ndr_desc = &_ndr_desc; ndr_desc->res = &res; ndr_desc->provider_data = nfit_spa; ndr_desc->attr_groups = acpi_nfit_region_attribute_groups; - if (spa->flags & ACPI_NFIT_PROXIMITY_VALID) - ndr_desc->numa_node = acpi_map_pxm_to_online_node( - spa->proximity_domain); - else + if (spa->flags & ACPI_NFIT_PROXIMITY_VALID) { + ndr_desc->numa_node = pxm_to_online_node(spa->proximity_domain); + ndr_desc->target_node = pxm_to_node(spa->proximity_domain); + } else { ndr_desc->numa_node = NUMA_NO_NODE; + ndr_desc->target_node = NUMA_NO_NODE; + } + + /* Fallback to address based numa information if node lookup failed */ + if (ndr_desc->numa_node == NUMA_NO_NODE) { + ndr_desc->numa_node = memory_add_physaddr_to_nid(spa->address); + dev_info(acpi_desc->dev, "changing numa node from %d to %d for nfit region [%pa-%pa]", + NUMA_NO_NODE, ndr_desc->numa_node, &res.start, &res.end); + } + if (ndr_desc->target_node == NUMA_NO_NODE) { + ndr_desc->target_node = phys_to_target_node(spa->address); + dev_info(acpi_desc->dev, "changing target node from %d to %d for nfit region [%pa-%pa]", + NUMA_NO_NODE, ndr_desc->target_node, &res.start, &res.end); 
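
The register_region hunk above gains an address-based fallback: when the SPA's _PXM proximity domain does not map to an online node, the region's numa_node/target_node are derived from its base physical address instead of being left at NUMA_NO_NODE. A hedged sketch of that fallback, assuming the standard helpers (memory_add_physaddr_to_nid(), and analogously phys_to_target_node() for the target node):

#include <linux/memory_hotplug.h>
#include <linux/numa.h>

/* Sketch: prefer the firmware proximity domain, fall back to the address. */
static int example_region_nid(int pxm_node, phys_addr_t base)
{
	int nid = pxm_node;

	if (nid == NUMA_NO_NODE)
		nid = memory_add_physaddr_to_nid(base);
	return nid;
}
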
+ } /* * Persistence domain bits are hierarchical, if @@ -2883,6 +2654,9 @@ static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc, struct acpi_nfit_memory_map *memdev = nfit_memdev->memdev; struct nd_mapping_desc *mapping; + /* range index 0 == unmapped in SPA or invalid-SPA */ + if (memdev->range_index == 0 || spa->range_index == 0) + continue; if (memdev->range_index != spa->range_index) continue; if (count >= ND_MAX_MAPPINGS) { @@ -2976,14 +2750,16 @@ static int ars_register(struct acpi_nfit_desc *acpi_desc, { int rc; - if (no_init_ars || test_bit(ARS_FAILED, &nfit_spa->ars_state)) + if (test_bit(ARS_FAILED, &nfit_spa->ars_state)) return acpi_nfit_register_region(acpi_desc, nfit_spa); set_bit(ARS_REQ_SHORT, &nfit_spa->ars_state); - set_bit(ARS_REQ_LONG, &nfit_spa->ars_state); + if (!no_init_ars) + set_bit(ARS_REQ_LONG, &nfit_spa->ars_state); switch (acpi_nfit_query_poison(acpi_desc)) { case 0: + case -ENOSPC: case -EAGAIN: rc = ars_start(acpi_desc, nfit_spa, ARS_REQ_SHORT); /* shouldn't happen, try again later */ @@ -3008,7 +2784,6 @@ static int ars_register(struct acpi_nfit_desc *acpi_desc, break; case -EBUSY: case -ENOMEM: - case -ENOSPC: /* * BIOS was using ARS, wait for it to complete (or * resources to become available) and then perform our @@ -3043,7 +2818,7 @@ static unsigned int __acpi_nfit_scrub(struct acpi_nfit_desc *acpi_desc, lockdep_assert_held(&acpi_desc->init_mutex); - if (acpi_desc->cancel) + if (test_bit(ARS_CANCEL, &acpi_desc->scrub_flags)) return 0; if (query_rc == -EBUSY) { @@ -3117,7 +2892,7 @@ static void __sched_ars(struct acpi_nfit_desc *acpi_desc, unsigned int tmo) { lockdep_assert_held(&acpi_desc->init_mutex); - acpi_desc->scrub_busy = 1; + set_bit(ARS_BUSY, &acpi_desc->scrub_flags); /* note this should only be set from within the workqueue */ if (tmo) acpi_desc->scrub_tmo = tmo; @@ -3133,7 +2908,7 @@ static void notify_ars_done(struct acpi_nfit_desc *acpi_desc) { lockdep_assert_held(&acpi_desc->init_mutex); - acpi_desc->scrub_busy = 0; + clear_bit(ARS_BUSY, &acpi_desc->scrub_flags); acpi_desc->scrub_count++; if (acpi_desc->scrub_count_state) sysfs_notify_dirent(acpi_desc->scrub_count_state); @@ -3154,6 +2929,7 @@ static void acpi_nfit_scrub(struct work_struct *work) else notify_ars_done(acpi_desc); memset(acpi_desc->ars_status, 0, acpi_desc->max_ars); + clear_bit(ARS_POLL, &acpi_desc->scrub_flags); mutex_unlock(&acpi_desc->init_mutex); } @@ -3186,8 +2962,9 @@ static void acpi_nfit_init_ars(struct acpi_nfit_desc *acpi_desc, static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc) { struct nfit_spa *nfit_spa; - int rc; + int rc, do_sched_ars = 0; + set_bit(ARS_VALID, &acpi_desc->scrub_flags); list_for_each_entry(nfit_spa, &acpi_desc->spas, list) { switch (nfit_spa_type(nfit_spa->spa)) { case NFIT_SPA_VOLATILE: @@ -3197,7 +2974,7 @@ static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc) } } - list_for_each_entry(nfit_spa, &acpi_desc->spas, list) + list_for_each_entry(nfit_spa, &acpi_desc->spas, list) { switch (nfit_spa_type(nfit_spa->spa)) { case NFIT_SPA_VOLATILE: case NFIT_SPA_PM: @@ -3205,6 +2982,13 @@ static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc) rc = ars_register(acpi_desc, nfit_spa); if (rc) return rc; + + /* + * Kick off background ARS if at least one + * region successfully registered ARS + */ + if (!test_bit(ARS_FAILED, &nfit_spa->ars_state)) + do_sched_ars++; break; case NFIT_SPA_BDW: /* nothing to register */ @@ -3223,8 +3007,10 @@ static int acpi_nfit_register_regions(struct 
acpi_nfit_desc *acpi_desc) /* don't register unknown regions */ break; } + } - sched_ars(acpi_desc); + if (do_sched_ars) + sched_ars(acpi_desc); return 0; } @@ -3397,7 +3183,10 @@ static int __acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc, return 0; } -/* prevent security commands from being issued via ioctl */ +/* + * Prevent security and firmware activate commands from being issued via + * ioctl. + */ static int acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm, unsigned int cmd, void *buf) { @@ -3407,10 +3196,16 @@ static int acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc, if (nvdimm && cmd == ND_CMD_CALL && call_pkg->nd_family == NVDIMM_FAMILY_INTEL) { func = call_pkg->nd_command; - if ((1 << func) & NVDIMM_INTEL_SECURITY_CMDMASK) + if (func > NVDIMM_CMD_MAX || + (1 << func) & NVDIMM_INTEL_DENY_CMDMASK) return -EOPNOTSUPP; } + /* block all non-nfit bus commands */ + if (!nvdimm && cmd == ND_CMD_CALL && + call_pkg->nd_family != NVDIMM_BUS_FAMILY_NFIT) + return -EOPNOTSUPP; + return __acpi_nfit_clear_to_send(nd_desc, nvdimm, cmd); } @@ -3422,7 +3217,7 @@ int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc, struct nfit_spa *nfit_spa; mutex_lock(&acpi_desc->init_mutex); - if (acpi_desc->cancel) { + if (test_bit(ARS_CANCEL, &acpi_desc->scrub_flags)) { mutex_unlock(&acpi_desc->init_mutex); return 0; } @@ -3459,7 +3254,6 @@ void acpi_nfit_desc_init(struct acpi_nfit_desc *acpi_desc, struct device *dev) dev_set_drvdata(dev, acpi_desc); acpi_desc->dev = dev; - acpi_desc->blk_do_io = acpi_nfit_blk_region_do_io; nd_desc = &acpi_desc->nd_desc; nd_desc->provider_name = "ACPI.NFIT"; nd_desc->module = THIS_MODULE; @@ -3487,6 +3281,23 @@ static void acpi_nfit_put_table(void *table) acpi_put_table(table); } +static void acpi_nfit_notify(acpi_handle handle, u32 event, void *data) +{ + struct acpi_device *adev = data; + + device_lock(&adev->dev); + __acpi_nfit_notify(&adev->dev, handle, event); + device_unlock(&adev->dev); +} + +static void acpi_nfit_remove_notify_handler(void *data) +{ + struct acpi_device *adev = data; + + acpi_dev_remove_notify_handler(adev, ACPI_DEVICE_NOTIFY, + acpi_nfit_notify); +} + void acpi_nfit_shutdown(void *data) { struct acpi_nfit_desc *acpi_desc = data; @@ -3501,9 +3312,9 @@ void acpi_nfit_shutdown(void *data) mutex_unlock(&acpi_desc_lock); mutex_lock(&acpi_desc->init_mutex); - acpi_desc->cancel = 1; - cancel_delayed_work_sync(&acpi_desc->dwork); + set_bit(ARS_CANCEL, &acpi_desc->scrub_flags); mutex_unlock(&acpi_desc->init_mutex); + cancel_delayed_work_sync(&acpi_desc->dwork); /* * Bounce the nvdimm bus lock to make sure any in-flight @@ -3527,6 +3338,16 @@ static int acpi_nfit_add(struct acpi_device *adev) acpi_size sz; int rc = 0; + rc = acpi_dev_install_notify_handler(adev, ACPI_DEVICE_NOTIFY, + acpi_nfit_notify, adev); + if (rc) + return rc; + + rc = devm_add_action_or_reset(dev, acpi_nfit_remove_notify_handler, + adev); + if (rc) + return rc; + status = acpi_get_table(ACPI_SIG_NFIT, 0, &tbl); if (ACPI_FAILURE(status)) { /* The NVDIMM root device allows OS to trigger enumeration of @@ -3573,13 +3394,8 @@ static int acpi_nfit_add(struct acpi_device *adev) if (rc) return rc; - return devm_add_action_or_reset(dev, acpi_nfit_shutdown, acpi_desc); -} -static int acpi_nfit_remove(struct acpi_device *adev) -{ - /* see acpi_nfit_unregister */ - return 0; + return devm_add_action_or_reset(dev, acpi_nfit_shutdown, acpi_desc); } static void acpi_nfit_update_notify(struct device *dev, acpi_handle handle) @@ 
-3652,13 +3468,6 @@ void __acpi_nfit_notify(struct device *dev, acpi_handle handle, u32 event) } EXPORT_SYMBOL_GPL(__acpi_nfit_notify); -static void acpi_nfit_notify(struct acpi_device *adev, u32 event) -{ - device_lock(&adev->dev); - __acpi_nfit_notify(&adev->dev, adev->handle, event); - device_unlock(&adev->dev); -} - static const struct acpi_device_id acpi_nfit_ids[] = { { "ACPI0012", 0 }, { "", 0 }, @@ -3670,8 +3479,6 @@ static struct acpi_driver acpi_nfit_driver = { .ids = acpi_nfit_ids, .ops = { .add = acpi_nfit_add, - .remove = acpi_nfit_remove, - .notify = acpi_nfit_notify, }, }; @@ -3680,10 +3487,10 @@ static __init int nfit_init(void) int ret; BUILD_BUG_ON(sizeof(struct acpi_table_nfit) != 40); - BUILD_BUG_ON(sizeof(struct acpi_nfit_system_address) != 56); + BUILD_BUG_ON(sizeof(struct acpi_nfit_system_address) != 64); BUILD_BUG_ON(sizeof(struct acpi_nfit_memory_map) != 48); - BUILD_BUG_ON(sizeof(struct acpi_nfit_interleave) != 20); - BUILD_BUG_ON(sizeof(struct acpi_nfit_smbios) != 9); + BUILD_BUG_ON(sizeof(struct acpi_nfit_interleave) != 16); + BUILD_BUG_ON(sizeof(struct acpi_nfit_smbios) != 8); BUILD_BUG_ON(sizeof(struct acpi_nfit_control_region) != 80); BUILD_BUG_ON(sizeof(struct acpi_nfit_data_region) != 40); BUILD_BUG_ON(sizeof(struct acpi_nfit_capabilities) != 16); @@ -3701,6 +3508,8 @@ static __init int nfit_init(void) guid_parse(UUID_NFIT_DIMM_N_HPE1, &nfit_uuid[NFIT_DEV_DIMM_N_HPE1]); guid_parse(UUID_NFIT_DIMM_N_HPE2, &nfit_uuid[NFIT_DEV_DIMM_N_HPE2]); guid_parse(UUID_NFIT_DIMM_N_MSFT, &nfit_uuid[NFIT_DEV_DIMM_N_MSFT]); + guid_parse(UUID_NFIT_DIMM_N_HYPERV, &nfit_uuid[NFIT_DEV_DIMM_N_HYPERV]); + guid_parse(UUID_INTEL_BUS, &nfit_uuid[NFIT_BUS_INTEL]); nfit_wq = create_singlethread_workqueue("nfit"); if (!nfit_wq) @@ -3727,5 +3536,6 @@ static __exit void nfit_exit(void) module_init(nfit_init); module_exit(nfit_exit); +MODULE_DESCRIPTION("ACPI NVDIMM Firmware Interface Table (NFIT) driver"); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Intel Corporation"); diff --git a/drivers/acpi/nfit/intel.c b/drivers/acpi/nfit/intel.c index f70de71f79d6..bce6f6a18426 100644 --- a/drivers/acpi/nfit/intel.c +++ b/drivers/acpi/nfit/intel.c @@ -3,18 +3,61 @@ #include <linux/libnvdimm.h> #include <linux/ndctl.h> #include <linux/acpi.h> +#include <linux/memregion.h> #include <asm/smp.h> #include "intel.h" #include "nfit.h" -static enum nvdimm_security_state intel_security_state(struct nvdimm *nvdimm, +static ssize_t firmware_activate_noidle_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev); + struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus); + struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); + + return sprintf(buf, "%s\n", acpi_desc->fwa_noidle ? 
"Y" : "N"); +} + +static ssize_t firmware_activate_noidle_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t size) +{ + struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev); + struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus); + struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); + ssize_t rc; + bool val; + + rc = kstrtobool(buf, &val); + if (rc) + return rc; + if (val != acpi_desc->fwa_noidle) + acpi_desc->fwa_cap = NVDIMM_FWA_CAP_INVALID; + acpi_desc->fwa_noidle = val; + return size; +} +DEVICE_ATTR_RW(firmware_activate_noidle); + +bool intel_fwa_supported(struct nvdimm_bus *nvdimm_bus) +{ + struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus); + struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); + unsigned long *mask; + + if (!test_bit(NVDIMM_BUS_FAMILY_INTEL, &nd_desc->bus_family_mask)) + return false; + + mask = &acpi_desc->family_dsm_mask[NVDIMM_BUS_FAMILY_INTEL]; + return *mask == NVDIMM_BUS_INTEL_FW_ACTIVATE_CMDMASK; +} + +static unsigned long intel_security_flags(struct nvdimm *nvdimm, enum nvdimm_passphrase_type ptype) { struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); - struct { - struct nd_cmd_pkg pkg; + unsigned long security_flags = 0; + TRAILING_OVERLAP(struct nd_cmd_pkg, pkg, nd_payload, struct nd_intel_get_security_state cmd; - } nd_cmd = { + ) nd_cmd = { .pkg = { .nd_command = NVDIMM_INTEL_GET_SECURITY_STATE, .nd_family = NVDIMM_FAMILY_INTEL, @@ -27,7 +70,7 @@ static enum nvdimm_security_state intel_security_state(struct nvdimm *nvdimm, int rc; if (!test_bit(NVDIMM_INTEL_GET_SECURITY_STATE, &nfit_mem->dsm_mask)) - return -ENXIO; + return 0; /* * Short circuit the state retrieval while we are doing overwrite. @@ -35,47 +78,50 @@ static enum nvdimm_security_state intel_security_state(struct nvdimm *nvdimm, * until the overwrite DSM completes. 
*/ if (nvdimm_in_overwrite(nvdimm) && ptype == NVDIMM_USER) - return NVDIMM_SECURITY_OVERWRITE; + return BIT(NVDIMM_SECURITY_OVERWRITE); rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL); - if (rc < 0) - return rc; - if (nd_cmd.cmd.status) - return -EIO; + if (rc < 0 || nd_cmd.cmd.status) { + pr_err("%s: security state retrieval failed (%d:%#x)\n", + nvdimm_name(nvdimm), rc, nd_cmd.cmd.status); + return 0; + } /* check and see if security is enabled and locked */ if (ptype == NVDIMM_MASTER) { if (nd_cmd.cmd.extended_state & ND_INTEL_SEC_ESTATE_ENABLED) - return NVDIMM_SECURITY_UNLOCKED; - else if (nd_cmd.cmd.extended_state & - ND_INTEL_SEC_ESTATE_PLIMIT) - return NVDIMM_SECURITY_FROZEN; - } else { - if (nd_cmd.cmd.state & ND_INTEL_SEC_STATE_UNSUPPORTED) - return -ENXIO; - else if (nd_cmd.cmd.state & ND_INTEL_SEC_STATE_ENABLED) { - if (nd_cmd.cmd.state & ND_INTEL_SEC_STATE_LOCKED) - return NVDIMM_SECURITY_LOCKED; - else if (nd_cmd.cmd.state & ND_INTEL_SEC_STATE_FROZEN - || nd_cmd.cmd.state & - ND_INTEL_SEC_STATE_PLIMIT) - return NVDIMM_SECURITY_FROZEN; - else - return NVDIMM_SECURITY_UNLOCKED; - } + set_bit(NVDIMM_SECURITY_UNLOCKED, &security_flags); + else + set_bit(NVDIMM_SECURITY_DISABLED, &security_flags); + if (nd_cmd.cmd.extended_state & ND_INTEL_SEC_ESTATE_PLIMIT) + set_bit(NVDIMM_SECURITY_FROZEN, &security_flags); + return security_flags; } - /* this should cover master security disabled as well */ - return NVDIMM_SECURITY_DISABLED; + if (nd_cmd.cmd.state & ND_INTEL_SEC_STATE_UNSUPPORTED) + return 0; + + if (nd_cmd.cmd.state & ND_INTEL_SEC_STATE_ENABLED) { + if (nd_cmd.cmd.state & ND_INTEL_SEC_STATE_FROZEN || + nd_cmd.cmd.state & ND_INTEL_SEC_STATE_PLIMIT) + set_bit(NVDIMM_SECURITY_FROZEN, &security_flags); + + if (nd_cmd.cmd.state & ND_INTEL_SEC_STATE_LOCKED) + set_bit(NVDIMM_SECURITY_LOCKED, &security_flags); + else + set_bit(NVDIMM_SECURITY_UNLOCKED, &security_flags); + } else + set_bit(NVDIMM_SECURITY_DISABLED, &security_flags); + + return security_flags; } static int intel_security_freeze(struct nvdimm *nvdimm) { struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); - struct { - struct nd_cmd_pkg pkg; + TRAILING_OVERLAP(struct nd_cmd_pkg, pkg, nd_payload, struct nd_intel_freeze_lock cmd; - } nd_cmd = { + ) nd_cmd = { .pkg = { .nd_command = NVDIMM_INTEL_FREEZE_LOCK, .nd_family = NVDIMM_FAMILY_INTEL, @@ -105,10 +151,9 @@ static int intel_security_change_key(struct nvdimm *nvdimm, unsigned int cmd = ptype == NVDIMM_MASTER ? 
NVDIMM_INTEL_SET_MASTER_PASSPHRASE : NVDIMM_INTEL_SET_PASSPHRASE; - struct { - struct nd_cmd_pkg pkg; + TRAILING_OVERLAP(struct nd_cmd_pkg, pkg, nd_payload, struct nd_intel_set_passphrase cmd; - } nd_cmd = { + ) nd_cmd = { .pkg = { .nd_family = NVDIMM_FAMILY_INTEL, .nd_size_in = ND_INTEL_PASSPHRASE_SIZE * 2, @@ -122,9 +167,8 @@ static int intel_security_change_key(struct nvdimm *nvdimm, if (!test_bit(cmd, &nfit_mem->dsm_mask)) return -ENOTTY; - if (old_data) - memcpy(nd_cmd.cmd.old_pass, old_data->data, - sizeof(nd_cmd.cmd.old_pass)); + memcpy(nd_cmd.cmd.old_pass, old_data->data, + sizeof(nd_cmd.cmd.old_pass)); memcpy(nd_cmd.cmd.new_pass, new_data->data, sizeof(nd_cmd.cmd.new_pass)); rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL); @@ -144,16 +188,13 @@ static int intel_security_change_key(struct nvdimm *nvdimm, } } -static void nvdimm_invalidate_cache(void); - static int __maybe_unused intel_security_unlock(struct nvdimm *nvdimm, const struct nvdimm_key_data *key_data) { struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); - struct { - struct nd_cmd_pkg pkg; + TRAILING_OVERLAP(struct nd_cmd_pkg, pkg, nd_payload, struct nd_intel_unlock_unit cmd; - } nd_cmd = { + ) nd_cmd = { .pkg = { .nd_command = NVDIMM_INTEL_UNLOCK_UNIT, .nd_family = NVDIMM_FAMILY_INTEL, @@ -181,9 +222,6 @@ static int __maybe_unused intel_security_unlock(struct nvdimm *nvdimm, return -EIO; } - /* DIMM unlocked, invalidate all CPU caches before we read it */ - nvdimm_invalidate_cache(); - return 0; } @@ -192,10 +230,9 @@ static int intel_security_disable(struct nvdimm *nvdimm, { int rc; struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); - struct { - struct nd_cmd_pkg pkg; + TRAILING_OVERLAP(struct nd_cmd_pkg, pkg, nd_payload, struct nd_intel_disable_passphrase cmd; - } nd_cmd = { + ) nd_cmd = { .pkg = { .nd_command = NVDIMM_INTEL_DISABLE_PASSPHRASE, .nd_family = NVDIMM_FAMILY_INTEL, @@ -235,10 +272,9 @@ static int __maybe_unused intel_security_erase(struct nvdimm *nvdimm, struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); unsigned int cmd = ptype == NVDIMM_MASTER ? 
NVDIMM_INTEL_MASTER_SECURE_ERASE : NVDIMM_INTEL_SECURE_ERASE; - struct { - struct nd_cmd_pkg pkg; + TRAILING_OVERLAP(struct nd_cmd_pkg, pkg, nd_payload, struct nd_intel_secure_erase cmd; - } nd_cmd = { + ) nd_cmd = { .pkg = { .nd_family = NVDIMM_FAMILY_INTEL, .nd_size_in = ND_INTEL_PASSPHRASE_SIZE, @@ -251,8 +287,6 @@ static int __maybe_unused intel_security_erase(struct nvdimm *nvdimm, if (!test_bit(cmd, &nfit_mem->dsm_mask)) return -ENOTTY; - /* flush all cache before we erase DIMM */ - nvdimm_invalidate_cache(); memcpy(nd_cmd.cmd.passphrase, key->data, sizeof(nd_cmd.cmd.passphrase)); rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL); @@ -271,8 +305,6 @@ static int __maybe_unused intel_security_erase(struct nvdimm *nvdimm, return -ENXIO; } - /* DIMM erased, invalidate all CPU caches before we read it */ - nvdimm_invalidate_cache(); return 0; } @@ -280,10 +312,9 @@ static int __maybe_unused intel_security_query_overwrite(struct nvdimm *nvdimm) { int rc; struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); - struct { - struct nd_cmd_pkg pkg; + TRAILING_OVERLAP(struct nd_cmd_pkg, pkg, nd_payload, struct nd_intel_query_overwrite cmd; - } nd_cmd = { + ) nd_cmd = { .pkg = { .nd_command = NVDIMM_INTEL_QUERY_OVERWRITE, .nd_family = NVDIMM_FAMILY_INTEL, @@ -308,8 +339,6 @@ static int __maybe_unused intel_security_query_overwrite(struct nvdimm *nvdimm) return -ENXIO; } - /* flush all cache before we make the nvdimms available */ - nvdimm_invalidate_cache(); return 0; } @@ -318,10 +347,9 @@ static int __maybe_unused intel_security_overwrite(struct nvdimm *nvdimm, { int rc; struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); - struct { - struct nd_cmd_pkg pkg; + TRAILING_OVERLAP(struct nd_cmd_pkg, pkg, nd_payload, struct nd_intel_overwrite cmd; - } nd_cmd = { + ) nd_cmd = { .pkg = { .nd_command = NVDIMM_INTEL_OVERWRITE, .nd_family = NVDIMM_FAMILY_INTEL, @@ -334,11 +362,8 @@ static int __maybe_unused intel_security_overwrite(struct nvdimm *nvdimm, if (!test_bit(NVDIMM_INTEL_OVERWRITE, &nfit_mem->dsm_mask)) return -ENOTTY; - /* flush all cache before we erase DIMM */ - nvdimm_invalidate_cache(); - if (nkey) - memcpy(nd_cmd.cmd.passphrase, nkey->data, - sizeof(nd_cmd.cmd.passphrase)); + memcpy(nd_cmd.cmd.passphrase, nkey->data, + sizeof(nd_cmd.cmd.passphrase)); rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL); if (rc < 0) return rc; @@ -356,24 +381,8 @@ static int __maybe_unused intel_security_overwrite(struct nvdimm *nvdimm, } } -/* - * TODO: define a cross arch wbinvd equivalent when/if - * NVDIMM_FAMILY_INTEL command support arrives on another arch. 
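
For context on the intel.c rework above: intel_security_state() becomes intel_security_flags() and now returns an unsigned long bitmask of NVDIMM_SECURITY_* bits instead of a single enum value, so a DIMM can report, say, both "unlocked" and "frozen" at once, and failures collapse to an empty mask rather than a negative errno overloading the state. A small sketch of building such a mask (the helper is illustrative; the bit names are the libnvdimm ones used in the hunk):

#include <linux/bitops.h>
#include <linux/libnvdimm.h>

/* Sketch: fold independent security conditions into one flags word. */
static unsigned long example_security_flags(bool enabled, bool locked,
					    bool frozen)
{
	unsigned long flags = 0;

	if (!enabled) {
		set_bit(NVDIMM_SECURITY_DISABLED, &flags);
		return flags;
	}
	if (frozen)
		set_bit(NVDIMM_SECURITY_FROZEN, &flags);
	set_bit(locked ? NVDIMM_SECURITY_LOCKED : NVDIMM_SECURITY_UNLOCKED,
		&flags);
	return flags;
}
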
- */ -#ifdef CONFIG_X86 -static void nvdimm_invalidate_cache(void) -{ - wbinvd_on_all_cpus(); -} -#else -static void nvdimm_invalidate_cache(void) -{ - WARN_ON_ONCE("cache invalidation required after unlock\n"); -} -#endif - static const struct nvdimm_security_ops __intel_security_ops = { - .state = intel_security_state, + .get_flags = intel_security_flags, .freeze = intel_security_freeze, .change_key = intel_security_change_key, .disable = intel_security_disable, @@ -386,3 +395,340 @@ static const struct nvdimm_security_ops __intel_security_ops = { }; const struct nvdimm_security_ops *intel_security_ops = &__intel_security_ops; + +static int intel_bus_fwa_businfo(struct nvdimm_bus_descriptor *nd_desc, + struct nd_intel_bus_fw_activate_businfo *info) +{ + TRAILING_OVERLAP(struct nd_cmd_pkg, pkg, nd_payload, + struct nd_intel_bus_fw_activate_businfo cmd; + ) nd_cmd = { + .pkg = { + .nd_command = NVDIMM_BUS_INTEL_FW_ACTIVATE_BUSINFO, + .nd_family = NVDIMM_BUS_FAMILY_INTEL, + .nd_size_out = + sizeof(struct nd_intel_bus_fw_activate_businfo), + .nd_fw_size = + sizeof(struct nd_intel_bus_fw_activate_businfo), + }, + }; + int rc; + + rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), + NULL); + *info = nd_cmd.cmd; + return rc; +} + +/* The fw_ops expect to be called with the nvdimm_bus_lock() held */ +static enum nvdimm_fwa_state intel_bus_fwa_state( + struct nvdimm_bus_descriptor *nd_desc) +{ + struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); + struct nd_intel_bus_fw_activate_businfo info; + struct device *dev = acpi_desc->dev; + enum nvdimm_fwa_state state; + int rc; + + /* + * It should not be possible for platform firmware to return + * busy because activate is a synchronous operation. Treat it + * similar to invalid, i.e. always refresh / poll the status. + */ + switch (acpi_desc->fwa_state) { + case NVDIMM_FWA_INVALID: + case NVDIMM_FWA_BUSY: + break; + default: + /* check if capability needs to be refreshed */ + if (acpi_desc->fwa_cap == NVDIMM_FWA_CAP_INVALID) + break; + return acpi_desc->fwa_state; + } + + /* Refresh with platform firmware */ + rc = intel_bus_fwa_businfo(nd_desc, &info); + if (rc) + return NVDIMM_FWA_INVALID; + + switch (info.state) { + case ND_INTEL_FWA_IDLE: + state = NVDIMM_FWA_IDLE; + break; + case ND_INTEL_FWA_BUSY: + state = NVDIMM_FWA_BUSY; + break; + case ND_INTEL_FWA_ARMED: + if (info.activate_tmo > info.max_quiesce_tmo) + state = NVDIMM_FWA_ARM_OVERFLOW; + else + state = NVDIMM_FWA_ARMED; + break; + default: + dev_err_once(dev, "invalid firmware activate state %d\n", + info.state); + return NVDIMM_FWA_INVALID; + } + + /* + * Capability data is available in the same payload as state. It + * is expected to be static. + */ + if (acpi_desc->fwa_cap == NVDIMM_FWA_CAP_INVALID) { + if (info.capability & ND_INTEL_BUS_FWA_CAP_FWQUIESCE) + acpi_desc->fwa_cap = NVDIMM_FWA_CAP_QUIESCE; + else if (info.capability & ND_INTEL_BUS_FWA_CAP_OSQUIESCE) { + /* + * Skip hibernate cycle by default if platform + * indicates that it does not need devices to be + * quiesced. 
+ */ + acpi_desc->fwa_cap = NVDIMM_FWA_CAP_LIVE; + } else + acpi_desc->fwa_cap = NVDIMM_FWA_CAP_NONE; + } + + acpi_desc->fwa_state = state; + + return state; +} + +static enum nvdimm_fwa_capability intel_bus_fwa_capability( + struct nvdimm_bus_descriptor *nd_desc) +{ + struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); + + if (acpi_desc->fwa_cap > NVDIMM_FWA_CAP_INVALID) + return acpi_desc->fwa_cap; + + if (intel_bus_fwa_state(nd_desc) > NVDIMM_FWA_INVALID) + return acpi_desc->fwa_cap; + + return NVDIMM_FWA_CAP_INVALID; +} + +static int intel_bus_fwa_activate(struct nvdimm_bus_descriptor *nd_desc) +{ + struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); + TRAILING_OVERLAP(struct nd_cmd_pkg, pkg, nd_payload, + struct nd_intel_bus_fw_activate cmd; + ) nd_cmd; + int rc; + + nd_cmd.pkg = (struct nd_cmd_pkg) { + .nd_command = NVDIMM_BUS_INTEL_FW_ACTIVATE, + .nd_family = NVDIMM_BUS_FAMILY_INTEL, + .nd_size_in = sizeof(nd_cmd.cmd.iodev_state), + .nd_size_out = + sizeof(struct nd_intel_bus_fw_activate), + .nd_fw_size = + sizeof(struct nd_intel_bus_fw_activate), + }; + nd_cmd.cmd = (struct nd_intel_bus_fw_activate) { + /* + * Even though activate is run from a suspended context, + * for safety, still ask platform firmware to force + * quiesce devices by default. Let a module + * parameter override that policy. + */ + .iodev_state = acpi_desc->fwa_noidle + ? ND_INTEL_BUS_FWA_IODEV_OS_IDLE + : ND_INTEL_BUS_FWA_IODEV_FORCE_IDLE, + }; + switch (intel_bus_fwa_state(nd_desc)) { + case NVDIMM_FWA_ARMED: + case NVDIMM_FWA_ARM_OVERFLOW: + break; + default: + return -ENXIO; + } + + rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), + NULL); + + /* + * Whether the command succeeded, or failed, the agent checking + * for the result needs to query the DIMMs individually. + * Increment the activation count to invalidate all the DIMM + * states at once (it's otherwise not possible to take + * acpi_desc->init_mutex in this context) + */ + acpi_desc->fwa_state = NVDIMM_FWA_INVALID; + acpi_desc->fwa_count++; + + dev_dbg(acpi_desc->dev, "result: %d\n", rc); + + return rc; +} + +static const struct nvdimm_bus_fw_ops __intel_bus_fw_ops = { + .activate_state = intel_bus_fwa_state, + .capability = intel_bus_fwa_capability, + .activate = intel_bus_fwa_activate, +}; + +const struct nvdimm_bus_fw_ops *intel_bus_fw_ops = &__intel_bus_fw_ops; + +static int intel_fwa_dimminfo(struct nvdimm *nvdimm, + struct nd_intel_fw_activate_dimminfo *info) +{ + TRAILING_OVERLAP(struct nd_cmd_pkg, pkg, nd_payload, + struct nd_intel_fw_activate_dimminfo cmd; + ) nd_cmd = { + .pkg = { + .nd_command = NVDIMM_INTEL_FW_ACTIVATE_DIMMINFO, + .nd_family = NVDIMM_FAMILY_INTEL, + .nd_size_out = + sizeof(struct nd_intel_fw_activate_dimminfo), + .nd_fw_size = + sizeof(struct nd_intel_fw_activate_dimminfo), + }, + }; + int rc; + + rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL); + *info = nd_cmd.cmd; + return rc; +} + +static enum nvdimm_fwa_state intel_fwa_state(struct nvdimm *nvdimm) +{ + struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); + struct acpi_nfit_desc *acpi_desc = nfit_mem->acpi_desc; + struct nd_intel_fw_activate_dimminfo info; + int rc; + + /* + * Similar to the bus state, since activate is synchronous the + * busy state should resolve within the context of 'activate'. 
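
intel_fwa_state() below only re-queries platform firmware when the cached per-DIMM state is invalid or busy, or when nfit_mem->fwa_count no longer matches acpi_desc->fwa_count, i.e. a bus-level activate has bumped the generation since the last read. A minimal sketch of that generation-counter cache pattern (types and names are illustrative, not the driver's):

/* Sketch: cache a firmware-reported state, invalidated by a bus generation count. */
struct example_dimm_cache {
	int state;		/* last value read from firmware */
	unsigned int gen;	/* bus generation when 'state' was read */
};

static int example_cached_state(struct example_dimm_cache *c,
				unsigned int bus_gen, int (*query_fw)(void))
{
	/* Reuse the cached value only if no activate happened in between. */
	if (c->gen == bus_gen)
		return c->state;

	c->state = query_fw();
	c->gen = bus_gen;
	return c->state;
}
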
+ */ + switch (nfit_mem->fwa_state) { + case NVDIMM_FWA_INVALID: + case NVDIMM_FWA_BUSY: + break; + default: + /* If no activations occurred the old state is still valid */ + if (nfit_mem->fwa_count == acpi_desc->fwa_count) + return nfit_mem->fwa_state; + } + + rc = intel_fwa_dimminfo(nvdimm, &info); + if (rc) + return NVDIMM_FWA_INVALID; + + switch (info.state) { + case ND_INTEL_FWA_IDLE: + nfit_mem->fwa_state = NVDIMM_FWA_IDLE; + break; + case ND_INTEL_FWA_BUSY: + nfit_mem->fwa_state = NVDIMM_FWA_BUSY; + break; + case ND_INTEL_FWA_ARMED: + nfit_mem->fwa_state = NVDIMM_FWA_ARMED; + break; + default: + nfit_mem->fwa_state = NVDIMM_FWA_INVALID; + break; + } + + switch (info.result) { + case ND_INTEL_DIMM_FWA_NONE: + nfit_mem->fwa_result = NVDIMM_FWA_RESULT_NONE; + break; + case ND_INTEL_DIMM_FWA_SUCCESS: + nfit_mem->fwa_result = NVDIMM_FWA_RESULT_SUCCESS; + break; + case ND_INTEL_DIMM_FWA_NOTSTAGED: + nfit_mem->fwa_result = NVDIMM_FWA_RESULT_NOTSTAGED; + break; + case ND_INTEL_DIMM_FWA_NEEDRESET: + nfit_mem->fwa_result = NVDIMM_FWA_RESULT_NEEDRESET; + break; + case ND_INTEL_DIMM_FWA_MEDIAFAILED: + case ND_INTEL_DIMM_FWA_ABORT: + case ND_INTEL_DIMM_FWA_NOTSUPP: + case ND_INTEL_DIMM_FWA_ERROR: + default: + nfit_mem->fwa_result = NVDIMM_FWA_RESULT_FAIL; + break; + } + + nfit_mem->fwa_count = acpi_desc->fwa_count; + + return nfit_mem->fwa_state; +} + +static enum nvdimm_fwa_result intel_fwa_result(struct nvdimm *nvdimm) +{ + struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); + struct acpi_nfit_desc *acpi_desc = nfit_mem->acpi_desc; + + if (nfit_mem->fwa_count == acpi_desc->fwa_count + && nfit_mem->fwa_result > NVDIMM_FWA_RESULT_INVALID) + return nfit_mem->fwa_result; + + if (intel_fwa_state(nvdimm) > NVDIMM_FWA_INVALID) + return nfit_mem->fwa_result; + + return NVDIMM_FWA_RESULT_INVALID; +} + +static int intel_fwa_arm(struct nvdimm *nvdimm, enum nvdimm_fwa_trigger arm) +{ + struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); + struct acpi_nfit_desc *acpi_desc = nfit_mem->acpi_desc; + TRAILING_OVERLAP(struct nd_cmd_pkg, pkg, nd_payload, + struct nd_intel_fw_activate_arm cmd; + ) nd_cmd; + int rc; + + nd_cmd.pkg = (struct nd_cmd_pkg) { + .nd_command = NVDIMM_INTEL_FW_ACTIVATE_ARM, + .nd_family = NVDIMM_FAMILY_INTEL, + .nd_size_in = sizeof(nd_cmd.cmd.activate_arm), + .nd_size_out = sizeof(struct nd_intel_fw_activate_arm), + .nd_fw_size = sizeof(struct nd_intel_fw_activate_arm), + }; + nd_cmd.cmd = (struct nd_intel_fw_activate_arm) { + .activate_arm = arm == NVDIMM_FWA_ARM ? + ND_INTEL_DIMM_FWA_ARM : + ND_INTEL_DIMM_FWA_DISARM, + }; + + switch (intel_fwa_state(nvdimm)) { + case NVDIMM_FWA_INVALID: + return -ENXIO; + case NVDIMM_FWA_BUSY: + return -EBUSY; + case NVDIMM_FWA_IDLE: + if (arm == NVDIMM_FWA_DISARM) + return 0; + break; + case NVDIMM_FWA_ARMED: + if (arm == NVDIMM_FWA_ARM) + return 0; + break; + default: + return -ENXIO; + } + + /* + * Invalidate the bus-level state, now that we're committed to + * changing the 'arm' state. + */ + acpi_desc->fwa_state = NVDIMM_FWA_INVALID; + nfit_mem->fwa_state = NVDIMM_FWA_INVALID; + + rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL); + + dev_dbg(acpi_desc->dev, "%s result: %d\n", arm == NVDIMM_FWA_ARM + ? 
"arm" : "disarm", rc); + return rc; +} + +static const struct nvdimm_fw_ops __intel_fw_ops = { + .activate_state = intel_fwa_state, + .activate_result = intel_fwa_result, + .arm = intel_fwa_arm, +}; + +const struct nvdimm_fw_ops *intel_fw_ops = &__intel_fw_ops; diff --git a/drivers/acpi/nfit/intel.h b/drivers/acpi/nfit/intel.h index 0aca682ab9d7..b768234ccebc 100644 --- a/drivers/acpi/nfit/intel.h +++ b/drivers/acpi/nfit/intel.h @@ -111,4 +111,65 @@ struct nd_intel_master_secure_erase { u8 passphrase[ND_INTEL_PASSPHRASE_SIZE]; u32 status; } __packed; + +#define ND_INTEL_FWA_IDLE 0 +#define ND_INTEL_FWA_ARMED 1 +#define ND_INTEL_FWA_BUSY 2 + +#define ND_INTEL_DIMM_FWA_NONE 0 +#define ND_INTEL_DIMM_FWA_NOTSTAGED 1 +#define ND_INTEL_DIMM_FWA_SUCCESS 2 +#define ND_INTEL_DIMM_FWA_NEEDRESET 3 +#define ND_INTEL_DIMM_FWA_MEDIAFAILED 4 +#define ND_INTEL_DIMM_FWA_ABORT 5 +#define ND_INTEL_DIMM_FWA_NOTSUPP 6 +#define ND_INTEL_DIMM_FWA_ERROR 7 + +struct nd_intel_fw_activate_dimminfo { + u32 status; + u16 result; + u8 state; + u8 reserved[7]; +} __packed; + +#define ND_INTEL_DIMM_FWA_ARM 1 +#define ND_INTEL_DIMM_FWA_DISARM 0 + +struct nd_intel_fw_activate_arm { + u8 activate_arm; + u32 status; +} __packed; + +/* Root device command payloads */ +#define ND_INTEL_BUS_FWA_CAP_FWQUIESCE (1 << 0) +#define ND_INTEL_BUS_FWA_CAP_OSQUIESCE (1 << 1) +#define ND_INTEL_BUS_FWA_CAP_RESET (1 << 2) + +struct nd_intel_bus_fw_activate_businfo { + u32 status; + u16 reserved; + u8 state; + u8 capability; + u64 activate_tmo; + u64 cpu_quiesce_tmo; + u64 io_quiesce_tmo; + u64 max_quiesce_tmo; +} __packed; + +#define ND_INTEL_BUS_FWA_STATUS_NOARM (6 | 1 << 16) +#define ND_INTEL_BUS_FWA_STATUS_BUSY (6 | 2 << 16) +#define ND_INTEL_BUS_FWA_STATUS_NOFW (6 | 3 << 16) +#define ND_INTEL_BUS_FWA_STATUS_TMO (6 | 4 << 16) +#define ND_INTEL_BUS_FWA_STATUS_NOIDLE (6 | 5 << 16) +#define ND_INTEL_BUS_FWA_STATUS_ABORT (6 | 6 << 16) + +#define ND_INTEL_BUS_FWA_IODEV_FORCE_IDLE (0) +#define ND_INTEL_BUS_FWA_IODEV_OS_IDLE (1) +struct nd_intel_bus_fw_activate { + u8 iodev_state; + u32 status; +} __packed; + +extern const struct nvdimm_fw_ops *intel_fw_ops; +extern const struct nvdimm_bus_fw_ops *intel_bus_fw_ops; #endif diff --git a/drivers/acpi/nfit/mce.c b/drivers/acpi/nfit/mce.c index d6c1b10f6c25..d48a388b796e 100644 --- a/drivers/acpi/nfit/mce.c +++ b/drivers/acpi/nfit/mce.c @@ -1,16 +1,8 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * NFIT - Machine Check Handler * * Copyright(c) 2013-2016 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. 
*/ #include <linux/notifier.h> #include <linux/acpi.h> @@ -40,6 +32,7 @@ static int nfit_handle_mce(struct notifier_block *nb, unsigned long val, */ mutex_lock(&acpi_desc_lock); list_for_each_entry(acpi_desc, &acpi_descs, list) { + unsigned int align = 1UL << MCI_MISC_ADDR_LSB(mce->misc); struct device *dev = acpi_desc->dev; int found_match = 0; @@ -71,8 +64,7 @@ static int nfit_handle_mce(struct notifier_block *nb, unsigned long val, /* If this fails due to an -ENOMEM, there is little we can do */ nvdimm_bus_add_badrange(acpi_desc->nvdimm_bus, - ALIGN(mce->addr, L1_CACHE_BYTES), - L1_CACHE_BYTES); + ALIGN_DOWN(mce->addr, align), align); nvdimm_region_notify(nfit_spa->nd_region, NVDIMM_REVALIDATE_POISON); @@ -84,6 +76,7 @@ static int nfit_handle_mce(struct notifier_block *nb, unsigned long val, */ acpi_nfit_ars_rescan(acpi_desc, 0); } + mce->kflags |= MCE_HANDLED_NFIT; break; } diff --git a/drivers/acpi/nfit/nfit.h b/drivers/acpi/nfit/nfit.h index 33691aecfcee..573bc0de2990 100644 --- a/drivers/acpi/nfit/nfit.h +++ b/drivers/acpi/nfit/nfit.h @@ -1,16 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ /* * NVDIMM Firmware Interface Table - NFIT * * Copyright(c) 2013-2015 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. */ #ifndef __NFIT_H__ #define __NFIT_H__ @@ -24,8 +16,9 @@ /* ACPI 6.1 */ #define UUID_NFIT_BUS "2f10e7a4-9e91-11e4-89d3-123b93f75cba" -/* http://pmem.io/documents/NVDIMM_DSM_Interface-V1.6.pdf */ +/* https://pmem.io/documents/NVDIMM_DSM_Interface-V1.6.pdf */ #define UUID_NFIT_DIMM "4309ac30-0d11-11e4-9191-0800200c9a66" +#define UUID_INTEL_BUS "c7d8acd4-2df8-4b82-9f65-a325335af149" /* https://github.com/HewlettPackard/hpe-nvm/blob/master/Documentation/ */ #define UUID_NFIT_DIMM_N_HPE1 "9002c334-acf3-4c0e-9642-a235f0d53bc6" @@ -34,11 +27,14 @@ /* https://msdn.microsoft.com/library/windows/hardware/mt604741 */ #define UUID_NFIT_DIMM_N_MSFT "1ee68b36-d4bd-4a1a-9a16-4f8e53d46e05" +/* http://www.uefi.org/RFIC_LIST (see "Virtual NVDIMM 0x1901") */ +#define UUID_NFIT_DIMM_N_HYPERV "5746c5f2-a9a2-4264-ad0e-e4ddc9e09e80" + #define ACPI_NFIT_MEM_FAILED_MASK (ACPI_NFIT_MEM_SAVE_FAILED \ | ACPI_NFIT_MEM_RESTORE_FAILED | ACPI_NFIT_MEM_FLUSH_FAILED \ | ACPI_NFIT_MEM_NOT_ARMED | ACPI_NFIT_MEM_MAP_FAILED) -#define NVDIMM_FAMILY_MAX NVDIMM_FAMILY_MSFT +#define NVDIMM_CMD_MAX 31 #define NVDIMM_STANDARD_CMDMASK \ (1 << ND_CMD_SMART | 1 << ND_CMD_SMART_THRESHOLD | 1 << ND_CMD_DIMM_FLAGS \ @@ -70,6 +66,13 @@ enum nvdimm_family_cmds { NVDIMM_INTEL_QUERY_OVERWRITE = 26, NVDIMM_INTEL_SET_MASTER_PASSPHRASE = 27, NVDIMM_INTEL_MASTER_SECURE_ERASE = 28, + NVDIMM_INTEL_FW_ACTIVATE_DIMMINFO = 29, + NVDIMM_INTEL_FW_ACTIVATE_ARM = 30, +}; + +enum nvdimm_bus_family_cmds { + NVDIMM_BUS_INTEL_FW_ACTIVATE_BUSINFO = 1, + NVDIMM_BUS_INTEL_FW_ACTIVATE = 2, }; #define NVDIMM_INTEL_SECURITY_CMDMASK \ @@ -80,13 +83,22 @@ enum nvdimm_family_cmds { | 1 << NVDIMM_INTEL_SET_MASTER_PASSPHRASE \ | 1 << NVDIMM_INTEL_MASTER_SECURE_ERASE) +#define NVDIMM_INTEL_FW_ACTIVATE_CMDMASK \ +(1 << NVDIMM_INTEL_FW_ACTIVATE_DIMMINFO | 1 << NVDIMM_INTEL_FW_ACTIVATE_ARM) + +#define 
NVDIMM_BUS_INTEL_FW_ACTIVATE_CMDMASK \ +(1 << NVDIMM_BUS_INTEL_FW_ACTIVATE_BUSINFO | 1 << NVDIMM_BUS_INTEL_FW_ACTIVATE) + #define NVDIMM_INTEL_CMDMASK \ (NVDIMM_STANDARD_CMDMASK | 1 << NVDIMM_INTEL_GET_MODES \ | 1 << NVDIMM_INTEL_GET_FWINFO | 1 << NVDIMM_INTEL_START_FWUPDATE \ | 1 << NVDIMM_INTEL_SEND_FWUPDATE | 1 << NVDIMM_INTEL_FINISH_FWUPDATE \ | 1 << NVDIMM_INTEL_QUERY_FWUPDATE | 1 << NVDIMM_INTEL_SET_THRESHOLD \ | 1 << NVDIMM_INTEL_INJECT_ERROR | 1 << NVDIMM_INTEL_LATCH_SHUTDOWN \ - | NVDIMM_INTEL_SECURITY_CMDMASK) + | NVDIMM_INTEL_SECURITY_CMDMASK | NVDIMM_INTEL_FW_ACTIVATE_CMDMASK) + +#define NVDIMM_INTEL_DENY_CMDMASK \ +(NVDIMM_INTEL_SECURITY_CMDMASK | NVDIMM_INTEL_FW_ACTIVATE_CMDMASK) enum nfit_uuids { /* for simplicity alias the uuid index with the family id */ @@ -94,6 +106,12 @@ enum nfit_uuids { NFIT_DEV_DIMM_N_HPE1 = NVDIMM_FAMILY_HPE1, NFIT_DEV_DIMM_N_HPE2 = NVDIMM_FAMILY_HPE2, NFIT_DEV_DIMM_N_MSFT = NVDIMM_FAMILY_MSFT, + NFIT_DEV_DIMM_N_HYPERV = NVDIMM_FAMILY_HYPERV, + /* + * to_nfit_bus_uuid() expects to translate bus uuid family ids + * to a UUID index using NVDIMM_FAMILY_MAX as an offset + */ + NFIT_BUS_INTEL = NVDIMM_FAMILY_MAX + NVDIMM_BUS_FAMILY_INTEL, NFIT_SPA_VOLATILE, NFIT_SPA_PM, NFIT_SPA_DCR, @@ -148,32 +166,32 @@ struct nfit_spa { unsigned long ars_state; u32 clear_err_unit; u32 max_ars; - struct acpi_nfit_system_address spa[0]; + struct acpi_nfit_system_address spa[]; }; struct nfit_dcr { struct list_head list; - struct acpi_nfit_control_region dcr[0]; + struct acpi_nfit_control_region dcr[]; }; struct nfit_bdw { struct list_head list; - struct acpi_nfit_data_region bdw[0]; + struct acpi_nfit_data_region bdw[]; }; struct nfit_idt { struct list_head list; - struct acpi_nfit_interleave idt[0]; + struct acpi_nfit_interleave idt[]; }; struct nfit_flush { struct list_head list; - struct acpi_nfit_flush_address flush[0]; + struct acpi_nfit_flush_address flush[]; }; struct nfit_memdev { struct list_head list; - struct acpi_nfit_memory_map memdev[0]; + struct acpi_nfit_memory_map memdev[]; }; enum nfit_mem_flags { @@ -190,18 +208,17 @@ struct nfit_mem { struct nvdimm *nvdimm; struct acpi_nfit_memory_map *memdev_dcr; struct acpi_nfit_memory_map *memdev_pmem; - struct acpi_nfit_memory_map *memdev_bdw; struct acpi_nfit_control_region *dcr; - struct acpi_nfit_data_region *bdw; struct acpi_nfit_system_address *spa_dcr; - struct acpi_nfit_system_address *spa_bdw; struct acpi_nfit_interleave *idt_dcr; - struct acpi_nfit_interleave *idt_bdw; struct kernfs_node *flags_attr; struct nfit_flush *nfit_flush; struct list_head list; struct acpi_device *adev; struct acpi_nfit_desc *acpi_desc; + enum nvdimm_fwa_state fwa_state; + enum nvdimm_fwa_result fwa_result; + int fwa_count; char id[NFIT_DIMM_ID_LEN+1]; struct resource *flush_wpq; unsigned long dsm_mask; @@ -210,6 +227,13 @@ struct nfit_mem { int family; }; +enum scrub_flags { + ARS_BUSY, + ARS_CANCEL, + ARS_VALID, + ARS_POLL, +}; + struct acpi_nfit_desc { struct nvdimm_bus_descriptor nd_desc; struct acpi_table_header acpi_header; @@ -223,7 +247,6 @@ struct acpi_nfit_desc { struct list_head idts; struct nvdimm_bus *nvdimm_bus; struct device *dev; - u8 ars_start_flags; struct nd_cmd_ars_status *ars_status; struct nfit_spa *scrub_spa; struct delayed_work dwork; @@ -232,15 +255,18 @@ struct acpi_nfit_desc { unsigned int max_ars; unsigned int scrub_count; unsigned int scrub_mode; - unsigned int scrub_busy:1; - unsigned int cancel:1; + unsigned long scrub_flags; unsigned long dimm_cmd_force_en; unsigned long bus_cmd_force_en; - 
unsigned long bus_nfit_cmd_force_en; + unsigned long bus_dsm_mask; + unsigned long family_dsm_mask[NVDIMM_BUS_FAMILY_MAX + 1]; unsigned int platform_cap; unsigned int scrub_tmo; - int (*blk_do_io)(struct nd_blk_region *ndbr, resource_size_t dpa, - void *iobuf, u64 len, int rw); + enum nvdimm_fwa_state fwa_state; + enum nvdimm_fwa_capability fwa_cap; + int fwa_count; + bool fwa_noidle; + bool fwa_nosuspend; }; enum scrub_mode { @@ -319,4 +345,8 @@ void __acpi_nvdimm_notify(struct device *dev, u32 event); int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm, unsigned int cmd, void *buf, unsigned int buf_len, int *cmd_rc); void acpi_nfit_desc_init(struct acpi_nfit_desc *acpi_desc, struct device *dev); +bool intel_fwa_supported(struct nvdimm_bus *nvdimm_bus); +extern struct device_attribute dev_attr_firmware_activate_noidle; +void nfit_intel_shutdown_status(struct nfit_mem *nfit_mem); + #endif /* __NFIT_H__ */ diff --git a/drivers/acpi/nhlt.c b/drivers/acpi/nhlt.c new file mode 100644 index 000000000000..dc1bd0df9228 --- /dev/null +++ b/drivers/acpi/nhlt.c @@ -0,0 +1,289 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright(c) 2023-2024 Intel Corporation + * + * Authors: Cezary Rojewski <cezary.rojewski@intel.com> + * Amadeusz Slawinski <amadeuszx.slawinski@linux.intel.com> + */ + +#define pr_fmt(fmt) "ACPI: NHLT: " fmt + +#include <linux/acpi.h> +#include <linux/errno.h> +#include <linux/export.h> +#include <linux/minmax.h> +#include <linux/printk.h> +#include <linux/types.h> +#include <acpi/nhlt.h> + +static struct acpi_table_nhlt *acpi_gbl_nhlt; + +static struct acpi_table_nhlt empty_nhlt = { + .header = { + .signature = ACPI_SIG_NHLT, + }, +}; + +/** + * acpi_nhlt_get_gbl_table - Retrieve a pointer to the first NHLT table. + * + * If there is no NHLT in the system, acpi_gbl_nhlt will instead point to an + * empty table. + * + * Return: ACPI status code of the operation. + */ +acpi_status acpi_nhlt_get_gbl_table(void) +{ + acpi_status status; + + status = acpi_get_table(ACPI_SIG_NHLT, 0, (struct acpi_table_header **)(&acpi_gbl_nhlt)); + if (!acpi_gbl_nhlt) + acpi_gbl_nhlt = &empty_nhlt; + return status; +} +EXPORT_SYMBOL_GPL(acpi_nhlt_get_gbl_table); + +/** + * acpi_nhlt_put_gbl_table - Release the global NHLT table. + */ +void acpi_nhlt_put_gbl_table(void) +{ + acpi_put_table((struct acpi_table_header *)acpi_gbl_nhlt); +} +EXPORT_SYMBOL_GPL(acpi_nhlt_put_gbl_table); + +/** + * acpi_nhlt_endpoint_match - Verify if an endpoint matches criteria. + * @ep: the endpoint to check. + * @link_type: the hardware link type, e.g.: PDM or SSP. + * @dev_type: the device type. + * @dir: stream direction. + * @bus_id: the ID of virtual bus hosting the endpoint. + * + * Either of @link_type, @dev_type, @dir or @bus_id may be set to a negative + * value to ignore the parameter when matching. + * + * Return: %true if endpoint matches specified criteria or %false otherwise. + */ +bool acpi_nhlt_endpoint_match(const struct acpi_nhlt_endpoint *ep, + int link_type, int dev_type, int dir, int bus_id) +{ + return ep && + (link_type < 0 || ep->link_type == link_type) && + (dev_type < 0 || ep->device_type == dev_type) && + (bus_id < 0 || ep->virtual_bus_id == bus_id) && + (dir < 0 || ep->direction == dir); +} +EXPORT_SYMBOL_GPL(acpi_nhlt_endpoint_match); + +/** + * acpi_nhlt_tb_find_endpoint - Search a NHLT table for an endpoint. + * @tb: the table to search. + * @link_type: the hardware link type, e.g.: PDM or SSP. + * @dev_type: the device type. + * @dir: stream direction. 
+ * @bus_id: the ID of virtual bus hosting the endpoint. + * + * Either of @link_type, @dev_type, @dir or @bus_id may be set to a negative + * value to ignore the parameter during the search. + * + * Return: A pointer to endpoint matching the criteria, %NULL if not found or + * an ERR_PTR() otherwise. + */ +struct acpi_nhlt_endpoint * +acpi_nhlt_tb_find_endpoint(const struct acpi_table_nhlt *tb, + int link_type, int dev_type, int dir, int bus_id) +{ + struct acpi_nhlt_endpoint *ep; + + for_each_nhlt_endpoint(tb, ep) + if (acpi_nhlt_endpoint_match(ep, link_type, dev_type, dir, bus_id)) + return ep; + return NULL; +} +EXPORT_SYMBOL_GPL(acpi_nhlt_tb_find_endpoint); + +/** + * acpi_nhlt_find_endpoint - Search all NHLT tables for an endpoint. + * @link_type: the hardware link type, e.g.: PDM or SSP. + * @dev_type: the device type. + * @dir: stream direction. + * @bus_id: the ID of virtual bus hosting the endpoint. + * + * Either of @link_type, @dev_type, @dir or @bus_id may be set to a negative + * value to ignore the parameter during the search. + * + * Return: A pointer to endpoint matching the criteria, %NULL if not found or + * an ERR_PTR() otherwise. + */ +struct acpi_nhlt_endpoint * +acpi_nhlt_find_endpoint(int link_type, int dev_type, int dir, int bus_id) +{ + /* TODO: Currently limited to table of index 0. */ + return acpi_nhlt_tb_find_endpoint(acpi_gbl_nhlt, link_type, dev_type, dir, bus_id); +} +EXPORT_SYMBOL_GPL(acpi_nhlt_find_endpoint); + +/** + * acpi_nhlt_endpoint_find_fmtcfg - Search endpoint's formats configuration space + * for a specific format. + * @ep: the endpoint to search. + * @ch: number of channels. + * @rate: samples per second. + * @vbps: valid bits per sample. + * @bps: bits per sample. + * + * Return: A pointer to format matching the criteria, %NULL if not found or + * an ERR_PTR() otherwise. + */ +struct acpi_nhlt_format_config * +acpi_nhlt_endpoint_find_fmtcfg(const struct acpi_nhlt_endpoint *ep, + u16 ch, u32 rate, u16 vbps, u16 bps) +{ + struct acpi_nhlt_wave_formatext *wav; + struct acpi_nhlt_format_config *fmt; + + for_each_nhlt_endpoint_fmtcfg(ep, fmt) { + wav = &fmt->format; + + if (wav->valid_bits_per_sample == vbps && + wav->samples_per_sec == rate && + wav->bits_per_sample == bps && + wav->channel_count == ch) + return fmt; + } + + return NULL; +} +EXPORT_SYMBOL_GPL(acpi_nhlt_endpoint_find_fmtcfg); + +/** + * acpi_nhlt_tb_find_fmtcfg - Search a NHLT table for a specific format. + * @tb: the table to search. + * @link_type: the hardware link type, e.g.: PDM or SSP. + * @dev_type: the device type. + * @dir: stream direction. + * @bus_id: the ID of virtual bus hosting the endpoint. + * + * @ch: number of channels. + * @rate: samples per second. + * @vbps: valid bits per sample. + * @bps: bits per sample. + * + * Either of @link_type, @dev_type, @dir or @bus_id may be set to a negative + * value to ignore the parameter during the search. + * + * Return: A pointer to format matching the criteria, %NULL if not found or + * an ERR_PTR() otherwise. 
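The NHLT helpers above are meant to be composed: acquire the global table once, look up an endpoint, then narrow the search to a concrete format. A minimal sketch, assuming ACPI tables are already available; the 2-channel, 48 kHz, 16-bit parameters are invented for illustration, and -1 is passed to ignore device type, direction and bus id:

    #include <linux/acpi.h>
    #include <acpi/nhlt.h>

    static bool pdm_supports_stereo_48k(void)
    {
            struct acpi_nhlt_endpoint *ep;
            bool supported = false;

            /* On failure acpi_gbl_nhlt points at an empty table, so lookups stay safe. */
            acpi_nhlt_get_gbl_table();

            /* Match on the link type only; -1 ignores device type, direction and bus id. */
            ep = acpi_nhlt_find_endpoint(ACPI_NHLT_LINKTYPE_PDM, -1, -1, -1);
            if (ep)
                    /* 2 channels, 48000 samples/s, 16 valid bits in 16-bit containers. */
                    supported = acpi_nhlt_endpoint_find_fmtcfg(ep, 2, 48000, 16, 16) != NULL;

            acpi_nhlt_put_gbl_table();
            return supported;
    }

The same endpoint pointer could also be handed to acpi_nhlt_endpoint_mic_count(), defined further down, to size a DMIC array.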
+ */ +struct acpi_nhlt_format_config * +acpi_nhlt_tb_find_fmtcfg(const struct acpi_table_nhlt *tb, + int link_type, int dev_type, int dir, int bus_id, + u16 ch, u32 rate, u16 vbps, u16 bps) +{ + struct acpi_nhlt_format_config *fmt; + struct acpi_nhlt_endpoint *ep; + + for_each_nhlt_endpoint(tb, ep) { + if (!acpi_nhlt_endpoint_match(ep, link_type, dev_type, dir, bus_id)) + continue; + + fmt = acpi_nhlt_endpoint_find_fmtcfg(ep, ch, rate, vbps, bps); + if (fmt) + return fmt; + } + + return NULL; +} +EXPORT_SYMBOL_GPL(acpi_nhlt_tb_find_fmtcfg); + +/** + * acpi_nhlt_find_fmtcfg - Search all NHLT tables for a specific format. + * @link_type: the hardware link type, e.g.: PDM or SSP. + * @dev_type: the device type. + * @dir: stream direction. + * @bus_id: the ID of virtual bus hosting the endpoint. + * + * @ch: number of channels. + * @rate: samples per second. + * @vbps: valid bits per sample. + * @bps: bits per sample. + * + * Either of @link_type, @dev_type, @dir or @bus_id may be set to a negative + * value to ignore the parameter during the search. + * + * Return: A pointer to format matching the criteria, %NULL if not found or + * an ERR_PTR() otherwise. + */ +struct acpi_nhlt_format_config * +acpi_nhlt_find_fmtcfg(int link_type, int dev_type, int dir, int bus_id, + u16 ch, u32 rate, u16 vbps, u16 bps) +{ + /* TODO: Currently limited to table of index 0. */ + return acpi_nhlt_tb_find_fmtcfg(acpi_gbl_nhlt, link_type, dev_type, dir, bus_id, + ch, rate, vbps, bps); +} +EXPORT_SYMBOL_GPL(acpi_nhlt_find_fmtcfg); + +static bool acpi_nhlt_config_is_micdevice(struct acpi_nhlt_config *cfg) +{ + return cfg->capabilities_size >= sizeof(struct acpi_nhlt_micdevice_config); +} + +static bool acpi_nhlt_config_is_vendor_micdevice(struct acpi_nhlt_config *cfg) +{ + struct acpi_nhlt_vendor_micdevice_config *devcfg = __acpi_nhlt_config_caps(cfg); + + return cfg->capabilities_size >= sizeof(*devcfg) && + cfg->capabilities_size == struct_size(devcfg, mics, devcfg->mics_count); +} + +/** + * acpi_nhlt_endpoint_mic_count - Retrieve number of digital microphones for a PDM endpoint. + * @ep: the endpoint to return microphones count for. + * + * Return: A number of microphones or an error code if an invalid endpoint is provided. + */ +int acpi_nhlt_endpoint_mic_count(const struct acpi_nhlt_endpoint *ep) +{ + union acpi_nhlt_device_config *devcfg; + struct acpi_nhlt_format_config *fmt; + struct acpi_nhlt_config *cfg; + u16 max_ch = 0; + + if (!ep || ep->link_type != ACPI_NHLT_LINKTYPE_PDM) + return -EINVAL; + + /* Find max number of channels based on formats configuration. */ + for_each_nhlt_endpoint_fmtcfg(ep, fmt) + max_ch = max(fmt->format.channel_count, max_ch); + + cfg = __acpi_nhlt_endpoint_config(ep); + devcfg = __acpi_nhlt_config_caps(cfg); + + /* If @ep is not a mic array, fallback to channels count. 
*/ + if (!acpi_nhlt_config_is_micdevice(cfg) || + devcfg->gen.config_type != ACPI_NHLT_CONFIGTYPE_MICARRAY) + return max_ch; + + switch (devcfg->mic.array_type) { + case ACPI_NHLT_ARRAYTYPE_LINEAR2_SMALL: + case ACPI_NHLT_ARRAYTYPE_LINEAR2_BIG: + return 2; + + case ACPI_NHLT_ARRAYTYPE_LINEAR4_GEO1: + case ACPI_NHLT_ARRAYTYPE_PLANAR4_LSHAPED: + case ACPI_NHLT_ARRAYTYPE_LINEAR4_GEO2: + return 4; + + case ACPI_NHLT_ARRAYTYPE_VENDOR: + if (!acpi_nhlt_config_is_vendor_micdevice(cfg)) + return -EINVAL; + return devcfg->vendor_mic.mics_count; + + default: + pr_warn("undefined mic array type: %#x\n", devcfg->mic.array_type); + return max_ch; + } +} +EXPORT_SYMBOL_GPL(acpi_nhlt_endpoint_mic_count); diff --git a/drivers/acpi/numa/Kconfig b/drivers/acpi/numa/Kconfig new file mode 100644 index 000000000000..f33194d1e43f --- /dev/null +++ b/drivers/acpi/numa/Kconfig @@ -0,0 +1,15 @@ +# SPDX-License-Identifier: GPL-2.0 +config ACPI_NUMA + def_bool NUMA && !X86 + +config ACPI_HMAT + bool "ACPI Heterogeneous Memory Attribute Table Support" + depends on ACPI_NUMA + select HMEM_REPORTING + select MEMREGION + help + If set, this option has the kernel parse and report the + platform's ACPI HMAT (Heterogeneous Memory Attributes Table), + register memory initiators with their targets, and export + performance attributes through the node's sysfs device if + provided. diff --git a/drivers/acpi/numa/Makefile b/drivers/acpi/numa/Makefile new file mode 100644 index 000000000000..517a6c689a94 --- /dev/null +++ b/drivers/acpi/numa/Makefile @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: GPL-2.0-only +obj-$(CONFIG_ACPI_NUMA) += srat.o +obj-$(CONFIG_ACPI_HMAT) += hmat.o diff --git a/drivers/acpi/numa/hmat.c b/drivers/acpi/numa/hmat.c new file mode 100644 index 000000000000..77a81627aaef --- /dev/null +++ b/drivers/acpi/numa/hmat.c @@ -0,0 +1,1104 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2019, Intel Corporation. + * + * Heterogeneous Memory Attributes Table (HMAT) representation + * + * This program parses and reports the platform's HMAT tables, and registers + * the applicable attributes with the node's interfaces. + */ + +#define pr_fmt(fmt) "acpi/hmat: " fmt + +#include <linux/acpi.h> +#include <linux/bitops.h> +#include <linux/device.h> +#include <linux/init.h> +#include <linux/list.h> +#include <linux/mm.h> +#include <linux/platform_device.h> +#include <linux/list_sort.h> +#include <linux/memregion.h> +#include <linux/memory.h> +#include <linux/mutex.h> +#include <linux/node.h> +#include <linux/sysfs.h> +#include <linux/dax.h> +#include <linux/memory-tiers.h> + +static u8 hmat_revision; +static int hmat_disable __initdata; + +void __init disable_hmat(void) +{ + hmat_disable = 1; +} + +static LIST_HEAD(targets); +static LIST_HEAD(initiators); +static LIST_HEAD(localities); + +static DEFINE_MUTEX(target_lock); + +/* + * The defined enum order is used to prioritize attributes to break ties when + * selecting the best performing node. 
+ */ +enum locality_types { + WRITE_LATENCY, + READ_LATENCY, + WRITE_BANDWIDTH, + READ_BANDWIDTH, +}; + +static struct memory_locality *localities_types[4]; + +struct target_cache { + struct list_head node; + struct node_cache_attrs cache_attrs; +}; + +enum { + NODE_ACCESS_CLASS_GENPORT_SINK_LOCAL = ACCESS_COORDINATE_MAX, + NODE_ACCESS_CLASS_GENPORT_SINK_CPU, + NODE_ACCESS_CLASS_MAX, +}; + +struct memory_target { + struct list_head node; + unsigned int memory_pxm; + unsigned int processor_pxm; + struct resource memregions; + struct access_coordinate coord[NODE_ACCESS_CLASS_MAX]; + struct list_head caches; + struct node_cache_attrs cache_attrs; + u8 gen_port_device_handle[ACPI_SRAT_DEVICE_HANDLE_SIZE]; + bool registered; +}; + +struct memory_initiator { + struct list_head node; + unsigned int processor_pxm; + bool has_cpu; +}; + +struct memory_locality { + struct list_head node; + struct acpi_hmat_locality *hmat_loc; +}; + +static struct memory_initiator *find_mem_initiator(unsigned int cpu_pxm) +{ + struct memory_initiator *initiator; + + list_for_each_entry(initiator, &initiators, node) + if (initiator->processor_pxm == cpu_pxm) + return initiator; + return NULL; +} + +static struct memory_target *find_mem_target(unsigned int mem_pxm) +{ + struct memory_target *target; + + list_for_each_entry(target, &targets, node) + if (target->memory_pxm == mem_pxm) + return target; + return NULL; +} + +/** + * hmat_get_extended_linear_cache_size - Retrieve the extended linear cache size + * @backing_res: resource from the backing media + * @nid: node id for the memory region + * @cache_size: (Output) size of extended linear cache. + * + * Return: 0 on success. Errno on failure. + * + */ +int hmat_get_extended_linear_cache_size(struct resource *backing_res, int nid, + resource_size_t *cache_size) +{ + unsigned int pxm = node_to_pxm(nid); + struct memory_target *target; + struct target_cache *tcache; + struct resource *res; + + target = find_mem_target(pxm); + if (!target) + return -ENOENT; + + list_for_each_entry(tcache, &target->caches, node) { + if (tcache->cache_attrs.address_mode != + NODE_CACHE_ADDR_MODE_EXTENDED_LINEAR) + continue; + + res = &target->memregions; + if (!resource_contains(res, backing_res)) + continue; + + *cache_size = tcache->cache_attrs.size; + return 0; + } + + *cache_size = 0; + return 0; +} +EXPORT_SYMBOL_NS_GPL(hmat_get_extended_linear_cache_size, "CXL"); + +static struct memory_target *acpi_find_genport_target(u32 uid) +{ + struct memory_target *target; + u32 target_uid; + u8 *uid_ptr; + + list_for_each_entry(target, &targets, node) { + uid_ptr = target->gen_port_device_handle + 8; + target_uid = *(u32 *)uid_ptr; + if (uid == target_uid) + return target; + } + + return NULL; +} + +/** + * acpi_get_genport_coordinates - Retrieve the access coordinates for a generic port + * @uid: ACPI unique id + * @coord: The access coordinates written back out for the generic port. + * Expect 2 levels array. + * + * Return: 0 on success. Errno on failure. + * + * Only supports device handles that are ACPI. Assume ACPI0016 HID for CXL. 
+ */ +int acpi_get_genport_coordinates(u32 uid, + struct access_coordinate *coord) +{ + struct memory_target *target; + + guard(mutex)(&target_lock); + target = acpi_find_genport_target(uid); + if (!target) + return -ENOENT; + + coord[ACCESS_COORDINATE_LOCAL] = + target->coord[NODE_ACCESS_CLASS_GENPORT_SINK_LOCAL]; + coord[ACCESS_COORDINATE_CPU] = + target->coord[NODE_ACCESS_CLASS_GENPORT_SINK_CPU]; + + return 0; +} +EXPORT_SYMBOL_NS_GPL(acpi_get_genport_coordinates, "CXL"); + +static __init void alloc_memory_initiator(unsigned int cpu_pxm) +{ + struct memory_initiator *initiator; + + if (pxm_to_node(cpu_pxm) == NUMA_NO_NODE) + return; + + initiator = find_mem_initiator(cpu_pxm); + if (initiator) + return; + + initiator = kzalloc(sizeof(*initiator), GFP_KERNEL); + if (!initiator) + return; + + initiator->processor_pxm = cpu_pxm; + initiator->has_cpu = node_state(pxm_to_node(cpu_pxm), N_CPU); + list_add_tail(&initiator->node, &initiators); +} + +static __init struct memory_target *alloc_target(unsigned int mem_pxm) +{ + struct memory_target *target; + + target = find_mem_target(mem_pxm); + if (!target) { + target = kzalloc(sizeof(*target), GFP_KERNEL); + if (!target) + return NULL; + target->memory_pxm = mem_pxm; + target->processor_pxm = PXM_INVAL; + target->memregions = (struct resource) { + .name = "ACPI mem", + .start = 0, + .end = -1, + .flags = IORESOURCE_MEM, + }; + list_add_tail(&target->node, &targets); + INIT_LIST_HEAD(&target->caches); + } + + return target; +} + +static __init void alloc_memory_target(unsigned int mem_pxm, + resource_size_t start, + resource_size_t len) +{ + struct memory_target *target; + + target = alloc_target(mem_pxm); + if (!target) + return; + + /* + * There are potentially multiple ranges per PXM, so record each + * in the per-target memregions resource tree. + */ + if (!__request_region(&target->memregions, start, len, "memory target", + IORESOURCE_MEM)) + pr_warn("failed to reserve %#llx - %#llx in pxm: %d\n", + start, start + len, mem_pxm); +} + +static __init void alloc_genport_target(unsigned int mem_pxm, u8 *handle) +{ + struct memory_target *target; + + target = alloc_target(mem_pxm); + if (!target) + return; + + memcpy(target->gen_port_device_handle, handle, + ACPI_SRAT_DEVICE_HANDLE_SIZE); +} + +static __init const char *hmat_data_type(u8 type) +{ + switch (type) { + case ACPI_HMAT_ACCESS_LATENCY: + return "Access Latency"; + case ACPI_HMAT_READ_LATENCY: + return "Read Latency"; + case ACPI_HMAT_WRITE_LATENCY: + return "Write Latency"; + case ACPI_HMAT_ACCESS_BANDWIDTH: + return "Access Bandwidth"; + case ACPI_HMAT_READ_BANDWIDTH: + return "Read Bandwidth"; + case ACPI_HMAT_WRITE_BANDWIDTH: + return "Write Bandwidth"; + default: + return "Reserved"; + } +} + +static __init const char *hmat_data_type_suffix(u8 type) +{ + switch (type) { + case ACPI_HMAT_ACCESS_LATENCY: + case ACPI_HMAT_READ_LATENCY: + case ACPI_HMAT_WRITE_LATENCY: + return " nsec"; + case ACPI_HMAT_ACCESS_BANDWIDTH: + case ACPI_HMAT_READ_BANDWIDTH: + case ACPI_HMAT_WRITE_BANDWIDTH: + return " MB/s"; + default: + return ""; + } +} + +static u32 hmat_normalize(u16 entry, u64 base, u8 type) +{ + u32 value; + + /* + * Check for invalid and overflow values + */ + if (entry == 0xffff || !entry) + return 0; + else if (base > (UINT_MAX / (entry))) + return 0; + + /* + * Divide by the base unit for version 1, convert latency from + * picosenonds to nanoseconds if revision 2. 
+ */ + value = entry * base; + if (hmat_revision == 1) { + if (value < 10) + return 0; + value = DIV_ROUND_UP(value, 10); + } else if (hmat_revision == 2) { + switch (type) { + case ACPI_HMAT_ACCESS_LATENCY: + case ACPI_HMAT_READ_LATENCY: + case ACPI_HMAT_WRITE_LATENCY: + value = DIV_ROUND_UP(value, 1000); + break; + default: + break; + } + } + return value; +} + +static void hmat_update_target_access(struct memory_target *target, + u8 type, u32 value, int access) +{ + switch (type) { + case ACPI_HMAT_ACCESS_LATENCY: + target->coord[access].read_latency = value; + target->coord[access].write_latency = value; + break; + case ACPI_HMAT_READ_LATENCY: + target->coord[access].read_latency = value; + break; + case ACPI_HMAT_WRITE_LATENCY: + target->coord[access].write_latency = value; + break; + case ACPI_HMAT_ACCESS_BANDWIDTH: + target->coord[access].read_bandwidth = value; + target->coord[access].write_bandwidth = value; + break; + case ACPI_HMAT_READ_BANDWIDTH: + target->coord[access].read_bandwidth = value; + break; + case ACPI_HMAT_WRITE_BANDWIDTH: + target->coord[access].write_bandwidth = value; + break; + default: + break; + } +} + +static __init void hmat_add_locality(struct acpi_hmat_locality *hmat_loc) +{ + struct memory_locality *loc; + + loc = kzalloc(sizeof(*loc), GFP_KERNEL); + if (!loc) { + pr_notice_once("Failed to allocate HMAT locality\n"); + return; + } + + loc->hmat_loc = hmat_loc; + list_add_tail(&loc->node, &localities); + + switch (hmat_loc->data_type) { + case ACPI_HMAT_ACCESS_LATENCY: + localities_types[READ_LATENCY] = loc; + localities_types[WRITE_LATENCY] = loc; + break; + case ACPI_HMAT_READ_LATENCY: + localities_types[READ_LATENCY] = loc; + break; + case ACPI_HMAT_WRITE_LATENCY: + localities_types[WRITE_LATENCY] = loc; + break; + case ACPI_HMAT_ACCESS_BANDWIDTH: + localities_types[READ_BANDWIDTH] = loc; + localities_types[WRITE_BANDWIDTH] = loc; + break; + case ACPI_HMAT_READ_BANDWIDTH: + localities_types[READ_BANDWIDTH] = loc; + break; + case ACPI_HMAT_WRITE_BANDWIDTH: + localities_types[WRITE_BANDWIDTH] = loc; + break; + default: + break; + } +} + +static __init void hmat_update_target(unsigned int tgt_pxm, unsigned int init_pxm, + u8 mem_hier, u8 type, u32 value) +{ + struct memory_target *target = find_mem_target(tgt_pxm); + + if (mem_hier != ACPI_HMAT_MEMORY) + return; + + if (target && target->processor_pxm == init_pxm) { + hmat_update_target_access(target, type, value, + ACCESS_COORDINATE_LOCAL); + /* If the node has a CPU, update access ACCESS_COORDINATE_CPU */ + if (node_state(pxm_to_node(init_pxm), N_CPU)) + hmat_update_target_access(target, type, value, + ACCESS_COORDINATE_CPU); + } +} + +static __init int hmat_parse_locality(union acpi_subtable_headers *header, + const unsigned long end) +{ + struct acpi_hmat_locality *hmat_loc = (void *)header; + unsigned int init, targ, total_size, ipds, tpds; + u32 *inits, *targs, value; + u16 *entries; + u8 type, mem_hier; + + if (hmat_loc->header.length < sizeof(*hmat_loc)) { + pr_notice("Unexpected locality header length: %u\n", + hmat_loc->header.length); + return -EINVAL; + } + + type = hmat_loc->data_type; + mem_hier = hmat_loc->flags & ACPI_HMAT_MEMORY_HIERARCHY; + ipds = hmat_loc->number_of_initiator_Pds; + tpds = hmat_loc->number_of_target_Pds; + total_size = sizeof(*hmat_loc) + sizeof(*entries) * ipds * tpds + + sizeof(*inits) * ipds + sizeof(*targs) * tpds; + if (hmat_loc->header.length < total_size) { + pr_notice("Unexpected locality header length:%u, minimum required:%u\n", + hmat_loc->header.length, 
total_size); + return -EINVAL; + } + + pr_debug("Locality: Flags:%02x Type:%s Initiator Domains:%u Target Domains:%u Base:%lld\n", + hmat_loc->flags, hmat_data_type(type), ipds, tpds, + hmat_loc->entry_base_unit); + + inits = (u32 *)(hmat_loc + 1); + targs = inits + ipds; + entries = (u16 *)(targs + tpds); + for (init = 0; init < ipds; init++) { + alloc_memory_initiator(inits[init]); + for (targ = 0; targ < tpds; targ++) { + value = hmat_normalize(entries[init * tpds + targ], + hmat_loc->entry_base_unit, + type); + pr_debug(" Initiator-Target[%u-%u]:%u%s\n", + inits[init], targs[targ], value, + hmat_data_type_suffix(type)); + + hmat_update_target(targs[targ], inits[init], + mem_hier, type, value); + } + } + + if (mem_hier == ACPI_HMAT_MEMORY) + hmat_add_locality(hmat_loc); + + return 0; +} + +static __init int hmat_parse_cache(union acpi_subtable_headers *header, + const unsigned long end) +{ + struct acpi_hmat_cache *cache = (void *)header; + struct memory_target *target; + struct target_cache *tcache; + u32 attrs; + + if (cache->header.length < sizeof(*cache)) { + pr_notice("Unexpected cache header length: %u\n", + cache->header.length); + return -EINVAL; + } + + attrs = cache->cache_attributes; + pr_debug("Cache: Domain:%u Size:%llu Attrs:%08x SMBIOS Handles:%d\n", + cache->memory_PD, cache->cache_size, attrs, + cache->number_of_SMBIOShandles); + + target = find_mem_target(cache->memory_PD); + if (!target) + return 0; + + tcache = kzalloc(sizeof(*tcache), GFP_KERNEL); + if (!tcache) { + pr_notice_once("Failed to allocate HMAT cache info\n"); + return 0; + } + + tcache->cache_attrs.size = cache->cache_size; + tcache->cache_attrs.level = (attrs & ACPI_HMAT_CACHE_LEVEL) >> 4; + tcache->cache_attrs.line_size = (attrs & ACPI_HMAT_CACHE_LINE_SIZE) >> 16; + + switch ((attrs & ACPI_HMAT_CACHE_ASSOCIATIVITY) >> 8) { + case ACPI_HMAT_CA_DIRECT_MAPPED: + tcache->cache_attrs.indexing = NODE_CACHE_DIRECT_MAP; + /* Extended Linear mode is only valid if cache is direct mapped */ + if (cache->address_mode == ACPI_HMAT_CACHE_MODE_EXTENDED_LINEAR) { + tcache->cache_attrs.address_mode = + NODE_CACHE_ADDR_MODE_EXTENDED_LINEAR; + } + break; + case ACPI_HMAT_CA_COMPLEX_CACHE_INDEXING: + tcache->cache_attrs.indexing = NODE_CACHE_INDEXED; + break; + case ACPI_HMAT_CA_NONE: + default: + tcache->cache_attrs.indexing = NODE_CACHE_OTHER; + break; + } + + switch ((attrs & ACPI_HMAT_WRITE_POLICY) >> 12) { + case ACPI_HMAT_CP_WB: + tcache->cache_attrs.write_policy = NODE_CACHE_WRITE_BACK; + break; + case ACPI_HMAT_CP_WT: + tcache->cache_attrs.write_policy = NODE_CACHE_WRITE_THROUGH; + break; + case ACPI_HMAT_CP_NONE: + default: + tcache->cache_attrs.write_policy = NODE_CACHE_WRITE_OTHER; + break; + } + list_add_tail(&tcache->node, &target->caches); + + return 0; +} + +static int __init hmat_parse_proximity_domain(union acpi_subtable_headers *header, + const unsigned long end) +{ + struct acpi_hmat_proximity_domain *p = (void *)header; + struct memory_target *target = NULL; + + if (p->header.length != sizeof(*p)) { + pr_notice("Unexpected address range header length: %u\n", + p->header.length); + return -EINVAL; + } + + if (hmat_revision == 1) + pr_debug("Memory (%#llx length %#llx) Flags:%04x Processor Domain:%u Memory Domain:%u\n", + p->reserved3, p->reserved4, p->flags, p->processor_PD, + p->memory_PD); + else + pr_info("Memory Flags:%04x Processor Domain:%u Memory Domain:%u\n", + p->flags, p->processor_PD, p->memory_PD); + + if ((hmat_revision == 1 && p->flags & ACPI_HMAT_MEMORY_PD_VALID) || + hmat_revision > 1) { 
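The locality payload walked above is two arrays of proximity domains followed by a row-major initiator-by-target matrix; both hmat_parse_locality() and hmat_initiator_perf() further down index it as entries[init * tpds + targ]. A small hypothetical layout, with all numbers invented:

    /* Hypothetical acpi_hmat_locality payload with ipds = 2, tpds = 3. */
    u32 inits[2]   = { 0, 1 };          /* initiator proximity domains         */
    u32 targs[3]   = { 0, 1, 2 };       /* target proximity domains            */
    u16 entries[6] = {                  /* ipds * tpds entries, row-major      */
            10, 20, 30,                 /* initiator PXM 0 -> target PXMs 0..2 */
            40, 50, 60,                 /* initiator PXM 1 -> target PXMs 0..2 */
    };
    /* entries[1 * 3 + 2] == 60 is the raw value from initiator PXM 1 to target PXM 2. */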
+ target = find_mem_target(p->memory_PD); + if (!target) { + pr_debug("Memory Domain missing from SRAT\n"); + return -EINVAL; + } + } + if (target && p->flags & ACPI_HMAT_PROCESSOR_PD_VALID) { + int p_node = pxm_to_node(p->processor_PD); + + if (p_node == NUMA_NO_NODE) { + pr_debug("Invalid Processor Domain\n"); + return -EINVAL; + } + target->processor_pxm = p->processor_PD; + } + + return 0; +} + +static int __init hmat_parse_subtable(union acpi_subtable_headers *header, + const unsigned long end) +{ + struct acpi_hmat_structure *hdr = (void *)header; + + if (!hdr) + return -EINVAL; + + switch (hdr->type) { + case ACPI_HMAT_TYPE_PROXIMITY: + return hmat_parse_proximity_domain(header, end); + case ACPI_HMAT_TYPE_LOCALITY: + return hmat_parse_locality(header, end); + case ACPI_HMAT_TYPE_CACHE: + return hmat_parse_cache(header, end); + default: + return -EINVAL; + } +} + +static __init int srat_parse_mem_affinity(union acpi_subtable_headers *header, + const unsigned long end) +{ + struct acpi_srat_mem_affinity *ma = (void *)header; + + if (!ma) + return -EINVAL; + if (!(ma->flags & ACPI_SRAT_MEM_ENABLED)) + return 0; + alloc_memory_target(ma->proximity_domain, ma->base_address, ma->length); + return 0; +} + +static __init int srat_parse_genport_affinity(union acpi_subtable_headers *header, + const unsigned long end) +{ + struct acpi_srat_generic_affinity *ga = (void *)header; + + if (!ga) + return -EINVAL; + + if (!(ga->flags & ACPI_SRAT_GENERIC_AFFINITY_ENABLED)) + return 0; + + /* Skip PCI device_handle for now */ + if (ga->device_handle_type != 0) + return 0; + + alloc_genport_target(ga->proximity_domain, + (u8 *)ga->device_handle); + + return 0; +} + +static u32 hmat_initiator_perf(struct memory_target *target, + struct memory_initiator *initiator, + struct acpi_hmat_locality *hmat_loc) +{ + unsigned int ipds, tpds, i, idx = 0, tdx = 0; + u32 *inits, *targs; + u16 *entries; + + ipds = hmat_loc->number_of_initiator_Pds; + tpds = hmat_loc->number_of_target_Pds; + inits = (u32 *)(hmat_loc + 1); + targs = inits + ipds; + entries = (u16 *)(targs + tpds); + + for (i = 0; i < ipds; i++) { + if (inits[i] == initiator->processor_pxm) { + idx = i; + break; + } + } + + if (i == ipds) + return 0; + + for (i = 0; i < tpds; i++) { + if (targs[i] == target->memory_pxm) { + tdx = i; + break; + } + } + if (i == tpds) + return 0; + + return hmat_normalize(entries[idx * tpds + tdx], + hmat_loc->entry_base_unit, + hmat_loc->data_type); +} + +static bool hmat_update_best(u8 type, u32 value, u32 *best) +{ + bool updated = false; + + if (!value) + return false; + + switch (type) { + case ACPI_HMAT_ACCESS_LATENCY: + case ACPI_HMAT_READ_LATENCY: + case ACPI_HMAT_WRITE_LATENCY: + if (!*best || *best > value) { + *best = value; + updated = true; + } + break; + case ACPI_HMAT_ACCESS_BANDWIDTH: + case ACPI_HMAT_READ_BANDWIDTH: + case ACPI_HMAT_WRITE_BANDWIDTH: + if (!*best || *best < value) { + *best = value; + updated = true; + } + break; + } + + return updated; +} + +static int initiator_cmp(void *priv, const struct list_head *a, + const struct list_head *b) +{ + struct memory_initiator *ia; + struct memory_initiator *ib; + + ia = list_entry(a, struct memory_initiator, node); + ib = list_entry(b, struct memory_initiator, node); + + return ia->processor_pxm - ib->processor_pxm; +} + +static int initiators_to_nodemask(unsigned long *p_nodes) +{ + struct memory_initiator *initiator; + + if (list_empty(&initiators)) + return -ENXIO; + + list_for_each_entry(initiator, &initiators, node) + 
set_bit(initiator->processor_pxm, p_nodes); + + return 0; +} + +static void hmat_update_target_attrs(struct memory_target *target, + unsigned long *p_nodes, int access) +{ + struct memory_initiator *initiator; + unsigned int cpu_nid; + struct memory_locality *loc = NULL; + u32 best = 0; + int i; + + /* Don't update for generic port if there's no device handle */ + if ((access == NODE_ACCESS_CLASS_GENPORT_SINK_LOCAL || + access == NODE_ACCESS_CLASS_GENPORT_SINK_CPU) && + !(*(u16 *)target->gen_port_device_handle)) + return; + + bitmap_zero(p_nodes, MAX_NUMNODES); + /* + * If the Address Range Structure provides a local processor pxm, set + * only that one. Otherwise, find the best performance attributes and + * collect all initiators that match. + */ + if (target->processor_pxm != PXM_INVAL) { + cpu_nid = pxm_to_node(target->processor_pxm); + if (access == ACCESS_COORDINATE_LOCAL || + node_state(cpu_nid, N_CPU)) { + set_bit(target->processor_pxm, p_nodes); + return; + } + } + + if (list_empty(&localities)) + return; + + /* + * We need the initiator list sorted so we can use bitmap_clear for + * previously set initiators when we find a better memory accessor. + * We'll also use the sorting to prime the candidate nodes with known + * initiators. + */ + list_sort(NULL, &initiators, initiator_cmp); + if (initiators_to_nodemask(p_nodes) < 0) + return; + + for (i = WRITE_LATENCY; i <= READ_BANDWIDTH; i++) { + loc = localities_types[i]; + if (!loc) + continue; + + best = 0; + list_for_each_entry(initiator, &initiators, node) { + u32 value; + + if ((access == ACCESS_COORDINATE_CPU || + access == NODE_ACCESS_CLASS_GENPORT_SINK_CPU) && + !initiator->has_cpu) { + clear_bit(initiator->processor_pxm, p_nodes); + continue; + } + if (!test_bit(initiator->processor_pxm, p_nodes)) + continue; + + value = hmat_initiator_perf(target, initiator, loc->hmat_loc); + if (hmat_update_best(loc->hmat_loc->data_type, value, &best)) + bitmap_clear(p_nodes, 0, initiator->processor_pxm); + if (value != best) + clear_bit(initiator->processor_pxm, p_nodes); + } + if (best) + hmat_update_target_access(target, loc->hmat_loc->data_type, best, access); + } +} + +static void __hmat_register_target_initiators(struct memory_target *target, + unsigned long *p_nodes, + int access) +{ + unsigned int mem_nid, cpu_nid; + int i; + + mem_nid = pxm_to_node(target->memory_pxm); + hmat_update_target_attrs(target, p_nodes, access); + for_each_set_bit(i, p_nodes, MAX_NUMNODES) { + cpu_nid = pxm_to_node(i); + register_memory_node_under_compute_node(mem_nid, cpu_nid, access); + } +} + +static void hmat_update_generic_target(struct memory_target *target) +{ + static DECLARE_BITMAP(p_nodes, MAX_NUMNODES); + + hmat_update_target_attrs(target, p_nodes, + NODE_ACCESS_CLASS_GENPORT_SINK_LOCAL); + hmat_update_target_attrs(target, p_nodes, + NODE_ACCESS_CLASS_GENPORT_SINK_CPU); +} + +static void hmat_register_target_initiators(struct memory_target *target) +{ + static DECLARE_BITMAP(p_nodes, MAX_NUMNODES); + + __hmat_register_target_initiators(target, p_nodes, + ACCESS_COORDINATE_LOCAL); + __hmat_register_target_initiators(target, p_nodes, + ACCESS_COORDINATE_CPU); +} + +static void hmat_register_target_cache(struct memory_target *target) +{ + unsigned mem_nid = pxm_to_node(target->memory_pxm); + struct target_cache *tcache; + + list_for_each_entry(tcache, &target->caches, node) + node_add_cache(mem_nid, &tcache->cache_attrs); +} + +static void hmat_register_target_perf(struct memory_target *target, int access) +{ + unsigned mem_nid = 
pxm_to_node(target->memory_pxm); + node_set_perf_attrs(mem_nid, &target->coord[access], access); +} + +static void hmat_register_target_devices(struct memory_target *target) +{ + struct resource *res; + + /* + * Do not bother creating devices if no driver is available to + * consume them. + */ + if (!IS_ENABLED(CONFIG_DEV_DAX_HMEM)) + return; + + for (res = target->memregions.child; res; res = res->sibling) { + int target_nid = pxm_to_node(target->memory_pxm); + + hmem_register_resource(target_nid, res); + } +} + +static void hmat_hotplug_target(struct memory_target *target) +{ + int nid = pxm_to_node(target->memory_pxm); + + /* + * Skip offline nodes. This can happen when memory marked EFI_MEMORY_SP, + * "specific purpose", is applied to all the memory in a proximity + * domain leading to * the node being marked offline / unplugged, or if + * memory-only "hotplug" node is offline. + */ + if (nid == NUMA_NO_NODE || !node_online(nid)) + return; + + guard(mutex)(&target_lock); + if (target->registered) + return; + + hmat_register_target_initiators(target); + hmat_register_target_cache(target); + hmat_register_target_perf(target, ACCESS_COORDINATE_LOCAL); + hmat_register_target_perf(target, ACCESS_COORDINATE_CPU); + target->registered = true; +} + +static void hmat_register_target(struct memory_target *target) +{ + /* + * Devices may belong to either an offline or online + * node, so unconditionally add them. + */ + hmat_register_target_devices(target); + + /* + * Register generic port perf numbers. The nid may not be + * initialized and is still NUMA_NO_NODE. + */ + scoped_guard(mutex, &target_lock) { + if (*(u16 *)target->gen_port_device_handle) { + hmat_update_generic_target(target); + target->registered = true; + return; + } + } + + hmat_hotplug_target(target); +} + +static void hmat_register_targets(void) +{ + struct memory_target *target; + + list_for_each_entry(target, &targets, node) + hmat_register_target(target); +} + +static int hmat_callback(struct notifier_block *self, + unsigned long action, void *arg) +{ + struct memory_target *target; + struct node_notify *nn = arg; + int pxm, nid = nn->nid; + + if (action != NODE_ADDED_FIRST_MEMORY) + return NOTIFY_OK; + + pxm = node_to_pxm(nid); + target = find_mem_target(pxm); + if (!target) + return NOTIFY_OK; + + hmat_hotplug_target(target); + return NOTIFY_OK; +} + +static int __init hmat_set_default_dram_perf(void) +{ + int rc; + int nid, pxm; + struct memory_target *target; + struct access_coordinate *attrs; + + for_each_node_mask(nid, default_dram_nodes) { + pxm = node_to_pxm(nid); + target = find_mem_target(pxm); + if (!target) + continue; + attrs = &target->coord[ACCESS_COORDINATE_CPU]; + rc = mt_set_default_dram_perf(nid, attrs, "ACPI HMAT"); + if (rc) + return rc; + } + + return 0; +} + +static int hmat_calculate_adistance(struct notifier_block *self, + unsigned long nid, void *data) +{ + static DECLARE_BITMAP(p_nodes, MAX_NUMNODES); + struct memory_target *target; + struct access_coordinate *perf; + int *adist = data; + int pxm; + + pxm = node_to_pxm(nid); + target = find_mem_target(pxm); + if (!target) + return NOTIFY_OK; + + mutex_lock(&target_lock); + hmat_update_target_attrs(target, p_nodes, ACCESS_COORDINATE_CPU); + mutex_unlock(&target_lock); + + perf = &target->coord[ACCESS_COORDINATE_CPU]; + + if (mt_perf_to_adistance(perf, adist)) + return NOTIFY_OK; + + return NOTIFY_STOP; +} + +static struct notifier_block hmat_adist_nb __meminitdata = { + .notifier_call = hmat_calculate_adistance, + .priority = 100, +}; + +static 
__init void hmat_free_structures(void) +{ + struct memory_target *target, *tnext; + struct memory_locality *loc, *lnext; + struct memory_initiator *initiator, *inext; + struct target_cache *tcache, *cnext; + + list_for_each_entry_safe(target, tnext, &targets, node) { + struct resource *res, *res_next; + + list_for_each_entry_safe(tcache, cnext, &target->caches, node) { + list_del(&tcache->node); + kfree(tcache); + } + + list_del(&target->node); + res = target->memregions.child; + while (res) { + res_next = res->sibling; + __release_region(&target->memregions, res->start, + resource_size(res)); + res = res_next; + } + kfree(target); + } + + list_for_each_entry_safe(initiator, inext, &initiators, node) { + list_del(&initiator->node); + kfree(initiator); + } + + list_for_each_entry_safe(loc, lnext, &localities, node) { + list_del(&loc->node); + kfree(loc); + } +} + +static __init int hmat_init(void) +{ + struct acpi_table_header *tbl; + enum acpi_hmat_type i; + acpi_status status; + + if (srat_disabled() || hmat_disable) + return 0; + + status = acpi_get_table(ACPI_SIG_SRAT, 0, &tbl); + if (ACPI_FAILURE(status)) + return 0; + + if (acpi_table_parse_entries(ACPI_SIG_SRAT, + sizeof(struct acpi_table_srat), + ACPI_SRAT_TYPE_MEMORY_AFFINITY, + srat_parse_mem_affinity, 0) < 0) + goto out_put; + + if (acpi_table_parse_entries(ACPI_SIG_SRAT, + sizeof(struct acpi_table_srat), + ACPI_SRAT_TYPE_GENERIC_PORT_AFFINITY, + srat_parse_genport_affinity, 0) < 0) + goto out_put; + + acpi_put_table(tbl); + + status = acpi_get_table(ACPI_SIG_HMAT, 0, &tbl); + if (ACPI_FAILURE(status)) + goto out_put; + + hmat_revision = tbl->revision; + switch (hmat_revision) { + case 1: + case 2: + break; + default: + pr_notice("Ignoring: Unknown revision:%d\n", hmat_revision); + goto out_put; + } + + for (i = ACPI_HMAT_TYPE_PROXIMITY; i < ACPI_HMAT_TYPE_RESERVED; i++) { + if (acpi_table_parse_entries(ACPI_SIG_HMAT, + sizeof(struct acpi_table_hmat), i, + hmat_parse_subtable, 0) < 0) { + pr_notice("Ignoring: Invalid table"); + goto out_put; + } + } + hmat_register_targets(); + + /* Keep the table and structures if the notifier may use them */ + if (hotplug_node_notifier(hmat_callback, HMAT_CALLBACK_PRI)) + goto out_put; + + if (!hmat_set_default_dram_perf()) + register_mt_adistance_algorithm(&hmat_adist_nb); + + return 0; +out_put: + hmat_free_structures(); + acpi_put_table(tbl); + return 0; +} +subsys_initcall(hmat_init); diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa/srat.c index 7bbbf8256a41..aa87ee1583a4 100644 --- a/drivers/acpi/numa.c +++ b/drivers/acpi/numa/srat.c @@ -1,22 +1,8 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /* * acpi_numa.c - ACPI NUMA support * * Copyright (C) 2002 Takayoshi Kochi <t-kochi@bq.jp.nec.com> - * - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - * */ #define pr_fmt(fmt) "ACPI: " fmt @@ -28,9 +14,12 @@ #include <linux/errno.h> #include <linux/acpi.h> #include <linux/memblock.h> +#include <linux/memory.h> #include <linux/numa.h> #include <linux/nodemask.h> #include <linux/topology.h> +#include <linux/numa_memblks.h> +#include <linux/string_choices.h> static nodemask_t nodes_found_map = NODE_MASK_NONE; @@ -41,14 +30,22 @@ static int node_to_pxm_map[MAX_NUMNODES] = { [0 ... MAX_NUMNODES - 1] = PXM_INVAL }; unsigned char acpi_srat_revision __initdata; -int acpi_numa __initdata; +static int acpi_numa __initdata; + +static int last_real_pxm; + +void __init disable_srat(void) +{ + acpi_numa = -1; +} int pxm_to_node(int pxm) { - if (pxm < 0) + if (pxm < 0 || pxm >= MAX_PXM_DOMAINS || numa_off) return NUMA_NO_NODE; return pxm_to_node_map[pxm]; } +EXPORT_SYMBOL(pxm_to_node); int node_to_pxm(int node) { @@ -56,6 +53,7 @@ int node_to_pxm(int node) return PXM_INVAL; return node_to_pxm_map[node]; } +EXPORT_SYMBOL_GPL(node_to_pxm); static void __acpi_map_pxm_to_node(int pxm, int node) { @@ -75,56 +73,111 @@ int acpi_map_pxm_to_node(int pxm) node = pxm_to_node_map[pxm]; if (node == NUMA_NO_NODE) { - if (nodes_weight(nodes_found_map) >= MAX_NUMNODES) - return NUMA_NO_NODE; node = first_unset_node(nodes_found_map); + if (node >= MAX_NUMNODES) + return NUMA_NO_NODE; __acpi_map_pxm_to_node(pxm, node); node_set(node, nodes_found_map); } return node; } +EXPORT_SYMBOL(acpi_map_pxm_to_node); -/** - * acpi_map_pxm_to_online_node - Map proximity ID to online node - * @pxm: ACPI proximity ID - * - * This is similar to acpi_map_pxm_to_node(), but always returns an online - * node. When the mapped node from a given proximity ID is offline, it - * looks up the node distance table and returns the nearest online node. - * - * ACPI device drivers, which are called after the NUMA initialization has - * completed in the kernel, can call this interface to obtain their device - * NUMA topology from ACPI tables. Such drivers do not have to deal with - * offline nodes. A node may be offline when a device proximity ID is - * unique, SRAT memory entry does not exist, or NUMA is disabled, ex. - * "numa=off" on x86. +#ifdef CONFIG_NUMA_EMU +/* + * Take max_nid - 1 fake-numa nodes into account in both + * pxm_to_node_map()/node_to_pxm_map[] tables. */ -int acpi_map_pxm_to_online_node(int pxm) +int __init fix_pxm_node_maps(int max_nid) { - int node, min_node; - - node = acpi_map_pxm_to_node(pxm); - - if (node == NUMA_NO_NODE) - node = 0; + static int pxm_to_node_map_copy[MAX_PXM_DOMAINS] __initdata + = { [0 ... MAX_PXM_DOMAINS - 1] = NUMA_NO_NODE }; + static int node_to_pxm_map_copy[MAX_NUMNODES] __initdata + = { [0 ... 
MAX_NUMNODES - 1] = PXM_INVAL }; + int i, j, index = -1, count = 0; + nodemask_t nodes_to_enable; - min_node = node; - if (!node_online(node)) { - int min_dist = INT_MAX, dist, n; + if (numa_off) + return -1; - for_each_online_node(n) { - dist = node_distance(node, n); - if (dist < min_dist) { - min_dist = dist; - min_node = n; + /* no or incomplete node/PXM mapping set, nothing to do */ + if (srat_disabled()) + return 0; + + /* find fake nodes PXM mapping */ + for (i = 0; i < MAX_NUMNODES; i++) { + if (node_to_pxm_map[i] != PXM_INVAL) { + for (j = 0; j <= max_nid; j++) { + if ((emu_nid_to_phys[j] == i) && + WARN(node_to_pxm_map_copy[j] != PXM_INVAL, + "Node %d is already binded to PXM %d\n", + j, node_to_pxm_map_copy[j])) + return -1; + if (emu_nid_to_phys[j] == i) { + node_to_pxm_map_copy[j] = + node_to_pxm_map[i]; + if (j > index) + index = j; + count++; + } } } } + if (index == -1) { + pr_debug("No node/PXM mapping has been set\n"); + /* nothing more to be done */ + return 0; + } + if (WARN(index != max_nid, "%d max nid when expected %d\n", + index, max_nid)) + return -1; + + nodes_clear(nodes_to_enable); + + /* map phys nodes not used for fake nodes */ + for (i = 0; i < MAX_NUMNODES; i++) { + if (node_to_pxm_map[i] != PXM_INVAL) { + for (j = 0; j <= max_nid; j++) + if (emu_nid_to_phys[j] == i) + break; + /* fake nodes PXM mapping has been done */ + if (j <= max_nid) + continue; + /* find first hole */ + for (j = 0; + j < MAX_NUMNODES && + node_to_pxm_map_copy[j] != PXM_INVAL; + j++) + ; + if (WARN(j == MAX_NUMNODES, + "Number of nodes exceeds MAX_NUMNODES\n")) + return -1; + node_to_pxm_map_copy[j] = node_to_pxm_map[i]; + node_set(j, nodes_to_enable); + count++; + } + } + + /* creating reverse mapping in pxm_to_node_map[] */ + for (i = 0; i < MAX_NUMNODES; i++) + if (node_to_pxm_map_copy[i] != PXM_INVAL && + pxm_to_node_map_copy[node_to_pxm_map_copy[i]] == NUMA_NO_NODE) + pxm_to_node_map_copy[node_to_pxm_map_copy[i]] = i; + + /* overwrite with new mapping */ + for (i = 0; i < MAX_NUMNODES; i++) { + node_to_pxm_map[i] = node_to_pxm_map_copy[i]; + pxm_to_node_map[i] = pxm_to_node_map_copy[i]; + } + + /* enable other nodes found in PXM for hotplug */ + nodes_or(numa_nodes_parsed, nodes_to_enable, numa_nodes_parsed); - return min_node; + pr_debug("found %d total number of nodes\n", count); + return 0; } -EXPORT_SYMBOL(acpi_map_pxm_to_online_node); +#endif static void __init acpi_table_print_srat_entry(struct acpi_subtable_header *header) @@ -137,8 +190,7 @@ acpi_table_print_srat_entry(struct acpi_subtable_header *header) pr_debug("SRAT Processor (id[0x%02x] eid[0x%02x]) in proximity domain %d %s\n", p->apic_id, p->local_sapic_eid, p->proximity_domain_lo, - (p->flags & ACPI_SRAT_CPU_ENABLED) ? - "enabled" : "disabled"); + str_enabled_disabled(p->flags & ACPI_SRAT_CPU_ENABLED)); } break; @@ -150,8 +202,7 @@ acpi_table_print_srat_entry(struct acpi_subtable_header *header) (unsigned long long)p->base_address, (unsigned long long)p->length, p->proximity_domain, - (p->flags & ACPI_SRAT_MEM_ENABLED) ? - "enabled" : "disabled", + str_enabled_disabled(p->flags & ACPI_SRAT_MEM_ENABLED), (p->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE) ? " hot-pluggable" : "", (p->flags & ACPI_SRAT_MEM_NON_VOLATILE) ? @@ -166,8 +217,7 @@ acpi_table_print_srat_entry(struct acpi_subtable_header *header) pr_debug("SRAT Processor (x2apicid[0x%08x]) in proximity domain %d %s\n", p->apic_id, p->proximity_domain, - (p->flags & ACPI_SRAT_CPU_ENABLED) ? 
- "enabled" : "disabled"); + str_enabled_disabled(p->flags & ACPI_SRAT_CPU_ENABLED)); } break; @@ -178,8 +228,47 @@ acpi_table_print_srat_entry(struct acpi_subtable_header *header) pr_debug("SRAT Processor (acpi id[0x%04x]) in proximity domain %d %s\n", p->acpi_processor_uid, p->proximity_domain, - (p->flags & ACPI_SRAT_GICC_ENABLED) ? - "enabled" : "disabled"); + str_enabled_disabled(p->flags & ACPI_SRAT_GICC_ENABLED)); + } + break; + + case ACPI_SRAT_TYPE_GENERIC_AFFINITY: + { + struct acpi_srat_generic_affinity *p = + (struct acpi_srat_generic_affinity *)header; + + if (p->device_handle_type == 1) { + /* + * For pci devices this may be the only place they + * are assigned a proximity domain + */ + pr_debug("SRAT Generic Initiator(Seg:%u BDF:%u) in proximity domain %d %s\n", + *(u16 *)(&p->device_handle[0]), + *(u16 *)(&p->device_handle[2]), + p->proximity_domain, + str_enabled_disabled(p->flags & ACPI_SRAT_GENERIC_AFFINITY_ENABLED)); + } else { + /* + * In this case we can rely on the device having a + * proximity domain reference + */ + pr_debug("SRAT Generic Initiator(HID=%.8s UID=%.4s) in proximity domain %d %s\n", + (char *)(&p->device_handle[0]), + (char *)(&p->device_handle[8]), + p->proximity_domain, + str_enabled_disabled(p->flags & ACPI_SRAT_GENERIC_AFFINITY_ENABLED)); + } + } + break; + + case ACPI_SRAT_TYPE_RINTC_AFFINITY: + { + struct acpi_srat_rintc_affinity *p = + (struct acpi_srat_rintc_affinity *)header; + pr_debug("SRAT Processor (acpi id[0x%04x]) in proximity domain %d %s\n", + p->acpi_processor_uid, + p->proximity_domain, + str_enabled_disabled(p->flags & ACPI_SRAT_RINTC_ENABLED)); } break; @@ -201,7 +290,7 @@ static int __init slit_valid(struct acpi_table_slit *slit) int i, j; int d = slit->locality_count; for (i = 0; i < d; i++) { - for (j = 0; j < d; j++) { + for (j = 0; j < d; j++) { u8 val = slit->entry[d*i + j]; if (i == j) { if (val != LOCAL_DISTANCE) @@ -216,7 +305,7 @@ static int __init slit_valid(struct acpi_table_slit *slit) void __init bad_srat(void) { pr_err("SRAT: SRAT not used.\n"); - acpi_numa = -1; + disable_srat(); } int __init srat_disabled(void) @@ -224,16 +313,26 @@ int __init srat_disabled(void) return acpi_numa < 0; } -#if defined(CONFIG_X86) || defined(CONFIG_ARM64) +__weak int __init numa_fill_memblks(u64 start, u64 end) +{ + return NUMA_NO_MEMBLK; +} + /* * Callback for SLIT parsing. pxm_to_node() returns NUMA_NO_NODE for * I/O localities since SRAT does not list them. I/O localities are * not supported at this point. */ -void __init acpi_numa_slit_init(struct acpi_table_slit *slit) +static int __init acpi_parse_slit(struct acpi_table_header *table) { + struct acpi_table_slit *slit = (struct acpi_table_slit *)table; int i, j; + if (!slit_valid(slit)) { + pr_info("SLIT table looks invalid. 
Not used.\n"); + return -EINVAL; + } + for (i = 0; i < slit->locality_count; i++) { const int from_node = pxm_to_node(i); @@ -250,31 +349,36 @@ void __init acpi_numa_slit_init(struct acpi_table_slit *slit) slit->entry[slit->locality_count * i + j]); } } + + return 0; } -/* - * Default callback for parsing of the Proximity Domain <-> Memory - * Area mappings - */ -int __init -acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma) +static int parsed_numa_memblks __initdata; + +static int __init +acpi_parse_memory_affinity(union acpi_subtable_headers *header, + const unsigned long table_end) { + struct acpi_srat_mem_affinity *ma; u64 start, end; u32 hotpluggable; int node, pxm; + ma = (struct acpi_srat_mem_affinity *)header; + + acpi_table_print_srat_entry(&header->common); + if (srat_disabled()) - goto out_err; + return 0; if (ma->header.length < sizeof(struct acpi_srat_mem_affinity)) { pr_err("SRAT: Unexpected header length: %d\n", ma->header.length); goto out_err_bad_srat; } if ((ma->flags & ACPI_SRAT_MEM_ENABLED) == 0) - goto out_err; - hotpluggable = ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE; - if (hotpluggable && !IS_ENABLED(CONFIG_MEMORY_HOTPLUG)) - goto out_err; + return 0; + hotpluggable = IS_ENABLED(CONFIG_MEMORY_HOTPLUG) && + (ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE); start = ma->base_address; end = start + ma->length; @@ -283,7 +387,7 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma) pxm &= 0xff; node = acpi_map_pxm_to_node(pxm); - if (node == NUMA_NO_NODE || node >= MAX_NUMNODES) { + if (node == NUMA_NO_NODE) { pr_err("SRAT: Too many proximity domains.\n"); goto out_err_bad_srat; } @@ -310,24 +414,65 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma) max_possible_pfn = max(max_possible_pfn, PFN_UP(end - 1)); + parsed_numa_memblks++; + return 0; + out_err_bad_srat: + /* Just disable SRAT, but do not fail and ignore errors. */ bad_srat(); -out_err: - return -EINVAL; + + return 0; } -#endif /* defined(CONFIG_X86) || defined (CONFIG_ARM64) */ -static int __init acpi_parse_slit(struct acpi_table_header *table) +static int __init acpi_parse_cfmws(union acpi_subtable_headers *header, + void *arg, const unsigned long table_end) { - struct acpi_table_slit *slit = (struct acpi_table_slit *)table; + struct acpi_cedt_cfmws *cfmws; + int *fake_pxm = arg; + u64 start, end, align; + int node; + int err; - if (!slit_valid(slit)) { - pr_info("SLIT table looks invalid. Not used.\n"); + cfmws = (struct acpi_cedt_cfmws *)header; + start = cfmws->base_hpa; + end = cfmws->base_hpa + cfmws->window_size; + + /* Align memblock size to CFMW regions if possible */ + align = 1UL << __ffs(start | end); + if (align >= SZ_256M) { + err = memory_block_advise_max_size(align); + if (err) + pr_warn("CFMWS: memblock size advise failed (%d)\n", err); + } else + pr_err("CFMWS: [BIOS BUG] base/size alignment violates spec\n"); + + /* + * The SRAT may have already described NUMA details for all, + * or a portion of, this CFMWS HPA range. Extend the memblks + * found for any portion of the window to cover the entire + * window. + */ + if (!numa_fill_memblks(start, end)) + return 0; + + /* No SRAT description. Create a new node. 
*/ + node = acpi_map_pxm_to_node(*fake_pxm); + + if (node == NUMA_NO_NODE) { + pr_err("ACPI NUMA: Too many proximity domains while processing CFMWS.\n"); return -EINVAL; } - acpi_numa_slit_init(slit); + if (numa_add_reserved_memblk(node, start, end) < 0) { + /* CXL driver must handle the NUMA_NO_NODE case */ + pr_warn("ACPI NUMA: Failed to add memblk for CFMWS node %d [mem %#llx-%#llx]\n", + node, start, end); + } + node_set(node, numa_nodes_parsed); + + /* Set the next available fake_pxm value */ + (*fake_pxm)++; return 0; } @@ -338,16 +483,14 @@ acpi_numa_x2apic_affinity_init(struct acpi_srat_x2apic_cpu_affinity *pa) } static int __init -acpi_parse_x2apic_affinity(struct acpi_subtable_header *header, +acpi_parse_x2apic_affinity(union acpi_subtable_headers *header, const unsigned long end) { struct acpi_srat_x2apic_cpu_affinity *processor_affinity; processor_affinity = (struct acpi_srat_x2apic_cpu_affinity *)header; - if (!processor_affinity) - return -EINVAL; - acpi_table_print_srat_entry(header); + acpi_table_print_srat_entry(&header->common); /* let architecture-dependent part to do it */ acpi_numa_x2apic_affinity_init(processor_affinity); @@ -356,16 +499,14 @@ acpi_parse_x2apic_affinity(struct acpi_subtable_header *header, } static int __init -acpi_parse_processor_affinity(struct acpi_subtable_header *header, +acpi_parse_processor_affinity(union acpi_subtable_headers *header, const unsigned long end) { struct acpi_srat_cpu_affinity *processor_affinity; processor_affinity = (struct acpi_srat_cpu_affinity *)header; - if (!processor_affinity) - return -EINVAL; - acpi_table_print_srat_entry(header); + acpi_table_print_srat_entry(&header->common); /* let architecture-dependent part to do it */ acpi_numa_processor_affinity_init(processor_affinity); @@ -374,16 +515,14 @@ acpi_parse_processor_affinity(struct acpi_subtable_header *header, } static int __init -acpi_parse_gicc_affinity(struct acpi_subtable_header *header, +acpi_parse_gicc_affinity(union acpi_subtable_headers *header, const unsigned long end) { struct acpi_srat_gicc_affinity *processor_affinity; processor_affinity = (struct acpi_srat_gicc_affinity *)header; - if (!processor_affinity) - return -EINVAL; - acpi_table_print_srat_entry(header); + acpi_table_print_srat_entry(&header->common); /* let architecture-dependent part to do it */ acpi_numa_gicc_affinity_init(processor_affinity); @@ -391,23 +530,53 @@ acpi_parse_gicc_affinity(struct acpi_subtable_header *header, return 0; } -static int __initdata parsed_numa_memblks; - +#if defined(CONFIG_X86) || defined(CONFIG_ARM64) static int __init -acpi_parse_memory_affinity(struct acpi_subtable_header * header, - const unsigned long end) +acpi_parse_gi_affinity(union acpi_subtable_headers *header, + const unsigned long end) { - struct acpi_srat_mem_affinity *memory_affinity; + struct acpi_srat_generic_affinity *gi_affinity; + int node; - memory_affinity = (struct acpi_srat_mem_affinity *)header; - if (!memory_affinity) + gi_affinity = (struct acpi_srat_generic_affinity *)header; + if (!gi_affinity) return -EINVAL; + acpi_table_print_srat_entry(&header->common); - acpi_table_print_srat_entry(header); + if (!(gi_affinity->flags & ACPI_SRAT_GENERIC_AFFINITY_ENABLED)) + return -EINVAL; + + node = acpi_map_pxm_to_node(gi_affinity->proximity_domain); + if (node == NUMA_NO_NODE) { + pr_err("SRAT: Too many proximity domains.\n"); + return -EINVAL; + } + node_set(node, numa_nodes_parsed); + node_set_state(node, N_GENERIC_INITIATOR); + + return 0; +} +#else +static int __init 
+acpi_parse_gi_affinity(union acpi_subtable_headers *header, + const unsigned long end) +{ + return 0; +} +#endif /* defined(CONFIG_X86) || defined (CONFIG_ARM64) */ + +static int __init +acpi_parse_rintc_affinity(union acpi_subtable_headers *header, + const unsigned long end) +{ + struct acpi_srat_rintc_affinity *rintc_affinity; + + rintc_affinity = (struct acpi_srat_rintc_affinity *)header; + acpi_table_print_srat_entry(&header->common); /* let architecture-dependent part to do it */ - if (!acpi_numa_memory_affinity_init(memory_affinity)) - parsed_numa_memblks++; + acpi_numa_rintc_affinity_init(rintc_affinity); + return 0; } @@ -433,7 +602,7 @@ acpi_table_parse_srat(enum acpi_srat_type id, int __init acpi_numa_init(void) { - int cnt = 0; + int i, fake_pxm, cnt = 0; if (acpi_disabled) return -EINVAL; @@ -446,7 +615,7 @@ int __init acpi_numa_init(void) /* SRAT: System Resource Affinity Table */ if (!acpi_table_parse(ACPI_SIG_SRAT, acpi_parse_srat)) { - struct acpi_subtable_proc srat_proc[3]; + struct acpi_subtable_proc srat_proc[5]; memset(srat_proc, 0, sizeof(srat_proc)); srat_proc[0].id = ACPI_SRAT_TYPE_CPU_AFFINITY; @@ -455,6 +624,10 @@ int __init acpi_numa_init(void) srat_proc[1].handler = acpi_parse_x2apic_affinity; srat_proc[2].id = ACPI_SRAT_TYPE_GICC_AFFINITY; srat_proc[2].handler = acpi_parse_gicc_affinity; + srat_proc[3].id = ACPI_SRAT_TYPE_GENERIC_AFFINITY; + srat_proc[3].handler = acpi_parse_gi_affinity; + srat_proc[4].id = ACPI_SRAT_TYPE_RINTC_AFFINITY; + srat_proc[4].handler = acpi_parse_rintc_affinity; acpi_table_parse_entries_array(ACPI_SIG_SRAT, sizeof(struct acpi_table_srat), @@ -467,6 +640,23 @@ int __init acpi_numa_init(void) /* SLIT: System Locality Information Table */ acpi_table_parse(ACPI_SIG_SLIT, acpi_parse_slit); + /* + * CXL Fixed Memory Window Structures (CFMWS) must be parsed + * after the SRAT. Create NUMA Nodes for CXL memory ranges that + * are defined in the CFMWS and not already defined in the SRAT. + * Initialize a fake_pxm as the first available PXM to emulate. + */ + + /* fake_pxm is the next unused PXM value after SRAT parsing */ + for (i = 0, fake_pxm = -1; i < MAX_NUMNODES; i++) { + if (node_to_pxm_map[i] > fake_pxm) + fake_pxm = node_to_pxm_map[i]; + } + last_real_pxm = fake_pxm; + fake_pxm++; + acpi_table_parse_cedt(ACPI_CEDT_TYPE_CFMWS, acpi_parse_cfmws, + &fake_pxm); + if (cnt < 0) return cnt; else if (!parsed_numa_memblks) @@ -474,6 +664,14 @@ int __init acpi_numa_init(void) return 0; } +bool acpi_node_backed_by_real_pxm(int nid) +{ + int pxm = node_to_pxm(nid); + + return pxm <= last_real_pxm; +} +EXPORT_SYMBOL_GPL(acpi_node_backed_by_real_pxm); + static int acpi_get_pxm(acpi_handle h) { unsigned long long pxm; @@ -497,6 +695,6 @@ int acpi_get_node(acpi_handle handle) pxm = acpi_get_pxm(handle); - return acpi_map_pxm_to_node(pxm); + return pxm_to_node(pxm); } EXPORT_SYMBOL(acpi_get_node); diff --git a/drivers/acpi/nvs.c b/drivers/acpi/nvs.c index 85287b8fe3aa..a2b11069e792 100644 --- a/drivers/acpi/nvs.c +++ b/drivers/acpi/nvs.c @@ -1,11 +1,12 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * nvs.c - Routines for saving and restoring ACPI NVS memory region * * Copyright (C) 2008-2011 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc. - * - * This file is released under the GPLv2. 
*/ +#define pr_fmt(fmt) "ACPI: PM: " fmt + #include <linux/io.h> #include <linux/kernel.h> #include <linux/list.h> @@ -83,19 +84,19 @@ struct nvs_page { static LIST_HEAD(nvs_list); /** - * suspend_nvs_register - register platform NVS memory region to save - * @start - physical address of the region - * @size - size of the region + * suspend_nvs_register - register platform NVS memory region to save + * @start: Physical address of the region. + * @size: Size of the region. * - * The NVS region need not be page-aligned (both ends) and we arrange - * things so that the data from page-aligned addresses in this region will - * be copied into separate RAM pages. + * The NVS region need not be page-aligned (both ends) and we arrange + * things so that the data from page-aligned addresses in this region will + * be copied into separate RAM pages. */ static int suspend_nvs_register(unsigned long start, unsigned long size) { struct nvs_page *entry, *next; - pr_info("PM: Registering ACPI NVS region [mem %#010lx-%#010lx] (%ld bytes)\n", + pr_info("Registering ACPI NVS region [mem %#010lx-%#010lx] (%ld bytes)\n", start, start + size - 1, size); while (size > 0) { @@ -124,7 +125,7 @@ static int suspend_nvs_register(unsigned long start, unsigned long size) } /** - * suspend_nvs_free - free data pages allocated for saving NVS regions + * suspend_nvs_free - free data pages allocated for saving NVS regions */ void suspend_nvs_free(void) { @@ -148,7 +149,7 @@ void suspend_nvs_free(void) } /** - * suspend_nvs_alloc - allocate memory necessary for saving NVS regions + * suspend_nvs_alloc - allocate memory necessary for saving NVS regions */ int suspend_nvs_alloc(void) { @@ -165,13 +166,13 @@ int suspend_nvs_alloc(void) } /** - * suspend_nvs_save - save NVS memory regions + * suspend_nvs_save - save NVS memory regions */ int suspend_nvs_save(void) { struct nvs_page *entry; - printk(KERN_INFO "PM: Saving platform NVS memory\n"); + pr_info("Saving platform NVS memory\n"); list_for_each_entry(entry, &nvs_list, node) if (entry->data) { @@ -194,16 +195,16 @@ int suspend_nvs_save(void) } /** - * suspend_nvs_restore - restore NVS memory regions + * suspend_nvs_restore - restore NVS memory regions * - * This function is going to be called with interrupts disabled, so it - * cannot iounmap the virtual addresses used to access the NVS region. + * This function is going to be called with interrupts disabled, so it + * cannot iounmap the virtual addresses used to access the NVS region. */ void suspend_nvs_restore(void) { struct nvs_page *entry; - printk(KERN_INFO "PM: Restoring platform NVS memory\n"); + pr_info("Restoring platform NVS memory\n"); list_for_each_entry(entry, &nvs_list, node) if (entry->data) diff --git a/drivers/acpi/osi.c b/drivers/acpi/osi.c index efd2ce099893..f2c943b934be 100644 --- a/drivers/acpi/osi.c +++ b/drivers/acpi/osi.c @@ -1,22 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /* * osi.c - _OSI implementation * * Copyright (C) 2016 Intel Corporation * Author: Lv Zheng <lv.zheng@intel.com> - * - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or (at - * your option) any later version. 
- * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ /* Uncomment next line to get verbose printout */ @@ -55,32 +42,7 @@ static struct acpi_osi_entry osi_setup_entries[OSI_STRING_ENTRIES_MAX] __initdata = { {"Module Device", true}, {"Processor Device", true}, - {"3.0 _SCP Extensions", true}, {"Processor Aggregator Device", true}, - /* - * Linux-Dell-Video is used by BIOS to disable RTD3 for NVidia graphics - * cards as RTD3 is not supported by drivers now. Systems with NVidia - * cards will hang without RTD3 disabled. - * - * Once NVidia drivers officially support RTD3, this _OSI strings can - * be removed if both new and old graphics cards are supported. - */ - {"Linux-Dell-Video", true}, - /* - * Linux-Lenovo-NV-HDMI-Audio is used by BIOS to power on NVidia's HDMI - * audio device which is turned off for power-saving in Windows OS. - * This power management feature observed on some Lenovo Thinkpad - * systems which will not be able to output audio via HDMI without - * a BIOS workaround. - */ - {"Linux-Lenovo-NV-HDMI-Audio", true}, - /* - * Linux-HPI-Hybrid-Graphics is used by BIOS to enable dGPU to - * output video directly to external monitors on HP Inc. mobile - * workstations as Nvidia and AMD VGA drivers provide limited - * hybrid graphics supports. - */ - {"Linux-HPI-Hybrid-Graphics", true}, }; static u32 acpi_osi_handler(acpi_string interface, u32 supported) @@ -147,7 +109,7 @@ void __init acpi_osi_setup(char *str) break; } else if (osi->string[0] == '\0') { osi->enable = enable; - strncpy(osi->string, str, OSI_STRING_LENGTH_MAX); + strscpy(osi->string, str, OSI_STRING_LENGTH_MAX); break; } } @@ -486,9 +448,9 @@ static const struct dmi_system_id acpi_osi_dmi_table[] __initconst = { */ /* - * Without this this EEEpc exports a non working WMI interface, with - * this it exports a working "good old" eeepc_laptop interface, fixing - * both brightness control, and rfkill not working. + * Without this EEEpc exports a non working WMI interface, with + * this it exports a working "good old" eeepc_laptop interface, + * fixing both brightness control, and rfkill not working. */ { .callback = dmi_enable_osi_linux, diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c index f29e427d0d1d..05393a7315fe 100644 --- a/drivers/acpi/osl.c +++ b/drivers/acpi/osl.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /* * acpi_osl.c - OS-dependent functions ($Revision: 83 $) * @@ -6,28 +7,16 @@ * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com> * Copyright (c) 2008 Intel Corporation * Author: Matthew Wilcox <willy@linux.intel.com> - * - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - * */ +#define pr_fmt(fmt) "ACPI: OSL: " fmt + #include <linux/module.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/mm.h> #include <linux/highmem.h> +#include <linux/lockdep.h> #include <linux/pci.h> #include <linux/interrupt.h> #include <linux/kmod.h> @@ -40,15 +29,16 @@ #include <linux/list.h> #include <linux/jiffies.h> #include <linux/semaphore.h> +#include <linux/security.h> #include <asm/io.h> #include <linux/uaccess.h> #include <linux/io-64-nonatomic-lo-hi.h> #include "acpica/accommon.h" -#include "acpica/acnamesp.h" #include "internal.h" +/* Definitions for ACPI_DEBUG_PRINT() */ #define _COMPONENT ACPI_OS_SERVICES ACPI_MODULE_NAME("osl"); @@ -89,11 +79,15 @@ struct acpi_ioremap { void __iomem *virt; acpi_physical_address phys; acpi_size size; - unsigned long refcount; + union { + unsigned long refcount; + struct rcu_work rwork; + } track; }; static LIST_HEAD(acpi_ioremaps); static DEFINE_MUTEX(acpi_ioremap_lock); +#define acpi_ioremap_lock_held() lock_is_held(&acpi_ioremap_lock.dep_map) static void __init acpi_request_region (struct acpi_generic_address *gas, unsigned int length, char *desc) @@ -155,7 +149,7 @@ void acpi_os_printf(const char *fmt, ...) } EXPORT_SYMBOL(acpi_os_printf); -void acpi_os_vprintf(const char *fmt, va_list args) +void __printf(1, 0) acpi_os_vprintf(const char *fmt, va_list args) { static char buffer[512]; @@ -194,8 +188,19 @@ acpi_physical_address __init acpi_os_get_root_pointer(void) acpi_physical_address pa; #ifdef CONFIG_KEXEC - if (acpi_rsdp) + /* + * We may have been provided with an RSDP on the command line, + * but if a malicious user has done so they may be pointing us + * at modified ACPI tables that could alter kernel behaviour - + * so, we check the lockdown status before making use of + * it. If we trust it then also stash it in an architecture + * specific location (if appropriate) so it can be carried + * over further kexec()s. 
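(A minimal sketch of the plumbing behind the comment above, for readers following along: the RSDP override would normally arrive via an early boot parameter and is only honoured when ACPI table overrides are not locked down. Only the "acpi_rsdp" parameter name and the security_locked_down(LOCKDOWN_ACPI_TABLES) gate are taken from the surrounding hunk; the helper and variable names below are illustrative, not lifted from the file.)

	#include <linux/init.h>
	#include <linux/kernel.h>
	#include <linux/security.h>

	/* Illustrative only: where a command-line RSDP override could come from. */
	static unsigned long rsdp_override;	/* physical address, 0 = not set */

	static int __init rsdp_override_setup(char *arg)
	{
		return kstrtoul(arg, 16, &rsdp_override);
	}
	early_param("acpi_rsdp", rsdp_override_setup);

	/* ...and the reason it is only trusted when the kernel is not locked down: */
	static bool rsdp_override_usable(void)
	{
		return rsdp_override && !security_locked_down(LOCKDOWN_ACPI_TABLES);
	}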
+ */ + if (acpi_rsdp && !security_locked_down(LOCKDOWN_ACPI_TABLES)) { + acpi_arch_set_root_pointer(acpi_rsdp); return acpi_rsdp; + } #endif pa = acpi_arch_get_root_pointer(); if (pa) @@ -206,7 +211,7 @@ acpi_physical_address __init acpi_os_get_root_pointer(void) return efi.acpi20; if (efi.acpi != EFI_INVALID_TABLE_ADDR) return efi.acpi; - pr_err(PREFIX "System description tables not found\n"); + pr_err("System description tables not found\n"); } else if (IS_ENABLED(CONFIG_ACPI_LEGACY_TABLES_LOOKUP)) { acpi_find_root_pointer(&pa); } @@ -220,7 +225,7 @@ acpi_map_lookup(acpi_physical_address phys, acpi_size size) { struct acpi_ioremap *map; - list_for_each_entry_rcu(map, &acpi_ioremaps, list) + list_for_each_entry_rcu(map, &acpi_ioremaps, list, acpi_ioremap_lock_held()) if (map->phys <= phys && phys + size <= map->phys + map->size) return map; @@ -250,7 +255,7 @@ void __iomem *acpi_os_get_iomem(acpi_physical_address phys, unsigned int size) map = acpi_map_lookup(phys, size); if (map) { virt = map->virt + (phys - map->phys); - map->refcount++; + map->track.refcount++; } mutex_unlock(&acpi_ioremap_lock); return virt; @@ -263,7 +268,7 @@ acpi_map_lookup_virt(void __iomem *virt, acpi_size size) { struct acpi_ioremap *map; - list_for_each_entry_rcu(map, &acpi_ioremaps, list) + list_for_each_entry_rcu(map, &acpi_ioremaps, list, acpi_ioremap_lock_held()) if (map->virt <= virt && virt + size <= map->virt + map->size) return map; @@ -271,7 +276,7 @@ acpi_map_lookup_virt(void __iomem *virt, acpi_size size) return NULL; } -#if defined(CONFIG_IA64) || defined(CONFIG_ARM64) +#if defined(CONFIG_ARM64) || defined(CONFIG_RISCV) /* ioremap will take care of cache attributes */ #define should_use_kmap(pfn) 0 #else @@ -315,8 +320,8 @@ static void acpi_unmap(acpi_physical_address pg_off, void __iomem *vaddr) * During early init (when acpi_permanent_mmap has not been set yet) this * routine simply calls __acpi_map_table() to get the job done. */ -void __iomem *__ref -acpi_os_map_iomem(acpi_physical_address phys, acpi_size size) +void __iomem __ref +*acpi_os_map_iomem(acpi_physical_address phys, acpi_size size) { struct acpi_ioremap *map; void __iomem *virt; @@ -324,7 +329,7 @@ acpi_os_map_iomem(acpi_physical_address phys, acpi_size size) acpi_size pg_sz; if (phys > ULONG_MAX) { - printk(KERN_ERR PREFIX "Cannot map memory that high\n"); + pr_err("Cannot map memory that high: 0x%llx\n", phys); return NULL; } @@ -335,7 +340,7 @@ acpi_os_map_iomem(acpi_physical_address phys, acpi_size size) /* Check if there's a suitable mapping already. 
*/ map = acpi_map_lookup(phys, size); if (map) { - map->refcount++; + map->track.refcount++; goto out; } @@ -347,7 +352,7 @@ acpi_os_map_iomem(acpi_physical_address phys, acpi_size size) pg_off = round_down(phys, PAGE_SIZE); pg_sz = round_up(phys + size, PAGE_SIZE) - pg_off; - virt = acpi_map(pg_off, pg_sz); + virt = acpi_map(phys, size); if (!virt) { mutex_unlock(&acpi_ioremap_lock); kfree(map); @@ -355,10 +360,10 @@ acpi_os_map_iomem(acpi_physical_address phys, acpi_size size) } INIT_LIST_HEAD(&map->list); - map->virt = virt; + map->virt = (void __iomem __force *)((unsigned long)virt & PAGE_MASK); map->phys = pg_off; map->size = pg_sz; - map->refcount = 1; + map->track.refcount = 1; list_add_tail_rcu(&map->list, &acpi_ioremaps); @@ -374,19 +379,26 @@ void *__ref acpi_os_map_memory(acpi_physical_address phys, acpi_size size) } EXPORT_SYMBOL_GPL(acpi_os_map_memory); -static void acpi_os_drop_map_ref(struct acpi_ioremap *map) +static void acpi_os_map_remove(struct work_struct *work) { - if (!--map->refcount) - list_del_rcu(&map->list); + struct acpi_ioremap *map = container_of(to_rcu_work(work), + struct acpi_ioremap, + track.rwork); + + acpi_unmap(map->phys, map->virt); + kfree(map); } -static void acpi_os_map_cleanup(struct acpi_ioremap *map) +/* Must be called with mutex_lock(&acpi_ioremap_lock) */ +static void acpi_os_drop_map_ref(struct acpi_ioremap *map) { - if (!map->refcount) { - synchronize_rcu_expedited(); - acpi_unmap(map->phys, map->virt); - kfree(map); - } + if (--map->track.refcount) + return; + + list_del_rcu(&map->list); + + INIT_RCU_WORK(&map->track.rwork, acpi_os_map_remove); + queue_rcu_work(system_percpu_wq, &map->track.rwork); } /** @@ -395,8 +407,8 @@ static void acpi_os_map_cleanup(struct acpi_ioremap *map) * @size: Size of the address range to drop a reference to. * * Look up the given virtual address range in the list of existing ACPI memory - * mappings, drop a reference to it and unmap it if there are no more active - * references to it. + * mappings, drop a reference to it and if there are no more active references + * to it, queue it up for later removal. * * During early init (when acpi_permanent_mmap has not been set yet) this * routine simply calls __acpi_unmap_table() to get the job done. Since @@ -413,43 +425,43 @@ void __ref acpi_os_unmap_iomem(void __iomem *virt, acpi_size size) } mutex_lock(&acpi_ioremap_lock); + map = acpi_map_lookup_virt(virt, size); if (!map) { mutex_unlock(&acpi_ioremap_lock); - WARN(true, PREFIX "%s: bad address %p\n", __func__, virt); + WARN(true, "ACPI: %s: bad address %p\n", __func__, virt); return; } acpi_os_drop_map_ref(map); - mutex_unlock(&acpi_ioremap_lock); - acpi_os_map_cleanup(map); + mutex_unlock(&acpi_ioremap_lock); } EXPORT_SYMBOL_GPL(acpi_os_unmap_iomem); +/** + * acpi_os_unmap_memory - Drop a memory mapping reference. + * @virt: Start of the address range to drop a reference to. + * @size: Size of the address range to drop a reference to. 
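(Stepping back for a moment: the refcount/rwork union and the queue_rcu_work() call introduced a few hunks above replace the old synchronize-then-unmap sequence with deferred teardown; the final unmap and kfree() now run from an rcu_work, that is, only after an RCU grace period, so lockless readers walking the acpi_ioremaps list can never observe a freed entry. A condensed sketch of that pattern, assuming a made-up tracked_item type standing in for struct acpi_ioremap:)

	#include <linux/kernel.h>
	#include <linux/list.h>
	#include <linux/slab.h>
	#include <linux/workqueue.h>

	struct tracked_item {
		struct list_head list;		/* on an RCU-protected list */
		union {
			unsigned long refcount;	/* valid while the item is listed */
			struct rcu_work rwork;	/* valid once queued for removal */
		} track;
	};

	static void tracked_item_reclaim(struct work_struct *work)
	{
		struct tracked_item *item = container_of(to_rcu_work(work),
							 struct tracked_item,
							 track.rwork);

		/* Runs after a grace period: no RCU reader can still see the item. */
		kfree(item);
	}

	/* Caller holds the mutex that serializes list updates. */
	static void tracked_item_put(struct tracked_item *item)
	{
		if (--item->track.refcount)
			return;

		list_del_rcu(&item->list);
		INIT_RCU_WORK(&item->track.rwork, tracked_item_reclaim);
		queue_rcu_work(system_wq, &item->track.rwork);
	}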
+ */ void __ref acpi_os_unmap_memory(void *virt, acpi_size size) { - return acpi_os_unmap_iomem((void __iomem *)virt, size); + acpi_os_unmap_iomem((void __iomem *)virt, size); } EXPORT_SYMBOL_GPL(acpi_os_unmap_memory); -int acpi_os_map_generic_address(struct acpi_generic_address *gas) +void __iomem *acpi_os_map_generic_address(struct acpi_generic_address *gas) { u64 addr; - void __iomem *virt; if (gas->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) - return 0; + return NULL; /* Handle possible alignment issues */ memcpy(&addr, &gas->address, sizeof(addr)); if (!addr || !gas->bit_width) - return -EINVAL; - - virt = acpi_os_map_iomem(addr, gas->bit_width / 8); - if (!virt) - return -EIO; + return NULL; - return 0; + return acpi_os_map_iomem(addr, gas->bit_width / 8); } EXPORT_SYMBOL(acpi_os_map_generic_address); @@ -467,21 +479,21 @@ void acpi_os_unmap_generic_address(struct acpi_generic_address *gas) return; mutex_lock(&acpi_ioremap_lock); + map = acpi_map_lookup(addr, gas->bit_width / 8); if (!map) { mutex_unlock(&acpi_ioremap_lock); return; } acpi_os_drop_map_ref(map); - mutex_unlock(&acpi_ioremap_lock); - acpi_os_map_cleanup(map); + mutex_unlock(&acpi_ioremap_lock); } EXPORT_SYMBOL(acpi_os_unmap_generic_address); #ifdef ACPI_FUTURE_USAGE acpi_status -acpi_os_get_physical_address(void *virt, acpi_physical_address * phys) +acpi_os_get_physical_address(void *virt, acpi_physical_address *phys) { if (!phys || !virt) return AE_BAD_PARAMETER; @@ -518,13 +530,12 @@ acpi_os_predefined_override(const struct acpi_predefined_names *init_val, *new_val = NULL; if (!memcmp(init_val->name, "_OS_", 4) && strlen(acpi_os_name)) { - printk(KERN_INFO PREFIX "Overriding _OS definition to '%s'\n", - acpi_os_name); + pr_info("Overriding _OS definition to '%s'\n", acpi_os_name); *new_val = acpi_os_name; } if (!memcmp(init_val->name, "_REV", 4) && acpi_rev_override) { - printk(KERN_INFO PREFIX "Overriding _REV return value to 5\n"); + pr_info("Overriding _REV return value to 5\n"); *new_val = (char *)5; } @@ -533,11 +544,7 @@ acpi_os_predefined_override(const struct acpi_predefined_names *init_val, static irqreturn_t acpi_irq(int irq, void *dev_id) { - u32 handled; - - handled = (*acpi_irq_handler) (acpi_irq_context); - - if (handled) { + if ((*acpi_irq_handler)(acpi_irq_context)) { acpi_irq_handled++; return IRQ_HANDLED; } else { @@ -565,15 +572,15 @@ acpi_os_install_interrupt_handler(u32 gsi, acpi_osd_handler handler, return AE_ALREADY_ACQUIRED; if (acpi_gsi_to_irq(gsi, &irq) < 0) { - printk(KERN_ERR PREFIX "SCI (ACPI GSI %d) not registered\n", - gsi); + pr_err("SCI (ACPI GSI %d) not registered\n", gsi); return AE_OK; } acpi_irq_handler = handler; acpi_irq_context = context; - if (request_irq(irq, acpi_irq, IRQF_SHARED, "acpi", acpi_irq)) { - printk(KERN_ERR PREFIX "SCI (IRQ%d) allocation failed\n", irq); + if (request_threaded_irq(irq, NULL, acpi_irq, IRQF_SHARED | IRQF_ONESHOT, + "acpi", acpi_irq)) { + pr_err("SCI (IRQ%d) allocation failed\n", irq); acpi_irq_handler = NULL; return AE_NOT_ACQUIRED; } @@ -600,7 +607,27 @@ acpi_status acpi_os_remove_interrupt_handler(u32 gsi, acpi_osd_handler handler) void acpi_os_sleep(u64 ms) { - msleep(ms); + u64 usec = ms * USEC_PER_MSEC, delta_us = 50; + + /* + * Use a hrtimer because the timer wheel timers are optimized for + * cancelation before they expire and this timer is not going to be + * canceled. + * + * Set the delta between the requested sleep time and the effective + * deadline to at least 50 us in case there is an opportunity for timer + * coalescing. 
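(The rest of this comment, just below, adds a second rule: sleeps longer than 5 ms get a slack of 1 % of the requested time instead of the flat 50 us. Working the numbers from the code that follows, where usec = ms * USEC_PER_MSEC and delta_us starts at 50:)

	ms = 3    ->  delta_us = 50    ->  usleep_range(3000, 3050)
	ms = 5    ->  delta_us = 50        (1 % of 5 ms is also 50 us, so the slack is continuous at the boundary)
	ms = 100  ->  delta_us = 1000  ->  usleep_range(100000, 101000)

So short sleeps keep a tight 50 us window while long sleeps give the hrtimer proportionally more room for coalescing.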
+ * + * Moreover, longer sleeps can be assumed to need somewhat less timer + * precision, so sacrifice some of it for making the timer a more likely + * candidate for coalescing by setting the delta to 1% of the sleep time + * if it is above 5 ms (this value is chosen so that the delta is a + * continuous function of the sleep time). + */ + if (ms > 5) + delta_us = (USEC_PER_MSEC / 100) * ms; + + usleep_range(usec, usec + delta_us); } void acpi_os_stall(u32 us) @@ -631,22 +658,33 @@ u64 acpi_os_get_timer(void) (ACPI_100NSEC_PER_SEC / HZ); } -acpi_status acpi_os_read_port(acpi_io_address port, u32 * value, u32 width) +acpi_status acpi_os_read_port(acpi_io_address port, u32 *value, u32 width) { u32 dummy; - if (!value) + if (!IS_ENABLED(CONFIG_HAS_IOPORT)) { + /* + * set all-1 result as if reading from non-existing + * I/O port + */ + *value = GENMASK(width, 0); + return AE_NOT_IMPLEMENTED; + } + + if (value) + *value = 0; + else value = &dummy; - *value = 0; if (width <= 8) { - *(u8 *) value = inb(port); + *value = inb(port); } else if (width <= 16) { - *(u16 *) value = inw(port); + *value = inw(port); } else if (width <= 32) { - *(u32 *) value = inl(port); + *value = inl(port); } else { - BUG(); + pr_debug("%s: Access width %d not supported\n", __func__, width); + return AE_BAD_PARAMETER; } return AE_OK; @@ -656,6 +694,9 @@ EXPORT_SYMBOL(acpi_os_read_port); acpi_status acpi_os_write_port(acpi_io_address port, u32 value, u32 width) { + if (!IS_ENABLED(CONFIG_HAS_IOPORT)) + return AE_NOT_IMPLEMENTED; + if (width <= 8) { outb(value, port); } else if (width <= 16) { @@ -663,7 +704,8 @@ acpi_status acpi_os_write_port(acpi_io_address port, u32 value, u32 width) } else if (width <= 32) { outl(value, port); } else { - BUG(); + pr_debug("%s: Access width %d not supported\n", __func__, width); + return AE_BAD_PARAMETER; } return AE_OK; @@ -771,7 +813,7 @@ acpi_os_write_memory(acpi_physical_address phys_addr, u64 value, u32 width) #ifdef CONFIG_PCI acpi_status -acpi_os_read_pci_configuration(struct acpi_pci_id * pci_id, u32 reg, +acpi_os_read_pci_configuration(struct acpi_pci_id *pci_id, u32 reg, u64 *value, u32 width) { int result, size; @@ -803,7 +845,7 @@ acpi_os_read_pci_configuration(struct acpi_pci_id * pci_id, u32 reg, } acpi_status -acpi_os_write_pci_configuration(struct acpi_pci_id * pci_id, u32 reg, +acpi_os_write_pci_configuration(struct acpi_pci_id *pci_id, u32 reg, u64 value, u32 width) { int result, size; @@ -1050,10 +1092,9 @@ int __init acpi_debugger_init(void) acpi_status acpi_os_execute(acpi_execute_type type, acpi_osd_exec_callback function, void *context) { - acpi_status status = AE_OK; struct acpi_os_dpc *dpc; - struct workqueue_struct *queue; int ret; + ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Scheduling function [%p(%p)] for deferred execution.\n", function, context)); @@ -1061,10 +1102,10 @@ acpi_status acpi_os_execute(acpi_execute_type type, if (type == OSL_DEBUGGER_MAIN_THREAD) { ret = acpi_debugger_create_thread(function, context); if (ret) { - pr_err("Call to kthread_create() failed.\n"); - status = AE_ERROR; + pr_err("Kernel thread creation failed\n"); + return AE_ERROR; } - goto out_thread; + return AE_OK; } /* @@ -1082,44 +1123,41 @@ acpi_status acpi_os_execute(acpi_execute_type type, dpc->function = function; dpc->context = context; + INIT_WORK(&dpc->work, acpi_os_execute_deferred); /* * To prevent lockdep from complaining unnecessarily, make sure that * there is a different static lockdep key for each workqueue by using * INIT_WORK() for each of them separately. 
*/ - if (type == OSL_NOTIFY_HANDLER) { - queue = kacpi_notify_wq; - INIT_WORK(&dpc->work, acpi_os_execute_deferred); - } else if (type == OSL_GPE_HANDLER) { - queue = kacpid_wq; - INIT_WORK(&dpc->work, acpi_os_execute_deferred); - } else { + switch (type) { + case OSL_NOTIFY_HANDLER: + ret = queue_work(kacpi_notify_wq, &dpc->work); + break; + case OSL_GPE_HANDLER: + /* + * On some machines, a software-initiated SMI causes corruption + * unless the SMI runs on CPU 0. An SMI can be initiated by + * any AML, but typically it's done in GPE-related methods that + * are run via workqueues, so we can avoid the known corruption + * cases by always queueing on CPU 0. + */ + ret = queue_work_on(0, kacpid_wq, &dpc->work); + break; + default: pr_err("Unsupported os_execute type %d.\n", type); - status = AE_ERROR; + goto err; } - - if (ACPI_FAILURE(status)) - goto err_workqueue; - - /* - * On some machines, a software-initiated SMI causes corruption unless - * the SMI runs on CPU 0. An SMI can be initiated by any AML, but - * typically it's done in GPE-related methods that are run via - * workqueues, so we can avoid the known corruption cases by always - * queueing on CPU 0. - */ - ret = queue_work_on(0, queue, &dpc->work); if (!ret) { - printk(KERN_ERR PREFIX - "Call to queue_work() failed.\n"); - status = AE_ERROR; + pr_err("Unable to queue work\n"); + goto err; } -err_workqueue: - if (ACPI_FAILURE(status)) - kfree(dpc); -out_thread: - return status; + + return AE_OK; + +err: + kfree(dpc); + return AE_ERROR; } EXPORT_SYMBOL(acpi_os_execute); @@ -1155,9 +1193,9 @@ acpi_status acpi_hotplug_schedule(struct acpi_device *adev, u32 src) { struct acpi_hp_work *hpw; - ACPI_DEBUG_PRINT((ACPI_DB_EXEC, - "Scheduling hotplug event (%p, %u) for deferred execution.\n", - adev, src)); + acpi_handle_debug(adev->handle, + "Scheduling hotplug event %u for deferred handling\n", + src); hpw = kmalloc(sizeof(*hpw), GFP_KERNEL); if (!hpw) @@ -1185,7 +1223,7 @@ bool acpi_queue_hotplug_work(struct work_struct *work) } acpi_status -acpi_os_create_semaphore(u32 max_units, u32 initial_units, acpi_handle * handle) +acpi_os_create_semaphore(u32 max_units, u32 initial_units, acpi_handle *handle) { struct semaphore *sem = NULL; @@ -1345,7 +1383,7 @@ acpi_status acpi_os_signal(u32 function, void *info) { switch (function) { case ACPI_SIGNAL_FATAL: - printk(KERN_ERR PREFIX "Fatal opcode executed\n"); + pr_err("Fatal opcode executed\n"); break; case ACPI_SIGNAL_BREAKPOINT: /* @@ -1397,7 +1435,7 @@ __setup("acpi_os_name=", acpi_os_name_setup); static int __init acpi_no_auto_serialize_setup(char *str) { acpi_gbl_auto_serialize_methods = FALSE; - pr_info("ACPI: auto-serialization disabled\n"); + pr_info("Auto-serialization disabled\n"); return 1; } @@ -1448,38 +1486,28 @@ __setup("acpi_enforce_resources=", acpi_enforce_resources_setup); int acpi_check_resource_conflict(const struct resource *res) { acpi_adr_space_type space_id; - acpi_size length; - u8 warn = 0; - int clash = 0; if (acpi_enforce_resources == ENFORCE_RESOURCES_NO) return 0; - if (!(res->flags & IORESOURCE_IO) && !(res->flags & IORESOURCE_MEM)) - return 0; if (res->flags & IORESOURCE_IO) space_id = ACPI_ADR_SPACE_SYSTEM_IO; - else + else if (res->flags & IORESOURCE_MEM) space_id = ACPI_ADR_SPACE_SYSTEM_MEMORY; + else + return 0; + + if (!acpi_check_address_range(space_id, res->start, resource_size(res), 1)) + return 0; + + pr_info("Resource conflict; ACPI support missing from driver?\n"); + + if (acpi_enforce_resources == ENFORCE_RESOURCES_STRICT) + return -EBUSY; + + if 
(acpi_enforce_resources == ENFORCE_RESOURCES_LAX) + pr_notice("Resource conflict: System may be unstable or behave erratically\n"); - length = resource_size(res); - if (acpi_enforce_resources != ENFORCE_RESOURCES_NO) - warn = 1; - clash = acpi_check_address_range(space_id, res->start, length, warn); - - if (clash) { - if (acpi_enforce_resources != ENFORCE_RESOURCES_NO) { - if (acpi_enforce_resources == ENFORCE_RESOURCES_LAX) - printk(KERN_NOTICE "ACPI: This conflict may" - " cause random problems and system" - " instability\n"); - printk(KERN_INFO "ACPI: If an ACPI driver is available" - " for this device, you should use it instead of" - " the native driver\n"); - } - if (acpi_enforce_resources == ENFORCE_RESOURCES_STRICT) - return -EBUSY; - } return 0; } EXPORT_SYMBOL(acpi_check_resource_conflict); @@ -1487,87 +1515,12 @@ EXPORT_SYMBOL(acpi_check_resource_conflict); int acpi_check_region(resource_size_t start, resource_size_t n, const char *name) { - struct resource res = { - .start = start, - .end = start + n - 1, - .name = name, - .flags = IORESOURCE_IO, - }; + struct resource res = DEFINE_RES_IO_NAMED(start, n, name); return acpi_check_resource_conflict(&res); } EXPORT_SYMBOL(acpi_check_region); -static acpi_status acpi_deactivate_mem_region(acpi_handle handle, u32 level, - void *_res, void **return_value) -{ - struct acpi_mem_space_context **mem_ctx; - union acpi_operand_object *handler_obj; - union acpi_operand_object *region_obj2; - union acpi_operand_object *region_obj; - struct resource *res = _res; - acpi_status status; - - region_obj = acpi_ns_get_attached_object(handle); - if (!region_obj) - return AE_OK; - - handler_obj = region_obj->region.handler; - if (!handler_obj) - return AE_OK; - - if (region_obj->region.space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) - return AE_OK; - - if (!(region_obj->region.flags & AOPOBJ_SETUP_COMPLETE)) - return AE_OK; - - region_obj2 = acpi_ns_get_secondary_object(region_obj); - if (!region_obj2) - return AE_OK; - - mem_ctx = (void *)®ion_obj2->extra.region_context; - - if (!(mem_ctx[0]->address >= res->start && - mem_ctx[0]->address < res->end)) - return AE_OK; - - status = handler_obj->address_space.setup(region_obj, - ACPI_REGION_DEACTIVATE, - NULL, (void **)mem_ctx); - if (ACPI_SUCCESS(status)) - region_obj->region.flags &= ~(AOPOBJ_SETUP_COMPLETE); - - return status; -} - -/** - * acpi_release_memory - Release any mappings done to a memory region - * @handle: Handle to namespace node - * @res: Memory resource - * @level: A level that terminates the search - * - * Walks through @handle and unmaps all SystemMemory Operation Regions that - * overlap with @res and that have already been activated (mapped). - * - * This is a helper that allows drivers to place special requirements on memory - * region that may overlap with operation regions, primarily allowing them to - * safely map the region as non-cached memory. - * - * The unmapped Operation Regions will be automatically remapped next time they - * are called, so the drivers do not need to do anything else. 
- */ -acpi_status acpi_release_memory(acpi_handle handle, struct resource *res, - u32 level) -{ - if (!(res->flags & IORESOURCE_MEM)) - return AE_TYPE; - - return acpi_walk_namespace(ACPI_TYPE_REGION, handle, level, - acpi_deactivate_mem_region, NULL, res, NULL); -} -EXPORT_SYMBOL_GPL(acpi_release_memory); - /* * Let drivers know whether the resource checks are effective */ @@ -1592,19 +1545,20 @@ void acpi_os_delete_lock(acpi_spinlock handle) */ acpi_cpu_flags acpi_os_acquire_lock(acpi_spinlock lockp) + __acquires(lockp) { - acpi_cpu_flags flags; - spin_lock_irqsave(lockp, flags); - return flags; + spin_lock(lockp); + return 0; } /* * Release a spinlock. See above. */ -void acpi_os_release_lock(acpi_spinlock lockp, acpi_cpu_flags flags) +void acpi_os_release_lock(acpi_spinlock lockp, acpi_cpu_flags not_used) + __releases(lockp) { - spin_unlock_irqrestore(lockp, flags); + spin_unlock(lockp); } #ifndef ACPI_USE_LOCAL_CACHE @@ -1625,7 +1579,7 @@ void acpi_os_release_lock(acpi_spinlock lockp, acpi_cpu_flags flags) ******************************************************************************/ acpi_status -acpi_os_create_cache(char *name, u16 size, u16 depth, acpi_cache_t ** cache) +acpi_os_create_cache(char *name, u16 size, u16 depth, acpi_cache_t **cache) { *cache = kmem_cache_create(name, size, 0, 0, NULL); if (*cache == NULL) @@ -1646,10 +1600,10 @@ acpi_os_create_cache(char *name, u16 size, u16 depth, acpi_cache_t ** cache) * ******************************************************************************/ -acpi_status acpi_os_purge_cache(acpi_cache_t * cache) +acpi_status acpi_os_purge_cache(acpi_cache_t *cache) { kmem_cache_shrink(cache); - return (AE_OK); + return AE_OK; } /******************************************************************************* @@ -1665,10 +1619,10 @@ acpi_status acpi_os_purge_cache(acpi_cache_t * cache) * ******************************************************************************/ -acpi_status acpi_os_delete_cache(acpi_cache_t * cache) +acpi_status acpi_os_delete_cache(acpi_cache_t *cache) { kmem_cache_destroy(cache); - return (AE_OK); + return AE_OK; } /******************************************************************************* @@ -1685,17 +1639,17 @@ acpi_status acpi_os_delete_cache(acpi_cache_t * cache) * ******************************************************************************/ -acpi_status acpi_os_release_object(acpi_cache_t * cache, void *object) +acpi_status acpi_os_release_object(acpi_cache_t *cache, void *object) { kmem_cache_free(cache, object); - return (AE_OK); + return AE_OK; } #endif static int __init acpi_no_static_ssdt_setup(char *s) { acpi_gbl_disable_ssdt_table_install = TRUE; - pr_info("ACPI: static SSDT installation disabled\n"); + pr_info("Static SSDT installation disabled\n"); return 0; } @@ -1704,8 +1658,7 @@ early_param("acpi_no_static_ssdt", acpi_no_static_ssdt_setup); static int __init acpi_disable_return_repair(char *s) { - printk(KERN_NOTICE PREFIX - "ACPI: Predefined validation mechanism disabled\n"); + pr_notice("Predefined validation mechanism disabled\n"); acpi_gbl_disable_auto_repair = TRUE; return 1; @@ -1717,17 +1670,22 @@ acpi_status __init acpi_os_initialize(void) { acpi_os_map_generic_address(&acpi_gbl_FADT.xpm1a_event_block); acpi_os_map_generic_address(&acpi_gbl_FADT.xpm1b_event_block); - acpi_os_map_generic_address(&acpi_gbl_FADT.xgpe0_block); - acpi_os_map_generic_address(&acpi_gbl_FADT.xgpe1_block); + + acpi_gbl_xgpe0_block_logical_address = + (unsigned 
long)acpi_os_map_generic_address(&acpi_gbl_FADT.xgpe0_block); + acpi_gbl_xgpe1_block_logical_address = + (unsigned long)acpi_os_map_generic_address(&acpi_gbl_FADT.xgpe1_block); + if (acpi_gbl_FADT.flags & ACPI_FADT_RESET_REGISTER) { /* * Use acpi_os_map_generic_address to pre-map the reset * register if it's in system memory. */ - int rv; + void *rv; rv = acpi_os_map_generic_address(&acpi_gbl_FADT.reset_register); - pr_debug(PREFIX "%s: map reset_reg status %d\n", __func__, rv); + pr_debug("%s: Reset register mapping %s\n", __func__, + rv ? "successful" : "failed"); } acpi_os_initialized = true; @@ -1736,8 +1694,8 @@ acpi_status __init acpi_os_initialize(void) acpi_status __init acpi_os_initialize1(void) { - kacpid_wq = alloc_workqueue("kacpid", 0, 1); - kacpi_notify_wq = alloc_workqueue("kacpi_notify", 0, 1); + kacpid_wq = alloc_workqueue("kacpid", WQ_PERCPU, 1); + kacpi_notify_wq = alloc_workqueue("kacpi_notify", WQ_PERCPU, 0); kacpi_hotplug_wq = alloc_ordered_workqueue("kacpi_hotplug", 0); BUG_ON(!kacpid_wq); BUG_ON(!kacpi_notify_wq); @@ -1755,8 +1713,12 @@ acpi_status acpi_os_terminate(void) acpi_os_unmap_generic_address(&acpi_gbl_FADT.xgpe1_block); acpi_os_unmap_generic_address(&acpi_gbl_FADT.xgpe0_block); + acpi_gbl_xgpe0_block_logical_address = 0UL; + acpi_gbl_xgpe1_block_logical_address = 0UL; + acpi_os_unmap_generic_address(&acpi_gbl_FADT.xpm1b_event_block); acpi_os_unmap_generic_address(&acpi_gbl_FADT.xpm1a_event_block); + if (acpi_gbl_FADT.flags & ACPI_FADT_RESET_REGISTER) acpi_os_unmap_generic_address(&acpi_gbl_FADT.reset_register); @@ -1771,6 +1733,7 @@ acpi_status acpi_os_prepare_sleep(u8 sleep_state, u32 pm1a_control, u32 pm1b_control) { int rc = 0; + if (__acpi_os_prepare_sleep) rc = __acpi_os_prepare_sleep(sleep_state, pm1a_control, pm1b_control); @@ -1793,6 +1756,7 @@ acpi_status acpi_os_prepare_extended_sleep(u8 sleep_state, u32 val_a, u32 val_b) { int rc = 0; + if (__acpi_os_prepare_extended_sleep) rc = __acpi_os_prepare_extended_sleep(sleep_state, val_a, val_b); diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c index c576a6fe4ebb..ad81aa03fe2f 100644 --- a/drivers/acpi/pci_irq.c +++ b/drivers/acpi/pci_irq.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /* * pci_irq.c - ACPI PCI Interrupt Routing ($Revision: 11 $) * @@ -6,22 +7,9 @@ * Copyright (C) 2002 Dominik Brodowski <devel@brodo.de> * (c) Copyright 2008 Hewlett-Packard Development Company, L.P. * Bjorn Helgaas <bjorn.helgaas@hp.com> - * - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or (at - * your option) any later version. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. 
- * - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ +#define pr_fmt(fmt) "ACPI: PCI: " fmt #include <linux/dmi.h> #include <linux/kernel.h> @@ -34,11 +22,7 @@ #include <linux/acpi.h> #include <linux/slab.h> #include <linux/interrupt.h> - -#define PREFIX "ACPI: " - -#define _COMPONENT ACPI_PCI_COMPONENT -ACPI_MODULE_NAME("pci_irq"); +#include <linux/string_choices.h> struct acpi_prt_entry { struct acpi_pci_id id; @@ -139,7 +123,7 @@ static void do_prt_fixups(struct acpi_prt_entry *entry, entry->pin == quirk->pin && !strcmp(prt->source, quirk->source) && strlen(prt->source) >= strlen(quirk->actual_source)) { - printk(KERN_WARNING PREFIX "firmware reports " + pr_warn("Firmware reports " "%04x:%02x:%02x PCI INT %c connected to %s; " "changing to %s\n", entry->id.segment, entry->id.bus, @@ -188,7 +172,7 @@ static int acpi_pci_irq_check_entry(acpi_handle handle, struct pci_dev *dev, * configure the IRQ assigned to this slot|dev|pin. The 'source_index' * indicates which resource descriptor in the resource template (of * the link device) this interrupt is allocated from. - * + * * NOTE: Don't query the Link Device for IRQ information at this time * because Link Device enumeration may not have occurred yet * (e.g. exists somewhere 'below' this _PRT entry in the ACPI @@ -204,12 +188,9 @@ static int acpi_pci_irq_check_entry(acpi_handle handle, struct pci_dev *dev, * the IRQ value, which is hardwired to specific interrupt inputs on * the interrupt controller. */ - - ACPI_DEBUG_PRINT_RAW((ACPI_DB_INFO, - " %04x:%02x:%02x[%c] -> %s[%d]\n", - entry->id.segment, entry->id.bus, - entry->id.device, pin_name(entry->pin), - prt->source, entry->index)); + pr_debug("%04x:%02x:%02x[%c] -> %s[%d]\n", + entry->id.segment, entry->id.bus, entry->id.device, + pin_name(entry->pin), prt->source, entry->index); *entry_ptr = entry; @@ -308,7 +289,7 @@ static int acpi_reroute_boot_interrupt(struct pci_dev *dev, } #endif /* CONFIG_X86_IO_APIC */ -static struct acpi_prt_entry *acpi_pci_irq_lookup(struct pci_dev *dev, int pin) +struct acpi_prt_entry *acpi_pci_irq_lookup(struct pci_dev *dev, int pin) { struct acpi_prt_entry *entry = NULL; struct pci_dev *bridge; @@ -320,8 +301,7 @@ static struct acpi_prt_entry *acpi_pci_irq_lookup(struct pci_dev *dev, int pin) #ifdef CONFIG_X86_IO_APIC acpi_reroute_boot_interrupt(dev, entry); #endif /* CONFIG_X86_IO_APIC */ - ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %s[%c] _PRT entry\n", - pci_name(dev), pin_name(pin))); + dev_dbg(&dev->dev, "Found [%c] _PRT entry\n", pin_name(pin)); return entry; } @@ -337,9 +317,7 @@ static struct acpi_prt_entry *acpi_pci_irq_lookup(struct pci_dev *dev, int pin) /* PC card has the same IRQ as its cardbridge */ bridge_pin = bridge->pin; if (!bridge_pin) { - ACPI_DEBUG_PRINT((ACPI_DB_INFO, - "No interrupt pin configured for device %s\n", - pci_name(bridge))); + dev_dbg(&bridge->dev, "No interrupt pin configured\n"); return NULL; } pin = bridge_pin; @@ -347,10 +325,8 @@ static struct acpi_prt_entry *acpi_pci_irq_lookup(struct pci_dev *dev, int pin) ret = acpi_pci_irq_find_prt_entry(bridge, pin, &entry); if (!ret && entry) { - ACPI_DEBUG_PRINT((ACPI_DB_INFO, - "Derived GSI for %s INT %c from %s\n", - pci_name(dev), pin_name(orig_pin), - pci_name(bridge))); + dev_dbg(&dev->dev, "Derived GSI INT %c from %s\n", + pin_name(orig_pin), pci_name(bridge)); return entry; } @@ -412,13 +388,15 @@ int acpi_pci_irq_enable(struct pci_dev *dev) u8 pin; int triggering = ACPI_LEVEL_SENSITIVE; /* - * On ARM systems with the GIC interrupt model, 
level interrupts + * On ARM systems with the GIC interrupt model, or LoongArch + * systems with the LPIC interrupt model, level interrupts * are always polarity high by specification; PCI legacy * IRQs lines are inverted before reaching the interrupt * controller and must therefore be considered active high * as default. */ - int polarity = acpi_irq_model == ACPI_IRQ_MODEL_GIC ? + int polarity = acpi_irq_model == ACPI_IRQ_MODEL_GIC || + acpi_irq_model == ACPI_IRQ_MODEL_LPIC ? ACPI_ACTIVE_HIGH : ACPI_ACTIVE_LOW; char *link = NULL; char link_desc[16]; @@ -426,9 +404,7 @@ int acpi_pci_irq_enable(struct pci_dev *dev) pin = dev->pin; if (!pin) { - ACPI_DEBUG_PRINT((ACPI_DB_INFO, - "No interrupt pin configured for device %s\n", - pci_name(dev))); + dev_dbg(&dev->dev, "No interrupt pin configured\n"); return 0; } @@ -462,8 +438,10 @@ int acpi_pci_irq_enable(struct pci_dev *dev) * No IRQ known to the ACPI subsystem - maybe the BIOS / * driver reported one, then use it. Exit in any case. */ - if (!acpi_pci_irq_valid(dev, pin)) + if (!acpi_pci_irq_valid(dev, pin)) { + kfree(entry); return 0; + } if (acpi_isa_register_gsi(dev)) dev_warn(&dev->dev, "PCI INT %c: no GSI\n", @@ -491,7 +469,7 @@ int acpi_pci_irq_enable(struct pci_dev *dev) dev_dbg(&dev->dev, "PCI INT %c%s -> GSI %u (%s, %s) -> IRQ %d\n", pin_name(pin), link_desc, gsi, (triggering == ACPI_LEVEL_SENSITIVE) ? "level" : "edge", - (polarity == ACPI_ACTIVE_LOW) ? "low" : "high", dev->irq); + str_low_high(polarity == ACPI_ACTIVE_LOW), dev->irq); kfree(entry); return 0; diff --git a/drivers/acpi/pci_link.c b/drivers/acpi/pci_link.c index d5eec352a6e1..bed7dc85612e 100644 --- a/drivers/acpi/pci_link.c +++ b/drivers/acpi/pci_link.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /* * pci_link.c - ACPI PCI Interrupt Link Device Driver ($Revision: 34 $) * @@ -5,26 +6,14 @@ * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com> * Copyright (C) 2002 Dominik Brodowski <devel@brodo.de> * - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or (at - * your option) any later version. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - * - * TBD: - * 1. Support more than one IRQ resource entry per link device (index). + * TBD: + * 1. Support more than one IRQ resource entry per link device (index). * 2. Implement start/stop mechanism and use ACPI Bus Driver facilities * for IRQ management (e.g. start()->_SRS). 
*/ +#define pr_fmt(fmt) "ACPI: PCI: " fmt + #include <linux/syscore_ops.h> #include <linux/kernel.h> #include <linux/module.h> @@ -40,12 +29,8 @@ #include "internal.h" -#define _COMPONENT ACPI_PCI_COMPONENT -ACPI_MODULE_NAME("pci_link"); #define ACPI_PCI_LINK_CLASS "pci_irq_routing" #define ACPI_PCI_LINK_DEVICE_NAME "PCI Interrupt Link" -#define ACPI_PCI_LINK_FILE_INFO "info" -#define ACPI_PCI_LINK_FILE_STATUS "state" #define ACPI_PCI_LINK_MAX_POSSIBLE 16 static int acpi_pci_link_add(struct acpi_device *device, @@ -100,6 +85,7 @@ static acpi_status acpi_pci_link_check_possible(struct acpi_resource *resource, void *context) { struct acpi_pci_link *link = context; + acpi_handle handle = link->device->handle; u32 i; switch (resource->type) { @@ -109,18 +95,18 @@ static acpi_status acpi_pci_link_check_possible(struct acpi_resource *resource, case ACPI_RESOURCE_TYPE_IRQ: { struct acpi_resource_irq *p = &resource->data.irq; - if (!p || !p->interrupt_count) { - ACPI_DEBUG_PRINT((ACPI_DB_INFO, - "Blank _PRS IRQ resource\n")); + if (!p->interrupt_count) { + acpi_handle_debug(handle, + "Blank _PRS IRQ resource\n"); return AE_OK; } for (i = 0; (i < p->interrupt_count && i < ACPI_PCI_LINK_MAX_POSSIBLE); i++) { if (!p->interrupts[i]) { - printk(KERN_WARNING PREFIX - "Invalid _PRS IRQ %d\n", - p->interrupts[i]); + acpi_handle_debug(handle, + "Invalid _PRS IRQ %d\n", + p->interrupts[i]); continue; } link->irq.possible[i] = p->interrupts[i]; @@ -135,18 +121,18 @@ static acpi_status acpi_pci_link_check_possible(struct acpi_resource *resource, { struct acpi_resource_extended_irq *p = &resource->data.extended_irq; - if (!p || !p->interrupt_count) { - printk(KERN_WARNING PREFIX - "Blank _PRS EXT IRQ resource\n"); + if (!p->interrupt_count) { + acpi_handle_debug(handle, + "Blank _PRS EXT IRQ resource\n"); return AE_OK; } for (i = 0; (i < p->interrupt_count && i < ACPI_PCI_LINK_MAX_POSSIBLE); i++) { if (!p->interrupts[i]) { - printk(KERN_WARNING PREFIX - "Invalid _PRS IRQ %d\n", - p->interrupts[i]); + acpi_handle_debug(handle, + "Invalid _PRS IRQ %d\n", + p->interrupts[i]); continue; } link->irq.possible[i] = p->interrupts[i]; @@ -158,8 +144,8 @@ static acpi_status acpi_pci_link_check_possible(struct acpi_resource *resource, break; } default: - printk(KERN_ERR PREFIX "_PRS resource type 0x%x isn't an IRQ\n", - resource->type); + acpi_handle_debug(handle, "_PRS resource type 0x%x is not IRQ\n", + resource->type); return AE_OK; } @@ -168,18 +154,18 @@ static acpi_status acpi_pci_link_check_possible(struct acpi_resource *resource, static int acpi_pci_link_get_possible(struct acpi_pci_link *link) { + acpi_handle handle = link->device->handle; acpi_status status; - status = acpi_walk_resources(link->device->handle, METHOD_NAME__PRS, + status = acpi_walk_resources(handle, METHOD_NAME__PRS, acpi_pci_link_check_possible, link); if (ACPI_FAILURE(status)) { - acpi_handle_debug(link->device->handle, "_PRS not present or invalid"); + acpi_handle_debug(handle, "_PRS not present or invalid"); return 0; } - ACPI_DEBUG_PRINT((ACPI_DB_INFO, - "Found %d possible IRQs\n", - link->irq.possible_count)); + acpi_handle_debug(handle, "Found %d possible IRQs\n", + link->irq.possible_count); return 0; } @@ -196,13 +182,12 @@ static acpi_status acpi_pci_link_check_current(struct acpi_resource *resource, case ACPI_RESOURCE_TYPE_IRQ: { struct acpi_resource_irq *p = &resource->data.irq; - if (!p || !p->interrupt_count) { + if (!p->interrupt_count) { /* * IRQ descriptors may have no IRQ# bits set, - * particularly those those w/ _STA 
disabled + * particularly those w/ _STA disabled */ - ACPI_DEBUG_PRINT((ACPI_DB_INFO, - "Blank _CRS IRQ resource\n")); + pr_debug("Blank _CRS IRQ resource\n"); return AE_OK; } *irq = p->interrupts[0]; @@ -212,13 +197,12 @@ static acpi_status acpi_pci_link_check_current(struct acpi_resource *resource, { struct acpi_resource_extended_irq *p = &resource->data.extended_irq; - if (!p || !p->interrupt_count) { + if (!p->interrupt_count) { /* * extended IRQ descriptors must * return at least 1 IRQ */ - printk(KERN_WARNING PREFIX - "Blank _CRS EXT IRQ resource\n"); + pr_debug("Blank _CRS EXT IRQ resource\n"); return AE_OK; } *irq = p->interrupts[0]; @@ -226,8 +210,8 @@ static acpi_status acpi_pci_link_check_current(struct acpi_resource *resource, } break; default: - printk(KERN_ERR PREFIX "_CRS resource type 0x%x isn't an IRQ\n", - resource->type); + pr_debug("_CRS resource type 0x%x is not IRQ\n", + resource->type); return AE_OK; } @@ -243,8 +227,9 @@ static acpi_status acpi_pci_link_check_current(struct acpi_resource *resource, */ static int acpi_pci_link_get_current(struct acpi_pci_link *link) { - int result = 0; + acpi_handle handle = link->device->handle; acpi_status status; + int result = 0; int irq = 0; link->irq.active = 0; @@ -254,36 +239,36 @@ static int acpi_pci_link_get_current(struct acpi_pci_link *link) /* Query _STA, set link->device->status */ result = acpi_bus_get_status(link->device); if (result) { - printk(KERN_ERR PREFIX "Unable to read status\n"); + acpi_handle_err(handle, "Unable to read status\n"); goto end; } if (!link->device->status.enabled) { - ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Link disabled\n")); + acpi_handle_debug(handle, "Link disabled\n"); return 0; } } - /* - * Query and parse _CRS to get the current IRQ assignment. + /* + * Query and parse _CRS to get the current IRQ assignment. 
*/ - status = acpi_walk_resources(link->device->handle, METHOD_NAME__CRS, + status = acpi_walk_resources(handle, METHOD_NAME__CRS, acpi_pci_link_check_current, &irq); if (ACPI_FAILURE(status)) { - ACPI_EXCEPTION((AE_INFO, status, "Evaluating _CRS")); + acpi_evaluation_failure_warn(handle, "_CRS", status); result = -ENODEV; goto end; } if (acpi_strict && !irq) { - printk(KERN_ERR PREFIX "_CRS returned 0\n"); + acpi_handle_err(handle, "_CRS returned 0\n"); result = -ENODEV; } link->irq.active = irq; - ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Link at IRQ %d \n", link->irq.active)); + acpi_handle_debug(handle, "Link at IRQ %d\n", link->irq.active); end: return result; @@ -291,13 +276,14 @@ static int acpi_pci_link_get_current(struct acpi_pci_link *link) static int acpi_pci_link_set(struct acpi_pci_link *link, int irq) { - int result; - acpi_status status; struct { struct acpi_resource res; struct acpi_resource end; } *resource; struct acpi_buffer buffer = { 0, NULL }; + acpi_handle handle = link->device->handle; + acpi_status status; + int result; if (!irq) return -EINVAL; @@ -317,10 +303,10 @@ static int acpi_pci_link_set(struct acpi_pci_link *link, int irq) resource->res.data.irq.polarity = link->irq.polarity; if (link->irq.triggering == ACPI_EDGE_SENSITIVE) - resource->res.data.irq.sharable = + resource->res.data.irq.shareable = ACPI_EXCLUSIVE; else - resource->res.data.irq.sharable = ACPI_SHARED; + resource->res.data.irq.shareable = ACPI_SHARED; resource->res.data.irq.interrupt_count = 1; resource->res.data.irq.interrupts[0] = irq; break; @@ -335,16 +321,17 @@ static int acpi_pci_link_set(struct acpi_pci_link *link, int irq) resource->res.data.extended_irq.polarity = link->irq.polarity; if (link->irq.triggering == ACPI_EDGE_SENSITIVE) - resource->res.data.irq.sharable = + resource->res.data.extended_irq.shareable = ACPI_EXCLUSIVE; else - resource->res.data.irq.sharable = ACPI_SHARED; + resource->res.data.extended_irq.shareable = ACPI_SHARED; resource->res.data.extended_irq.interrupt_count = 1; resource->res.data.extended_irq.interrupts[0] = irq; /* ignore resource_source, it's optional */ break; default: - printk(KERN_ERR PREFIX "Invalid Resource_type %d\n", link->irq.resource_type); + acpi_handle_err(handle, "Invalid resource type %d\n", + link->irq.resource_type); result = -EINVAL; goto end; @@ -357,7 +344,7 @@ static int acpi_pci_link_set(struct acpi_pci_link *link, int irq) /* check for total failure */ if (ACPI_FAILURE(status)) { - ACPI_EXCEPTION((AE_INFO, status, "Evaluating _SRS")); + acpi_evaluation_failure_warn(handle, "_SRS", status); result = -ENODEV; goto end; } @@ -365,15 +352,11 @@ static int acpi_pci_link_set(struct acpi_pci_link *link, int irq) /* Query _STA, set device->status */ result = acpi_bus_get_status(link->device); if (result) { - printk(KERN_ERR PREFIX "Unable to read status\n"); + acpi_handle_err(handle, "Unable to read status\n"); goto end; } - if (!link->device->status.enabled) { - printk(KERN_WARNING PREFIX - "%s [%s] disabled and referenced, BIOS bug\n", - acpi_device_name(link->device), - acpi_device_bid(link->device)); - } + if (!link->device->status.enabled) + acpi_handle_warn(handle, "Disabled and referenced, BIOS bug\n"); /* Query _CRS, set link->irq.active */ result = acpi_pci_link_get_current(link); @@ -390,14 +373,12 @@ static int acpi_pci_link_set(struct acpi_pci_link *link, int irq) * policy: when _CRS doesn't return what we just _SRS * assume _SRS worked and override _CRS value. 
*/ - printk(KERN_WARNING PREFIX - "%s [%s] BIOS reported IRQ %d, using IRQ %d\n", - acpi_device_name(link->device), - acpi_device_bid(link->device), link->irq.active, irq); + acpi_handle_warn(handle, "BIOS reported IRQ %d, using IRQ %d\n", + link->irq.active, irq); link->irq.active = irq; } - ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Set IRQ %d\n", link->irq.active)); + acpi_handle_debug(handle, "Set IRQ %d\n", link->irq.active); end: kfree(resource); @@ -411,7 +392,7 @@ static int acpi_pci_link_set(struct acpi_pci_link *link, int irq) /* * "acpi_irq_balance" (default in APIC mode) enables ACPI to use PIC Interrupt * Link Devices to move the PIRQs around to minimize sharing. - * + * * "acpi_irq_nobalance" (default in PIC mode) tells ACPI not to move any PIC IRQs * that the BIOS has already set to active. This is necessary because * ACPI has no automatic means of knowing what ISA IRQs are used. Note that @@ -429,7 +410,7 @@ static int acpi_pci_link_set(struct acpi_pci_link *link, int irq) * * Note that PCI IRQ routers have a list of possible IRQs, * which may not include the IRQs this table says are available. - * + * * Since this heuristic can't tell the difference between a link * that no device will attach to, vs. a link which may be shared * by multiple active devices -- it is not optimal. @@ -546,6 +527,7 @@ static int acpi_irq_balance = -1; /* 0: static, 1: balance */ static int acpi_pci_link_allocate(struct acpi_pci_link *link) { + acpi_handle handle = link->device->handle; int irq; int i; @@ -568,8 +550,8 @@ static int acpi_pci_link_allocate(struct acpi_pci_link *link) */ if (i == link->irq.possible_count) { if (acpi_strict) - printk(KERN_WARNING PREFIX "_CRS %d not found" - " in _PRS\n", link->irq.active); + acpi_handle_warn(handle, "_CRS %d not found in _PRS\n", + link->irq.active); link->irq.active = 0; } @@ -593,28 +575,23 @@ static int acpi_pci_link_allocate(struct acpi_pci_link *link) } } if (acpi_irq_get_penalty(irq) >= PIRQ_PENALTY_ISA_ALWAYS) { - printk(KERN_ERR PREFIX "No IRQ available for %s [%s]. " - "Try pci=noacpi or acpi=off\n", - acpi_device_name(link->device), - acpi_device_bid(link->device)); + acpi_handle_err(handle, + "No IRQ available. Try pci=noacpi or acpi=off\n"); return -ENODEV; } /* Attempt to enable the link device at this IRQ. */ if (acpi_pci_link_set(link, irq)) { - printk(KERN_ERR PREFIX "Unable to set IRQ for %s [%s]. " - "Try pci=noacpi or acpi=off\n", - acpi_device_name(link->device), - acpi_device_bid(link->device)); + acpi_handle_err(handle, + "Unable to set IRQ. 
Try pci=noacpi or acpi=off\n"); return -ENODEV; } else { if (link->irq.active < ACPI_MAX_ISA_IRQS) acpi_isa_irq_penalty[link->irq.active] += PIRQ_PENALTY_PCI_USING; - pr_info("%s [%s] enabled at IRQ %d\n", - acpi_device_name(link->device), - acpi_device_bid(link->device), link->irq.active); + acpi_handle_info(handle, "Enabled at IRQ %d\n", + link->irq.active); } link->irq.initialized = 1; @@ -629,25 +606,23 @@ static int acpi_pci_link_allocate(struct acpi_pci_link *link) int acpi_pci_link_allocate_irq(acpi_handle handle, int index, int *triggering, int *polarity, char **name) { - int result; - struct acpi_device *device; + struct acpi_device *device = acpi_fetch_acpi_dev(handle); struct acpi_pci_link *link; - result = acpi_bus_get_device(handle, &device); - if (result) { - printk(KERN_ERR PREFIX "Invalid link device\n"); + if (!device) { + acpi_handle_err(handle, "Invalid link device\n"); return -1; } link = acpi_driver_data(device); if (!link) { - printk(KERN_ERR PREFIX "Invalid link context\n"); + acpi_handle_err(handle, "Invalid link context\n"); return -1; } /* TBD: Support multiple index (IRQ) entries per Link Device */ if (index) { - printk(KERN_ERR PREFIX "Invalid index %d\n", index); + acpi_handle_err(handle, "Invalid index %d\n", index); return -1; } @@ -659,7 +634,7 @@ int acpi_pci_link_allocate_irq(acpi_handle handle, int index, int *triggering, if (!link->irq.active) { mutex_unlock(&acpi_link_lock); - printk(KERN_ERR PREFIX "Link active IRQ is 0!\n"); + acpi_handle_err(handle, "Link active IRQ is 0!\n"); return -1; } link->refcnt++; @@ -671,10 +646,8 @@ int acpi_pci_link_allocate_irq(acpi_handle handle, int index, int *triggering, *polarity = link->irq.polarity; if (name) *name = acpi_device_bid(link->device); - ACPI_DEBUG_PRINT((ACPI_DB_INFO, - "Link %s is referenced\n", - acpi_device_bid(link->device))); - return (link->irq.active); + acpi_handle_debug(handle, "Link is referenced\n"); + return link->irq.active; } /* @@ -683,26 +656,24 @@ int acpi_pci_link_allocate_irq(acpi_handle handle, int index, int *triggering, */ int acpi_pci_link_free_irq(acpi_handle handle) { - struct acpi_device *device; + struct acpi_device *device = acpi_fetch_acpi_dev(handle); struct acpi_pci_link *link; - acpi_status result; - result = acpi_bus_get_device(handle, &device); - if (result) { - printk(KERN_ERR PREFIX "Invalid link device\n"); + if (!device) { + acpi_handle_err(handle, "Invalid link device\n"); return -1; } link = acpi_driver_data(device); if (!link) { - printk(KERN_ERR PREFIX "Invalid link context\n"); + acpi_handle_err(handle, "Invalid link context\n"); return -1; } mutex_lock(&acpi_link_lock); if (!link->irq.initialized) { mutex_unlock(&acpi_link_lock); - printk(KERN_ERR PREFIX "Link isn't initialized\n"); + acpi_handle_err(handle, "Link isn't initialized\n"); return -1; } #ifdef FUTURE_USE @@ -717,15 +688,13 @@ int acpi_pci_link_free_irq(acpi_handle handle) */ link->refcnt--; #endif - ACPI_DEBUG_PRINT((ACPI_DB_INFO, - "Link %s is dereferenced\n", - acpi_device_bid(link->device))); + acpi_handle_debug(handle, "Link is dereferenced\n"); if (link->refcnt == 0) acpi_evaluate_object(link->device->handle, "_DIS", NULL, NULL); mutex_unlock(&acpi_link_lock); - return (link->irq.active); + return link->irq.active; } /* -------------------------------------------------------------------------- @@ -735,18 +704,18 @@ int acpi_pci_link_free_irq(acpi_handle handle) static int acpi_pci_link_add(struct acpi_device *device, const struct acpi_device_id *not_used) { - int result; + acpi_handle handle 
= device->handle; struct acpi_pci_link *link; + int result; int i; - int found = 0; link = kzalloc(sizeof(struct acpi_pci_link), GFP_KERNEL); if (!link) return -ENOMEM; link->device = device; - strcpy(acpi_device_name(device), ACPI_PCI_LINK_DEVICE_NAME); - strcpy(acpi_device_class(device), ACPI_PCI_LINK_CLASS); + strscpy(acpi_device_name(device), ACPI_PCI_LINK_DEVICE_NAME); + strscpy(acpi_device_class(device), ACPI_PCI_LINK_CLASS); device->driver_data = link; mutex_lock(&acpi_link_lock); @@ -757,36 +726,30 @@ static int acpi_pci_link_add(struct acpi_device *device, /* query and set link->irq.active */ acpi_pci_link_get_current(link); - printk(KERN_INFO PREFIX "%s [%s] (IRQs", acpi_device_name(device), - acpi_device_bid(device)); + pr_info("Interrupt link %s configured for IRQ %d\n", + acpi_device_bid(device), link->irq.active); + for (i = 0; i < link->irq.possible_count; i++) { - if (link->irq.active == link->irq.possible[i]) { - printk(KERN_CONT " *%d", link->irq.possible[i]); - found = 1; - } else - printk(KERN_CONT " %d", link->irq.possible[i]); + if (link->irq.active != link->irq.possible[i]) + acpi_handle_debug(handle, "Possible IRQ %d\n", + link->irq.possible[i]); } - printk(KERN_CONT ")"); - - if (!found) - printk(KERN_CONT " *%d", link->irq.active); - if (!link->device->status.enabled) - printk(KERN_CONT ", disabled."); - - printk(KERN_CONT "\n"); + pr_info("Interrupt link %s disabled\n", acpi_device_bid(device)); list_add_tail(&link->list, &acpi_link_list); end: /* disable all links -- to be activated on use */ - acpi_evaluate_object(device->handle, "_DIS", NULL, NULL); + acpi_evaluate_object(handle, "_DIS", NULL, NULL); mutex_unlock(&acpi_link_lock); if (result) kfree(link); + acpi_dev_clear_dependencies(device); + return result < 0 ? result : 1; } @@ -798,7 +761,7 @@ static int acpi_pci_link_resume(struct acpi_pci_link *link) return 0; } -static void irqrouter_resume(void) +static void irqrouter_resume(void *data) { struct acpi_pci_link *link; @@ -925,10 +888,14 @@ static int __init acpi_irq_balance_set(char *str) __setup("acpi_irq_balance", acpi_irq_balance_set); -static struct syscore_ops irqrouter_syscore_ops = { +static const struct syscore_ops irqrouter_syscore_ops = { .resume = irqrouter_resume, }; +static struct syscore irqrouter_syscore = { + .ops = &irqrouter_syscore_ops, +}; + void __init acpi_pci_link_init(void) { if (acpi_noirq) @@ -941,6 +908,6 @@ void __init acpi_pci_link_init(void) else acpi_irq_balance = 0; } - register_syscore_ops(&irqrouter_syscore_ops); + register_syscore(&irqrouter_syscore); acpi_scan_add_handler(&pci_link_handler); } diff --git a/drivers/acpi/pci_mcfg.c b/drivers/acpi/pci_mcfg.c index a4e8432fc2fb..58e10a980114 100644 --- a/drivers/acpi/pci_mcfg.c +++ b/drivers/acpi/pci_mcfg.c @@ -1,20 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2016 Broadcom * Author: Jayachandran C <jchandra@broadcom.com> * Copyright (C) 2016 Semihalf * Author: Tomasz Nowicki <tn@semihalf.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License, version 2, as - * published by the Free Software Foundation (the "GPL"). - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 (GPLv2) for more details. 
- * - * You should have received a copy of the GNU General Public License - * version 2 (GPLv2) along with this source code. */ #define pr_fmt(fmt) "ACPI: " fmt @@ -40,7 +29,7 @@ struct mcfg_fixup { u32 oem_revision; u16 segment; struct resource bus_range; - struct pci_ecam_ops *ops; + const struct pci_ecam_ops *ops; struct resource cfgres; }; @@ -52,6 +41,20 @@ struct mcfg_fixup { static struct mcfg_fixup mcfg_quirks[] = { /* { OEM_ID, OEM_TABLE_ID, REV, SEGMENT, BUS_RANGE, ops, cfgres }, */ +#ifdef CONFIG_ARM64 + +#define AL_ECAM(table_id, rev, seg, ops) \ + { "AMAZON", table_id, rev, seg, MCFG_BUS_ANY, ops } + + AL_ECAM("GRAVITON", 0, 0, &al_pcie_ops), + AL_ECAM("GRAVITON", 0, 1, &al_pcie_ops), + AL_ECAM("GRAVITON", 0, 2, &al_pcie_ops), + AL_ECAM("GRAVITON", 0, 3, &al_pcie_ops), + AL_ECAM("GRAVITON", 0, 4, &al_pcie_ops), + AL_ECAM("GRAVITON", 0, 5, &al_pcie_ops), + AL_ECAM("GRAVITON", 0, 6, &al_pcie_ops), + AL_ECAM("GRAVITON", 0, 7, &al_pcie_ops), + #define QCOM_ECAM32(seg) \ { "QCOM ", "QDF2432 ", 1, seg, MCFG_BUS_ANY, &pci_32b_ops } @@ -115,6 +118,13 @@ static struct mcfg_fixup mcfg_quirks[] = { THUNDER_ECAM_QUIRK(2, 12), THUNDER_ECAM_QUIRK(2, 13), + { "NVIDIA", "TEGRA194", 1, 0, MCFG_BUS_ANY, &tegra194_pcie_ops}, + { "NVIDIA", "TEGRA194", 1, 1, MCFG_BUS_ANY, &tegra194_pcie_ops}, + { "NVIDIA", "TEGRA194", 1, 2, MCFG_BUS_ANY, &tegra194_pcie_ops}, + { "NVIDIA", "TEGRA194", 1, 3, MCFG_BUS_ANY, &tegra194_pcie_ops}, + { "NVIDIA", "TEGRA194", 1, 4, MCFG_BUS_ANY, &tegra194_pcie_ops}, + { "NVIDIA", "TEGRA194", 1, 5, MCFG_BUS_ANY, &tegra194_pcie_ops}, + #define XGENE_V1_ECAM_MCFG(rev, seg) \ {"APM ", "XGENE ", rev, seg, MCFG_BUS_ANY, \ &xgene_v1_pcie_ecam_ops } @@ -141,6 +151,49 @@ static struct mcfg_fixup mcfg_quirks[] = { XGENE_V2_ECAM_MCFG(4, 0), XGENE_V2_ECAM_MCFG(4, 1), XGENE_V2_ECAM_MCFG(4, 2), + +#define ALTRA_ECAM_QUIRK(rev, seg) \ + { "Ampere", "Altra ", rev, seg, MCFG_BUS_ANY, &pci_32b_read_ops } + + ALTRA_ECAM_QUIRK(1, 0), + ALTRA_ECAM_QUIRK(1, 1), + ALTRA_ECAM_QUIRK(1, 2), + ALTRA_ECAM_QUIRK(1, 3), + ALTRA_ECAM_QUIRK(1, 4), + ALTRA_ECAM_QUIRK(1, 5), + ALTRA_ECAM_QUIRK(1, 6), + ALTRA_ECAM_QUIRK(1, 7), + ALTRA_ECAM_QUIRK(1, 8), + ALTRA_ECAM_QUIRK(1, 9), + ALTRA_ECAM_QUIRK(1, 10), + ALTRA_ECAM_QUIRK(1, 11), + ALTRA_ECAM_QUIRK(1, 12), + ALTRA_ECAM_QUIRK(1, 13), + ALTRA_ECAM_QUIRK(1, 14), + ALTRA_ECAM_QUIRK(1, 15), +#endif /* ARM64 */ + +#ifdef CONFIG_LOONGARCH +#define LOONGSON_ECAM_MCFG(table_id, seg) \ + { "LOONGS", table_id, 1, seg, MCFG_BUS_ANY, &loongson_pci_ecam_ops } + + LOONGSON_ECAM_MCFG("\0", 0), + LOONGSON_ECAM_MCFG("LOONGSON", 0), + LOONGSON_ECAM_MCFG("\0", 1), + LOONGSON_ECAM_MCFG("LOONGSON", 1), + LOONGSON_ECAM_MCFG("\0", 2), + LOONGSON_ECAM_MCFG("LOONGSON", 2), + LOONGSON_ECAM_MCFG("\0", 3), + LOONGSON_ECAM_MCFG("LOONGSON", 3), + LOONGSON_ECAM_MCFG("\0", 4), + LOONGSON_ECAM_MCFG("LOONGSON", 4), + LOONGSON_ECAM_MCFG("\0", 5), + LOONGSON_ECAM_MCFG("LOONGSON", 5), + LOONGSON_ECAM_MCFG("\0", 6), + LOONGSON_ECAM_MCFG("LOONGSON", 6), + LOONGSON_ECAM_MCFG("\0", 7), + LOONGSON_ECAM_MCFG("LOONGSON", 7), +#endif /* LOONGARCH */ }; static char mcfg_oem_id[ACPI_OEM_ID_SIZE]; @@ -152,7 +205,7 @@ static int pci_mcfg_quirk_matches(struct mcfg_fixup *f, u16 segment, { if (!memcmp(f->oem_id, mcfg_oem_id, ACPI_OEM_ID_SIZE) && !memcmp(f->oem_table_id, mcfg_oem_table_id, - ACPI_OEM_TABLE_ID_SIZE) && + ACPI_OEM_TABLE_ID_SIZE) && f->oem_revision == mcfg_oem_revision && f->segment == segment && resource_contains(&f->bus_range, bus_range)) @@ -164,7 +217,7 @@ static int 
pci_mcfg_quirk_matches(struct mcfg_fixup *f, u16 segment, static void pci_mcfg_apply_quirks(struct acpi_pci_root *root, struct resource *cfgres, - struct pci_ecam_ops **ecam_ops) + const struct pci_ecam_ops **ecam_ops) { #ifdef CONFIG_PCI_QUIRKS u16 segment = root->segment; @@ -190,9 +243,9 @@ static void pci_mcfg_apply_quirks(struct acpi_pci_root *root, static LIST_HEAD(pci_mcfg_list); int pci_mcfg_lookup(struct acpi_pci_root *root, struct resource *cfgres, - struct pci_ecam_ops **ecam_ops) + const struct pci_ecam_ops **ecam_ops) { - struct pci_ecam_ops *ops = &pci_generic_ecam_ops; + const struct pci_ecam_ops *ops = &pci_generic_ecam_ops; struct resource *bus_res = &root->secondary; u16 seg = root->segment; struct mcfg_entry *e; @@ -279,5 +332,5 @@ void __init pci_mmcfg_late_init(void) { int err = acpi_table_parse(ACPI_SIG_MCFG, pci_mcfg_parse); if (err) - pr_err("Failed to parse MCFG (%d)\n", err); + pr_debug("Failed to parse MCFG (%d)\n", err); } diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c index 707aafc7c2aa..74ade4160314 100644 --- a/drivers/acpi/pci_root.c +++ b/drivers/acpi/pci_root.c @@ -1,24 +1,13 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /* * pci_root.c - ACPI PCI Root Bridge Driver ($Revision: 40 $) * * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com> * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com> - * - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or (at - * your option) any later version. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ +#define pr_fmt(fmt) "ACPI: " fmt + #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> @@ -28,18 +17,13 @@ #include <linux/pm_runtime.h> #include <linux/pci.h> #include <linux/pci-acpi.h> -#include <linux/pci-aspm.h> #include <linux/dmar.h> #include <linux/acpi.h> #include <linux/slab.h> #include <linux/dmi.h> #include <linux/platform_data/x86/apple.h> -#include <acpi/apei.h> /* for acpi_hest_init() */ - #include "internal.h" -#define _COMPONENT ACPI_PCI_COMPONENT -ACPI_MODULE_NAME("pci_root"); #define ACPI_PCI_ROOT_CLASS "pci_bridge" #define ACPI_PCI_ROOT_DEVICE_NAME "PCI Root Bridge" static int acpi_pci_root_add(struct acpi_device *device, @@ -72,22 +56,19 @@ static struct acpi_scan_handler pci_root_handler = { }, }; -static DEFINE_MUTEX(osc_lock); - /** * acpi_is_root_bridge - determine whether an ACPI CA node is a PCI root bridge - * @handle - the ACPI CA node in question. + * @handle: the ACPI CA node in question. * * Note: we could make this API take a struct acpi_device * instead, but * for now, it's more convenient to operate on an acpi_handle. 
*/ int acpi_is_root_bridge(acpi_handle handle) { + struct acpi_device *device = acpi_fetch_acpi_dev(handle); int ret; - struct acpi_device *device; - ret = acpi_bus_get_device(handle, &device); - if (ret) + if (!device) return 0; ret = acpi_match_device_ids(device, root_device_ids); @@ -145,6 +126,8 @@ static struct pci_osc_bit_struct pci_osc_support_bit[] = { { OSC_PCI_CLOCK_PM_SUPPORT, "ClockPM" }, { OSC_PCI_SEGMENT_GROUPS_SUPPORT, "Segments" }, { OSC_PCI_MSI_SUPPORT, "MSI" }, + { OSC_PCI_EDR_SUPPORT, "EDR" }, + { OSC_PCI_HPX_TYPE_3_SUPPORT, "HPX-Type3" }, }; static struct pci_osc_bit_struct pci_osc_control_bit[] = { @@ -154,6 +137,18 @@ static struct pci_osc_bit_struct pci_osc_control_bit[] = { { OSC_PCI_EXPRESS_AER_CONTROL, "AER" }, { OSC_PCI_EXPRESS_CAPABILITY_CONTROL, "PCIeCapability" }, { OSC_PCI_EXPRESS_LTR_CONTROL, "LTR" }, + { OSC_PCI_EXPRESS_DPC_CONTROL, "DPC" }, +}; + +static struct pci_osc_bit_struct cxl_osc_support_bit[] = { + { OSC_CXL_1_1_PORT_REG_ACCESS_SUPPORT, "CXL11PortRegAccess" }, + { OSC_CXL_2_0_PORT_DEV_REG_ACCESS_SUPPORT, "CXL20PortDevRegAccess" }, + { OSC_CXL_PROTOCOL_ERR_REPORTING_SUPPORT, "CXLProtocolErrorReporting" }, + { OSC_CXL_NATIVE_HP_SUPPORT, "CXLNativeHotPlug" }, +}; + +static struct pci_osc_bit_struct cxl_osc_control_bit[] = { + { OSC_CXL_ERROR_REPORTING_CONTROL, "CXLMemErrorReporting" }, }; static void decode_osc_bits(struct acpi_pci_root *root, char *msg, u32 word, @@ -166,7 +161,7 @@ static void decode_osc_bits(struct acpi_pci_root *root, char *msg, u32 word, buf[0] = '\0'; for (i = 0, entry = table; i < size; i++, entry++) if (word & entry->bit) - len += snprintf(buf + len, sizeof(buf) - len, "%s%s", + len += scnprintf(buf + len, sizeof(buf) - len, "%s%s", len ? " " : "", entry->desc); dev_info(&root->device->dev, "_OSC: %s [%s]\n", msg, buf); @@ -184,73 +179,112 @@ static void decode_osc_control(struct acpi_pci_root *root, char *msg, u32 word) ARRAY_SIZE(pci_osc_control_bit)); } +static void decode_cxl_osc_support(struct acpi_pci_root *root, char *msg, u32 word) +{ + decode_osc_bits(root, msg, word, cxl_osc_support_bit, + ARRAY_SIZE(cxl_osc_support_bit)); +} + +static void decode_cxl_osc_control(struct acpi_pci_root *root, char *msg, u32 word) +{ + decode_osc_bits(root, msg, word, cxl_osc_control_bit, + ARRAY_SIZE(cxl_osc_control_bit)); +} + +static inline bool is_pcie(struct acpi_pci_root *root) +{ + return root->bridge_type == ACPI_BRIDGE_TYPE_PCIE; +} + +static inline bool is_cxl(struct acpi_pci_root *root) +{ + return root->bridge_type == ACPI_BRIDGE_TYPE_CXL; +} + static u8 pci_osc_uuid_str[] = "33DB4D5B-1FF7-401C-9657-7441C03DD766"; +static u8 cxl_osc_uuid_str[] = "68F2D50B-C469-4d8A-BD3D-941A103FD3FC"; -static acpi_status acpi_pci_run_osc(acpi_handle handle, - const u32 *capbuf, u32 *retval) +static char *to_uuid(struct acpi_pci_root *root) +{ + if (is_cxl(root)) + return cxl_osc_uuid_str; + return pci_osc_uuid_str; +} + +static int cap_length(struct acpi_pci_root *root) +{ + if (is_cxl(root)) + return sizeof(u32) * OSC_CXL_CAPABILITY_DWORDS; + return sizeof(u32) * OSC_PCI_CAPABILITY_DWORDS; +} + +static acpi_status acpi_pci_run_osc(struct acpi_pci_root *root, + const u32 *capbuf, u32 *pci_control, + u32 *cxl_control) { struct acpi_osc_context context = { - .uuid_str = pci_osc_uuid_str, + .uuid_str = to_uuid(root), .rev = 1, - .cap.length = 12, + .cap.length = cap_length(root), .cap.pointer = (void *)capbuf, }; acpi_status status; - status = acpi_run_osc(handle, &context); + status = acpi_run_osc(root->device->handle, &context); if 
(ACPI_SUCCESS(status)) { - *retval = *((u32 *)(context.ret.pointer + 8)); + *pci_control = acpi_osc_ctx_get_pci_control(&context); + if (is_cxl(root)) + *cxl_control = acpi_osc_ctx_get_cxl_control(&context); kfree(context.ret.pointer); } return status; } -static acpi_status acpi_pci_query_osc(struct acpi_pci_root *root, - u32 support, - u32 *control) +static acpi_status acpi_pci_query_osc(struct acpi_pci_root *root, u32 support, + u32 *control, u32 cxl_support, + u32 *cxl_control) { acpi_status status; - u32 result, capbuf[3]; + u32 pci_result, cxl_result, capbuf[OSC_CXL_CAPABILITY_DWORDS]; - support &= OSC_PCI_SUPPORT_MASKS; support |= root->osc_support_set; capbuf[OSC_QUERY_DWORD] = OSC_QUERY_ENABLE; capbuf[OSC_SUPPORT_DWORD] = support; - if (control) { - *control &= OSC_PCI_CONTROL_MASKS; - capbuf[OSC_CONTROL_DWORD] = *control | root->osc_control_set; - } else { - /* Run _OSC query only with existing controls. */ - capbuf[OSC_CONTROL_DWORD] = root->osc_control_set; + capbuf[OSC_CONTROL_DWORD] = *control | root->osc_control_set; + + if (is_cxl(root)) { + cxl_support |= root->osc_ext_support_set; + capbuf[OSC_EXT_SUPPORT_DWORD] = cxl_support; + capbuf[OSC_EXT_CONTROL_DWORD] = *cxl_control | root->osc_ext_control_set; } - status = acpi_pci_run_osc(root->device->handle, capbuf, &result); +retry: + status = acpi_pci_run_osc(root, capbuf, &pci_result, &cxl_result); if (ACPI_SUCCESS(status)) { root->osc_support_set = support; - if (control) - *control = result; + *control = pci_result; + if (is_cxl(root)) { + root->osc_ext_support_set = cxl_support; + *cxl_control = cxl_result; + } + } else if (is_cxl(root)) { + /* + * CXL _OSC is optional on CXL 1.1 hosts. Fall back to PCIe _OSC + * upon any failure using CXL _OSC. + */ + root->bridge_type = ACPI_BRIDGE_TYPE_PCIE; + goto retry; } return status; } -static acpi_status acpi_pci_osc_support(struct acpi_pci_root *root, u32 flags) -{ - acpi_status status; - - mutex_lock(&osc_lock); - status = acpi_pci_query_osc(root, flags, NULL); - mutex_unlock(&osc_lock); - return status; -} - struct acpi_pci_root *acpi_pci_find_root(acpi_handle handle) { + struct acpi_device *device = acpi_fetch_acpi_dev(handle); struct acpi_pci_root *root; - struct acpi_device *device; - if (acpi_bus_get_device(handle, &device) || - acpi_match_device_ids(device, root_device_ids)) + if (!device || acpi_match_device_ids(device, root_device_ids)) return NULL; root = acpi_driver_data(device); @@ -259,11 +293,6 @@ struct acpi_pci_root *acpi_pci_find_root(acpi_handle handle) } EXPORT_SYMBOL_GPL(acpi_pci_find_root); -struct acpi_handle_node { - struct list_head node; - acpi_handle handle; -}; - /** * acpi_get_pci_dev - convert ACPI CA handle to struct pci_dev * @handle: the handle in question @@ -278,76 +307,26 @@ struct acpi_handle_node { */ struct pci_dev *acpi_get_pci_dev(acpi_handle handle) { - int dev, fn; - unsigned long long adr; - acpi_status status; - acpi_handle phandle; - struct pci_bus *pbus; - struct pci_dev *pdev = NULL; - struct acpi_handle_node *node, *tmp; - struct acpi_pci_root *root; - LIST_HEAD(device_list); + struct acpi_device *adev = acpi_fetch_acpi_dev(handle); + struct acpi_device_physical_node *pn; + struct pci_dev *pci_dev = NULL; - /* - * Walk up the ACPI CA namespace until we reach a PCI root bridge. 
- */ - phandle = handle; - while (!acpi_is_root_bridge(phandle)) { - node = kzalloc(sizeof(struct acpi_handle_node), GFP_KERNEL); - if (!node) - goto out; - - INIT_LIST_HEAD(&node->node); - node->handle = phandle; - list_add(&node->node, &device_list); - - status = acpi_get_parent(phandle, &phandle); - if (ACPI_FAILURE(status)) - goto out; - } - - root = acpi_pci_find_root(phandle); - if (!root) - goto out; - - pbus = root->bus; - - /* - * Now, walk back down the PCI device tree until we return to our - * original handle. Assumes that everything between the PCI root - * bridge and the device we're looking for must be a P2P bridge. - */ - list_for_each_entry(node, &device_list, node) { - acpi_handle hnd = node->handle; - status = acpi_evaluate_integer(hnd, "_ADR", NULL, &adr); - if (ACPI_FAILURE(status)) - goto out; - dev = (adr >> 16) & 0xffff; - fn = adr & 0xffff; - - pdev = pci_get_slot(pbus, PCI_DEVFN(dev, fn)); - if (!pdev || hnd == handle) - break; + if (!adev) + return NULL; - pbus = pdev->subordinate; - pci_dev_put(pdev); + mutex_lock(&adev->physical_node_lock); - /* - * This function may be called for a non-PCI device that has a - * PCI parent (eg. a disk under a PCI SATA controller). In that - * case pdev->subordinate will be NULL for the parent. - */ - if (!pbus) { - dev_dbg(&pdev->dev, "Not a PCI-to-PCI bridge\n"); - pdev = NULL; + list_for_each_entry(pn, &adev->physical_node_list, node) { + if (dev_is_pci(pn->dev)) { + get_device(pn->dev); + pci_dev = to_pci_dev(pn->dev); break; } } -out: - list_for_each_entry_safe(node, tmp, &device_list, node) - kfree(node); - return pdev; + mutex_unlock(&adev->physical_node_lock); + + return pci_dev; } EXPORT_SYMBOL_GPL(acpi_get_pci_dev); @@ -355,7 +334,9 @@ EXPORT_SYMBOL_GPL(acpi_get_pci_dev); * acpi_pci_osc_control_set - Request control of PCI root _OSC features. * @handle: ACPI handle of a PCI root bridge (or PCIe Root Complex). * @mask: Mask of _OSC bits to request control of, place to store control mask. - * @req: Mask of _OSC bits the control of is essential to the caller. + * @support: _OSC supported capability. + * @cxl_mask: Mask of CXL _OSC control bits, place to store control mask. + * @cxl_support: CXL _OSC supported capability. * * Run _OSC query for @mask and if that is successful, compare the returned * mask of control bits with @req. If all of the @req bits are set in the @@ -366,118 +347,159 @@ EXPORT_SYMBOL_GPL(acpi_get_pci_dev); * _OSC bits the BIOS has granted control of, but its contents are meaningless * on failure. **/ -acpi_status acpi_pci_osc_control_set(acpi_handle handle, u32 *mask, u32 req) +static acpi_status acpi_pci_osc_control_set(acpi_handle handle, u32 *mask, + u32 support, u32 *cxl_mask, + u32 cxl_support) { + u32 req = OSC_PCI_EXPRESS_CAPABILITY_CONTROL; struct acpi_pci_root *root; - acpi_status status = AE_OK; - u32 ctrl, capbuf[3]; + acpi_status status; + u32 ctrl, cxl_ctrl = 0, capbuf[OSC_CXL_CAPABILITY_DWORDS]; if (!mask) return AE_BAD_PARAMETER; - ctrl = *mask & OSC_PCI_CONTROL_MASKS; - if ((ctrl & req) != req) - return AE_TYPE; - root = acpi_pci_find_root(handle); if (!root) return AE_NOT_EXIST; - mutex_lock(&osc_lock); + ctrl = *mask; + *mask |= root->osc_control_set; - *mask = ctrl | root->osc_control_set; - /* No need to evaluate _OSC if the control was already granted. */ - if ((root->osc_control_set & ctrl) == ctrl) - goto out; + if (is_cxl(root)) { + cxl_ctrl = *cxl_mask; + *cxl_mask |= root->osc_ext_control_set; + } /* Need to check the available controls bits before requesting them. 
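As context for the capbuf handling in this function: the _OSC argument buffer is an array of DWORDs, and the indices used below are the ones defined in <linux/acpi.h>, with the CXL _OSC variant simply appending two extra DWORDs (which is why capbuf is sized OSC_CXL_CAPABILITY_DWORDS here). A rough map, for orientation only:

	capbuf[OSC_QUERY_DWORD]        /* query flag; OSC_QUERY_ENABLE for a dry-run query */
	capbuf[OSC_SUPPORT_DWORD]      /* features the OS supports */
	capbuf[OSC_CONTROL_DWORD]      /* features the OS requests control of */
	capbuf[OSC_EXT_SUPPORT_DWORD]  /* CXL features the OS supports (CXL hosts only) */
	capbuf[OSC_EXT_CONTROL_DWORD]  /* CXL features the OS requests control of (CXL hosts only) */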
*/ - while (*mask) { - status = acpi_pci_query_osc(root, root->osc_support_set, mask); + do { + u32 pci_missing = 0, cxl_missing = 0; + + status = acpi_pci_query_osc(root, support, mask, cxl_support, + cxl_mask); if (ACPI_FAILURE(status)) - goto out; - if (ctrl == *mask) - break; - decode_osc_control(root, "platform does not support", - ctrl & ~(*mask)); + return status; + if (is_cxl(root)) { + if (ctrl == *mask && cxl_ctrl == *cxl_mask) + break; + pci_missing = ctrl & ~(*mask); + cxl_missing = cxl_ctrl & ~(*cxl_mask); + } else { + if (ctrl == *mask) + break; + pci_missing = ctrl & ~(*mask); + } + if (pci_missing) + decode_osc_control(root, "platform does not support", + pci_missing); + if (cxl_missing) + decode_cxl_osc_control(root, "CXL platform does not support", + cxl_missing); ctrl = *mask; - } + cxl_ctrl = *cxl_mask; + } while (*mask || *cxl_mask); + + /* No need to request _OSC if the control was already granted. */ + if ((root->osc_control_set & ctrl) == ctrl && + (root->osc_ext_control_set & cxl_ctrl) == cxl_ctrl) + return AE_OK; if ((ctrl & req) != req) { decode_osc_control(root, "not requesting control; platform does not support", req & ~(ctrl)); - status = AE_SUPPORT; - goto out; + return AE_SUPPORT; } capbuf[OSC_QUERY_DWORD] = 0; capbuf[OSC_SUPPORT_DWORD] = root->osc_support_set; capbuf[OSC_CONTROL_DWORD] = ctrl; - status = acpi_pci_run_osc(handle, capbuf, mask); - if (ACPI_SUCCESS(status)) - root->osc_control_set = *mask; -out: - mutex_unlock(&osc_lock); - return status; + if (is_cxl(root)) { + capbuf[OSC_EXT_SUPPORT_DWORD] = root->osc_ext_support_set; + capbuf[OSC_EXT_CONTROL_DWORD] = cxl_ctrl; + } + + status = acpi_pci_run_osc(root, capbuf, mask, cxl_mask); + if (ACPI_FAILURE(status)) + return status; + + root->osc_control_set = *mask; + root->osc_ext_control_set = *cxl_mask; + return AE_OK; } -EXPORT_SYMBOL(acpi_pci_osc_control_set); -static void negotiate_os_control(struct acpi_pci_root *root, int *no_aspm, - bool is_pcie) +static u32 calculate_support(void) { - u32 support, control, requested; - acpi_status status; - struct acpi_device *device = root->device; - acpi_handle handle = device->handle; - - /* - * Apple always return failure on _OSC calls when _OSI("Darwin") has - * been called successfully. We know the feature set supported by the - * platform, so avoid calling _OSC at all - */ - if (x86_apple_machine) { - root->osc_control_set = ~OSC_PCI_EXPRESS_PME_CONTROL; - decode_osc_control(root, "OS assumes control of", - root->osc_control_set); - return; - } + u32 support; /* * All supported architectures that use ACPI have support for * PCI domains, so we indicate this in _OSC support capabilities. */ support = OSC_PCI_SEGMENT_GROUPS_SUPPORT; + support |= OSC_PCI_HPX_TYPE_3_SUPPORT; if (pci_ext_cfg_avail()) support |= OSC_PCI_EXT_CONFIG_SUPPORT; if (pcie_aspm_support_enabled()) support |= OSC_PCI_ASPM_SUPPORT | OSC_PCI_CLOCK_PM_SUPPORT; if (pci_msi_enabled()) support |= OSC_PCI_MSI_SUPPORT; + if (IS_ENABLED(CONFIG_PCIE_EDR)) + support |= OSC_PCI_EDR_SUPPORT; - decode_osc_support(root, "OS supports", support); - status = acpi_pci_osc_support(root, support); - if (ACPI_FAILURE(status)) { - *no_aspm = 1; + return support; +} - /* _OSC is optional for PCI host bridges */ - if ((status == AE_NOT_FOUND) && !is_pcie) - return; +/* + * Background on hotplug support, and making it depend on only + * CONFIG_HOTPLUG_PCI_PCIE vs. 
also considering CONFIG_MEMORY_HOTPLUG: + * + * CONFIG_ACPI_HOTPLUG_MEMORY does depend on CONFIG_MEMORY_HOTPLUG, but + * there is no existing _OSC for memory hotplug support. The reason is that + * ACPI memory hotplug requires the OS to acknowledge / coordinate with + * memory plug events via a scan handler. On the CXL side the equivalent + * would be if Linux supported the Mechanical Retention Lock [1], or + * otherwise had some coordination for the driver of a PCI device + * undergoing hotplug to be consulted on whether the hotplug should + * proceed or not. + * + * The concern is that if Linux says no to supporting CXL hotplug then + * the BIOS may say no to giving the OS hotplug control of any other PCIe + * device. So the question here is not whether hotplug is enabled, it's + * whether it is handled natively by the OS at all, and if + * CONFIG_HOTPLUG_PCI_PCIE is enabled then the answer is "yes". + * + * Otherwise, the plan for CXL coordinated remove, since the kernel does + * not support blocking hotplug, is to require the memory device to be + * disabled before hotplug is attempted. When CONFIG_MEMORY_HOTPLUG is + * disabled that step will fail and the remove attempt must be cancelled by the + * user. If that is not honored and the card is removed anyway then it + * does not matter if CONFIG_MEMORY_HOTPLUG is enabled or not, it will + * cause a crash and other badness. + * + * Therefore, just say yes to CXL hotplug and require removal to + * be coordinated by userspace unless and until the kernel grows better + * mechanisms for doing "managed" removal of devices in consultation with + * the driver. + * + * [1]: https://lore.kernel.org/all/20201122014203.4706-1-ashok.raj@intel.com/ + */ +static u32 calculate_cxl_support(void) +{ + u32 support; - dev_info(&device->dev, "_OSC failed (%s)%s\n", - acpi_format_exception(status), - pcie_aspm_support_enabled() ? "; disabling ASPM" : ""); - return; - } + support = OSC_CXL_2_0_PORT_DEV_REG_ACCESS_SUPPORT; + support |= OSC_CXL_1_1_PORT_REG_ACCESS_SUPPORT; + if (pci_aer_available()) + support |= OSC_CXL_PROTOCOL_ERR_REPORTING_SUPPORT; + if (IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE)) + support |= OSC_CXL_NATIVE_HP_SUPPORT; - if (pcie_ports_disabled) { - dev_info(&device->dev, "PCIe port services disabled; not requesting _OSC control\n"); - return; - } + return support; +} - if ((support & ACPI_PCIE_REQ_SUPPORT) != ACPI_PCIE_REQ_SUPPORT) { - decode_osc_support(root, "not requesting OS control; OS requires", - ACPI_PCIE_REQ_SUPPORT); - return; - } +static u32 calculate_control(void) +{ + u32 control; control = OSC_PCI_EXPRESS_CAPABILITY_CONTROL | OSC_PCI_EXPRESS_PME_CONTROL; @@ -491,19 +513,91 @@ static void negotiate_os_control(struct acpi_pci_root *root, int *no_aspm, if (IS_ENABLED(CONFIG_HOTPLUG_PCI_SHPC)) control |= OSC_PCI_SHPC_NATIVE_HP_CONTROL; - if (pci_aer_available()) { - if (aer_acpi_firmware_first()) - dev_info(&device->dev, - "PCIe AER handled by firmware\n"); - else - control |= OSC_PCI_EXPRESS_AER_CONTROL; + if (pci_aer_available()) + control |= OSC_PCI_EXPRESS_AER_CONTROL; + + /* + * Per the Downstream Port Containment Related Enhancements ECN to + * the PCI Firmware Spec, r3.2, sec 4.5.1, table 4-5, + * OSC_PCI_EXPRESS_DPC_CONTROL indicates the OS supports both DPC + * and EDR. 
+ */ + if (IS_ENABLED(CONFIG_PCIE_DPC) && IS_ENABLED(CONFIG_PCIE_EDR)) + control |= OSC_PCI_EXPRESS_DPC_CONTROL; + + return control; +} + +static u32 calculate_cxl_control(void) +{ + u32 control = 0; + + if (IS_ENABLED(CONFIG_MEMORY_FAILURE)) + control |= OSC_CXL_ERROR_REPORTING_CONTROL; + + return control; +} + +static bool os_control_query_checks(struct acpi_pci_root *root, u32 support) +{ + struct acpi_device *device = root->device; + + if (pcie_ports_disabled) { + dev_info(&device->dev, "PCIe port services disabled; not requesting _OSC control\n"); + return false; + } + + if ((support & ACPI_PCIE_REQ_SUPPORT) != ACPI_PCIE_REQ_SUPPORT) { + decode_osc_support(root, "not requesting OS control; OS requires", + ACPI_PCIE_REQ_SUPPORT); + return false; + } + + return true; +} + +static void negotiate_os_control(struct acpi_pci_root *root, int *no_aspm) +{ + u32 support, control = 0, requested = 0; + u32 cxl_support = 0, cxl_control = 0, cxl_requested = 0; + acpi_status status; + struct acpi_device *device = root->device; + acpi_handle handle = device->handle; + + /* + * Apple always return failure on _OSC calls when _OSI("Darwin") has + * been called successfully. We know the feature set supported by the + * platform, so avoid calling _OSC at all + */ + if (x86_apple_machine) { + root->osc_control_set = ~OSC_PCI_EXPRESS_PME_CONTROL; + decode_osc_control(root, "OS assumes control of", + root->osc_control_set); + return; + } + + support = calculate_support(); + + decode_osc_support(root, "OS supports", support); + + if (os_control_query_checks(root, support)) + requested = control = calculate_control(); + + if (is_cxl(root)) { + cxl_support = calculate_cxl_support(); + decode_cxl_osc_support(root, "OS supports", cxl_support); + cxl_requested = cxl_control = calculate_cxl_control(); } - requested = control; - status = acpi_pci_osc_control_set(handle, &control, - OSC_PCI_EXPRESS_CAPABILITY_CONTROL); + status = acpi_pci_osc_control_set(handle, &control, support, + &cxl_control, cxl_support); if (ACPI_SUCCESS(status)) { - decode_osc_control(root, "OS now controls", control); + if (control) + decode_osc_control(root, "OS now controls", control); + if (cxl_control) + decode_cxl_osc_control(root, "OS now controls", + cxl_control); + if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_ASPM) { /* * We have ASPM control, but the FADT indicates that @@ -514,11 +608,6 @@ static void negotiate_os_control(struct acpi_pci_root *root, int *no_aspm, *no_aspm = 1; } } else { - decode_osc_control(root, "OS requested", requested); - decode_osc_control(root, "platform willing to grant", control); - dev_info(&device->dev, "_OSC failed (%s); disabling ASPM\n", - acpi_format_exception(status)); - /* * We want to disable ASPM here, but aspm_disabled * needs to remain in its state from boot so that we @@ -527,6 +616,23 @@ static void negotiate_os_control(struct acpi_pci_root *root, int *no_aspm, * root scan. 
*/ *no_aspm = 1; + + /* _OSC is optional for PCI host bridges */ + if (status == AE_NOT_FOUND && !is_pcie(root)) + return; + + if (control) { + decode_osc_control(root, "OS requested", requested); + decode_osc_control(root, "platform willing to grant", control); + } + if (cxl_control) { + decode_cxl_osc_control(root, "OS requested", cxl_requested); + decode_cxl_osc_control(root, "platform willing to grant", + cxl_control); + } + + dev_info(&device->dev, "_OSC: platform retains control of PCIe features (%s)\n", + acpi_format_exception(status)); } } @@ -540,7 +646,7 @@ static int acpi_pci_root_add(struct acpi_device *device, acpi_handle handle = device->handle; int no_aspm = 0; bool hotadd = system_state == SYSTEM_RUNNING; - bool is_pcie; + const char *acpi_hid; root = kzalloc(sizeof(struct acpi_pci_root), GFP_KERNEL); if (!root) @@ -583,8 +689,8 @@ static int acpi_pci_root_add(struct acpi_device *device, root->device = device; root->segment = segment & 0xFFFF; - strcpy(acpi_device_name(device), ACPI_PCI_ROOT_DEVICE_NAME); - strcpy(acpi_device_class(device), ACPI_PCI_ROOT_CLASS); + strscpy(acpi_device_name(device), ACPI_PCI_ROOT_DEVICE_NAME); + strscpy(acpi_device_class(device), ACPI_PCI_ROOT_CLASS); device->driver_data = root; if (hotadd && dmar_device_add(handle)) { @@ -592,14 +698,21 @@ static int acpi_pci_root_add(struct acpi_device *device, goto end; } - pr_info(PREFIX "%s [%s] (domain %04x %pR)\n", + pr_info("%s [%s] (domain %04x %pR)\n", acpi_device_name(device), acpi_device_bid(device), root->segment, &root->secondary); root->mcfg_addr = acpi_pci_root_get_mcfg_addr(handle); - is_pcie = strcmp(acpi_device_hid(device), "PNP0A08") == 0; - negotiate_os_control(root, &no_aspm, is_pcie); + acpi_hid = acpi_device_hid(root->device); + if (strcmp(acpi_hid, "PNP0A08") == 0) + root->bridge_type = ACPI_BRIDGE_TYPE_PCIE; + else if (strcmp(acpi_hid, "ACPI0016") == 0) + root->bridge_type = ACPI_BRIDGE_TYPE_CXL; + else + dev_dbg(&device->dev, "Assuming non-PCIe host bridge\n"); + + negotiate_os_control(root, &no_aspm); /* * TBD: Need PCI interface for enumeration/configuration of roots. @@ -728,9 +841,7 @@ static void acpi_pci_root_validate_resources(struct device *dev, * our resources no longer match the ACPI _CRS, but * the kernel resource tree doesn't allow overlaps. 
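As a concrete illustration with made-up values: if the _CRS for a host bridge yields overlapping producer windows [mem 0xc0000000-0xc7ffffff] and [mem 0xc4000000-0xcfffffff], resource_union() in the hunk below widens the surviving entry (res2) to [mem 0xc0000000-0xcfffffff], the expansion is logged, and the now-redundant res1 is dropped.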
*/ - if (resource_overlaps(res1, res2)) { - res2->start = min(res1->start, res2->start); - res2->end = max(res1->end, res2->end); + if (resource_union(res1, res2, res2)) { dev_info(dev, "host bridge window expanded to %pR; %pR ignored\n", res2, res1); free = true; @@ -747,7 +858,7 @@ next: } } -static void acpi_pci_root_remap_iospace(struct fwnode_handle *fwnode, +static void acpi_pci_root_remap_iospace(const struct fwnode_handle *fwnode, struct resource_entry *entry) { #ifdef PCI_IOBASE @@ -927,6 +1038,13 @@ struct pci_bus *acpi_pci_root_create(struct acpi_pci_root *root, host_bridge->native_pme = 0; if (!(root->osc_control_set & OSC_PCI_EXPRESS_LTR_CONTROL)) host_bridge->native_ltr = 0; + if (!(root->osc_control_set & OSC_PCI_EXPRESS_DPC_CONTROL)) + host_bridge->native_dpc = 0; + + if (!(root->osc_ext_control_set & OSC_CXL_ERROR_REPORTING_CONTROL)) + host_bridge->native_cxl_error = 0; + + acpi_dev_power_up_children_with_adr(device); pci_scan_child_bus(bus); pci_set_host_bridge_release(host_bridge, acpi_pci_root_release_info, @@ -942,7 +1060,6 @@ out_release_info: void __init acpi_pci_root_init(void) { - acpi_hest_init(); if (acpi_pci_disabled) return; diff --git a/drivers/acpi/pci_slot.c b/drivers/acpi/pci_slot.c index e90b61f7d2db..741bcc9d6d6a 100644 --- a/drivers/acpi/pci_slot.c +++ b/drivers/acpi/pci_slot.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * pci_slot.c - ACPI PCI Slot Driver * @@ -11,15 +12,6 @@ * * Copyright (C) 2013 Huawei Tech. Co., Ltd. * Jiang Liu <jiang.liu@huawei.com> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt @@ -36,9 +28,6 @@ static int check_sta_before_sun; -#define _COMPONENT ACPI_PCI_COMPONENT -ACPI_MODULE_NAME("pci_slot"); - #define SLOT_NAME_SIZE 21 /* Inspired by #define in acpiphp.h */ struct acpi_pci_slot { @@ -122,7 +111,7 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv) snprintf(name, sizeof(name), "%llu", sun); pci_slot = pci_create_slot(pci_bus, device, name, NULL); if (IS_ERR(pci_slot)) { - pr_err("pci_create_slot returned %ld\n", PTR_ERR(pci_slot)); + pr_err("pci_create_slot returned %pe\n", pci_slot); kfree(slot); return AE_OK; } diff --git a/drivers/acpi/pfr_telemetry.c b/drivers/acpi/pfr_telemetry.c new file mode 100644 index 000000000000..32bdf8cbe8f2 --- /dev/null +++ b/drivers/acpi/pfr_telemetry.c @@ -0,0 +1,430 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * ACPI Platform Firmware Runtime Telemetry driver + * + * Copyright (C) 2021 Intel Corporation + * Author: Chen Yu <yu.c.chen@intel.com> + * + * This driver allows user space to fetch telemetry data from the + * firmware with the help of the Platform Firmware Runtime Telemetry + * interface. 
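A minimal user-space sketch of that flow, assuming the pfrut.h uAPI and the /dev/acpi_pfr_telemetry0 node created by this driver (the node name, ioctl and structure field come from the code below; most error handling is omitted):

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <sys/mman.h>
	#include <unistd.h>
	#include <linux/pfrut.h>

	int main(void)
	{
		struct pfrt_log_data_info di;
		int fd = open("/dev/acpi_pfr_telemetry0", O_RDONLY);

		if (fd < 0 || ioctl(fd, PFRT_LOG_IOC_GET_DATA_INFO, &di))
			return 1;

		/* The log buffer is exposed read-only; the driver rejects VM_WRITE mappings. */
		char *log = mmap(NULL, di.max_data_size, PROT_READ, MAP_SHARED, fd, 0);
		if (log == MAP_FAILED)
			return 1;

		fwrite(log, 1, di.max_data_size, stdout);	/* dump the raw telemetry region */
		munmap(log, di.max_data_size);
		close(fd);
		return 0;
	}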
+ */ +#include <linux/acpi.h> +#include <linux/device.h> +#include <linux/err.h> +#include <linux/errno.h> +#include <linux/file.h> +#include <linux/fs.h> +#include <linux/miscdevice.h> +#include <linux/module.h> +#include <linux/mm.h> +#include <linux/platform_device.h> +#include <linux/string.h> +#include <linux/uaccess.h> +#include <linux/uio.h> +#include <linux/uuid.h> + +#include <uapi/linux/pfrut.h> + +#define PFRT_LOG_EXEC_IDX 0 +#define PFRT_LOG_HISTORY_IDX 1 + +#define PFRT_LOG_ERR 0 +#define PFRT_LOG_WARN 1 +#define PFRT_LOG_INFO 2 +#define PFRT_LOG_VERB 4 + +#define PFRT_FUNC_SET_LEV 1 +#define PFRT_FUNC_GET_LEV 2 +#define PFRT_FUNC_GET_DATA 3 + +#define PFRT_REVID_1 1 +#define PFRT_REVID_2 2 +#define PFRT_DEFAULT_REV_ID PFRT_REVID_1 + +enum log_index { + LOG_STATUS_IDX = 0, + LOG_EXT_STATUS_IDX = 1, + LOG_MAX_SZ_IDX = 2, + LOG_CHUNK1_LO_IDX = 3, + LOG_CHUNK1_HI_IDX = 4, + LOG_CHUNK1_SZ_IDX = 5, + LOG_CHUNK2_LO_IDX = 6, + LOG_CHUNK2_HI_IDX = 7, + LOG_CHUNK2_SZ_IDX = 8, + LOG_ROLLOVER_CNT_IDX = 9, + LOG_RESET_CNT_IDX = 10, + LOG_NR_IDX +}; + +struct pfrt_log_device { + int index; + struct pfrt_log_info info; + struct device *parent_dev; + struct miscdevice miscdev; +}; + +/* pfrt_guid is the parameter for _DSM method */ +static const guid_t pfrt_log_guid = + GUID_INIT(0x75191659, 0x8178, 0x4D9D, 0xB8, 0x8F, 0xAC, 0x5E, + 0x5E, 0x93, 0xE8, 0xBF); + +static DEFINE_IDA(pfrt_log_ida); + +static inline struct pfrt_log_device *to_pfrt_log_dev(struct file *file) +{ + return container_of(file->private_data, struct pfrt_log_device, miscdev); +} + +static int get_pfrt_log_data_info(struct pfrt_log_data_info *data_info, + struct pfrt_log_device *pfrt_log_dev) +{ + acpi_handle handle = ACPI_HANDLE(pfrt_log_dev->parent_dev); + union acpi_object *out_obj, in_obj, in_buf; + int ret = -EBUSY; + + memset(data_info, 0, sizeof(*data_info)); + memset(&in_obj, 0, sizeof(in_obj)); + memset(&in_buf, 0, sizeof(in_buf)); + in_obj.type = ACPI_TYPE_PACKAGE; + in_obj.package.count = 1; + in_obj.package.elements = &in_buf; + in_buf.type = ACPI_TYPE_INTEGER; + in_buf.integer.value = pfrt_log_dev->info.log_type; + + out_obj = acpi_evaluate_dsm_typed(handle, &pfrt_log_guid, + pfrt_log_dev->info.log_revid, PFRT_FUNC_GET_DATA, + &in_obj, ACPI_TYPE_PACKAGE); + if (!out_obj) + return -EINVAL; + + if (out_obj->package.count < LOG_NR_IDX || + out_obj->package.elements[LOG_STATUS_IDX].type != ACPI_TYPE_INTEGER || + out_obj->package.elements[LOG_EXT_STATUS_IDX].type != ACPI_TYPE_INTEGER || + out_obj->package.elements[LOG_MAX_SZ_IDX].type != ACPI_TYPE_INTEGER || + out_obj->package.elements[LOG_CHUNK1_LO_IDX].type != ACPI_TYPE_INTEGER || + out_obj->package.elements[LOG_CHUNK1_HI_IDX].type != ACPI_TYPE_INTEGER || + out_obj->package.elements[LOG_CHUNK1_SZ_IDX].type != ACPI_TYPE_INTEGER || + out_obj->package.elements[LOG_CHUNK2_LO_IDX].type != ACPI_TYPE_INTEGER || + out_obj->package.elements[LOG_CHUNK2_HI_IDX].type != ACPI_TYPE_INTEGER || + out_obj->package.elements[LOG_CHUNK2_SZ_IDX].type != ACPI_TYPE_INTEGER || + out_obj->package.elements[LOG_ROLLOVER_CNT_IDX].type != ACPI_TYPE_INTEGER || + out_obj->package.elements[LOG_RESET_CNT_IDX].type != ACPI_TYPE_INTEGER) + goto free_acpi_buffer; + + data_info->status = out_obj->package.elements[LOG_STATUS_IDX].integer.value; + data_info->ext_status = + out_obj->package.elements[LOG_EXT_STATUS_IDX].integer.value; + if (data_info->status != DSM_SUCCEED) { + dev_dbg(pfrt_log_dev->parent_dev, "Error Status:%d\n", data_info->status); + dev_dbg(pfrt_log_dev->parent_dev, "Error Extend 
Status:%d\n", + data_info->ext_status); + goto free_acpi_buffer; + } + + data_info->max_data_size = + out_obj->package.elements[LOG_MAX_SZ_IDX].integer.value; + data_info->chunk1_addr_lo = + out_obj->package.elements[LOG_CHUNK1_LO_IDX].integer.value; + data_info->chunk1_addr_hi = + out_obj->package.elements[LOG_CHUNK1_HI_IDX].integer.value; + data_info->chunk1_size = + out_obj->package.elements[LOG_CHUNK1_SZ_IDX].integer.value; + data_info->chunk2_addr_lo = + out_obj->package.elements[LOG_CHUNK2_LO_IDX].integer.value; + data_info->chunk2_addr_hi = + out_obj->package.elements[LOG_CHUNK2_HI_IDX].integer.value; + data_info->chunk2_size = + out_obj->package.elements[LOG_CHUNK2_SZ_IDX].integer.value; + data_info->rollover_cnt = + out_obj->package.elements[LOG_ROLLOVER_CNT_IDX].integer.value; + data_info->reset_cnt = + out_obj->package.elements[LOG_RESET_CNT_IDX].integer.value; + + ret = 0; + +free_acpi_buffer: + ACPI_FREE(out_obj); + + return ret; +} + +static int set_pfrt_log_level(int level, struct pfrt_log_device *pfrt_log_dev) +{ + acpi_handle handle = ACPI_HANDLE(pfrt_log_dev->parent_dev); + union acpi_object *out_obj, *obj, in_obj, in_buf; + enum pfru_dsm_status status, ext_status; + int ret = 0; + + memset(&in_obj, 0, sizeof(in_obj)); + memset(&in_buf, 0, sizeof(in_buf)); + in_obj.type = ACPI_TYPE_PACKAGE; + in_obj.package.count = 1; + in_obj.package.elements = &in_buf; + in_buf.type = ACPI_TYPE_INTEGER; + in_buf.integer.value = level; + + out_obj = acpi_evaluate_dsm_typed(handle, &pfrt_log_guid, + pfrt_log_dev->info.log_revid, PFRT_FUNC_SET_LEV, + &in_obj, ACPI_TYPE_PACKAGE); + if (!out_obj) + return -EINVAL; + + obj = &out_obj->package.elements[0]; + status = obj->integer.value; + if (status != DSM_SUCCEED) { + obj = &out_obj->package.elements[1]; + ext_status = obj->integer.value; + dev_dbg(pfrt_log_dev->parent_dev, "Error Status:%d\n", status); + dev_dbg(pfrt_log_dev->parent_dev, "Error Extend Status:%d\n", ext_status); + ret = -EBUSY; + } + + ACPI_FREE(out_obj); + + return ret; +} + +static int get_pfrt_log_level(struct pfrt_log_device *pfrt_log_dev) +{ + acpi_handle handle = ACPI_HANDLE(pfrt_log_dev->parent_dev); + union acpi_object *out_obj, *obj; + enum pfru_dsm_status status, ext_status; + int ret = -EBUSY; + + out_obj = acpi_evaluate_dsm_typed(handle, &pfrt_log_guid, + pfrt_log_dev->info.log_revid, PFRT_FUNC_GET_LEV, + NULL, ACPI_TYPE_PACKAGE); + if (!out_obj) + return -EINVAL; + + obj = &out_obj->package.elements[0]; + if (obj->type != ACPI_TYPE_INTEGER) + goto free_acpi_buffer; + + status = obj->integer.value; + if (status != DSM_SUCCEED) { + obj = &out_obj->package.elements[1]; + ext_status = obj->integer.value; + dev_dbg(pfrt_log_dev->parent_dev, "Error Status:%d\n", status); + dev_dbg(pfrt_log_dev->parent_dev, "Error Extend Status:%d\n", ext_status); + goto free_acpi_buffer; + } + + obj = &out_obj->package.elements[2]; + if (obj->type != ACPI_TYPE_INTEGER) + goto free_acpi_buffer; + + ret = obj->integer.value; + +free_acpi_buffer: + ACPI_FREE(out_obj); + + return ret; +} + +static int valid_log_level(u32 level) +{ + return level == PFRT_LOG_ERR || level == PFRT_LOG_WARN || + level == PFRT_LOG_INFO || level == PFRT_LOG_VERB; +} + +static int valid_log_type(u32 type) +{ + return type == PFRT_LOG_EXEC_IDX || type == PFRT_LOG_HISTORY_IDX; +} + +static inline int valid_log_revid(u32 id) +{ + return id == PFRT_REVID_1 || id == PFRT_REVID_2; +} + +static long pfrt_log_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + struct pfrt_log_device *pfrt_log_dev = 
to_pfrt_log_dev(file); + struct pfrt_log_data_info data_info; + struct pfrt_log_info info; + void __user *p; + int ret = 0; + + p = (void __user *)arg; + + switch (cmd) { + case PFRT_LOG_IOC_SET_INFO: + if (copy_from_user(&info, p, sizeof(info))) + return -EFAULT; + + if (valid_log_revid(info.log_revid)) + pfrt_log_dev->info.log_revid = info.log_revid; + + if (valid_log_level(info.log_level)) { + ret = set_pfrt_log_level(info.log_level, pfrt_log_dev); + if (ret < 0) + return ret; + + pfrt_log_dev->info.log_level = info.log_level; + } + + if (valid_log_type(info.log_type)) + pfrt_log_dev->info.log_type = info.log_type; + + return 0; + + case PFRT_LOG_IOC_GET_INFO: + info.log_level = get_pfrt_log_level(pfrt_log_dev); + info.log_type = pfrt_log_dev->info.log_type; + info.log_revid = pfrt_log_dev->info.log_revid; + if (copy_to_user(p, &info, sizeof(info))) + return -EFAULT; + + return 0; + + case PFRT_LOG_IOC_GET_DATA_INFO: + ret = get_pfrt_log_data_info(&data_info, pfrt_log_dev); + if (ret) + return ret; + + if (copy_to_user(p, &data_info, sizeof(struct pfrt_log_data_info))) + return -EFAULT; + + return 0; + + default: + return -ENOTTY; + } +} + +static int +pfrt_log_mmap(struct file *file, struct vm_area_struct *vma) +{ + struct pfrt_log_device *pfrt_log_dev; + struct pfrt_log_data_info info; + unsigned long psize, vsize; + phys_addr_t base_addr; + int ret; + + if (vma->vm_flags & VM_WRITE) + return -EROFS; + + /* changing from read to write with mprotect is not allowed */ + vm_flags_clear(vma, VM_MAYWRITE); + + pfrt_log_dev = to_pfrt_log_dev(file); + + ret = get_pfrt_log_data_info(&info, pfrt_log_dev); + if (ret) + return ret; + + base_addr = (phys_addr_t)((info.chunk2_addr_hi << 32) | info.chunk2_addr_lo); + /* pfrt update has not been launched yet */ + if (!base_addr) + return -ENODEV; + + psize = info.max_data_size; + /* base address and total buffer size must be page aligned */ + if (!PAGE_ALIGNED(base_addr) || !PAGE_ALIGNED(psize)) + return -ENODEV; + + vsize = vma->vm_end - vma->vm_start; + if (vsize > psize) + return -EINVAL; + + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + if (io_remap_pfn_range(vma, vma->vm_start, PFN_DOWN(base_addr), + vsize, vma->vm_page_prot)) + return -EAGAIN; + + return 0; +} + +static const struct file_operations acpi_pfrt_log_fops = { + .owner = THIS_MODULE, + .mmap = pfrt_log_mmap, + .unlocked_ioctl = pfrt_log_ioctl, + .llseek = noop_llseek, +}; + +static void acpi_pfrt_log_remove(struct platform_device *pdev) +{ + struct pfrt_log_device *pfrt_log_dev = platform_get_drvdata(pdev); + + misc_deregister(&pfrt_log_dev->miscdev); +} + +static void pfrt_log_put_idx(void *data) +{ + struct pfrt_log_device *pfrt_log_dev = data; + + ida_free(&pfrt_log_ida, pfrt_log_dev->index); +} + +static int acpi_pfrt_log_probe(struct platform_device *pdev) +{ + acpi_handle handle = ACPI_HANDLE(&pdev->dev); + struct pfrt_log_device *pfrt_log_dev; + int ret; + + if (!acpi_has_method(handle, "_DSM")) { + dev_dbg(&pdev->dev, "Missing _DSM\n"); + return -ENODEV; + } + + pfrt_log_dev = devm_kzalloc(&pdev->dev, sizeof(*pfrt_log_dev), GFP_KERNEL); + if (!pfrt_log_dev) + return -ENOMEM; + + ret = ida_alloc(&pfrt_log_ida, GFP_KERNEL); + if (ret < 0) + return ret; + + pfrt_log_dev->index = ret; + ret = devm_add_action_or_reset(&pdev->dev, pfrt_log_put_idx, pfrt_log_dev); + if (ret) + return ret; + + pfrt_log_dev->info.log_revid = PFRT_DEFAULT_REV_ID; + pfrt_log_dev->parent_dev = &pdev->dev; + + pfrt_log_dev->miscdev.minor = MISC_DYNAMIC_MINOR; + pfrt_log_dev->miscdev.name = 
devm_kasprintf(&pdev->dev, GFP_KERNEL, + "pfrt%d", + pfrt_log_dev->index); + if (!pfrt_log_dev->miscdev.name) + return -ENOMEM; + + pfrt_log_dev->miscdev.nodename = devm_kasprintf(&pdev->dev, GFP_KERNEL, + "acpi_pfr_telemetry%d", + pfrt_log_dev->index); + if (!pfrt_log_dev->miscdev.nodename) + return -ENOMEM; + + pfrt_log_dev->miscdev.fops = &acpi_pfrt_log_fops; + pfrt_log_dev->miscdev.parent = &pdev->dev; + + ret = misc_register(&pfrt_log_dev->miscdev); + if (ret) + return ret; + + platform_set_drvdata(pdev, pfrt_log_dev); + + return 0; +} + +static const struct acpi_device_id acpi_pfrt_log_ids[] = { + {"INTC1081"}, + {} +}; +MODULE_DEVICE_TABLE(acpi, acpi_pfrt_log_ids); + +static struct platform_driver acpi_pfrt_log_driver = { + .driver = { + .name = "pfr_telemetry", + .acpi_match_table = acpi_pfrt_log_ids, + }, + .probe = acpi_pfrt_log_probe, + .remove = acpi_pfrt_log_remove, +}; +module_platform_driver(acpi_pfrt_log_driver); + +MODULE_DESCRIPTION("Platform Firmware Runtime Update Telemetry driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/acpi/pfr_update.c b/drivers/acpi/pfr_update.c new file mode 100644 index 000000000000..11b1c2828005 --- /dev/null +++ b/drivers/acpi/pfr_update.c @@ -0,0 +1,606 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * ACPI Platform Firmware Runtime Update Device driver + * + * Copyright (C) 2021 Intel Corporation + * Author: Chen Yu <yu.c.chen@intel.com> + * + * pfr_update driver is used for Platform Firmware Runtime + * Update, which includes the code injection and driver update. + */ +#include <linux/acpi.h> +#include <linux/device.h> +#include <linux/efi.h> +#include <linux/err.h> +#include <linux/errno.h> +#include <linux/file.h> +#include <linux/fs.h> +#include <linux/idr.h> +#include <linux/miscdevice.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/string.h> +#include <linux/uaccess.h> +#include <linux/uio.h> +#include <linux/uuid.h> + +#include <uapi/linux/pfrut.h> + +#define PFRU_FUNC_STANDARD_QUERY 0 +#define PFRU_FUNC_QUERY_UPDATE_CAP 1 +#define PFRU_FUNC_QUERY_BUF 2 +#define PFRU_FUNC_START 3 + +#define PFRU_CODE_INJECT_TYPE 1 +#define PFRU_DRIVER_UPDATE_TYPE 2 + +#define PFRU_REVID_1 1 +#define PFRU_REVID_2 2 +#define PFRU_DEFAULT_REV_ID PFRU_REVID_1 + +enum cap_index { + CAP_STATUS_IDX = 0, + CAP_UPDATE_IDX = 1, + CAP_CODE_TYPE_IDX = 2, + CAP_FW_VER_IDX = 3, + CAP_CODE_RT_VER_IDX = 4, + CAP_DRV_TYPE_IDX = 5, + CAP_DRV_RT_VER_IDX = 6, + CAP_DRV_SVN_IDX = 7, + CAP_PLAT_ID_IDX = 8, + CAP_OEM_ID_IDX = 9, + CAP_OEM_INFO_IDX = 10, + CAP_NR_IDX +}; + +enum buf_index { + BUF_STATUS_IDX = 0, + BUF_EXT_STATUS_IDX = 1, + BUF_ADDR_LOW_IDX = 2, + BUF_ADDR_HI_IDX = 3, + BUF_SIZE_IDX = 4, + BUF_NR_IDX +}; + +enum update_index { + UPDATE_STATUS_IDX = 0, + UPDATE_EXT_STATUS_IDX = 1, + UPDATE_AUTH_TIME_LOW_IDX = 2, + UPDATE_AUTH_TIME_HI_IDX = 3, + UPDATE_EXEC_TIME_LOW_IDX = 4, + UPDATE_EXEC_TIME_HI_IDX = 5, + UPDATE_NR_IDX +}; + +enum pfru_start_action { + START_STAGE = 0, + START_ACTIVATE = 1, + START_STAGE_ACTIVATE = 2, +}; + +struct pfru_device { + u32 rev_id, index; + struct device *parent_dev; + struct miscdevice miscdev; +}; + +static DEFINE_IDA(pfru_ida); + +/* + * Manual reference: + * https://uefi.org/sites/default/files/resources/Intel_MM_OS_Interface_Spec_Rev100.pdf + * + * pfru_guid is the parameter for _DSM method + */ +static const guid_t pfru_guid = + GUID_INIT(0xECF9533B, 0x4A3C, 0x4E89, 0x93, 0x9E, 0xC7, 0x71, + 0x12, 0x60, 0x1C, 0x6D); + +/* pfru_code_inj_guid is the UUID to identify code injection EFI 
capsule file */ +static const guid_t pfru_code_inj_guid = + GUID_INIT(0xB2F84B79, 0x7B6E, 0x4E45, 0x88, 0x5F, 0x3F, 0xB9, + 0xBB, 0x18, 0x54, 0x02); + +/* pfru_drv_update_guid is the UUID to identify driver update EFI capsule file */ +static const guid_t pfru_drv_update_guid = + GUID_INIT(0x4569DD8C, 0x75F1, 0x429A, 0xA3, 0xD6, 0x24, 0xDE, + 0x80, 0x97, 0xA0, 0xDF); + +static inline int pfru_valid_revid(u32 id) +{ + return id == PFRU_REVID_1 || id == PFRU_REVID_2; +} + +static inline struct pfru_device *to_pfru_dev(struct file *file) +{ + return container_of(file->private_data, struct pfru_device, miscdev); +} + +static int query_capability(struct pfru_update_cap_info *cap_hdr, + struct pfru_device *pfru_dev) +{ + acpi_handle handle = ACPI_HANDLE(pfru_dev->parent_dev); + union acpi_object *out_obj; + int ret = -EINVAL; + + out_obj = acpi_evaluate_dsm_typed(handle, &pfru_guid, + pfru_dev->rev_id, + PFRU_FUNC_QUERY_UPDATE_CAP, + NULL, ACPI_TYPE_PACKAGE); + if (!out_obj) { + dev_dbg(pfru_dev->parent_dev, + "Query cap failed with no object\n"); + return ret; + } + + if (out_obj->package.count < CAP_NR_IDX || + out_obj->package.elements[CAP_STATUS_IDX].type != ACPI_TYPE_INTEGER || + out_obj->package.elements[CAP_UPDATE_IDX].type != ACPI_TYPE_INTEGER || + out_obj->package.elements[CAP_CODE_TYPE_IDX].type != ACPI_TYPE_BUFFER || + out_obj->package.elements[CAP_FW_VER_IDX].type != ACPI_TYPE_INTEGER || + out_obj->package.elements[CAP_CODE_RT_VER_IDX].type != ACPI_TYPE_INTEGER || + out_obj->package.elements[CAP_DRV_TYPE_IDX].type != ACPI_TYPE_BUFFER || + out_obj->package.elements[CAP_DRV_RT_VER_IDX].type != ACPI_TYPE_INTEGER || + out_obj->package.elements[CAP_DRV_SVN_IDX].type != ACPI_TYPE_INTEGER || + out_obj->package.elements[CAP_PLAT_ID_IDX].type != ACPI_TYPE_BUFFER || + out_obj->package.elements[CAP_OEM_ID_IDX].type != ACPI_TYPE_BUFFER || + out_obj->package.elements[CAP_OEM_INFO_IDX].type != ACPI_TYPE_BUFFER) { + dev_dbg(pfru_dev->parent_dev, + "Query cap failed with invalid package count/type\n"); + goto free_acpi_buffer; + } + + cap_hdr->status = out_obj->package.elements[CAP_STATUS_IDX].integer.value; + if (cap_hdr->status != DSM_SUCCEED) { + ret = -EBUSY; + dev_dbg(pfru_dev->parent_dev, "Query cap Error Status:%d\n", + cap_hdr->status); + goto free_acpi_buffer; + } + + cap_hdr->update_cap = out_obj->package.elements[CAP_UPDATE_IDX].integer.value; + memcpy(&cap_hdr->code_type, + out_obj->package.elements[CAP_CODE_TYPE_IDX].buffer.pointer, + out_obj->package.elements[CAP_CODE_TYPE_IDX].buffer.length); + cap_hdr->fw_version = + out_obj->package.elements[CAP_FW_VER_IDX].integer.value; + cap_hdr->code_rt_version = + out_obj->package.elements[CAP_CODE_RT_VER_IDX].integer.value; + memcpy(&cap_hdr->drv_type, + out_obj->package.elements[CAP_DRV_TYPE_IDX].buffer.pointer, + out_obj->package.elements[CAP_DRV_TYPE_IDX].buffer.length); + cap_hdr->drv_rt_version = + out_obj->package.elements[CAP_DRV_RT_VER_IDX].integer.value; + cap_hdr->drv_svn = + out_obj->package.elements[CAP_DRV_SVN_IDX].integer.value; + memcpy(&cap_hdr->platform_id, + out_obj->package.elements[CAP_PLAT_ID_IDX].buffer.pointer, + out_obj->package.elements[CAP_PLAT_ID_IDX].buffer.length); + memcpy(&cap_hdr->oem_id, + out_obj->package.elements[CAP_OEM_ID_IDX].buffer.pointer, + out_obj->package.elements[CAP_OEM_ID_IDX].buffer.length); + cap_hdr->oem_info_len = + out_obj->package.elements[CAP_OEM_INFO_IDX].buffer.length; + + ret = 0; + +free_acpi_buffer: + ACPI_FREE(out_obj); + + return ret; +} + +static int query_buffer(struct 
pfru_com_buf_info *info, + struct pfru_device *pfru_dev) +{ + acpi_handle handle = ACPI_HANDLE(pfru_dev->parent_dev); + union acpi_object *out_obj; + int ret = -EINVAL; + + out_obj = acpi_evaluate_dsm_typed(handle, &pfru_guid, + pfru_dev->rev_id, PFRU_FUNC_QUERY_BUF, + NULL, ACPI_TYPE_PACKAGE); + if (!out_obj) { + dev_dbg(pfru_dev->parent_dev, + "Query buf failed with no object\n"); + return ret; + } + + if (out_obj->package.count < BUF_NR_IDX || + out_obj->package.elements[BUF_STATUS_IDX].type != ACPI_TYPE_INTEGER || + out_obj->package.elements[BUF_EXT_STATUS_IDX].type != ACPI_TYPE_INTEGER || + out_obj->package.elements[BUF_ADDR_LOW_IDX].type != ACPI_TYPE_INTEGER || + out_obj->package.elements[BUF_ADDR_HI_IDX].type != ACPI_TYPE_INTEGER || + out_obj->package.elements[BUF_SIZE_IDX].type != ACPI_TYPE_INTEGER) { + dev_dbg(pfru_dev->parent_dev, + "Query buf failed with invalid package count/type\n"); + goto free_acpi_buffer; + } + + info->status = out_obj->package.elements[BUF_STATUS_IDX].integer.value; + info->ext_status = + out_obj->package.elements[BUF_EXT_STATUS_IDX].integer.value; + if (info->status != DSM_SUCCEED) { + ret = -EBUSY; + dev_dbg(pfru_dev->parent_dev, + "Query buf failed with Error Status:%d\n", info->status); + dev_dbg(pfru_dev->parent_dev, + "Query buf failed with Error Extended Status:%d\n", info->ext_status); + + goto free_acpi_buffer; + } + + info->addr_lo = + out_obj->package.elements[BUF_ADDR_LOW_IDX].integer.value; + info->addr_hi = + out_obj->package.elements[BUF_ADDR_HI_IDX].integer.value; + info->buf_size = out_obj->package.elements[BUF_SIZE_IDX].integer.value; + + ret = 0; + +free_acpi_buffer: + ACPI_FREE(out_obj); + + return ret; +} + +static int get_image_type(const struct efi_manage_capsule_image_header *img_hdr, + struct pfru_device *pfru_dev) +{ + const efi_guid_t *image_type_id = &img_hdr->image_type_id; + + /* check whether this is a code injection or driver update */ + if (guid_equal(image_type_id, &pfru_code_inj_guid)) + return PFRU_CODE_INJECT_TYPE; + + if (guid_equal(image_type_id, &pfru_drv_update_guid)) + return PFRU_DRIVER_UPDATE_TYPE; + + return -EINVAL; +} + +static int adjust_efi_size(const struct efi_manage_capsule_image_header *img_hdr, + int size) +{ + /* + * The (u64 hw_ins) was introduced in UEFI spec version 2, + * and (u64 capsule_support) was introduced in version 3. + * The size needs to be adjusted accordingly. That is to + * say, version 1 should subtract the size of hw_ins+capsule_support, + * and version 2 should subtract the size of capsule_support. + */ + size += sizeof(struct efi_manage_capsule_image_header); + switch (img_hdr->ver) { + case 1: + return size - 2 * sizeof(u64); + + case 2: + return size - sizeof(u64); + + default: + /* only support version 1 and 2 */ + return -EINVAL; + } +} + +static bool applicable_image(const void *data, struct pfru_update_cap_info *cap, + struct pfru_device *pfru_dev) +{ + struct pfru_payload_hdr *payload_hdr; + const efi_capsule_header_t *cap_hdr = data; + const struct efi_manage_capsule_header *m_hdr; + const struct efi_manage_capsule_image_header *m_img_hdr; + const struct efi_image_auth *auth; + int type, size; + + /* + * If the code in the capsule is older than the current + * firmware code, the update will be rejected by the firmware, + * so check the version of it upfront without engaging the + * Management Mode update mechanism which may be costly.
+ */ + size = cap_hdr->headersize; + m_hdr = data + size; + /* + * Current data structure size plus variable array indicated + * by number of (emb_drv_cnt + payload_cnt) + */ + size += offsetof(struct efi_manage_capsule_header, offset_list) + + (m_hdr->emb_drv_cnt + m_hdr->payload_cnt) * sizeof(u64); + m_img_hdr = data + size; + + type = get_image_type(m_img_hdr, pfru_dev); + if (type < 0) { + dev_dbg(pfru_dev->parent_dev, "Invalid image type\n"); + return false; + } + + size = adjust_efi_size(m_img_hdr, size); + if (size < 0) { + dev_dbg(pfru_dev->parent_dev, "Invalid image size\n"); + return false; + } + + auth = data + size; + size += sizeof(u64) + auth->auth_info.hdr.len; + payload_hdr = (struct pfru_payload_hdr *)(data + size); + + /* finally compare the version */ + if (type == PFRU_CODE_INJECT_TYPE) + return payload_hdr->rt_ver >= cap->code_rt_version; + + return payload_hdr->svn_ver >= cap->drv_svn; +} + +static void print_update_debug_info(struct pfru_updated_result *result, + struct pfru_device *pfru_dev) +{ + dev_dbg(pfru_dev->parent_dev, "Update result:\n"); + dev_dbg(pfru_dev->parent_dev, "Authentication Time Low:%lld\n", + result->low_auth_time); + dev_dbg(pfru_dev->parent_dev, "Authentication Time High:%lld\n", + result->high_auth_time); + dev_dbg(pfru_dev->parent_dev, "Execution Time Low:%lld\n", + result->low_exec_time); + dev_dbg(pfru_dev->parent_dev, "Execution Time High:%lld\n", + result->high_exec_time); +} + +static int start_update(int action, struct pfru_device *pfru_dev) +{ + union acpi_object *out_obj, in_obj, in_buf; + struct pfru_updated_result update_result; + acpi_handle handle; + int ret = -EINVAL; + + memset(&in_obj, 0, sizeof(in_obj)); + memset(&in_buf, 0, sizeof(in_buf)); + in_obj.type = ACPI_TYPE_PACKAGE; + in_obj.package.count = 1; + in_obj.package.elements = &in_buf; + in_buf.type = ACPI_TYPE_INTEGER; + in_buf.integer.value = action; + + handle = ACPI_HANDLE(pfru_dev->parent_dev); + out_obj = acpi_evaluate_dsm_typed(handle, &pfru_guid, + pfru_dev->rev_id, PFRU_FUNC_START, + &in_obj, ACPI_TYPE_PACKAGE); + if (!out_obj) { + dev_dbg(pfru_dev->parent_dev, + "Update failed to start with no object\n"); + return ret; + } + + if (out_obj->package.count < UPDATE_NR_IDX || + out_obj->package.elements[UPDATE_STATUS_IDX].type != ACPI_TYPE_INTEGER || + out_obj->package.elements[UPDATE_EXT_STATUS_IDX].type != ACPI_TYPE_INTEGER || + out_obj->package.elements[UPDATE_AUTH_TIME_LOW_IDX].type != ACPI_TYPE_INTEGER || + out_obj->package.elements[UPDATE_AUTH_TIME_HI_IDX].type != ACPI_TYPE_INTEGER || + out_obj->package.elements[UPDATE_EXEC_TIME_LOW_IDX].type != ACPI_TYPE_INTEGER || + out_obj->package.elements[UPDATE_EXEC_TIME_HI_IDX].type != ACPI_TYPE_INTEGER) { + dev_dbg(pfru_dev->parent_dev, + "Update failed with invalid package count/type\n"); + goto free_acpi_buffer; + } + + update_result.status = + out_obj->package.elements[UPDATE_STATUS_IDX].integer.value; + update_result.ext_status = + out_obj->package.elements[UPDATE_EXT_STATUS_IDX].integer.value; + + if (update_result.status != DSM_SUCCEED) { + ret = -EBUSY; + dev_dbg(pfru_dev->parent_dev, + "Update failed with Error Status:%d\n", update_result.status); + dev_dbg(pfru_dev->parent_dev, + "Update failed with Error Extended Status:%d\n", + update_result.ext_status); + + goto free_acpi_buffer; + } + + update_result.low_auth_time = + out_obj->package.elements[UPDATE_AUTH_TIME_LOW_IDX].integer.value; + update_result.high_auth_time = + out_obj->package.elements[UPDATE_AUTH_TIME_HI_IDX].integer.value; + 
update_result.low_exec_time = + out_obj->package.elements[UPDATE_EXEC_TIME_LOW_IDX].integer.value; + update_result.high_exec_time = + out_obj->package.elements[UPDATE_EXEC_TIME_HI_IDX].integer.value; + + print_update_debug_info(&update_result, pfru_dev); + ret = 0; + +free_acpi_buffer: + ACPI_FREE(out_obj); + + return ret; +} + +static long pfru_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + struct pfru_update_cap_info cap_hdr; + struct pfru_device *pfru_dev = to_pfru_dev(file); + void __user *p = (void __user *)arg; + u32 rev; + int ret; + + switch (cmd) { + case PFRU_IOC_QUERY_CAP: + ret = query_capability(&cap_hdr, pfru_dev); + if (ret) + return ret; + + if (copy_to_user(p, &cap_hdr, sizeof(cap_hdr))) + return -EFAULT; + + return 0; + + case PFRU_IOC_SET_REV: + if (copy_from_user(&rev, p, sizeof(rev))) + return -EFAULT; + + if (!pfru_valid_revid(rev)) + return -EINVAL; + + pfru_dev->rev_id = rev; + + return 0; + + case PFRU_IOC_STAGE: + return start_update(START_STAGE, pfru_dev); + + case PFRU_IOC_ACTIVATE: + return start_update(START_ACTIVATE, pfru_dev); + + case PFRU_IOC_STAGE_ACTIVATE: + return start_update(START_STAGE_ACTIVATE, pfru_dev); + + default: + return -ENOTTY; + } +} + +static ssize_t pfru_write(struct file *file, const char __user *buf, + size_t len, loff_t *ppos) +{ + struct pfru_device *pfru_dev = to_pfru_dev(file); + struct pfru_update_cap_info cap; + struct pfru_com_buf_info buf_info; + phys_addr_t phy_addr; + struct iov_iter iter; + struct iovec iov; + char *buf_ptr; + int ret; + + ret = query_buffer(&buf_info, pfru_dev); + if (ret) + return ret; + + if (len > buf_info.buf_size) { + dev_dbg(pfru_dev->parent_dev, "Capsule image size too large\n"); + return -EINVAL; + } + + iov.iov_base = (void __user *)buf; + iov.iov_len = len; + iov_iter_init(&iter, ITER_SOURCE, &iov, 1, len); + + /* map the communication buffer */ + phy_addr = (phys_addr_t)((buf_info.addr_hi << 32) | buf_info.addr_lo); + buf_ptr = memremap(phy_addr, buf_info.buf_size, MEMREMAP_WB); + if (!buf_ptr) { + dev_dbg(pfru_dev->parent_dev, "Failed to remap the buffer\n"); + return -ENOMEM; + } + + if (!copy_from_iter_full(buf_ptr, len, &iter)) { + dev_dbg(pfru_dev->parent_dev, + "Failed to copy the data from the user space buffer\n"); + ret = -EINVAL; + goto unmap; + } + + /* check if the capsule header has a valid version number */ + ret = query_capability(&cap, pfru_dev); + if (ret) + goto unmap; + + if (!applicable_image(buf_ptr, &cap, pfru_dev)) + ret = -EINVAL; + +unmap: + memunmap(buf_ptr); + + return ret ?: len; +} + +static const struct file_operations acpi_pfru_fops = { + .owner = THIS_MODULE, + .write = pfru_write, + .unlocked_ioctl = pfru_ioctl, + .llseek = noop_llseek, +}; + +static void acpi_pfru_remove(struct platform_device *pdev) +{ + struct pfru_device *pfru_dev = platform_get_drvdata(pdev); + + misc_deregister(&pfru_dev->miscdev); +} + +static void pfru_put_idx(void *data) +{ + struct pfru_device *pfru_dev = data; + + ida_free(&pfru_ida, pfru_dev->index); +} + +static int acpi_pfru_probe(struct platform_device *pdev) +{ + acpi_handle handle = ACPI_HANDLE(&pdev->dev); + struct pfru_device *pfru_dev; + int ret; + + if (!acpi_has_method(handle, "_DSM")) { + dev_dbg(&pdev->dev, "Missing _DSM\n"); + return -ENODEV; + } + + pfru_dev = devm_kzalloc(&pdev->dev, sizeof(*pfru_dev), GFP_KERNEL); + if (!pfru_dev) + return -ENOMEM; + + ret = ida_alloc(&pfru_ida, GFP_KERNEL); + if (ret < 0) + return ret; + + pfru_dev->index = ret; + ret = devm_add_action_or_reset(&pdev->dev, 
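To make the new user-space interface above easier to follow, here is a minimal usage sketch; it is not part of the patch. It assumes the UAPI header added by this series is installed as <linux/pfrut.h>; the /dev/acpi_pfr_update0 node name follows the miscdevice name set in acpi_pfru_probe(), and the capsule file name is invented.

	/* Illustrative sketch, not part of the patch: drive the pfr_update device. */
	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <linux/pfrut.h>	/* assumed install location of the UAPI header */

	int main(void)
	{
		static char image[4 * 1024 * 1024];
		struct pfru_update_cap_info cap;
		ssize_t n;
		FILE *f;
		int fd;

		fd = open("/dev/acpi_pfr_update0", O_RDWR);
		if (fd < 0)
			return 1;

		/* Ask the firmware what it can update (PFRU_FUNC_QUERY_UPDATE_CAP). */
		if (ioctl(fd, PFRU_IOC_QUERY_CAP, &cap) < 0)
			goto err;
		printf("code injection runtime version: %u\n", cap.code_rt_version);

		/* Load an EFI capsule built for this platform (file name invented). */
		f = fopen("code_injection.cap", "rb");
		if (!f)
			goto err;
		n = fread(image, 1, sizeof(image), f);
		fclose(f);
		if (n <= 0)
			goto err;

		/*
		 * write() copies the capsule into the firmware communication buffer
		 * and checks that it is applicable; the ioctl then asks the firmware
		 * to stage and activate it in one go.
		 */
		if (write(fd, image, n) != n)
			goto err;
		if (ioctl(fd, PFRU_IOC_STAGE_ACTIVATE) < 0)
			goto err;

		close(fd);
		return 0;
	err:
		close(fd);
		return 1;
	}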
pfru_put_idx, pfru_dev); + if (ret) + return ret; + + pfru_dev->rev_id = PFRU_DEFAULT_REV_ID; + pfru_dev->parent_dev = &pdev->dev; + + pfru_dev->miscdev.minor = MISC_DYNAMIC_MINOR; + pfru_dev->miscdev.name = devm_kasprintf(&pdev->dev, GFP_KERNEL, + "pfru%d", pfru_dev->index); + if (!pfru_dev->miscdev.name) + return -ENOMEM; + + pfru_dev->miscdev.nodename = devm_kasprintf(&pdev->dev, GFP_KERNEL, + "acpi_pfr_update%d", pfru_dev->index); + if (!pfru_dev->miscdev.nodename) + return -ENOMEM; + + pfru_dev->miscdev.fops = &acpi_pfru_fops; + pfru_dev->miscdev.parent = &pdev->dev; + + ret = misc_register(&pfru_dev->miscdev); + if (ret) + return ret; + + platform_set_drvdata(pdev, pfru_dev); + + return 0; +} + +static const struct acpi_device_id acpi_pfru_ids[] = { + {"INTC1080"}, + {} +}; +MODULE_DEVICE_TABLE(acpi, acpi_pfru_ids); + +static struct platform_driver acpi_pfru_driver = { + .driver = { + .name = "pfr_update", + .acpi_match_table = acpi_pfru_ids, + }, + .probe = acpi_pfru_probe, + .remove = acpi_pfru_remove, +}; +module_platform_driver(acpi_pfru_driver); + +MODULE_DESCRIPTION("Platform Firmware Runtime Update device driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/acpi/platform_profile.c b/drivers/acpi/platform_profile.c new file mode 100644 index 000000000000..ea04a8c69215 --- /dev/null +++ b/drivers/acpi/platform_profile.c @@ -0,0 +1,718 @@ +// SPDX-License-Identifier: GPL-2.0-or-later + +/* Platform profile sysfs interface */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/acpi.h> +#include <linux/bits.h> +#include <linux/cleanup.h> +#include <linux/init.h> +#include <linux/mutex.h> +#include <linux/platform_profile.h> +#include <linux/sysfs.h> + +#define to_pprof_handler(d) (container_of(d, struct platform_profile_handler, dev)) + +static DEFINE_MUTEX(profile_lock); + +struct platform_profile_handler { + const char *name; + struct device dev; + int minor; + unsigned long choices[BITS_TO_LONGS(PLATFORM_PROFILE_LAST)]; + unsigned long hidden_choices[BITS_TO_LONGS(PLATFORM_PROFILE_LAST)]; + const struct platform_profile_ops *ops; +}; + +struct aggregate_choices_data { + unsigned long aggregate[BITS_TO_LONGS(PLATFORM_PROFILE_LAST)]; + int count; +}; + +static const char * const profile_names[] = { + [PLATFORM_PROFILE_LOW_POWER] = "low-power", + [PLATFORM_PROFILE_COOL] = "cool", + [PLATFORM_PROFILE_QUIET] = "quiet", + [PLATFORM_PROFILE_BALANCED] = "balanced", + [PLATFORM_PROFILE_BALANCED_PERFORMANCE] = "balanced-performance", + [PLATFORM_PROFILE_PERFORMANCE] = "performance", + [PLATFORM_PROFILE_MAX_POWER] = "max-power", + [PLATFORM_PROFILE_CUSTOM] = "custom", +}; +static_assert(ARRAY_SIZE(profile_names) == PLATFORM_PROFILE_LAST); + +static DEFINE_IDA(platform_profile_ida); + +/** + * _commmon_choices_show - Show the available profile choices + * @choices: The available profile choices + * @buf: The buffer to write to + * + * Return: The number of bytes written + */ +static ssize_t _commmon_choices_show(unsigned long *choices, char *buf) +{ + int i, len = 0; + + for_each_set_bit(i, choices, PLATFORM_PROFILE_LAST) { + if (len == 0) + len += sysfs_emit_at(buf, len, "%s", profile_names[i]); + else + len += sysfs_emit_at(buf, len, " %s", profile_names[i]); + } + len += sysfs_emit_at(buf, len, "\n"); + + return len; +} + +/** + * _store_class_profile - Set the profile for a class device + * @dev: The class device + * @data: The profile to set + * + * Return: 0 on success, -errno on failure + */ +static int _store_class_profile(struct device *dev, void *data) +{ + 
struct platform_profile_handler *handler; + int *bit = (int *)data; + + lockdep_assert_held(&profile_lock); + handler = to_pprof_handler(dev); + if (!test_bit(*bit, handler->choices) && !test_bit(*bit, handler->hidden_choices)) + return -EOPNOTSUPP; + + return handler->ops->profile_set(dev, *bit); +} + +/** + * _notify_class_profile - Notify the class device of a profile change + * @dev: The class device + * @data: Unused + * + * Return: 0 on success, -errno on failure + */ +static int _notify_class_profile(struct device *dev, void *data) +{ + struct platform_profile_handler *handler = to_pprof_handler(dev); + + lockdep_assert_held(&profile_lock); + sysfs_notify(&handler->dev.kobj, NULL, "profile"); + kobject_uevent(&handler->dev.kobj, KOBJ_CHANGE); + + return 0; +} + +/** + * get_class_profile - Show the current profile for a class device + * @dev: The class device + * @profile: The profile to return + * + * Return: 0 on success, -errno on failure + */ +static int get_class_profile(struct device *dev, + enum platform_profile_option *profile) +{ + struct platform_profile_handler *handler; + enum platform_profile_option val; + int err; + + lockdep_assert_held(&profile_lock); + handler = to_pprof_handler(dev); + err = handler->ops->profile_get(dev, &val); + if (err) { + pr_err("Failed to get profile for handler %s\n", handler->name); + return err; + } + + if (WARN_ON(val >= PLATFORM_PROFILE_LAST)) + return -EINVAL; + *profile = val; + + return 0; +} + +/** + * name_show - Show the name of the profile handler + * @dev: The device + * @attr: The attribute + * @buf: The buffer to write to + * + * Return: The number of bytes written + */ +static ssize_t name_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct platform_profile_handler *handler = to_pprof_handler(dev); + + return sysfs_emit(buf, "%s\n", handler->name); +} +static DEVICE_ATTR_RO(name); + +/** + * choices_show - Show the available profile choices + * @dev: The device + * @attr: The attribute + * @buf: The buffer to write to + * + * Return: The number of bytes written + */ +static ssize_t choices_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct platform_profile_handler *handler = to_pprof_handler(dev); + + return _commmon_choices_show(handler->choices, buf); +} +static DEVICE_ATTR_RO(choices); + +/** + * profile_show - Show the current profile for a class device + * @dev: The device + * @attr: The attribute + * @buf: The buffer to write to + * + * Return: The number of bytes written + */ +static ssize_t profile_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + enum platform_profile_option profile = PLATFORM_PROFILE_LAST; + int err; + + scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &profile_lock) { + err = get_class_profile(dev, &profile); + if (err) + return err; + } + + return sysfs_emit(buf, "%s\n", profile_names[profile]); +} + +/** + * profile_store - Set the profile for a class device + * @dev: The device + * @attr: The attribute + * @buf: The buffer to read from + * @count: The number of bytes to read + * + * Return: The number of bytes read + */ +static ssize_t profile_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + int index, ret; + + index = sysfs_match_string(profile_names, buf); + if (index < 0) + return -EINVAL; + + scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &profile_lock) { + ret = _store_class_profile(dev, &index); + if (ret) + return ret; + } + + sysfs_notify(acpi_kobj, 
NULL, "platform_profile"); + + return count; +} +static DEVICE_ATTR_RW(profile); + +static struct attribute *profile_attrs[] = { + &dev_attr_name.attr, + &dev_attr_choices.attr, + &dev_attr_profile.attr, + NULL +}; +ATTRIBUTE_GROUPS(profile); + +static void pprof_device_release(struct device *dev) +{ + struct platform_profile_handler *pprof = to_pprof_handler(dev); + + kfree(pprof); +} + +static const struct class platform_profile_class = { + .name = "platform-profile", + .dev_groups = profile_groups, + .dev_release = pprof_device_release, +}; + +/** + * _aggregate_choices - Aggregate the available profile choices + * @dev: The device + * @arg: struct aggregate_choices_data, with it's aggregate member bitmap + * initially filled with ones + * + * Return: 0 on success, -errno on failure + */ +static int _aggregate_choices(struct device *dev, void *arg) +{ + unsigned long tmp[BITS_TO_LONGS(PLATFORM_PROFILE_LAST)]; + struct aggregate_choices_data *data = arg; + struct platform_profile_handler *handler; + + lockdep_assert_held(&profile_lock); + + handler = to_pprof_handler(dev); + bitmap_or(tmp, handler->choices, handler->hidden_choices, PLATFORM_PROFILE_LAST); + bitmap_and(data->aggregate, tmp, data->aggregate, PLATFORM_PROFILE_LAST); + data->count++; + + return 0; +} + +/** + * _remove_hidden_choices - Remove hidden choices from aggregate data + * @dev: The device + * @arg: struct aggregate_choices_data + * + * Return: 0 on success, -errno on failure + */ +static int _remove_hidden_choices(struct device *dev, void *arg) +{ + struct aggregate_choices_data *data = arg; + struct platform_profile_handler *handler; + + lockdep_assert_held(&profile_lock); + handler = to_pprof_handler(dev); + bitmap_andnot(data->aggregate, handler->choices, + handler->hidden_choices, PLATFORM_PROFILE_LAST); + + return 0; +} + +/** + * platform_profile_choices_show - Show the available profile choices for legacy sysfs interface + * @kobj: The kobject + * @attr: The attribute + * @buf: The buffer to write to + * + * Return: The number of bytes written + */ +static ssize_t platform_profile_choices_show(struct kobject *kobj, + struct kobj_attribute *attr, + char *buf) +{ + struct aggregate_choices_data data = { + .aggregate = { [0 ... 
BITS_TO_LONGS(PLATFORM_PROFILE_LAST) - 1] = ~0UL }, + .count = 0, + }; + int err; + + scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &profile_lock) { + err = class_for_each_device(&platform_profile_class, NULL, + &data, _aggregate_choices); + if (err) + return err; + if (data.count == 1) { + err = class_for_each_device(&platform_profile_class, NULL, + &data, _remove_hidden_choices); + if (err) + return err; + } + } + + /* no profile handler registered any more */ + if (bitmap_empty(data.aggregate, PLATFORM_PROFILE_LAST)) + return -EINVAL; + + return _commmon_choices_show(data.aggregate, buf); +} + +/** + * _aggregate_profiles - Aggregate the profiles for legacy sysfs interface + * @dev: The device + * @data: The profile to return + * + * Return: 0 on success, -errno on failure + */ +static int _aggregate_profiles(struct device *dev, void *data) +{ + enum platform_profile_option *profile = data; + enum platform_profile_option val; + int err; + + err = get_class_profile(dev, &val); + if (err) + return err; + + if (*profile != PLATFORM_PROFILE_LAST && *profile != val) + *profile = PLATFORM_PROFILE_CUSTOM; + else + *profile = val; + + return 0; +} + +/** + * _store_and_notify - Store and notify a class from legacy sysfs interface + * @dev: The device + * @data: The profile to return + * + * Return: 0 on success, -errno on failure + */ +static int _store_and_notify(struct device *dev, void *data) +{ + enum platform_profile_option *profile = data; + int err; + + err = _store_class_profile(dev, profile); + if (err) + return err; + return _notify_class_profile(dev, NULL); +} + +/** + * platform_profile_show - Show the current profile for legacy sysfs interface + * @kobj: The kobject + * @attr: The attribute + * @buf: The buffer to write to + * + * Return: The number of bytes written + */ +static ssize_t platform_profile_show(struct kobject *kobj, + struct kobj_attribute *attr, + char *buf) +{ + enum platform_profile_option profile = PLATFORM_PROFILE_LAST; + int err; + + scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &profile_lock) { + err = class_for_each_device(&platform_profile_class, NULL, + &profile, _aggregate_profiles); + if (err) + return err; + } + + /* no profile handler registered any more */ + if (profile == PLATFORM_PROFILE_LAST) + return -EINVAL; + + return sysfs_emit(buf, "%s\n", profile_names[profile]); +} + +/** + * platform_profile_store - Set the profile for legacy sysfs interface + * @kobj: The kobject + * @attr: The attribute + * @buf: The buffer to read from + * @count: The number of bytes to read + * + * Return: The number of bytes read + */ +static ssize_t platform_profile_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buf, size_t count) +{ + struct aggregate_choices_data data = { + .aggregate = { [0 ... 
BITS_TO_LONGS(PLATFORM_PROFILE_LAST) - 1] = ~0UL }, + .count = 0, + }; + int ret; + int i; + + /* Scan for a matching profile */ + i = sysfs_match_string(profile_names, buf); + if (i < 0 || i == PLATFORM_PROFILE_CUSTOM) + return -EINVAL; + + scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &profile_lock) { + ret = class_for_each_device(&platform_profile_class, NULL, + &data, _aggregate_choices); + if (ret) + return ret; + if (!test_bit(i, data.aggregate)) + return -EOPNOTSUPP; + + ret = class_for_each_device(&platform_profile_class, NULL, &i, + _store_and_notify); + if (ret) + return ret; + } + + sysfs_notify(acpi_kobj, NULL, "platform_profile"); + + return count; +} + +static struct kobj_attribute attr_platform_profile_choices = __ATTR_RO(platform_profile_choices); +static struct kobj_attribute attr_platform_profile = __ATTR_RW(platform_profile); + +static struct attribute *platform_profile_attrs[] = { + &attr_platform_profile_choices.attr, + &attr_platform_profile.attr, + NULL +}; + +static int profile_class_registered(struct device *dev, const void *data) +{ + return 1; +} + +static umode_t profile_class_is_visible(struct kobject *kobj, struct attribute *attr, int idx) +{ + struct device *dev; + + dev = class_find_device(&platform_profile_class, NULL, NULL, profile_class_registered); + if (!dev) + return 0; + + put_device(dev); + + return attr->mode; +} + +static const struct attribute_group platform_profile_group = { + .attrs = platform_profile_attrs, + .is_visible = profile_class_is_visible, +}; + +/** + * platform_profile_notify - Notify class device and legacy sysfs interface + * @dev: The class device + */ +void platform_profile_notify(struct device *dev) +{ + scoped_cond_guard(mutex_intr, return, &profile_lock) { + _notify_class_profile(dev, NULL); + } + sysfs_notify(acpi_kobj, NULL, "platform_profile"); +} +EXPORT_SYMBOL_GPL(platform_profile_notify); + +/** + * platform_profile_cycle - Cycles profiles available on all registered class devices + * + * Return: 0 on success, -errno on failure + */ +int platform_profile_cycle(void) +{ + struct aggregate_choices_data data = { + .aggregate = { [0 ... 
BITS_TO_LONGS(PLATFORM_PROFILE_LAST) - 1] = ~0UL }, + .count = 0, + }; + enum platform_profile_option next = PLATFORM_PROFILE_LAST; + enum platform_profile_option profile = PLATFORM_PROFILE_LAST; + int err; + + scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &profile_lock) { + err = class_for_each_device(&platform_profile_class, NULL, + &profile, _aggregate_profiles); + if (err) + return err; + + if (profile == PLATFORM_PROFILE_MAX_POWER || + profile == PLATFORM_PROFILE_CUSTOM || + profile == PLATFORM_PROFILE_LAST) + return -EINVAL; + + err = class_for_each_device(&platform_profile_class, NULL, + &data, _aggregate_choices); + if (err) + return err; + + /* never iterate into a custom or max power if all drivers supported it */ + clear_bit(PLATFORM_PROFILE_MAX_POWER, data.aggregate); + clear_bit(PLATFORM_PROFILE_CUSTOM, data.aggregate); + + next = find_next_bit_wrap(data.aggregate, + PLATFORM_PROFILE_LAST, + profile + 1); + + err = class_for_each_device(&platform_profile_class, NULL, &next, + _store_and_notify); + + if (err) + return err; + } + + sysfs_notify(acpi_kobj, NULL, "platform_profile"); + + return 0; +} +EXPORT_SYMBOL_GPL(platform_profile_cycle); + +/** + * platform_profile_register - Creates and registers a platform profile class device + * @dev: Parent device + * @name: Name of the class device + * @drvdata: Driver data that will be attached to the class device + * @ops: Platform profile's mandatory operations + * + * Return: pointer to the new class device on success, ERR_PTR on failure + */ +struct device *platform_profile_register(struct device *dev, const char *name, + void *drvdata, + const struct platform_profile_ops *ops) +{ + struct device *ppdev; + int minor; + int err; + + /* Sanity check */ + if (WARN_ON_ONCE(!dev || !name || !ops || !ops->profile_get || + !ops->profile_set || !ops->probe)) + return ERR_PTR(-EINVAL); + + struct platform_profile_handler *pprof __free(kfree) = kzalloc( + sizeof(*pprof), GFP_KERNEL); + if (!pprof) + return ERR_PTR(-ENOMEM); + + err = ops->probe(drvdata, pprof->choices); + if (err) { + dev_err(dev, "platform_profile probe failed\n"); + return ERR_PTR(err); + } + + if (bitmap_empty(pprof->choices, PLATFORM_PROFILE_LAST)) { + dev_err(dev, "Failed to register platform_profile class device with empty choices\n"); + return ERR_PTR(-EINVAL); + } + + if (ops->hidden_choices) { + err = ops->hidden_choices(drvdata, pprof->hidden_choices); + if (err) { + dev_err(dev, "platform_profile hidden_choices failed\n"); + return ERR_PTR(err); + } + } + + guard(mutex)(&profile_lock); + + /* create class interface for individual handler */ + minor = ida_alloc(&platform_profile_ida, GFP_KERNEL); + if (minor < 0) + return ERR_PTR(minor); + + pprof->name = name; + pprof->ops = ops; + pprof->minor = minor; + pprof->dev.class = &platform_profile_class; + pprof->dev.parent = dev; + dev_set_drvdata(&pprof->dev, drvdata); + dev_set_name(&pprof->dev, "platform-profile-%d", pprof->minor); + /* device_register() takes ownership of pprof/ppdev */ + ppdev = &no_free_ptr(pprof)->dev; + err = device_register(ppdev); + if (err) { + put_device(ppdev); + goto cleanup_ida; + } + + sysfs_notify(acpi_kobj, NULL, "platform_profile"); + + err = sysfs_update_group(acpi_kobj, &platform_profile_group); + if (err) + goto cleanup_cur; + + return ppdev; + +cleanup_cur: + device_unregister(ppdev); + +cleanup_ida: + ida_free(&platform_profile_ida, minor); + + return ERR_PTR(err); +} +EXPORT_SYMBOL_GPL(platform_profile_register); + +/** + * platform_profile_remove - Unregisters a platform 
profile class device + * @dev: Class device + */ +void platform_profile_remove(struct device *dev) +{ + struct platform_profile_handler *pprof; + + if (IS_ERR_OR_NULL(dev)) + return; + + pprof = to_pprof_handler(dev); + + guard(mutex)(&profile_lock); + + ida_free(&platform_profile_ida, pprof->minor); + device_unregister(&pprof->dev); + + sysfs_notify(acpi_kobj, NULL, "platform_profile"); + sysfs_update_group(acpi_kobj, &platform_profile_group); +} +EXPORT_SYMBOL_GPL(platform_profile_remove); + +static void devm_platform_profile_release(struct device *dev, void *res) +{ + struct device **ppdev = res; + + platform_profile_remove(*ppdev); +} + +/** + * devm_platform_profile_register - Device managed version of platform_profile_register + * @dev: Parent device + * @name: Name of the class device + * @drvdata: Driver data that will be attached to the class device + * @ops: Platform profile's mandatory operations + * + * Return: pointer to the new class device on success, ERR_PTR on failure + */ +struct device *devm_platform_profile_register(struct device *dev, const char *name, + void *drvdata, + const struct platform_profile_ops *ops) +{ + struct device *ppdev; + struct device **dr; + + dr = devres_alloc(devm_platform_profile_release, sizeof(*dr), GFP_KERNEL); + if (!dr) + return ERR_PTR(-ENOMEM); + + ppdev = platform_profile_register(dev, name, drvdata, ops); + if (IS_ERR(ppdev)) { + devres_free(dr); + return ppdev; + } + + *dr = ppdev; + devres_add(dev, dr); + + return ppdev; +} +EXPORT_SYMBOL_GPL(devm_platform_profile_register); + +static int __init platform_profile_init(void) +{ + int err; + + if (acpi_disabled) + return -EOPNOTSUPP; + + err = class_register(&platform_profile_class); + if (err) + return err; + + err = sysfs_create_group(acpi_kobj, &platform_profile_group); + if (err) + class_unregister(&platform_profile_class); + + return err; +} + +static void __exit platform_profile_exit(void) +{ + sysfs_remove_group(acpi_kobj, &platform_profile_group); + class_unregister(&platform_profile_class); +} +module_init(platform_profile_init); +module_exit(platform_profile_exit); + +MODULE_AUTHOR("Mark Pearson <markpearson@lenovo.com>"); +MODULE_DESCRIPTION("ACPI platform profile sysfs interface"); +MODULE_LICENSE("GPL"); diff --git a/drivers/acpi/pmic/Kconfig b/drivers/acpi/pmic/Kconfig new file mode 100644 index 000000000000..f84b8f6038dc --- /dev/null +++ b/drivers/acpi/pmic/Kconfig @@ -0,0 +1,67 @@ +# SPDX-License-Identifier: GPL-2.0 + +menuconfig PMIC_OPREGION + bool "PMIC (Power Management Integrated Circuit) operation region support" + help + Select this option to enable support for ACPI operation + region of the PMIC chip. The operation region can be used + to control power rails and sensor reading/writing on the + PMIC chip. + +if PMIC_OPREGION + +config BYTCRC_PMIC_OPREGION + bool "ACPI operation region support for Bay Trail Crystal Cove PMIC" + depends on INTEL_SOC_PMIC + help + This config adds ACPI operation region support for the Bay Trail + version of the Crystal Cove PMIC. + +config CHTCRC_PMIC_OPREGION + bool "ACPI operation region support for Cherry Trail Crystal Cove PMIC" + depends on INTEL_SOC_PMIC + help + This config adds ACPI operation region support for the Cherry Trail + version of the Crystal Cove PMIC. + +config XPOWER_PMIC_OPREGION + bool "ACPI operation region support for XPower AXP288 PMIC" + depends on MFD_AXP20X_I2C && IOSF_MBI=y + help + This config adds ACPI operation region support for XPower AXP288 PMIC. 
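Returning briefly to the platform-profile class interface added above: the sketch below shows how a driver might consume it. All demo_* names are invented; only the ops signatures, the choices bitmap handling and devm_platform_profile_register() are taken from the code above. Userspace then selects a profile either through the per-handler class device under /sys/class/platform-profile/ or through the legacy /sys/firmware/acpi/platform_profile files.

	/* Illustrative, hypothetical consumer of the platform-profile class API above. */
	#include <linux/bitops.h>
	#include <linux/err.h>
	#include <linux/platform_device.h>
	#include <linux/platform_profile.h>

	struct demo_pprof_data {
		enum platform_profile_option cur_profile;
	};

	static int demo_profile_get(struct device *dev,
				    enum platform_profile_option *profile)
	{
		struct demo_pprof_data *data = dev_get_drvdata(dev);

		*profile = data->cur_profile;
		return 0;
	}

	static int demo_profile_set(struct device *dev,
				    enum platform_profile_option profile)
	{
		struct demo_pprof_data *data = dev_get_drvdata(dev);

		/* A real driver would poke its EC/ACPI interface here. */
		data->cur_profile = profile;
		return 0;
	}

	static int demo_profile_probe(void *drvdata, unsigned long *choices)
	{
		/* Advertise the profiles this platform can actually provide. */
		set_bit(PLATFORM_PROFILE_LOW_POWER, choices);
		set_bit(PLATFORM_PROFILE_BALANCED, choices);
		set_bit(PLATFORM_PROFILE_PERFORMANCE, choices);
		return 0;
	}

	static const struct platform_profile_ops demo_profile_ops = {
		.probe = demo_profile_probe,
		.profile_get = demo_profile_get,
		.profile_set = demo_profile_set,
	};

	static int demo_driver_probe(struct platform_device *pdev)
	{
		struct demo_pprof_data *data;
		struct device *ppdev;

		data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;
		data->cur_profile = PLATFORM_PROFILE_BALANCED;

		/* Registers a "platform-profile-N" class device parented to pdev. */
		ppdev = devm_platform_profile_register(&pdev->dev, "demo-profile",
						       data, &demo_profile_ops);
		return PTR_ERR_OR_ZERO(ppdev);
	}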
+ +config BXT_WC_PMIC_OPREGION + bool "ACPI operation region support for BXT WhiskeyCove PMIC" + depends on INTEL_SOC_PMIC_BXTWC + help + This config adds ACPI operation region support for BXT WhiskeyCove PMIC. + +config CHT_WC_PMIC_OPREGION + bool "ACPI operation region support for CHT Whiskey Cove PMIC" + depends on INTEL_SOC_PMIC_CHTWC + help + This config adds ACPI operation region support for CHT Whiskey Cove PMIC. + +config CHT_DC_TI_PMIC_OPREGION + bool "ACPI operation region support for Dollar Cove TI PMIC" + depends on INTEL_SOC_PMIC_CHTDC_TI + help + This config adds ACPI operation region support for Dollar Cove TI PMIC. + +endif # PMIC_OPREGION + +config TPS68470_PMIC_OPREGION + bool "ACPI operation region support for TPS68470 PMIC" + depends on INTEL_SKL_INT3472 + help + This config adds ACPI operation region support for TI TPS68470 PMIC. + TPS68470 device is an advanced power management unit that powers + a Compact Camera Module (CCM), generates clocks for image sensors, + drives a dual LED for flash and incorporates two LED drivers for + general purpose indicators. + This driver enables ACPI operation region support control voltage + regulators and clocks. + + This option is a bool as it provides an ACPI operation + region, which must be available before any of the devices + using this, are probed. diff --git a/drivers/acpi/pmic/Makefile b/drivers/acpi/pmic/Makefile new file mode 100644 index 000000000000..cd072c64920c --- /dev/null +++ b/drivers/acpi/pmic/Makefile @@ -0,0 +1,10 @@ +# SPDX-License-Identifier: GPL-2.0 + +obj-$(CONFIG_PMIC_OPREGION) += intel_pmic.o +obj-$(CONFIG_BYTCRC_PMIC_OPREGION) += intel_pmic_bytcrc.o +obj-$(CONFIG_CHTCRC_PMIC_OPREGION) += intel_pmic_chtcrc.o +obj-$(CONFIG_XPOWER_PMIC_OPREGION) += intel_pmic_xpower.o +obj-$(CONFIG_BXT_WC_PMIC_OPREGION) += intel_pmic_bxtwc.o +obj-$(CONFIG_CHT_WC_PMIC_OPREGION) += intel_pmic_chtwc.o +obj-$(CONFIG_CHT_DC_TI_PMIC_OPREGION) += intel_pmic_chtdc_ti.o +obj-$(CONFIG_TPS68470_PMIC_OPREGION) += tps68470_pmic.o diff --git a/drivers/acpi/pmic/intel_pmic.c b/drivers/acpi/pmic/intel_pmic.c index ca18e0d23df9..134e9ca8eaa2 100644 --- a/drivers/acpi/pmic/intel_pmic.c +++ b/drivers/acpi/pmic/intel_pmic.c @@ -1,20 +1,13 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * intel_pmic.c - Intel PMIC operation region driver * * Copyright (C) 2014 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version - * 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
*/ #include <linux/export.h> #include <linux/acpi.h> +#include <linux/mfd/intel_soc_pmic.h> #include <linux/regmap.h> #include <acpi/acpi_lpat.h> #include "intel_pmic.h" @@ -32,11 +25,13 @@ struct intel_pmic_opregion { struct mutex lock; struct acpi_lpat_conversion_table *lpat_table; struct regmap *regmap; - struct intel_pmic_opregion_data *data; + const struct intel_pmic_opregion_data *data; struct intel_pmic_regs_handler_ctx ctx; }; -static int pmic_get_reg_bit(int address, struct pmic_table *table, +static struct intel_pmic_opregion *intel_pmic_opregion; + +static int pmic_get_reg_bit(int address, const struct pmic_table *table, int count, int *reg, int *bit) { int i; @@ -58,7 +53,7 @@ static acpi_status intel_pmic_power_handler(u32 function, { struct intel_pmic_opregion *opregion = region_context; struct regmap *regmap = opregion->regmap; - struct intel_pmic_opregion_data *d = opregion->data; + const struct intel_pmic_opregion_data *d = opregion->data; int reg, bit, result; if (bits != 32 || !value64) @@ -100,7 +95,7 @@ static int pmic_read_temp(struct intel_pmic_opregion *opregion, return 0; } - temp = acpi_lpat_raw_to_temp(opregion->lpat_table, raw_temp); + temp = opregion->data->lpat_raw_to_temp(opregion->lpat_table, raw_temp); if (temp < 0) return temp; @@ -140,7 +135,7 @@ static int pmic_thermal_aux(struct intel_pmic_opregion *opregion, int reg, static int pmic_thermal_pen(struct intel_pmic_opregion *opregion, int reg, int bit, u32 function, u64 *value) { - struct intel_pmic_opregion_data *d = opregion->data; + const struct intel_pmic_opregion_data *d = opregion->data; struct regmap *regmap = opregion->regmap; if (!d->get_policy || !d->update_policy) @@ -176,7 +171,7 @@ static acpi_status intel_pmic_thermal_handler(u32 function, void *handler_context, void *region_context) { struct intel_pmic_opregion *opregion = region_context; - struct intel_pmic_opregion_data *d = opregion->data; + const struct intel_pmic_opregion_data *d = opregion->data; int reg, bit, result; if (bits != 32 || !value64) @@ -216,31 +211,36 @@ static acpi_status intel_pmic_regs_handler(u32 function, void *handler_context, void *region_context) { struct intel_pmic_opregion *opregion = region_context; - int result = 0; + int result = -EINVAL; + + if (function == ACPI_WRITE) { + switch (address) { + case 0: + return AE_OK; + case 1: + opregion->ctx.addr |= (*value64 & 0xff) << 8; + return AE_OK; + case 2: + opregion->ctx.addr |= *value64 & 0xff; + return AE_OK; + case 3: + opregion->ctx.val = *value64 & 0xff; + return AE_OK; + case 4: + if (*value64) { + result = regmap_write(opregion->regmap, opregion->ctx.addr, + opregion->ctx.val); + } else { + result = regmap_read(opregion->regmap, opregion->ctx.addr, + &opregion->ctx.val); + } + opregion->ctx.addr = 0; + } + } - switch (address) { - case 0: + if (function == ACPI_READ && address == 3) { + *value64 = opregion->ctx.val; return AE_OK; - case 1: - opregion->ctx.addr |= (*value64 & 0xff) << 8; - return AE_OK; - case 2: - opregion->ctx.addr |= *value64 & 0xff; - return AE_OK; - case 3: - opregion->ctx.val = *value64 & 0xff; - return AE_OK; - case 4: - if (*value64) { - result = regmap_write(opregion->regmap, opregion->ctx.addr, - opregion->ctx.val); - } else { - result = regmap_read(opregion->regmap, opregion->ctx.addr, - &opregion->ctx.val); - if (result == 0) - *value64 = opregion->ctx.val; - } - memset(&opregion->ctx, 0x00, sizeof(opregion->ctx)); } if (result < 0) { @@ -255,9 +255,9 @@ static acpi_status intel_pmic_regs_handler(u32 function, int 
intel_pmic_install_opregion_handler(struct device *dev, acpi_handle handle, struct regmap *regmap, - struct intel_pmic_opregion_data *d) + const struct intel_pmic_opregion_data *d) { - acpi_status status; + acpi_status status = AE_OK; struct intel_pmic_opregion *opregion; int ret; @@ -275,7 +275,8 @@ int intel_pmic_install_opregion_handler(struct device *dev, acpi_handle handle, opregion->regmap = regmap; opregion->lpat_table = acpi_lpat_get_conversion_table(handle); - status = acpi_install_address_space_handler(handle, + if (d->power_table_count) + status = acpi_install_address_space_handler(handle, PMIC_POWER_OPREGION_ID, intel_pmic_power_handler, NULL, opregion); @@ -284,13 +285,12 @@ int intel_pmic_install_opregion_handler(struct device *dev, acpi_handle handle, goto out_error; } - status = acpi_install_address_space_handler(handle, + if (d->thermal_table_count) + status = acpi_install_address_space_handler(handle, PMIC_THERMAL_OPREGION_ID, intel_pmic_thermal_handler, NULL, opregion); if (ACPI_FAILURE(status)) { - acpi_remove_address_space_handler(handle, PMIC_POWER_OPREGION_ID, - intel_pmic_power_handler); ret = -ENODEV; goto out_remove_power_handler; } @@ -304,18 +304,80 @@ int intel_pmic_install_opregion_handler(struct device *dev, acpi_handle handle, } opregion->data = d; + intel_pmic_opregion = opregion; return 0; out_remove_thermal_handler: - acpi_remove_address_space_handler(handle, PMIC_THERMAL_OPREGION_ID, - intel_pmic_thermal_handler); + if (d->thermal_table_count) + acpi_remove_address_space_handler(handle, + PMIC_THERMAL_OPREGION_ID, + intel_pmic_thermal_handler); out_remove_power_handler: - acpi_remove_address_space_handler(handle, PMIC_POWER_OPREGION_ID, - intel_pmic_power_handler); + if (d->power_table_count) + acpi_remove_address_space_handler(handle, + PMIC_POWER_OPREGION_ID, + intel_pmic_power_handler); out_error: acpi_lpat_free_conversion_table(opregion->lpat_table); return ret; } EXPORT_SYMBOL_GPL(intel_pmic_install_opregion_handler); + +/** + * intel_soc_pmic_exec_mipi_pmic_seq_element - Execute PMIC MIPI sequence + * @i2c_address: I2C client address for the PMIC + * @reg_address: PMIC register address + * @value: New value for the register bits to change + * @mask: Mask indicating which register bits to change + * + * DSI LCD panels describe an initialization sequence in the i915 VBT (Video + * BIOS Tables) using so called MIPI sequences. One possible element in these + * sequences is a PMIC specific element of 15 bytes. + * + * This function executes these PMIC specific elements sending the embedded + * commands to the PMIC. + * + * Return 0 on success, < 0 on failure. 
+ */ +int intel_soc_pmic_exec_mipi_pmic_seq_element(u16 i2c_address, u32 reg_address, + u32 value, u32 mask) +{ + const struct intel_pmic_opregion_data *d; + int ret; + + if (!intel_pmic_opregion) { + pr_warn("%s: No PMIC registered\n", __func__); + return -ENXIO; + } + + d = intel_pmic_opregion->data; + + mutex_lock(&intel_pmic_opregion->lock); + + if (d->exec_mipi_pmic_seq_element) { + ret = d->exec_mipi_pmic_seq_element(intel_pmic_opregion->regmap, + i2c_address, reg_address, + value, mask); + } else if (d->pmic_i2c_address) { + if (i2c_address == d->pmic_i2c_address) { + ret = regmap_update_bits(intel_pmic_opregion->regmap, + reg_address, mask, value); + } else { + pr_err("%s: Unexpected i2c-addr: 0x%02x (reg-addr 0x%x value 0x%x mask 0x%x)\n", + __func__, i2c_address, reg_address, value, mask); + ret = -ENXIO; + } + } else { + pr_warn("%s: Not implemented\n", __func__); + pr_warn("%s: i2c-addr: 0x%x reg-addr 0x%x value 0x%x mask 0x%x\n", + __func__, i2c_address, reg_address, value, mask); + ret = -EOPNOTSUPP; + } + + mutex_unlock(&intel_pmic_opregion->lock); + + return ret; +} +EXPORT_SYMBOL_GPL(intel_soc_pmic_exec_mipi_pmic_seq_element); diff --git a/drivers/acpi/pmic/intel_pmic.h b/drivers/acpi/pmic/intel_pmic.h index 095afc96952e..006f0780ffab 100644 --- a/drivers/acpi/pmic/intel_pmic.h +++ b/drivers/acpi/pmic/intel_pmic.h @@ -2,6 +2,8 @@ #ifndef __INTEL_PMIC_H #define __INTEL_PMIC_H +#include <acpi/acpi_lpat.h> + struct pmic_table { int address; /* operation region address */ int reg; /* corresponding thermal register */ @@ -15,12 +17,20 @@ struct intel_pmic_opregion_data { int (*update_aux)(struct regmap *r, int reg, int raw_temp); int (*get_policy)(struct regmap *r, int reg, int bit, u64 *value); int (*update_policy)(struct regmap *r, int reg, int bit, int enable); - struct pmic_table *power_table; + int (*exec_mipi_pmic_seq_element)(struct regmap *r, u16 i2c_address, + u32 reg_address, u32 value, u32 mask); + int (*lpat_raw_to_temp)(struct acpi_lpat_conversion_table *lpat_table, + int raw); + const struct pmic_table *power_table; int power_table_count; - struct pmic_table *thermal_table; + const struct pmic_table *thermal_table; int thermal_table_count; + /* For generic exec_mipi_pmic_seq_element handling */ + int pmic_i2c_address; }; -int intel_pmic_install_opregion_handler(struct device *dev, acpi_handle handle, struct regmap *regmap, struct intel_pmic_opregion_data *d); +int intel_pmic_install_opregion_handler(struct device *dev, acpi_handle handle, + struct regmap *regmap, + const struct intel_pmic_opregion_data *d); #endif diff --git a/drivers/acpi/pmic/intel_pmic_bxtwc.c b/drivers/acpi/pmic/intel_pmic_bxtwc.c index bd7621edd60b..c332afbf82bd 100644 --- a/drivers/acpi/pmic/intel_pmic_bxtwc.c +++ b/drivers/acpi/pmic/intel_pmic_bxtwc.c @@ -24,7 +24,7 @@ #define VSWITCH1_OUTPUT BIT(4) #define VUSBPHY_CHARGE BIT(1) -static struct pmic_table power_table[] = { +static const struct pmic_table power_table[] = { { .address = 0x0, .reg = 0x63, @@ -177,7 +177,7 @@ static struct pmic_table power_table[] = { } /* MOFF -> MODEMCTRL Bit 0 */ }; -static struct pmic_table thermal_table[] = { +static const struct pmic_table thermal_table[] = { { .address = 0x00, .reg = 0x4F39 @@ -369,13 +369,14 @@ intel_bxtwc_pmic_update_policy(struct regmap *regmap, return regmap_update_bits(regmap, reg, mask, val); } -static struct intel_pmic_opregion_data intel_bxtwc_pmic_opregion_data = { +static const struct intel_pmic_opregion_data intel_bxtwc_pmic_opregion_data = { .get_power = 
intel_bxtwc_pmic_get_power, .update_power = intel_bxtwc_pmic_update_power, .get_raw_temp = intel_bxtwc_pmic_get_raw_temp, .update_aux = intel_bxtwc_pmic_update_aux, .get_policy = intel_bxtwc_pmic_get_policy, .update_policy = intel_bxtwc_pmic_update_policy, + .lpat_raw_to_temp = acpi_lpat_raw_to_temp, .power_table = power_table, .power_table_count = ARRAY_SIZE(power_table), .thermal_table = thermal_table, diff --git a/drivers/acpi/pmic/intel_pmic_crc.c b/drivers/acpi/pmic/intel_pmic_bytcrc.c index a0f411a6e5ac..b4c21a75294a 100644 --- a/drivers/acpi/pmic/intel_pmic_crc.c +++ b/drivers/acpi/pmic/intel_pmic_bytcrc.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* - * Intel CrystalCove PMIC operation region driver + * Intel Bay Trail Crystal Cove PMIC operation region driver * * Copyright (C) 2014 Intel Corporation. All rights reserved. */ @@ -16,7 +16,7 @@ #define PMIC_A0LOCK_REG 0xc5 -static struct pmic_table power_table[] = { +static const struct pmic_table power_table[] = { /* { .address = 0x00, .reg = ??, @@ -134,7 +134,7 @@ static struct pmic_table power_table[] = { }, /* V105 -> V1P05S, L2 SRAM */ }; -static struct pmic_table thermal_table[] = { +static const struct pmic_table thermal_table[] = { { .address = 0x00, .reg = 0x75 @@ -271,17 +271,19 @@ static int intel_crc_pmic_update_policy(struct regmap *regmap, return 0; } -static struct intel_pmic_opregion_data intel_crc_pmic_opregion_data = { +static const struct intel_pmic_opregion_data intel_crc_pmic_opregion_data = { .get_power = intel_crc_pmic_get_power, .update_power = intel_crc_pmic_update_power, .get_raw_temp = intel_crc_pmic_get_raw_temp, .update_aux = intel_crc_pmic_update_aux, .get_policy = intel_crc_pmic_get_policy, .update_policy = intel_crc_pmic_update_policy, + .lpat_raw_to_temp = acpi_lpat_raw_to_temp, .power_table = power_table, .power_table_count= ARRAY_SIZE(power_table), .thermal_table = thermal_table, .thermal_table_count = ARRAY_SIZE(thermal_table), + .pmic_i2c_address = 0x6e, }; static int intel_crc_pmic_opregion_probe(struct platform_device *pdev) @@ -295,7 +297,7 @@ static int intel_crc_pmic_opregion_probe(struct platform_device *pdev) static struct platform_driver intel_crc_pmic_opregion_driver = { .probe = intel_crc_pmic_opregion_probe, .driver = { - .name = "crystal_cove_pmic", + .name = "byt_crystal_cove_pmic", }, }; builtin_platform_driver(intel_crc_pmic_opregion_driver); diff --git a/drivers/acpi/pmic/intel_pmic_chtcrc.c b/drivers/acpi/pmic/intel_pmic_chtcrc.c new file mode 100644 index 000000000000..f9301c6f098e --- /dev/null +++ b/drivers/acpi/pmic/intel_pmic_chtcrc.c @@ -0,0 +1,45 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Intel Cherry Trail Crystal Cove PMIC operation region driver + * + * Copyright (C) 2019 Hans de Goede <hdegoede@redhat.com> + */ + +#include <linux/acpi.h> +#include <linux/init.h> +#include <linux/mfd/intel_soc_pmic.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> +#include "intel_pmic.h" + +/* + * We have no docs for the CHT Crystal Cove PMIC. The Asus Zenfone-2 kernel + * code has 2 Crystal Cove regulator drivers, one calls the PMIC a "Crystal + * Cove Plus" PMIC and talks about Cherry Trail, so presumably that one + * could be used to get register info for the regulators if we need to + * implement regulator support in the future. + * + * For now the sole purpose of this driver is to make + * intel_soc_pmic_exec_mipi_pmic_seq_element work on devices with a + * CHT Crystal Cove PMIC. 
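As an illustration of how the helper exported above is meant to be called (the kernel-doc for intel_soc_pmic_exec_mipi_pmic_seq_element describes the parameters), a hypothetical caller such as display code executing a VBT MIPI sequence might look like the sketch below. The register, value and mask are invented; 0x6e matches the Crystal Cove pmic_i2c_address set by the bytcrc/chtcrc drivers in this patch.

	/*
	 * Hypothetical caller, not part of the patch: set bit 0 of an invented
	 * PMIC register on the Crystal Cove PMIC at I2C address 0x6e.
	 */
	#include <linux/mfd/intel_soc_pmic.h>

	static int demo_panel_enable_rail(void)
	{
		return intel_soc_pmic_exec_mipi_pmic_seq_element(0x6e, 0x52, 0x01, 0x01);
	}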
+ */ +static const struct intel_pmic_opregion_data intel_chtcrc_pmic_opregion_data = { + .lpat_raw_to_temp = acpi_lpat_raw_to_temp, + .pmic_i2c_address = 0x6e, +}; + +static int intel_chtcrc_pmic_opregion_probe(struct platform_device *pdev) +{ + struct intel_soc_pmic *pmic = dev_get_drvdata(pdev->dev.parent); + return intel_pmic_install_opregion_handler(&pdev->dev, + ACPI_HANDLE(pdev->dev.parent), pmic->regmap, + &intel_chtcrc_pmic_opregion_data); +} + +static struct platform_driver intel_chtcrc_pmic_opregion_driver = { + .probe = intel_chtcrc_pmic_opregion_probe, + .driver = { + .name = "cht_crystal_cove_pmic", + }, +}; +builtin_platform_driver(intel_chtcrc_pmic_opregion_driver); diff --git a/drivers/acpi/pmic/intel_pmic_chtdc_ti.c b/drivers/acpi/pmic/intel_pmic_chtdc_ti.c index 7ccd7d9660bc..ecb36fbc1e7f 100644 --- a/drivers/acpi/pmic/intel_pmic_chtdc_ti.c +++ b/drivers/acpi/pmic/intel_pmic_chtdc_ti.c @@ -8,34 +8,38 @@ */ #include <linux/acpi.h> +#include <linux/bits.h> #include <linux/init.h> #include <linux/mfd/intel_soc_pmic.h> #include <linux/platform_device.h> +#include <asm/byteorder.h> #include "intel_pmic.h" /* registers stored in 16bit BE (high:low, total 10bit) */ +#define PMIC_REG_MASK GENMASK(9, 0) + #define CHTDC_TI_VBAT 0x54 #define CHTDC_TI_DIETEMP 0x56 #define CHTDC_TI_BPTHERM 0x58 #define CHTDC_TI_GPADC 0x5a -static struct pmic_table chtdc_ti_power_table[] = { - { .address = 0x00, .reg = 0x41 }, - { .address = 0x04, .reg = 0x42 }, - { .address = 0x08, .reg = 0x43 }, - { .address = 0x0c, .reg = 0x45 }, - { .address = 0x10, .reg = 0x46 }, - { .address = 0x14, .reg = 0x47 }, - { .address = 0x18, .reg = 0x48 }, - { .address = 0x1c, .reg = 0x49 }, - { .address = 0x20, .reg = 0x4a }, - { .address = 0x24, .reg = 0x4b }, - { .address = 0x28, .reg = 0x4c }, - { .address = 0x2c, .reg = 0x4d }, - { .address = 0x30, .reg = 0x4e }, +static const struct pmic_table chtdc_ti_power_table[] = { + { .address = 0x00, .reg = 0x41 }, /* LDO1 */ + { .address = 0x04, .reg = 0x42 }, /* LDO2 */ + { .address = 0x08, .reg = 0x43 }, /* LDO3 */ + { .address = 0x0c, .reg = 0x45 }, /* LDO5 */ + { .address = 0x10, .reg = 0x46 }, /* LDO6 */ + { .address = 0x14, .reg = 0x47 }, /* LDO7 */ + { .address = 0x18, .reg = 0x48 }, /* LDO8 */ + { .address = 0x1c, .reg = 0x49 }, /* LDO9 */ + { .address = 0x20, .reg = 0x4a }, /* LD10 */ + { .address = 0x24, .reg = 0x4b }, /* LD11 */ + { .address = 0x28, .reg = 0x4c }, /* LD12 */ + { .address = 0x2c, .reg = 0x4d }, /* LD13 */ + { .address = 0x30, .reg = 0x4e }, /* LD14 */ }; -static struct pmic_table chtdc_ti_thermal_table[] = { +static const struct pmic_table chtdc_ti_thermal_table[] = { { .address = 0x00, .reg = CHTDC_TI_GPADC @@ -73,7 +77,7 @@ static int chtdc_ti_pmic_get_power(struct regmap *regmap, int reg, int bit, if (regmap_read(regmap, reg, &data)) return -EIO; - *value = data & 1; + *value = data & BIT(0); return 0; } @@ -85,23 +89,24 @@ static int chtdc_ti_pmic_update_power(struct regmap *regmap, int reg, int bit, static int chtdc_ti_pmic_get_raw_temp(struct regmap *regmap, int reg) { - u8 buf[2]; + __be16 buf; - if (regmap_bulk_read(regmap, reg, buf, 2)) + if (regmap_bulk_read(regmap, reg, &buf, sizeof(buf))) return -EIO; - /* stored in big-endian */ - return ((buf[0] & 0x03) << 8) | buf[1]; + return be16_to_cpu(buf) & PMIC_REG_MASK; } -static struct intel_pmic_opregion_data chtdc_ti_pmic_opregion_data = { +static const struct intel_pmic_opregion_data chtdc_ti_pmic_opregion_data = { .get_power = chtdc_ti_pmic_get_power, .update_power = 
chtdc_ti_pmic_update_power, .get_raw_temp = chtdc_ti_pmic_get_raw_temp, + .lpat_raw_to_temp = acpi_lpat_raw_to_temp, .power_table = chtdc_ti_power_table, .power_table_count = ARRAY_SIZE(chtdc_ti_power_table), .thermal_table = chtdc_ti_thermal_table, .thermal_table_count = ARRAY_SIZE(chtdc_ti_thermal_table), + .pmic_i2c_address = 0x5e, }; static int chtdc_ti_pmic_opregion_probe(struct platform_device *pdev) @@ -116,7 +121,7 @@ static int chtdc_ti_pmic_opregion_probe(struct platform_device *pdev) return err; /* Re-enumerate devices depending on PMIC */ - acpi_walk_dep_device_list(ACPI_HANDLE(pdev->dev.parent)); + acpi_dev_clear_dependencies(ACPI_COMPANION(pdev->dev.parent)); return 0; } diff --git a/drivers/acpi/pmic/intel_pmic_chtwc.c b/drivers/acpi/pmic/intel_pmic_chtwc.c index 078b0448f30a..81caede51ca2 100644 --- a/drivers/acpi/pmic/intel_pmic_chtwc.c +++ b/drivers/acpi/pmic/intel_pmic_chtwc.c @@ -70,7 +70,7 @@ * "regulator: whiskey_cove: implements Whiskey Cove pmic VRF support" * https://github.com/intel-aero/meta-intel-aero/blob/master/recipes-kernel/linux/linux-yocto/0019-regulator-whiskey_cove-implements-WhiskeyCove-pmic-V.patch */ -static struct pmic_table power_table[] = { +static const struct pmic_table power_table[] = { { .address = 0x0, .reg = CHT_WC_V1P8A_CTRL, @@ -231,13 +231,34 @@ static int intel_cht_wc_pmic_update_power(struct regmap *regmap, int reg, return regmap_update_bits(regmap, reg, bitmask, on ? 1 : 0); } +static int intel_cht_wc_exec_mipi_pmic_seq_element(struct regmap *regmap, + u16 i2c_client_address, + u32 reg_address, + u32 value, u32 mask) +{ + struct device *dev = regmap_get_device(regmap); + u32 address; + + if (i2c_client_address > 0xff || reg_address > 0xff) { + dev_warn(dev, "warning addresses too big client 0x%x reg 0x%x\n", + i2c_client_address, reg_address); + return -ERANGE; + } + + address = (i2c_client_address << 8) | reg_address; + + return regmap_update_bits(regmap, address, mask, value); +} + /* * The thermal table and ops are empty, we do not support the Thermal opregion * (DPTF) due to lacking documentation. */ -static struct intel_pmic_opregion_data intel_cht_wc_pmic_opregion_data = { +static const struct intel_pmic_opregion_data intel_cht_wc_pmic_opregion_data = { .get_power = intel_cht_wc_pmic_get_power, .update_power = intel_cht_wc_pmic_update_power, + .exec_mipi_pmic_seq_element = intel_cht_wc_exec_mipi_pmic_seq_element, + .lpat_raw_to_temp = acpi_lpat_raw_to_temp, .power_table = power_table, .power_table_count = ARRAY_SIZE(power_table), }; diff --git a/drivers/acpi/pmic/intel_pmic_xpower.c b/drivers/acpi/pmic/intel_pmic_xpower.c index e7c0006e6602..49bda5e0c8aa 100644 --- a/drivers/acpi/pmic/intel_pmic_xpower.c +++ b/drivers/acpi/pmic/intel_pmic_xpower.c @@ -26,7 +26,7 @@ #define AXP288_ADC_TS_CURRENT_ON_ONDEMAND (2 << 0) #define AXP288_ADC_TS_CURRENT_ON (3 << 0) -static struct pmic_table power_table[] = { +static const struct pmic_table power_table[] = { { .address = 0x00, .reg = 0x13, @@ -129,7 +129,7 @@ static struct pmic_table power_table[] = { }; /* TMP0 - TMP5 are the same, all from GPADC */ -static struct pmic_table thermal_table[] = { +static const struct pmic_table thermal_table[] = { { .address = 0x00, .reg = XPOWER_GPADC_LOW @@ -178,15 +178,17 @@ static int intel_xpower_pmic_update_power(struct regmap *regmap, int reg, { int data, ret; - /* GPIO1 LDO regulator needs special handling */ - if (reg == XPOWER_GPI1_CTRL) - return regmap_update_bits(regmap, reg, GPI1_LDO_MASK, - on ? 
GPI1_LDO_ON : GPI1_LDO_OFF); - ret = iosf_mbi_block_punit_i2c_access(); if (ret) return ret; + /* GPIO1 LDO regulator needs special handling */ + if (reg == XPOWER_GPI1_CTRL) { + ret = regmap_update_bits(regmap, reg, GPI1_LDO_MASK, + on ? GPI1_LDO_ON : GPI1_LDO_OFF); + goto out; + } + if (regmap_read(regmap, reg, &data)) { ret = -EIO; goto out; @@ -234,6 +236,11 @@ static int intel_xpower_pmic_get_raw_temp(struct regmap *regmap, int reg) return ret; if (adc_ts_pin_ctrl & AXP288_ADC_TS_CURRENT_ON_OFF_MASK) { + /* + * AXP288_ADC_TS_PIN_CTRL reads are cached by the regmap, so + * this does to a single I2C-transfer, and thus there is no + * need to explicitly call iosf_mbi_block_punit_i2c_access(). + */ ret = regmap_update_bits(regmap, AXP288_ADC_TS_PIN_CTRL, AXP288_ADC_TS_CURRENT_ON_OFF_MASK, AXP288_ADC_TS_CURRENT_ON_ONDEMAND); @@ -244,7 +251,11 @@ static int intel_xpower_pmic_get_raw_temp(struct regmap *regmap, int reg) usleep_range(6000, 10000); } - ret = regmap_bulk_read(regmap, AXP288_GP_ADC_H, buf, 2); + ret = iosf_mbi_block_punit_i2c_access(); + if (ret) + return ret; + + ret = regmap_bulk_read(regmap, AXP288_GP_ADC_H, buf, sizeof(buf)); if (ret == 0) ret = (buf[0] << 4) + ((buf[1] >> 4) & 0x0f); @@ -254,17 +265,67 @@ static int intel_xpower_pmic_get_raw_temp(struct regmap *regmap, int reg) AXP288_ADC_TS_CURRENT_ON); } + iosf_mbi_unblock_punit_i2c_access(); + + return ret; +} + +static int intel_xpower_exec_mipi_pmic_seq_element(struct regmap *regmap, + u16 i2c_address, u32 reg_address, + u32 value, u32 mask) +{ + struct device *dev = regmap_get_device(regmap); + int ret; + + if (i2c_address != 0x34) { + dev_err(dev, "Unexpected i2c-addr: 0x%02x (reg-addr 0x%x value 0x%x mask 0x%x)\n", + i2c_address, reg_address, value, mask); + return -ENXIO; + } + + ret = iosf_mbi_block_punit_i2c_access(); + if (ret) + return ret; + + ret = regmap_update_bits(regmap, reg_address, mask, value); + + iosf_mbi_unblock_punit_i2c_access(); + return ret; } -static struct intel_pmic_opregion_data intel_xpower_pmic_opregion_data = { +static int intel_xpower_lpat_raw_to_temp(struct acpi_lpat_conversion_table *lpat_table, + int raw) +{ + struct acpi_lpat first = lpat_table->lpat[0]; + struct acpi_lpat last = lpat_table->lpat[lpat_table->lpat_count - 1]; + + /* + * Some LPAT tables in the ACPI Device for the AXP288 PMIC for some + * reason only describe a small temperature range, e.g. 27° - 37° + * Celcius. Resulting in errors when the tablet is idle in a cool room. + * + * To avoid these errors clamp the raw value to be inside the LPAT. 
+ */ + if (first.raw < last.raw) + raw = clamp(raw, first.raw, last.raw); + else + raw = clamp(raw, last.raw, first.raw); + + return acpi_lpat_raw_to_temp(lpat_table, raw); +} + +static const struct intel_pmic_opregion_data intel_xpower_pmic_opregion_data = { .get_power = intel_xpower_pmic_get_power, .update_power = intel_xpower_pmic_update_power, .get_raw_temp = intel_xpower_pmic_get_raw_temp, + .exec_mipi_pmic_seq_element = intel_xpower_exec_mipi_pmic_seq_element, + .lpat_raw_to_temp = intel_xpower_lpat_raw_to_temp, .power_table = power_table, .power_table_count = ARRAY_SIZE(power_table), .thermal_table = thermal_table, .thermal_table_count = ARRAY_SIZE(thermal_table), + .pmic_i2c_address = 0x34, }; static acpi_status intel_xpower_pmic_gpio_handler(u32 function, diff --git a/drivers/acpi/pmic/tps68470_pmic.c b/drivers/acpi/pmic/tps68470_pmic.c index ebd03e472955..0d1a82eeb4b0 100644 --- a/drivers/acpi/pmic/tps68470_pmic.c +++ b/drivers/acpi/pmic/tps68470_pmic.c @@ -376,10 +376,8 @@ static int tps68470_pmic_opregion_probe(struct platform_device *pdev) struct tps68470_pmic_opregion *opregion; acpi_status status; - if (!dev || !tps68470_regmap) { - dev_warn(dev, "dev or regmap is NULL\n"); - return -EINVAL; - } + if (!tps68470_regmap) + return dev_err_probe(dev, -EINVAL, "regmap is missing\n"); if (!handle) { dev_warn(dev, "acpi handle is NULL\n"); diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c index 665e93ca0b40..361a7721a6a8 100644 --- a/drivers/acpi/power.c +++ b/drivers/acpi/power.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /* * drivers/acpi/power.c - ACPI Power Resources management. * @@ -5,20 +6,6 @@ * Author: Andy Grover <andrew.grover@intel.com> * Author: Paul Diefenbaugh <paul.s.diefenbaugh@intel.com> * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com> - * - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or (at - * your option) any later version. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ /* @@ -26,7 +13,7 @@ * 1. via "Device Specific (D-State) Control" * 2. via "Power Resource Control". * The code below deals with ACPI Power Resources control. - * + * * An ACPI "power resource object" represents a software controllable power * plane, clock plane, or other resource depended on by a device. * @@ -34,36 +21,42 @@ * may be shared by multiple devices. 
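 *
 * Because of that sharing, each power resource below is reference-counted:
 * its _ON method is only evaluated when the first user takes a reference
 * (acpi_power_on_unlocked()) and its _OFF method is only evaluated when the
 * last reference is dropped (acpi_power_off_unlocked()).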
*/ +#define pr_fmt(fmt) "ACPI: PM: " fmt + +#include <linux/delay.h> +#include <linux/dmi.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/types.h> #include <linux/slab.h> +#include <linux/string_choices.h> #include <linux/pm_runtime.h> #include <linux/sysfs.h> #include <linux/acpi.h> #include "sleep.h" #include "internal.h" -#define _COMPONENT ACPI_POWER_COMPONENT -ACPI_MODULE_NAME("power"); #define ACPI_POWER_CLASS "power_resource" #define ACPI_POWER_DEVICE_NAME "Power Resource" -#define ACPI_POWER_FILE_INFO "info" -#define ACPI_POWER_FILE_STATUS "state" #define ACPI_POWER_RESOURCE_STATE_OFF 0x00 #define ACPI_POWER_RESOURCE_STATE_ON 0x01 #define ACPI_POWER_RESOURCE_STATE_UNKNOWN 0xFF +struct acpi_power_dependent_device { + struct device *dev; + struct list_head node; +}; + struct acpi_power_resource { struct acpi_device device; struct list_head list_node; - char *name; u32 system_level; u32 order; unsigned int ref_count; - bool wakeup_enabled; + u8 state; struct mutex resource_lock; + struct list_head dependents; }; struct acpi_power_resource_entry { @@ -71,6 +64,9 @@ struct acpi_power_resource_entry { struct acpi_power_resource *resource; }; +static bool hp_eb_gp12pxp_quirk; +static bool unused_power_resources_quirk; + static LIST_HEAD(acpi_power_resource_list); static DEFINE_MUTEX(power_resource_list_lock); @@ -78,6 +74,11 @@ static DEFINE_MUTEX(power_resource_list_lock); Power Resource Management -------------------------------------------------------------------------- */ +static inline const char *resource_dev_name(struct acpi_power_resource *pr) +{ + return dev_name(&pr->device.dev); +} + static inline struct acpi_power_resource *to_power_resource(struct acpi_device *device) { @@ -86,9 +87,9 @@ struct acpi_power_resource *to_power_resource(struct acpi_device *device) static struct acpi_power_resource *acpi_power_get_context(acpi_handle handle) { - struct acpi_device *device; + struct acpi_device *device = acpi_fetch_acpi_dev(handle); - if (acpi_bus_get_device(handle, &device)) + if (!device) return NULL; return to_power_resource(device); @@ -156,6 +157,7 @@ int acpi_extract_power_resources(union acpi_object *package, unsigned int start, for (i = start; i < package->package.count; i++) { union acpi_object *element = &package->package.elements[i]; + struct acpi_device *rdev; acpi_handle rhandle; if (element->type != ACPI_TYPE_LOCAL_REFERENCE) { @@ -172,10 +174,11 @@ int acpi_extract_power_resources(union acpi_object *package, unsigned int start, if (acpi_power_resource_is_dup(package, start, i)) continue; - err = acpi_add_power_resource(rhandle); - if (err) + rdev = acpi_add_power_resource(rhandle); + if (!rdev) { + err = -ENODEV; break; - + } err = acpi_power_resources_list_add(rhandle, list); if (err) break; @@ -186,50 +189,54 @@ int acpi_extract_power_resources(union acpi_object *package, unsigned int start, return err; } -static int acpi_power_get_state(acpi_handle handle, int *state) +static int __get_state(acpi_handle handle, u8 *state) { acpi_status status = AE_OK; unsigned long long sta = 0; - char node_name[5]; - struct acpi_buffer buffer = { sizeof(node_name), node_name }; - - - if (!handle || !state) - return -EINVAL; + u8 cur_state; status = acpi_evaluate_integer(handle, "_STA", NULL, &sta); if (ACPI_FAILURE(status)) return -ENODEV; - *state = (sta & 0x01)?ACPI_POWER_RESOURCE_STATE_ON: - ACPI_POWER_RESOURCE_STATE_OFF; + cur_state = sta & ACPI_POWER_RESOURCE_STATE_ON; + + acpi_handle_debug(handle, "Power resource is %s\n", + 
str_on_off(cur_state)); - acpi_get_name(handle, ACPI_SINGLE_NAME, &buffer); + *state = cur_state; + return 0; +} - ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Resource [%s] is %s\n", - node_name, - *state ? "on" : "off")); +static int acpi_power_get_state(struct acpi_power_resource *resource, u8 *state) +{ + if (resource->state == ACPI_POWER_RESOURCE_STATE_UNKNOWN) { + int ret; + ret = __get_state(resource->device.handle, &resource->state); + if (ret) + return ret; + } + + *state = resource->state; return 0; } -static int acpi_power_get_list_state(struct list_head *list, int *state) +static int acpi_power_get_list_state(struct list_head *list, u8 *state) { struct acpi_power_resource_entry *entry; - int cur_state; + u8 cur_state = ACPI_POWER_RESOURCE_STATE_OFF; if (!list || !state) return -EINVAL; /* The state of the list is 'on' IFF all resources are 'on'. */ - cur_state = 0; list_for_each_entry(entry, list, node) { struct acpi_power_resource *resource = entry->resource; - acpi_handle handle = resource->device.handle; int result; mutex_lock(&resource->resource_lock); - result = acpi_power_get_state(handle, &cur_state); + result = acpi_power_get_state(resource, &cur_state); mutex_unlock(&resource->resource_lock); if (result) return result; @@ -238,23 +245,155 @@ static int acpi_power_get_list_state(struct list_head *list, int *state) break; } - ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Resource list is %s\n", - cur_state ? "on" : "off")); + pr_debug("Power resource list is %s\n", str_on_off(cur_state)); *state = cur_state; return 0; } +static int +acpi_power_resource_add_dependent(struct acpi_power_resource *resource, + struct device *dev) +{ + struct acpi_power_dependent_device *dep; + int ret = 0; + + mutex_lock(&resource->resource_lock); + list_for_each_entry(dep, &resource->dependents, node) { + /* Only add it once */ + if (dep->dev == dev) + goto unlock; + } + + dep = kzalloc(sizeof(*dep), GFP_KERNEL); + if (!dep) { + ret = -ENOMEM; + goto unlock; + } + + dep->dev = dev; + list_add_tail(&dep->node, &resource->dependents); + dev_dbg(dev, "added power dependency to [%s]\n", + resource_dev_name(resource)); + +unlock: + mutex_unlock(&resource->resource_lock); + return ret; +} + +static void +acpi_power_resource_remove_dependent(struct acpi_power_resource *resource, + struct device *dev) +{ + struct acpi_power_dependent_device *dep; + + mutex_lock(&resource->resource_lock); + list_for_each_entry(dep, &resource->dependents, node) { + if (dep->dev == dev) { + list_del(&dep->node); + kfree(dep); + dev_dbg(dev, "removed power dependency to [%s]\n", + resource_dev_name(resource)); + break; + } + } + mutex_unlock(&resource->resource_lock); +} + +/** + * acpi_device_power_add_dependent - Add dependent device of this ACPI device + * @adev: ACPI device pointer + * @dev: Dependent device + * + * If @adev has non-empty _PR0 the @dev is added as dependent device to all + * power resources returned by it. This means that whenever these power + * resources are turned _ON the dependent devices get runtime resumed. This + * is needed for devices such as PCI to allow its driver to re-initialize + * it after it went to D0uninitialized. + * + * If @adev does not have _PR0 this does nothing. + * + * Returns %0 in case of success and negative errno otherwise. 
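 *
 * A hypothetical caller (the names below are illustrative, not taken from
 * this file) would pair the two helpers around the dependent's lifetime:
 *
 *	struct acpi_device *adev = ACPI_COMPANION(dev);
 *
 *	if (adev && acpi_device_power_add_dependent(adev, dev))
 *		dev_warn(dev, "could not register power dependency\n");
 *	...
 *	if (adev)
 *		acpi_device_power_remove_dependent(adev, dev);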
+ */ +int acpi_device_power_add_dependent(struct acpi_device *adev, + struct device *dev) +{ + struct acpi_power_resource_entry *entry; + struct list_head *resources; + int ret; + + if (!adev->flags.power_manageable) + return 0; + + resources = &adev->power.states[ACPI_STATE_D0].resources; + list_for_each_entry(entry, resources, node) { + ret = acpi_power_resource_add_dependent(entry->resource, dev); + if (ret) + goto err; + } + + return 0; + +err: + list_for_each_entry(entry, resources, node) + acpi_power_resource_remove_dependent(entry->resource, dev); + + return ret; +} + +/** + * acpi_device_power_remove_dependent - Remove dependent device + * @adev: ACPI device pointer + * @dev: Dependent device + * + * Does the opposite of acpi_device_power_add_dependent() and removes the + * dependent device if it is found. Can be called to @adev that does not + * have _PR0 as well. + */ +void acpi_device_power_remove_dependent(struct acpi_device *adev, + struct device *dev) +{ + struct acpi_power_resource_entry *entry; + struct list_head *resources; + + if (!adev->flags.power_manageable) + return; + + resources = &adev->power.states[ACPI_STATE_D0].resources; + list_for_each_entry_reverse(entry, resources, node) + acpi_power_resource_remove_dependent(entry->resource, dev); +} + static int __acpi_power_on(struct acpi_power_resource *resource) { + acpi_handle handle = resource->device.handle; + struct acpi_power_dependent_device *dep; acpi_status status = AE_OK; - status = acpi_evaluate_object(resource->device.handle, "_ON", NULL, NULL); - if (ACPI_FAILURE(status)) + status = acpi_evaluate_object(handle, "_ON", NULL, NULL); + if (ACPI_FAILURE(status)) { + resource->state = ACPI_POWER_RESOURCE_STATE_UNKNOWN; return -ENODEV; + } + + resource->state = ACPI_POWER_RESOURCE_STATE_ON; + + acpi_handle_debug(handle, "Power resource turned on\n"); + + /* + * If there are other dependents on this power resource we need to + * resume them now so that their drivers can re-initialize the + * hardware properly after it went back to D0. 
+ */ + if (list_empty(&resource->dependents) || + list_is_singular(&resource->dependents)) + return 0; - ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Power resource [%s] turned on\n", - resource->name)); + list_for_each_entry(dep, &resource->dependents, node) { + dev_dbg(dep->dev, "runtime resuming because [%s] turned on\n", + resource_dev_name(resource)); + pm_request_resume(dep->dev); + } return 0; } @@ -264,9 +403,8 @@ static int acpi_power_on_unlocked(struct acpi_power_resource *resource) int result = 0; if (resource->ref_count++) { - ACPI_DEBUG_PRINT((ACPI_DB_INFO, - "Power resource [%s] already on\n", - resource->name)); + acpi_handle_debug(resource->device.handle, + "Power resource already on\n"); } else { result = __acpi_power_on(resource); if (result) @@ -287,15 +425,19 @@ static int acpi_power_on(struct acpi_power_resource *resource) static int __acpi_power_off(struct acpi_power_resource *resource) { + acpi_handle handle = resource->device.handle; acpi_status status; - status = acpi_evaluate_object(resource->device.handle, "_OFF", - NULL, NULL); - if (ACPI_FAILURE(status)) + status = acpi_evaluate_object(handle, "_OFF", NULL, NULL); + if (ACPI_FAILURE(status)) { + resource->state = ACPI_POWER_RESOURCE_STATE_UNKNOWN; return -ENODEV; + } + + resource->state = ACPI_POWER_RESOURCE_STATE_OFF; + + acpi_handle_debug(handle, "Power resource turned off\n"); - ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Power resource [%s] turned off\n", - resource->name)); return 0; } @@ -304,16 +446,14 @@ static int acpi_power_off_unlocked(struct acpi_power_resource *resource) int result = 0; if (!resource->ref_count) { - ACPI_DEBUG_PRINT((ACPI_DB_INFO, - "Power resource [%s] already off\n", - resource->name)); + acpi_handle_debug(resource->device.handle, + "Power resource already off\n"); return 0; } if (--resource->ref_count) { - ACPI_DEBUG_PRINT((ACPI_DB_INFO, - "Power resource [%s] still in use\n", - resource->name)); + acpi_handle_debug(resource->device.handle, + "Power resource still in use\n"); } else { result = __acpi_power_off(resource); if (result) @@ -480,21 +620,19 @@ int acpi_power_wakeup_list_init(struct list_head *list, int *system_level_p) list_for_each_entry(entry, list, node) { struct acpi_power_resource *resource = entry->resource; - acpi_handle handle = resource->device.handle; - int result; - int state; + u8 state; mutex_lock(&resource->resource_lock); - result = acpi_power_get_state(handle, &state); - if (result) { - mutex_unlock(&resource->resource_lock); - return result; - } - if (state == ACPI_POWER_RESOURCE_STATE_ON) { - resource->ref_count++; - resource->wakeup_enabled = true; - } + /* + * Make sure that the power resource state and its reference + * counter value are consistent with each other. + */ + if (!resource->ref_count && + !acpi_power_get_state(resource, &state) && + state == ACPI_POWER_RESOURCE_STATE_ON) + __acpi_power_off(resource); + if (system_level > resource->system_level) system_level = resource->system_level; @@ -526,7 +664,7 @@ int acpi_power_wakeup_list_init(struct list_head *list, int *system_level_p) * -ENODEV if the execution of either _DSW or _PSW has failed */ int acpi_device_sleep_wake(struct acpi_device *dev, - int enable, int sleep_state, int dev_state) + int enable, int sleep_state, int dev_state) { union acpi_object in_arg[3]; struct acpi_object_list arg_list = { 3, in_arg }; @@ -535,12 +673,12 @@ int acpi_device_sleep_wake(struct acpi_device *dev, /* * Try to execute _DSW first. 
* - * Three agruments are needed for the _DSW object: + * Three arguments are needed for the _DSW object: * Argument 0: enable/disable the wake capabilities * Argument 1: target system state * Argument 2: target device state * When _DSW object is called to disable the wake capabilities, maybe - * the first argument is filled. The values of the other two agruments + * the first argument is filled. The values of the other two arguments * are meaningless. */ in_arg[0].type = ACPI_TYPE_INTEGER; @@ -553,7 +691,7 @@ int acpi_device_sleep_wake(struct acpi_device *dev, if (ACPI_SUCCESS(status)) { return 0; } else if (status != AE_NOT_FOUND) { - printk(KERN_ERR PREFIX "_DSW execution failed\n"); + acpi_handle_info(dev->handle, "_DSW execution failed\n"); dev->wakeup.flags.valid = 0; return -ENODEV; } @@ -561,7 +699,7 @@ int acpi_device_sleep_wake(struct acpi_device *dev, /* Execute _PSW */ status = acpi_execute_simple_method(dev->handle, "_PSW", enable); if (ACPI_FAILURE(status) && (status != AE_NOT_FOUND)) { - printk(KERN_ERR PREFIX "_PSW execution failed\n"); + acpi_handle_info(dev->handle, "_PSW execution failed\n"); dev->wakeup.flags.valid = 0; return -ENODEV; } @@ -571,13 +709,12 @@ int acpi_device_sleep_wake(struct acpi_device *dev, /* * Prepare a wakeup device, two steps (Ref ACPI 2.0:P229): - * 1. Power on the power resources required for the wakeup device + * 1. Power on the power resources required for the wakeup device * 2. Execute _DSW (Device Sleep Wake) or (deprecated in ACPI 3.0) _PSW (Power * State Wake) for the device, if present */ int acpi_enable_wakeup_device_power(struct acpi_device *dev, int sleep_state) { - struct acpi_power_resource_entry *entry; int err = 0; if (!dev || !dev->wakeup.flags.valid) @@ -585,36 +722,31 @@ int acpi_enable_wakeup_device_power(struct acpi_device *dev, int sleep_state) mutex_lock(&acpi_device_lock); + dev_dbg(&dev->dev, "Enabling wakeup power (count %d)\n", + dev->wakeup.prepare_count); + if (dev->wakeup.prepare_count++) goto out; - list_for_each_entry(entry, &dev->wakeup.resources, node) { - struct acpi_power_resource *resource = entry->resource; - - mutex_lock(&resource->resource_lock); - - if (!resource->wakeup_enabled) { - err = acpi_power_on_unlocked(resource); - if (!err) - resource->wakeup_enabled = true; - } - - mutex_unlock(&resource->resource_lock); - - if (err) { - dev_err(&dev->dev, - "Cannot turn wakeup power resources on\n"); - dev->wakeup.flags.valid = 0; - goto out; - } + err = acpi_power_on_list(&dev->wakeup.resources); + if (err) { + dev_err(&dev->dev, "Cannot turn on wakeup power resources\n"); + dev->wakeup.flags.valid = 0; + goto out; } + /* * Passing 3 as the third argument below means the device may be * put into arbitrary power state afterward. */ err = acpi_device_sleep_wake(dev, 1, sleep_state, 3); - if (err) + if (err) { + acpi_power_off_list(&dev->wakeup.resources); dev->wakeup.prepare_count = 0; + goto out; + } + + dev_dbg(&dev->dev, "Wakeup power enabled\n"); out: mutex_unlock(&acpi_device_lock); @@ -637,41 +769,39 @@ int acpi_disable_wakeup_device_power(struct acpi_device *dev) mutex_lock(&acpi_device_lock); - if (--dev->wakeup.prepare_count > 0) + dev_dbg(&dev->dev, "Disabling wakeup power (count %d)\n", + dev->wakeup.prepare_count); + + /* Do nothing if wakeup power has not been enabled for this device. */ + if (dev->wakeup.prepare_count <= 0) goto out; - /* - * Executing the code below even if prepare_count is already zero when - * the function is called may be useful, for example for initialisation. 
- */ - if (dev->wakeup.prepare_count < 0) - dev->wakeup.prepare_count = 0; + if (--dev->wakeup.prepare_count > 0) + goto out; err = acpi_device_sleep_wake(dev, 0, 0, 0); if (err) goto out; + /* + * All of the power resources in the list need to be turned off even if + * there are errors. + */ list_for_each_entry(entry, &dev->wakeup.resources, node) { - struct acpi_power_resource *resource = entry->resource; - - mutex_lock(&resource->resource_lock); - - if (resource->wakeup_enabled) { - err = acpi_power_off_unlocked(resource); - if (!err) - resource->wakeup_enabled = false; - } - - mutex_unlock(&resource->resource_lock); + int ret; - if (err) { - dev_err(&dev->dev, - "Cannot turn wakeup power resources off\n"); - dev->wakeup.flags.valid = 0; - break; - } + ret = acpi_power_off(entry->resource); + if (ret && !err) + err = ret; + } + if (err) { + dev_err(&dev->dev, "Cannot turn off wakeup power resources\n"); + dev->wakeup.flags.valid = 0; + goto out; } + dev_dbg(&dev->dev, "Wakeup power disabled\n"); + out: mutex_unlock(&acpi_device_lock); return err; @@ -679,8 +809,8 @@ int acpi_disable_wakeup_device_power(struct acpi_device *dev) int acpi_power_get_inferred_state(struct acpi_device *device, int *state) { + u8 list_state = ACPI_POWER_RESOURCE_STATE_OFF; int result = 0; - int list_state = 0; int i = 0; if (!device || !state) @@ -767,15 +897,16 @@ static void acpi_release_power_resource(struct device *dev) kfree(resource); } -static ssize_t acpi_power_in_use_show(struct device *dev, - struct device_attribute *attr, - char *buf) { +static ssize_t resource_in_use_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ struct acpi_power_resource *resource; resource = to_power_resource(to_acpi_device(dev)); return sprintf(buf, "%u\n", !!resource->ref_count); } -static DEVICE_ATTR(resource_in_use, 0444, acpi_power_in_use_show, NULL); +static DEVICE_ATTR_RO(resource_in_use); static void acpi_power_sysfs_remove(struct acpi_device *device) { @@ -801,50 +932,54 @@ static void acpi_power_add_resource_to_list(struct acpi_power_resource *resource mutex_unlock(&power_resource_list_lock); } -int acpi_add_power_resource(acpi_handle handle) +struct acpi_device *acpi_add_power_resource(acpi_handle handle) { + struct acpi_device *device = acpi_fetch_acpi_dev(handle); struct acpi_power_resource *resource; - struct acpi_device *device = NULL; union acpi_object acpi_object; struct acpi_buffer buffer = { sizeof(acpi_object), &acpi_object }; acpi_status status; - int state, result = -ENODEV; + u8 state_dummy; + int result; - acpi_bus_get_device(handle, &device); if (device) - return 0; + return device; resource = kzalloc(sizeof(*resource), GFP_KERNEL); if (!resource) - return -ENOMEM; + return NULL; device = &resource->device; acpi_init_device_object(device, handle, ACPI_BUS_TYPE_POWER, - ACPI_STA_DEFAULT); + acpi_release_power_resource); mutex_init(&resource->resource_lock); INIT_LIST_HEAD(&resource->list_node); - resource->name = device->pnp.bus_id; - strcpy(acpi_device_name(device), ACPI_POWER_DEVICE_NAME); - strcpy(acpi_device_class(device), ACPI_POWER_CLASS); + INIT_LIST_HEAD(&resource->dependents); + strscpy(acpi_device_name(device), ACPI_POWER_DEVICE_NAME); + strscpy(acpi_device_class(device), ACPI_POWER_CLASS); device->power.state = ACPI_STATE_UNKNOWN; + device->flags.match_driver = true; - /* Evalute the object to get the system level and resource order. */ + /* Evaluate the object to get the system level and resource order. 
*/ status = acpi_evaluate_object(handle, NULL, NULL, &buffer); if (ACPI_FAILURE(status)) goto err; resource->system_level = acpi_object.power_resource.system_level; resource->order = acpi_object.power_resource.resource_order; + resource->state = ACPI_POWER_RESOURCE_STATE_UNKNOWN; - result = acpi_power_get_state(handle, &state); + /* Get the initial state or just flip it on if that fails. */ + if (acpi_power_get_state(resource, &state_dummy)) + __acpi_power_on(resource); + + acpi_handle_info(handle, "New power resource\n"); + + result = acpi_tie_acpi_dev(device); if (result) goto err; - printk(KERN_INFO PREFIX "%s [%s] (%s)\n", acpi_device_name(device), - acpi_device_bid(device), state ? "on" : "off"); - - device->flags.match_driver = true; - result = acpi_device_add(device, acpi_release_power_resource); + result = acpi_device_add(device); if (result) goto err; @@ -853,14 +988,46 @@ int acpi_add_power_resource(acpi_handle handle) acpi_power_add_resource_to_list(resource); acpi_device_add_finalize(device); - return 0; + return device; err: acpi_release_power_resource(&device->dev); - return result; + return NULL; } #ifdef CONFIG_ACPI_SLEEP +static bool resource_is_gp12pxp(acpi_handle handle) +{ + const char *path; + bool ret; + + path = acpi_handle_path(handle); + ret = path && strcmp(path, "\\_SB_.PCI0.GP12.PXP_") == 0; + kfree(path); + + return ret; +} + +static void acpi_resume_on_eb_gp12pxp(struct acpi_power_resource *resource) +{ + acpi_handle_notice(resource->device.handle, + "HP EB quirk - turning OFF then ON\n"); + + __acpi_power_off(resource); + __acpi_power_on(resource); + + /* + * Use the same delay as DSDT uses in modem _RST method. + * + * Otherwise we get "Unable to change power state from unknown to D0, + * device inaccessible" error for the modem PCI device after thaw. + * + * This power resource is normally being enabled only during thaw (once) + * so this wait is not a performance issue. + */ + msleep(200); +} + void acpi_resume_power_resources(void) { struct acpi_power_resource *resource; @@ -868,11 +1035,13 @@ void acpi_resume_power_resources(void) mutex_lock(&power_resource_list_lock); list_for_each_entry(resource, &acpi_power_resource_list, list_node) { - int result, state; + int result; + u8 state; mutex_lock(&resource->resource_lock); - result = acpi_power_get_state(resource->device.handle, &state); + resource->state = ACPI_POWER_RESOURCE_STATE_UNKNOWN; + result = acpi_power_get_state(resource, &state); if (result) { mutex_unlock(&resource->resource_lock); continue; @@ -880,8 +1049,14 @@ void acpi_resume_power_resources(void) if (state == ACPI_POWER_RESOURCE_STATE_OFF && resource->ref_count) { - dev_info(&resource->device.dev, "Turning ON\n"); - __acpi_power_on(resource); + if (hp_eb_gp12pxp_quirk && + resource_is_gp12pxp(resource->device.handle)) { + acpi_resume_on_eb_gp12pxp(resource); + } else { + acpi_handle_debug(resource->device.handle, + "Turning ON\n"); + __acpi_power_on(resource); + } } mutex_unlock(&resource->resource_lock); @@ -889,27 +1064,76 @@ void acpi_resume_power_resources(void) mutex_unlock(&power_resource_list_lock); } +#endif + +static const struct dmi_system_id dmi_hp_elitebook_gp12pxp_quirk[] = { +/* + * This laptop (and possibly similar models too) has power resource called + * "GP12.PXP_" for its WWAN modem. + * + * For this power resource to turn ON power for the modem it needs certain + * internal flag called "ONEN" to be set. 
+ * This flag only gets set from this power resource "_OFF" method, while the + * actual modem power gets turned off during suspend by "GP12.PTS" method + * called from the global "_PTS" (Prepare To Sleep) method. + * On the other hand, this power resource "_OFF" method implementation just + * sets the aforementioned flag without actually doing anything else (it + * doesn't contain any code to actually turn off power). + * + * The above means that when upon hibernation finish we try to set this + * power resource back ON since its "_STA" method returns 0 (while the resource + * is still considered in use) its "_ON" method won't do anything since + * that "ONEN" flag is not set. + * Overall, this means the modem is dead until laptop is rebooted since its + * power has been cut by "_PTS" and its PCI configuration was lost and not able + * to be restored. + * + * The easiest way to workaround the issue is to call this power resource + * "_OFF" method before calling the "_ON" method to make sure the "ONEN" + * flag gets properly set. + */ + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "HP"), + DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook 855 G7 Notebook PC"), + }, + }, + {} +}; + +static const struct dmi_system_id dmi_leave_unused_power_resources_on[] = { + { + /* + * The Toshiba Click Mini has a CPR3 power-resource which must + * be on for the touchscreen to work, but which is not in any + * _PR? lists. The other 2 affected power-resources are no-ops. + */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), + DMI_MATCH(DMI_PRODUCT_NAME, "SATELLITE Click Mini L9W-B"), + }, + }, + {} +}; +/** + * acpi_turn_off_unused_power_resources - Turn off power resources not in use. + */ void acpi_turn_off_unused_power_resources(void) { struct acpi_power_resource *resource; + if (unused_power_resources_quirk) + return; + mutex_lock(&power_resource_list_lock); list_for_each_entry_reverse(resource, &acpi_power_resource_list, list_node) { - int result, state; - mutex_lock(&resource->resource_lock); - result = acpi_power_get_state(resource->device.handle, &state); - if (result) { - mutex_unlock(&resource->resource_lock); - continue; - } - - if (state == ACPI_POWER_RESOURCE_STATE_ON - && !resource->ref_count) { - dev_info(&resource->device.dev, "Turning OFF\n"); + if (!resource->ref_count && + resource->state == ACPI_POWER_RESOURCE_STATE_ON) { + acpi_handle_debug(resource->device.handle, "Turning OFF\n"); __acpi_power_off(resource); } @@ -918,4 +1142,10 @@ void acpi_turn_off_unused_power_resources(void) mutex_unlock(&power_resource_list_lock); } -#endif + +void __init acpi_power_resources_init(void) +{ + hp_eb_gp12pxp_quirk = dmi_check_system(dmi_hp_elitebook_gp12pxp_quirk); + unused_power_resources_quirk = + dmi_check_system(dmi_leave_unused_power_resources_on); +} diff --git a/drivers/acpi/pptt.c b/drivers/acpi/pptt.c index da031b1df6f5..de5f8c018333 100644 --- a/drivers/acpi/pptt.c +++ b/drivers/acpi/pptt.c @@ -21,6 +21,25 @@ #include <linux/cacheinfo.h> #include <acpi/processor.h> +/* + * The acpi_pptt_cache_v1 in actbl2.h, which is imported from acpica, + * only contains the cache_id field rather than all the fields of the + * Cache Type Structure. Use this alternative structure until it is + * resolved in acpica. 
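 *
 * When the ACPI_PPTT_CACHE_ID_VALID flag is set, the extra cache_id field
 * carries a firmware-assigned identifier for the cache.
 * update_cache_properties() below exports it through cacheinfo as CACHE_ID,
 * and find_acpi_cache_level_from_id() /
 * acpi_pptt_get_cpumask_from_cache_id() (which additionally require a
 * revision 3 or later table) use it to look a cache up without referring to
 * a particular CPU.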
+ */ +struct acpi_pptt_cache_v1_full { + struct acpi_subtable_header header; + u16 reserved; + u32 flags; + u32 next_level_of_cache; + u32 size; + u32 number_of_sets; + u8 associativity; + u8 attributes; + u16 line_size; + u32 cache_id; +} __packed; + static struct acpi_subtable_header *fetch_pptt_subtable(struct acpi_table_header *table_hdr, u32 pptt_ref) { @@ -56,6 +75,18 @@ static struct acpi_pptt_cache *fetch_pptt_cache(struct acpi_table_header *table_ return (struct acpi_pptt_cache *)fetch_pptt_subtable(table_hdr, pptt_ref); } +static struct acpi_pptt_cache_v1_full *upgrade_pptt_cache(struct acpi_pptt_cache *cache) +{ + if (cache->header.length < sizeof(struct acpi_pptt_cache_v1_full)) + return NULL; + + /* No use for v1 if the only additional field is invalid */ + if (!(cache->flags & ACPI_PPTT_CACHE_ID_VALID)) + return NULL; + + return (struct acpi_pptt_cache_v1_full *)cache; +} + static struct acpi_subtable_header *acpi_get_pptt_resource(struct acpi_table_header *table_hdr, struct acpi_pptt_processor *node, int resource) @@ -81,6 +112,7 @@ static inline bool acpi_pptt_match_type(int table_type, int type) * acpi_pptt_walk_cache() - Attempt to find the requested acpi_pptt_cache * @table_hdr: Pointer to the head of the PPTT table * @local_level: passed res reflects this cache level + * @split_levels: Number of split cache levels (data/instruction). * @res: cache resource in the PPTT we want to walk * @found: returns a pointer to the requested level if found * @level: the requested cache level @@ -98,11 +130,12 @@ static inline bool acpi_pptt_match_type(int table_type, int type) * * Return: The cache structure and the level we terminated with. */ -static int acpi_pptt_walk_cache(struct acpi_table_header *table_hdr, - int local_level, - struct acpi_subtable_header *res, - struct acpi_pptt_cache **found, - int level, int type) +static unsigned int acpi_pptt_walk_cache(struct acpi_table_header *table_hdr, + unsigned int local_level, + unsigned int *split_levels, + struct acpi_subtable_header *res, + struct acpi_pptt_cache **found, + unsigned int level, int type) { struct acpi_pptt_cache *cache; @@ -113,13 +146,22 @@ static int acpi_pptt_walk_cache(struct acpi_table_header *table_hdr, while (cache) { local_level++; + if (!(cache->flags & ACPI_PPTT_CACHE_TYPE_VALID)) { + cache = fetch_pptt_cache(table_hdr, cache->next_level_of_cache); + continue; + } + + if (split_levels && + (acpi_pptt_match_type(cache->attributes, ACPI_PPTT_CACHE_TYPE_DATA) || + acpi_pptt_match_type(cache->attributes, ACPI_PPTT_CACHE_TYPE_INSTR))) + *split_levels = local_level; + if (local_level == level && - cache->flags & ACPI_PPTT_CACHE_TYPE_VALID && acpi_pptt_match_type(cache->attributes, type)) { if (*found != NULL && cache != *found) pr_warn("Found duplicate cache level/type unable to determine uniqueness\n"); - pr_debug("Found cache @ level %d\n", level); + pr_debug("Found cache @ level %u\n", level); *found = cache; /* * continue looking at this node's resource list @@ -132,23 +174,25 @@ static int acpi_pptt_walk_cache(struct acpi_table_header *table_hdr, return local_level; } -static struct acpi_pptt_cache *acpi_find_cache_level(struct acpi_table_header *table_hdr, - struct acpi_pptt_processor *cpu_node, - int *starting_level, int level, - int type) +static struct acpi_pptt_cache * +acpi_find_cache_level(struct acpi_table_header *table_hdr, + struct acpi_pptt_processor *cpu_node, + unsigned int *starting_level, unsigned int *split_levels, + unsigned int level, int type) { struct acpi_subtable_header *res; - int 
number_of_levels = *starting_level; + unsigned int number_of_levels = *starting_level; int resource = 0; struct acpi_pptt_cache *ret = NULL; - int local_level; + unsigned int local_level; /* walk down from processor node */ while ((res = acpi_get_pptt_resource(table_hdr, cpu_node, resource))) { resource++; local_level = acpi_pptt_walk_cache(table_hdr, *starting_level, - res, &ret, level, type); + split_levels, res, &ret, + level, type); /* * we are looking for the max depth. Since its potentially * possible for a given node to have resources with differing @@ -164,29 +208,33 @@ static struct acpi_pptt_cache *acpi_find_cache_level(struct acpi_table_header *t } /** - * acpi_count_levels() - Given a PPTT table, and a cpu node, count the caches + * acpi_count_levels() - Given a PPTT table, and a CPU node, count the + * total number of levels and split cache levels (data/instruction). * @table_hdr: Pointer to the head of the PPTT table * @cpu_node: processor node we wish to count caches for + * @split_levels: Number of split cache levels (data/instruction) if + * success. Can by NULL. * + * Return: number of levels. * Given a processor node containing a processing unit, walk into it and count * how many levels exist solely for it, and then walk up each level until we hit * the root node (ignore the package level because it may be possible to have - * caches that exist across packages). Count the number of cache levels that - * exist at each level on the way up. - * - * Return: Total number of levels found. + * caches that exist across packages). Count the number of cache levels and + * split cache levels (data/instruction) that exist at each level on the way + * up. */ static int acpi_count_levels(struct acpi_table_header *table_hdr, - struct acpi_pptt_processor *cpu_node) + struct acpi_pptt_processor *cpu_node, + unsigned int *split_levels) { - int total_levels = 0; + int current_level = 0; do { - acpi_find_cache_level(table_hdr, cpu_node, &total_levels, 0, 0); + acpi_find_cache_level(table_hdr, cpu_node, ¤t_level, split_levels, 0, 0); cpu_node = fetch_pptt_node(table_hdr, cpu_node->parent); } while (cpu_node); - return total_levels; + return current_level; } /** @@ -209,22 +257,27 @@ static int acpi_pptt_leaf_node(struct acpi_table_header *table_hdr, struct acpi_pptt_processor *cpu_node; u32 proc_sz; + if (table_hdr->revision > 1) + return (node->flags & ACPI_PPTT_ACPI_LEAF_NODE); + table_end = (unsigned long)table_hdr + table_hdr->length; node_entry = ACPI_PTR_DIFF(node, table_hdr); entry = ACPI_ADD_PTR(struct acpi_subtable_header, table_hdr, sizeof(struct acpi_table_pptt)); - proc_sz = sizeof(struct acpi_pptt_processor *); + proc_sz = sizeof(struct acpi_pptt_processor); - while ((unsigned long)entry + proc_sz < table_end) { + /* ignore subtable types that are smaller than a processor node */ + while ((unsigned long)entry + proc_sz <= table_end) { cpu_node = (struct acpi_pptt_processor *)entry; + if (entry->type == ACPI_PPTT_TYPE_PROCESSOR && cpu_node->parent == node_entry) return 0; if (entry->length == 0) return 0; + entry = ACPI_ADD_PTR(struct acpi_subtable_header, entry, entry->length); - } return 1; } @@ -232,7 +285,7 @@ static int acpi_pptt_leaf_node(struct acpi_table_header *table_hdr, /** * acpi_find_processor_node() - Given a PPTT table find the requested processor * @table_hdr: Pointer to the head of the PPTT table - * @acpi_cpu_id: cpu we are searching for + * @acpi_cpu_id: CPU we are searching for * * Find the subtable entry describing the provided processor. 
* This is done by iterating the PPTT table looking for processor nodes @@ -254,18 +307,21 @@ static struct acpi_pptt_processor *acpi_find_processor_node(struct acpi_table_he table_end = (unsigned long)table_hdr + table_hdr->length; entry = ACPI_ADD_PTR(struct acpi_subtable_header, table_hdr, sizeof(struct acpi_table_pptt)); - proc_sz = sizeof(struct acpi_pptt_processor *); + proc_sz = sizeof(struct acpi_pptt_processor); /* find the processor structure associated with this cpuid */ - while ((unsigned long)entry + proc_sz < table_end) { + while ((unsigned long)entry + proc_sz <= table_end) { cpu_node = (struct acpi_pptt_processor *)entry; if (entry->length == 0) { pr_warn("Invalid zero length subtable\n"); break; } + /* entry->length may not equal proc_sz, revalidate the processor structure length */ if (entry->type == ACPI_PPTT_TYPE_PROCESSOR && acpi_cpu_id == cpu_node->acpi_processor_id && + (unsigned long)entry + entry->length <= table_end && + entry->length == proc_sz + cpu_node->number_of_priv_resources * sizeof(u32) && acpi_pptt_leaf_node(table_hdr, cpu_node)) { return (struct acpi_pptt_processor *)entry; } @@ -277,19 +333,6 @@ static struct acpi_pptt_processor *acpi_find_processor_node(struct acpi_table_he return NULL; } -static int acpi_find_cache_levels(struct acpi_table_header *table_hdr, - u32 acpi_cpu_id) -{ - int number_of_levels = 0; - struct acpi_pptt_processor *cpu; - - cpu = acpi_find_processor_node(table_hdr, acpi_cpu_id); - if (cpu) - number_of_levels = acpi_count_levels(table_hdr, cpu); - - return number_of_levels; -} - static u8 acpi_cache_type(enum cache_type type) { switch (type) { @@ -318,19 +361,19 @@ static struct acpi_pptt_cache *acpi_find_cache_node(struct acpi_table_header *ta unsigned int level, struct acpi_pptt_processor **node) { - int total_levels = 0; + unsigned int total_levels = 0; struct acpi_pptt_cache *found = NULL; struct acpi_pptt_processor *cpu_node; u8 acpi_type = acpi_cache_type(type); - pr_debug("Looking for CPU %d's level %d cache type %d\n", + pr_debug("Looking for CPU %d's level %u cache type %d\n", acpi_cpu_id, level, acpi_type); cpu_node = acpi_find_processor_node(table_hdr, acpi_cpu_id); while (cpu_node && !found) { found = acpi_find_cache_level(table_hdr, cpu_node, - &total_levels, level, acpi_type); + &total_levels, NULL, level, acpi_type); *node = cpu_node; cpu_node = fetch_pptt_node(table_hdr, cpu_node->parent); } @@ -354,6 +397,8 @@ static void update_cache_properties(struct cacheinfo *this_leaf, struct acpi_pptt_cache *found_cache, struct acpi_pptt_processor *cpu_node) { + struct acpi_pptt_cache_v1_full *found_cache_v1; + this_leaf->fw_token = cpu_node; if (found_cache->flags & ACPI_PPTT_SIZE_PROPERTY_VALID) this_leaf->size = found_cache->size; @@ -401,6 +446,12 @@ static void update_cache_properties(struct cacheinfo *this_leaf, if (this_leaf->type == CACHE_TYPE_NOCACHE && found_cache->flags & ACPI_PPTT_CACHE_TYPE_VALID) this_leaf->type = CACHE_TYPE_UNIFIED; + + found_cache_v1 = upgrade_pptt_cache(found_cache); + if (found_cache_v1) { + this_leaf->id = found_cache_v1->cache_id; + this_leaf->attributes |= CACHE_ID; + } } static void cache_setup_acpi_cpu(struct acpi_table_header *table, @@ -421,25 +472,47 @@ static void cache_setup_acpi_cpu(struct acpi_table_header *table, &cpu_node); pr_debug("found = %p %p\n", found_cache, cpu_node); if (found_cache) - update_cache_properties(this_leaf, - found_cache, - cpu_node); + update_cache_properties(this_leaf, found_cache, + ACPI_TO_POINTER(ACPI_PTR_DIFF(cpu_node, table))); index++; } } +static 
bool flag_identical(struct acpi_table_header *table_hdr, + struct acpi_pptt_processor *cpu) +{ + struct acpi_pptt_processor *next; + + /* heterogeneous machines must use PPTT revision > 1 */ + if (table_hdr->revision < 2) + return false; + + /* Locate the last node in the tree with IDENTICAL set */ + if (cpu->flags & ACPI_PPTT_ACPI_IDENTICAL) { + next = fetch_pptt_node(table_hdr, cpu->parent); + if (!(next && next->flags & ACPI_PPTT_ACPI_IDENTICAL)) + return true; + } + + return false; +} + /* Passing level values greater than this will result in search termination */ #define PPTT_ABORT_PACKAGE 0xFF -static struct acpi_pptt_processor *acpi_find_processor_package_id(struct acpi_table_header *table_hdr, - struct acpi_pptt_processor *cpu, - int level, int flag) +static struct acpi_pptt_processor *acpi_find_processor_tag(struct acpi_table_header *table_hdr, + struct acpi_pptt_processor *cpu, + int level, int flag) { struct acpi_pptt_processor *prev_node; while (cpu && level) { - if (cpu->flags & flag) + /* special case the identical flag to find last identical */ + if (flag == ACPI_PPTT_ACPI_IDENTICAL) { + if (flag_identical(table_hdr, cpu)) + break; + } else if (cpu->flags & flag) break; pr_debug("level %d\n", level); prev_node = fetch_pptt_node(table_hdr, cpu->parent); @@ -451,18 +524,23 @@ static struct acpi_pptt_processor *acpi_find_processor_package_id(struct acpi_ta return cpu; } +static void acpi_pptt_warn_missing(void) +{ + pr_warn_once("No PPTT table found, CPU and cache topology may be inaccurate\n"); +} + /** * topology_get_acpi_cpu_tag() - Find a unique topology value for a feature * @table: Pointer to the head of the PPTT table - * @cpu: Kernel logical cpu number + * @cpu: Kernel logical CPU number * @level: A level that terminates the search * @flag: A flag which terminates the search * - * Get a unique value given a cpu, and a topology level, that can be + * Get a unique value given a CPU, and a topology level, that can be * matched to determine which cpus share common topological features * at that level. * - * Return: Unique value, or -ENOENT if unable to locate cpu + * Return: Unique value, or -ENOENT if unable to locate CPU */ static int topology_get_acpi_cpu_tag(struct acpi_table_header *table, unsigned int cpu, int level, int flag) @@ -472,8 +550,8 @@ static int topology_get_acpi_cpu_tag(struct acpi_table_header *table, cpu_node = acpi_find_processor_node(table, acpi_cpu_id); if (cpu_node) { - cpu_node = acpi_find_processor_package_id(table, cpu_node, - level, flag); + cpu_node = acpi_find_processor_tag(table, cpu_node, + level, flag); /* * As per specification if the processor structure represents * an actual processor, then ACPI processor ID must be valid. @@ -490,92 +568,167 @@ static int topology_get_acpi_cpu_tag(struct acpi_table_header *table, return -ENOENT; } + +static struct acpi_table_header *acpi_get_pptt(void) +{ + static struct acpi_table_header *pptt; + static bool is_pptt_checked; + acpi_status status; + + /* + * PPTT will be used at runtime on every CPU hotplug in path, so we + * don't need to call acpi_put_table() to release the table mapping. 
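 *
 * Note that the result of the first lookup, successful or not, is kept for
 * the lifetime of the system: the mapping is never dropped and a missing
 * PPTT is only reported once.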
+ */ + if (!pptt && !is_pptt_checked) { + status = acpi_get_table(ACPI_SIG_PPTT, 0, &pptt); + if (ACPI_FAILURE(status)) + acpi_pptt_warn_missing(); + + is_pptt_checked = true; + } + + return pptt; +} + static int find_acpi_cpu_topology_tag(unsigned int cpu, int level, int flag) { struct acpi_table_header *table; - acpi_status status; int retval; - status = acpi_get_table(ACPI_SIG_PPTT, 0, &table); - if (ACPI_FAILURE(status)) { - pr_warn_once("No PPTT table found, cpu topology may be inaccurate\n"); + table = acpi_get_pptt(); + if (!table) return -ENOENT; - } + retval = topology_get_acpi_cpu_tag(table, cpu, level, flag); - pr_debug("Topology Setup ACPI cpu %d, level %d ret = %d\n", + pr_debug("Topology Setup ACPI CPU %d, level %d ret = %d\n", cpu, level, retval); - acpi_put_table(table); return retval; } /** - * acpi_find_last_cache_level() - Determines the number of cache levels for a PE - * @cpu: Kernel logical cpu number + * check_acpi_cpu_flag() - Determine if CPU node has a flag set + * @cpu: Kernel logical CPU number + * @rev: The minimum PPTT revision defining the flag + * @flag: The flag itself + * + * Check the node representing a CPU for a given flag. * - * Given a logical cpu number, returns the number of levels of cache represented + * Return: -ENOENT if the PPTT doesn't exist, the CPU cannot be found or + * the table revision isn't new enough. + * 1, any passed flag set + * 0, flag unset + */ +static int check_acpi_cpu_flag(unsigned int cpu, int rev, u32 flag) +{ + struct acpi_table_header *table; + u32 acpi_cpu_id = get_acpi_id_for_cpu(cpu); + struct acpi_pptt_processor *cpu_node = NULL; + int ret = -ENOENT; + + table = acpi_get_pptt(); + if (!table) + return -ENOENT; + + if (table->revision >= rev) + cpu_node = acpi_find_processor_node(table, acpi_cpu_id); + + if (cpu_node) + ret = (cpu_node->flags & flag) != 0; + + return ret; +} + +/** + * acpi_get_cache_info() - Determine the number of cache levels and + * split cache levels (data/instruction) and for a PE. + * @cpu: Kernel logical CPU number + * @levels: Number of levels if success. + * @split_levels: Number of levels being split (i.e. data/instruction) + * if success. Can by NULL. + * + * Given a logical CPU number, returns the number of levels of cache represented * in the PPTT. Errors caused by lack of a PPTT table, or otherwise, return 0 * indicating we didn't find any cache levels. * - * Return: Cache levels visible to this core. + * Return: -ENOENT if no PPTT table or no PPTT processor struct found. + * 0 on success. 
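 *
 * A minimal usage sketch (the calling context shown is illustrative):
 *
 *	unsigned int levels, split_levels;
 *
 *	if (!acpi_get_cache_info(cpu, &levels, &split_levels))
 *		pr_debug("cpu%u: %u cache levels (%u split)\n",
 *			 cpu, levels, split_levels);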
*/ -int acpi_find_last_cache_level(unsigned int cpu) +int acpi_get_cache_info(unsigned int cpu, unsigned int *levels, + unsigned int *split_levels) { - u32 acpi_cpu_id; + struct acpi_pptt_processor *cpu_node; struct acpi_table_header *table; - int number_of_levels = 0; - acpi_status status; + u32 acpi_cpu_id; + + *levels = 0; + if (split_levels) + *split_levels = 0; - pr_debug("Cache Setup find last level cpu=%d\n", cpu); + table = acpi_get_pptt(); + if (!table) + return -ENOENT; + + pr_debug("Cache Setup: find cache levels for CPU=%d\n", cpu); acpi_cpu_id = get_acpi_id_for_cpu(cpu); - status = acpi_get_table(ACPI_SIG_PPTT, 0, &table); - if (ACPI_FAILURE(status)) { - pr_warn_once("No PPTT table found, cache topology may be inaccurate\n"); - } else { - number_of_levels = acpi_find_cache_levels(table, acpi_cpu_id); - acpi_put_table(table); - } - pr_debug("Cache Setup find last level level=%d\n", number_of_levels); + cpu_node = acpi_find_processor_node(table, acpi_cpu_id); + if (!cpu_node) + return -ENOENT; + + *levels = acpi_count_levels(table, cpu_node, split_levels); + + pr_debug("Cache Setup: last_level=%d split_levels=%d\n", + *levels, split_levels ? *split_levels : -1); - return number_of_levels; + return 0; } /** * cache_setup_acpi() - Override CPU cache topology with data from the PPTT - * @cpu: Kernel logical cpu number + * @cpu: Kernel logical CPU number * * Updates the global cache info provided by cpu_get_cacheinfo() * when there are valid properties in the acpi_pptt_cache nodes. A * successful parse may not result in any updates if none of the - * cache levels have any valid flags set. Futher, a unique value is + * cache levels have any valid flags set. Further, a unique value is * associated with each known CPU cache entry. This unique value - * can be used to determine whether caches are shared between cpus. + * can be used to determine whether caches are shared between CPUs. * * Return: -ENOENT on failure to find table, or 0 on success */ int cache_setup_acpi(unsigned int cpu) { struct acpi_table_header *table; - acpi_status status; - - pr_debug("Cache Setup ACPI cpu %d\n", cpu); - status = acpi_get_table(ACPI_SIG_PPTT, 0, &table); - if (ACPI_FAILURE(status)) { - pr_warn_once("No PPTT table found, cache topology may be inaccurate\n"); + table = acpi_get_pptt(); + if (!table) return -ENOENT; - } + + pr_debug("Cache Setup ACPI CPU %d\n", cpu); cache_setup_acpi_cpu(table, cpu); - acpi_put_table(table); - return status; + return 0; } /** - * find_acpi_cpu_topology() - Determine a unique topology value for a given cpu - * @cpu: Kernel logical cpu number + * acpi_pptt_cpu_is_thread() - Determine if CPU is a thread + * @cpu: Kernel logical CPU number + * + * Return: 1, a thread + * 0, not a thread + * -ENOENT ,if the PPTT doesn't exist, the CPU cannot be found or + * the table revision isn't new enough. + */ +int acpi_pptt_cpu_is_thread(unsigned int cpu) +{ + return check_acpi_cpu_flag(cpu, 2, ACPI_PPTT_ACPI_PROCESSOR_IS_THREAD); +} + +/** + * find_acpi_cpu_topology() - Determine a unique topology value for a given CPU + * @cpu: Kernel logical CPU number * @level: The topological level for which we would like a unique ID * * Determine a topology unique ID for each thread/core/cluster/mc_grouping @@ -588,7 +741,7 @@ int cache_setup_acpi(unsigned int cpu) * other levels beyond this use a generated value to uniquely identify * a topological feature. * - * Return: -ENOENT if the PPTT doesn't exist, or the cpu cannot be found. 
+ * Return: -ENOENT if the PPTT doesn't exist, or the CPU cannot be found. * Otherwise returns a value which represents a unique topological feature. */ int find_acpi_cpu_topology(unsigned int cpu, int level) @@ -597,58 +750,316 @@ int find_acpi_cpu_topology(unsigned int cpu, int level) } /** - * find_acpi_cpu_cache_topology() - Determine a unique cache topology value - * @cpu: Kernel logical cpu number - * @level: The cache level for which we would like a unique ID + * find_acpi_cpu_topology_package() - Determine a unique CPU package value + * @cpu: Kernel logical CPU number * - * Determine a unique ID for each unified cache in the system + * Determine a topology unique package ID for the given CPU. + * This ID can then be used to group peers, which will have matching ids. * - * Return: -ENOENT if the PPTT doesn't exist, or the cpu cannot be found. - * Otherwise returns a value which represents a unique topological feature. + * The search terminates when either a level is found with the PHYSICAL_PACKAGE + * flag set or we reach a root node. + * + * Return: -ENOENT if the PPTT doesn't exist, or the CPU cannot be found. + * Otherwise returns a value which represents the package for this CPU. */ -int find_acpi_cpu_cache_topology(unsigned int cpu, int level) +int find_acpi_cpu_topology_package(unsigned int cpu) +{ + return find_acpi_cpu_topology_tag(cpu, PPTT_ABORT_PACKAGE, + ACPI_PPTT_PHYSICAL_PACKAGE); +} + +/** + * find_acpi_cpu_topology_cluster() - Determine a unique CPU cluster value + * @cpu: Kernel logical CPU number + * + * Determine a topology unique cluster ID for the given CPU/thread. + * This ID can then be used to group peers, which will have matching ids. + * + * The cluster, if present is the level of topology above CPUs. In a + * multi-thread CPU, it will be the level above the CPU, not the thread. + * It may not exist in single CPU systems. In simple multi-CPU systems, + * it may be equal to the package topology level. + * + * Return: -ENOENT if the PPTT doesn't exist, the CPU cannot be found + * or there is no toplogy level above the CPU.. + * Otherwise returns a value which represents the package for this CPU. 
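 *
 * A minimal usage sketch (the topology structure shown is illustrative):
 *
 *	int cluster_id = find_acpi_cpu_topology_cluster(cpu);
 *
 *	if (cluster_id >= 0)
 *		cpu_topology[cpu].cluster_id = cluster_id;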
+ */ + +int find_acpi_cpu_topology_cluster(unsigned int cpu) { struct acpi_table_header *table; - struct acpi_pptt_cache *found_cache; - acpi_status status; - u32 acpi_cpu_id = get_acpi_id_for_cpu(cpu); - struct acpi_pptt_processor *cpu_node = NULL; - int ret = -1; + struct acpi_pptt_processor *cpu_node, *cluster_node; + u32 acpi_cpu_id; + int retval; + int is_thread; - status = acpi_get_table(ACPI_SIG_PPTT, 0, &table); - if (ACPI_FAILURE(status)) { - pr_warn_once("No PPTT table found, topology may be inaccurate\n"); + table = acpi_get_pptt(); + if (!table) + return -ENOENT; + + acpi_cpu_id = get_acpi_id_for_cpu(cpu); + cpu_node = acpi_find_processor_node(table, acpi_cpu_id); + if (!cpu_node || !cpu_node->parent) return -ENOENT; - } - found_cache = acpi_find_cache_node(table, acpi_cpu_id, - CACHE_TYPE_UNIFIED, - level, - &cpu_node); - if (found_cache) - ret = ACPI_PTR_DIFF(cpu_node, table); + is_thread = cpu_node->flags & ACPI_PPTT_ACPI_PROCESSOR_IS_THREAD; + cluster_node = fetch_pptt_node(table, cpu_node->parent); + if (!cluster_node) + return -ENOENT; - acpi_put_table(table); + if (is_thread) { + if (!cluster_node->parent) + return -ENOENT; - return ret; -} + cluster_node = fetch_pptt_node(table, cluster_node->parent); + if (!cluster_node) + return -ENOENT; + } + if (cluster_node->flags & ACPI_PPTT_ACPI_PROCESSOR_ID_VALID) + retval = cluster_node->acpi_processor_id; + else + retval = ACPI_PTR_DIFF(cluster_node, table); + return retval; +} /** - * find_acpi_cpu_topology_package() - Determine a unique cpu package value - * @cpu: Kernel logical cpu number + * find_acpi_cpu_topology_hetero_id() - Get a core architecture tag + * @cpu: Kernel logical CPU number * - * Determine a topology unique package ID for the given cpu. - * This ID can then be used to group peers, which will have matching ids. + * Determine a unique heterogeneous tag for the given CPU. CPUs with the same + * implementation should have matching tags. * - * The search terminates when either a level is found with the PHYSICAL_PACKAGE + * The returned tag can be used to group peers with identical implementation. + * + * The search terminates when a level is found with the identical implementation * flag set or we reach a root node. * - * Return: -ENOENT if the PPTT doesn't exist, or the cpu cannot be found. - * Otherwise returns a value which represents the package for this cpu. + * Due to limitations in the PPTT data structure, there may be rare situations + * where two cores in a heterogeneous machine may be identical, but won't have + * the same tag. + * + * Return: -ENOENT if the PPTT doesn't exist, or the CPU cannot be found. + * Otherwise returns a value which represents a group of identical cores + * similar to this CPU. */ -int find_acpi_cpu_topology_package(unsigned int cpu) +int find_acpi_cpu_topology_hetero_id(unsigned int cpu) { return find_acpi_cpu_topology_tag(cpu, PPTT_ABORT_PACKAGE, - ACPI_PPTT_PHYSICAL_PACKAGE); + ACPI_PPTT_ACPI_IDENTICAL); +} + +/** + * acpi_pptt_get_child_cpus() - Find all the CPUs below a PPTT + * processor hierarchy node + * + * @table_hdr: A reference to the PPTT table + * @parent_node: A pointer to the processor hierarchy node in the + * table_hdr + * @cpus: A cpumask to fill with the CPUs below @parent_node + * + * Walks up the PPTT from every possible CPU to find if the provided + * @parent_node is a parent of this CPU. 
+ */ +static void acpi_pptt_get_child_cpus(struct acpi_table_header *table_hdr, + struct acpi_pptt_processor *parent_node, + cpumask_t *cpus) +{ + struct acpi_pptt_processor *cpu_node; + u32 acpi_id; + int cpu; + + cpumask_clear(cpus); + + for_each_possible_cpu(cpu) { + acpi_id = get_acpi_id_for_cpu(cpu); + cpu_node = acpi_find_processor_node(table_hdr, acpi_id); + + while (cpu_node) { + if (cpu_node == parent_node) { + cpumask_set_cpu(cpu, cpus); + break; + } + cpu_node = fetch_pptt_node(table_hdr, cpu_node->parent); + } + } +} + +/** + * acpi_pptt_get_cpus_from_container() - Populate a cpumask with all CPUs in a + * processor container + * @acpi_cpu_id: The UID of the processor container + * @cpus: The resulting CPU mask + * + * Find the specified Processor Container, and fill @cpus with all the cpus + * below it. + * + * Not all 'Processor Hierarchy' entries in the PPTT are either a CPU + * or a Processor Container, they may exist purely to describe a + * Private resource. CPUs have to be leaves, so a Processor Container + * is a non-leaf that has the 'ACPI Processor ID valid' flag set. + */ +void acpi_pptt_get_cpus_from_container(u32 acpi_cpu_id, cpumask_t *cpus) +{ + struct acpi_table_header *table_hdr; + struct acpi_subtable_header *entry; + unsigned long table_end; + u32 proc_sz; + + cpumask_clear(cpus); + + table_hdr = acpi_get_pptt(); + if (!table_hdr) + return; + + table_end = (unsigned long)table_hdr + table_hdr->length; + entry = ACPI_ADD_PTR(struct acpi_subtable_header, table_hdr, + sizeof(struct acpi_table_pptt)); + proc_sz = sizeof(struct acpi_pptt_processor); + while ((unsigned long)entry + proc_sz <= table_end) { + if (entry->type == ACPI_PPTT_TYPE_PROCESSOR) { + struct acpi_pptt_processor *cpu_node; + + cpu_node = (struct acpi_pptt_processor *)entry; + if (cpu_node->flags & ACPI_PPTT_ACPI_PROCESSOR_ID_VALID && + !acpi_pptt_leaf_node(table_hdr, cpu_node) && + cpu_node->acpi_processor_id == acpi_cpu_id) { + acpi_pptt_get_child_cpus(table_hdr, cpu_node, cpus); + break; + } + } + entry = ACPI_ADD_PTR(struct acpi_subtable_header, entry, + entry->length); + } +} + +/** + * find_acpi_cache_level_from_id() - Get the level of the specified cache + * @cache_id: The id field of the cache + * + * Determine the level relative to any CPU for the cache identified by + * cache_id. This allows the property to be found even if the CPUs are offline. + * + * The returned level can be used to group caches that are peers. + * + * The PPTT table must be rev 3 or later. + * + * If one CPU's L2 is shared with another CPU as L3, this function will return + * an unpredictable value. + * + * Return: -ENOENT if the PPTT doesn't exist, the revision isn't supported or + * the cache cannot be found. + * Otherwise returns a value which represents the level of the specified cache. 
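 *
 * Illustrative use by a caller that only knows the firmware cache id and
 * has a cpumask_t 'shared_cpus' to fill (error handling trimmed):
 *
 *	int level = find_acpi_cache_level_from_id(cache_id);
 *
 *	if (level > 0 &&
 *	    !acpi_pptt_get_cpumask_from_cache_id(cache_id, &shared_cpus))
 *		pr_debug("cache %#x: L%d, CPUs %*pbl\n", cache_id, level,
 *			 cpumask_pr_args(&shared_cpus));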
+ */ +int find_acpi_cache_level_from_id(u32 cache_id) +{ + int cpu; + struct acpi_table_header *table; + + table = acpi_get_pptt(); + if (!table) + return -ENOENT; + + if (table->revision < 3) + return -ENOENT; + + for_each_possible_cpu(cpu) { + bool empty; + int level = 1; + u32 acpi_cpu_id = get_acpi_id_for_cpu(cpu); + struct acpi_pptt_cache *cache; + struct acpi_pptt_processor *cpu_node; + + cpu_node = acpi_find_processor_node(table, acpi_cpu_id); + if (!cpu_node) + continue; + + do { + int cache_type[] = {CACHE_TYPE_INST, CACHE_TYPE_DATA, CACHE_TYPE_UNIFIED}; + + empty = true; + for (int i = 0; i < ARRAY_SIZE(cache_type); i++) { + struct acpi_pptt_cache_v1_full *cache_v1; + + cache = acpi_find_cache_node(table, acpi_cpu_id, cache_type[i], + level, &cpu_node); + if (!cache) + continue; + + empty = false; + + cache_v1 = upgrade_pptt_cache(cache); + if (cache_v1 && cache_v1->cache_id == cache_id) + return level; + } + level++; + } while (!empty); + } + + return -ENOENT; +} + +/** + * acpi_pptt_get_cpumask_from_cache_id() - Get the cpus associated with the + * specified cache + * @cache_id: The id field of the cache + * @cpus: Where to build the cpumask + * + * Determine which CPUs are below this cache in the PPTT. This allows the property + * to be found even if the CPUs are offline. + * + * The PPTT table must be rev 3 or later, + * + * Return: -ENOENT if the PPTT doesn't exist, or the cache cannot be found. + * Otherwise returns 0 and sets the cpus in the provided cpumask. + */ +int acpi_pptt_get_cpumask_from_cache_id(u32 cache_id, cpumask_t *cpus) +{ + int cpu; + struct acpi_table_header *table; + + cpumask_clear(cpus); + + table = acpi_get_pptt(); + if (!table) + return -ENOENT; + + if (table->revision < 3) + return -ENOENT; + + for_each_possible_cpu(cpu) { + bool empty; + int level = 1; + u32 acpi_cpu_id = get_acpi_id_for_cpu(cpu); + struct acpi_pptt_cache *cache; + struct acpi_pptt_processor *cpu_node; + + cpu_node = acpi_find_processor_node(table, acpi_cpu_id); + if (!cpu_node) + continue; + + do { + int cache_type[] = {CACHE_TYPE_INST, CACHE_TYPE_DATA, CACHE_TYPE_UNIFIED}; + + empty = true; + for (int i = 0; i < ARRAY_SIZE(cache_type); i++) { + struct acpi_pptt_cache_v1_full *cache_v1; + + cache = acpi_find_cache_node(table, acpi_cpu_id, cache_type[i], + level, &cpu_node); + + if (!cache) + continue; + + empty = false; + + cache_v1 = upgrade_pptt_cache(cache); + if (cache_v1 && cache_v1->cache_id == cache_id) + cpumask_set_cpu(cpu, cpus); + } + level++; + } while (!empty); + } + + return 0; } diff --git a/drivers/acpi/prmt.c b/drivers/acpi/prmt.c new file mode 100644 index 000000000000..7b8b5d2015ec --- /dev/null +++ b/drivers/acpi/prmt.c @@ -0,0 +1,424 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Author: Erik Kaneda <erik.kaneda@intel.com> + * Copyright 2020 Intel Corporation + * + * prmt.c + * + * Each PRM service is an executable that is run in a restricted environment + * that is invoked by writing to the PlatformRtMechanism OperationRegion from + * AML bytecode. + * + * init_prmt initializes the Platform Runtime Mechanism (PRM) services by + * processing data in the PRMT as well as registering an ACPI OperationRegion + * handler for the PlatformRtMechanism subtype. 
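+ *
+ * Besides the AML path, kernel code can invoke a PRM handler directly by
+ * GUID via acpi_call_prm_handler() below. A minimal sketch, in which the
+ * GUID, err and param_buf are made-up placeholders:
+ *
+ *	static const guid_t fw_guid =
+ *		GUID_INIT(0xdeadbeef, 0x1234, 0x5678,
+ *			  0x90, 0xab, 0xcd, 0xef, 0x01, 0x23, 0x45, 0x67);
+ *
+ *	if (acpi_prm_handler_available(&fw_guid))
+ *		err = acpi_call_prm_handler(fw_guid, param_buf);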
+ * + */ +#include <linux/kernel.h> +#include <linux/efi.h> +#include <linux/acpi.h> +#include <linux/prmt.h> +#include <asm/efi.h> + +#pragma pack(1) +struct prm_mmio_addr_range { + u64 phys_addr; + u64 virt_addr; + u32 length; +}; + +struct prm_mmio_info { + u64 mmio_count; + struct prm_mmio_addr_range addr_ranges[]; +}; + +struct prm_buffer { + u8 prm_status; + u64 efi_status; + u8 prm_cmd; + guid_t handler_guid; +}; + +struct prm_context_buffer { + char signature[ACPI_NAMESEG_SIZE]; + u16 revision; + u16 reserved; + guid_t identifier; + u64 static_data_buffer; + struct prm_mmio_info *mmio_ranges; +}; +#pragma pack() + +static LIST_HEAD(prm_module_list); + +struct prm_handler_info { + efi_guid_t guid; + efi_status_t (__efiapi *handler_addr)(u64, void *); + u64 static_data_buffer_addr; + u64 acpi_param_buffer_addr; + + struct list_head handler_list; +}; + +struct prm_module_info { + guid_t guid; + u16 major_rev; + u16 minor_rev; + u16 handler_count; + struct prm_mmio_info *mmio_info; + bool updatable; + + struct list_head module_list; + struct prm_handler_info handlers[] __counted_by(handler_count); +}; + +static u64 efi_pa_va_lookup(efi_guid_t *guid, u64 pa) +{ + efi_memory_desc_t *md; + u64 pa_offset = pa & ~PAGE_MASK; + u64 page = pa & PAGE_MASK; + + for_each_efi_memory_desc(md) { + if ((md->attribute & EFI_MEMORY_RUNTIME) && + (md->phys_addr < pa && pa < md->phys_addr + PAGE_SIZE * md->num_pages)) { + return pa_offset + md->virt_addr + page - md->phys_addr; + } + } + + return 0; +} + +#define get_first_handler(a) ((struct acpi_prmt_handler_info *) ((char *) (a) + a->handler_info_offset)) +#define get_next_handler(a) ((struct acpi_prmt_handler_info *) (sizeof(struct acpi_prmt_handler_info) + (char *) a)) + +static int __init +acpi_parse_prmt(union acpi_subtable_headers *header, const unsigned long end) +{ + struct acpi_prmt_module_info *module_info; + struct acpi_prmt_handler_info *handler_info; + struct prm_handler_info *th; + struct prm_module_info *tm; + u64 *mmio_count; + u64 cur_handler = 0; + u32 module_info_size = 0; + u64 mmio_range_size = 0; + void *temp_mmio; + + module_info = (struct acpi_prmt_module_info *) header; + module_info_size = struct_size(tm, handlers, module_info->handler_info_count); + tm = kmalloc(module_info_size, GFP_KERNEL); + if (!tm) + goto parse_prmt_out1; + + guid_copy(&tm->guid, (guid_t *) module_info->module_guid); + tm->major_rev = module_info->major_rev; + tm->minor_rev = module_info->minor_rev; + tm->handler_count = module_info->handler_info_count; + tm->updatable = true; + + if (module_info->mmio_list_pointer) { + /* + * Each module is associated with a list of addr + * ranges that it can use during the service + */ + mmio_count = (u64 *) memremap(module_info->mmio_list_pointer, 8, MEMREMAP_WB); + if (!mmio_count) + goto parse_prmt_out2; + + mmio_range_size = struct_size(tm->mmio_info, addr_ranges, *mmio_count); + tm->mmio_info = kmalloc(mmio_range_size, GFP_KERNEL); + if (!tm->mmio_info) + goto parse_prmt_out3; + + temp_mmio = memremap(module_info->mmio_list_pointer, mmio_range_size, MEMREMAP_WB); + if (!temp_mmio) + goto parse_prmt_out4; + memmove(tm->mmio_info, temp_mmio, mmio_range_size); + } else { + tm->mmio_info = kmalloc(sizeof(*tm->mmio_info), GFP_KERNEL); + if (!tm->mmio_info) + goto parse_prmt_out2; + + tm->mmio_info->mmio_count = 0; + } + + INIT_LIST_HEAD(&tm->module_list); + list_add(&tm->module_list, &prm_module_list); + + handler_info = get_first_handler(module_info); + do { + th = &tm->handlers[cur_handler]; + + guid_copy(&th->guid, 
(guid_t *)handler_info->handler_guid); + + /* + * Print an error message if handler_address is NULL, the parse of VA also + * can be skipped. + */ + if (unlikely(!handler_info->handler_address)) { + pr_info("Skipping handler with NULL address for GUID: %pUL", + (guid_t *)handler_info->handler_guid); + continue; + } + + th->handler_addr = + (void *)efi_pa_va_lookup(&th->guid, handler_info->handler_address); + /* + * Print a warning message and skip the parse of VA if handler_addr is zero + * which is not expected to ever happen. + */ + if (unlikely(!th->handler_addr)) { + pr_warn("Failed to find VA of handler for GUID: %pUL, PA: 0x%llx", + &th->guid, handler_info->handler_address); + continue; + } + + th->static_data_buffer_addr = + efi_pa_va_lookup(&th->guid, handler_info->static_data_buffer_address); + /* + * According to the PRM specification, static_data_buffer_address can be zero, + * so avoid printing a warning message in that case. Otherwise, if the + * return value of efi_pa_va_lookup() is zero, print the message. + */ + if (unlikely(!th->static_data_buffer_addr && handler_info->static_data_buffer_address)) + pr_warn("Failed to find VA of static data buffer for GUID: %pUL, PA: 0x%llx", + &th->guid, handler_info->static_data_buffer_address); + + th->acpi_param_buffer_addr = + efi_pa_va_lookup(&th->guid, handler_info->acpi_param_buffer_address); + + /* + * According to the PRM specification, acpi_param_buffer_address can be zero, + * so avoid printing a warning message in that case. Otherwise, if the + * return value of efi_pa_va_lookup() is zero, print the message. + */ + if (unlikely(!th->acpi_param_buffer_addr && handler_info->acpi_param_buffer_address)) + pr_warn("Failed to find VA of acpi param buffer for GUID: %pUL, PA: 0x%llx", + &th->guid, handler_info->acpi_param_buffer_address); + + } while (++cur_handler < tm->handler_count && (handler_info = get_next_handler(handler_info))); + + return 0; + +parse_prmt_out4: + kfree(tm->mmio_info); +parse_prmt_out3: + memunmap(mmio_count); +parse_prmt_out2: + kfree(tm); +parse_prmt_out1: + return -ENOMEM; +} + +#define GET_MODULE 0 +#define GET_HANDLER 1 + +static void *find_guid_info(const guid_t *guid, u8 mode) +{ + struct prm_handler_info *cur_handler; + struct prm_module_info *cur_module; + int i = 0; + + list_for_each_entry(cur_module, &prm_module_list, module_list) { + for (i = 0; i < cur_module->handler_count; ++i) { + cur_handler = &cur_module->handlers[i]; + if (guid_equal(guid, &cur_handler->guid)) { + if (mode == GET_MODULE) + return (void *)cur_module; + else + return (void *)cur_handler; + } + } + } + + return NULL; +} + +static struct prm_module_info *find_prm_module(const guid_t *guid) +{ + return (struct prm_module_info *)find_guid_info(guid, GET_MODULE); +} + +static struct prm_handler_info *find_prm_handler(const guid_t *guid) +{ + return (struct prm_handler_info *) find_guid_info(guid, GET_HANDLER); +} + +bool acpi_prm_handler_available(const guid_t *guid) +{ + return find_prm_handler(guid) && find_prm_module(guid); +} +EXPORT_SYMBOL_GPL(acpi_prm_handler_available); + +/* In-coming PRM commands */ + +#define PRM_CMD_RUN_SERVICE 0 +#define PRM_CMD_START_TRANSACTION 1 +#define PRM_CMD_END_TRANSACTION 2 + +/* statuses that can be passed back to ASL */ + +#define PRM_HANDLER_SUCCESS 0 +#define PRM_HANDLER_ERROR 1 +#define INVALID_PRM_COMMAND 2 +#define PRM_HANDLER_GUID_NOT_FOUND 3 +#define UPDATE_LOCK_ALREADY_HELD 4 +#define UPDATE_UNLOCK_WITHOUT_LOCK 5 + +int acpi_call_prm_handler(guid_t handler_guid, void *param_buffer) +{ 
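+	/*
+	 * Look up both the handler and its owning module: the context buffer
+	 * handed to the EFI runtime handler references the module's MMIO
+	 * ranges as well as the handler's static data buffer.
+	 */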
+ struct prm_handler_info *handler = find_prm_handler(&handler_guid); + struct prm_module_info *module = find_prm_module(&handler_guid); + struct prm_context_buffer context; + efi_status_t status; + + if (!module || !handler) + return -ENODEV; + + memset(&context, 0, sizeof(context)); + ACPI_COPY_NAMESEG(context.signature, "PRMC"); + context.identifier = handler->guid; + context.static_data_buffer = handler->static_data_buffer_addr; + context.mmio_ranges = module->mmio_info; + + status = efi_call_acpi_prm_handler(handler->handler_addr, + (u64)param_buffer, + &context); + + return efi_status_to_err(status); +} +EXPORT_SYMBOL_GPL(acpi_call_prm_handler); + +/* + * This is the PlatformRtMechanism opregion space handler. + * @function: indicates the read/write. In fact as the PlatformRtMechanism + * message is driven by command, only write is meaningful. + * + * @addr : not used + * @bits : not used. + * @value : it is an in/out parameter. It points to the PRM message buffer. + * @handler_context: not used + */ +static acpi_status acpi_platformrt_space_handler(u32 function, + acpi_physical_address addr, + u32 bits, acpi_integer *value, + void *handler_context, + void *region_context) +{ + struct prm_buffer *buffer = ACPI_CAST_PTR(struct prm_buffer, value); + struct prm_handler_info *handler; + struct prm_module_info *module; + efi_status_t status; + struct prm_context_buffer context; + + if (!efi_enabled(EFI_RUNTIME_SERVICES)) { + pr_err_ratelimited("PRM: EFI runtime services no longer available\n"); + return AE_NO_HANDLER; + } + + /* + * The returned acpi_status will always be AE_OK. Error values will be + * saved in the first byte of the PRM message buffer to be used by ASL. + */ + switch (buffer->prm_cmd) { + case PRM_CMD_RUN_SERVICE: + + handler = find_prm_handler(&buffer->handler_guid); + module = find_prm_module(&buffer->handler_guid); + if (!handler || !module) + goto invalid_guid; + + if (!handler->handler_addr) { + buffer->prm_status = PRM_HANDLER_ERROR; + return AE_OK; + } + + ACPI_COPY_NAMESEG(context.signature, "PRMC"); + context.revision = 0x0; + context.reserved = 0x0; + context.identifier = handler->guid; + context.static_data_buffer = handler->static_data_buffer_addr; + context.mmio_ranges = module->mmio_info; + + status = efi_call_acpi_prm_handler(handler->handler_addr, + handler->acpi_param_buffer_addr, + &context); + if (status == EFI_SUCCESS) { + buffer->prm_status = PRM_HANDLER_SUCCESS; + } else { + buffer->prm_status = PRM_HANDLER_ERROR; + buffer->efi_status = status; + } + break; + + case PRM_CMD_START_TRANSACTION: + + module = find_prm_module(&buffer->handler_guid); + if (!module) + goto invalid_guid; + + if (module->updatable) + module->updatable = false; + else + buffer->prm_status = UPDATE_LOCK_ALREADY_HELD; + break; + + case PRM_CMD_END_TRANSACTION: + + module = find_prm_module(&buffer->handler_guid); + if (!module) + goto invalid_guid; + + if (module->updatable) + buffer->prm_status = UPDATE_UNLOCK_WITHOUT_LOCK; + else + module->updatable = true; + break; + + default: + + buffer->prm_status = INVALID_PRM_COMMAND; + break; + } + + return AE_OK; + +invalid_guid: + buffer->prm_status = PRM_HANDLER_GUID_NOT_FOUND; + return AE_OK; +} + +void __init init_prmt(void) +{ + struct acpi_table_header *tbl; + acpi_status status; + int mc; + + status = acpi_get_table(ACPI_SIG_PRMT, 0, &tbl); + if (ACPI_FAILURE(status)) + return; + + mc = acpi_table_parse_entries(ACPI_SIG_PRMT, sizeof(struct acpi_table_prmt) + + sizeof (struct acpi_table_prmt_header), + 0, acpi_parse_prmt, 0); + 
acpi_put_table(tbl); + /* + * Return immediately if PRMT table is not present or no PRM module found. + */ + if (mc <= 0) + return; + + pr_info("PRM: found %u modules\n", mc); + + if (!efi_enabled(EFI_RUNTIME_SERVICES)) { + pr_err("PRM: EFI runtime services unavailable\n"); + return; + } + + status = acpi_install_address_space_handler(ACPI_ROOT_OBJECT, + ACPI_ADR_SPACE_PLATFORM_RT, + &acpi_platformrt_space_handler, + NULL, NULL); + if (ACPI_FAILURE(status)) + pr_alert("PRM: OperationRegion handler could not be installed\n"); +} diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c index 652f19e6c541..c08ead07252b 100644 --- a/drivers/acpi/proc.c +++ b/drivers/acpi/proc.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 #include <linux/proc_fs.h> #include <linux/seq_file.h> -#include <linux/export.h> +#include <linux/string_choices.h> #include <linux/suspend.h> #include <linux/bcd.h> #include <linux/acpi.h> @@ -10,42 +10,36 @@ #include "sleep.h" #include "internal.h" -#define _COMPONENT ACPI_SYSTEM_COMPONENT - /* * this file provides support for: * /proc/acpi/wakeup */ -ACPI_MODULE_NAME("sleep") - static int acpi_system_wakeup_device_seq_show(struct seq_file *seq, void *offset) { - struct list_head *node, *next; + struct acpi_device *dev, *tmp; seq_printf(seq, "Device\tS-state\t Status Sysfs node\n"); mutex_lock(&acpi_device_lock); - list_for_each_safe(node, next, &acpi_wakeup_device_list) { - struct acpi_device *dev = - container_of(node, struct acpi_device, wakeup_list); + list_for_each_entry_safe(dev, tmp, &acpi_wakeup_device_list, + wakeup_list) { struct acpi_device_physical_node *entry; if (!dev->wakeup.flags.valid) continue; - seq_printf(seq, "%s\t S%d\t", + seq_printf(seq, "%s\t S%llu\t", dev->pnp.bus_id, - (u32) dev->wakeup.sleep_state); + dev->wakeup.sleep_state); mutex_lock(&dev->physical_node_lock); if (!dev->physical_node_count) { seq_printf(seq, "%c%-8s\n", dev->wakeup.flags.valid ? '*' : ' ', - device_may_wakeup(&dev->dev) ? - "enabled" : "disabled"); + str_enabled_disabled(device_may_wakeup(&dev->dev))); } else { struct device *ldev; list_for_each_entry(entry, &dev->physical_node_list, @@ -60,9 +54,8 @@ acpi_system_wakeup_device_seq_show(struct seq_file *seq, void *offset) seq_printf(seq, "%c%-8s %s:%s\n", dev->wakeup.flags.valid ? '*' : ' ', - (device_may_wakeup(&dev->dev) || - device_may_wakeup(ldev)) ? - "enabled" : "disabled", + str_enabled_disabled(device_may_wakeup(ldev) || + device_may_wakeup(&dev->dev)), ldev->bus ? 
ldev->bus->name : "no-bus", dev_name(ldev)); put_device(ldev); @@ -96,7 +89,7 @@ acpi_system_write_wakeup_device(struct file *file, const char __user * buffer, size_t count, loff_t * ppos) { - struct list_head *node, *next; + struct acpi_device *dev, *tmp; char strbuf[5]; char str[5] = ""; @@ -109,9 +102,8 @@ acpi_system_write_wakeup_device(struct file *file, sscanf(strbuf, "%s", str); mutex_lock(&acpi_device_lock); - list_for_each_safe(node, next, &acpi_wakeup_device_list) { - struct acpi_device *dev = - container_of(node, struct acpi_device, wakeup_list); + list_for_each_entry_safe(dev, tmp, &acpi_wakeup_device_list, + wakeup_list) { if (!dev->wakeup.flags.valid) continue; @@ -133,21 +125,19 @@ static int acpi_system_wakeup_device_open_fs(struct inode *inode, struct file *file) { return single_open(file, acpi_system_wakeup_device_seq_show, - PDE_DATA(inode)); + pde_data(inode)); } -static const struct file_operations acpi_system_wakeup_device_fops = { - .owner = THIS_MODULE, - .open = acpi_system_wakeup_device_open_fs, - .read = seq_read, - .write = acpi_system_write_wakeup_device, - .llseek = seq_lseek, - .release = single_release, +static const struct proc_ops acpi_system_wakeup_device_proc_ops = { + .proc_open = acpi_system_wakeup_device_open_fs, + .proc_read = seq_read, + .proc_write = acpi_system_write_wakeup_device, + .proc_lseek = seq_lseek, + .proc_release = single_release, }; void __init acpi_sleep_proc_init(void) { /* 'wakeup device' [R/W] */ - proc_create("wakeup", S_IFREG | S_IRUGO | S_IWUSR, - acpi_root_dir, &acpi_system_wakeup_device_fops); + proc_create("wakeup", 0644, acpi_root_dir, &acpi_system_wakeup_device_proc_ops); } diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c index 8c0a54d50d0e..a4498357bd16 100644 --- a/drivers/acpi/processor_core.c +++ b/drivers/acpi/processor_core.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2005 Intel Corporation * Copyright (C) 2009 Hewlett-Packard Development Company, L.P. 
@@ -13,9 +14,6 @@ #include <linux/acpi.h> #include <acpi/processor.h> -#define _COMPONENT ACPI_PROCESSOR_COMPONENT -ACPI_MODULE_NAME("processor_core"); - static struct acpi_table_madt *get_madt_table(void) { static struct acpi_table_madt *madt; @@ -56,7 +54,7 @@ static int map_x2apic_id(struct acpi_subtable_header *entry, if (!(apic->lapic_flags & ACPI_MADT_ENABLED)) return -ENODEV; - if (device_declaration && (apic->uid == acpi_id)) { + if (apic->uid == acpi_id && (device_declaration || acpi_id < 255)) { *apic_id = apic->local_apic_id; return 0; } @@ -92,7 +90,8 @@ static int map_gicc_mpidr(struct acpi_subtable_header *entry, struct acpi_madt_generic_interrupt *gicc = container_of(entry, struct acpi_madt_generic_interrupt, header); - if (!(gicc->flags & ACPI_MADT_ENABLED)) + if (!(gicc->flags & + (ACPI_MADT_ENABLED | ACPI_MADT_GICC_ONLINE_CAPABLE))) return -ENODEV; /* device_declaration means Device object in DSDT, in the @@ -108,6 +107,56 @@ static int map_gicc_mpidr(struct acpi_subtable_header *entry, return -EINVAL; } +/* + * Retrieve the RISC-V hartid for the processor + */ +static int map_rintc_hartid(struct acpi_subtable_header *entry, + int device_declaration, u32 acpi_id, + phys_cpuid_t *hartid) +{ + struct acpi_madt_rintc *rintc = + container_of(entry, struct acpi_madt_rintc, header); + + if (!(rintc->flags & ACPI_MADT_ENABLED)) + return -ENODEV; + + /* device_declaration means Device object in DSDT, in the + * RISC-V, logical processors are required to + * have a Processor Device object in the DSDT, so we should + * check device_declaration here + */ + if (device_declaration && rintc->uid == acpi_id) { + *hartid = rintc->hart_id; + return 0; + } + + return -EINVAL; +} + +/* + * Retrieve LoongArch CPU physical id + */ +static int map_core_pic_id(struct acpi_subtable_header *entry, + int device_declaration, u32 acpi_id, phys_cpuid_t *phys_id) +{ + struct acpi_madt_core_pic *core_pic = + container_of(entry, struct acpi_madt_core_pic, header); + + if (!(core_pic->flags & ACPI_MADT_ENABLED)) + return -ENODEV; + + /* device_declaration means Device object in DSDT, in LoongArch + * system, logical processor acpi_id is required in _UID property + * of DSDT table, so we should check device_declaration here + */ + if (device_declaration && (core_pic->processor_id == acpi_id)) { + *phys_id = core_pic->core_id; + return 0; + } + + return -EINVAL; +} + static phys_cpuid_t map_madt_entry(struct acpi_table_madt *madt, int type, u32 acpi_id) { @@ -138,6 +187,12 @@ static phys_cpuid_t map_madt_entry(struct acpi_table_madt *madt, } else if (header->type == ACPI_MADT_TYPE_GENERIC_INTERRUPT) { if (!map_gicc_mpidr(header, type, acpi_id, &phys_id)) break; + } else if (header->type == ACPI_MADT_TYPE_RINTC) { + if (!map_rintc_hartid(header, type, acpi_id, &phys_id)) + break; + } else if (header->type == ACPI_MADT_TYPE_CORE_PIC) { + if (!map_core_pic_id(header, type, acpi_id, &phys_id)) + break; } entry += header->length; } @@ -161,6 +216,21 @@ phys_cpuid_t __init acpi_map_madt_entry(u32 acpi_id) return rv; } +int __init acpi_get_madt_revision(void) +{ + struct acpi_table_header *madt = NULL; + int revision; + + if (ACPI_FAILURE(acpi_get_table(ACPI_SIG_MADT, 0, &madt))) + return -EINVAL; + + revision = madt->revision; + + acpi_put_table(madt); + + return revision; +} + static phys_cpuid_t map_mat_entry(acpi_handle handle, int type, u32 acpi_id) { struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; @@ -189,6 +259,8 @@ static phys_cpuid_t map_mat_entry(acpi_handle handle, int type, u32 acpi_id) 
map_x2apic_id(header, type, acpi_id, &phys_id); else if (header->type == ACPI_MADT_TYPE_GENERIC_INTERRUPT) map_gicc_mpidr(header, type, acpi_id, &phys_id); + else if (header->type == ACPI_MADT_TYPE_CORE_PIC) + map_core_pic_id(header, type, acpi_id, &phys_id); exit: kfree(buffer.pointer); diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c index 9d6aff22684e..65e779be64ff 100644 --- a/drivers/acpi/processor_driver.c +++ b/drivers/acpi/processor_driver.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /* * processor_driver.c - ACPI Processor Driver * @@ -8,20 +9,6 @@ * - Added processor hotplug support * Copyright (C) 2013, Intel Corporation * Rafael J. Wysocki <rafael.j.wysocki@intel.com> - * - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or (at - * your option) any later version. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ #include <linux/kernel.h> @@ -40,15 +27,12 @@ #define ACPI_PROCESSOR_NOTIFY_PERFORMANCE 0x80 #define ACPI_PROCESSOR_NOTIFY_POWER 0x81 #define ACPI_PROCESSOR_NOTIFY_THROTTLING 0x82 - -#define _COMPONENT ACPI_PROCESSOR_COMPONENT -ACPI_MODULE_NAME("processor_driver"); +#define ACPI_PROCESSOR_NOTIFY_HIGEST_PERF_CHANGED 0x85 MODULE_AUTHOR("Paul Diefenbaugh"); MODULE_DESCRIPTION("ACPI Processor Driver"); MODULE_LICENSE("GPL"); -static int acpi_processor_start(struct device *dev); static int acpi_processor_stop(struct device *dev); static const struct acpi_device_id processor_device_ids[] = { @@ -62,7 +46,6 @@ static struct device_driver acpi_processor_driver = { .name = "processor", .bus = &cpu_subsys, .acpi_match_table = processor_device_ids, - .probe = acpi_processor_start, .remove = acpi_processor_stop, }; @@ -99,9 +82,13 @@ static void acpi_processor_notify(acpi_handle handle, u32 event, void *data) acpi_bus_generate_netlink_event(device->pnp.device_class, dev_name(&device->dev), event, 0); break; + case ACPI_PROCESSOR_NOTIFY_HIGEST_PERF_CHANGED: + cpufreq_update_limits(pr->id); + acpi_bus_generate_netlink_event(device->pnp.device_class, + dev_name(&device->dev), event, 0); + break; default: - ACPI_DEBUG_PRINT((ACPI_DB_INFO, - "Unsupported event [0x%x]\n", event)); + acpi_handle_debug(handle, "Unsupported event [0x%x]\n", event); break; } @@ -115,18 +102,20 @@ static int acpi_soft_cpu_online(unsigned int cpu) struct acpi_processor *pr = per_cpu(processors, cpu); struct acpi_device *device; - if (!pr || acpi_bus_get_device(pr->handle, &device)) + if (!pr) + return 0; + + device = acpi_fetch_acpi_dev(pr->handle); + if (!device) return 0; + /* * CPU got physically hotplugged and onlined for the first time: * Initialize missing things. 
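	 * (The previously_online flag checked below is set at the end of
	 * __acpi_processor_start(), so this path only runs on the first
	 * online of a physically hotplugged CPU.)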
*/ - if (pr->flags.need_hotplug_init) { + if (!pr->flags.previously_online) { int ret; - pr_info("Will online and init hotplugged CPU: %d\n", - pr->id); - pr->flags.need_hotplug_init = 0; ret = __acpi_processor_start(device); WARN(ret, "Failed to start CPU: %d\n", pr->id); } else { @@ -142,9 +131,8 @@ static int acpi_soft_cpu_online(unsigned int cpu) static int acpi_soft_cpu_dead(unsigned int cpu) { struct acpi_processor *pr = per_cpu(processors, cpu); - struct acpi_device *device; - if (!pr || acpi_bus_get_device(pr->handle, &device)) + if (!pr || !acpi_fetch_acpi_dev(pr->handle)) return 0; acpi_processor_reevaluate_tstate(pr, true); @@ -152,75 +140,17 @@ static int acpi_soft_cpu_dead(unsigned int cpu) } #ifdef CONFIG_ACPI_CPU_FREQ_PSS -static int acpi_pss_perf_init(struct acpi_processor *pr, - struct acpi_device *device) +static void acpi_pss_perf_init(struct acpi_processor *pr) { - int result = 0; - acpi_processor_ppc_has_changed(pr, 0); acpi_processor_get_throttling_info(pr); if (pr->flags.throttling) pr->flags.limit = 1; - - pr->cdev = thermal_cooling_device_register("Processor", device, - &processor_cooling_ops); - if (IS_ERR(pr->cdev)) { - result = PTR_ERR(pr->cdev); - return result; - } - - dev_dbg(&device->dev, "registered as cooling_device%d\n", - pr->cdev->id); - - result = sysfs_create_link(&device->dev.kobj, - &pr->cdev->device.kobj, - "thermal_cooling"); - if (result) { - dev_err(&device->dev, - "Failed to create sysfs link 'thermal_cooling'\n"); - goto err_thermal_unregister; - } - - result = sysfs_create_link(&pr->cdev->device.kobj, - &device->dev.kobj, - "device"); - if (result) { - dev_err(&pr->cdev->device, - "Failed to create sysfs link 'device'\n"); - goto err_remove_sysfs_thermal; - } - - return 0; - - err_remove_sysfs_thermal: - sysfs_remove_link(&device->dev.kobj, "thermal_cooling"); - err_thermal_unregister: - thermal_cooling_device_unregister(pr->cdev); - - return result; -} - -static void acpi_pss_perf_exit(struct acpi_processor *pr, - struct acpi_device *device) -{ - if (pr->cdev) { - sysfs_remove_link(&device->dev.kobj, "thermal_cooling"); - sysfs_remove_link(&pr->cdev->device.kobj, "device"); - thermal_cooling_device_unregister(pr->cdev); - pr->cdev = NULL; - } } #else -static inline int acpi_pss_perf_init(struct acpi_processor *pr, - struct acpi_device *device) -{ - return 0; -} - -static inline void acpi_pss_perf_exit(struct acpi_processor *pr, - struct acpi_device *device) {} +static inline void acpi_pss_perf_init(struct acpi_processor *pr) {} #endif /* CONFIG_ACPI_CPU_FREQ_PSS */ static int __acpi_processor_start(struct acpi_device *device) @@ -232,9 +162,6 @@ static int __acpi_processor_start(struct acpi_device *device) if (!pr) return -ENODEV; - if (pr->flags.need_hotplug_init) - return 0; - result = acpi_cppc_processor_probe(pr); if (result && !IS_ENABLED(CONFIG_ACPI_CPU_FREQ_PSS)) dev_dbg(&device->dev, "CPPC data invalid or not present\n"); @@ -242,38 +169,29 @@ static int __acpi_processor_start(struct acpi_device *device) if (!cpuidle_get_driver() || cpuidle_get_driver() == &acpi_idle_driver) acpi_processor_power_init(pr); - result = acpi_pss_perf_init(pr, device); + acpi_pss_perf_init(pr); + + result = acpi_processor_thermal_init(pr, device); if (result) goto err_power_exit; status = acpi_install_notify_handler(device->handle, ACPI_DEVICE_NOTIFY, acpi_processor_notify, device); - if (ACPI_SUCCESS(status)) - return 0; + if (!ACPI_SUCCESS(status)) { + result = -ENODEV; + goto err_thermal_exit; + } + pr->flags.previously_online = 1; - result = -ENODEV; 
- acpi_pss_perf_exit(pr, device); + return 0; +err_thermal_exit: + acpi_processor_thermal_exit(pr, device); err_power_exit: acpi_processor_power_exit(pr); return result; } -static int acpi_processor_start(struct device *dev) -{ - struct acpi_device *device = ACPI_COMPANION(dev); - int ret; - - if (!device) - return -ENODEV; - - /* Protect against concurrent CPU hotplug operations */ - cpu_hotplug_disable(); - ret = __acpi_processor_start(device); - cpu_hotplug_enable(); - return ret; -} - static int acpi_processor_stop(struct device *dev) { struct acpi_device *device = ACPI_COMPANION(dev); @@ -290,13 +208,38 @@ static int acpi_processor_stop(struct device *dev) return 0; acpi_processor_power_exit(pr); - acpi_pss_perf_exit(pr, device); - acpi_cppc_processor_exit(pr); + acpi_processor_thermal_exit(pr, device); + + return 0; +} + +bool acpi_processor_cpufreq_init; + +static int acpi_processor_notifier(struct notifier_block *nb, + unsigned long event, void *data) +{ + struct cpufreq_policy *policy = data; + + if (event == CPUFREQ_CREATE_POLICY) { + acpi_thermal_cpufreq_init(policy); + acpi_processor_ppc_init(policy); + } else if (event == CPUFREQ_REMOVE_POLICY) { + acpi_processor_ppc_exit(policy); + acpi_thermal_cpufreq_exit(policy); + } + return 0; } +static struct notifier_block acpi_processor_notifier_block = { + .notifier_call = acpi_processor_notifier, +}; + +void __weak acpi_processor_init_invariance_cppc(void) +{ } + /* * We keep the driver loaded even when ACPI is not running. * This is needed for the powernow-k8 driver, that works even without @@ -310,22 +253,35 @@ static int __init acpi_processor_driver_init(void) if (acpi_disabled) return 0; + if (!cpufreq_register_notifier(&acpi_processor_notifier_block, + CPUFREQ_POLICY_NOTIFIER)) { + acpi_processor_cpufreq_init = true; + acpi_processor_ignore_ppc_init(); + } + result = driver_register(&acpi_processor_driver); if (result < 0) return result; - result = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, - "acpi/cpu-drv:online", - acpi_soft_cpu_online, NULL); + result = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, + "acpi/cpu-drv:online", + acpi_soft_cpu_online, NULL); if (result < 0) goto err; hp_online = result; cpuhp_setup_state_nocalls(CPUHP_ACPI_CPUDRV_DEAD, "acpi/cpu-drv:dead", NULL, acpi_soft_cpu_dead); - acpi_thermal_cpufreq_init(); - acpi_processor_ppc_init(); acpi_processor_throttling_init(); + + /* + * Frequency invariance calculations on AMD platforms can't be run until + * after acpi_cppc_processor_probe() has been called for all online CPUs + */ + acpi_processor_init_invariance_cppc(); + + acpi_idle_rescan_dead_smt_siblings(); + return 0; err: driver_unregister(&acpi_processor_driver); @@ -337,8 +293,12 @@ static void __exit acpi_processor_driver_exit(void) if (acpi_disabled) return; - acpi_processor_ppc_exit(); - acpi_thermal_cpufreq_exit(); + if (acpi_processor_cpufreq_init) { + cpufreq_unregister_notifier(&acpi_processor_notifier_block, + CPUFREQ_POLICY_NOTIFIER); + acpi_processor_cpufreq_init = false; + } + cpuhp_remove_state_nocalls(hp_online); cpuhp_remove_state_nocalls(CPUHP_ACPI_CPUDRV_DEAD); driver_unregister(&acpi_processor_driver); diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c index b2131c4ea124..89f2f08b2554 100644 --- a/drivers/acpi/processor_idle.c +++ b/drivers/acpi/processor_idle.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /* * processor_idle - idle state submodule to the ACPI processor driver * @@ -8,20 +9,6 @@ * - Added processor hotplug support * Copyright (C) 
2005 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com> * - Added support for C3 on SMP - * - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or (at - * your option) any later version. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ #define pr_fmt(fmt) "ACPI: " fmt @@ -32,7 +19,12 @@ #include <linux/tick.h> #include <linux/cpuidle.h> #include <linux/cpu.h> +#include <linux/minmax.h> +#include <linux/perf_event.h> #include <acpi/processor.h> +#include <linux/context_tracking.h> + +#include "internal.h" /* * Include the apic definitions for x86 to have the APIC timer related defines @@ -42,20 +34,17 @@ */ #ifdef CONFIG_X86 #include <asm/apic.h> +#include <asm/cpu.h> #endif -#define ACPI_PROCESSOR_CLASS "processor" -#define _COMPONENT ACPI_PROCESSOR_COMPONENT -ACPI_MODULE_NAME("processor_idle"); - #define ACPI_IDLE_STATE_START (IS_ENABLED(CONFIG_ARCH_HAS_CPU_RELAX) ? 1 : 0) static unsigned int max_cstate __read_mostly = ACPI_PROCESSOR_MAX_POWER; -module_param(max_cstate, uint, 0000); -static unsigned int nocst __read_mostly; -module_param(nocst, uint, 0000); -static int bm_check_disable __read_mostly; -module_param(bm_check_disable, uint, 0000); +module_param(max_cstate, uint, 0400); +static bool nocst __read_mostly; +module_param(nocst, bool, 0400); +static bool bm_check_disable __read_mostly; +module_param(bm_check_disable, bool, 0400); static unsigned int latency_factor __read_mostly = 2; module_param(latency_factor, uint, 0644); @@ -68,6 +57,12 @@ struct cpuidle_driver acpi_idle_driver = { }; #ifdef CONFIG_ACPI_PROCESSOR_CSTATE +void acpi_idle_rescan_dead_smt_siblings(void) +{ + if (cpuidle_get_driver() == &acpi_idle_driver) + arch_cpu_rescan_dead_smt_siblings(); +} + static DEFINE_PER_CPU(struct acpi_processor_cx * [CPUIDLE_STATE_MAX], acpi_cstate); @@ -121,8 +116,8 @@ static const struct dmi_system_id processor_power_dmi_table[] = { static void __cpuidle acpi_safe_halt(void) { if (!tif_need_resched()) { - safe_halt(); - local_irq_disable(); + raw_safe_halt(); + raw_local_irq_disable(); } } @@ -159,7 +154,7 @@ static void lapic_timer_check_state(int state, struct acpi_processor *pr, static void __lapic_timer_propagate_broadcast(void *arg) { - struct acpi_processor *pr = (struct acpi_processor *) arg; + struct acpi_processor *pr = arg; if (pr->power.timer_broadcast_on_state < INT_MAX) tick_broadcast_enable(); @@ -174,18 +169,10 @@ static void lapic_timer_propagate_broadcast(struct acpi_processor *pr) } /* Power(C) State timer broadcast control */ -static void lapic_timer_state_broadcast(struct acpi_processor *pr, - struct acpi_processor_cx *cx, - int broadcast) +static bool lapic_timer_needs_broadcast(struct acpi_processor *pr, + struct acpi_processor_cx *cx) { - int state = cx - pr->power.states; - - if (state >= pr->power.timer_broadcast_on_state) { - if (broadcast) - tick_broadcast_enter(); - else - tick_broadcast_exit(); - } + return cx - pr->power.states >= pr->power.timer_broadcast_on_state; } #else @@ -193,10 +180,11 @@ static void 
lapic_timer_state_broadcast(struct acpi_processor *pr, static void lapic_timer_check_state(int state, struct acpi_processor *pr, struct acpi_processor_cx *cstate) { } static void lapic_timer_propagate_broadcast(struct acpi_processor *pr) { } -static void lapic_timer_state_broadcast(struct acpi_processor *pr, - struct acpi_processor_cx *cx, - int broadcast) + +static bool lapic_timer_needs_broadcast(struct acpi_processor *pr, + struct acpi_processor_cx *cx) { + return false; } #endif @@ -209,14 +197,14 @@ static void tsc_check_state(int state) case X86_VENDOR_AMD: case X86_VENDOR_INTEL: case X86_VENDOR_CENTAUR: + case X86_VENDOR_ZHAOXIN: /* * AMD Fam10h TSC will tick in all * C/P/S0/S1 states when this bit is set. */ if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC)) return; - - /*FALL THROUGH*/ + fallthrough; default: /* TSC could halt in idle, so notify users */ if (state > ACPI_STATE_C1) @@ -260,8 +248,8 @@ static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr) * 100 microseconds. */ if (acpi_gbl_FADT.c2_latency > ACPI_PROCESSOR_MAX_C2_LATENCY) { - ACPI_DEBUG_PRINT((ACPI_DB_INFO, - "C2 latency too large [%d]\n", acpi_gbl_FADT.c2_latency)); + acpi_handle_debug(pr->handle, "C2 latency too large [%d]\n", + acpi_gbl_FADT.c2_latency); /* invalidate C2 */ pr->power.states[ACPI_STATE_C2].address = 0; } @@ -271,16 +259,26 @@ static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr) * 1000 microseconds. */ if (acpi_gbl_FADT.c3_latency > ACPI_PROCESSOR_MAX_C3_LATENCY) { - ACPI_DEBUG_PRINT((ACPI_DB_INFO, - "C3 latency too large [%d]\n", acpi_gbl_FADT.c3_latency)); + acpi_handle_debug(pr->handle, "C3 latency too large [%d]\n", + acpi_gbl_FADT.c3_latency); /* invalidate C3 */ pr->power.states[ACPI_STATE_C3].address = 0; } - ACPI_DEBUG_PRINT((ACPI_DB_INFO, - "lvl2[0x%08x] lvl3[0x%08x]\n", + acpi_handle_debug(pr->handle, "lvl2[0x%08x] lvl3[0x%08x]\n", pr->power.states[ACPI_STATE_C2].address, - pr->power.states[ACPI_STATE_C3].address)); + pr->power.states[ACPI_STATE_C3].address); + + snprintf(pr->power.states[ACPI_STATE_C2].desc, + ACPI_CX_DESC_LEN, "ACPI P_LVL2 IOPORT 0x%x", + pr->power.states[ACPI_STATE_C2].address); + snprintf(pr->power.states[ACPI_STATE_C3].desc, + ACPI_CX_DESC_LEN, "ACPI P_LVL3 IOPORT 0x%x", + pr->power.states[ACPI_STATE_C3].address); + + if (!pr->power.states[ACPI_STATE_C2].address && + !pr->power.states[ACPI_STATE_C3].address) + return -ENODEV; return 0; } @@ -304,164 +302,20 @@ static int acpi_processor_get_power_info_default(struct acpi_processor *pr) static int acpi_processor_get_power_info_cst(struct acpi_processor *pr) { - acpi_status status; - u64 count; - int current_count; - int i, ret = 0; - struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; - union acpi_object *cst; + int ret; if (nocst) return -ENODEV; - current_count = 0; - - status = acpi_evaluate_object(pr->handle, "_CST", NULL, &buffer); - if (ACPI_FAILURE(status)) { - ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No _CST, giving up\n")); - return -ENODEV; - } - - cst = buffer.pointer; - - /* There must be at least 2 elements */ - if (!cst || (cst->type != ACPI_TYPE_PACKAGE) || cst->package.count < 2) { - pr_err("not enough elements in _CST\n"); - ret = -EFAULT; - goto end; - } - - count = cst->package.elements[0].integer.value; + ret = acpi_processor_evaluate_cst(pr->handle, pr->id, &pr->power); + if (ret) + return ret; - /* Validate number of power states. 
*/ - if (count < 1 || count != cst->package.count - 1) { - pr_err("count given by _CST is not valid\n"); - ret = -EFAULT; - goto end; - } + if (!pr->power.count) + return -EFAULT; - /* Tell driver that at least _CST is supported. */ pr->flags.has_cst = 1; - - for (i = 1; i <= count; i++) { - union acpi_object *element; - union acpi_object *obj; - struct acpi_power_register *reg; - struct acpi_processor_cx cx; - - memset(&cx, 0, sizeof(cx)); - - element = &(cst->package.elements[i]); - if (element->type != ACPI_TYPE_PACKAGE) - continue; - - if (element->package.count != 4) - continue; - - obj = &(element->package.elements[0]); - - if (obj->type != ACPI_TYPE_BUFFER) - continue; - - reg = (struct acpi_power_register *)obj->buffer.pointer; - - if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO && - (reg->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE)) - continue; - - /* There should be an easy way to extract an integer... */ - obj = &(element->package.elements[1]); - if (obj->type != ACPI_TYPE_INTEGER) - continue; - - cx.type = obj->integer.value; - /* - * Some buggy BIOSes won't list C1 in _CST - - * Let acpi_processor_get_power_info_default() handle them later - */ - if (i == 1 && cx.type != ACPI_STATE_C1) - current_count++; - - cx.address = reg->address; - cx.index = current_count + 1; - - cx.entry_method = ACPI_CSTATE_SYSTEMIO; - if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) { - if (acpi_processor_ffh_cstate_probe - (pr->id, &cx, reg) == 0) { - cx.entry_method = ACPI_CSTATE_FFH; - } else if (cx.type == ACPI_STATE_C1) { - /* - * C1 is a special case where FIXED_HARDWARE - * can be handled in non-MWAIT way as well. - * In that case, save this _CST entry info. - * Otherwise, ignore this info and continue. - */ - cx.entry_method = ACPI_CSTATE_HALT; - snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT"); - } else { - continue; - } - if (cx.type == ACPI_STATE_C1 && - (boot_option_idle_override == IDLE_NOMWAIT)) { - /* - * In most cases the C1 space_id obtained from - * _CST object is FIXED_HARDWARE access mode. - * But when the option of idle=halt is added, - * the entry_method type should be changed from - * CSTATE_FFH to CSTATE_HALT. - * When the option of idle=nomwait is added, - * the C1 entry_method type should be - * CSTATE_HALT. 
- */ - cx.entry_method = ACPI_CSTATE_HALT; - snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT"); - } - } else { - snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI IOPORT 0x%x", - cx.address); - } - - if (cx.type == ACPI_STATE_C1) { - cx.valid = 1; - } - - obj = &(element->package.elements[2]); - if (obj->type != ACPI_TYPE_INTEGER) - continue; - - cx.latency = obj->integer.value; - - obj = &(element->package.elements[3]); - if (obj->type != ACPI_TYPE_INTEGER) - continue; - - current_count++; - memcpy(&(pr->power.states[current_count]), &cx, sizeof(cx)); - - /* - * We support total ACPI_PROCESSOR_MAX_POWER - 1 - * (From 1 through ACPI_PROCESSOR_MAX_POWER - 1) - */ - if (current_count >= (ACPI_PROCESSOR_MAX_POWER - 1)) { - pr_warn("Limiting number of power states to max (%d)\n", - ACPI_PROCESSOR_MAX_POWER); - pr_warn("Please increase ACPI_PROCESSOR_MAX_POWER if needed.\n"); - break; - } - } - - ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d power states\n", - current_count)); - - /* Validate number of power states discovered */ - if (current_count < 2) - ret = -EFAULT; - - end: - kfree(buffer.pointer); - - return ret; + return 0; } static void acpi_processor_power_verify_c3(struct acpi_processor *pr, @@ -481,9 +335,9 @@ static void acpi_processor_power_verify_c3(struct acpi_processor *pr, * the erratum), but this is known to disrupt certain ISA * devices thus we take the conservative approach. */ - else if (errata.piix4.fdma) { - ACPI_DEBUG_PRINT((ACPI_DB_INFO, - "C3 not supported on PIIX4 with Type-F DMA\n")); + if (errata.piix4.fdma) { + acpi_handle_debug(pr->handle, + "C3 not supported on PIIX4 with Type-F DMA\n"); return; } @@ -502,13 +356,13 @@ static void acpi_processor_power_verify_c3(struct acpi_processor *pr, if (!pr->flags.bm_control) { if (pr->flags.has_cst != 1) { /* bus mastering control is necessary */ - ACPI_DEBUG_PRINT((ACPI_DB_INFO, - "C3 support requires BM control\n")); + acpi_handle_debug(pr->handle, + "C3 support requires BM control\n"); return; } else { /* Here we enter C3 without bus mastering */ - ACPI_DEBUG_PRINT((ACPI_DB_INFO, - "C3 support without BM control\n")); + acpi_handle_debug(pr->handle, + "C3 support without BM control\n"); } } } else { @@ -517,9 +371,9 @@ static void acpi_processor_power_verify_c3(struct acpi_processor *pr, * supported on when bm_check is not required. */ if (!(acpi_gbl_FADT.flags & ACPI_FADT_WBINVD)) { - ACPI_DEBUG_PRINT((ACPI_DB_INFO, + acpi_handle_debug(pr->handle, "Cache invalidation should work properly" - " for C3 to be enabled on SMP systems\n")); + " for C3 to be enabled on SMP systems\n"); return; } } @@ -541,14 +395,35 @@ static void acpi_processor_power_verify_c3(struct acpi_processor *pr, * handle BM_RLD is to set it and leave it set. 
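	 * (BM_RLD controls whether bus-master activity causes a return from
	 * C3 to C0; setting it once and leaving it set avoids toggling it on
	 * every idle entry.)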
*/ acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, 1); +} + +static void acpi_cst_latency_sort(struct acpi_processor_cx *states, size_t length) +{ + int i, j, k; + + for (i = 1; i < length; i++) { + if (!states[i].valid) + continue; - return; + for (j = i - 1, k = i; j >= 0; j--) { + if (!states[j].valid) + continue; + + if (states[j].latency > states[k].latency) + swap(states[j].latency, states[k].latency); + + k = j; + } + } } static int acpi_processor_power_verify(struct acpi_processor *pr) { unsigned int i; unsigned int working = 0; + unsigned int last_latency = 0; + unsigned int last_type = 0; + bool buggy_latency = false; pr->power.timer_broadcast_on_state = INT_MAX; @@ -572,23 +447,30 @@ static int acpi_processor_power_verify(struct acpi_processor *pr) } if (!cx->valid) continue; + if (cx->type >= last_type && cx->latency < last_latency) + buggy_latency = true; + last_latency = cx->latency; + last_type = cx->type; lapic_timer_check_state(i, pr, cx); tsc_check_state(cx->type); working++; } + if (buggy_latency) { + pr_notice("FW issue: working around C-state latencies out of order\n"); + acpi_cst_latency_sort(&pr->power.states[1], max_cstate); + } + lapic_timer_propagate_broadcast(pr); - return (working); + return working; } static int acpi_processor_get_cstate_info(struct acpi_processor *pr) { - unsigned int i; int result; - /* NOTE: the idle thread may not be running while calling * this function */ @@ -605,18 +487,7 @@ static int acpi_processor_get_cstate_info(struct acpi_processor *pr) acpi_processor_get_power_info_default(pr); pr->power.count = acpi_processor_power_verify(pr); - - /* - * if one state of type C2 or C3 is available, mark this - * CPU as being "idle manageable" - */ - for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) { - if (pr->power.states[i].valid) { - pr->power.count = i; - if (pr->power.states[i].type >= ACPI_STATE_C2) - pr->flags.power = 1; - } - } + pr->flags.power = 1; return 0; } @@ -647,6 +518,39 @@ static int acpi_idle_bm_check(void) return bm_status; } +static __cpuidle void io_idle(unsigned long addr) +{ + /* IO port based C-state */ + inb(addr); + +#ifdef CONFIG_X86 + /* No delay is needed if we are in guest */ + if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) + return; + /* + * Modern (>=Nehalem) Intel systems use ACPI via intel_idle, + * not this code. Assume that any Intel systems using this + * are ancient and may need the dummy wait. This also assumes + * that the motivating chipset issue was Intel-only. + */ + if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) + return; +#endif + /* + * Dummy wait op - must do something useless after P_LVL2 read + * because chipsets cannot guarantee that STPCLK# signal gets + * asserted in time to freeze execution properly + * + * This workaround has been in place since the original ACPI + * implementation was merged, circa 2002. + * + * If a profile is pointing to this instruction, please first + * consider moving your system to a more modern idle + * mechanism. 
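+	 * (The inl() below reads the ACPI PM timer register purely as that
+	 * "useless" access; the value returned is discarded.)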
+ */ + inl(acpi_gbl_FADT.xpm_timer_block.address); +} + /** * acpi_idle_do_entry - enter idle state using the appropriate method * @cx: cstate data @@ -655,19 +559,18 @@ static int acpi_idle_bm_check(void) */ static void __cpuidle acpi_idle_do_entry(struct acpi_processor_cx *cx) { + perf_lopwr_cb(true); + if (cx->entry_method == ACPI_CSTATE_FFH) { /* Call into architectural FFH based C-state */ acpi_processor_ffh_cstate_enter(cx); } else if (cx->entry_method == ACPI_CSTATE_HALT) { acpi_safe_halt(); } else { - /* IO port based C-state */ - inb(cx->address); - /* Dummy wait op - must do something useless after P_LVL2 read - because chipsets cannot guarantee that STPCLK# signal - gets asserted in time to freeze execution properly. */ - inl(acpi_gbl_FADT.xpm_timer_block.address); + io_idle(cx->address); } + + perf_lopwr_cb(false); } /** @@ -675,7 +578,7 @@ static void __cpuidle acpi_idle_do_entry(struct acpi_processor_cx *cx) * @dev: the target CPU * @index: the index of suggested state */ -static int acpi_idle_play_dead(struct cpuidle_device *dev, int index) +static void acpi_idle_play_dead(struct cpuidle_device *dev, int index) { struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu); @@ -684,20 +587,17 @@ static int acpi_idle_play_dead(struct cpuidle_device *dev, int index) while (1) { if (cx->entry_method == ACPI_CSTATE_HALT) - safe_halt(); + raw_safe_halt(); else if (cx->entry_method == ACPI_CSTATE_SYSTEMIO) { - inb(cx->address); - /* See comment in acpi_idle_do_entry() */ - inl(acpi_gbl_FADT.xpm_timer_block.address); + io_idle(cx->address); + } else if (cx->entry_method == ACPI_CSTATE_FFH) { + acpi_processor_ffh_play_dead(cx); } else - return -ENODEV; + return; } - - /* Never reached */ - return 0; } -static bool acpi_idle_fallback_to_c1(struct acpi_processor *pr) +static __always_inline bool acpi_idle_fallback_to_c1(struct acpi_processor *pr) { return IS_ENABLED(CONFIG_HOTPLUG_CPU) && !pr->flags.has_cst && !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED); @@ -708,32 +608,45 @@ static DEFINE_RAW_SPINLOCK(c3_lock); /** * acpi_idle_enter_bm - enters C3 with proper BM handling + * @drv: cpuidle driver * @pr: Target processor * @cx: Target state context - * @timer_bc: Whether or not to change timer mode to broadcast + * @index: index of target state */ -static void acpi_idle_enter_bm(struct acpi_processor *pr, - struct acpi_processor_cx *cx, bool timer_bc) +static int __cpuidle acpi_idle_enter_bm(struct cpuidle_driver *drv, + struct acpi_processor *pr, + struct acpi_processor_cx *cx, + int index) { - acpi_unlazy_tlb(smp_processor_id()); - - /* - * Must be done before busmaster disable as we might need to - * access HPET ! - */ - if (timer_bc) - lapic_timer_state_broadcast(pr, cx, 1); + static struct acpi_processor_cx safe_cx = { + .entry_method = ACPI_CSTATE_HALT, + }; /* * disable bus master * bm_check implies we need ARB_DIS * bm_control implies whether we can do ARB_DIS * - * That leaves a case where bm_check is set and bm_control is - * not set. In that case we cannot do much, we enter C3 - * without doing anything. + * That leaves a case where bm_check is set and bm_control is not set. + * In that case we cannot do much, we enter C3 without doing anything. */ - if (pr->flags.bm_control) { + bool dis_bm = pr->flags.bm_control; + + instrumentation_begin(); + + /* If we can skip BM, demote to a safe state. 
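+	 * (i.e. acpi_idle_bm_check() saw recent bus-master activity, so skip
+	 * ARB_DIS and fall back to the driver's safe state instead.)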
*/ + if (!cx->bm_sts_skip && acpi_idle_bm_check()) { + dis_bm = false; + index = drv->safe_state_index; + if (index >= 0) { + cx = this_cpu_read(acpi_cstate[index]); + } else { + cx = &safe_cx; + index = -EBUSY; + } + } + + if (dis_bm) { raw_spin_lock(&c3_lock); c3_cpu_count++; /* Disable bus master arbitration when all CPUs are in C3 */ @@ -742,21 +655,26 @@ static void acpi_idle_enter_bm(struct acpi_processor *pr, raw_spin_unlock(&c3_lock); } + ct_cpuidle_enter(); + acpi_idle_do_entry(cx); + ct_cpuidle_exit(); + /* Re-enable bus master arbitration */ - if (pr->flags.bm_control) { + if (dis_bm) { raw_spin_lock(&c3_lock); acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 0); c3_cpu_count--; raw_spin_unlock(&c3_lock); } - if (timer_bc) - lapic_timer_state_broadcast(pr, cx, 0); + instrumentation_end(); + + return index; } -static int acpi_idle_enter(struct cpuidle_device *dev, +static int __cpuidle acpi_idle_enter(struct cpuidle_device *dev, struct cpuidle_driver *drv, int index) { struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu); @@ -767,37 +685,26 @@ static int acpi_idle_enter(struct cpuidle_device *dev, return -EINVAL; if (cx->type != ACPI_STATE_C1) { + if (cx->type == ACPI_STATE_C3 && pr->flags.bm_check) + return acpi_idle_enter_bm(drv, pr, cx, index); + + /* C2 to C1 demotion. */ if (acpi_idle_fallback_to_c1(pr) && num_online_cpus() > 1) { index = ACPI_IDLE_STATE_START; cx = per_cpu(acpi_cstate[index], dev->cpu); - } else if (cx->type == ACPI_STATE_C3 && pr->flags.bm_check) { - if (cx->bm_sts_skip || !acpi_idle_bm_check()) { - acpi_idle_enter_bm(pr, cx, true); - return index; - } else if (drv->safe_state_index >= 0) { - index = drv->safe_state_index; - cx = per_cpu(acpi_cstate[index], dev->cpu); - } else { - acpi_safe_halt(); - return -EBUSY; - } } } - lapic_timer_state_broadcast(pr, cx, 1); - if (cx->type == ACPI_STATE_C3) ACPI_FLUSH_CPU_CACHE(); acpi_idle_do_entry(cx); - lapic_timer_state_broadcast(pr, cx, 0); - return index; } -static void acpi_idle_enter_s2idle(struct cpuidle_device *dev, - struct cpuidle_driver *drv, int index) +static int __cpuidle acpi_idle_enter_s2idle(struct cpuidle_device *dev, + struct cpuidle_driver *drv, int index) { struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu); @@ -805,20 +712,28 @@ static void acpi_idle_enter_s2idle(struct cpuidle_device *dev, struct acpi_processor *pr = __this_cpu_read(processors); if (unlikely(!pr)) - return; + return 0; if (pr->flags.bm_check) { - acpi_idle_enter_bm(pr, cx, false); - return; + u8 bm_sts_skip = cx->bm_sts_skip; + + /* Don't check BM_STS, do an unconditional ARB_DIS for S2IDLE */ + cx->bm_sts_skip = 1; + acpi_idle_enter_bm(drv, pr, cx, index); + cx->bm_sts_skip = bm_sts_skip; + + return 0; } else { ACPI_FLUSH_CPU_CACHE(); } } acpi_idle_do_entry(cx); + + return 0; } -static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr, - struct cpuidle_device *dev) +static void acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr, + struct cpuidle_device *dev) { int i, count = ACPI_IDLE_STATE_START; struct acpi_processor_cx *cx; @@ -838,14 +753,9 @@ static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr, if (count == CPUIDLE_STATE_MAX) break; } - - if (!count) - return -EINVAL; - - return 0; } -static int acpi_processor_setup_cstates(struct acpi_processor *pr) +static void acpi_processor_setup_cstates(struct acpi_processor *pr) { int i, count; struct acpi_processor_cx *cx; @@ -870,16 +780,18 @@ static int acpi_processor_setup_cstates(struct acpi_processor *pr) 
state = &drv->states[count]; snprintf(state->name, CPUIDLE_NAME_LEN, "C%d", i); - strlcpy(state->desc, cx->desc, CPUIDLE_DESC_LEN); + strscpy(state->desc, cx->desc, CPUIDLE_DESC_LEN); state->exit_latency = cx->latency; state->target_residency = cx->latency * latency_factor; state->enter = acpi_idle_enter; state->flags = 0; - if (cx->type == ACPI_STATE_C1 || cx->type == ACPI_STATE_C2) { - state->enter_dead = acpi_idle_play_dead; + + state->enter_dead = acpi_idle_play_dead; + + if (cx->type == ACPI_STATE_C1 || cx->type == ACPI_STATE_C2) drv->safe_state_index = count; - } + /* * Halt-induced C1 is not good for ->enter_s2idle, because it * re-enables interrupts on exit. Moreover, C1 is generally not @@ -890,22 +802,25 @@ static int acpi_processor_setup_cstates(struct acpi_processor *pr) if (cx->type != ACPI_STATE_C1 && !acpi_idle_fallback_to_c1(pr)) state->enter_s2idle = acpi_idle_enter_s2idle; + if (lapic_timer_needs_broadcast(pr, cx)) + state->flags |= CPUIDLE_FLAG_TIMER_STOP; + + if (cx->type == ACPI_STATE_C3) { + state->flags |= CPUIDLE_FLAG_TLB_FLUSHED; + if (pr->flags.bm_check) + state->flags |= CPUIDLE_FLAG_RCU_IDLE; + } + count++; if (count == CPUIDLE_STATE_MAX) break; } drv->state_count = count; - - if (!count) - return -EINVAL; - - return 0; } static inline void acpi_processor_cstate_first_run_checks(void) { - acpi_status status; static int first_run; if (first_run) @@ -913,17 +828,14 @@ static inline void acpi_processor_cstate_first_run_checks(void) dmi_check_system(processor_power_dmi_table); max_cstate = acpi_processor_cstate_check(max_cstate); if (max_cstate < ACPI_C_STATES_MAX) - pr_notice("ACPI: processor limited to max C-state %d\n", - max_cstate); + pr_notice("processor limited to max C-state %d\n", max_cstate); + first_run++; - if (acpi_gbl_FADT.cst_control && !nocst) { - status = acpi_os_write_port(acpi_gbl_FADT.smi_command, - acpi_gbl_FADT.cst_control, 8); - if (ACPI_FAILURE(status)) - ACPI_EXCEPTION((AE_INFO, status, - "Notifying BIOS of _CST ability failed")); - } + if (nocst) + return; + + acpi_processor_claim_cst_control(); } #else @@ -975,7 +887,7 @@ static int acpi_processor_evaluate_lpi(acpi_handle handle, status = acpi_evaluate_object(handle, "_LPI", NULL, &buffer); if (ACPI_FAILURE(status)) { - ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No _LPI, giving up\n")); + acpi_handle_debug(handle, "No _LPI, giving up\n"); return -ENODEV; } @@ -1041,7 +953,7 @@ static int acpi_processor_evaluate_lpi(acpi_handle handle, obj = pkg_elem + 9; if (obj->type == ACPI_TYPE_STRING) - strlcpy(lpi_state->desc, obj->string.pointer, + strscpy(lpi_state->desc, obj->string.pointer, ACPI_CX_DESC_LEN); lpi_state->index = state_idx; @@ -1074,11 +986,6 @@ end: return ret; } -/* - * flat_state_cnt - the number of composite LPI states after the process of flattening - */ -static int flat_state_cnt; - /** * combine_lpi_states - combine local and parent LPI states to form a composite LPI state * @@ -1107,7 +1014,7 @@ static bool combine_lpi_states(struct acpi_lpi_state *local, result->arch_flags = parent->arch_flags; result->index = parent->index; - strlcpy(result->desc, local->desc, ACPI_CX_DESC_LEN); + strscpy(result->desc, local->desc, ACPI_CX_DESC_LEN); strlcat(result->desc, "+", ACPI_CX_DESC_LEN); strlcat(result->desc, parent->desc, ACPI_CX_DESC_LEN); return true; @@ -1121,9 +1028,10 @@ static void stash_composite_state(struct acpi_lpi_states_array *curr_level, curr_level->composite_states[curr_level->composite_states_size++] = t; } -static int flatten_lpi_states(struct acpi_processor *pr, - struct 
acpi_lpi_states_array *curr_level, - struct acpi_lpi_states_array *prev_level) +static unsigned int flatten_lpi_states(struct acpi_processor *pr, + unsigned int flat_state_cnt, + struct acpi_lpi_states_array *curr_level, + struct acpi_lpi_states_array *prev_level) { int i, j, state_count = curr_level->size; struct acpi_lpi_state *p, *t = curr_level->entries; @@ -1163,7 +1071,12 @@ static int flatten_lpi_states(struct acpi_processor *pr, } kfree(curr_level->entries); - return 0; + return flat_state_cnt; +} + +int __weak acpi_processor_ffh_lpi_probe(unsigned int cpu) +{ + return -EOPNOTSUPP; } static int acpi_processor_get_lpi_info(struct acpi_processor *pr) @@ -1173,6 +1086,12 @@ static int acpi_processor_get_lpi_info(struct acpi_processor *pr) acpi_handle handle = pr->handle, pr_ahandle; struct acpi_device *d = NULL; struct acpi_lpi_states_array info[2], *tmp, *prev, *curr; + unsigned int state_count; + + /* make sure our architecture has support */ + ret = acpi_processor_ffh_lpi_probe(pr->id); + if (ret == -EOPNOTSUPP) + return ret; if (!osc_pc_lpi_support_confirmed) return -EOPNOTSUPP; @@ -1180,18 +1099,20 @@ static int acpi_processor_get_lpi_info(struct acpi_processor *pr) if (!acpi_has_method(handle, "_LPI")) return -EINVAL; - flat_state_cnt = 0; prev = &info[0]; curr = &info[1]; handle = pr->handle; ret = acpi_processor_evaluate_lpi(handle, prev); if (ret) return ret; - flatten_lpi_states(pr, prev, NULL); + state_count = flatten_lpi_states(pr, 0, prev, NULL); status = acpi_get_parent(handle, &pr_ahandle); while (ACPI_SUCCESS(status)) { - acpi_bus_get_device(pr_ahandle, &d); + d = acpi_fetch_acpi_dev(pr_ahandle); + if (!d) + break; + handle = pr_ahandle; if (strcmp(acpi_device_hid(d), ACPI_PROCESSOR_CONTAINER_HID)) @@ -1206,18 +1127,19 @@ static int acpi_processor_get_lpi_info(struct acpi_processor *pr) break; /* flatten all the LPI states in this level of hierarchy */ - flatten_lpi_states(pr, curr, prev); + state_count = flatten_lpi_states(pr, state_count, curr, prev); tmp = prev, prev = curr, curr = tmp; status = acpi_get_parent(handle, &pr_ahandle); } - pr->power.count = flat_state_cnt; /* reset the index after flattening */ - for (i = 0; i < pr->power.count; i++) + for (i = 0; i < state_count; i++) pr->power.lpi_states[i].index = i; + pr->power.count = state_count; + /* Tell driver that _LPI is supported. 
*/ pr->flags.has_lpi = 1; pr->flags.power = 1; @@ -1225,11 +1147,6 @@ static int acpi_processor_get_lpi_info(struct acpi_processor *pr) return 0; } -int __weak acpi_processor_ffh_lpi_probe(unsigned int cpu) -{ - return -ENODEV; -} - int __weak acpi_processor_ffh_lpi_enter(struct acpi_lpi_state *lpi) { return -ENODEV; @@ -1276,11 +1193,12 @@ static int acpi_processor_setup_lpi_states(struct acpi_processor *pr) state = &drv->states[i]; snprintf(state->name, CPUIDLE_NAME_LEN, "LPI-%d", i); - strlcpy(state->desc, lpi->desc, CPUIDLE_DESC_LEN); + strscpy(state->desc, lpi->desc, CPUIDLE_DESC_LEN); state->exit_latency = lpi->wake_latency; state->target_residency = lpi->min_residency; - if (lpi->arch_flags) - state->flags |= CPUIDLE_FLAG_TIMER_STOP; + state->flags |= arch_get_idle_state_flags(lpi->arch_flags); + if (i != 0 && lpi->entry_method == ACPI_CSTATE_FFH) + state->flags |= CPUIDLE_FLAG_RCU_IDLE; state->enter = acpi_idle_lpi_enter; drv->safe_state_index = i; } @@ -1313,7 +1231,8 @@ static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr) if (pr->flags.has_lpi) return acpi_processor_setup_lpi_states(pr); - return acpi_processor_setup_cstates(pr); + acpi_processor_setup_cstates(pr); + return 0; } /** @@ -1333,7 +1252,8 @@ static int acpi_processor_setup_cpuidle_dev(struct acpi_processor *pr, if (pr->flags.has_lpi) return acpi_processor_ffh_lpi_probe(pr->id); - return acpi_processor_setup_cpuidle_cx(pr, dev); + acpi_processor_setup_cpuidle_cx(pr, dev); + return 0; } static int acpi_processor_get_power_info(struct acpi_processor *pr) @@ -1392,7 +1312,7 @@ int acpi_processor_power_state_has_changed(struct acpi_processor *pr) if (pr->id == 0 && cpuidle_get_driver() == &acpi_idle_driver) { /* Protect against cpu-hotplug */ - get_online_cpus(); + cpus_read_lock(); cpuidle_pause_and_lock(); /* Disable all cpuidle devices */ @@ -1421,7 +1341,7 @@ int acpi_processor_power_state_has_changed(struct acpi_processor *pr) } } cpuidle_resume_and_unlock(); - put_online_cpus(); + cpus_read_unlock(); } return 0; @@ -1472,6 +1392,9 @@ int acpi_processor_power_init(struct acpi_processor *pr) if (retval) { if (acpi_processor_registered == 0) cpuidle_unregister_driver(&acpi_idle_driver); + + per_cpu(acpi_cpuidle_device, pr->id) = NULL; + kfree(dev); return retval; } acpi_processor_registered++; @@ -1491,8 +1414,12 @@ int acpi_processor_power_exit(struct acpi_processor *pr) acpi_processor_registered--; if (acpi_processor_registered == 0) cpuidle_unregister_driver(&acpi_idle_driver); + + kfree(dev); } pr->flags.power_setup_done = 0; return 0; } + +MODULE_IMPORT_NS("ACPI_PROCESSOR_IDLE"); diff --git a/drivers/acpi/processor_pdc.c b/drivers/acpi/processor_pdc.c index 813f1b78c16a..994091bd52de 100644 --- a/drivers/acpi/processor_pdc.c +++ b/drivers/acpi/processor_pdc.c @@ -9,63 +9,20 @@ #define pr_fmt(fmt) "ACPI: " fmt -#include <linux/dmi.h> #include <linux/slab.h> #include <linux/acpi.h> #include <acpi/processor.h> #include "internal.h" -#define _COMPONENT ACPI_PROCESSOR_COMPONENT -ACPI_MODULE_NAME("processor_pdc"); - -static bool __init processor_physically_present(acpi_handle handle) -{ - int cpuid, type; - u32 acpi_id; - acpi_status status; - acpi_object_type acpi_type; - unsigned long long tmp; - union acpi_object object = { 0 }; - struct acpi_buffer buffer = { sizeof(union acpi_object), &object }; - - status = acpi_get_type(handle, &acpi_type); - if (ACPI_FAILURE(status)) - return false; - - switch (acpi_type) { - case ACPI_TYPE_PROCESSOR: - status = acpi_evaluate_object(handle, NULL, NULL, 
&buffer); - if (ACPI_FAILURE(status)) - return false; - acpi_id = object.processor.proc_id; - break; - case ACPI_TYPE_DEVICE: - status = acpi_evaluate_integer(handle, "_UID", NULL, &tmp); - if (ACPI_FAILURE(status)) - return false; - acpi_id = tmp; - break; - default: - return false; - } - - type = (acpi_type == ACPI_TYPE_DEVICE) ? 1 : 0; - cpuid = acpi_get_cpuid(handle, type, acpi_id); - - return !invalid_logical_cpuid(cpuid); -} - static void acpi_set_pdc_bits(u32 *buf) { buf[0] = ACPI_PDC_REVISION_ID; buf[1] = 1; - - /* Enable coordination with firmware's _TSD info */ - buf[2] = ACPI_PDC_SMP_T_SWCOORD; + buf[2] = 0; /* Twiddle arch-specific bits needed for _PDC */ - arch_acpi_set_pdc_bits(buf); + arch_acpi_set_proc_cap_bits(&buf[2]); } static struct acpi_object_list *acpi_processor_alloc_pdc(void) @@ -115,25 +72,11 @@ acpi_processor_eval_pdc(acpi_handle handle, struct acpi_object_list *pdc_in) { acpi_status status = AE_OK; - if (boot_option_idle_override == IDLE_NOMWAIT) { - /* - * If mwait is disabled for CPU C-states, the C2C3_FFH access - * mode will be disabled in the parameter of _PDC object. - * Of course C1_FFH access mode will also be disabled. - */ - union acpi_object *obj; - u32 *buffer = NULL; - - obj = pdc_in->pointer; - buffer = (u32 *)(obj->buffer.pointer); - buffer[2] &= ~(ACPI_PDC_C_C2C3_FFH | ACPI_PDC_C_C1_FFH); - - } status = acpi_evaluate_object(handle, "_PDC", pdc_in, NULL); if (ACPI_FAILURE(status)) - ACPI_DEBUG_PRINT((ACPI_DB_INFO, - "Could not evaluate _PDC, using legacy perf. control.\n")); + acpi_handle_debug(handle, + "Could not evaluate _PDC, using legacy perf control\n"); return status; } @@ -166,36 +109,9 @@ early_init_pdc(acpi_handle handle, u32 lvl, void *context, void **rv) return AE_OK; } -static int __init set_no_mwait(const struct dmi_system_id *id) -{ - pr_notice("%s detected - disabling mwait for CPU C-states\n", - id->ident); - boot_option_idle_override = IDLE_NOMWAIT; - return 0; -} - -static const struct dmi_system_id processor_idle_dmi_table[] __initconst = { - { - set_no_mwait, "Extensa 5220", { - DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"), - DMI_MATCH(DMI_SYS_VENDOR, "Acer"), - DMI_MATCH(DMI_PRODUCT_VERSION, "0100"), - DMI_MATCH(DMI_BOARD_NAME, "Columbia") }, NULL}, - {}, -}; - -static void __init processor_dmi_check(void) -{ - /* - * Check whether the system is DMI table. If yes, OSPM - * should not use mwait for CPU-states. - */ - dmi_check_system(processor_idle_dmi_table); -} - void __init acpi_early_processor_set_pdc(void) { - processor_dmi_check(); + acpi_proc_quirk_mwait_check(); acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT, ACPI_UINT32_MAX, diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c index a303fd0e108c..8972446b7162 100644 --- a/drivers/acpi/processor_perflib.c +++ b/drivers/acpi/processor_perflib.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /* * processor_perflib.c - ACPI Processor P-States Library ($Revision: 71 $) * @@ -6,22 +7,10 @@ * Copyright (C) 2004 Dominik Brodowski <linux@brodo.de> * Copyright (C) 2004 Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> * - Added processor hotplug support - * - * - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or (at - * your option) any later version. 
- * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * */ +#define pr_fmt(fmt) "ACPI: " fmt + #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> @@ -31,16 +20,10 @@ #include <acpi/processor.h> #ifdef CONFIG_X86 #include <asm/cpufeature.h> +#include <asm/msr.h> #endif -#define PREFIX "ACPI: " - -#define ACPI_PROCESSOR_CLASS "processor" #define ACPI_PROCESSOR_FILE_PERFORMANCE "performance" -#define _COMPONENT ACPI_PROCESSOR_COMPONENT -ACPI_MODULE_NAME("processor_perflib"); - -static DEFINE_MUTEX(performance_mutex); /* * _PPC support is implemented as a CPUfreq policy notifier: @@ -63,57 +46,15 @@ module_param(ignore_ppc, int, 0644); MODULE_PARM_DESC(ignore_ppc, "If the frequency of your machine gets wrongly" \ "limited by BIOS, this should help"); -#define PPC_REGISTERED 1 -#define PPC_IN_USE 2 - -static int acpi_processor_ppc_status; - -static int acpi_processor_ppc_notifier(struct notifier_block *nb, - unsigned long event, void *data) -{ - struct cpufreq_policy *policy = data; - struct acpi_processor *pr; - unsigned int ppc = 0; - - if (ignore_ppc < 0) - ignore_ppc = 0; - - if (ignore_ppc) - return 0; - - if (event != CPUFREQ_ADJUST) - return 0; - - mutex_lock(&performance_mutex); - - pr = per_cpu(processors, policy->cpu); - if (!pr || !pr->performance) - goto out; - - ppc = (unsigned int)pr->performance_platform_limit; - - if (ppc >= pr->performance->state_count) - goto out; - - cpufreq_verify_within_limits(policy, 0, - pr->performance->states[ppc]. - core_frequency * 1000); - - out: - mutex_unlock(&performance_mutex); - - return 0; -} - -static struct notifier_block acpi_ppc_notifier_block = { - .notifier_call = acpi_processor_ppc_notifier, -}; +static bool acpi_processor_ppc_in_use; static int acpi_processor_get_platform_limit(struct acpi_processor *pr) { acpi_status status = 0; unsigned long long ppc = 0; - + s32 qos_value; + int index; + int ret; if (!pr) return -EINVAL; @@ -123,19 +64,43 @@ static int acpi_processor_get_platform_limit(struct acpi_processor *pr) * (e.g. 0 = states 0..n; 1 = states 1..n; etc. */ status = acpi_evaluate_integer(pr->handle, "_PPC", NULL, &ppc); + if (status != AE_NOT_FOUND) { + acpi_processor_ppc_in_use = true; - if (status != AE_NOT_FOUND) - acpi_processor_ppc_status |= PPC_IN_USE; - - if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) { - ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PPC")); - return -ENODEV; + if (ACPI_FAILURE(status)) { + acpi_evaluation_failure_warn(pr->handle, "_PPC", status); + return -ENODEV; + } } + index = ppc; + + if (pr->performance_platform_limit == index || + ppc >= pr->performance->state_count) + return 0; + pr_debug("CPU %d: _PPC is %d - frequency %s limited\n", pr->id, - (int)ppc, ppc ? "" : "not"); + index, index ? "is" : "is not"); + + pr->performance_platform_limit = index; + + if (unlikely(!freq_qos_request_active(&pr->perflib_req))) + return 0; - pr->performance_platform_limit = (int)ppc; + /* + * If _PPC returns 0, it means that all of the available states can be + * used ("no limit"). 
+ */ + if (index == 0) + qos_value = FREQ_QOS_MAX_DEFAULT_VALUE; + else + qos_value = pr->performance->states[index].core_frequency * 1000; + + ret = freq_qos_update_request(&pr->perflib_req, qos_value); + if (ret < 0) { + pr_warn("Failed to update perflib freq constraint: CPU%d (%d)\n", + pr->id, ret); + } return 0; } @@ -145,7 +110,7 @@ static int acpi_processor_get_platform_limit(struct acpi_processor *pr) * acpi_processor_ppc_ost: Notify firmware the _PPC evaluation status * @handle: ACPI processor handle * @status: the status code of _PPC evaluation - * 0: success. OSPM is now using the performance state specificed. + * 0: success. OSPM is now using the performance state specified. * 1: failure. OSPM has not changed the number of P-states in use */ static void acpi_processor_ppc_ost(acpi_handle handle, int status) @@ -181,7 +146,7 @@ void acpi_processor_ppc_has_changed(struct acpi_processor *pr, int event_flag) acpi_processor_ppc_ost(pr->handle, 0); } if (ret >= 0) - cpufreq_update_policy(pr->id); + cpufreq_update_limits(pr->id); } int acpi_processor_get_bios_limit(int cpu, unsigned int *limit) @@ -191,31 +156,73 @@ int acpi_processor_get_bios_limit(int cpu, unsigned int *limit) pr = per_cpu(processors, cpu); if (!pr || !pr->performance || !pr->performance->state_count) return -ENODEV; + *limit = pr->performance->states[pr->performance_platform_limit]. core_frequency * 1000; return 0; } EXPORT_SYMBOL(acpi_processor_get_bios_limit); -void acpi_processor_ppc_init(void) +void acpi_processor_ignore_ppc_init(void) { - if (!cpufreq_register_notifier - (&acpi_ppc_notifier_block, CPUFREQ_POLICY_NOTIFIER)) - acpi_processor_ppc_status |= PPC_REGISTERED; - else - printk(KERN_DEBUG - "Warning: Processor Platform Limit not supported.\n"); + if (ignore_ppc < 0) + ignore_ppc = 0; } -void acpi_processor_ppc_exit(void) +void acpi_processor_ppc_init(struct cpufreq_policy *policy) { - if (acpi_processor_ppc_status & PPC_REGISTERED) - cpufreq_unregister_notifier(&acpi_ppc_notifier_block, - CPUFREQ_POLICY_NOTIFIER); + unsigned int cpu; - acpi_processor_ppc_status &= ~PPC_REGISTERED; + if (ignore_ppc == 1) + return; + + for_each_cpu(cpu, policy->related_cpus) { + struct acpi_processor *pr = per_cpu(processors, cpu); + int ret; + + if (!pr) + continue; + + /* + * Reset performance_platform_limit in case there is a stale + * value in it, so as to make it match the "no limit" QoS value + * below. 
+ */ + pr->performance_platform_limit = 0; + + ret = freq_qos_add_request(&policy->constraints, + &pr->perflib_req, FREQ_QOS_MAX, + FREQ_QOS_MAX_DEFAULT_VALUE); + if (ret < 0) + pr_err("Failed to add freq constraint for CPU%d (%d)\n", + cpu, ret); + + if (!pr->performance) + continue; + + ret = acpi_processor_get_platform_limit(pr); + if (ret) + pr_err("Failed to update freq constraint for CPU%d (%d)\n", + cpu, ret); + } +} + +void acpi_processor_ppc_exit(struct cpufreq_policy *policy) +{ + unsigned int cpu; + + for_each_cpu(cpu, policy->related_cpus) { + struct acpi_processor *pr = per_cpu(processors, cpu); + + if (pr) + freq_qos_remove_request(&pr->perflib_req); + } } +#ifdef CONFIG_X86 + +static DEFINE_MUTEX(performance_mutex); + static int acpi_processor_get_performance_control(struct acpi_processor *pr) { int result = 0; @@ -224,17 +231,15 @@ static int acpi_processor_get_performance_control(struct acpi_processor *pr) union acpi_object *pct = NULL; union acpi_object obj = { 0 }; - status = acpi_evaluate_object(pr->handle, "_PCT", NULL, &buffer); if (ACPI_FAILURE(status)) { - ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PCT")); + acpi_evaluation_failure_warn(pr->handle, "_PCT", status); return -ENODEV; } pct = (union acpi_object *)buffer.pointer; - if (!pct || (pct->type != ACPI_TYPE_PACKAGE) - || (pct->package.count != 2)) { - printk(KERN_ERR PREFIX "Invalid _PCT data\n"); + if (!pct || pct->type != ACPI_TYPE_PACKAGE || pct->package.count != 2) { + pr_err("Invalid _PCT data\n"); result = -EFAULT; goto end; } @@ -245,10 +250,9 @@ static int acpi_processor_get_performance_control(struct acpi_processor *pr) obj = pct->package.elements[0]; - if ((obj.type != ACPI_TYPE_BUFFER) - || (obj.buffer.length < sizeof(struct acpi_pct_register)) - || (obj.buffer.pointer == NULL)) { - printk(KERN_ERR PREFIX "Invalid _PCT data (control_register)\n"); + if (!obj.buffer.pointer || obj.type != ACPI_TYPE_BUFFER || + obj.buffer.length < sizeof(struct acpi_pct_register)) { + pr_err("Invalid _PCT data (control_register)\n"); result = -EFAULT; goto end; } @@ -261,10 +265,9 @@ static int acpi_processor_get_performance_control(struct acpi_processor *pr) obj = pct->package.elements[1]; - if ((obj.type != ACPI_TYPE_BUFFER) - || (obj.buffer.length < sizeof(struct acpi_pct_register)) - || (obj.buffer.pointer == NULL)) { - printk(KERN_ERR PREFIX "Invalid _PCT data (status_register)\n"); + if (!obj.buffer.pointer || obj.type != ACPI_TYPE_BUFFER || + obj.buffer.length < sizeof(struct acpi_pct_register)) { + pr_err("Invalid _PCT data (status_register)\n"); result = -EFAULT; goto end; } @@ -272,13 +275,12 @@ static int acpi_processor_get_performance_control(struct acpi_processor *pr) memcpy(&pr->performance->status_register, obj.buffer.pointer, sizeof(struct acpi_pct_register)); - end: +end: kfree(buffer.pointer); return result; } -#ifdef CONFIG_X86 /* * Some AMDs have 50MHz frequency multiples, but only provide 100MHz rounding * in their ACPI data. Calculate the real values and fix up the _PSS data. 
@@ -291,8 +293,8 @@ static void amd_fixup_frequency(struct acpi_processor_px *px, int i) if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) return; - if ((boot_cpu_data.x86 == 0x10 && boot_cpu_data.x86_model < 10) - || boot_cpu_data.x86 == 0x11) { + if ((boot_cpu_data.x86 == 0x10 && boot_cpu_data.x86_model < 10) || + boot_cpu_data.x86 == 0x11) { rdmsr(MSR_AMD_PSTATE_DEF_BASE + index, lo, hi); /* * MSR C001_0064+: @@ -309,9 +311,6 @@ static void amd_fixup_frequency(struct acpi_processor_px *px, int i) px->core_frequency = (100 * (fid + 8)) >> did; } } -#else -static void amd_fixup_frequency(struct acpi_processor_px *px, int i) {}; -#endif static int acpi_processor_get_performance_states(struct acpi_processor *pr) { @@ -324,22 +323,21 @@ static int acpi_processor_get_performance_states(struct acpi_processor *pr) int i; int last_invalid = -1; - status = acpi_evaluate_object(pr->handle, "_PSS", NULL, &buffer); if (ACPI_FAILURE(status)) { - ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PSS")); + acpi_evaluation_failure_warn(pr->handle, "_PSS", status); return -ENODEV; } pss = buffer.pointer; - if (!pss || (pss->type != ACPI_TYPE_PACKAGE)) { - printk(KERN_ERR PREFIX "Invalid _PSS data\n"); + if (!pss || pss->type != ACPI_TYPE_PACKAGE) { + pr_err("Invalid _PSS data\n"); result = -EFAULT; goto end; } - ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d performance states\n", - pss->package.count)); + acpi_handle_debug(pr->handle, "Found %d performance states\n", + pss->package.count); pr->performance->state_count = pss->package.count; pr->performance->states = @@ -358,12 +356,13 @@ static int acpi_processor_get_performance_states(struct acpi_processor *pr) state.length = sizeof(struct acpi_processor_px); state.pointer = px; - ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Extracting state %d\n", i)); + acpi_handle_debug(pr->handle, "Extracting state %d\n", i); status = acpi_extract_package(&(pss->package.elements[i]), &format, &state); if (ACPI_FAILURE(status)) { - ACPI_EXCEPTION((AE_INFO, status, "Invalid _PSS data")); + acpi_handle_warn(pr->handle, "Invalid _PSS data: %s\n", + acpi_format_exception(status)); result = -EFAULT; kfree(pr->performance->states); goto end; @@ -371,22 +370,21 @@ static int acpi_processor_get_performance_states(struct acpi_processor *pr) amd_fixup_frequency(px, i); - ACPI_DEBUG_PRINT((ACPI_DB_INFO, + acpi_handle_debug(pr->handle, "State [%d]: core_frequency[%d] power[%d] transition_latency[%d] bus_master_latency[%d] control[0x%x] status[0x%x]\n", i, (u32) px->core_frequency, (u32) px->power, (u32) px->transition_latency, (u32) px->bus_master_latency, - (u32) px->control, (u32) px->status)); + (u32) px->control, (u32) px->status); /* - * Check that ACPI's u64 MHz will be valid as u32 KHz in cpufreq + * Check that ACPI's u64 MHz will be valid as u32 KHz in cpufreq */ if (!px->core_frequency || - ((u32)(px->core_frequency * 1000) != - (px->core_frequency * 1000))) { - printk(KERN_ERR FW_BUG PREFIX + (u32)(px->core_frequency * 1000) != px->core_frequency * 1000) { + pr_err(FW_BUG "Invalid BIOS _PSS frequency found for processor %d: 0x%llx MHz\n", pr->id, px->core_frequency); if (last_invalid == -1) @@ -404,8 +402,8 @@ static int acpi_processor_get_performance_states(struct acpi_processor *pr) } if (last_invalid == 0) { - printk(KERN_ERR FW_BUG PREFIX - "No valid BIOS _PSS frequency found for processor %d\n", pr->id); + pr_err(FW_BUG + "No valid BIOS _PSS frequency found for processor %d\n", pr->id); result = -EFAULT; kfree(pr->performance->states); pr->performance->states = NULL; @@ -414,7 +412,7 @@ 
static int acpi_processor_get_performance_states(struct acpi_processor *pr) if (last_invalid > 0) pr->performance->state_count = last_invalid; - end: +end: kfree(buffer.pointer); return result; @@ -428,8 +426,8 @@ int acpi_processor_get_performance_info(struct acpi_processor *pr) return -EINVAL; if (!acpi_has_method(pr->handle, "_PCT")) { - ACPI_DEBUG_PRINT((ACPI_DB_INFO, - "ACPI-based processor performance control unavailable\n")); + acpi_handle_debug(pr->handle, + "ACPI-based processor performance control unavailable\n"); return -ENODEV; } @@ -452,13 +450,11 @@ int acpi_processor_get_performance_info(struct acpi_processor *pr) * the BIOS is older than the CPU and does not know its frequencies */ update_bios: -#ifdef CONFIG_X86 if (acpi_has_method(pr->handle, "_PPC")) { if(boot_cpu_has(X86_FEATURE_EST)) - printk(KERN_WARNING FW_BUG "BIOS needs update for CPU " + pr_warn(FW_BUG "BIOS needs update for CPU " "frequency support\n"); } -#endif return result; } EXPORT_SYMBOL_GPL(acpi_processor_get_performance_info); @@ -470,68 +466,66 @@ int acpi_processor_pstate_control(void) if (!acpi_gbl_FADT.smi_command || !acpi_gbl_FADT.pstate_control) return 0; - ACPI_DEBUG_PRINT((ACPI_DB_INFO, - "Writing pstate_control [0x%x] to smi_command [0x%x]\n", - acpi_gbl_FADT.pstate_control, acpi_gbl_FADT.smi_command)); + pr_debug("Writing pstate_control [0x%x] to smi_command [0x%x]\n", + acpi_gbl_FADT.pstate_control, acpi_gbl_FADT.smi_command); status = acpi_os_write_port(acpi_gbl_FADT.smi_command, (u32)acpi_gbl_FADT.pstate_control, 8); if (ACPI_SUCCESS(status)) return 1; - ACPI_EXCEPTION((AE_INFO, status, - "Failed to write pstate_control [0x%x] to smi_command [0x%x]", - acpi_gbl_FADT.pstate_control, acpi_gbl_FADT.smi_command)); + pr_warn("Failed to write pstate_control [0x%x] to smi_command [0x%x]: %s\n", + acpi_gbl_FADT.pstate_control, acpi_gbl_FADT.smi_command, + acpi_format_exception(status)); return -EIO; } int acpi_processor_notify_smm(struct module *calling_module) { - static int is_done = 0; - int result; + static int is_done; + int result = 0; - if (!(acpi_processor_ppc_status & PPC_REGISTERED)) + if (!acpi_processor_cpufreq_init) return -EBUSY; if (!try_module_get(calling_module)) return -EINVAL; - /* is_done is set to negative if an error occurred, - * and to postitive if _no_ error occurred, but SMM - * was already notified. This avoids double notification - * which might lead to unexpected results... + /* + * is_done is set to negative if an error occurs and to 1 if no error + * occurs, but SMM has been notified already. This avoids repeated + * notification which might lead to unexpected results. */ - if (is_done > 0) { - module_put(calling_module); - return 0; - } else if (is_done < 0) { - module_put(calling_module); - return is_done; - } + if (is_done != 0) { + if (is_done < 0) + result = is_done; - is_done = -EIO; + goto out_put; + } result = acpi_processor_pstate_control(); - if (!result) { - ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No SMI port or pstate_control\n")); - module_put(calling_module); - return 0; - } - if (result < 0) { - module_put(calling_module); - return result; + if (result <= 0) { + if (result) { + is_done = result; + } else { + pr_debug("No SMI port or pstate_control\n"); + is_done = 1; + } + goto out_put; } - /* Success. If there's no _PPC, we need to fear nothing, so - * we can allow the cpufreq driver to be rmmod'ed. */ is_done = 1; + /* + * Success. If there is _PPC, unloading the cpufreq driver would be risky, + * so disallow it in that case. 
+ */ + if (acpi_processor_ppc_in_use) + return 0; - if (!(acpi_processor_ppc_status & PPC_IN_USE)) - module_put(calling_module); - - return 0; +out_put: + module_put(calling_module); + return result; } - EXPORT_SYMBOL(acpi_processor_notify_smm); int acpi_processor_get_psd(acpi_handle handle, struct acpi_psd_package *pdomain) @@ -549,14 +543,14 @@ int acpi_processor_get_psd(acpi_handle handle, struct acpi_psd_package *pdomain) } psd = buffer.pointer; - if (!psd || (psd->type != ACPI_TYPE_PACKAGE)) { - printk(KERN_ERR PREFIX "Invalid _PSD data\n"); + if (!psd || psd->type != ACPI_TYPE_PACKAGE) { + pr_err("Invalid _PSD data\n"); result = -EFAULT; goto end; } if (psd->package.count != 1) { - printk(KERN_ERR PREFIX "Invalid _PSD data\n"); + pr_err("Invalid _PSD data\n"); result = -EFAULT; goto end; } @@ -564,22 +558,21 @@ int acpi_processor_get_psd(acpi_handle handle, struct acpi_psd_package *pdomain) state.length = sizeof(struct acpi_psd_package); state.pointer = pdomain; - status = acpi_extract_package(&(psd->package.elements[0]), - &format, &state); + status = acpi_extract_package(&(psd->package.elements[0]), &format, &state); if (ACPI_FAILURE(status)) { - printk(KERN_ERR PREFIX "Invalid _PSD data\n"); + pr_err("Invalid _PSD data\n"); result = -EFAULT; goto end; } if (pdomain->num_entries != ACPI_PSD_REV0_ENTRIES) { - printk(KERN_ERR PREFIX "Unknown _PSD:num_entries\n"); + pr_err("Unknown _PSD:num_entries\n"); result = -EFAULT; goto end; } if (pdomain->revision != ACPI_PSD_REV0_REVISION) { - printk(KERN_ERR PREFIX "Unknown _PSD:revision\n"); + pr_err("Unknown _PSD:revision\n"); result = -EFAULT; goto end; } @@ -587,7 +580,7 @@ int acpi_processor_get_psd(acpi_handle handle, struct acpi_psd_package *pdomain) if (pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ALL && pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ANY && pdomain->coord_type != DOMAIN_COORD_TYPE_HW_ALL) { - printk(KERN_ERR PREFIX "Invalid _PSD:coord_type\n"); + pr_err("Invalid _PSD:coord_type\n"); result = -EFAULT; goto end; } @@ -643,7 +636,6 @@ int acpi_processor_preregister_performance( continue; pr->performance = per_cpu_ptr(performance, i); - cpumask_set_cpu(i, pr->performance->shared_cpu_map); pdomain = &(pr->performance->domain_info); if (acpi_processor_get_psd(pr->handle, pdomain)) { retval = -EINVAL; @@ -654,7 +646,7 @@ int acpi_processor_preregister_performance( goto err_ret; /* - * Now that we have _PSD data from all CPUs, lets setup P-state + * Now that we have _PSD data from all CPUs, lets setup P-state * domain info. 
*/ for_each_possible_cpu(i) { @@ -720,7 +712,7 @@ int acpi_processor_preregister_performance( if (match_pdomain->domain != pdomain->domain) continue; - match_pr->performance->shared_type = + match_pr->performance->shared_type = pr->performance->shared_type; cpumask_copy(match_pr->performance->shared_cpu_map, pr->performance->shared_cpu_map); @@ -737,7 +729,7 @@ err_ret: if (retval) { cpumask_clear(pr->performance->shared_cpu_map); cpumask_set_cpu(i, pr->performance->shared_cpu_map); - pr->performance->shared_type = CPUFREQ_SHARED_TYPE_ALL; + pr->performance->shared_type = CPUFREQ_SHARED_TYPE_NONE; } pr->performance = NULL; /* Will be set for real in register */ } @@ -749,13 +741,12 @@ err_out: } EXPORT_SYMBOL(acpi_processor_preregister_performance); -int -acpi_processor_register_performance(struct acpi_processor_performance - *performance, unsigned int cpu) +int acpi_processor_register_performance(struct acpi_processor_performance + *performance, unsigned int cpu) { struct acpi_processor *pr; - if (!(acpi_processor_ppc_status & PPC_REGISTERED)) + if (!acpi_processor_cpufreq_init) return -EINVAL; mutex_lock(&performance_mutex); @@ -784,7 +775,6 @@ acpi_processor_register_performance(struct acpi_processor_performance mutex_unlock(&performance_mutex); return 0; } - EXPORT_SYMBOL(acpi_processor_register_performance); void acpi_processor_unregister_performance(unsigned int cpu) @@ -794,18 +784,16 @@ void acpi_processor_unregister_performance(unsigned int cpu) mutex_lock(&performance_mutex); pr = per_cpu(processors, cpu); - if (!pr) { - mutex_unlock(&performance_mutex); - return; - } + if (!pr) + goto unlock; if (pr->performance) kfree(pr->performance->states); + pr->performance = NULL; +unlock: mutex_unlock(&performance_mutex); - - return; } - EXPORT_SYMBOL(acpi_processor_unregister_performance); +#endif diff --git a/drivers/acpi/processor_thermal.c b/drivers/acpi/processor_thermal.c index 59c3a5d1e600..c7b1dc5687ec 100644 --- a/drivers/acpi/processor_thermal.c +++ b/drivers/acpi/processor_thermal.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /* * processor_thermal.c - Passive cooling submodule of the ACPI processor driver * @@ -6,20 +7,6 @@ * Copyright (C) 2004 Dominik Brodowski <linux@brodo.de> * Copyright (C) 2004 Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> * - Added processor hotplug support - * - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or (at - * your option) any later version. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. 
- * - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ #include <linux/kernel.h> @@ -30,11 +17,7 @@ #include <acpi/processor.h> #include <linux/uaccess.h> -#define PREFIX "ACPI: " - -#define ACPI_PROCESSOR_CLASS "processor" -#define _COMPONENT ACPI_PROCESSOR_COMPONENT -ACPI_MODULE_NAME("processor_thermal"); +#include "internal.h" #ifdef CONFIG_CPU_FREQ @@ -45,13 +28,21 @@ ACPI_MODULE_NAME("processor_thermal"); */ #define CPUFREQ_THERMAL_MIN_STEP 0 -#define CPUFREQ_THERMAL_MAX_STEP 3 -static DEFINE_PER_CPU(unsigned int, cpufreq_thermal_reduction_pctg); -static unsigned int acpi_thermal_cpufreq_is_init = 0; +static int cpufreq_thermal_max_step __read_mostly = 3; + +/* + * Minimum throttle percentage for processor_thermal cooling device. + * The processor_thermal driver uses it to calculate the percentage amount by + * which cpu frequency must be reduced for each cooling state. This is also used + * to calculate the maximum number of throttling steps or cooling states. + */ +static int cpufreq_thermal_reduction_pctg __read_mostly = 20; + +static DEFINE_PER_CPU(unsigned int, cpufreq_thermal_reduction_step); -#define reduction_pctg(cpu) \ - per_cpu(cpufreq_thermal_reduction_pctg, phys_package_first_cpu(cpu)) +#define reduction_step(cpu) \ + per_cpu(cpufreq_thermal_reduction_step, phys_package_first_cpu(cpu)) /* * Emulate "per package data" using per cpu data (which should really be @@ -71,44 +62,22 @@ static int phys_package_first_cpu(int cpu) return 0; } -static int cpu_has_cpufreq(unsigned int cpu) +static bool cpu_has_cpufreq(unsigned int cpu) { - struct cpufreq_policy policy; - if (!acpi_thermal_cpufreq_is_init || cpufreq_get_policy(&policy, cpu)) + if (!acpi_processor_cpufreq_init) return 0; - return 1; -} - -static int acpi_thermal_cpufreq_notifier(struct notifier_block *nb, - unsigned long event, void *data) -{ - struct cpufreq_policy *policy = data; - unsigned long max_freq = 0; - - if (event != CPUFREQ_ADJUST) - goto out; - max_freq = ( - policy->cpuinfo.max_freq * - (100 - reduction_pctg(policy->cpu) * 20) - ) / 100; + struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu); - cpufreq_verify_within_limits(policy, 0, max_freq); - - out: - return 0; + return policy != NULL; } -static struct notifier_block acpi_thermal_cpufreq_notifier_block = { - .notifier_call = acpi_thermal_cpufreq_notifier, -}; - static int cpufreq_get_max_state(unsigned int cpu) { if (!cpu_has_cpufreq(cpu)) return 0; - return CPUFREQ_THERMAL_MAX_STEP; + return cpufreq_thermal_max_step; } static int cpufreq_get_cur_state(unsigned int cpu) @@ -116,17 +85,39 @@ static int cpufreq_get_cur_state(unsigned int cpu) if (!cpu_has_cpufreq(cpu)) return 0; - return reduction_pctg(cpu); + return reduction_step(cpu); +} + +static bool cpufreq_update_thermal_limit(unsigned int cpu, struct acpi_processor *pr) +{ + unsigned long max_freq; + int ret; + + struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu); + if (!policy) + return false; + + max_freq = (policy->cpuinfo.max_freq * + (100 - reduction_step(cpu) * cpufreq_thermal_reduction_pctg)) / 100; + + ret = freq_qos_update_request(&pr->thermal_req, max_freq); + if (ret < 0) { + pr_warn("Failed to update thermal freq constraint: CPU%d (%d)\n", + pr->id, ret); + } + + return true; } static int cpufreq_set_cur_state(unsigned int cpu, int state) { + struct acpi_processor *pr; int i; if (!cpu_has_cpufreq(cpu)) return 0; - reduction_pctg(cpu) = state; + reduction_step(cpu) = state; /* * Update all the CPUs 
in the same package because they all @@ -134,33 +125,79 @@ static int cpufreq_set_cur_state(unsigned int cpu, int state) * frequency. */ for_each_online_cpu(i) { - if (topology_physical_package_id(i) == + if (topology_physical_package_id(i) != topology_physical_package_id(cpu)) - cpufreq_update_policy(i); + continue; + + pr = per_cpu(processors, i); + + if (unlikely(!freq_qos_request_active(&pr->thermal_req))) + continue; + + if (!cpufreq_update_thermal_limit(i, pr)) + return -EINVAL; } return 0; } -void acpi_thermal_cpufreq_init(void) +static void acpi_thermal_cpufreq_config(void) { - int i; + int cpufreq_pctg = acpi_arch_thermal_cpufreq_pctg(); + + if (!cpufreq_pctg) + return; - i = cpufreq_register_notifier(&acpi_thermal_cpufreq_notifier_block, - CPUFREQ_POLICY_NOTIFIER); - if (!i) - acpi_thermal_cpufreq_is_init = 1; + cpufreq_thermal_reduction_pctg = cpufreq_pctg; + + /* + * Derive the MAX_STEP from minimum throttle percentage so that the reduction + * percentage doesn't end up becoming negative. Also, cap the MAX_STEP so that + * the CPU performance doesn't become 0. + */ + cpufreq_thermal_max_step = (100 / cpufreq_pctg) - 2; } -void acpi_thermal_cpufreq_exit(void) +void acpi_thermal_cpufreq_init(struct cpufreq_policy *policy) { - if (acpi_thermal_cpufreq_is_init) - cpufreq_unregister_notifier - (&acpi_thermal_cpufreq_notifier_block, - CPUFREQ_POLICY_NOTIFIER); + unsigned int cpu; + + acpi_thermal_cpufreq_config(); + + for_each_cpu(cpu, policy->related_cpus) { + struct acpi_processor *pr = per_cpu(processors, cpu); + int ret; + + if (!pr) + continue; + + ret = freq_qos_add_request(&policy->constraints, + &pr->thermal_req, + FREQ_QOS_MAX, INT_MAX); + if (ret < 0) { + pr_err("Failed to add freq constraint for CPU%d (%d)\n", + cpu, ret); + continue; + } - acpi_thermal_cpufreq_is_init = 0; + thermal_cooling_device_update(pr->cdev); + } } +void acpi_thermal_cpufreq_exit(struct cpufreq_policy *policy) +{ + unsigned int cpu; + + for_each_cpu(cpu, policy->related_cpus) { + struct acpi_processor *pr = per_cpu(processors, cpu); + + if (!pr) + continue; + + freq_qos_remove_request(&pr->thermal_req); + + thermal_cooling_device_update(pr->cdev); + } +} #else /* ! CONFIG_CPU_FREQ */ static int cpufreq_get_max_state(unsigned int cpu) { @@ -186,7 +223,7 @@ static int acpi_processor_max_state(struct acpi_processor *pr) /* * There exists four states according to - * cpufreq_thermal_reduction_pctg. 0, 1, 2, 3 + * cpufreq_thermal_reduction_step. 
0, 1, 2, 3 */ max_state += cpufreq_get_max_state(pr->id); if (pr->flags.throttling) @@ -270,3 +307,57 @@ const struct thermal_cooling_device_ops processor_cooling_ops = { .get_cur_state = processor_get_cur_state, .set_cur_state = processor_set_cur_state, }; + +int acpi_processor_thermal_init(struct acpi_processor *pr, + struct acpi_device *device) +{ + int result = 0; + + pr->cdev = thermal_cooling_device_register("Processor", device, + &processor_cooling_ops); + if (IS_ERR(pr->cdev)) { + result = PTR_ERR(pr->cdev); + return result; + } + + dev_dbg(&device->dev, "registered as cooling_device%d\n", + pr->cdev->id); + + result = sysfs_create_link(&device->dev.kobj, + &pr->cdev->device.kobj, + "thermal_cooling"); + if (result) { + dev_err(&device->dev, + "Failed to create sysfs link 'thermal_cooling'\n"); + goto err_thermal_unregister; + } + + result = sysfs_create_link(&pr->cdev->device.kobj, + &device->dev.kobj, + "device"); + if (result) { + dev_err(&pr->cdev->device, + "Failed to create sysfs link 'device'\n"); + goto err_remove_sysfs_thermal; + } + + return 0; + +err_remove_sysfs_thermal: + sysfs_remove_link(&device->dev.kobj, "thermal_cooling"); +err_thermal_unregister: + thermal_cooling_device_unregister(pr->cdev); + + return result; +} + +void acpi_processor_thermal_exit(struct acpi_processor *pr, + struct acpi_device *device) +{ + if (pr->cdev) { + sysfs_remove_link(&device->dev.kobj, "thermal_cooling"); + sysfs_remove_link(&pr->cdev->device.kobj, "device"); + thermal_cooling_device_unregister(pr->cdev); + pr->cdev = NULL; + } +} diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c index fbc936cf2025..f9c2bc1d4a3a 100644 --- a/drivers/acpi/processor_throttling.c +++ b/drivers/acpi/processor_throttling.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /* * processor_throttling.c - Throttling submodule of the ACPI processor driver * @@ -5,23 +6,11 @@ * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com> * Copyright (C) 2004 Dominik Brodowski <linux@brodo.de> * Copyright (C) 2004 Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> - * - Added processor hotplug support - * - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or (at - * your option) any later version. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. 
- * - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * - Added processor hotplug support */ +#define pr_fmt(fmt) "ACPI: " fmt + #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> @@ -29,15 +18,12 @@ #include <linux/sched.h> #include <linux/cpufreq.h> #include <linux/acpi.h> +#include <linux/uaccess.h> #include <acpi/processor.h> #include <asm/io.h> -#include <linux/uaccess.h> - -#define PREFIX "ACPI: " - -#define ACPI_PROCESSOR_CLASS "processor" -#define _COMPONENT ACPI_PROCESSOR_COMPONENT -ACPI_MODULE_NAME("processor_throttling"); +#ifdef CONFIG_X86 +#include <asm/msr.h> +#endif /* ignore_tpc: * 0 -> acpi processor driver doesn't ignore _TPC values @@ -67,7 +53,7 @@ static int __acpi_processor_set_throttling(struct acpi_processor *pr, static int acpi_processor_update_tsd_coord(void) { - int count, count_target; + int count_target; int retval = 0; unsigned int i, j; cpumask_var_t covered_cpus; @@ -124,7 +110,6 @@ static int acpi_processor_update_tsd_coord(void) /* Validate the Domain info */ count_target = pdomain->num_processors; - count = 1; for_each_possible_cpu(j) { if (i == j) @@ -157,7 +142,6 @@ static int acpi_processor_update_tsd_coord(void) cpumask_set_cpu(j, covered_cpus); cpumask_set_cpu(j, pthrottling->shared_cpu_map); - count++; } for_each_possible_cpu(j) { if (i == j) @@ -210,19 +194,15 @@ err_ret: */ void acpi_processor_throttling_init(void) { - if (acpi_processor_update_tsd_coord()) { - ACPI_DEBUG_PRINT((ACPI_DB_INFO, - "Assume no T-state coordination\n")); - } - - return; + if (acpi_processor_update_tsd_coord()) + pr_debug("Assume no T-state coordination\n"); } static int acpi_processor_throttling_notifier(unsigned long event, void *data) { struct throttling_tstate *p_tstate = data; struct acpi_processor *pr; - unsigned int cpu ; + unsigned int cpu; int target_state; struct acpi_processor_limit *p_limit; struct acpi_processor_throttling *p_throttling; @@ -230,12 +210,13 @@ static int acpi_processor_throttling_notifier(unsigned long event, void *data) cpu = p_tstate->cpu; pr = per_cpu(processors, cpu); if (!pr) { - ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Invalid pr pointer\n")); + pr_debug("Invalid pr pointer\n"); return 0; } if (!pr->flags.throttling) { - ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Throttling control is " - "unsupported on CPU %d\n", cpu)); + acpi_handle_debug(pr->handle, + "Throttling control unsupported on CPU %d\n", + cpu); return 0; } target_state = p_tstate->target_state; @@ -254,14 +235,13 @@ static int acpi_processor_throttling_notifier(unsigned long event, void *data) if (pr->throttling_platform_limit > target_state) target_state = pr->throttling_platform_limit; if (target_state >= p_throttling->state_count) { - printk(KERN_WARNING - "Exceed the limit of T-state \n"); + pr_warn("Exceed the limit of T-state\n"); target_state = p_throttling->state_count - 1; } p_tstate->target_state = target_state; - ACPI_DEBUG_PRINT((ACPI_DB_INFO, "PreChange Event:" - "target T-state of CPU %d is T%d\n", - cpu, target_state)); + acpi_handle_debug(pr->handle, + "PreChange Event: target T-state of CPU %d is T%d\n", + cpu, target_state); break; case THROTTLING_POSTCHANGE: /* @@ -269,13 +249,12 @@ static int acpi_processor_throttling_notifier(unsigned long event, void *data) * T-state flag of acpi_processor_throttling. 
*/ p_throttling->state = target_state; - ACPI_DEBUG_PRINT((ACPI_DB_INFO, "PostChange Event:" - "CPU %d is switched to T%d\n", - cpu, target_state)); + acpi_handle_debug(pr->handle, + "PostChange Event: CPU %d is switched to T%d\n", + cpu, target_state); break; default: - printk(KERN_WARNING - "Unsupported Throttling notifier event\n"); + pr_warn("Unsupported Throttling notifier event\n"); break; } @@ -298,9 +277,9 @@ static int acpi_processor_get_platform_limit(struct acpi_processor *pr) status = acpi_evaluate_integer(pr->handle, "_TPC", NULL, &tpc); if (ACPI_FAILURE(status)) { - if (status != AE_NOT_FOUND) { - ACPI_EXCEPTION((AE_INFO, status, "Evaluating _TPC")); - } + if (status != AE_NOT_FOUND) + acpi_evaluation_failure_warn(pr->handle, "_TPC", status); + return -ENODEV; } @@ -426,21 +405,21 @@ static int acpi_processor_get_throttling_control(struct acpi_processor *pr) acpi_status status = 0; struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; union acpi_object *ptc = NULL; - union acpi_object obj = { 0 }; + union acpi_object obj; struct acpi_processor_throttling *throttling; status = acpi_evaluate_object(pr->handle, "_PTC", NULL, &buffer); if (ACPI_FAILURE(status)) { - if (status != AE_NOT_FOUND) { - ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PTC")); - } + if (status != AE_NOT_FOUND) + acpi_evaluation_failure_warn(pr->handle, "_PTC", status); + return -ENODEV; } ptc = (union acpi_object *)buffer.pointer; if (!ptc || (ptc->type != ACPI_TYPE_PACKAGE) || (ptc->package.count != 2)) { - printk(KERN_ERR PREFIX "Invalid _PTC data\n"); + pr_err("Invalid _PTC data\n"); result = -EFAULT; goto end; } @@ -454,8 +433,7 @@ static int acpi_processor_get_throttling_control(struct acpi_processor *pr) if ((obj.type != ACPI_TYPE_BUFFER) || (obj.buffer.length < sizeof(struct acpi_ptc_register)) || (obj.buffer.pointer == NULL)) { - printk(KERN_ERR PREFIX - "Invalid _PTC data (control_register)\n"); + pr_err("Invalid _PTC data (control_register)\n"); result = -EFAULT; goto end; } @@ -471,7 +449,7 @@ static int acpi_processor_get_throttling_control(struct acpi_processor *pr) if ((obj.type != ACPI_TYPE_BUFFER) || (obj.buffer.length < sizeof(struct acpi_ptc_register)) || (obj.buffer.pointer == NULL)) { - printk(KERN_ERR PREFIX "Invalid _PTC data (status_register)\n"); + pr_err("Invalid _PTC data (status_register)\n"); result = -EFAULT; goto end; } @@ -483,19 +461,19 @@ static int acpi_processor_get_throttling_control(struct acpi_processor *pr) if ((throttling->control_register.bit_width + throttling->control_register.bit_offset) > 32) { - printk(KERN_ERR PREFIX "Invalid _PTC control register\n"); + pr_err("Invalid _PTC control register\n"); result = -EFAULT; goto end; } if ((throttling->status_register.bit_width + throttling->status_register.bit_offset) > 32) { - printk(KERN_ERR PREFIX "Invalid _PTC status register\n"); + pr_err("Invalid _PTC status register\n"); result = -EFAULT; goto end; } - end: +end: kfree(buffer.pointer); return result; @@ -516,21 +494,21 @@ static int acpi_processor_get_throttling_states(struct acpi_processor *pr) status = acpi_evaluate_object(pr->handle, "_TSS", NULL, &buffer); if (ACPI_FAILURE(status)) { - if (status != AE_NOT_FOUND) { - ACPI_EXCEPTION((AE_INFO, status, "Evaluating _TSS")); - } + if (status != AE_NOT_FOUND) + acpi_evaluation_failure_warn(pr->handle, "_TSS", status); + return -ENODEV; } tss = buffer.pointer; if (!tss || (tss->type != ACPI_TYPE_PACKAGE)) { - printk(KERN_ERR PREFIX "Invalid _TSS data\n"); + pr_err("Invalid _TSS data\n"); result = -EFAULT; goto 
end; } - ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d throttling states\n", - tss->package.count)); + acpi_handle_debug(pr->handle, "Found %d throttling states\n", + tss->package.count); pr->throttling.state_count = tss->package.count; pr->throttling.states_tss = @@ -551,27 +529,27 @@ static int acpi_processor_get_throttling_states(struct acpi_processor *pr) state.length = sizeof(struct acpi_processor_tx_tss); state.pointer = tx; - ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Extracting state %d\n", i)); + acpi_handle_debug(pr->handle, "Extracting state %d\n", i); status = acpi_extract_package(&(tss->package.elements[i]), &format, &state); if (ACPI_FAILURE(status)) { - ACPI_EXCEPTION((AE_INFO, status, "Invalid _TSS data")); + acpi_handle_warn(pr->handle, "Invalid _TSS data: %s\n", + acpi_format_exception(status)); result = -EFAULT; kfree(pr->throttling.states_tss); goto end; } if (!tx->freqpercentage) { - printk(KERN_ERR PREFIX - "Invalid _TSS data: freq is zero\n"); + pr_err("Invalid _TSS data: freq is zero\n"); result = -EFAULT; kfree(pr->throttling.states_tss); goto end; } } - end: +end: kfree(buffer.pointer); return result; @@ -596,21 +574,21 @@ static int acpi_processor_get_tsd(struct acpi_processor *pr) status = acpi_evaluate_object(pr->handle, "_TSD", NULL, &buffer); if (ACPI_FAILURE(status)) { - if (status != AE_NOT_FOUND) { - ACPI_EXCEPTION((AE_INFO, status, "Evaluating _TSD")); - } + if (status != AE_NOT_FOUND) + acpi_evaluation_failure_warn(pr->handle, "_TSD", status); + return -ENODEV; } tsd = buffer.pointer; if (!tsd || (tsd->type != ACPI_TYPE_PACKAGE)) { - printk(KERN_ERR PREFIX "Invalid _TSD data\n"); + pr_err("Invalid _TSD data\n"); result = -EFAULT; goto end; } if (tsd->package.count != 1) { - printk(KERN_ERR PREFIX "Invalid _TSD data\n"); + pr_err("Invalid _TSD data\n"); result = -EFAULT; goto end; } @@ -623,19 +601,19 @@ static int acpi_processor_get_tsd(struct acpi_processor *pr) status = acpi_extract_package(&(tsd->package.elements[0]), &format, &state); if (ACPI_FAILURE(status)) { - printk(KERN_ERR PREFIX "Invalid _TSD data\n"); + pr_err("Invalid _TSD data\n"); result = -EFAULT; goto end; } if (pdomain->num_entries != ACPI_TSD_REV0_ENTRIES) { - printk(KERN_ERR PREFIX "Unknown _TSD:num_entries\n"); + pr_err("Unknown _TSD:num_entries\n"); result = -EFAULT; goto end; } if (pdomain->revision != ACPI_TSD_REV0_REVISION) { - printk(KERN_ERR PREFIX "Unknown _TSD:revision\n"); + pr_err("Unknown _TSD:revision\n"); result = -EFAULT; goto end; } @@ -656,7 +634,7 @@ static int acpi_processor_get_tsd(struct acpi_processor *pr) pthrottling->shared_type = DOMAIN_COORD_TYPE_SW_ALL; } - end: +end: kfree(buffer.pointer); return result; } @@ -712,9 +690,9 @@ static int acpi_processor_get_throttling_fadt(struct acpi_processor *pr) local_irq_enable(); - ACPI_DEBUG_PRINT((ACPI_DB_INFO, + acpi_handle_debug(pr->handle, "Throttling state is T%d (%d%% throttling applied)\n", - state, pr->throttling.states[state].performance)); + state, pr->throttling.states[state].performance); return 0; } @@ -728,13 +706,12 @@ static int acpi_throttling_rdmsr(u64 *value) if ((this_cpu_read(cpu_info.x86_vendor) != X86_VENDOR_INTEL) || !this_cpu_has(X86_FEATURE_ACPI)) { - printk(KERN_ERR PREFIX - "HARDWARE addr space,NOT supported yet\n"); + pr_err("HARDWARE addr space,NOT supported yet\n"); } else { msr_low = 0; msr_high = 0; rdmsr_safe(MSR_IA32_THERM_CONTROL, - (u32 *)&msr_low , (u32 *) &msr_high); + (u32 *)&msr_low, (u32 *) &msr_high); msr = (msr_high << 32) | msr_low; *value = (u64) msr; ret = 0; @@ -749,8 +726,7 @@ static 
int acpi_throttling_wrmsr(u64 value) if ((this_cpu_read(cpu_info.x86_vendor) != X86_VENDOR_INTEL) || !this_cpu_has(X86_FEATURE_ACPI)) { - printk(KERN_ERR PREFIX - "HARDWARE addr space,NOT supported yet\n"); + pr_err("HARDWARE addr space,NOT supported yet\n"); } else { msr = value; wrmsr_safe(MSR_IA32_THERM_CONTROL, @@ -762,15 +738,13 @@ static int acpi_throttling_wrmsr(u64 value) #else static int acpi_throttling_rdmsr(u64 *value) { - printk(KERN_ERR PREFIX - "HARDWARE addr space,NOT supported yet\n"); + pr_err("HARDWARE addr space,NOT supported yet\n"); return -1; } static int acpi_throttling_wrmsr(u64 value) { - printk(KERN_ERR PREFIX - "HARDWARE addr space,NOT supported yet\n"); + pr_err("HARDWARE addr space,NOT supported yet\n"); return -1; } #endif @@ -801,7 +775,7 @@ static int acpi_read_throttling_status(struct acpi_processor *pr, ret = acpi_throttling_rdmsr(value); break; default: - printk(KERN_ERR PREFIX "Unknown addr space %d\n", + pr_err("Unknown addr space %d\n", (u32) (throttling->status_register.space_id)); } return ret; @@ -834,7 +808,7 @@ static int acpi_write_throttling_state(struct acpi_processor *pr, ret = acpi_throttling_wrmsr(value); break; default: - printk(KERN_ERR PREFIX "Unknown addr space %d\n", + pr_err("Unknown addr space %d\n", (u32) (throttling->control_register.space_id)); } return ret; @@ -889,8 +863,8 @@ static int acpi_processor_get_throttling_ptc(struct acpi_processor *pr) if (ret >= 0) { state = acpi_get_throttling_state(pr, value); if (state == -1) { - ACPI_DEBUG_PRINT((ACPI_DB_INFO, - "Invalid throttling state, reset\n")); + acpi_handle_debug(pr->handle, + "Invalid throttling state, reset\n"); state = 0; ret = __acpi_processor_set_throttling(pr, state, true, true); @@ -910,13 +884,6 @@ static long __acpi_processor_get_throttling(void *data) return pr->throttling.acpi_processor_get_throttling(pr); } -static int call_on_cpu(int cpu, long (*fn)(void *), void *arg, bool direct) -{ - if (direct || (is_percpu_thread() && cpu == smp_processor_id())) - return fn(arg); - return work_on_cpu(cpu, fn, arg); -} - static int acpi_processor_get_throttling(struct acpi_processor *pr) { if (!pr) @@ -942,15 +909,15 @@ static int acpi_processor_get_fadt_info(struct acpi_processor *pr) int i, step; if (!pr->throttling.address) { - ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No throttling register\n")); + acpi_handle_debug(pr->handle, "No throttling register\n"); return -EINVAL; } else if (!pr->throttling.duty_width) { - ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No throttling states\n")); + acpi_handle_debug(pr->handle, "No throttling states\n"); return -EINVAL; } /* TBD: Support duty_cycle values that span bit 4. */ else if ((pr->throttling.duty_offset + pr->throttling.duty_width) > 4) { - printk(KERN_WARNING PREFIX "duty_cycle spans bit 4\n"); + pr_warn("duty_cycle spans bit 4\n"); return -EINVAL; } @@ -1036,10 +1003,10 @@ static int acpi_processor_set_throttling_fadt(struct acpi_processor *pr, local_irq_enable(); - ACPI_DEBUG_PRINT((ACPI_DB_INFO, + acpi_handle_debug(pr->handle, "Throttling state set to T%d (%d%%)\n", state, (pr->throttling.states[state].performance ? pr-> - throttling.states[state].performance / 10 : 0))); + throttling.states[state].performance / 10 : 0)); return 0; } @@ -1150,8 +1117,8 @@ static int __acpi_processor_set_throttling(struct acpi_processor *pr, * error message and continue. 
*/ if (!match_pr) { - ACPI_DEBUG_PRINT((ACPI_DB_INFO, - "Invalid Pointer for CPU %d\n", i)); + acpi_handle_debug(pr->handle, + "Invalid Pointer for CPU %d\n", i); continue; } /* @@ -1159,9 +1126,8 @@ static int __acpi_processor_set_throttling(struct acpi_processor *pr, * we will report the error message and continue. */ if (!match_pr->flags.throttling) { - ACPI_DEBUG_PRINT((ACPI_DB_INFO, - "Throttling Control is unsupported " - "on CPU %d\n", i)); + acpi_handle_debug(pr->handle, + "Throttling Control unsupported on CPU %d\n", i); continue; } @@ -1198,11 +1164,11 @@ int acpi_processor_get_throttling_info(struct acpi_processor *pr) int result = 0; struct acpi_processor_throttling *pthrottling; - ACPI_DEBUG_PRINT((ACPI_DB_INFO, + acpi_handle_debug(pr->handle, "pblk_address[0x%08x] duty_offset[%d] duty_width[%d]\n", pr->throttling.address, pr->throttling.duty_offset, - pr->throttling.duty_width)); + pr->throttling.duty_width); /* * Evaluate _PTC, _TSS and _TPC @@ -1210,8 +1176,7 @@ int acpi_processor_get_throttling_info(struct acpi_processor *pr) */ if (acpi_processor_get_throttling_control(pr) || acpi_processor_get_throttling_states(pr) || - acpi_processor_get_platform_limit(pr)) - { + acpi_processor_get_platform_limit(pr)) { pr->throttling.acpi_processor_get_throttling = &acpi_processor_get_throttling_fadt; pr->throttling.acpi_processor_set_throttling = @@ -1242,13 +1207,13 @@ int acpi_processor_get_throttling_info(struct acpi_processor *pr) * used this part. */ if (errata.piix4.throttle) { - ACPI_DEBUG_PRINT((ACPI_DB_INFO, - "Throttling not supported on PIIX4 A- or B-step\n")); + acpi_handle_debug(pr->handle, + "Throttling not supported on PIIX4 A- or B-step\n"); return 0; } - ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d throttling states\n", - pr->throttling.state_count)); + acpi_handle_debug(pr->handle, "Found %d throttling states\n", + pr->throttling.state_count); pr->flags.throttling = 1; @@ -1263,15 +1228,15 @@ int acpi_processor_get_throttling_info(struct acpi_processor *pr) goto end; if (pr->throttling.state) { - ACPI_DEBUG_PRINT((ACPI_DB_INFO, + acpi_handle_debug(pr->handle, "Disabling throttling (was T%d)\n", - pr->throttling.state)); + pr->throttling.state); result = acpi_processor_set_throttling(pr, 0, false); if (result) goto end; } - end: +end: if (result) pr->flags.throttling = 0; diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c index 77abe0ec4043..18e90067d567 100644 --- a/drivers/acpi/property.c +++ b/drivers/acpi/property.c @@ -1,18 +1,18 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * ACPI device specific properties support. * - * Copyright (C) 2014, Intel Corporation + * Copyright (C) 2014 - 2023, Intel Corporation * All rights reserved. * * Authors: Mika Westerberg <mika.westerberg@linux.intel.com> * Darren Hart <dvhart@linux.intel.com> * Rafael J. Wysocki <rafael.j.wysocki@intel.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. + * Sakari Ailus <sakari.ailus@linux.intel.com> */ +#define pr_fmt(fmt) "ACPI: " fmt + #include <linux/acpi.h> #include <linux/device.h> #include <linux/export.h> @@ -31,9 +31,14 @@ static int acpi_data_get_property_array(const struct acpi_device_data *data, * not defined without a warning. For instance if any of the properties * from different GUID appear in a property list of another, it will be * accepted by the kernel. Firmware validation tools should catch these. 
+ * + * References: + * + * [1] UEFI DSD Guide. + * https://github.com/UEFI/DSD-Guide/blob/main/src/dsd-guide.adoc */ static const guid_t prp_guids[] = { - /* ACPI _DSD device properties GUID: daffd814-6eba-4d8c-8a91-bc9bbf4aa301 */ + /* ACPI _DSD device properties GUID [1]: daffd814-6eba-4d8c-8a91-bc9bbf4aa301 */ GUID_INIT(0xdaffd814, 0x6eba, 0x4d8c, 0x8a, 0x91, 0xbc, 0x9b, 0xbf, 0x4a, 0xa3, 0x01), /* Hotplug in D3 GUID: 6211e2c0-58a3-4af3-90e1-927a4e0c55a4 */ @@ -42,91 +47,97 @@ static const guid_t prp_guids[] = { /* External facing port GUID: efcc06cc-73ac-4bc3-bff0-76143807c389 */ GUID_INIT(0xefcc06cc, 0x73ac, 0x4bc3, 0xbf, 0xf0, 0x76, 0x14, 0x38, 0x07, 0xc3, 0x89), + /* Thunderbolt GUID for IMR_VALID: c44d002f-69f9-4e7d-a904-a7baabdf43f7 */ + GUID_INIT(0xc44d002f, 0x69f9, 0x4e7d, + 0xa9, 0x04, 0xa7, 0xba, 0xab, 0xdf, 0x43, 0xf7), + /* Thunderbolt GUID for WAKE_SUPPORTED: 6c501103-c189-4296-ba72-9bf5a26ebe5d */ + GUID_INIT(0x6c501103, 0xc189, 0x4296, + 0xba, 0x72, 0x9b, 0xf5, 0xa2, 0x6e, 0xbe, 0x5d), + /* Storage device needs D3 GUID: 5025030f-842f-4ab4-a561-99a5189762d0 */ + GUID_INIT(0x5025030f, 0x842f, 0x4ab4, + 0xa5, 0x61, 0x99, 0xa5, 0x18, 0x97, 0x62, 0xd0), }; +/* ACPI _DSD data subnodes GUID [1]: dbb8e3e6-5886-4ba6-8795-1319f52a966b */ static const guid_t ads_guid = GUID_INIT(0xdbb8e3e6, 0x5886, 0x4ba6, 0x87, 0x95, 0x13, 0x19, 0xf5, 0x2a, 0x96, 0x6b); +/* ACPI _DSD data buffer GUID [1]: edb12dd0-363d-4085-a3d2-49522ca160c4 */ +static const guid_t buffer_prop_guid = + GUID_INIT(0xedb12dd0, 0x363d, 0x4085, + 0xa3, 0xd2, 0x49, 0x52, 0x2c, 0xa1, 0x60, 0xc4); + static bool acpi_enumerate_nondev_subnodes(acpi_handle scope, - const union acpi_object *desc, + union acpi_object *desc, struct acpi_device_data *data, struct fwnode_handle *parent); -static bool acpi_extract_properties(const union acpi_object *desc, +static bool acpi_extract_properties(acpi_handle handle, + union acpi_object *desc, struct acpi_device_data *data); -static bool acpi_nondev_subnode_extract(const union acpi_object *desc, +static bool acpi_nondev_subnode_extract(union acpi_object *desc, acpi_handle handle, const union acpi_object *link, struct list_head *list, struct fwnode_handle *parent) { struct acpi_data_node *dn; + acpi_handle scope = NULL; bool result; + if (acpi_graph_ignore_port(handle)) + return false; + dn = kzalloc(sizeof(*dn), GFP_KERNEL); if (!dn) return false; dn->name = link->package.elements[0].string.pointer; - dn->fwnode.ops = &acpi_data_fwnode_ops; + fwnode_init(&dn->fwnode, &acpi_data_fwnode_ops); dn->parent = parent; INIT_LIST_HEAD(&dn->data.properties); INIT_LIST_HEAD(&dn->data.subnodes); - result = acpi_extract_properties(desc, &dn->data); - - if (handle) { - acpi_handle scope; - acpi_status status; + /* + * The scope for the completion of relative pathname segments and + * subnode object lookup is the one of the namespace node (device) + * containing the object that has returned the package. That is, it's + * the scope of that object's parent device. + */ + if (handle) + acpi_get_parent(handle, &scope); - /* - * The scope for the subnode object lookup is the one of the - * namespace node (device) containing the object that has - * returned the package. That is, it's the scope of that - * object's parent. 
- */ - status = acpi_get_parent(handle, &scope); - if (ACPI_SUCCESS(status) - && acpi_enumerate_nondev_subnodes(scope, desc, &dn->data, - &dn->fwnode)) - result = true; - } else if (acpi_enumerate_nondev_subnodes(NULL, desc, &dn->data, - &dn->fwnode)) { + /* + * Extract properties from the _DSD-equivalent package pointed to by + * desc and use scope (if not NULL) for the completion of relative + * pathname segments. + * + * The extracted properties will be held in the new data node dn. + */ + result = acpi_extract_properties(scope, desc, &dn->data); + /* + * Look for subnodes in the _DSD-equivalent package pointed to by desc + * and create child nodes of dn if there are any. + */ + if (acpi_enumerate_nondev_subnodes(scope, desc, &dn->data, &dn->fwnode)) result = true; - } - - if (result) { - dn->handle = handle; - dn->data.pointer = desc; - list_add_tail(&dn->sibling, list); - return true; - } - - kfree(dn); - acpi_handle_debug(handle, "Invalid properties/subnodes data, skipping\n"); - return false; -} -static bool acpi_nondev_subnode_data_ok(acpi_handle handle, - const union acpi_object *link, - struct list_head *list, - struct fwnode_handle *parent) -{ - struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER }; - acpi_status status; - - status = acpi_evaluate_object_typed(handle, NULL, NULL, &buf, - ACPI_TYPE_PACKAGE); - if (ACPI_FAILURE(status)) + if (!result) { + kfree(dn); + acpi_handle_debug(handle, "Invalid properties/subnodes data, skipping\n"); return false; + } - if (acpi_nondev_subnode_extract(buf.pointer, handle, link, list, - parent)) - return true; + /* + * This will be NULL if the desc package is embedded in an outer + * _DSD-equivalent package and its scope cannot be determined. + */ + dn->handle = handle; + dn->data.pointer = desc; + list_add_tail(&dn->sibling, list); - ACPI_FREE(buf.pointer); - return false; + return true; } static bool acpi_nondev_subnode_ok(acpi_handle scope, @@ -134,9 +145,16 @@ static bool acpi_nondev_subnode_ok(acpi_handle scope, struct list_head *list, struct fwnode_handle *parent) { + struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER }; acpi_handle handle; acpi_status status; + /* + * If the scope is unknown, the _DSD-equivalent package being parsed + * was embedded in an outer _DSD-equivalent package as a result of + * direct evaluation of an object pointed to by a reference. In that + * case, using a pathname as the target object pointer is invalid. + */ if (!scope) return false; @@ -145,20 +163,33 @@ static bool acpi_nondev_subnode_ok(acpi_handle scope, if (ACPI_FAILURE(status)) return false; - return acpi_nondev_subnode_data_ok(handle, link, list, parent); + status = acpi_evaluate_object_typed(handle, NULL, NULL, &buf, + ACPI_TYPE_PACKAGE); + if (ACPI_FAILURE(status)) + return false; + + if (acpi_nondev_subnode_extract(buf.pointer, handle, link, list, + parent)) + return true; + + ACPI_FREE(buf.pointer); + return false; } -static int acpi_add_nondev_subnodes(acpi_handle scope, - const union acpi_object *links, - struct list_head *list, - struct fwnode_handle *parent) +static bool acpi_add_nondev_subnodes(acpi_handle scope, + union acpi_object *links, + struct list_head *list, + struct fwnode_handle *parent) { bool ret = false; int i; + /* + * Every element in the links package is expected to represent a link + * to a non-device node in a tree containing device-specific data. 
+ */ for (i = 0; i < links->package.count; i++) { - const union acpi_object *link, *desc; - acpi_handle handle; + union acpi_object *link, *desc; bool result; link = &links->package.elements[i]; @@ -166,26 +197,53 @@ static int acpi_add_nondev_subnodes(acpi_handle scope, if (link->package.count != 2) continue; - /* The first one must be a string. */ + /* The first one (the key) must be a string. */ if (link->package.elements[0].type != ACPI_TYPE_STRING) continue; - /* The second one may be a string, a reference or a package. */ + /* The second one (the target) may be a string or a package. */ switch (link->package.elements[1].type) { case ACPI_TYPE_STRING: + /* + * The string is expected to be a full pathname or a + * pathname segment relative to the given scope. That + * pathname is expected to point to an object returning + * a package that contains _DSD-equivalent information. + */ result = acpi_nondev_subnode_ok(scope, link, list, parent); break; - case ACPI_TYPE_LOCAL_REFERENCE: - handle = link->package.elements[1].reference.handle; - result = acpi_nondev_subnode_data_ok(handle, link, list, - parent); - break; case ACPI_TYPE_PACKAGE: + /* + * This happens when a reference is used in AML to + * point to the target. Since the target is expected + * to be a named object, a reference to it will cause it + * to be avaluated in place and its return package will + * be embedded in the links package at the location of + * the reference. + * + * The target package is expected to contain _DSD- + * equivalent information, but the scope in which it + * is located in the original AML is unknown. Thus + * it cannot contain pathname segments represented as + * strings because there is no way to build full + * pathnames out of them. + */ + acpi_handle_debug(scope, "subnode %s: Unknown scope\n", + link->package.elements[0].string.pointer); desc = &link->package.elements[1]; result = acpi_nondev_subnode_extract(desc, NULL, link, list, parent); break; + case ACPI_TYPE_LOCAL_REFERENCE: + /* + * It is not expected to see any local references in + * the links package because referencing a named object + * should cause it to be evaluated in place. + */ + acpi_handle_info(scope, "subnode %s: Unexpected reference\n", + link->package.elements[0].string.pointer); + fallthrough; default: result = false; break; @@ -197,7 +255,7 @@ static int acpi_add_nondev_subnodes(acpi_handle scope, } static bool acpi_enumerate_nondev_subnodes(acpi_handle scope, - const union acpi_object *desc, + union acpi_object *desc, struct acpi_device_data *data, struct fwnode_handle *parent) { @@ -205,7 +263,8 @@ static bool acpi_enumerate_nondev_subnodes(acpi_handle scope, /* Look for the ACPI data subnodes GUID. 
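For reference, a minimal sketch of how a driver typically reaches one of the data-only subnodes enumerated above; the node name "port0", the property name "reg" and the function name are illustrative assumptions:

#include <linux/device.h>
#include <linux/property.h>

/* Illustrative sketch: look up a named _DSD data subnode and read a property. */
static int example_read_subnode(struct device *dev)
{
	struct fwnode_handle *child;
	u32 reg;
	int ret;

	child = device_get_named_child_node(dev, "port0");
	if (!child)
		return -ENODEV;

	ret = fwnode_property_read_u32(child, "reg", &reg);
	fwnode_handle_put(child);
	return ret;
}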
*/ for (i = 0; i < desc->package.count; i += 2) { - const union acpi_object *guid, *links; + const union acpi_object *guid; + union acpi_object *links; guid = &desc->package.elements[i]; links = &desc->package.elements[i + 1]; @@ -291,8 +350,10 @@ static void acpi_init_of_compatible(struct acpi_device *adev) ret = acpi_dev_get_property(adev, "compatible", ACPI_TYPE_STRING, &of_compatible); if (ret) { - if (adev->parent - && adev->parent->flags.of_compatible_ok) + struct acpi_device *parent; + + parent = acpi_dev_parent(adev); + if (parent && parent->flags.of_compatible_ok) goto out; return; @@ -318,7 +379,7 @@ static bool acpi_is_property_guid(const guid_t *guid) struct acpi_device_properties * acpi_data_add_props(struct acpi_device_data *data, const guid_t *guid, - const union acpi_object *properties) + union acpi_object *properties) { struct acpi_device_properties *props; @@ -333,7 +394,147 @@ acpi_data_add_props(struct acpi_device_data *data, const guid_t *guid, return props; } -static bool acpi_extract_properties(const union acpi_object *desc, +static void acpi_nondev_subnode_tag(acpi_handle handle, void *context) +{ +} + +static void acpi_untie_nondev_subnodes(struct acpi_device_data *data) +{ + struct acpi_data_node *dn; + + list_for_each_entry(dn, &data->subnodes, sibling) { + if (!dn->handle) + continue; + + acpi_detach_data(dn->handle, acpi_nondev_subnode_tag); + + acpi_untie_nondev_subnodes(&dn->data); + } +} + +static bool acpi_tie_nondev_subnodes(struct acpi_device_data *data) +{ + struct acpi_data_node *dn; + + list_for_each_entry(dn, &data->subnodes, sibling) { + acpi_status status; + bool ret; + + if (!dn->handle) + continue; + + status = acpi_attach_data(dn->handle, acpi_nondev_subnode_tag, dn); + if (ACPI_FAILURE(status) && status != AE_ALREADY_EXISTS) { + acpi_handle_err(dn->handle, "Can't tag data node\n"); + return false; + } + + ret = acpi_tie_nondev_subnodes(&dn->data); + if (!ret) + return ret; + } + + return true; +} + +static void acpi_data_add_buffer_props(acpi_handle handle, + struct acpi_device_data *data, + union acpi_object *properties) +{ + struct acpi_device_properties *props; + union acpi_object *package; + size_t alloc_size; + unsigned int i; + u32 *count; + + if (check_mul_overflow((size_t)properties->package.count, + sizeof(*package) + sizeof(void *), + &alloc_size) || + check_add_overflow(sizeof(*props) + sizeof(*package), alloc_size, + &alloc_size)) { + acpi_handle_warn(handle, + "can't allocate memory for %u buffer props", + properties->package.count); + return; + } + + props = kvzalloc(alloc_size, GFP_KERNEL); + if (!props) + return; + + props->guid = &buffer_prop_guid; + props->bufs = (void *)(props + 1); + props->properties = (void *)(props->bufs + properties->package.count); + + /* Outer package */ + package = props->properties; + package->type = ACPI_TYPE_PACKAGE; + package->package.elements = package + 1; + count = &package->package.count; + *count = 0; + + /* Inner packages */ + package++; + + for (i = 0; i < properties->package.count; i++) { + struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER }; + union acpi_object *property = &properties->package.elements[i]; + union acpi_object *prop, *obj, *buf_obj; + acpi_status status; + + if (property->type != ACPI_TYPE_PACKAGE || + property->package.count != 2) { + acpi_handle_warn(handle, + "buffer property %u has %u entries\n", + i, property->package.count); + continue; + } + + prop = &property->package.elements[0]; + obj = &property->package.elements[1]; + + if (prop->type != ACPI_TYPE_STRING || + 
obj->type != ACPI_TYPE_STRING) { + acpi_handle_warn(handle, + "wrong object types %u and %u\n", + prop->type, obj->type); + continue; + } + + status = acpi_evaluate_object_typed(handle, obj->string.pointer, + NULL, &buf, + ACPI_TYPE_BUFFER); + if (ACPI_FAILURE(status)) { + acpi_handle_warn(handle, + "can't evaluate \"%*pE\" as buffer\n", + obj->string.length, + obj->string.pointer); + continue; + } + + package->type = ACPI_TYPE_PACKAGE; + package->package.elements = prop; + package->package.count = 2; + + buf_obj = buf.pointer; + + /* Replace the string object with a buffer object */ + obj->type = ACPI_TYPE_BUFFER; + obj->buffer.length = buf_obj->buffer.length; + obj->buffer.pointer = buf_obj->buffer.pointer; + + props->bufs[i] = buf.pointer; + package++; + (*count)++; + } + + if (*count) + list_add(&props->list, &data->properties); + else + kvfree(props); +} + +static bool acpi_extract_properties(acpi_handle scope, union acpi_object *desc, struct acpi_device_data *data) { int i; @@ -343,7 +544,8 @@ static bool acpi_extract_properties(const union acpi_object *desc, /* Look for the device properties GUID. */ for (i = 0; i < desc->package.count; i += 2) { - const union acpi_object *guid, *properties; + const union acpi_object *guid; + union acpi_object *properties; guid = &desc->package.elements[i]; properties = &desc->package.elements[i + 1]; @@ -357,6 +559,12 @@ static bool acpi_extract_properties(const union acpi_object *desc, properties->type != ACPI_TYPE_PACKAGE) break; + if (guid_equal((guid_t *)guid->buffer.pointer, + &buffer_prop_guid)) { + acpi_data_add_buffer_props(scope, data, properties); + continue; + } + if (!acpi_is_property_guid((guid_t *)guid->buffer.pointer)) continue; @@ -403,7 +611,7 @@ void acpi_init_properties(struct acpi_device *adev) if (ACPI_FAILURE(status)) goto out; - if (acpi_extract_properties(buf.pointer, &adev->data)) { + if (acpi_extract_properties(adev->handle, buf.pointer, &adev->data)) { adev->data.pointer = buf.pointer; if (acpi_of) acpi_init_of_compatible(adev); @@ -415,6 +623,9 @@ void acpi_init_properties(struct acpi_device *adev) if (!adev->data.pointer) { acpi_handle_debug(adev->handle, "Invalid _DSD data, skipping\n"); ACPI_FREE(buf.pointer); + } else { + if (!acpi_tie_nondev_subnodes(&adev->data)) + acpi_untie_nondev_subnodes(&adev->data); } out: @@ -426,6 +637,22 @@ void acpi_init_properties(struct acpi_device *adev) acpi_extract_apple_properties(adev); } +static void acpi_free_device_properties(struct list_head *list) +{ + struct acpi_device_properties *props, *tmp; + + list_for_each_entry_safe(props, tmp, list, list) { + u32 i; + + list_del(&props->list); + /* Buffer data properties were separately allocated */ + if (props->bufs) + for (i = 0; i < props->properties->package.count; i++) + ACPI_FREE(props->bufs[i]); + kvfree(props); + } +} + static void acpi_destroy_nondev_subnodes(struct list_head *list) { struct acpi_data_node *dn, *next; @@ -438,22 +665,19 @@ static void acpi_destroy_nondev_subnodes(struct list_head *list) wait_for_completion(&dn->kobj_done); list_del(&dn->sibling); ACPI_FREE((void *)dn->data.pointer); + acpi_free_device_properties(&dn->data.properties); kfree(dn); } } void acpi_free_properties(struct acpi_device *adev) { - struct acpi_device_properties *props, *tmp; - + acpi_untie_nondev_subnodes(&adev->data); acpi_destroy_nondev_subnodes(&adev->data.subnodes); ACPI_FREE((void *)adev->data.pointer); adev->data.of_compatible = NULL; adev->data.pointer = NULL; - list_for_each_entry_safe(props, tmp, &adev->data.properties, list) { 
- list_del(&props->list); - kfree(props); - } + acpi_free_device_properties(&adev->data.properties); } /** @@ -534,7 +758,8 @@ acpi_device_data_of_node(const struct fwnode_handle *fwnode) if (is_acpi_device_node(fwnode)) { const struct acpi_device *adev = to_acpi_device_node(fwnode); return &adev->data; - } else if (is_acpi_data_node(fwnode)) { + } + if (is_acpi_data_node(fwnode)) { const struct acpi_data_node *dn = to_acpi_data_node(fwnode); return &dn->data; } @@ -557,7 +782,7 @@ int acpi_node_prop_get(const struct fwnode_handle *fwnode, /** * acpi_data_get_property_array - return an ACPI array property with given name - * @adev: ACPI data object to get the property from + * @data: ACPI data object to get the property from * @name: Name of the property * @type: Expected type of array elements * @obj: Location to store a pointer to the property value (if not NULL) @@ -604,59 +829,130 @@ acpi_fwnode_get_named_child_node(const struct fwnode_handle *fwnode, { struct fwnode_handle *child; - /* - * Find first matching named child node of this fwnode. - * For ACPI this will be a data only sub-node. - */ - fwnode_for_each_child_node(fwnode, child) - if (acpi_data_node_match(child, childname)) + fwnode_for_each_child_node(fwnode, child) { + if (is_acpi_data_node(child)) { + if (acpi_data_node_match(child, childname)) + return child; + continue; + } + + if (!strncmp(acpi_device_bid(to_acpi_device_node(child)), + childname, ACPI_NAMESEG_SIZE)) return child; + } return NULL; } -/** - * __acpi_node_get_property_reference - returns handle to the referenced object - * @fwnode: Firmware node to get the property from - * @propname: Name of the property - * @index: Index of the reference to return - * @num_args: Maximum number of arguments after each reference - * @args: Location to store the returned reference with optional arguments - * - * Find property with @name, verifify that it is a package containing at least - * one object reference and if so, store the ACPI device object pointer to the - * target object in @args->adev. If the reference includes arguments, store - * them in the @args->args[] array. - * - * If there's more than one reference in the property value package, @index is - * used to select the one to return. - * - * It is possible to leave holes in the property value set like in the - * example below: - * - * Package () { - * "cs-gpios", - * Package () { - * ^GPIO, 19, 0, 0, - * ^GPIO, 20, 0, 0, - * 0, - * ^GPIO, 21, 0, 0, - * } - * } - * - * Calling this function with index %2 or index %3 return %-ENOENT. If the - * property does not contain any more values %-ENOENT is returned. The NULL - * entry must be single integer and preferably contain value %0. - * - * Return: %0 on success, negative error code on failure. 
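For reference, a minimal sketch of the driver-side call that resolves one entry of a reference property such as the "cs-gpios" package documented above; the property name, the argument count of 3 and the function name are illustrative assumptions:

#include <linux/property.h>

/* Illustrative sketch: resolve reference @index and its integer arguments. */
static int example_get_reference(const struct fwnode_handle *fwnode,
				 unsigned int index)
{
	struct fwnode_reference_args args;
	int ret;

	ret = fwnode_property_get_reference_args(fwnode, "cs-gpios", NULL,
						 3, index, &args);
	if (ret)
		return ret;

	/* args.fwnode is the referenced node, args.args[] holds the integers. */
	fwnode_handle_put(args.fwnode);
	return 0;
}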
- */ -int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode, - const char *propname, size_t index, size_t num_args, - struct fwnode_reference_args *args) +static unsigned int acpi_fwnode_get_args_count(struct fwnode_handle *fwnode, + const char *nargs_prop) +{ + const struct acpi_device_data *data; + const union acpi_object *obj; + int ret; + + data = acpi_device_data_of_node(fwnode); + if (!data) + return 0; + + ret = acpi_data_get_property(data, nargs_prop, ACPI_TYPE_INTEGER, &obj); + if (ret) + return 0; + + return obj->integer.value; +} + +static int acpi_get_ref_args(struct fwnode_reference_args *args, + struct fwnode_handle *ref_fwnode, + const char *nargs_prop, + const union acpi_object **element, + const union acpi_object *end, size_t num_args) +{ + u32 nargs = 0, i; + + if (nargs_prop) + num_args = acpi_fwnode_get_args_count(ref_fwnode, nargs_prop); + + /* + * Assume the following integer elements are all args. Stop counting on + * the first reference (possibly represented as a string) or end of the + * package arguments. In case of neither reference, nor integer, return + * an error, we can't parse it. + */ + for (i = 0; (*element) + i < end && i < num_args; i++) { + acpi_object_type type = (*element)[i].type; + + if (type == ACPI_TYPE_LOCAL_REFERENCE || type == ACPI_TYPE_STRING) + break; + + if (type == ACPI_TYPE_INTEGER) + nargs++; + else + return -EINVAL; + } + + if (nargs > NR_FWNODE_REFERENCE_ARGS) + return -EINVAL; + + if (args) { + args->fwnode = ref_fwnode; + args->nargs = nargs; + for (i = 0; i < nargs; i++) + args->args[i] = (*element)[i].integer.value; + } + + (*element) += nargs; + + return 0; +} + +static struct fwnode_handle *acpi_parse_string_ref(const struct fwnode_handle *fwnode, + const char *refstring) +{ + acpi_handle scope, handle; + struct acpi_data_node *dn; + struct acpi_device *device; + acpi_status status; + + if (is_acpi_device_node(fwnode)) { + scope = to_acpi_device_node(fwnode)->handle; + } else if (is_acpi_data_node(fwnode)) { + scope = to_acpi_data_node(fwnode)->handle; + } else { + pr_debug("Bad node type for node %pfw\n", fwnode); + return NULL; + } + + status = acpi_get_handle(scope, refstring, &handle); + if (ACPI_FAILURE(status)) { + acpi_handle_debug(scope, "Unable to get an ACPI handle for %s\n", + refstring); + return NULL; + } + + device = acpi_fetch_acpi_dev(handle); + if (device) + return acpi_fwnode_handle(device); + + status = acpi_get_data_full(handle, acpi_nondev_subnode_tag, + (void **)&dn, NULL); + if (ACPI_FAILURE(status) || !dn) { + acpi_handle_debug(handle, "Subnode not found\n"); + return NULL; + } + + return &dn->fwnode; +} + +static int acpi_fwnode_get_reference_args(const struct fwnode_handle *fwnode, + const char *propname, const char *nargs_prop, + unsigned int args_count, unsigned int index, + struct fwnode_reference_args *args) { const union acpi_object *element, *end; const union acpi_object *obj; const struct acpi_device_data *data; + struct fwnode_handle *ref_fwnode; struct acpi_device *device; int ret, idx = 0; @@ -668,34 +964,51 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode, if (ret) return ret == -EINVAL ? -ENOENT : -EINVAL; - /* - * The simplest case is when the value is a single reference. Just - * return that reference then. - */ - if (obj->type == ACPI_TYPE_LOCAL_REFERENCE) { + switch (obj->type) { + case ACPI_TYPE_LOCAL_REFERENCE: + /* Plain single reference without arguments. 
*/ if (index) + return -ENOENT; + + device = acpi_fetch_acpi_dev(obj->reference.handle); + if (!device) return -EINVAL; - ret = acpi_bus_get_device(obj->reference.handle, &device); - if (ret) - return ret == -ENODEV ? -EINVAL : ret; + if (!args) + return 0; args->fwnode = acpi_fwnode_handle(device); args->nargs = 0; + return 0; - } + case ACPI_TYPE_STRING: + if (index) + return -ENOENT; - /* - * If it is not a single reference, then it is a package of - * references followed by number of ints as follows: - * - * Package () { REF, INT, REF, INT, INT } - * - * The index argument is then used to determine which reference - * the caller wants (along with the arguments). - */ - if (obj->type != ACPI_TYPE_PACKAGE) + ref_fwnode = acpi_parse_string_ref(fwnode, obj->string.pointer); + if (!ref_fwnode) + return -EINVAL; + + args->fwnode = ref_fwnode; + args->nargs = 0; + + return 0; + case ACPI_TYPE_PACKAGE: + /* + * If it is not a single reference, then it is a package of + * references, followed by number of ints as follows: + * + * Package () { REF, INT, REF, INT, INT } + * + * Here, REF may be either a local reference or a string. The + * index argument is then used to determine which reference the + * caller wants (along with the arguments). + */ + break; + default: return -EINVAL; + } + if (index >= obj->package.count) return -ENOENT; @@ -703,62 +1016,47 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode, end = element + obj->package.count; while (element < end) { - u32 nargs, i; - - if (element->type == ACPI_TYPE_LOCAL_REFERENCE) { - struct fwnode_handle *ref_fwnode; - - ret = acpi_bus_get_device(element->reference.handle, - &device); - if (ret) + switch (element->type) { + case ACPI_TYPE_LOCAL_REFERENCE: + device = acpi_fetch_acpi_dev(element->reference.handle); + if (!device) return -EINVAL; - nargs = 0; element++; + ret = acpi_get_ref_args(idx == index ? args : NULL, + acpi_fwnode_handle(device), + nargs_prop, &element, end, + args_count); + if (ret < 0) + return ret; - /* - * Find the referred data extension node under the - * referred device node. - */ - for (ref_fwnode = acpi_fwnode_handle(device); - element < end && element->type == ACPI_TYPE_STRING; - element++) { - ref_fwnode = acpi_fwnode_get_named_child_node( - ref_fwnode, element->string.pointer); - if (!ref_fwnode) - return -EINVAL; - } - - /* assume following integer elements are all args */ - for (i = 0; element + i < end && i < num_args; i++) { - int type = element[i].type; - - if (type == ACPI_TYPE_INTEGER) - nargs++; - else if (type == ACPI_TYPE_LOCAL_REFERENCE) - break; - else - return -EINVAL; - } + if (idx == index) + return 0; - if (nargs > NR_FWNODE_REFERENCE_ARGS) + break; + case ACPI_TYPE_STRING: + ref_fwnode = acpi_parse_string_ref(fwnode, + element->string.pointer); + if (!ref_fwnode) return -EINVAL; - if (idx == index) { - args->fwnode = ref_fwnode; - args->nargs = nargs; - for (i = 0; i < nargs; i++) - args->args[i] = element[i].integer.value; + element++; + ret = acpi_get_ref_args(idx == index ? 
args : NULL, + ref_fwnode, nargs_prop, &element, end, + args_count); + if (ret < 0) + return ret; + if (idx == index) return 0; - } - element += nargs; - } else if (element->type == ACPI_TYPE_INTEGER) { + break; + case ACPI_TYPE_INTEGER: if (idx == index) return -ENOENT; element++; - } else { + break; + default: return -EINVAL; } @@ -767,6 +1065,50 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode, return -ENOENT; } + +/** + * __acpi_node_get_property_reference - returns handle to the referenced object + * @fwnode: Firmware node to get the property from + * @propname: Name of the property + * @index: Index of the reference to return + * @num_args: Maximum number of arguments after each reference + * @args: Location to store the returned reference with optional arguments + * (may be NULL) + * + * Find property with @name, verifify that it is a package containing at least + * one object reference and if so, store the ACPI device object pointer to the + * target object in @args->adev. If the reference includes arguments, store + * them in the @args->args[] array. + * + * If there's more than one reference in the property value package, @index is + * used to select the one to return. + * + * It is possible to leave holes in the property value set like in the + * example below: + * + * Package () { + * "cs-gpios", + * Package () { + * ^GPIO, 19, 0, 0, + * ^GPIO, 20, 0, 0, + * 0, + * ^GPIO, 21, 0, 0, + * } + * } + * + * Calling this function with index %2 or index %3 return %-ENOENT. If the + * property does not contain any more values %-ENOENT is returned. The NULL + * entry must be single integer and preferably contain value %0. + * + * Return: %0 on success, negative error code on failure. + */ +int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode, + const char *propname, size_t index, + size_t num_args, + struct fwnode_reference_args *args) +{ + return acpi_fwnode_get_reference_args(fwnode, propname, NULL, num_args, index, args); +} EXPORT_SYMBOL_GPL(__acpi_node_get_property_reference); static int acpi_data_prop_read_single(const struct acpi_device_data *data, @@ -774,125 +1116,80 @@ static int acpi_data_prop_read_single(const struct acpi_device_data *data, enum dev_prop_type proptype, void *val) { const union acpi_object *obj; - int ret; + int ret = 0; - if (!val) - return -EINVAL; - - if (proptype >= DEV_PROP_U8 && proptype <= DEV_PROP_U64) { + if (proptype >= DEV_PROP_U8 && proptype <= DEV_PROP_U64) ret = acpi_data_get_property(data, propname, ACPI_TYPE_INTEGER, &obj); - if (ret) - return ret; + else if (proptype == DEV_PROP_STRING) + ret = acpi_data_get_property(data, propname, ACPI_TYPE_STRING, &obj); + if (ret) + return ret; - switch (proptype) { - case DEV_PROP_U8: - if (obj->integer.value > U8_MAX) - return -EOVERFLOW; + switch (proptype) { + case DEV_PROP_U8: + if (obj->integer.value > U8_MAX) + return -EOVERFLOW; + if (val) *(u8 *)val = obj->integer.value; - break; - case DEV_PROP_U16: - if (obj->integer.value > U16_MAX) - return -EOVERFLOW; + break; + case DEV_PROP_U16: + if (obj->integer.value > U16_MAX) + return -EOVERFLOW; + if (val) *(u16 *)val = obj->integer.value; - break; - case DEV_PROP_U32: - if (obj->integer.value > U32_MAX) - return -EOVERFLOW; + break; + case DEV_PROP_U32: + if (obj->integer.value > U32_MAX) + return -EOVERFLOW; + if (val) *(u32 *)val = obj->integer.value; - break; - default: + break; + case DEV_PROP_U64: + if (val) *(u64 *)val = obj->integer.value; - break; - } - } else if (proptype == DEV_PROP_STRING) { 
- ret = acpi_data_get_property(data, propname, ACPI_TYPE_STRING, &obj); - if (ret) - return ret; - - *(char **)val = obj->string.pointer; - + break; + case DEV_PROP_STRING: + if (val) + *(char **)val = obj->string.pointer; return 1; - } else { - ret = -EINVAL; - } - return ret; -} - -int acpi_dev_prop_read_single(struct acpi_device *adev, const char *propname, - enum dev_prop_type proptype, void *val) -{ - int ret; - - if (!adev) + default: return -EINVAL; - - ret = acpi_data_prop_read_single(&adev->data, propname, proptype, val); - if (ret < 0 || proptype != ACPI_TYPE_STRING) - return ret; - return 0; -} - -static int acpi_copy_property_array_u8(const union acpi_object *items, u8 *val, - size_t nval) -{ - int i; - - for (i = 0; i < nval; i++) { - if (items[i].type != ACPI_TYPE_INTEGER) - return -EPROTO; - if (items[i].integer.value > U8_MAX) - return -EOVERFLOW; - - val[i] = items[i].integer.value; - } - return 0; -} - -static int acpi_copy_property_array_u16(const union acpi_object *items, - u16 *val, size_t nval) -{ - int i; - - for (i = 0; i < nval; i++) { - if (items[i].type != ACPI_TYPE_INTEGER) - return -EPROTO; - if (items[i].integer.value > U16_MAX) - return -EOVERFLOW; - - val[i] = items[i].integer.value; } - return 0; -} -static int acpi_copy_property_array_u32(const union acpi_object *items, - u32 *val, size_t nval) -{ - int i; - - for (i = 0; i < nval; i++) { - if (items[i].type != ACPI_TYPE_INTEGER) - return -EPROTO; - if (items[i].integer.value > U32_MAX) - return -EOVERFLOW; - - val[i] = items[i].integer.value; - } - return 0; + /* When no storage provided return number of available values */ + return val ? 0 : 1; } -static int acpi_copy_property_array_u64(const union acpi_object *items, - u64 *val, size_t nval) -{ - int i; - - for (i = 0; i < nval; i++) { - if (items[i].type != ACPI_TYPE_INTEGER) - return -EPROTO; - - val[i] = items[i].integer.value; - } - return 0; -} +#define acpi_copy_property_array_uint(items, val, nval) \ + ({ \ + typeof(items) __items = items; \ + typeof(val) __val = val; \ + typeof(nval) __nval = nval; \ + size_t i; \ + int ret = 0; \ + \ + for (i = 0; i < __nval; i++) { \ + if (__items->type == ACPI_TYPE_BUFFER) { \ + __val[i] = __items->buffer.pointer[i]; \ + continue; \ + } \ + if (__items[i].type != ACPI_TYPE_INTEGER) { \ + ret = -EPROTO; \ + break; \ + } \ + if (__items[i].integer.value > _Generic(__val, \ + u8 *: U8_MAX, \ + u16 *: U16_MAX, \ + u32 *: U32_MAX, \ + u64 *: U64_MAX)) { \ + ret = -EOVERFLOW; \ + break; \ + } \ + \ + __val[i] = __items[i].integer.value; \ + } \ + ret; \ + }) static int acpi_copy_property_array_string(const union acpi_object *items, char **val, size_t nval) @@ -917,43 +1214,77 @@ static int acpi_data_prop_read(const struct acpi_device_data *data, const union acpi_object *items; int ret; - if (val && nval == 1) { + if (nval == 1 || !val) { ret = acpi_data_prop_read_single(data, propname, proptype, val); - if (ret >= 0) + /* + * The overflow error means that the property is there and it is + * single-value, but its type does not match, so return. + */ + if (ret >= 0 || ret == -EOVERFLOW) return ret; + + /* + * Reading this property as a single-value one failed, but its + * value may still be represented as one-element array, so + * continue. 
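For reference, the NULL-@val query path above is what backs the value-counting helpers of the generic property API; a minimal sketch of sizing and then reading an integer array follows, where the property name "channels" and the function name are illustrative assumptions:

#include <linux/device.h>
#include <linux/property.h>
#include <linux/slab.h>

/* Illustrative sketch: count, allocate and read a u32 array property. */
static int example_read_u32_array(struct device *dev)
{
	int n = device_property_count_u32(dev, "channels");
	u32 *vals;
	int ret;

	if (n <= 0)
		return n ? n : -ENODATA;

	vals = kcalloc(n, sizeof(*vals), GFP_KERNEL);
	if (!vals)
		return -ENOMEM;

	ret = device_property_read_u32_array(dev, "channels", vals, n);
	kfree(vals);
	return ret;
}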
+ */ } ret = acpi_data_get_property_array(data, propname, ACPI_TYPE_ANY, &obj); + if (ret && proptype >= DEV_PROP_U8 && proptype <= DEV_PROP_U64) + ret = acpi_data_get_property(data, propname, ACPI_TYPE_BUFFER, + &obj); if (ret) return ret; - if (!val) + if (!val) { + if (obj->type == ACPI_TYPE_BUFFER) + return obj->buffer.length; + return obj->package.count; + } - if (proptype != DEV_PROP_STRING && nval > obj->package.count) - return -EOVERFLOW; - else if (nval <= 0) - return -EINVAL; + switch (proptype) { + case DEV_PROP_STRING: + break; + default: + if (obj->type == ACPI_TYPE_BUFFER) { + if (nval > obj->buffer.length) + return -EOVERFLOW; + } else { + if (nval > obj->package.count) + return -EOVERFLOW; + } + break; + } - items = obj->package.elements; + if (obj->type == ACPI_TYPE_BUFFER) { + if (proptype != DEV_PROP_U8) + return -EPROTO; + items = obj; + } else { + items = obj->package.elements; + } switch (proptype) { case DEV_PROP_U8: - ret = acpi_copy_property_array_u8(items, (u8 *)val, nval); + ret = acpi_copy_property_array_uint(items, (u8 *)val, nval); break; case DEV_PROP_U16: - ret = acpi_copy_property_array_u16(items, (u16 *)val, nval); + ret = acpi_copy_property_array_uint(items, (u16 *)val, nval); break; case DEV_PROP_U32: - ret = acpi_copy_property_array_u32(items, (u32 *)val, nval); + ret = acpi_copy_property_array_uint(items, (u32 *)val, nval); break; case DEV_PROP_U64: - ret = acpi_copy_property_array_u64(items, (u64 *)val, nval); + ret = acpi_copy_property_array_uint(items, (u64 *)val, nval); break; case DEV_PROP_STRING: - ret = acpi_copy_property_array_string( - items, (char **)val, - min_t(u32, nval, obj->package.count)); + nval = min(nval, obj->package.count); + if (nval == 0) + return -ENODATA; + + ret = acpi_copy_property_array_string(items, (char **)val, nval); break; default: ret = -EINVAL; @@ -962,12 +1293,6 @@ static int acpi_data_prop_read(const struct acpi_device_data *data, return ret; } -int acpi_dev_prop_read(const struct acpi_device *adev, const char *propname, - enum dev_prop_type proptype, void *val, size_t nval) -{ - return adev ? acpi_data_prop_read(&adev->data, propname, proptype, val, nval) : -EINVAL; -} - /** * acpi_node_prop_read - retrieve the value of an ACPI property with given name. * @fwnode: Firmware node to get the property from. @@ -980,57 +1305,65 @@ int acpi_dev_prop_read(const struct acpi_device *adev, const char *propname, * of the property. Otherwise, read at most @nval values to the array at the * location pointed to by @val. */ -int acpi_node_prop_read(const struct fwnode_handle *fwnode, - const char *propname, enum dev_prop_type proptype, - void *val, size_t nval) +static int acpi_node_prop_read(const struct fwnode_handle *fwnode, + const char *propname, enum dev_prop_type proptype, + void *val, size_t nval) { return acpi_data_prop_read(acpi_device_data_of_node(fwnode), propname, proptype, val, nval); } -/** +static int stop_on_next(struct acpi_device *adev, void *data) +{ + struct acpi_device **ret_p = data; + + if (!*ret_p) { + *ret_p = adev; + return 1; + } + + /* Skip until the "previous" object is found. */ + if (*ret_p == adev) + *ret_p = NULL; + + return 0; +} + +/* * acpi_get_next_subnode - Return the next child node handle for a fwnode * @fwnode: Firmware node to find the next child node for. * @child: Handle to one of the device's child nodes or a null handle. 
*/ -struct fwnode_handle *acpi_get_next_subnode(const struct fwnode_handle *fwnode, - struct fwnode_handle *child) +static struct fwnode_handle * +acpi_get_next_subnode(const struct fwnode_handle *fwnode, + struct fwnode_handle *child) { - const struct acpi_device *adev = to_acpi_device_node(fwnode); - const struct list_head *head; - struct list_head *next; - - if (!child || is_acpi_device_node(child)) { - struct acpi_device *child_adev; + struct acpi_device *adev = to_acpi_device_node(fwnode); - if (adev) - head = &adev->children; - else - goto nondev; + if ((!child || is_acpi_device_node(child)) && adev) { + struct acpi_device *child_adev = to_acpi_device_node(child); - if (list_empty(head)) - goto nondev; + acpi_dev_for_each_child(adev, stop_on_next, &child_adev); + if (child_adev) + return acpi_fwnode_handle(child_adev); - if (child) { - adev = to_acpi_device_node(child); - next = adev->node.next; - if (next == head) { - child = NULL; - goto nondev; - } - child_adev = list_entry(next, struct acpi_device, node); - } else { - child_adev = list_first_entry(head, struct acpi_device, - node); - } - return acpi_fwnode_handle(child_adev); + child = NULL; } - nondev: if (!child || is_acpi_data_node(child)) { const struct acpi_data_node *data = to_acpi_data_node(fwnode); + const struct list_head *head; + struct list_head *next; struct acpi_data_node *dn; + /* + * We can have a combination of device and data nodes, e.g. with + * hierarchical _DSD properties. Make sure the adev pointer is + * restored before going through data nodes, otherwise we will + * be looking for data_nodes below the last device found instead + * of the common fwnode shared by device_nodes and data_nodes. + */ + adev = to_acpi_device_node(fwnode); if (adev) head = &adev->data.subnodes; else if (data) @@ -1056,6 +1389,28 @@ struct fwnode_handle *acpi_get_next_subnode(const struct fwnode_handle *fwnode, return NULL; } +/* + * acpi_get_next_present_subnode - Return the next present child node handle + * @fwnode: Firmware node to find the next child node for. + * @child: Handle to one of the device's child nodes or a null handle. + * + * Like acpi_get_next_subnode(), but the device nodes returned by + * acpi_get_next_present_subnode() are guaranteed to be present. + * + * Returns: The fwnode handle of the next present sub-node. + */ +static struct fwnode_handle * +acpi_get_next_present_subnode(const struct fwnode_handle *fwnode, + struct fwnode_handle *child) +{ + do { + child = acpi_get_next_subnode(fwnode, child); + } while (is_acpi_device_node(child) && + !acpi_device_is_present(to_acpi_device_node(child))); + + return child; +} + /** * acpi_node_get_parent - Return parent fwnode of this fwnode * @fwnode: Firmware node whose parent to get @@ -1063,21 +1418,19 @@ struct fwnode_handle *acpi_get_next_subnode(const struct fwnode_handle *fwnode, * Returns parent node of an ACPI device or data firmware node or %NULL if * not available. 
*/ -struct fwnode_handle *acpi_node_get_parent(const struct fwnode_handle *fwnode) +static struct fwnode_handle * +acpi_node_get_parent(const struct fwnode_handle *fwnode) { if (is_acpi_data_node(fwnode)) { /* All data nodes have parent pointer so just return that */ return to_acpi_data_node(fwnode)->parent; - } else if (is_acpi_device_node(fwnode)) { - acpi_handle handle, parent_handle; - - handle = to_acpi_device_node(fwnode)->handle; - if (ACPI_SUCCESS(acpi_get_parent(handle, &parent_handle))) { - struct acpi_device *adev; + } + if (is_acpi_device_node(fwnode)) { + struct acpi_device *parent; - if (!acpi_bus_get_device(parent_handle, &adev)) - return acpi_fwnode_handle(adev); - } + parent = acpi_dev_parent(to_acpi_device_node(fwnode)); + if (parent) + return acpi_fwnode_handle(parent); } return NULL; @@ -1120,7 +1473,7 @@ static struct fwnode_handle *acpi_graph_get_next_endpoint( if (!prev) { do { - port = fwnode_get_next_child_node(fwnode, port); + port = acpi_get_next_subnode(fwnode, port); /* * The names of the port nodes begin with "port@" * followed by the number of the port node and they also @@ -1138,14 +1491,17 @@ static struct fwnode_handle *acpi_graph_get_next_endpoint( if (!port) return NULL; - endpoint = fwnode_get_next_child_node(port, prev); - while (!endpoint) { - port = fwnode_get_next_child_node(fwnode, port); - if (!port) + do { + endpoint = acpi_get_next_subnode(port, prev); + if (endpoint) break; - if (is_acpi_graph_node(port, "port")) - endpoint = fwnode_get_next_child_node(port, NULL); - } + + prev = NULL; + + do { + port = acpi_get_next_subnode(fwnode, port); + } while (port && !is_acpi_graph_node(port, "port")); + } while (port); /* * The names of the endpoint nodes begin with "endpoint@" followed by @@ -1190,9 +1546,8 @@ static struct fwnode_handle *acpi_graph_get_child_prop_value( /** - * acpi_graph_get_remote_enpoint - Parses and returns remote end of an endpoint - * @fwnode: Endpoint firmware node pointing to a remote device - * @endpoint: Firmware node of remote endpoint is filled here if not %NULL + * acpi_graph_get_remote_endpoint - Parses and returns remote end of an endpoint + * @__fwnode: Endpoint firmware node pointing to a remote device * * Returns the remote endpoint corresponding to @__fwnode. NULL on error. 
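For reference, a minimal sketch of how a driver walks the graph endpoints served by the ACPI callbacks above through the generic fwnode API; the function name is an illustrative assumption:

#include <linux/printk.h>
#include <linux/property.h>

/* Illustrative sketch: print each local endpoint and its remote endpoint. */
static void example_walk_endpoints(struct fwnode_handle *fwnode)
{
	struct fwnode_handle *ep;

	fwnode_graph_for_each_endpoint(fwnode, ep) {
		struct fwnode_handle *remote;

		remote = fwnode_graph_get_remote_endpoint(ep);
		if (remote)
			pr_info("endpoint %pfw -> %pfw\n", ep, remote);
		fwnode_handle_put(remote);
	}
}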
*/ @@ -1233,11 +1588,29 @@ acpi_graph_get_remote_endpoint(const struct fwnode_handle *__fwnode) static bool acpi_fwnode_device_is_available(const struct fwnode_handle *fwnode) { if (!is_acpi_device_node(fwnode)) - return false; + return true; return acpi_device_is_present(to_acpi_device_node(fwnode)); } +static const void * +acpi_fwnode_device_get_match_data(const struct fwnode_handle *fwnode, + const struct device *dev) +{ + return acpi_device_get_match_data(dev); +} + +static bool acpi_fwnode_device_dma_supported(const struct fwnode_handle *fwnode) +{ + return acpi_dma_supported(to_acpi_device_node(fwnode)); +} + +static enum dev_dma_attr +acpi_fwnode_device_get_dma_attr(const struct fwnode_handle *fwnode) +{ + return acpi_get_dma_attr(to_acpi_device_node(fwnode)); +} + static bool acpi_fwnode_property_present(const struct fwnode_handle *fwnode, const char *propname) { @@ -1281,14 +1654,50 @@ acpi_fwnode_property_read_string_array(const struct fwnode_handle *fwnode, val, nval); } -static int -acpi_fwnode_get_reference_args(const struct fwnode_handle *fwnode, - const char *prop, const char *nargs_prop, - unsigned int args_count, unsigned int index, - struct fwnode_reference_args *args) +static const char *acpi_fwnode_get_name(const struct fwnode_handle *fwnode) +{ + const struct acpi_device *adev; + struct fwnode_handle *parent; + + /* Is this the root node? */ + parent = fwnode_get_parent(fwnode); + if (!parent) + return "\\"; + + fwnode_handle_put(parent); + + if (is_acpi_data_node(fwnode)) { + const struct acpi_data_node *dn = to_acpi_data_node(fwnode); + + return dn->name; + } + + adev = to_acpi_device_node(fwnode); + if (WARN_ON(!adev)) + return NULL; + + return acpi_device_bid(adev); +} + +static const char * +acpi_fwnode_get_name_prefix(const struct fwnode_handle *fwnode) { - return __acpi_node_get_property_reference(fwnode, prop, index, - args_count, args); + struct fwnode_handle *parent; + + /* Is this the root node? */ + parent = fwnode_get_parent(fwnode); + if (!parent) + return ""; + + /* Is this 2nd node from the root? */ + parent = fwnode_get_next_parent(parent); + if (!parent) + return ""; + + fwnode_handle_put(parent); + + /* ACPI device or data node. 
*/ + return "."; } static struct fwnode_handle * @@ -1309,28 +1718,41 @@ static int acpi_fwnode_graph_parse_endpoint(const struct fwnode_handle *fwnode, if (fwnode_property_read_u32(fwnode, "reg", &endpoint->id)) fwnode_property_read_u32(fwnode, "endpoint", &endpoint->id); + fwnode_handle_put(port_fwnode); return 0; } -static const void * -acpi_fwnode_device_get_match_data(const struct fwnode_handle *fwnode, - const struct device *dev) +static int acpi_fwnode_irq_get(const struct fwnode_handle *fwnode, + unsigned int index) { - return acpi_device_get_match_data(dev); + struct resource res; + int ret; + + ret = acpi_irq_get(ACPI_HANDLE_FWNODE(fwnode), index, &res); + if (ret) + return ret; + + return res.start; } #define DECLARE_ACPI_FWNODE_OPS(ops) \ const struct fwnode_operations ops = { \ .device_is_available = acpi_fwnode_device_is_available, \ .device_get_match_data = acpi_fwnode_device_get_match_data, \ + .device_dma_supported = \ + acpi_fwnode_device_dma_supported, \ + .device_get_dma_attr = acpi_fwnode_device_get_dma_attr, \ .property_present = acpi_fwnode_property_present, \ + .property_read_bool = acpi_fwnode_property_present, \ .property_read_int_array = \ acpi_fwnode_property_read_int_array, \ .property_read_string_array = \ acpi_fwnode_property_read_string_array, \ .get_parent = acpi_node_get_parent, \ - .get_next_child_node = acpi_get_next_subnode, \ + .get_next_child_node = acpi_get_next_present_subnode, \ .get_named_child_node = acpi_fwnode_get_named_child_node, \ + .get_name = acpi_fwnode_get_name, \ + .get_name_prefix = acpi_fwnode_get_name_prefix, \ .get_reference_args = acpi_fwnode_get_reference_args, \ .graph_get_next_endpoint = \ acpi_graph_get_next_endpoint, \ @@ -1338,6 +1760,7 @@ acpi_fwnode_device_get_match_data(const struct fwnode_handle *fwnode, acpi_graph_get_remote_endpoint, \ .graph_get_port_parent = acpi_fwnode_get_parent, \ .graph_parse_endpoint = acpi_fwnode_graph_parse_endpoint, \ + .irq_get = acpi_fwnode_irq_get, \ }; \ EXPORT_SYMBOL_GPL(ops) diff --git a/drivers/acpi/reboot.c b/drivers/acpi/reboot.c index ca707f5b521d..b79b7c99c237 100644 --- a/drivers/acpi/reboot.c +++ b/drivers/acpi/reboot.c @@ -1,8 +1,11 @@ // SPDX-License-Identifier: GPL-2.0 +#define pr_fmt(fmt) "ACPI: " fmt + #include <linux/pci.h> #include <linux/acpi.h> #include <acpi/reboot.h> +#include <linux/delay.h> #ifdef CONFIG_PCI static void acpi_pci_reboot(struct acpi_generic_address *rr, u8 reset_value) @@ -62,8 +65,18 @@ void acpi_reboot(void) case ACPI_ADR_SPACE_SYSTEM_MEMORY: case ACPI_ADR_SPACE_SYSTEM_IO: - printk(KERN_DEBUG "ACPI MEMORY or I/O RESET_REG.\n"); + pr_debug("ACPI MEMORY or I/O RESET_REG.\n"); acpi_reset(); break; } + + /* + * Some platforms do not shut down immediately after writing to the + * ACPI reset register, and this results in racing with the + * subsequent reboot mechanism. + * + * The 15ms delay has been found to be long enough for the system + * to reboot on the affected platforms. + */ + mdelay(15); } diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c index 316a0fc785e3..d16906f46484 100644 --- a/drivers/acpi/resource.c +++ b/drivers/acpi/resource.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * drivers/acpi/resource.c - ACPI device resources interpretation. 
* @@ -6,15 +7,6 @@ * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ @@ -24,6 +16,8 @@ #include <linux/ioport.h> #include <linux/slab.h> #include <linux/irq.h> +#include <linux/dmi.h> +#include <linux/string_choices.h> #ifdef CONFIG_X86 #define valid_IRQ(i) (((i) != 0) && ((i) != 2)) @@ -257,6 +251,9 @@ static bool acpi_decode_space(struct resource_win *win, switch (addr->resource_type) { case ACPI_MEMORY_RANGE: acpi_dev_memresource_flags(res, len, wp); + + if (addr->info.mem.caching == ACPI_PREFETCHABLE_MEMORY) + res->flags |= IORESOURCE_PREFETCH; break; case ACPI_IO_RANGE: acpi_dev_ioresource_flags(res, len, iodec, @@ -272,9 +269,6 @@ static bool acpi_decode_space(struct resource_win *win, if (addr->producer_consumer == ACPI_PRODUCER) res->flags |= IORESOURCE_WINDOW; - if (addr->info.mem.caching == ACPI_PREFETCHABLE_MEMORY) - res->flags |= IORESOURCE_PREFETCH; - return !(res->flags & IORESOURCE_DISABLED); } @@ -343,8 +337,9 @@ EXPORT_SYMBOL_GPL(acpi_dev_resource_ext_address_space); * @triggering: Triggering type as provided by ACPI. * @polarity: Interrupt polarity as provided by ACPI. * @shareable: Whether or not the interrupt is shareable. + * @wake_capable: Wake capability as provided by ACPI. */ -unsigned long acpi_dev_irq_flags(u8 triggering, u8 polarity, u8 shareable) +unsigned long acpi_dev_irq_flags(u8 triggering, u8 polarity, u8 shareable, u8 wake_capable) { unsigned long flags; @@ -358,6 +353,9 @@ unsigned long acpi_dev_irq_flags(u8 triggering, u8 polarity, u8 shareable) if (shareable == ACPI_SHARED) flags |= IORESOURCE_IRQ_SHAREABLE; + if (wake_capable == ACPI_WAKE_CAPABLE) + flags |= IORESOURCE_IRQ_WAKECAPABLE; + return flags | IORESOURCE_IRQ; } EXPORT_SYMBOL_GPL(acpi_dev_irq_flags); @@ -381,28 +379,385 @@ unsigned int acpi_dev_get_irq_type(int triggering, int polarity) case ACPI_ACTIVE_BOTH: if (triggering == ACPI_EDGE_SENSITIVE) return IRQ_TYPE_EDGE_BOTH; - /* fall through */ + fallthrough; default: return IRQ_TYPE_NONE; } } EXPORT_SYMBOL_GPL(acpi_dev_get_irq_type); -static void acpi_dev_irqresource_disabled(struct resource *res, u32 gsi) +/* + * DMI matches for boards where the DSDT specifies the kbd IRQ as + * level active-low and using the override changes this to rising edge, + * stopping the keyboard from working. 
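For reference, the acpi_dev_irq_flags() change above means an IRQ parsed from _CRS now carries IORESOURCE_IRQ_WAKECAPABLE when firmware marks it wake capable; a minimal sketch of a consumer checking that flag follows, where the function name is an illustrative assumption:

#include <linux/acpi.h>
#include <linux/ioport.h>
#include <linux/resource_ext.h>

/* Illustrative sketch: report whether any _CRS IRQ of @adev is wake capable. */
static bool example_irq_is_wake_capable(struct acpi_device *adev)
{
	struct resource_entry *entry;
	LIST_HEAD(list);
	bool wake = false;

	if (acpi_dev_get_resources(adev, &list, NULL, NULL) < 0)
		return false;

	resource_list_for_each_entry(entry, &list) {
		if ((entry->res->flags & IORESOURCE_IRQ) &&
		    (entry->res->flags & IORESOURCE_IRQ_WAKECAPABLE)) {
			wake = true;
			break;
		}
	}

	acpi_dev_free_resource_list(&list);
	return wake;
}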
+ */ +static const struct dmi_system_id irq1_level_low_skip_override[] = { + { + /* MEDION P15651 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "MEDION"), + DMI_MATCH(DMI_BOARD_NAME, "M15T"), + }, + }, + { + /* MEDION S17405 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "MEDION"), + DMI_MATCH(DMI_BOARD_NAME, "M17T"), + }, + }, + { + /* MEDION S17413 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "MEDION"), + DMI_MATCH(DMI_BOARD_NAME, "M1xA"), + }, + }, + { + /* Asus Vivobook K3402ZA */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_BOARD_NAME, "K3402ZA"), + }, + }, + { + /* Asus Vivobook K3502ZA */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_BOARD_NAME, "K3502ZA"), + }, + }, + { + /* Asus Vivobook S5402ZA */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_BOARD_NAME, "S5402ZA"), + }, + }, + { + /* Asus Vivobook S5602ZA */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_BOARD_NAME, "S5602ZA"), + }, + }, + { + /* Asus Vivobook X1404VAP */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_BOARD_NAME, "X1404VAP"), + }, + }, + { + /* Asus Vivobook X1504VAP */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_BOARD_NAME, "X1504VAP"), + }, + }, + { + /* Asus Vivobook X1704VAP */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_BOARD_NAME, "X1704VAP"), + }, + }, + { + /* Asus ExpertBook B1402C* */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_BOARD_NAME, "B1402C"), + }, + }, + { + /* Asus ExpertBook B1502C* */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_BOARD_NAME, "B1502C"), + }, + }, + { + /* Asus ExpertBook B2402 (B2402CBA / B2402FBA / B2402CVA / B2402FVA) */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_BOARD_NAME, "B2402"), + }, + }, + { + /* Asus ExpertBook B2502 (B2502CBA / B2502FBA / B2502CVA / B2502FVA) */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_BOARD_NAME, "B2502"), + }, + }, + { + /* Asus Vivobook Go E1404GA* */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_BOARD_NAME, "E1404GA"), + }, + }, + { + /* Asus Vivobook E1504GA* */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_BOARD_NAME, "E1504GA"), + }, + }, + { + /* Asus Vivobook Pro N6506M* */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_BOARD_NAME, "N6506M"), + }, + }, + { + /* Asus Vivobook Pro N6506CU* */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_BOARD_NAME, "N6506CU"), + }, + }, + { + /* LG Electronics 17U70P */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LG Electronics"), + DMI_MATCH(DMI_BOARD_NAME, "17U70P"), + }, + }, + { + /* LG Electronics 16T90SP */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LG Electronics"), + DMI_MATCH(DMI_BOARD_NAME, "16T90SP"), + }, + }, + { } +}; + +/* + * DMI matches for AMD Zen boards where the DSDT specifies the kbd IRQ + * as falling edge and this must be overridden to rising edge, + * to have a working keyboard. 
+ */ +static const struct dmi_system_id irq1_edge_low_force_override[] = { + { + /* MECHREVO Jiaolong17KS Series GM7XG0M */ + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "GM7XG0M"), + }, + }, + { + /* XMG APEX 17 (M23) */ + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "GMxBGxx"), + }, + }, + { + /* TongFang GMxRGxx/XMG CORE 15 (M22)/TUXEDO Stellaris 15 Gen4 AMD */ + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "GMxRGxx"), + }, + }, + { + /* TongFang GMxXGxx/TUXEDO Polaris 15 Gen5 AMD */ + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "GMxXGxx"), + }, + }, + { + /* TongFang GMxXGxX/TUXEDO Polaris 15 Gen5 AMD */ + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "GMxXGxX"), + }, + }, + { + /* TongFang GMxXGxx sold as Eluktronics Inc. RP-15 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Eluktronics Inc."), + DMI_MATCH(DMI_BOARD_NAME, "RP-15"), + }, + }, + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Eluktronics Inc."), + DMI_MATCH(DMI_BOARD_NAME, "MECH-17"), + }, + }, + { + /* TongFang GM6XGxX/TUXEDO Stellaris 16 Gen5 AMD */ + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "GM6XGxX"), + }, + }, + { + /* MAINGEAR Vector Pro 2 15 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Micro Electronics Inc"), + DMI_MATCH(DMI_PRODUCT_NAME, "MG-VCP2-15A3070T"), + } + }, + { + /* MAINGEAR Vector Pro 2 17 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Micro Electronics Inc"), + DMI_MATCH(DMI_PRODUCT_NAME, "MG-VCP2-17A3070T"), + }, + }, + { + /* TongFang GM6BGEQ / PCSpecialist Elimina Pro 16 M, RTX 3050 */ + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "GM6BGEQ"), + }, + }, + { + /* TongFang GM6BG5Q, RTX 4050 */ + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "GM6BG5Q"), + }, + }, + { + /* TongFang GM6BG0Q / PCSpecialist Elimina Pro 16 M, RTX 4060 */ + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "GM6BG0Q"), + }, + }, + { + /* Infinity E15-5A165-BM */ + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "GM5RG1E0009COM"), + }, + }, + { + /* Infinity E15-5A305-1M */ + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "GM5RGEE0016COM"), + }, + }, + { + /* Lunnen Ground 15 / AMD Ryzen 5 5500U */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Lunnen"), + DMI_MATCH(DMI_BOARD_NAME, "LLL5DAW"), + }, + }, + { + /* Lunnen Ground 16 / AMD Ryzen 7 5800U */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Lunnen"), + DMI_MATCH(DMI_BOARD_NAME, "LL6FA"), + }, + }, + { + /* MAIBENBEN X577 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "MAIBENBEN"), + DMI_MATCH(DMI_BOARD_NAME, "X577"), + }, + }, + { + /* Maibenben X565 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "MAIBENBEN"), + DMI_MATCH(DMI_BOARD_NAME, "X565"), + }, + }, + { + /* TongFang GXxHRXx/TUXEDO InfinityBook Pro Gen9 AMD */ + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "GXxHRXx"), + }, + }, + { + /* TongFang GMxHGxx/TUXEDO Stellaris Slim Gen1 AMD */ + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "GMxHGxx"), + }, + }, + { + /* MACHENIKE L16P/L16P */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "MACHENIKE"), + DMI_MATCH(DMI_BOARD_NAME, "L16P"), + }, + }, + { + /* + * TongFang GM5HG0A in case of the SKIKK Vanaheim relabel the + * board-name is changed, so check OEM strings instead. Note + * OEM string matches are always exact matches. 
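For reference, a small sketch of how a DMI quirk table like the ones above is declared and consulted; the vendor and board strings are made up for illustration and do not describe a real quirk:

#include <linux/dmi.h>

/* Illustrative sketch: declare and check a DMI quirk table. */
static const struct dmi_system_id example_quirk_table[] = {
	{
		/* Example Vendor EX123 (made-up strings) */
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "EXAMPLE"),
			DMI_MATCH(DMI_BOARD_NAME, "EX123"),
		},
	},
	{ }
};

static bool example_needs_quirk(void)
{
	return dmi_check_system(example_quirk_table);
}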
+ * https://bugzilla.kernel.org/show_bug.cgi?id=219614 + */ + .matches = { + DMI_EXACT_MATCH(DMI_OEM_STRING, "GM5HG0A"), + }, + }, + { } +}; + +struct irq_override_cmp { + const struct dmi_system_id *system; + unsigned char irq; + unsigned char triggering; + unsigned char polarity; + unsigned char shareable; + bool override; +}; + +static const struct irq_override_cmp override_table[] = { + { irq1_level_low_skip_override, 1, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, false }, + { irq1_edge_low_force_override, 1, ACPI_EDGE_SENSITIVE, ACPI_ACTIVE_LOW, 1, true }, +}; + +static bool acpi_dev_irq_override(u32 gsi, u8 triggering, u8 polarity, + u8 shareable) { - res->start = gsi; - res->end = gsi; - res->flags = IORESOURCE_IRQ | IORESOURCE_DISABLED | IORESOURCE_UNSET; + int i; + + for (i = 0; i < ARRAY_SIZE(override_table); i++) { + const struct irq_override_cmp *entry = &override_table[i]; + + if (entry->irq == gsi && + entry->triggering == triggering && + entry->polarity == polarity && + entry->shareable == shareable && + dmi_check_system(entry->system)) + return entry->override; + } + +#ifdef CONFIG_X86 + /* + * Always use the MADT override info, except for the i8042 PS/2 ctrl + * IRQs (1 and 12). For these the DSDT IRQ settings should sometimes + * be used otherwise PS/2 keyboards / mice will not work. + */ + if (gsi != 1 && gsi != 12) + return true; + + /* If the override comes from an INT_SRC_OVR MADT entry, honor it. */ + if (acpi_int_src_ovr[gsi]) + return true; + + /* + * IRQ override isn't needed on modern AMD Zen systems and + * this override breaks active low IRQs on AMD Ryzen 6000 and + * newer systems. Skip it. + */ + if (boot_cpu_has(X86_FEATURE_ZEN)) + return false; +#endif + + return true; } static void acpi_dev_get_irqresource(struct resource *res, u32 gsi, u8 triggering, u8 polarity, u8 shareable, - bool legacy) + u8 wake_capable, bool check_override) { int irq, p, t; if (!valid_IRQ(gsi)) { - acpi_dev_irqresource_disabled(res, gsi); + irqresource_disabled(res, gsi); return; } @@ -416,25 +771,30 @@ static void acpi_dev_get_irqresource(struct resource *res, u32 gsi, * using extended IRQ descriptors we take the IRQ configuration * from _CRS directly. */ - if (legacy && !acpi_get_override_irq(gsi, &t, &p)) { + if (check_override && + acpi_dev_irq_override(gsi, triggering, polarity, shareable) && + !acpi_get_override_irq(gsi, &t, &p)) { u8 trig = t ? ACPI_LEVEL_SENSITIVE : ACPI_EDGE_SENSITIVE; u8 pol = p ? ACPI_ACTIVE_LOW : ACPI_ACTIVE_HIGH; if (triggering != trig || polarity != pol) { - pr_warning("ACPI: IRQ %d override to %s, %s\n", gsi, - t ? "level" : "edge", p ? "low" : "high"); + pr_warn("ACPI: IRQ %d override to %s%s, %s%s\n", gsi, + t ? "level" : "edge", + trig == triggering ? "" : "(!)", + str_low_high(p), + pol == polarity ? 
"" : "(!)"); triggering = trig; polarity = pol; } } - res->flags = acpi_dev_irq_flags(triggering, polarity, shareable); + res->flags = acpi_dev_irq_flags(triggering, polarity, shareable, wake_capable); irq = acpi_register_gsi(NULL, gsi, triggering, polarity); if (irq >= 0) { res->start = irq; res->end = irq; } else { - acpi_dev_irqresource_disabled(res, gsi); + irqresource_disabled(res, gsi); } } @@ -471,25 +831,27 @@ bool acpi_dev_resource_interrupt(struct acpi_resource *ares, int index, */ irq = &ares->data.irq; if (index >= irq->interrupt_count) { - acpi_dev_irqresource_disabled(res, 0); + irqresource_disabled(res, 0); return false; } acpi_dev_get_irqresource(res, irq->interrupts[index], irq->triggering, irq->polarity, - irq->sharable, true); + irq->shareable, irq->wake_capable, + true); break; case ACPI_RESOURCE_TYPE_EXTENDED_IRQ: ext_irq = &ares->data.extended_irq; if (index >= ext_irq->interrupt_count) { - acpi_dev_irqresource_disabled(res, 0); + irqresource_disabled(res, 0); return false; } if (is_gsi(ext_irq)) acpi_dev_get_irqresource(res, ext_irq->interrupts[index], ext_irq->triggering, ext_irq->polarity, - ext_irq->sharable, false); + ext_irq->shareable, ext_irq->wake_capable, + false); else - acpi_dev_irqresource_disabled(res, 0); + irqresource_disabled(res, 0); break; default: res->flags = 0; @@ -549,7 +911,7 @@ static acpi_status acpi_dev_process_resource(struct acpi_resource *ares, ret = c->preproc(ares, c->preproc_data); if (ret < 0) { c->error = ret; - return AE_CTRL_TERMINATE; + return AE_ABORT_METHOD; } else if (ret > 0) { return AE_OK; } @@ -611,7 +973,7 @@ static int __acpi_dev_get_resources(struct acpi_device *adev, * @preproc_data: Pointer passed to the caller's preprocessing routine. * * Evaluate the _CRS method for the given device node and process its output by - * (1) executing the @preproc() rountine provided by the caller, passing the + * (1) executing the @preproc() routine provided by the caller, passing the * resource pointer and @preproc_data to it as arguments, for each ACPI resource * returned and (2) converting all of the returned ACPI resources into struct * resource objects if possible. If the return value of @preproc() in step (1) @@ -643,6 +1005,9 @@ static int is_memory(struct acpi_resource *ares, void *not_used) memset(&win, 0, sizeof(win)); + if (acpi_dev_filter_resource_type(ares, IORESOURCE_MEM)) + return 1; + return !(acpi_dev_resource_memory(ares, res) || acpi_dev_resource_address_space(ares, &win) || acpi_dev_resource_ext_address_space(ares, &win)); @@ -672,6 +1037,23 @@ int acpi_dev_get_dma_resources(struct acpi_device *adev, struct list_head *list) EXPORT_SYMBOL_GPL(acpi_dev_get_dma_resources); /** + * acpi_dev_get_memory_resources - Get current memory resources of a device. + * @adev: ACPI device node to get the resources for. + * @list: Head of the resultant list of resources (must be empty). + * + * This is a helper function that locates all memory type resources of @adev + * with acpi_dev_get_resources(). + * + * The number of resources in the output list is returned on success, an error + * code reflecting the error condition is returned otherwise. + */ +int acpi_dev_get_memory_resources(struct acpi_device *adev, struct list_head *list) +{ + return acpi_dev_get_resources(adev, list, is_memory, NULL); +} +EXPORT_SYMBOL_GPL(acpi_dev_get_memory_resources); + +/** * acpi_dev_filter_resource_type - Filter ACPI resource according to resource * types * @ares: Input ACPI resource object. 
@@ -754,9 +1136,9 @@ static acpi_status acpi_res_consumer_cb(acpi_handle handle, u32 depth, { struct resource *res = context; struct acpi_device **consumer = (struct acpi_device **) ret; - struct acpi_device *adev; + struct acpi_device *adev = acpi_fetch_acpi_dev(handle); - if (acpi_bus_get_device(handle, &adev)) + if (!adev) return AE_OK; if (acpi_dev_consumes_res(adev, res)) { diff --git a/drivers/acpi/riscv/Kconfig b/drivers/acpi/riscv/Kconfig new file mode 100644 index 000000000000..046296a18d00 --- /dev/null +++ b/drivers/acpi/riscv/Kconfig @@ -0,0 +1,7 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# ACPI Configuration for RISC-V +# + +config ACPI_RIMT + bool diff --git a/drivers/acpi/riscv/Makefile b/drivers/acpi/riscv/Makefile new file mode 100644 index 000000000000..1284a076fa88 --- /dev/null +++ b/drivers/acpi/riscv/Makefile @@ -0,0 +1,5 @@ +# SPDX-License-Identifier: GPL-2.0-only +obj-y += rhct.o init.o irq.o +obj-$(CONFIG_ACPI_PROCESSOR_IDLE) += cpuidle.o +obj-$(CONFIG_ACPI_CPPC_LIB) += cppc.o +obj-$(CONFIG_ACPI_RIMT) += rimt.o diff --git a/drivers/acpi/riscv/cppc.c b/drivers/acpi/riscv/cppc.c new file mode 100644 index 000000000000..42c1a9052470 --- /dev/null +++ b/drivers/acpi/riscv/cppc.c @@ -0,0 +1,155 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Implement CPPC FFH helper routines for RISC-V. + * + * Copyright (C) 2024 Ventana Micro Systems Inc. + */ + +#include <acpi/cppc_acpi.h> +#include <asm/csr.h> +#include <asm/sbi.h> + +#define SBI_EXT_CPPC 0x43505043 + +/* CPPC interfaces defined in SBI spec */ +#define SBI_CPPC_PROBE 0x0 +#define SBI_CPPC_READ 0x1 +#define SBI_CPPC_READ_HI 0x2 +#define SBI_CPPC_WRITE 0x3 + +/* RISC-V FFH definitions from RISC-V FFH spec */ +#define FFH_CPPC_TYPE(r) (((r) & GENMASK_ULL(63, 60)) >> 60) +#define FFH_CPPC_SBI_REG(r) ((r) & GENMASK(31, 0)) +#define FFH_CPPC_CSR_NUM(r) ((r) & GENMASK(11, 0)) + +#define FFH_CPPC_SBI 0x1 +#define FFH_CPPC_CSR 0x2 + +struct sbi_cppc_data { + u64 val; + u32 reg; + struct sbiret ret; +}; + +static bool cppc_ext_present; + +static int __init sbi_cppc_init(void) +{ + if (sbi_spec_version >= sbi_mk_version(2, 0) && + sbi_probe_extension(SBI_EXT_CPPC) > 0) { + cppc_ext_present = true; + } else { + cppc_ext_present = false; + } + + return 0; +} +device_initcall(sbi_cppc_init); + +static void sbi_cppc_read(void *read_data) +{ + struct sbi_cppc_data *data = (struct sbi_cppc_data *)read_data; + + data->ret = sbi_ecall(SBI_EXT_CPPC, SBI_CPPC_READ, + data->reg, 0, 0, 0, 0, 0); +} + +static void sbi_cppc_write(void *write_data) +{ + struct sbi_cppc_data *data = (struct sbi_cppc_data *)write_data; + + data->ret = sbi_ecall(SBI_EXT_CPPC, SBI_CPPC_WRITE, + data->reg, data->val, 0, 0, 0, 0); +} + +static void cppc_ffh_csr_read(void *read_data) +{ + struct sbi_cppc_data *data = (struct sbi_cppc_data *)read_data; + + switch (data->reg) { + /* Support only TIME CSR for now */ + case CSR_TIME: + data->ret.value = csr_read(CSR_TIME); + data->ret.error = 0; + break; + default: + data->ret.error = -EINVAL; + break; + } +} + +static void cppc_ffh_csr_write(void *write_data) +{ + struct sbi_cppc_data *data = (struct sbi_cppc_data *)write_data; + + data->ret.error = -EINVAL; +} + +/* + * Refer to drivers/acpi/cppc_acpi.c for the description of the functions + * below. 
+ */ +bool cpc_ffh_supported(void) +{ + return true; +} + +int cpc_read_ffh(int cpu, struct cpc_reg *reg, u64 *val) +{ + struct sbi_cppc_data data; + + if (WARN_ON_ONCE(irqs_disabled())) + return -EPERM; + + if (FFH_CPPC_TYPE(reg->address) == FFH_CPPC_SBI) { + if (!cppc_ext_present) + return -EINVAL; + + data.reg = FFH_CPPC_SBI_REG(reg->address); + + smp_call_function_single(cpu, sbi_cppc_read, &data, 1); + + *val = data.ret.value; + + return (data.ret.error) ? sbi_err_map_linux_errno(data.ret.error) : 0; + } else if (FFH_CPPC_TYPE(reg->address) == FFH_CPPC_CSR) { + data.reg = FFH_CPPC_CSR_NUM(reg->address); + + smp_call_function_single(cpu, cppc_ffh_csr_read, &data, 1); + + *val = data.ret.value; + + return data.ret.error; + } + + return -EINVAL; +} + +int cpc_write_ffh(int cpu, struct cpc_reg *reg, u64 val) +{ + struct sbi_cppc_data data; + + if (WARN_ON_ONCE(irqs_disabled())) + return -EPERM; + + if (FFH_CPPC_TYPE(reg->address) == FFH_CPPC_SBI) { + if (!cppc_ext_present) + return -EINVAL; + + data.reg = FFH_CPPC_SBI_REG(reg->address); + data.val = val; + + smp_call_function_single(cpu, sbi_cppc_write, &data, 1); + + return (data.ret.error) ? sbi_err_map_linux_errno(data.ret.error) : 0; + } else if (FFH_CPPC_TYPE(reg->address) == FFH_CPPC_CSR) { + data.reg = FFH_CPPC_CSR_NUM(reg->address); + data.val = val; + + smp_call_function_single(cpu, cppc_ffh_csr_write, &data, 1); + + return data.ret.error; + } + + return -EINVAL; +} diff --git a/drivers/acpi/riscv/cpuidle.c b/drivers/acpi/riscv/cpuidle.c new file mode 100644 index 000000000000..624f9bbdb58c --- /dev/null +++ b/drivers/acpi/riscv/cpuidle.c @@ -0,0 +1,81 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2024, Ventana Micro Systems Inc + * Author: Sunil V L <sunilvl@ventanamicro.com> + * + */ + +#include <linux/acpi.h> +#include <acpi/processor.h> +#include <linux/cpu_pm.h> +#include <linux/cpuidle.h> +#include <linux/suspend.h> +#include <asm/cpuidle.h> +#include <asm/sbi.h> +#include <asm/suspend.h> + +#define RISCV_FFH_LPI_TYPE_MASK GENMASK_ULL(63, 60) +#define RISCV_FFH_LPI_RSVD_MASK GENMASK_ULL(59, 32) + +#define RISCV_FFH_LPI_TYPE_SBI BIT_ULL(60) + +static int acpi_cpu_init_idle(unsigned int cpu) +{ + int i; + struct acpi_lpi_state *lpi; + struct acpi_processor *pr = per_cpu(processors, cpu); + + if (unlikely(!pr || !pr->flags.has_lpi)) + return -EINVAL; + + if (!riscv_sbi_hsm_is_supported()) + return -ENODEV; + + if (pr->power.count <= 1) + return -ENODEV; + + for (i = 1; i < pr->power.count; i++) { + u32 state; + + lpi = &pr->power.lpi_states[i]; + + /* + * Validate Entry Method as per FFH spec. 
+ * bits[63:60] should be 0x1 + * bits[59:32] should be 0x0 + * bits[31:0] represent a SBI power_state + */ + if (((lpi->address & RISCV_FFH_LPI_TYPE_MASK) != RISCV_FFH_LPI_TYPE_SBI) || + (lpi->address & RISCV_FFH_LPI_RSVD_MASK)) { + pr_warn("Invalid LPI entry method %#llx\n", lpi->address); + return -EINVAL; + } + + state = lpi->address; + if (!riscv_sbi_suspend_state_is_valid(state)) { + pr_warn("Invalid SBI power state %#x\n", state); + return -EINVAL; + } + } + + return 0; +} + +int acpi_processor_ffh_lpi_probe(unsigned int cpu) +{ + return acpi_cpu_init_idle(cpu); +} + +int acpi_processor_ffh_lpi_enter(struct acpi_lpi_state *lpi) +{ + u32 state = lpi->address; + + if (state & SBI_HSM_SUSP_NON_RET_BIT) + return CPU_PM_CPU_IDLE_ENTER_PARAM(riscv_sbi_hart_suspend, + lpi->index, + state); + else + return CPU_PM_CPU_IDLE_ENTER_RETENTION_PARAM(riscv_sbi_hart_suspend, + lpi->index, + state); +} diff --git a/drivers/acpi/riscv/init.c b/drivers/acpi/riscv/init.c new file mode 100644 index 000000000000..7c00f7995e86 --- /dev/null +++ b/drivers/acpi/riscv/init.c @@ -0,0 +1,15 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2023-2024, Ventana Micro Systems Inc + * Author: Sunil V L <sunilvl@ventanamicro.com> + */ + +#include <linux/acpi.h> +#include "init.h" + +void __init acpi_arch_init(void) +{ + riscv_acpi_init_gsi_mapping(); + if (IS_ENABLED(CONFIG_ACPI_RIMT)) + riscv_acpi_rimt_init(); +} diff --git a/drivers/acpi/riscv/init.h b/drivers/acpi/riscv/init.h new file mode 100644 index 000000000000..1680aa2aaf23 --- /dev/null +++ b/drivers/acpi/riscv/init.h @@ -0,0 +1,5 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +#include <linux/init.h> + +void __init riscv_acpi_init_gsi_mapping(void); +void __init riscv_acpi_rimt_init(void); diff --git a/drivers/acpi/riscv/irq.c b/drivers/acpi/riscv/irq.c new file mode 100644 index 000000000000..d9a2154d6c6a --- /dev/null +++ b/drivers/acpi/riscv/irq.c @@ -0,0 +1,404 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2023-2024, Ventana Micro Systems Inc + * Author: Sunil V L <sunilvl@ventanamicro.com> + */ + +#include <linux/acpi.h> +#include <linux/sort.h> +#include <linux/irq.h> + +#include "init.h" + +#define RISCV_ACPI_INTC_FLAG_PENDING BIT(0) + +struct riscv_ext_intc_list { + acpi_handle handle; + u32 gsi_base; + u32 nr_irqs; + u32 nr_idcs; + u32 id; + u32 type; + u32 flag; + struct list_head list; +}; + +struct acpi_irq_dep_ctx { + int rc; + unsigned int index; + acpi_handle handle; +}; + +LIST_HEAD(ext_intc_list); + +static int irqchip_cmp_func(const void *in0, const void *in1) +{ + struct acpi_probe_entry *elem0 = (struct acpi_probe_entry *)in0; + struct acpi_probe_entry *elem1 = (struct acpi_probe_entry *)in1; + + return (elem0->type > elem1->type) - (elem0->type < elem1->type); +} + +/* + * On RISC-V, RINTC structures in MADT should be probed before any other + * interrupt controller structures and IMSIC before APLIC. The interrupt + * controller subtypes in MADT of ACPI spec for RISC-V are defined in + * the incremental order like RINTC(24)->IMSIC(25)->APLIC(26)->PLIC(27). + * Hence, simply sorting the subtypes in incremental order will + * establish the required order. 
+ */ +void arch_sort_irqchip_probe(struct acpi_probe_entry *ap_head, int nr) +{ + struct acpi_probe_entry *ape = ap_head; + + if (nr == 1 || !ACPI_COMPARE_NAMESEG(ACPI_SIG_MADT, ape->id)) + return; + sort(ape, nr, sizeof(*ape), irqchip_cmp_func, NULL); +} + +static acpi_status riscv_acpi_update_gsi_handle(u32 gsi_base, acpi_handle handle) +{ + struct riscv_ext_intc_list *ext_intc_element; + struct list_head *i, *tmp; + + list_for_each_safe(i, tmp, &ext_intc_list) { + ext_intc_element = list_entry(i, struct riscv_ext_intc_list, list); + if (gsi_base == ext_intc_element->gsi_base) { + ext_intc_element->handle = handle; + return AE_OK; + } + } + + return AE_NOT_FOUND; +} + +int riscv_acpi_update_gsi_range(u32 gsi_base, u32 nr_irqs) +{ + struct riscv_ext_intc_list *ext_intc_element; + + list_for_each_entry(ext_intc_element, &ext_intc_list, list) { + if (gsi_base == ext_intc_element->gsi_base && + (ext_intc_element->flag & RISCV_ACPI_INTC_FLAG_PENDING)) { + ext_intc_element->nr_irqs = nr_irqs; + ext_intc_element->flag &= ~RISCV_ACPI_INTC_FLAG_PENDING; + return 0; + } + } + + return -ENODEV; +} + +int riscv_acpi_get_gsi_info(struct fwnode_handle *fwnode, u32 *gsi_base, + u32 *id, u32 *nr_irqs, u32 *nr_idcs) +{ + struct riscv_ext_intc_list *ext_intc_element; + struct list_head *i; + + list_for_each(i, &ext_intc_list) { + ext_intc_element = list_entry(i, struct riscv_ext_intc_list, list); + if (ext_intc_element->handle == ACPI_HANDLE_FWNODE(fwnode)) { + *gsi_base = ext_intc_element->gsi_base; + *id = ext_intc_element->id; + *nr_irqs = ext_intc_element->nr_irqs; + if (nr_idcs) + *nr_idcs = ext_intc_element->nr_idcs; + + return 0; + } + } + + return -ENODEV; +} + +struct fwnode_handle *riscv_acpi_get_gsi_domain_id(u32 gsi) +{ + struct riscv_ext_intc_list *ext_intc_element; + struct acpi_device *adev; + struct list_head *i; + + list_for_each(i, &ext_intc_list) { + ext_intc_element = list_entry(i, struct riscv_ext_intc_list, list); + if (gsi >= ext_intc_element->gsi_base && + gsi < (ext_intc_element->gsi_base + ext_intc_element->nr_irqs)) { + adev = acpi_fetch_acpi_dev(ext_intc_element->handle); + if (!adev) + return NULL; + + return acpi_fwnode_handle(adev); + } + } + + return NULL; +} + +static int __init riscv_acpi_register_ext_intc(u32 gsi_base, u32 nr_irqs, u32 nr_idcs, + u32 id, u32 type) +{ + struct riscv_ext_intc_list *ext_intc_element, *node, *prev; + + ext_intc_element = kzalloc(sizeof(*ext_intc_element), GFP_KERNEL); + if (!ext_intc_element) + return -ENOMEM; + + ext_intc_element->gsi_base = gsi_base; + + /* If nr_irqs is zero, indicate it in flag and set to max range possible */ + if (nr_irqs) { + ext_intc_element->nr_irqs = nr_irqs; + } else { + ext_intc_element->flag |= RISCV_ACPI_INTC_FLAG_PENDING; + ext_intc_element->nr_irqs = U32_MAX - ext_intc_element->gsi_base; + } + + ext_intc_element->nr_idcs = nr_idcs; + ext_intc_element->id = id; + list_for_each_entry(node, &ext_intc_list, list) { + if (node->gsi_base < ext_intc_element->gsi_base) + break; + } + + /* Adjust the previous node's GSI range if that has pending registration */ + prev = list_prev_entry(node, list); + if (!list_entry_is_head(prev, &ext_intc_list, list)) { + if (prev->flag & RISCV_ACPI_INTC_FLAG_PENDING) + prev->nr_irqs = ext_intc_element->gsi_base - prev->gsi_base; + } + + list_add_tail(&ext_intc_element->list, &node->list); + return 0; +} + +static acpi_status __init riscv_acpi_create_gsi_map_smsi(acpi_handle handle, u32 level, + void *context, void **return_value) +{ + acpi_status status; + u64 gbase; + + if 
(!acpi_has_method(handle, "_GSB")) { + acpi_handle_err(handle, "_GSB method not found\n"); + return AE_ERROR; + } + + status = acpi_evaluate_integer(handle, "_GSB", NULL, &gbase); + if (ACPI_FAILURE(status)) { + acpi_handle_err(handle, "failed to evaluate _GSB method\n"); + return status; + } + + riscv_acpi_register_ext_intc(gbase, 0, 0, 0, ACPI_RISCV_IRQCHIP_SMSI); + status = riscv_acpi_update_gsi_handle((u32)gbase, handle); + if (ACPI_FAILURE(status)) { + acpi_handle_err(handle, "failed to find the GSI mapping entry\n"); + return status; + } + + return AE_OK; +} + +static acpi_status __init riscv_acpi_create_gsi_map(acpi_handle handle, u32 level, + void *context, void **return_value) +{ + acpi_status status; + u64 gbase; + + if (!acpi_has_method(handle, "_GSB")) { + acpi_handle_err(handle, "_GSB method not found\n"); + return AE_ERROR; + } + + status = acpi_evaluate_integer(handle, "_GSB", NULL, &gbase); + if (ACPI_FAILURE(status)) { + acpi_handle_err(handle, "failed to evaluate _GSB method\n"); + return status; + } + + status = riscv_acpi_update_gsi_handle((u32)gbase, handle); + if (ACPI_FAILURE(status)) { + acpi_handle_err(handle, "failed to find the GSI mapping entry\n"); + return status; + } + + return AE_OK; +} + +static int __init riscv_acpi_aplic_parse_madt(union acpi_subtable_headers *header, + const unsigned long end) +{ + struct acpi_madt_aplic *aplic = (struct acpi_madt_aplic *)header; + + return riscv_acpi_register_ext_intc(aplic->gsi_base, aplic->num_sources, aplic->num_idcs, + aplic->id, ACPI_RISCV_IRQCHIP_APLIC); +} + +static int __init riscv_acpi_plic_parse_madt(union acpi_subtable_headers *header, + const unsigned long end) +{ + struct acpi_madt_plic *plic = (struct acpi_madt_plic *)header; + + return riscv_acpi_register_ext_intc(plic->gsi_base, plic->num_irqs, 0, + plic->id, ACPI_RISCV_IRQCHIP_PLIC); +} + +void __init riscv_acpi_init_gsi_mapping(void) +{ + /* There can be either PLIC or APLIC */ + if (acpi_table_parse_madt(ACPI_MADT_TYPE_PLIC, riscv_acpi_plic_parse_madt, 0) > 0) { + acpi_get_devices("RSCV0001", riscv_acpi_create_gsi_map, NULL, NULL); + return; + } + + if (acpi_table_parse_madt(ACPI_MADT_TYPE_APLIC, riscv_acpi_aplic_parse_madt, 0) > 0) + acpi_get_devices("RSCV0002", riscv_acpi_create_gsi_map, NULL, NULL); + + /* Unlike PLIC/APLIC, SYSMSI doesn't have MADT */ + acpi_get_devices("RSCV0006", riscv_acpi_create_gsi_map_smsi, NULL, NULL); +} + +static acpi_handle riscv_acpi_get_gsi_handle(u32 gsi) +{ + struct riscv_ext_intc_list *ext_intc_element; + struct list_head *i; + + list_for_each(i, &ext_intc_list) { + ext_intc_element = list_entry(i, struct riscv_ext_intc_list, list); + if (gsi >= ext_intc_element->gsi_base && + gsi < (ext_intc_element->gsi_base + ext_intc_element->nr_irqs)) + return ext_intc_element->handle; + } + + return NULL; +} + +static acpi_status riscv_acpi_irq_get_parent(struct acpi_resource *ares, void *context) +{ + struct acpi_irq_dep_ctx *ctx = context; + struct acpi_resource_irq *irq; + struct acpi_resource_extended_irq *eirq; + + switch (ares->type) { + case ACPI_RESOURCE_TYPE_IRQ: + irq = &ares->data.irq; + if (ctx->index >= irq->interrupt_count) { + ctx->index -= irq->interrupt_count; + return AE_OK; + } + ctx->handle = riscv_acpi_get_gsi_handle(irq->interrupts[ctx->index]); + return AE_CTRL_TERMINATE; + case ACPI_RESOURCE_TYPE_EXTENDED_IRQ: + eirq = &ares->data.extended_irq; + if (eirq->producer_consumer == ACPI_PRODUCER) + return AE_OK; + + if (ctx->index >= eirq->interrupt_count) { + ctx->index -= eirq->interrupt_count; + return 
AE_OK; + } + + /* Support GSIs only */ + if (eirq->resource_source.string_length) + return AE_OK; + + ctx->handle = riscv_acpi_get_gsi_handle(eirq->interrupts[ctx->index]); + return AE_CTRL_TERMINATE; + } + + return AE_OK; +} + +static int riscv_acpi_irq_get_dep(acpi_handle handle, unsigned int index, acpi_handle *gsi_handle) +{ + struct acpi_irq_dep_ctx ctx = {-EINVAL, index, NULL}; + + if (!gsi_handle) + return 0; + + acpi_walk_resources(handle, METHOD_NAME__CRS, riscv_acpi_irq_get_parent, &ctx); + *gsi_handle = ctx.handle; + if (*gsi_handle) + return 1; + + return 0; +} + +static u32 riscv_acpi_add_prt_dep(acpi_handle handle) +{ + struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; + struct acpi_pci_routing_table *entry; + struct acpi_handle_list dep_devices; + acpi_handle gsi_handle; + acpi_handle link_handle; + acpi_status status; + u32 count = 0; + + status = acpi_get_irq_routing_table(handle, &buffer); + if (ACPI_FAILURE(status)) { + acpi_handle_err(handle, "failed to get IRQ routing table\n"); + kfree(buffer.pointer); + return 0; + } + + entry = buffer.pointer; + while (entry && (entry->length > 0)) { + if (entry->source[0]) { + acpi_get_handle(handle, entry->source, &link_handle); + dep_devices.count = 1; + dep_devices.handles = kcalloc(1, sizeof(*dep_devices.handles), GFP_KERNEL); + if (!dep_devices.handles) { + acpi_handle_err(handle, "failed to allocate memory\n"); + continue; + } + + dep_devices.handles[0] = link_handle; + count += acpi_scan_add_dep(handle, &dep_devices); + } else { + gsi_handle = riscv_acpi_get_gsi_handle(entry->source_index); + dep_devices.count = 1; + dep_devices.handles = kcalloc(1, sizeof(*dep_devices.handles), GFP_KERNEL); + if (!dep_devices.handles) { + acpi_handle_err(handle, "failed to allocate memory\n"); + continue; + } + + dep_devices.handles[0] = gsi_handle; + count += acpi_scan_add_dep(handle, &dep_devices); + } + + entry = (struct acpi_pci_routing_table *) + ((unsigned long)entry + entry->length); + } + + kfree(buffer.pointer); + return count; +} + +static u32 riscv_acpi_add_irq_dep(acpi_handle handle) +{ + struct acpi_handle_list dep_devices; + acpi_handle gsi_handle; + u32 count = 0; + int i; + + for (i = 0; + riscv_acpi_irq_get_dep(handle, i, &gsi_handle); + i++) { + dep_devices.count = 1; + dep_devices.handles = kcalloc(1, sizeof(*dep_devices.handles), GFP_KERNEL); + if (!dep_devices.handles) { + acpi_handle_err(handle, "failed to allocate memory\n"); + continue; + } + + dep_devices.handles[0] = gsi_handle; + count += acpi_scan_add_dep(handle, &dep_devices); + } + + return count; +} + +u32 arch_acpi_add_auto_dep(acpi_handle handle) +{ + if (acpi_has_method(handle, "_PRT")) + return riscv_acpi_add_prt_dep(handle); + + return riscv_acpi_add_irq_dep(handle); +} diff --git a/drivers/acpi/riscv/rhct.c b/drivers/acpi/riscv/rhct.c new file mode 100644 index 000000000000..caa2c16e1697 --- /dev/null +++ b/drivers/acpi/riscv/rhct.c @@ -0,0 +1,170 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2022-2023, Ventana Micro Systems Inc + * Author: Sunil V L <sunilvl@ventanamicro.com> + * + */ + +#define pr_fmt(fmt) "ACPI: RHCT: " fmt + +#include <linux/acpi.h> +#include <linux/bits.h> + +static struct acpi_table_rhct *acpi_get_rhct(void) +{ + static struct acpi_table_header *rhct; + acpi_status status; + + /* + * RHCT will be used at runtime on every CPU, so we + * don't need to call acpi_put_table() to release the table mapping. 
+ */ + if (!rhct) { + status = acpi_get_table(ACPI_SIG_RHCT, 0, &rhct); + if (ACPI_FAILURE(status)) { + pr_warn_once("No RHCT table found\n"); + return NULL; + } + } + + return (struct acpi_table_rhct *)rhct; +} + +/* + * During early boot, the caller should call acpi_get_table() and pass its pointer to + * these functions(and free up later). At run time, since this table can be used + * multiple times, NULL may be passed in order to use the cached table. + */ +int acpi_get_riscv_isa(struct acpi_table_header *table, unsigned int cpu, const char **isa) +{ + struct acpi_rhct_node_header *node, *ref_node, *end; + u32 size_hdr = sizeof(struct acpi_rhct_node_header); + u32 size_hartinfo = sizeof(struct acpi_rhct_hart_info); + struct acpi_rhct_hart_info *hart_info; + struct acpi_rhct_isa_string *isa_node; + struct acpi_table_rhct *rhct; + u32 *hart_info_node_offset; + u32 acpi_cpu_id = get_acpi_id_for_cpu(cpu); + + BUG_ON(acpi_disabled); + + if (!table) { + rhct = acpi_get_rhct(); + if (!rhct) + return -ENOENT; + } else { + rhct = (struct acpi_table_rhct *)table; + } + + end = ACPI_ADD_PTR(struct acpi_rhct_node_header, rhct, rhct->header.length); + + for (node = ACPI_ADD_PTR(struct acpi_rhct_node_header, rhct, rhct->node_offset); + node < end; + node = ACPI_ADD_PTR(struct acpi_rhct_node_header, node, node->length)) { + if (node->type == ACPI_RHCT_NODE_TYPE_HART_INFO) { + hart_info = ACPI_ADD_PTR(struct acpi_rhct_hart_info, node, size_hdr); + hart_info_node_offset = ACPI_ADD_PTR(u32, hart_info, size_hartinfo); + if (acpi_cpu_id != hart_info->uid) + continue; + + for (int i = 0; i < hart_info->num_offsets; i++) { + ref_node = ACPI_ADD_PTR(struct acpi_rhct_node_header, + rhct, hart_info_node_offset[i]); + if (ref_node->type == ACPI_RHCT_NODE_TYPE_ISA_STRING) { + isa_node = ACPI_ADD_PTR(struct acpi_rhct_isa_string, + ref_node, size_hdr); + *isa = isa_node->isa; + return 0; + } + } + } + } + + return -1; +} + +static void acpi_parse_hart_info_cmo_node(struct acpi_table_rhct *rhct, + struct acpi_rhct_hart_info *hart_info, + u32 *cbom_size, u32 *cboz_size, u32 *cbop_size) +{ + u32 size_hartinfo = sizeof(struct acpi_rhct_hart_info); + u32 size_hdr = sizeof(struct acpi_rhct_node_header); + struct acpi_rhct_node_header *ref_node; + struct acpi_rhct_cmo_node *cmo_node; + u32 *hart_info_node_offset; + + hart_info_node_offset = ACPI_ADD_PTR(u32, hart_info, size_hartinfo); + for (int i = 0; i < hart_info->num_offsets; i++) { + ref_node = ACPI_ADD_PTR(struct acpi_rhct_node_header, + rhct, hart_info_node_offset[i]); + if (ref_node->type == ACPI_RHCT_NODE_TYPE_CMO) { + cmo_node = ACPI_ADD_PTR(struct acpi_rhct_cmo_node, + ref_node, size_hdr); + if (cbom_size && cmo_node->cbom_size <= 30) { + if (!*cbom_size) + *cbom_size = BIT(cmo_node->cbom_size); + else if (*cbom_size != BIT(cmo_node->cbom_size)) + pr_warn("CBOM size is not the same across harts\n"); + } + + if (cboz_size && cmo_node->cboz_size <= 30) { + if (!*cboz_size) + *cboz_size = BIT(cmo_node->cboz_size); + else if (*cboz_size != BIT(cmo_node->cboz_size)) + pr_warn("CBOZ size is not the same across harts\n"); + } + + if (cbop_size && cmo_node->cbop_size <= 30) { + if (!*cbop_size) + *cbop_size = BIT(cmo_node->cbop_size); + else if (*cbop_size != BIT(cmo_node->cbop_size)) + pr_warn("CBOP size is not the same across harts\n"); + } + } + } +} + +/* + * During early boot, the caller should call acpi_get_table() and pass its pointer to + * these functions (and free up later). 
At run time, since this table can be used + * multiple times, pass NULL so that the table remains in memory. + */ +void acpi_get_cbo_block_size(struct acpi_table_header *table, u32 *cbom_size, + u32 *cboz_size, u32 *cbop_size) +{ + u32 size_hdr = sizeof(struct acpi_rhct_node_header); + struct acpi_rhct_node_header *node, *end; + struct acpi_rhct_hart_info *hart_info; + struct acpi_table_rhct *rhct; + + if (acpi_disabled) + return; + + if (table) { + rhct = (struct acpi_table_rhct *)table; + } else { + rhct = acpi_get_rhct(); + if (!rhct) + return; + } + + if (cbom_size) + *cbom_size = 0; + + if (cboz_size) + *cboz_size = 0; + + if (cbop_size) + *cbop_size = 0; + + end = ACPI_ADD_PTR(struct acpi_rhct_node_header, rhct, rhct->header.length); + for (node = ACPI_ADD_PTR(struct acpi_rhct_node_header, rhct, rhct->node_offset); + node < end; + node = ACPI_ADD_PTR(struct acpi_rhct_node_header, node, node->length)) { + if (node->type == ACPI_RHCT_NODE_TYPE_HART_INFO) { + hart_info = ACPI_ADD_PTR(struct acpi_rhct_hart_info, node, size_hdr); + acpi_parse_hart_info_cmo_node(rhct, hart_info, cbom_size, + cboz_size, cbop_size); + } + } +} diff --git a/drivers/acpi/riscv/rimt.c b/drivers/acpi/riscv/rimt.c new file mode 100644 index 000000000000..7f423405e5ef --- /dev/null +++ b/drivers/acpi/riscv/rimt.c @@ -0,0 +1,520 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2024-2025, Ventana Micro Systems Inc + * Author: Sunil V L <sunilvl@ventanamicro.com> + * + */ + +#define pr_fmt(fmt) "ACPI: RIMT: " fmt + +#include <linux/acpi.h> +#include <linux/acpi_rimt.h> +#include <linux/iommu.h> +#include <linux/list.h> +#include <linux/pci.h> +#include <linux/platform_device.h> +#include "init.h" + +struct rimt_fwnode { + struct list_head list; + struct acpi_rimt_node *rimt_node; + struct fwnode_handle *fwnode; +}; + +static LIST_HEAD(rimt_fwnode_list); +static DEFINE_SPINLOCK(rimt_fwnode_lock); + +#define RIMT_TYPE_MASK(type) (1 << (type)) +#define RIMT_IOMMU_TYPE BIT(0) + +/* Root pointer to the mapped RIMT table */ +static struct acpi_table_header *rimt_table; + +/** + * rimt_set_fwnode() - Create rimt_fwnode and use it to register + * iommu data in the rimt_fwnode_list + * + * @rimt_node: RIMT table node associated with the IOMMU + * @fwnode: fwnode associated with the RIMT node + * + * Returns: 0 on success + * <0 on failure + */ +static int rimt_set_fwnode(struct acpi_rimt_node *rimt_node, + struct fwnode_handle *fwnode) +{ + struct rimt_fwnode *np; + + np = kzalloc(sizeof(*np), GFP_ATOMIC); + + if (WARN_ON(!np)) + return -ENOMEM; + + INIT_LIST_HEAD(&np->list); + np->rimt_node = rimt_node; + np->fwnode = fwnode; + + spin_lock(&rimt_fwnode_lock); + list_add_tail(&np->list, &rimt_fwnode_list); + spin_unlock(&rimt_fwnode_lock); + + return 0; +} + +static acpi_status rimt_match_node_callback(struct acpi_rimt_node *node, + void *context) +{ + acpi_status status = AE_NOT_FOUND; + struct device *dev = context; + + if (node->type == ACPI_RIMT_NODE_TYPE_IOMMU) { + struct acpi_rimt_iommu *iommu_node = (struct acpi_rimt_iommu *)&node->node_data; + + if (dev_is_pci(dev)) { + struct pci_dev *pdev; + u16 bdf; + + pdev = to_pci_dev(dev); + bdf = PCI_DEVID(pdev->bus->number, pdev->devfn); + if ((pci_domain_nr(pdev->bus) == iommu_node->pcie_segment_number) && + bdf == iommu_node->pcie_bdf) { + status = AE_OK; + } else { + status = AE_NOT_FOUND; + } + } else { + struct platform_device *pdev = to_platform_device(dev); + struct resource *res; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (res && 
res->start == iommu_node->base_address) + status = AE_OK; + else + status = AE_NOT_FOUND; + } + } else if (node->type == ACPI_RIMT_NODE_TYPE_PCIE_ROOT_COMPLEX) { + struct acpi_rimt_pcie_rc *pci_rc; + struct pci_bus *bus; + + bus = to_pci_bus(dev); + pci_rc = (struct acpi_rimt_pcie_rc *)node->node_data; + + /* + * It is assumed that PCI segment numbers maps one-to-one + * with root complexes. Each segment number can represent only + * one root complex. + */ + status = pci_rc->pcie_segment_number == pci_domain_nr(bus) ? + AE_OK : AE_NOT_FOUND; + } else if (node->type == ACPI_RIMT_NODE_TYPE_PLAT_DEVICE) { + struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL }; + struct acpi_rimt_platform_device *ncomp; + struct device *plat_dev = dev; + struct acpi_device *adev; + + /* + * Walk the device tree to find a device with an + * ACPI companion; there is no point in scanning + * RIMT for a device matching a platform device if + * the device does not have an ACPI companion to + * start with. + */ + do { + adev = ACPI_COMPANION(plat_dev); + if (adev) + break; + + plat_dev = plat_dev->parent; + } while (plat_dev); + + if (!adev) + return status; + + status = acpi_get_name(adev->handle, ACPI_FULL_PATHNAME, &buf); + if (ACPI_FAILURE(status)) { + dev_warn(plat_dev, "Can't get device full path name\n"); + return status; + } + + ncomp = (struct acpi_rimt_platform_device *)node->node_data; + status = !strcmp(ncomp->device_name, buf.pointer) ? + AE_OK : AE_NOT_FOUND; + acpi_os_free(buf.pointer); + } + + return status; +} + +static struct acpi_rimt_node *rimt_scan_node(enum acpi_rimt_node_type type, + void *context) +{ + struct acpi_rimt_node *rimt_node, *rimt_end; + struct acpi_table_rimt *rimt; + int i; + + if (!rimt_table) + return NULL; + + /* Get the first RIMT node */ + rimt = (struct acpi_table_rimt *)rimt_table; + rimt_node = ACPI_ADD_PTR(struct acpi_rimt_node, rimt, + rimt->node_offset); + rimt_end = ACPI_ADD_PTR(struct acpi_rimt_node, rimt_table, + rimt_table->length); + + for (i = 0; i < rimt->num_nodes; i++) { + if (WARN_TAINT(rimt_node >= rimt_end, TAINT_FIRMWARE_WORKAROUND, + "RIMT node pointer overflows, bad table!\n")) + return NULL; + + if (rimt_node->type == type && + ACPI_SUCCESS(rimt_match_node_callback(rimt_node, context))) + return rimt_node; + + rimt_node = ACPI_ADD_PTR(struct acpi_rimt_node, rimt_node, + rimt_node->length); + } + + return NULL; +} + +/* + * RISC-V supports IOMMU as a PCI device or a platform device. + * When it is a platform device, there should be a namespace device as + * well along with RIMT. To create the link between RIMT information and + * the platform device, the IOMMU driver should register itself with the + * RIMT module. This is true for PCI based IOMMU as well. 
+ */ +int rimt_iommu_register(struct device *dev) +{ + struct fwnode_handle *rimt_fwnode; + struct acpi_rimt_node *node; + + node = rimt_scan_node(ACPI_RIMT_NODE_TYPE_IOMMU, dev); + if (!node) { + pr_err("Could not find IOMMU node in RIMT\n"); + return -ENODEV; + } + + if (dev_is_pci(dev)) { + rimt_fwnode = acpi_alloc_fwnode_static(); + if (!rimt_fwnode) + return -ENOMEM; + + rimt_fwnode->dev = dev; + if (!dev->fwnode) + dev->fwnode = rimt_fwnode; + + rimt_set_fwnode(node, rimt_fwnode); + } else { + rimt_set_fwnode(node, dev->fwnode); + } + + return 0; +} + +#ifdef CONFIG_IOMMU_API + +/** + * rimt_get_fwnode() - Retrieve fwnode associated with an RIMT node + * + * @node: RIMT table node to be looked-up + * + * Returns: fwnode_handle pointer on success, NULL on failure + */ +static struct fwnode_handle *rimt_get_fwnode(struct acpi_rimt_node *node) +{ + struct fwnode_handle *fwnode = NULL; + struct rimt_fwnode *curr; + + spin_lock(&rimt_fwnode_lock); + list_for_each_entry(curr, &rimt_fwnode_list, list) { + if (curr->rimt_node == node) { + fwnode = curr->fwnode; + break; + } + } + spin_unlock(&rimt_fwnode_lock); + + return fwnode; +} + +static bool rimt_pcie_rc_supports_ats(struct acpi_rimt_node *node) +{ + struct acpi_rimt_pcie_rc *pci_rc; + + pci_rc = (struct acpi_rimt_pcie_rc *)node->node_data; + return pci_rc->flags & ACPI_RIMT_PCIE_ATS_SUPPORTED; +} + +static int rimt_iommu_xlate(struct device *dev, struct acpi_rimt_node *node, u32 deviceid) +{ + struct fwnode_handle *rimt_fwnode; + + if (!node) + return -ENODEV; + + rimt_fwnode = rimt_get_fwnode(node); + + /* + * The IOMMU drivers may not be probed yet. + * Defer the IOMMU configuration + */ + if (!rimt_fwnode) + return -EPROBE_DEFER; + + return acpi_iommu_fwspec_init(dev, deviceid, rimt_fwnode); +} + +struct rimt_pci_alias_info { + struct device *dev; + struct acpi_rimt_node *node; + const struct iommu_ops *ops; +}; + +static int rimt_id_map(struct acpi_rimt_id_mapping *map, u8 type, u32 rid_in, u32 *rid_out) +{ + if (rid_in < map->source_id_base || + (rid_in > map->source_id_base + map->num_ids)) + return -ENXIO; + + *rid_out = map->dest_id_base + (rid_in - map->source_id_base); + return 0; +} + +static struct acpi_rimt_node *rimt_node_get_id(struct acpi_rimt_node *node, + u32 *id_out, int index) +{ + struct acpi_rimt_platform_device *plat_node; + u32 id_mapping_offset, num_id_mapping; + struct acpi_rimt_pcie_rc *pci_node; + struct acpi_rimt_id_mapping *map; + struct acpi_rimt_node *parent; + + if (node->type == ACPI_RIMT_NODE_TYPE_PCIE_ROOT_COMPLEX) { + pci_node = (struct acpi_rimt_pcie_rc *)&node->node_data; + id_mapping_offset = pci_node->id_mapping_offset; + num_id_mapping = pci_node->num_id_mappings; + } else if (node->type == ACPI_RIMT_NODE_TYPE_PLAT_DEVICE) { + plat_node = (struct acpi_rimt_platform_device *)&node->node_data; + id_mapping_offset = plat_node->id_mapping_offset; + num_id_mapping = plat_node->num_id_mappings; + } else { + return NULL; + } + + if (!id_mapping_offset || !num_id_mapping || index >= num_id_mapping) + return NULL; + + map = ACPI_ADD_PTR(struct acpi_rimt_id_mapping, node, + id_mapping_offset + index * sizeof(*map)); + + /* Firmware bug! 
*/ + if (!map->dest_offset) { + pr_err(FW_BUG "[node %p type %d] ID map has NULL parent reference\n", + node, node->type); + return NULL; + } + + parent = ACPI_ADD_PTR(struct acpi_rimt_node, rimt_table, map->dest_offset); + + if (node->type == ACPI_RIMT_NODE_TYPE_PLAT_DEVICE || + node->type == ACPI_RIMT_NODE_TYPE_PCIE_ROOT_COMPLEX) { + *id_out = map->dest_id_base; + return parent; + } + + return NULL; +} + +static struct acpi_rimt_node *rimt_node_map_id(struct acpi_rimt_node *node, + u32 id_in, u32 *id_out, + u8 type_mask) +{ + struct acpi_rimt_platform_device *plat_node; + u32 id_mapping_offset, num_id_mapping; + struct acpi_rimt_pcie_rc *pci_node; + u32 id = id_in; + + /* Parse the ID mapping tree to find specified node type */ + while (node) { + struct acpi_rimt_id_mapping *map; + int i, rc = 0; + u32 map_id = id; + + if (RIMT_TYPE_MASK(node->type) & type_mask) { + if (id_out) + *id_out = id; + return node; + } + + if (node->type == ACPI_RIMT_NODE_TYPE_PCIE_ROOT_COMPLEX) { + pci_node = (struct acpi_rimt_pcie_rc *)&node->node_data; + id_mapping_offset = pci_node->id_mapping_offset; + num_id_mapping = pci_node->num_id_mappings; + } else if (node->type == ACPI_RIMT_NODE_TYPE_PLAT_DEVICE) { + plat_node = (struct acpi_rimt_platform_device *)&node->node_data; + id_mapping_offset = plat_node->id_mapping_offset; + num_id_mapping = plat_node->num_id_mappings; + } else { + goto fail_map; + } + + if (!id_mapping_offset || !num_id_mapping) + goto fail_map; + + map = ACPI_ADD_PTR(struct acpi_rimt_id_mapping, node, + id_mapping_offset); + + /* Firmware bug! */ + if (!map->dest_offset) { + pr_err(FW_BUG "[node %p type %d] ID map has NULL parent reference\n", + node, node->type); + goto fail_map; + } + + /* Do the ID translation */ + for (i = 0; i < num_id_mapping; i++, map++) { + rc = rimt_id_map(map, node->type, map_id, &id); + if (!rc) + break; + } + + if (i == num_id_mapping) + goto fail_map; + + node = ACPI_ADD_PTR(struct acpi_rimt_node, rimt_table, + rc ? 
0 : map->dest_offset); + } + +fail_map: + /* Map input ID to output ID unchanged on mapping failure */ + if (id_out) + *id_out = id_in; + + return NULL; +} + +static struct acpi_rimt_node *rimt_node_map_platform_id(struct acpi_rimt_node *node, u32 *id_out, + u8 type_mask, int index) +{ + struct acpi_rimt_node *parent; + u32 id; + + parent = rimt_node_get_id(node, &id, index); + if (!parent) + return NULL; + + if (!(RIMT_TYPE_MASK(parent->type) & type_mask)) + parent = rimt_node_map_id(parent, id, id_out, type_mask); + else + if (id_out) + *id_out = id; + + return parent; +} + +static int rimt_pci_iommu_init(struct pci_dev *pdev, u16 alias, void *data) +{ + struct rimt_pci_alias_info *info = data; + struct acpi_rimt_node *parent; + u32 deviceid; + + parent = rimt_node_map_id(info->node, alias, &deviceid, RIMT_IOMMU_TYPE); + return rimt_iommu_xlate(info->dev, parent, deviceid); +} + +static int rimt_plat_iommu_map(struct device *dev, struct acpi_rimt_node *node) +{ + struct acpi_rimt_node *parent; + int err = -ENODEV, i = 0; + u32 deviceid = 0; + + do { + parent = rimt_node_map_platform_id(node, &deviceid, + RIMT_IOMMU_TYPE, + i++); + + if (parent) + err = rimt_iommu_xlate(dev, parent, deviceid); + } while (parent && !err); + + return err; +} + +static int rimt_plat_iommu_map_id(struct device *dev, + struct acpi_rimt_node *node, + const u32 *in_id) +{ + struct acpi_rimt_node *parent; + u32 deviceid; + + parent = rimt_node_map_id(node, *in_id, &deviceid, RIMT_IOMMU_TYPE); + if (parent) + return rimt_iommu_xlate(dev, parent, deviceid); + + return -ENODEV; +} + +/** + * rimt_iommu_configure_id - Set-up IOMMU configuration for a device. + * + * @dev: device to configure + * @id_in: optional input id const value pointer + * + * Returns: 0 on success, <0 on failure + */ +int rimt_iommu_configure_id(struct device *dev, const u32 *id_in) +{ + struct acpi_rimt_node *node; + int err = -ENODEV; + + if (dev_is_pci(dev)) { + struct iommu_fwspec *fwspec; + struct pci_bus *bus = to_pci_dev(dev)->bus; + struct rimt_pci_alias_info info = { .dev = dev }; + + node = rimt_scan_node(ACPI_RIMT_NODE_TYPE_PCIE_ROOT_COMPLEX, &bus->dev); + if (!node) + return -ENODEV; + + info.node = node; + err = pci_for_each_dma_alias(to_pci_dev(dev), + rimt_pci_iommu_init, &info); + + fwspec = dev_iommu_fwspec_get(dev); + if (fwspec && rimt_pcie_rc_supports_ats(node)) + fwspec->flags |= IOMMU_FWSPEC_PCI_RC_ATS; + } else { + node = rimt_scan_node(ACPI_RIMT_NODE_TYPE_PLAT_DEVICE, dev); + if (!node) + return -ENODEV; + + err = id_in ? rimt_plat_iommu_map_id(dev, node, id_in) : + rimt_plat_iommu_map(dev, node); + } + + return err; +} + +#endif + +void __init riscv_acpi_rimt_init(void) +{ + acpi_status status; + + /* rimt_table will be used at runtime after the rimt init, + * so we don't need to call acpi_put_table() to release + * the RIMT table mapping. 
+ */ + status = acpi_get_table(ACPI_SIG_RIMT, 0, &rimt_table); + if (ACPI_FAILURE(status)) { + if (status != AE_NOT_FOUND) { + const char *msg = acpi_format_exception(status); + + pr_err("Failed to get table, %s\n", msg); + } + + return; + } +} diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c index 96c5e27967f4..d3edc3bcbf01 100644 --- a/drivers/acpi/sbs.c +++ b/drivers/acpi/sbs.c @@ -1,25 +1,14 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /* * sbs.c - ACPI Smart Battery System Driver ($Revision: 2.0 $) * * Copyright (c) 2007 Alexey Starikovskiy <astarikovskiy@suse.de> * Copyright (c) 2005-2007 Vladimir Lebedev <vladimir.p.lebedev@intel.com> * Copyright (c) 2005 Rich Townsend <rhdt@bartol.udel.edu> - * - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or (at - * your option) any later version. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ +#define pr_fmt(fmt) "ACPI: " fmt + #include <linux/init.h> #include <linux/slab.h> #include <linux/module.h> @@ -36,14 +25,9 @@ #include "sbshc.h" -#define PREFIX "ACPI: " - #define ACPI_SBS_CLASS "sbs" #define ACPI_AC_CLASS "ac_adapter" #define ACPI_SBS_DEVICE_NAME "Smart Battery System" -#define ACPI_SBS_FILE_INFO "info" -#define ACPI_SBS_FILE_STATE "state" -#define ACPI_SBS_FILE_ALARM "alarm" #define ACPI_BATTERY_DIR_NAME "BAT%i" #define ACPI_AC_DIR_NAME "AC0" @@ -93,7 +77,6 @@ struct acpi_battery { u16 spec; u8 id; u8 present:1; - u8 have_sysfs_alarm:1; }; #define to_acpi_battery(x) power_supply_get_drvdata(x) @@ -112,7 +95,7 @@ struct acpi_sbs { #define to_acpi_sbs(x) power_supply_get_drvdata(x) -static int acpi_sbs_remove(struct acpi_device *device); +static void acpi_sbs_remove(struct acpi_device *device); static int acpi_battery_get_state(struct acpi_battery *battery); static inline int battery_scale(int log) @@ -257,11 +240,11 @@ static int acpi_sbs_battery_get_property(struct power_supply *psy, return 0; } -static enum power_supply_property sbs_ac_props[] = { +static const enum power_supply_property sbs_ac_props[] = { POWER_SUPPLY_PROP_ONLINE, }; -static enum power_supply_property sbs_charge_battery_props[] = { +static const enum power_supply_property sbs_charge_battery_props[] = { POWER_SUPPLY_PROP_STATUS, POWER_SUPPLY_PROP_PRESENT, POWER_SUPPLY_PROP_TECHNOLOGY, @@ -279,7 +262,7 @@ static enum power_supply_property sbs_charge_battery_props[] = { POWER_SUPPLY_PROP_MANUFACTURER, }; -static enum power_supply_property sbs_energy_battery_props[] = { +static const enum power_supply_property sbs_energy_battery_props[] = { POWER_SUPPLY_PROP_STATUS, POWER_SUPPLY_PROP_PRESENT, POWER_SUPPLY_PROP_TECHNOLOGY, @@ -382,7 +365,7 @@ static int acpi_battery_get_state(struct acpi_battery *battery) state_readers[i].mode, ACPI_SBS_BATTERY, state_readers[i].command, - (u8 *)battery + + (u8 *)battery + state_readers[i].offset); if (result) goto end; @@ -478,34 +461,49 @@ static ssize_t acpi_battery_alarm_store(struct device *dev, return count; } -static const struct device_attribute alarm_attr = { +static struct device_attribute 
alarm_attr = { .attr = {.name = "alarm", .mode = 0644}, .show = acpi_battery_alarm_show, .store = acpi_battery_alarm_store, }; +static struct attribute *acpi_battery_attrs[] = { + &alarm_attr.attr, + NULL +}; +ATTRIBUTE_GROUPS(acpi_battery); + /* -------------------------------------------------------------------------- Driver Interface -------------------------------------------------------------------------- */ static int acpi_battery_read(struct acpi_battery *battery) { - int result = 0, saved_present = battery->present; + int result, saved_present = battery->present; u16 state; if (battery->sbs->manager_present) { result = acpi_smbus_read(battery->sbs->hc, SMBUS_READ_WORD, ACPI_SBS_MANAGER, 0x01, (u8 *)&state); - if (!result) - battery->present = state & (1 << battery->id); - state &= 0x0fff; + if (result) + return result; + + battery->present = !!(state & (1 << battery->id)); + if (!battery->present) + return 0; + + /* Masking necessary for Smart Battery Selectors */ + state = 0x0fff; state |= 1 << (battery->id + 12); acpi_smbus_write(battery->sbs->hc, SMBUS_WRITE_WORD, ACPI_SBS_MANAGER, 0x01, (u8 *)&state, 2); - } else if (battery->id == 0) - battery->present = 1; - - if (result || !battery->present) - return result; + } else { + if (battery->id == 0) { + battery->present = 1; + } else { + if (!battery->present) + return 0; + } + } if (saved_present != battery->present) { battery->update_time = 0; @@ -525,7 +523,10 @@ static int acpi_battery_read(struct acpi_battery *battery) static int acpi_battery_add(struct acpi_sbs *sbs, int id) { struct acpi_battery *battery = &sbs->battery[id]; - struct power_supply_config psy_cfg = { .drv_data = battery, }; + struct power_supply_config psy_cfg = { + .drv_data = battery, + .attr_grp = acpi_battery_groups, + }; int result; battery->id = id; @@ -555,12 +556,8 @@ static int acpi_battery_add(struct acpi_sbs *sbs, int id) goto end; } - result = device_create_file(&battery->bat->dev, &alarm_attr); - if (result) - goto end; - battery->have_sysfs_alarm = 1; end: - printk(KERN_INFO PREFIX "%s [%s]: Battery Slot [%s] (battery %s)\n", + pr_info("%s [%s]: Battery Slot [%s] (battery %s)\n", ACPI_SBS_DEVICE_NAME, acpi_device_bid(sbs->device), battery->name, battery->present ? "present" : "absent"); return result; @@ -570,11 +567,8 @@ static void acpi_battery_remove(struct acpi_sbs *sbs, int id) { struct acpi_battery *battery = &sbs->battery[id]; - if (battery->bat) { - if (battery->have_sysfs_alarm) - device_remove_file(&battery->bat->dev, &alarm_attr); + if (battery->bat) power_supply_unregister(battery->bat); - } } static int acpi_charger_add(struct acpi_sbs *sbs) @@ -593,10 +587,10 @@ static int acpi_charger_add(struct acpi_sbs *sbs) result = PTR_ERR(sbs->charger); sbs->charger = NULL; } - printk(KERN_INFO PREFIX "%s [%s]: AC Adapter [%s] (%s)\n", + pr_info("%s [%s]: AC Adapter [%s] (%s)\n", ACPI_SBS_DEVICE_NAME, acpi_device_bid(sbs->device), ACPI_AC_DIR_NAME, sbs->charger_present ? 
"on-line" : "off-line"); - end: +end: return result; } @@ -617,7 +611,7 @@ static void acpi_sbs_callback(void *context) if (sbs->charger_exists) { acpi_ac_get_present(sbs); if (sbs->charger_present != saved_charger_state) - kobject_uevent(&sbs->charger->dev.kobj, KOBJ_CHANGE); + power_supply_changed(sbs->charger); } if (sbs->manager_present) { @@ -629,7 +623,7 @@ static void acpi_sbs_callback(void *context) acpi_battery_read(bat); if (saved_battery_state == bat->present) continue; - kobject_uevent(&bat->bat->dev.kobj, KOBJ_CHANGE); + power_supply_changed(bat->bat); } } } @@ -648,10 +642,10 @@ static int acpi_sbs_add(struct acpi_device *device) mutex_init(&sbs->lock); - sbs->hc = acpi_driver_data(device->parent); + sbs->hc = acpi_driver_data(acpi_dev_parent(device)); sbs->device = device; - strcpy(acpi_device_name(device), ACPI_SBS_DEVICE_NAME); - strcpy(acpi_device_class(device), ACPI_SBS_CLASS); + strscpy(acpi_device_name(device), ACPI_SBS_DEVICE_NAME); + strscpy(acpi_device_class(device), ACPI_SBS_CLASS); device->driver_data = sbs; result = acpi_charger_add(sbs); @@ -674,22 +668,22 @@ static int acpi_sbs_add(struct acpi_device *device) acpi_battery_add(sbs, 0); acpi_smbus_register_callback(sbs->hc, acpi_sbs_callback, sbs); - end: +end: if (result) acpi_sbs_remove(device); return result; } -static int acpi_sbs_remove(struct acpi_device *device) +static void acpi_sbs_remove(struct acpi_device *device) { struct acpi_sbs *sbs; int id; if (!device) - return -EINVAL; + return; sbs = acpi_driver_data(device); if (!sbs) - return -EINVAL; + return; mutex_lock(&sbs->lock); acpi_smbus_unregister_callback(sbs->hc); for (id = 0; id < MAX_SBS_BAT; ++id) @@ -698,7 +692,6 @@ static int acpi_sbs_remove(struct acpi_device *device) mutex_unlock(&sbs->lock); mutex_destroy(&sbs->lock); kfree(sbs); - return 0; } #ifdef CONFIG_PM_SLEEP @@ -727,26 +720,4 @@ static struct acpi_driver acpi_sbs_driver = { }, .drv.pm = &acpi_sbs_pm, }; - -static int __init acpi_sbs_init(void) -{ - int result = 0; - - if (acpi_disabled) - return -ENODEV; - - result = acpi_bus_register_driver(&acpi_sbs_driver); - if (result < 0) - return -ENODEV; - - return 0; -} - -static void __exit acpi_sbs_exit(void) -{ - acpi_bus_unregister_driver(&acpi_sbs_driver); - return; -} - -module_init(acpi_sbs_init); -module_exit(acpi_sbs_exit); +module_acpi_driver(acpi_sbs_driver); diff --git a/drivers/acpi/sbshc.c b/drivers/acpi/sbshc.c index 5008ead4609a..1a2bf520be23 100644 --- a/drivers/acpi/sbshc.c +++ b/drivers/acpi/sbshc.c @@ -1,13 +1,12 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * SMBus driver for ACPI Embedded Controller (v0.1) * * Copyright (c) 2007 Alexey Starikovskiy - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation version 2. 
*/ +#define pr_fmt(fmt) "ACPI: " fmt + #include <linux/acpi.h> #include <linux/wait.h> #include <linux/slab.h> @@ -15,8 +14,7 @@ #include <linux/module.h> #include <linux/interrupt.h> #include "sbshc.h" - -#define PREFIX "ACPI: " +#include "internal.h" #define ACPI_SMB_HC_CLASS "smbus_host_ctl" #define ACPI_SMB_HC_DEVICE_NAME "ACPI SMBus HC" @@ -33,7 +31,7 @@ struct acpi_smb_hc { }; static int acpi_smbus_hc_add(struct acpi_device *device); -static int acpi_smbus_hc_remove(struct acpi_device *device); +static void acpi_smbus_hc_remove(struct acpi_device *device); static const struct acpi_device_id sbs_device_ids[] = { {"ACPI0001", 0}, @@ -112,7 +110,7 @@ static int acpi_smbus_transaction(struct acpi_smb_hc *hc, u8 protocol, u8 temp, sz = 0; if (!hc) { - printk(KERN_ERR PREFIX "host controller is not configured\n"); + pr_err("host controller is not configured\n"); return ret; } @@ -179,7 +177,7 @@ int acpi_smbus_write(struct acpi_smb_hc *hc, u8 protocol, u8 address, EXPORT_SYMBOL_GPL(acpi_smbus_write); int acpi_smbus_register_callback(struct acpi_smb_hc *hc, - smbus_alarm_callback callback, void *context) + smbus_alarm_callback callback, void *context) { mutex_lock(&hc->lock); hc->callback = callback; @@ -234,18 +232,11 @@ static int smbus_alarm(void *context) case ACPI_SBS_BATTERY: acpi_os_execute(OSL_NOTIFY_HANDLER, acpi_smbus_callback, hc); - default:; } mutex_unlock(&hc->lock); return 0; } -typedef int (*acpi_ec_query_func) (void *data); - -extern int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit, - acpi_handle handle, acpi_ec_query_func func, - void *data); - static int acpi_smbus_hc_add(struct acpi_device *device) { int status; @@ -257,12 +248,12 @@ static int acpi_smbus_hc_add(struct acpi_device *device) status = acpi_evaluate_integer(device->handle, "_EC", NULL, &val); if (ACPI_FAILURE(status)) { - printk(KERN_ERR PREFIX "error obtaining _EC.\n"); + pr_err("error obtaining _EC.\n"); return -EIO; } - strcpy(acpi_device_name(device), ACPI_SMB_HC_DEVICE_NAME); - strcpy(acpi_device_class(device), ACPI_SMB_HC_CLASS); + strscpy(acpi_device_name(device), ACPI_SMB_HC_DEVICE_NAME); + strscpy(acpi_device_class(device), ACPI_SMB_HC_CLASS); hc = kzalloc(sizeof(struct acpi_smb_hc), GFP_KERNEL); if (!hc) @@ -270,7 +261,7 @@ static int acpi_smbus_hc_add(struct acpi_device *device) mutex_init(&hc->lock); init_waitqueue_head(&hc->wait); - hc->ec = acpi_driver_data(device->parent); + hc->ec = acpi_driver_data(acpi_dev_parent(device)); hc->offset = (val >> 8) & 0xff; hc->query_bit = val & 0xff; device->driver_data = hc; @@ -282,21 +273,18 @@ static int acpi_smbus_hc_add(struct acpi_device *device) return 0; } -extern void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit); - -static int acpi_smbus_hc_remove(struct acpi_device *device) +static void acpi_smbus_hc_remove(struct acpi_device *device) { struct acpi_smb_hc *hc; if (!device) - return -EINVAL; + return; hc = acpi_driver_data(device); acpi_ec_remove_query_handler(hc->ec, hc->query_bit); acpi_os_wait_events_complete(); kfree(hc); device->driver_data = NULL; - return 0; } module_acpi_driver(acpi_smb_hc_driver); diff --git a/drivers/acpi/sbshc.h b/drivers/acpi/sbshc.h index 06372a37df10..695c390e2884 100644 --- a/drivers/acpi/sbshc.h +++ b/drivers/acpi/sbshc.h @@ -15,8 +15,6 @@ enum acpi_smb_protocol { SMBUS_BLOCK_PROCESS_CALL = 0xd, }; -static const u8 SMBUS_PEC = 0x80; - enum acpi_sbs_device_addr { ACPI_SBS_CHARGER = 0x9, ACPI_SBS_MANAGER = 0xa, @@ -26,9 +24,9 @@ enum acpi_sbs_device_addr { typedef void 
(*smbus_alarm_callback)(void *context); extern int acpi_smbus_read(struct acpi_smb_hc *hc, u8 protocol, u8 address, - u8 command, u8 * data); + u8 command, u8 *data); extern int acpi_smbus_write(struct acpi_smb_hc *hc, u8 protocol, u8 slave_address, - u8 command, u8 * data, u8 length); + u8 command, u8 *data, u8 length); extern int acpi_smbus_register_callback(struct acpi_smb_hc *hc, - smbus_alarm_callback callback, void *context); + smbus_alarm_callback callback, void *context); extern int acpi_smbus_unregister_callback(struct acpi_smb_hc *hc); diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c index 5efd4219f112..416d87f9bd10 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c @@ -1,35 +1,36 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * scan.c - support for transforming the ACPI namespace into individual objects */ +#define pr_fmt(fmt) "ACPI: " fmt + #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/kernel.h> #include <linux/acpi.h> #include <linux/acpi_iort.h> +#include <linux/acpi_rimt.h> +#include <linux/acpi_viot.h> +#include <linux/iommu.h> #include <linux/signal.h> #include <linux/kthread.h> #include <linux/dmi.h> -#include <linux/nls.h> -#include <linux/dma-mapping.h> +#include <linux/dma-map-ops.h> #include <linux/platform_data/x86/apple.h> - -#include <asm/pgtable.h> +#include <linux/pgtable.h> +#include <linux/crc32.h> +#include <linux/dma-direct.h> #include "internal.h" - -#define _COMPONENT ACPI_BUS_COMPONENT -ACPI_MODULE_NAME("scan"); -extern struct acpi_device *acpi_root; +#include "sleep.h" #define ACPI_BUS_CLASS "system_bus" #define ACPI_BUS_HID "LNXSYBUS" #define ACPI_BUS_DEVICE_NAME "System Bus" -#define ACPI_IS_ROOT_DEVICE(device) (!(device)->parent) - -#define INVALID_ACPI_HANDLE ((acpi_handle)empty_zero_page) +#define INVALID_ACPI_HANDLE ((acpi_handle)ZERO_PAGE(0)) static const char *dummy_hid = "device"; @@ -49,12 +50,6 @@ static DEFINE_MUTEX(acpi_hp_context_lock); */ static u64 spcr_uart_addr; -struct acpi_dep_data { - struct list_head node; - acpi_handle master; - acpi_handle slave; -}; - void acpi_scan_lock_acquire(void) { mutex_lock(&acpi_scan_lock); @@ -79,8 +74,7 @@ void acpi_unlock_hp_context(void) void acpi_initialize_hp_context(struct acpi_device *adev, struct acpi_hotplug_context *hp, - int (*notify)(struct acpi_device *, u32), - void (*uevent)(struct acpi_device *, u32)) + acpi_hp_notify notify, acpi_hp_uevent uevent) { acpi_lock_hp_context(); hp->notify = notify; @@ -140,12 +134,12 @@ bool acpi_scan_is_offline(struct acpi_device *adev, bool uevent) static acpi_status acpi_bus_offline(acpi_handle handle, u32 lvl, void *data, void **ret_p) { - struct acpi_device *device = NULL; + struct acpi_device *device = acpi_fetch_acpi_dev(handle); struct acpi_device_physical_node *pn; bool second_pass = (bool)data; acpi_status status = AE_OK; - if (acpi_bus_get_device(handle, &device)) + if (!device) return AE_OK; if (device->handler && !device->handler->hotplug.enabled) { @@ -185,10 +179,10 @@ static acpi_status acpi_bus_offline(acpi_handle handle, u32 lvl, void *data, static acpi_status acpi_bus_online(acpi_handle handle, u32 lvl, void *data, void **ret_p) { - struct acpi_device *device = NULL; + struct acpi_device *device = acpi_fetch_acpi_dev(handle); struct acpi_device_physical_node *pn; - if (acpi_bus_get_device(handle, &device)) + if (!device) return AE_OK; mutex_lock(&device->physical_node_lock); @@ -250,11 +244,85 @@ static int acpi_scan_try_to_offline(struct acpi_device *device) return 0; } +#define 
ACPI_SCAN_CHECK_FLAG_STATUS BIT(0) +#define ACPI_SCAN_CHECK_FLAG_EJECT BIT(1) + +static int acpi_scan_check_and_detach(struct acpi_device *adev, void *p) +{ + struct acpi_scan_handler *handler = adev->handler; + uintptr_t flags = (uintptr_t)p; + + acpi_dev_for_each_child_reverse(adev, acpi_scan_check_and_detach, p); + + if (flags & ACPI_SCAN_CHECK_FLAG_STATUS) { + acpi_bus_get_status(adev); + /* + * Skip devices that are still there and take the enabled + * flag into account. + */ + if (acpi_device_is_enabled(adev)) + return 0; + + /* Skip device that have not been enumerated. */ + if (!acpi_device_enumerated(adev)) { + dev_dbg(&adev->dev, "Still not enumerated\n"); + return 0; + } + } + + adev->flags.match_driver = false; + if (handler) { + if (handler->detach) + handler->detach(adev); + } else { + device_release_driver(&adev->dev); + } + /* + * Most likely, the device is going away, so put it into D3cold before + * that. + */ + acpi_device_set_power(adev, ACPI_STATE_D3_COLD); + adev->flags.initialized = false; + + /* For eject this is deferred to acpi_bus_post_eject() */ + if (!(flags & ACPI_SCAN_CHECK_FLAG_EJECT)) { + adev->handler = NULL; + acpi_device_clear_enumerated(adev); + } + return 0; +} + +static int acpi_bus_post_eject(struct acpi_device *adev, void *not_used) +{ + struct acpi_scan_handler *handler = adev->handler; + + acpi_dev_for_each_child_reverse(adev, acpi_bus_post_eject, NULL); + + if (handler) { + if (handler->post_eject) + handler->post_eject(adev); + + adev->handler = NULL; + } + + acpi_device_clear_enumerated(adev); + + return 0; +} + +static void acpi_scan_check_subtree(struct acpi_device *adev) +{ + uintptr_t flags = ACPI_SCAN_CHECK_FLAG_STATUS; + + acpi_scan_check_and_detach(adev, (void *)flags); +} + static int acpi_scan_hot_remove(struct acpi_device *device) { acpi_handle handle = device->handle; unsigned long long sta; acpi_status status; + uintptr_t flags = ACPI_SCAN_CHECK_FLAG_EJECT; if (device->handler && device->handler->hotplug.demand_offline) { if (!acpi_scan_is_offline(device, true)) @@ -265,10 +333,9 @@ static int acpi_scan_hot_remove(struct acpi_device *device) return error; } - ACPI_DEBUG_PRINT((ACPI_DB_INFO, - "Hot-removing device %s...\n", dev_name(&device->dev))); + acpi_handle_debug(handle, "Ejecting\n"); - acpi_bus_trim(device); + acpi_scan_check_and_detach(device, (void *)flags); acpi_evaluate_lck(handle, 0); /* @@ -291,79 +358,62 @@ static int acpi_scan_hot_remove(struct acpi_device *device) } else if (sta & ACPI_STA_DEVICE_ENABLED) { acpi_handle_warn(handle, "Eject incomplete - status 0x%llx\n", sta); + } else { + acpi_bus_post_eject(device, NULL); } return 0; } -static int acpi_scan_device_not_present(struct acpi_device *adev) +static int acpi_scan_rescan_bus(struct acpi_device *adev) { - if (!acpi_device_enumerated(adev)) { - dev_warn(&adev->dev, "Still not present\n"); - return -EALREADY; - } - acpi_bus_trim(adev); - return 0; + struct acpi_scan_handler *handler = adev->handler; + int ret; + + if (handler && handler->hotplug.scan_dependent) + ret = handler->hotplug.scan_dependent(adev); + else + ret = acpi_bus_scan(adev->handle); + + if (ret) + dev_info(&adev->dev, "Namespace scan failure\n"); + + return ret; } static int acpi_scan_device_check(struct acpi_device *adev) { - int error; + struct acpi_device *parent; - acpi_bus_get_status(adev); - if (adev->status.present || adev->status.functional) { - /* - * This function is only called for device objects for which - * matching scan handlers exist. 
The only situation in which - * the scan handler is not attached to this device object yet - * is when the device has just appeared (either it wasn't - * present at all before or it was removed and then added - * again). - */ - if (adev->handler) { - dev_warn(&adev->dev, "Already enumerated\n"); - return -EALREADY; - } - error = acpi_bus_scan(adev->handle); - if (error) { - dev_warn(&adev->dev, "Namespace scan failure\n"); - return error; - } - if (!adev->handler) { - dev_warn(&adev->dev, "Enumeration failure\n"); - error = -ENODEV; - } - } else { - error = acpi_scan_device_not_present(adev); + acpi_scan_check_subtree(adev); + + if (!acpi_device_is_present(adev)) + return 0; + + /* + * This function is only called for device objects for which matching + * scan handlers exist. The only situation in which the scan handler + * is not attached to this device object yet is when the device has + * just appeared (either it wasn't present at all before or it was + * removed and then added again). + */ + if (adev->handler) { + dev_dbg(&adev->dev, "Already enumerated\n"); + return 0; } - return error; + + parent = acpi_dev_parent(adev); + if (!parent) + parent = adev; + + return acpi_scan_rescan_bus(parent); } static int acpi_scan_bus_check(struct acpi_device *adev) { - struct acpi_scan_handler *handler = adev->handler; - struct acpi_device *child; - int error; + acpi_scan_check_subtree(adev); - acpi_bus_get_status(adev); - if (!(adev->status.present || adev->status.functional)) { - acpi_scan_device_not_present(adev); - return 0; - } - if (handler && handler->hotplug.scan_dependent) - return handler->hotplug.scan_dependent(adev); - - error = acpi_bus_scan(adev->handle); - if (error) { - dev_warn(&adev->dev, "Namespace scan failure\n"); - return error; - } - list_for_each_entry(child, &adev->children, node) { - error = acpi_scan_bus_check(child); - if (error) - return error; - } - return 0; + return acpi_scan_rescan_bus(adev); } static int acpi_generic_hotplug_event(struct acpi_device *adev, u32 type) @@ -407,7 +457,7 @@ void acpi_device_hotplug(struct acpi_device *adev, u32 src) } else if (adev->flags.hotplug_notify) { error = acpi_generic_hotplug_event(adev, src); } else { - int (*notify)(struct acpi_device *, u32); + acpi_hp_notify notify; acpi_lock_hp_context(); notify = adev->hp ? adev->hp->notify : NULL; @@ -440,7 +490,7 @@ void acpi_device_hotplug(struct acpi_device *adev, u32 src) acpi_evaluate_ost(adev->handle, src, ost_code, NULL); out: - acpi_bus_put_acpi_device(adev); + acpi_put_acpi_dev(adev); mutex_unlock(&acpi_scan_lock); unlock_device_hotplug(); } @@ -476,22 +526,22 @@ static void acpi_device_del(struct acpi_device *device) struct acpi_device_bus_id *acpi_device_bus_id; mutex_lock(&acpi_device_lock); - if (device->parent) - list_del(&device->node); list_for_each_entry(acpi_device_bus_id, &acpi_bus_id_list, node) if (!strcmp(acpi_device_bus_id->bus_id, acpi_device_hid(device))) { - if (acpi_device_bus_id->instance_no > 0) - acpi_device_bus_id->instance_no--; - else { + ida_free(&acpi_device_bus_id->instance_ida, + device->pnp.instance_no); + if (ida_is_empty(&acpi_device_bus_id->instance_ida)) { list_del(&acpi_device_bus_id->node); + kfree_const(acpi_device_bus_id->bus_id); kfree(acpi_device_bus_id); } break; } list_del(&device->wakeup_list); + mutex_unlock(&acpi_device_lock); acpi_power_add_remove_device(device, false); @@ -533,7 +583,7 @@ static void acpi_device_del_work_fn(struct work_struct *work_not_used) * used by the device. 
*/ acpi_power_transition(adev, ACPI_STATE_D3_COLD); - put_device(&adev->dev); + acpi_dev_put(adev); } } @@ -563,7 +613,7 @@ static void acpi_scan_drop_device(acpi_handle handle, void *context) * prevents attempts to register device objects identical to those being * deleted from happening concurrently (such attempts result from * hotplug events handled via the ACPI hotplug workqueue). It also will - * run after all of the work items submitted previosuly, which helps + * run after all of the work items submitted previously, which helps * those work items to ensure that they are not accessing stale device * objects. */ @@ -577,152 +627,249 @@ static void acpi_scan_drop_device(acpi_handle handle, void *context) mutex_unlock(&acpi_device_del_lock); } -static int acpi_get_device_data(acpi_handle handle, struct acpi_device **device, - void (*callback)(void *)) +static struct acpi_device *handle_to_device(acpi_handle handle, + void (*callback)(void *)) { + struct acpi_device *adev = NULL; acpi_status status; - if (!device) - return -EINVAL; - status = acpi_get_data_full(handle, acpi_scan_drop_device, - (void **)device, callback); - if (ACPI_FAILURE(status) || !*device) { - ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No context for object [%p]\n", - handle)); - return -ENODEV; + (void **)&adev, callback); + if (ACPI_FAILURE(status) || !adev) { + acpi_handle_debug(handle, "No context!\n"); + return NULL; } - return 0; + return adev; } -int acpi_bus_get_device(acpi_handle handle, struct acpi_device **device) +/** + * acpi_fetch_acpi_dev - Retrieve ACPI device object. + * @handle: ACPI handle associated with the requested ACPI device object. + * + * Return a pointer to the ACPI device object associated with @handle, if + * present, or NULL otherwise. + */ +struct acpi_device *acpi_fetch_acpi_dev(acpi_handle handle) { - return acpi_get_device_data(handle, device, NULL); + return handle_to_device(handle, NULL); } -EXPORT_SYMBOL(acpi_bus_get_device); +EXPORT_SYMBOL_GPL(acpi_fetch_acpi_dev); static void get_acpi_device(void *dev) { - if (dev) - get_device(&((struct acpi_device *)dev)->dev); + acpi_dev_get(dev); } -struct acpi_device *acpi_bus_get_acpi_device(acpi_handle handle) +/** + * acpi_get_acpi_dev - Retrieve ACPI device object and reference count it. + * @handle: ACPI handle associated with the requested ACPI device object. + * + * Return a pointer to the ACPI device object associated with @handle and bump + * up that object's reference counter (under the ACPI Namespace lock), if + * present, or return NULL otherwise. + * + * The ACPI device object reference acquired by this function needs to be + * dropped via acpi_dev_put(). + */ +struct acpi_device *acpi_get_acpi_dev(acpi_handle handle) { - struct acpi_device *adev = NULL; - - acpi_get_device_data(handle, &adev, get_acpi_device); - return adev; + return handle_to_device(handle, get_acpi_device); } +EXPORT_SYMBOL_GPL(acpi_get_acpi_dev); -void acpi_bus_put_acpi_device(struct acpi_device *adev) +static struct acpi_device_bus_id *acpi_device_bus_id_match(const char *dev_id) { - put_device(&adev->dev); + struct acpi_device_bus_id *acpi_device_bus_id; + + /* Find suitable bus_id and instance number in acpi_bus_id_list. 
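
For illustration only (not part of the patch): a minimal sketch of the two handle-to-device lookups documented above, the borrowed-pointer acpi_fetch_acpi_dev() versus the reference-counted acpi_get_acpi_dev()/acpi_dev_put() pair; the function name below is hypothetical.

#include <linux/acpi.h>

static void example_lookup(acpi_handle handle)
{
	struct acpi_device *adev;

	/* Borrowed pointer: valid only as long as the namespace node exists. */
	adev = acpi_fetch_acpi_dev(handle);
	if (adev)
		dev_dbg(&adev->dev, "found without taking a reference\n");

	/* Counted reference: must be dropped with acpi_dev_put() when done. */
	adev = acpi_get_acpi_dev(handle);
	if (adev) {
		dev_dbg(&adev->dev, "found with a reference held\n");
		acpi_dev_put(adev);
	}
}
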
*/ + list_for_each_entry(acpi_device_bus_id, &acpi_bus_id_list, node) { + if (!strcmp(acpi_device_bus_id->bus_id, dev_id)) + return acpi_device_bus_id; + } + return NULL; } -int acpi_device_add(struct acpi_device *device, - void (*release)(struct device *)) +static int acpi_device_set_name(struct acpi_device *device, + struct acpi_device_bus_id *acpi_device_bus_id) { + struct ida *instance_ida = &acpi_device_bus_id->instance_ida; int result; - struct acpi_device_bus_id *acpi_device_bus_id, *new_bus_id; - int found = 0; - if (device->handle) { - acpi_status status; + result = ida_alloc(instance_ida, GFP_KERNEL); + if (result < 0) + return result; - status = acpi_attach_data(device->handle, acpi_scan_drop_device, - device); - if (ACPI_FAILURE(status)) { - acpi_handle_err(device->handle, - "Unable to attach device data\n"); - return -ENODEV; - } + device->pnp.instance_no = result; + dev_set_name(&device->dev, "%s:%02x", acpi_device_bus_id->bus_id, result); + return 0; +} + +int acpi_tie_acpi_dev(struct acpi_device *adev) +{ + acpi_handle handle = adev->handle; + acpi_status status; + + if (!handle) + return 0; + + status = acpi_attach_data(handle, acpi_scan_drop_device, adev); + if (ACPI_FAILURE(status)) { + acpi_handle_err(handle, "Unable to attach device data\n"); + return -ENODEV; } + return 0; +} + +static void acpi_store_pld_crc(struct acpi_device *adev) +{ + struct acpi_pld_info *pld; + + if (!acpi_get_physical_device_location(adev->handle, &pld)) + return; + + adev->pld_crc = crc32(~0, pld, sizeof(*pld)); + ACPI_FREE(pld); +} + +int acpi_device_add(struct acpi_device *device) +{ + struct acpi_device_bus_id *acpi_device_bus_id; + int result; + /* * Linkage * ------- * Link this device to its parent and siblings. */ - INIT_LIST_HEAD(&device->children); - INIT_LIST_HEAD(&device->node); INIT_LIST_HEAD(&device->wakeup_list); INIT_LIST_HEAD(&device->physical_node_list); INIT_LIST_HEAD(&device->del_list); mutex_init(&device->physical_node_lock); - new_bus_id = kzalloc(sizeof(struct acpi_device_bus_id), GFP_KERNEL); - if (!new_bus_id) { - pr_err(PREFIX "Memory allocation error\n"); - result = -ENOMEM; - goto err_detach; - } - mutex_lock(&acpi_device_lock); - /* - * Find suitable bus_id and instance number in acpi_bus_id_list - * If failed, create one and link it into acpi_bus_id_list - */ - list_for_each_entry(acpi_device_bus_id, &acpi_bus_id_list, node) { - if (!strcmp(acpi_device_bus_id->bus_id, - acpi_device_hid(device))) { - acpi_device_bus_id->instance_no++; - found = 1; - kfree(new_bus_id); - break; + + acpi_device_bus_id = acpi_device_bus_id_match(acpi_device_hid(device)); + if (acpi_device_bus_id) { + result = acpi_device_set_name(device, acpi_device_bus_id); + if (result) + goto err_unlock; + } else { + acpi_device_bus_id = kzalloc(sizeof(*acpi_device_bus_id), + GFP_KERNEL); + if (!acpi_device_bus_id) { + result = -ENOMEM; + goto err_unlock; } - } - if (!found) { - acpi_device_bus_id = new_bus_id; - strcpy(acpi_device_bus_id->bus_id, acpi_device_hid(device)); - acpi_device_bus_id->instance_no = 0; + acpi_device_bus_id->bus_id = + kstrdup_const(acpi_device_hid(device), GFP_KERNEL); + if (!acpi_device_bus_id->bus_id) { + kfree(acpi_device_bus_id); + result = -ENOMEM; + goto err_unlock; + } + + ida_init(&acpi_device_bus_id->instance_ida); + + result = acpi_device_set_name(device, acpi_device_bus_id); + if (result) { + kfree_const(acpi_device_bus_id->bus_id); + kfree(acpi_device_bus_id); + goto err_unlock; + } + list_add_tail(&acpi_device_bus_id->node, &acpi_bus_id_list); } - 
dev_set_name(&device->dev, "%s:%02x", acpi_device_bus_id->bus_id, acpi_device_bus_id->instance_no); - - if (device->parent) - list_add_tail(&device->node, &device->parent->children); if (device->wakeup.flags.valid) list_add_tail(&device->wakeup_list, &acpi_wakeup_device_list); + + acpi_store_pld_crc(device); + mutex_unlock(&acpi_device_lock); - if (device->parent) - device->dev.parent = &device->parent->dev; - device->dev.bus = &acpi_bus_type; - device->dev.release = release; result = device_add(&device->dev); if (result) { dev_err(&device->dev, "Error registering device\n"); goto err; } - result = acpi_device_setup_files(device); - if (result) - printk(KERN_ERR PREFIX "Error creating sysfs interface for device %s\n", - dev_name(&device->dev)); + acpi_device_setup_files(device); return 0; - err: +err: mutex_lock(&acpi_device_lock); - if (device->parent) - list_del(&device->node); + list_del(&device->wakeup_list); + +err_unlock: mutex_unlock(&acpi_device_lock); - err_detach: acpi_detach_data(device->handle, acpi_scan_drop_device); + return result; } /* -------------------------------------------------------------------------- Device Enumeration -------------------------------------------------------------------------- */ -static struct acpi_device *acpi_bus_get_parent(acpi_handle handle) +static bool acpi_info_matches_ids(struct acpi_device_info *info, + const char * const ids[]) { - struct acpi_device *device = NULL; - acpi_status status; + struct acpi_pnp_device_id_list *cid_list = NULL; + int i, index; + + if (!(info->valid & ACPI_VALID_HID)) + return false; + + index = match_string(ids, -1, info->hardware_id.string); + if (index >= 0) + return true; + + if (info->valid & ACPI_VALID_CID) + cid_list = &info->compatible_id_list; + + if (!cid_list) + return false; + + for (i = 0; i < cid_list->count; i++) { + index = match_string(ids, -1, cid_list->ids[i].string); + if (index >= 0) + return true; + } + + return false; +} + +/* List of HIDs for which we ignore matching ACPI devices, when checking _DEP lists. */ +static const char * const acpi_ignore_dep_ids[] = { + "PNP0D80", /* Windows-compatible System Power Management Controller */ + "INT33BD", /* Intel Baytrail Mailbox Device */ + "INTC10DE", /* Intel CVS LNL */ + "INTC10E0", /* Intel CVS ARL */ + "LATT2021", /* Lattice FW Update Client Driver */ + NULL +}; + +/* List of HIDs for which we honor deps of matching ACPI devs, when checking _DEP lists. */ +static const char * const acpi_honor_dep_ids[] = { + "INT3472", /* Camera sensor PMIC / clk and regulator info */ + "INTC1059", /* IVSC (TGL) driver must be loaded to allow i2c access to camera sensors */ + "INTC1095", /* IVSC (ADL) driver must be loaded to allow i2c access to camera sensors */ + "INTC100A", /* IVSC (RPL) driver must be loaded to allow i2c access to camera sensors */ + "INTC10CF", /* IVSC (MTL) driver must be loaded to allow i2c access to camera sensors */ + "RSCV0001", /* RISC-V PLIC */ + "RSCV0002", /* RISC-V APLIC */ + "RSCV0005", /* RISC-V SBI MPXY MBOX */ + "RSCV0006", /* RISC-V RPMI SYSMSI */ + "PNP0C0F", /* PCI Link Device */ + NULL +}; + +static struct acpi_device *acpi_find_parent_acpi_dev(acpi_handle handle) +{ + struct acpi_device *adev; /* * Fixed hardware devices do not appear in the namespace and do not @@ -733,11 +880,18 @@ static struct acpi_device *acpi_bus_get_parent(acpi_handle handle) return acpi_root; do { + acpi_status status; + status = acpi_get_parent(handle, &handle); - if (ACPI_FAILURE(status)) - return status == AE_NULL_ENTRY ? 
NULL : acpi_root; - } while (acpi_bus_get_device(handle, &device)); - return device; + if (ACPI_FAILURE(status)) { + if (status != AE_NULL_ENTRY) + return acpi_root; + + return NULL; + } + adev = acpi_fetch_acpi_dev(handle); + } while (!adev); + return adev; } acpi_status @@ -763,24 +917,23 @@ acpi_bus_get_ejd(acpi_handle handle, acpi_handle *ejd) } EXPORT_SYMBOL_GPL(acpi_bus_get_ejd); -static int acpi_bus_extract_wakeup_device_power_package(acpi_handle handle, - struct acpi_device_wakeup *wakeup) +static int acpi_bus_extract_wakeup_device_power_package(struct acpi_device *dev) { + acpi_handle handle = dev->handle; + struct acpi_device_wakeup *wakeup = &dev->wakeup; struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; union acpi_object *package = NULL; union acpi_object *element = NULL; acpi_status status; int err = -ENODATA; - if (!wakeup) - return -EINVAL; - INIT_LIST_HEAD(&wakeup->resources); /* _PRW */ status = acpi_evaluate_object(handle, "_PRW", NULL, &buffer); if (ACPI_FAILURE(status)) { - ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PRW")); + acpi_handle_info(handle, "_PRW evaluation failed: %s\n", + acpi_format_exception(status)); return err; } @@ -845,26 +998,29 @@ static int acpi_bus_extract_wakeup_device_power_package(acpi_handle handle, return err; } +/* Do not use a button for S5 wakeup */ +#define ACPI_AVOID_WAKE_FROM_S5 BIT(0) + static bool acpi_wakeup_gpe_init(struct acpi_device *device) { static const struct acpi_device_id button_device_ids[] = { - {"PNP0C0C", 0}, - {"PNP0C0D", 0}, - {"PNP0C0E", 0}, + {"PNP0C0C", 0}, /* Power button */ + {"PNP0C0D", ACPI_AVOID_WAKE_FROM_S5}, /* Lid */ + {"PNP0C0E", ACPI_AVOID_WAKE_FROM_S5}, /* Sleep button */ {"", 0}, }; struct acpi_device_wakeup *wakeup = &device->wakeup; + const struct acpi_device_id *match; acpi_status status; wakeup->flags.notifier_present = 0; /* Power button, Lid switch always enable wakeup */ - if (!acpi_match_device_ids(device, button_device_ids)) { - if (!acpi_match_device_ids(device, &button_device_ids[1])) { - /* Do not use Lid/sleep button for S5 wakeup */ - if (wakeup->sleep_state == ACPI_STATE_S5) - wakeup->sleep_state = ACPI_STATE_S4; - } + match = acpi_match_acpi_device(button_device_ids, device); + if (match) { + if ((match->driver_data & ACPI_AVOID_WAKE_FROM_S5) && + wakeup->sleep_state == ACPI_STATE_S5) + wakeup->sleep_state = ACPI_STATE_S4; acpi_mark_gpe_for_wake(wakeup->gpe_device, wakeup->gpe_number); device_set_wakeup_capable(&device->dev, true); return true; @@ -883,10 +1039,9 @@ static void acpi_bus_get_wakeup_device_flags(struct acpi_device *device) if (!acpi_has_method(device->handle, "_PRW")) return; - err = acpi_bus_extract_wakeup_device_power_package(device->handle, - &device->wakeup); + err = acpi_bus_extract_wakeup_device_power_package(device); if (err) { - dev_err(&device->dev, "_PRW evaluation error: %d\n", err); + dev_err(&device->dev, "Unable to extract wakeup power resources"); return; } @@ -895,14 +1050,13 @@ static void acpi_bus_get_wakeup_device_flags(struct acpi_device *device) /* * Call _PSW/_DSW object to disable its ability to wake the sleeping * system for the ACPI device with the _PRW object. - * The _PSW object is depreciated in ACPI 3.0 and is replaced by _DSW. + * The _PSW object is deprecated in ACPI 3.0 and is replaced by _DSW. * So it is necessary to call _DSW object first. Only when it is not * present will the _PSW object used. 
*/ err = acpi_device_sleep_wake(device, 0, 0, 0); if (err) - ACPI_DEBUG_PRINT((ACPI_DB_INFO, - "error in _DSW or _PSW evaluation\n")); + pr_debug("error in _DSW or _PSW evaluation\n"); } static void acpi_bus_init_power_state(struct acpi_device *device, int state) @@ -921,12 +1075,9 @@ static void acpi_bus_init_power_state(struct acpi_device *device, int state) if (buffer.length && package && package->type == ACPI_TYPE_PACKAGE - && package->package.count) { - int err = acpi_extract_power_resources(package, 0, - &ps->resources); - if (!err) - device->power.flags.power_resources = 1; - } + && package->package.count) + acpi_extract_power_resources(package, 0, &ps->resources); + ACPI_FREE(buffer.pointer); } @@ -945,6 +1096,7 @@ static void acpi_bus_init_power_state(struct acpi_device *device, int state) static void acpi_bus_get_power_flags(struct acpi_device *device) { + unsigned long long dsc = ACPI_STATE_D0; u32 i; /* Presence of _PS0|_PR0 indicates 'power manageable' */ @@ -966,6 +1118,9 @@ static void acpi_bus_get_power_flags(struct acpi_device *device) if (acpi_has_method(device->handle, "_DSW")) device->power.flags.dsw_present = 1; + acpi_evaluate_integer(device->handle, "_DSC", NULL, &dsc); + device->power.state_for_enumeration = dsc; + /* * Enumerate supported power management states */ @@ -973,14 +1128,27 @@ static void acpi_bus_get_power_flags(struct acpi_device *device) acpi_bus_init_power_state(device, i); INIT_LIST_HEAD(&device->power.states[ACPI_STATE_D3_COLD].resources); - if (!list_empty(&device->power.states[ACPI_STATE_D3_HOT].resources)) - device->power.states[ACPI_STATE_D3_COLD].flags.valid = 1; - /* Set defaults for D0 and D3hot states (always valid) */ + /* Set the defaults for D0 and D3hot (always supported). */ device->power.states[ACPI_STATE_D0].flags.valid = 1; device->power.states[ACPI_STATE_D0].power = 100; device->power.states[ACPI_STATE_D3_HOT].flags.valid = 1; + /* + * Use power resources only if the D0 list of them is populated, because + * some platforms may provide _PR3 only to indicate D3cold support and + * in those cases the power resources list returned by it may be bogus. + */ + if (!list_empty(&device->power.states[ACPI_STATE_D0].resources)) { + device->power.flags.power_resources = 1; + /* + * D3cold is supported if the D3hot list of power resources is + * not empty. + */ + if (!list_empty(&device->power.states[ACPI_STATE_D3_HOT].resources)) + device->power.states[ACPI_STATE_D3_COLD].flags.valid = 1; + } + if (acpi_bus_init_power(device)) device->flags.power_manageable = 0; } @@ -1013,20 +1181,20 @@ static void acpi_device_get_busid(struct acpi_device *device) * The device's Bus ID is simply the object name. * TBD: Shouldn't this value be unique (within the ACPI namespace)? 
*/ - if (ACPI_IS_ROOT_DEVICE(device)) { - strcpy(device->pnp.bus_id, "ACPI"); + if (!acpi_dev_parent(device)) { + strscpy(device->pnp.bus_id, "ACPI"); return; } switch (device->device_type) { case ACPI_BUS_TYPE_POWER_BUTTON: - strcpy(device->pnp.bus_id, "PWRF"); + strscpy(device->pnp.bus_id, "PWRF"); break; case ACPI_BUS_TYPE_SLEEP_BUTTON: - strcpy(device->pnp.bus_id, "SLPF"); + strscpy(device->pnp.bus_id, "SLPF"); break; case ACPI_BUS_TYPE_ECDT_EC: - strcpy(device->pnp.bus_id, "ECDT"); + strscpy(device->pnp.bus_id, "ECDT"); break; default: acpi_get_name(device->handle, ACPI_SINGLE_NAME, &buffer); @@ -1037,7 +1205,7 @@ static void acpi_device_get_busid(struct acpi_device *device) else break; } - strcpy(device->pnp.bus_id, bus_id); + strscpy(device->pnp.bus_id, bus_id); break; } } @@ -1113,8 +1281,7 @@ acpi_backlight_cap_match(acpi_handle handle, u32 level, void *context, if (acpi_has_method(handle, "_BCM") && acpi_has_method(handle, "_BCL")) { - ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found generic backlight " - "support\n")); + acpi_handle_debug(handle, "Found generic backlight support\n"); *cap |= ACPI_VIDEO_BACKLIGHT; /* We have backlight support, no need to scan further */ return AE_CTRL_TERMINATE; @@ -1162,10 +1329,10 @@ const char *acpi_device_hid(struct acpi_device *device) { struct acpi_hardware_id *hid; - if (list_empty(&device->pnp.ids)) + hid = list_first_entry_or_null(&device->pnp.ids, struct acpi_hardware_id, list); + if (!hid) return dummy_hid; - hid = list_first_entry(&device->pnp.ids, struct acpi_hardware_id, list); return hid->id; } EXPORT_SYMBOL(acpi_device_hid); @@ -1230,10 +1397,9 @@ static bool acpi_object_is_system_bus(acpi_handle handle) } static void acpi_set_pnp_ids(acpi_handle handle, struct acpi_device_pnp *pnp, - int device_type) + int device_type) { - acpi_status status; - struct acpi_device_info *info; + struct acpi_device_info *info = NULL; struct acpi_pnp_device_id_list *cid_list; int i; @@ -1244,10 +1410,9 @@ static void acpi_set_pnp_ids(acpi_handle handle, struct acpi_device_pnp *pnp, break; } - status = acpi_get_object_info(handle, &info); - if (ACPI_FAILURE(status)) { - pr_err(PREFIX "%s: Error reading device info\n", - __func__); + acpi_get_object_info(handle, &info); + if (!info) { + pr_err("%s: Error reading device info\n", __func__); return; } @@ -1276,9 +1441,12 @@ static void acpi_set_pnp_ids(acpi_handle handle, struct acpi_device_pnp *pnp, * Some devices don't reliably have _HIDs & _CIDs, so add * synthetic HIDs to make sure drivers can find them. */ - if (acpi_is_video_device(handle)) + if (acpi_is_video_device(handle)) { acpi_add_id(pnp, ACPI_VIDEO_HID); - else if (acpi_bay_match(handle)) + pnp->type.backlight = 1; + break; + } + if (acpi_bay_match(handle)) acpi_add_id(pnp, ACPI_BAY_HID); else if (acpi_dock_match(handle)) acpi_add_id(pnp, ACPI_DOCK_HID); @@ -1288,8 +1456,8 @@ static void acpi_set_pnp_ids(acpi_handle handle, struct acpi_device_pnp *pnp, acpi_object_is_system_bus(handle)) { /* \_SB, \_TZ, LNXSYBUS */ acpi_add_id(pnp, ACPI_BUS_HID); - strcpy(pnp->device_name, ACPI_BUS_DEVICE_NAME); - strcpy(pnp->device_class, ACPI_BUS_CLASS); + strscpy(pnp->device_name, ACPI_BUS_DEVICE_NAME); + strscpy(pnp->device_class, ACPI_BUS_CLASS); } break; @@ -1331,7 +1499,7 @@ void acpi_free_pnp_ids(struct acpi_device_pnp *pnp) * * Return false if DMA is not supported. 
Otherwise, return true */ -bool acpi_dma_supported(struct acpi_device *adev) +bool acpi_dma_supported(const struct acpi_device *adev) { if (!adev) return false; @@ -1371,25 +1539,21 @@ enum dev_dma_attr acpi_get_dma_attr(struct acpi_device *adev) * acpi_dma_get_range() - Get device DMA parameters. * * @dev: device to configure - * @dma_addr: pointer device DMA address result - * @offset: pointer to the DMA offset result - * @size: pointer to DMA range size result + * @map: pointer to DMA ranges result * - * Evaluate DMA regions and return respectively DMA region start, offset - * and size in dma_addr, offset and size on parsing success; it does not - * update the passed in values on failure. + * Evaluate DMA regions and return pointer to DMA regions on + * parsing success; it does not update the passed in values on failure. * * Return 0 on success, < 0 on failure. */ -int acpi_dma_get_range(struct device *dev, u64 *dma_addr, u64 *offset, - u64 *size) +int acpi_dma_get_range(struct device *dev, const struct bus_dma_region **map) { struct acpi_device *adev; LIST_HEAD(list); struct resource_entry *rentry; int ret; struct device *dma_dev = dev; - u64 len, dma_start = U64_MAX, dma_end = 0, dma_offset = 0; + struct bus_dma_region *r; /* * Walk the device tree chasing an ACPI companion with a _DMA @@ -1414,31 +1578,28 @@ int acpi_dma_get_range(struct device *dev, u64 *dma_addr, u64 *offset, ret = acpi_dev_get_dma_resources(adev, &list); if (ret > 0) { + r = kcalloc(ret + 1, sizeof(*r), GFP_KERNEL); + if (!r) { + ret = -ENOMEM; + goto out; + } + + *map = r; + list_for_each_entry(rentry, &list, node) { - if (dma_offset && rentry->offset != dma_offset) { + if (rentry->res->start >= rentry->res->end) { + kfree(*map); + *map = NULL; ret = -EINVAL; - dev_warn(dma_dev, "Can't handle multiple windows with different offsets\n"); + dev_dbg(dma_dev, "Invalid DMA regions configuration\n"); goto out; } - dma_offset = rentry->offset; - /* Take lower and upper limits */ - if (rentry->res->start < dma_start) - dma_start = rentry->res->start; - if (rentry->res->end > dma_end) - dma_end = rentry->res->end; + r->cpu_start = rentry->res->start; + r->dma_start = rentry->res->start - rentry->offset; + r->size = resource_size(rentry->res); + r++; } - - if (dma_start >= dma_end) { - ret = -EINVAL; - dev_dbg(dma_dev, "Invalid DMA regions configuration\n"); - goto out; - } - - *dma_addr = dma_start - dma_offset; - len = dma_end - dma_start; - *size = max(len, len + 1); - *offset = dma_offset; } out: acpi_dev_free_resource_list(&list); @@ -1446,39 +1607,93 @@ int acpi_dma_get_range(struct device *dev, u64 *dma_addr, u64 *offset, return ret >= 0 ? 
0 : ret; } +#ifdef CONFIG_IOMMU_API +int acpi_iommu_fwspec_init(struct device *dev, u32 id, + struct fwnode_handle *fwnode) +{ + int ret; + + ret = iommu_fwspec_init(dev, fwnode); + if (ret) + return ret; + + return iommu_fwspec_add_ids(dev, &id, 1); +} + +static int acpi_iommu_configure_id(struct device *dev, const u32 *id_in) +{ + int err; + + /* Serialise to make dev->iommu stable under our potential fwspec */ + mutex_lock(&iommu_probe_device_lock); + /* If we already translated the fwspec there is nothing left to do */ + if (dev_iommu_fwspec_get(dev)) { + mutex_unlock(&iommu_probe_device_lock); + return 0; + } + + err = iort_iommu_configure_id(dev, id_in); + if (err && err != -EPROBE_DEFER) + err = rimt_iommu_configure_id(dev, id_in); + if (err && err != -EPROBE_DEFER) + err = viot_iommu_configure(dev); + + mutex_unlock(&iommu_probe_device_lock); + + return err; +} + +#else /* !CONFIG_IOMMU_API */ + +int acpi_iommu_fwspec_init(struct device *dev, u32 id, + struct fwnode_handle *fwnode) +{ + return -ENODEV; +} + +static int acpi_iommu_configure_id(struct device *dev, const u32 *id_in) +{ + return -ENODEV; +} + +#endif /* !CONFIG_IOMMU_API */ + /** - * acpi_dma_configure - Set-up DMA configuration for the device. + * acpi_dma_configure_id - Set-up DMA configuration for the device. * @dev: The pointer to the device * @attr: device dma attributes + * @input_id: input device id const value pointer */ -int acpi_dma_configure(struct device *dev, enum dev_dma_attr attr) +int acpi_dma_configure_id(struct device *dev, enum dev_dma_attr attr, + const u32 *input_id) { - const struct iommu_ops *iommu; - u64 dma_addr = 0, size = 0; + int ret; if (attr == DEV_DMA_NOT_SUPPORTED) { set_dma_ops(dev, &dma_dummy_ops); return 0; } - iort_dma_setup(dev, &dma_addr, &size); + acpi_arch_dma_setup(dev); - iommu = iort_iommu_configure(dev); - if (IS_ERR(iommu) && PTR_ERR(iommu) == -EPROBE_DEFER) + /* Ignore all other errors apart from EPROBE_DEFER */ + ret = acpi_iommu_configure_id(dev, input_id); + if (ret == -EPROBE_DEFER) return -EPROBE_DEFER; + if (ret) + dev_dbg(dev, "Adding to IOMMU failed: %d\n", ret); - arch_setup_dma_ops(dev, dma_addr, size, - iommu, attr == DEV_DMA_COHERENT); + arch_setup_dma_ops(dev, attr == DEV_DMA_COHERENT); return 0; } -EXPORT_SYMBOL_GPL(acpi_dma_configure); +EXPORT_SYMBOL_GPL(acpi_dma_configure_id); static void acpi_init_coherency(struct acpi_device *adev) { unsigned long long cca = 0; acpi_status status; - struct acpi_device *parent = adev->parent; + struct acpi_device *parent = acpi_dev_parent(adev); if (parent && parent->flags.cca_seen) { /* @@ -1522,7 +1737,7 @@ static int acpi_check_serial_bus_slave(struct acpi_resource *ares, void *data) static bool acpi_is_indirect_io_slave(struct acpi_device *device) { - struct acpi_device *parent = device->parent; + struct acpi_device *parent = acpi_dev_parent(device); static const struct acpi_device_id indirect_io_hosts[] = { {"HISI0191", 0}, {} @@ -1535,18 +1750,42 @@ static bool acpi_device_enumeration_by_parent(struct acpi_device *device) { struct list_head resource_list; bool is_serial_bus_slave = false; + static const struct acpi_device_id ignore_serial_bus_ids[] = { /* - * These devices have multiple I2cSerialBus resources and an i2c-client - * must be instantiated for each, each with its own i2c_device_id. - * Normally we only instantiate an i2c-client for the first resource, - * using the ACPI HID as id. 
These special cases are handled by the - * drivers/platform/x86/i2c-multi-instantiate.c driver, which knows - * which i2c_device_id to use for each resource. + * These devices have multiple SerialBus resources and a client + * device must be instantiated for each of them, each with + * its own device id. + * Normally we only instantiate one client device for the first + * resource, using the ACPI HID as id. These special cases are handled + * by the drivers/platform/x86/serial-multi-instantiate.c driver, which + * knows which client device id to use for each resource. */ - static const struct acpi_device_id i2c_multi_instantiate_ids[] = { {"BSG1160", }, + {"BSG2150", }, + {"CSC3551", }, + {"CSC3554", }, + {"CSC3556", }, + {"CSC3557", }, {"INT33FE", }, {"INT3515", }, + {"TXNW2781", }, + /* Non-conforming _HID for Cirrus Logic already released */ + {"CLSA0100", }, + {"CLSA0101", }, + /* + * Some ACPI devs contain SerialBus resources even though they are not + * attached to a serial bus at all. + */ + {ACPI_VIDEO_HID, }, + {"MSHW0028", }, + /* + * HIDs of device with an UartSerialBusV2 resource for which userspace + * expects a regular tty cdev to be created (instead of the in kernel + * serdev) and which have a kernel driver which expects a platform_dev + * such as the rfkill-gpio driver. + */ + {"BCM4752", }, + {"LNV4752", }, {} }; @@ -1560,8 +1799,7 @@ static bool acpi_device_enumeration_by_parent(struct acpi_device *device) fwnode_property_present(&device->fwnode, "baud"))) return true; - /* Instantiate a pdev for the i2c-multi-instantiate drv to bind to */ - if (!acpi_match_device_ids(device, i2c_multi_instantiate_ids)) + if (!acpi_match_device_ids(device, ignore_serial_bus_ids)) return false; INIT_LIST_HEAD(&resource_list); @@ -1574,14 +1812,19 @@ static bool acpi_device_enumeration_by_parent(struct acpi_device *device) } void acpi_init_device_object(struct acpi_device *device, acpi_handle handle, - int type, unsigned long long sta) + int type, void (*release)(struct device *)) { + struct acpi_device *parent = acpi_find_parent_acpi_dev(handle); + INIT_LIST_HEAD(&device->pnp.ids); device->device_type = type; device->handle = handle; - device->parent = acpi_bus_get_parent(handle); - device->fwnode.ops = &acpi_device_fwnode_ops; - acpi_set_device_status(device, sta); + device->dev.parent = parent ? 
&parent->dev : NULL; + device->dev.release = release; + device->dev.bus = &acpi_bus_type; + device->dev.groups = acpi_groups; + fwnode_init(&device->fwnode, &acpi_device_fwnode_ops); + acpi_set_device_status(device, ACPI_STA_DEFAULT); acpi_device_get_busid(device); acpi_set_pnp_ids(handle, &device->pnp, type); acpi_init_properties(device); @@ -1594,8 +1837,21 @@ void acpi_init_device_object(struct acpi_device *device, acpi_handle handle, device_initialize(&device->dev); dev_set_uevent_suppress(&device->dev, true); acpi_init_coherency(device); - /* Assume there are unmet deps until acpi_device_dep_initialize() runs */ - device->dep_unmet = 1; +} + +static void acpi_scan_dep_init(struct acpi_device *adev) +{ + struct acpi_dep_data *dep; + + list_for_each_entry(dep, &acpi_dep_list, node) { + if (dep->consumer == adev->handle) { + if (dep->honor_dep) + adev->flags.honor_deps = 1; + + if (!dep->met) + adev->dep_unmet++; + } + } } void acpi_device_add_finalize(struct acpi_device *device) @@ -1604,34 +1860,55 @@ void acpi_device_add_finalize(struct acpi_device *device) kobject_uevent(&device->dev.kobj, KOBJ_ADD); } +static void acpi_scan_init_status(struct acpi_device *adev) +{ + if (acpi_bus_get_status(adev)) + acpi_set_device_status(adev, 0); +} + static int acpi_add_single_object(struct acpi_device **child, - acpi_handle handle, int type, - unsigned long long sta) + acpi_handle handle, int type, bool dep_init) { - int result; struct acpi_device *device; - struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; + bool release_dep_lock = false; + int result; device = kzalloc(sizeof(struct acpi_device), GFP_KERNEL); - if (!device) { - printk(KERN_ERR PREFIX "Memory allocation error\n"); + if (!device) return -ENOMEM; - } - acpi_init_device_object(device, handle, type, sta); + acpi_init_device_object(device, handle, type, acpi_device_release); /* - * For ACPI_BUS_TYPE_DEVICE getting the status is delayed till here so - * that we can call acpi_bus_get_status() and use its quirk handling. - * Note this must be done before the get power-/wakeup_dev-flags calls. + * Getting the status is delayed till here so that we can call + * acpi_bus_get_status() and use its quirk handling. Note that + * this must be done before the get power-/wakeup_dev-flags calls. */ - if (type == ACPI_BUS_TYPE_DEVICE) - if (acpi_bus_get_status(device) < 0) - acpi_set_device_status(device, 0); + if (type == ACPI_BUS_TYPE_DEVICE || type == ACPI_BUS_TYPE_PROCESSOR) { + if (dep_init) { + mutex_lock(&acpi_dep_list_lock); + /* + * Hold the lock until the acpi_tie_acpi_dev() call + * below to prevent concurrent acpi_scan_clear_dep() + * from deleting a dependency list entry without + * updating dep_unmet for the device. + */ + release_dep_lock = true; + acpi_scan_dep_init(device); + } + acpi_scan_init_status(device); + } acpi_bus_get_power_flags(device); acpi_bus_get_wakeup_device_flags(device); - result = acpi_device_add(device, acpi_device_release); + result = acpi_tie_acpi_dev(device); + + if (release_dep_lock) + mutex_unlock(&acpi_dep_list_lock); + + if (!result) + result = acpi_device_add(device); + if (result) { acpi_device_release(&device->dev); return result; @@ -1639,11 +1916,11 @@ static int acpi_add_single_object(struct acpi_device **child, acpi_power_add_remove_device(device, true); acpi_device_add_finalize(device); - acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer); - ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Added %s [%s] parent %s\n", - dev_name(&device->dev), (char *) buffer.pointer, - device->parent ? 
dev_name(&device->parent->dev) : "(null)")); - kfree(buffer.pointer); + + acpi_handle_debug(handle, "Added as %s, parent %s\n", + dev_name(&device->dev), device->dev.parent ? + dev_name(device->dev.parent) : "(null)"); + *child = device; return 0; } @@ -1683,53 +1960,14 @@ static bool acpi_device_should_be_hidden(acpi_handle handle) return true; } -static int acpi_bus_type_and_status(acpi_handle handle, int *type, - unsigned long long *sta) +bool acpi_device_is_present(const struct acpi_device *adev) { - acpi_status status; - acpi_object_type acpi_type; - - status = acpi_get_type(handle, &acpi_type); - if (ACPI_FAILURE(status)) - return -ENODEV; - - switch (acpi_type) { - case ACPI_TYPE_ANY: /* for ACPI_ROOT_OBJECT */ - case ACPI_TYPE_DEVICE: - if (acpi_device_should_be_hidden(handle)) - return -ENODEV; - - *type = ACPI_BUS_TYPE_DEVICE; - /* - * acpi_add_single_object updates this once we've an acpi_device - * so that acpi_bus_get_status' quirk handling can be used. - */ - *sta = ACPI_STA_DEFAULT; - break; - case ACPI_TYPE_PROCESSOR: - *type = ACPI_BUS_TYPE_PROCESSOR; - status = acpi_bus_get_status_handle(handle, sta); - if (ACPI_FAILURE(status)) - return -ENODEV; - break; - case ACPI_TYPE_THERMAL: - *type = ACPI_BUS_TYPE_THERMAL; - *sta = ACPI_STA_DEFAULT; - break; - case ACPI_TYPE_POWER: - *type = ACPI_BUS_TYPE_POWER; - *sta = ACPI_STA_DEFAULT; - break; - default: - return -ENODEV; - } - - return 0; + return adev->status.present || adev->status.functional; } -bool acpi_device_is_present(const struct acpi_device *adev) +bool acpi_device_is_enabled(const struct acpi_device *adev) { - return adev->status.present || adev->status.functional; + return adev->status.enabled; } static bool acpi_scan_handler_matching(struct acpi_scan_handler *handler, @@ -1776,6 +2014,49 @@ void acpi_scan_hotplug_enabled(struct acpi_hotplug_profile *hotplug, bool val) mutex_unlock(&acpi_scan_lock); } +int acpi_scan_add_dep(acpi_handle handle, struct acpi_handle_list *dep_devices) +{ + u32 count; + int i; + + for (count = 0, i = 0; i < dep_devices->count; i++) { + struct acpi_device_info *info; + struct acpi_dep_data *dep; + bool skip, honor_dep; + acpi_status status; + + status = acpi_get_object_info(dep_devices->handles[i], &info); + if (ACPI_FAILURE(status)) { + acpi_handle_debug(handle, "Error reading _DEP device info\n"); + continue; + } + + skip = acpi_info_matches_ids(info, acpi_ignore_dep_ids); + honor_dep = acpi_info_matches_ids(info, acpi_honor_dep_ids); + kfree(info); + + if (skip) + continue; + + dep = kzalloc(sizeof(*dep), GFP_KERNEL); + if (!dep) + continue; + + count++; + + dep->supplier = dep_devices->handles[i]; + dep->consumer = handle; + dep->honor_dep = honor_dep; + + mutex_lock(&acpi_dep_list_lock); + list_add_tail(&dep->node, &acpi_dep_list); + mutex_unlock(&acpi_dep_list_lock); + } + + acpi_handle_list_free(dep_devices); + return count; +} + static void acpi_scan_init_hotplug(struct acpi_device *adev) { struct acpi_hardware_id *hwid; @@ -1795,96 +2076,133 @@ static void acpi_scan_init_hotplug(struct acpi_device *adev) } } -static void acpi_device_dep_initialize(struct acpi_device *adev) +u32 __weak arch_acpi_add_auto_dep(acpi_handle handle) { return 0; } + +static u32 acpi_scan_check_dep(acpi_handle handle) { - struct acpi_dep_data *dep; struct acpi_handle_list dep_devices; - acpi_status status; - int i; + u32 count = 0; - adev->dep_unmet = 0; + /* + * Some architectures like RISC-V need to add dependencies for + * all devices which use GSI to the interrupt controller so that + * interrupt 
controller is probed before any of those devices. + * Instead of mandating _DEP on all the devices, detect the + * dependency and add automatically. + */ + count += arch_acpi_add_auto_dep(handle); - if (!acpi_has_method(adev->handle, "_DEP")) - return; + /* + * Check for _HID here to avoid deferring the enumeration of: + * 1. PCI devices. + * 2. ACPI nodes describing USB ports. + * Still, checking for _HID catches more then just these cases ... + */ + if (!acpi_has_method(handle, "_DEP") || !acpi_has_method(handle, "_HID")) + return count; - status = acpi_evaluate_reference(adev->handle, "_DEP", NULL, - &dep_devices); - if (ACPI_FAILURE(status)) { - dev_dbg(&adev->dev, "Failed to evaluate _DEP.\n"); - return; + if (!acpi_evaluate_reference(handle, "_DEP", NULL, &dep_devices)) { + acpi_handle_debug(handle, "Failed to evaluate _DEP.\n"); + return count; } - for (i = 0; i < dep_devices.count; i++) { - struct acpi_device_info *info; - int skip; - - status = acpi_get_object_info(dep_devices.handles[i], &info); - if (ACPI_FAILURE(status)) { - dev_dbg(&adev->dev, "Error reading _DEP device info\n"); - continue; - } - - /* - * Skip the dependency of Windows System Power - * Management Controller - */ - skip = info->valid & ACPI_VALID_HID && - !strcmp(info->hardware_id.string, "INT3396"); - - kfree(info); - - if (skip) - continue; - - dep = kzalloc(sizeof(struct acpi_dep_data), GFP_KERNEL); - if (!dep) - return; - - dep->master = dep_devices.handles[i]; - dep->slave = adev->handle; - adev->dep_unmet++; + count += acpi_scan_add_dep(handle, &dep_devices); + return count; +} - mutex_lock(&acpi_dep_list_lock); - list_add_tail(&dep->node , &acpi_dep_list); - mutex_unlock(&acpi_dep_list_lock); - } +static acpi_status acpi_scan_check_crs_csi2_cb(acpi_handle handle, u32 a, void *b, void **c) +{ + acpi_mipi_check_crs_csi2(handle); + return AE_OK; } -static acpi_status acpi_bus_check_add(acpi_handle handle, u32 lvl_not_used, - void *not_used, void **return_value) +static acpi_status acpi_bus_check_add(acpi_handle handle, bool first_pass, + struct acpi_device **adev_p) { - struct acpi_device *device = NULL; + struct acpi_device *device = acpi_fetch_acpi_dev(handle); + acpi_object_type acpi_type; int type; - unsigned long long sta; - int result; - acpi_bus_get_device(handle, &device); if (device) goto out; - result = acpi_bus_type_and_status(handle, &type, &sta); - if (result) + if (ACPI_FAILURE(acpi_get_type(handle, &acpi_type))) return AE_OK; - if (type == ACPI_BUS_TYPE_POWER) { + switch (acpi_type) { + case ACPI_TYPE_DEVICE: + if (acpi_device_should_be_hidden(handle)) + return AE_OK; + + if (first_pass) { + acpi_mipi_check_crs_csi2(handle); + + /* Bail out if there are dependencies. */ + if (acpi_scan_check_dep(handle) > 0) { + /* + * The entire CSI-2 connection graph needs to be + * extracted before any drivers or scan handlers + * are bound to struct device objects, so scan + * _CRS CSI-2 resource descriptors for all + * devices below the current handle. 
+ */ + acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, + ACPI_UINT32_MAX, + acpi_scan_check_crs_csi2_cb, + NULL, NULL, NULL); + return AE_CTRL_DEPTH; + } + } + + fallthrough; + case ACPI_TYPE_ANY: /* for ACPI_ROOT_OBJECT */ + type = ACPI_BUS_TYPE_DEVICE; + break; + + case ACPI_TYPE_PROCESSOR: + type = ACPI_BUS_TYPE_PROCESSOR; + break; + + case ACPI_TYPE_THERMAL: + type = ACPI_BUS_TYPE_THERMAL; + break; + + case ACPI_TYPE_POWER: acpi_add_power_resource(handle); + fallthrough; + default: return AE_OK; } - acpi_add_single_object(&device, handle, type, sta); + /* + * If first_pass is true at this point, the device has no dependencies, + * or the creation of the device object would have been postponed above. + */ + acpi_add_single_object(&device, handle, type, !first_pass); if (!device) return AE_CTRL_DEPTH; acpi_scan_init_hotplug(device); - acpi_device_dep_initialize(device); - out: - if (!*return_value) - *return_value = device; +out: + if (!*adev_p) + *adev_p = device; return AE_OK; } +static acpi_status acpi_bus_check_add_1(acpi_handle handle, u32 lvl_not_used, + void *not_used, void **ret_p) +{ + return acpi_bus_check_add(handle, true, (struct acpi_device **)ret_p); +} + +static acpi_status acpi_bus_check_add_2(acpi_handle handle, u32 lvl_not_used, + void *not_used, void **ret_p) +{ + return acpi_bus_check_add(handle, false, (struct acpi_device **)ret_p); +} + static void acpi_default_enumeration(struct acpi_device *device) { /* @@ -1952,26 +2270,31 @@ static int acpi_scan_attach_handler(struct acpi_device *device) return ret; } -static void acpi_bus_attach(struct acpi_device *device) +static int acpi_bus_attach(struct acpi_device *device, void *first_pass) { - struct acpi_device *child; + bool skip = !first_pass && device->flags.visited; acpi_handle ejd; int ret; + if (skip) + goto ok; + if (ACPI_SUCCESS(acpi_bus_get_ejd(device->handle, &ejd))) register_dock_dependent_device(device, ejd); acpi_bus_get_status(device); - /* Skip devices that are not present. */ - if (!acpi_device_is_present(device)) { + /* Skip devices that are not ready for enumeration (e.g. 
not present) */ + if (!acpi_dev_ready_for_enumeration(device)) { device->flags.initialized = false; acpi_device_clear_enumerated(device); device->flags.power_manageable = 0; - return; + return 0; } if (device->handler) goto ok; + acpi_ec_register_opregions(device); + if (!device->flags.initialized) { device->flags.power_manageable = device->power.states[ACPI_STATE_D0].flags.valid; @@ -1985,7 +2308,7 @@ static void acpi_bus_attach(struct acpi_device *device) ret = acpi_scan_attach_handler(device); if (ret < 0) - return; + return 0; device->flags.match_driver = true; if (ret > 0 && !device->flags.enumeration_by_parent) { @@ -1995,43 +2318,258 @@ static void acpi_bus_attach(struct acpi_device *device) ret = device_attach(&device->dev); if (ret < 0) - return; + return 0; if (device->pnp.type.platform_id || device->flags.enumeration_by_parent) acpi_default_enumeration(device); else acpi_device_set_enumerated(device); - ok: - list_for_each_entry(child, &device->children, node) - acpi_bus_attach(child); +ok: + acpi_dev_for_each_child(device, acpi_bus_attach, first_pass); - if (device->handler && device->handler->hotplug.notify_online) + if (!skip && device->handler && device->handler->hotplug.notify_online) device->handler->hotplug.notify_online(device); + + return 0; } -void acpi_walk_dep_device_list(acpi_handle handle) +static int acpi_dev_get_next_consumer_dev_cb(struct acpi_dep_data *dep, void *data) { - struct acpi_dep_data *dep, *tmp; + struct acpi_device **adev_p = data; + struct acpi_device *adev = *adev_p; + + /* + * If we're passed a 'previous' consumer device then we need to skip + * any consumers until we meet the previous one, and then NULL @data + * so the next one can be returned. + */ + if (adev) { + if (dep->consumer == adev->handle) + *adev_p = NULL; + + return 0; + } + + adev = acpi_get_acpi_dev(dep->consumer); + if (adev) { + *(struct acpi_device **)data = adev; + return 1; + } + /* Continue parsing if the device object is not present. */ + return 0; +} + +struct acpi_scan_clear_dep_work { + struct work_struct work; struct acpi_device *adev; +}; + +static void acpi_scan_clear_dep_fn(struct work_struct *work) +{ + struct acpi_scan_clear_dep_work *cdw; + + cdw = container_of(work, struct acpi_scan_clear_dep_work, work); + + acpi_scan_lock_acquire(); + acpi_bus_attach(cdw->adev, (void *)true); + acpi_scan_lock_release(); + + acpi_dev_put(cdw->adev); + kfree(cdw); +} + +static bool acpi_scan_clear_dep_queue(struct acpi_device *adev) +{ + struct acpi_scan_clear_dep_work *cdw; + + if (adev->dep_unmet) + return false; + + cdw = kmalloc(sizeof(*cdw), GFP_KERNEL); + if (!cdw) + return false; + + cdw->adev = adev; + INIT_WORK(&cdw->work, acpi_scan_clear_dep_fn); + /* + * Since the work function may block on the lock until the entire + * initial enumeration of devices is complete, put it into the unbound + * workqueue. 
+ */ + queue_work(system_dfl_wq, &cdw->work); + + return true; +} + +static void acpi_scan_delete_dep_data(struct acpi_dep_data *dep) +{ + list_del(&dep->node); + kfree(dep); +} + +static int acpi_scan_clear_dep(struct acpi_dep_data *dep, void *data) +{ + struct acpi_device *adev = acpi_get_acpi_dev(dep->consumer); + + if (adev) { + adev->dep_unmet--; + if (!acpi_scan_clear_dep_queue(adev)) + acpi_dev_put(adev); + } + + if (dep->free_when_met) + acpi_scan_delete_dep_data(dep); + else + dep->met = true; + + return 0; +} + +/** + * acpi_walk_dep_device_list - Apply a callback to every entry in acpi_dep_list + * @handle: The ACPI handle of the supplier device + * @callback: Pointer to the callback function to apply + * @data: Pointer to some data to pass to the callback + * + * The return value of the callback determines this function's behaviour. If 0 + * is returned we continue to iterate over acpi_dep_list. If a positive value + * is returned then the loop is broken but this function returns 0. If a + * negative value is returned by the callback then the loop is broken and that + * value is returned as the final error. + */ +static int acpi_walk_dep_device_list(acpi_handle handle, + int (*callback)(struct acpi_dep_data *, void *), + void *data) +{ + struct acpi_dep_data *dep, *tmp; + int ret = 0; mutex_lock(&acpi_dep_list_lock); list_for_each_entry_safe(dep, tmp, &acpi_dep_list, node) { - if (dep->master == handle) { - acpi_bus_get_device(dep->slave, &adev); - if (!adev) - continue; + if (dep->supplier == handle) { + ret = callback(dep, data); + if (ret) + break; + } + } + mutex_unlock(&acpi_dep_list_lock); - adev->dep_unmet--; - if (!adev->dep_unmet) - acpi_bus_attach(adev); - list_del(&dep->node); - kfree(dep); + return ret > 0 ? 0 : ret; +} + +/** + * acpi_dev_clear_dependencies - Inform consumers that the device is now active + * @supplier: Pointer to the supplier &struct acpi_device + * + * Clear dependencies on the given device. + */ +void acpi_dev_clear_dependencies(struct acpi_device *supplier) +{ + acpi_walk_dep_device_list(supplier->handle, acpi_scan_clear_dep, NULL); +} +EXPORT_SYMBOL_GPL(acpi_dev_clear_dependencies); + +/** + * acpi_dev_ready_for_enumeration - Check if the ACPI device is ready for enumeration + * @device: Pointer to the &struct acpi_device to check + * + * Check if the device is present and has no unmet dependencies. + * + * Return true if the device is ready for enumeratino. Otherwise, return false. + */ +bool acpi_dev_ready_for_enumeration(const struct acpi_device *device) +{ + if (device->flags.honor_deps && device->dep_unmet) + return false; + + return acpi_device_is_present(device); +} +EXPORT_SYMBOL_GPL(acpi_dev_ready_for_enumeration); + +/** + * acpi_dev_get_next_consumer_dev - Return the next adev dependent on @supplier + * @supplier: Pointer to the dependee device + * @start: Pointer to the current dependent device + * + * Returns the next &struct acpi_device which declares itself dependent on + * @supplier via the _DEP buffer, parsed from the acpi_dep_list. + * + * If the returned adev is not passed as @start to this function, the caller is + * responsible for putting the reference to adev when it is no longer needed. 
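
For illustration only (not part of the patch), a sketch of how a supplier driver might use the dependency helpers documented above once it has finished setting itself up; the function and device names are hypothetical.

#include <linux/acpi.h>
#include <linux/platform_device.h>

static void example_supplier_ready(struct platform_device *pdev)
{
	struct acpi_device *supplier = ACPI_COMPANION(&pdev->dev);
	struct acpi_device *consumer = NULL;

	/* Mark other devices' _DEP dependencies on this one as met. */
	acpi_dev_clear_dependencies(supplier);

	/*
	 * Walk the consumers that list this device in _DEP.  Passing the
	 * previously returned device back in as @start drops its reference,
	 * so no explicit acpi_dev_put() is needed inside the loop.
	 */
	while ((consumer = acpi_dev_get_next_consumer_dev(supplier, consumer)))
		dev_dbg(&consumer->dev, "consumer of %s\n", dev_name(&pdev->dev));
}
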
+ */ +struct acpi_device *acpi_dev_get_next_consumer_dev(struct acpi_device *supplier, + struct acpi_device *start) +{ + struct acpi_device *adev = start; + + acpi_walk_dep_device_list(supplier->handle, + acpi_dev_get_next_consumer_dev_cb, &adev); + + acpi_dev_put(start); + + if (adev == start) + return NULL; + + return adev; +} +EXPORT_SYMBOL_GPL(acpi_dev_get_next_consumer_dev); + +static void acpi_scan_postponed_branch(acpi_handle handle) +{ + struct acpi_device *adev = NULL; + + if (ACPI_FAILURE(acpi_bus_check_add(handle, false, &adev))) + return; + + acpi_walk_namespace(ACPI_TYPE_ANY, handle, ACPI_UINT32_MAX, + acpi_bus_check_add_2, NULL, NULL, (void **)&adev); + + /* + * Populate the ACPI _CRS CSI-2 software nodes for the ACPI devices that + * have been added above. + */ + acpi_mipi_init_crs_csi2_swnodes(); + + acpi_bus_attach(adev, NULL); +} + +static void acpi_scan_postponed(void) +{ + struct acpi_dep_data *dep, *tmp; + + mutex_lock(&acpi_dep_list_lock); + + list_for_each_entry_safe(dep, tmp, &acpi_dep_list, node) { + acpi_handle handle = dep->consumer; + + /* + * In case there are multiple acpi_dep_list entries with the + * same consumer, skip the current entry if the consumer device + * object corresponding to it is present already. + */ + if (!acpi_fetch_acpi_dev(handle)) { + /* + * Even though the lock is released here, tmp is + * guaranteed to be valid, because none of the list + * entries following dep is marked as "free when met" + * and so they cannot be deleted. + */ + mutex_unlock(&acpi_dep_list_lock); + + acpi_scan_postponed_branch(handle); + + mutex_lock(&acpi_dep_list_lock); } + + if (dep->met) + acpi_scan_delete_dep_data(dep); + else + dep->free_when_met = true; } + mutex_unlock(&acpi_dep_list_lock); } -EXPORT_SYMBOL_GPL(acpi_walk_dep_device_list); /** * acpi_bus_scan - Add ACPI device node objects in a given namespace scope. @@ -2049,17 +2587,35 @@ EXPORT_SYMBOL_GPL(acpi_walk_dep_device_list); */ int acpi_bus_scan(acpi_handle handle) { - void *device = NULL; + struct acpi_device *device = NULL; + + /* Pass 1: Avoid enumerating devices with missing dependencies. */ - if (ACPI_SUCCESS(acpi_bus_check_add(handle, 0, NULL, &device))) + if (ACPI_SUCCESS(acpi_bus_check_add(handle, true, &device))) acpi_walk_namespace(ACPI_TYPE_ANY, handle, ACPI_UINT32_MAX, - acpi_bus_check_add, NULL, NULL, &device); + acpi_bus_check_add_1, NULL, NULL, + (void **)&device); - if (device) { - acpi_bus_attach(device); - return 0; - } - return -ENODEV; + if (!device) + return -ENODEV; + + /* + * Set up ACPI _CRS CSI-2 software nodes using information extracted + * from the _CRS CSI-2 resource descriptors during the ACPI namespace + * walk above and MIPI DisCo for Imaging device properties. + */ + acpi_mipi_scan_crs_csi2(); + acpi_mipi_init_crs_csi2_swnodes(); + + acpi_bus_attach(device, (void *)true); + + /* Pass 2: Enumerate all of the remaining devices. 
*/ + + acpi_scan_postponed(); + + acpi_mipi_crs_csi2_cleanup(); + + return 0; } EXPORT_SYMBOL(acpi_bus_scan); @@ -2071,28 +2627,9 @@ EXPORT_SYMBOL(acpi_bus_scan); */ void acpi_bus_trim(struct acpi_device *adev) { - struct acpi_scan_handler *handler = adev->handler; - struct acpi_device *child; - - list_for_each_entry_reverse(child, &adev->children, node) - acpi_bus_trim(child); - - adev->flags.match_driver = false; - if (handler) { - if (handler->detach) - handler->detach(adev); + uintptr_t flags = 0; - adev->handler = NULL; - } else { - device_release_driver(&adev->dev); - } - /* - * Most likely, the device is going away, so put it into D3cold before - * that. - */ - acpi_device_set_power(adev, ACPI_STATE_D3_COLD); - adev->flags.initialized = false; - acpi_device_clear_enumerated(adev); + acpi_scan_check_and_detach(adev, (void *)flags); } EXPORT_SYMBOL_GPL(acpi_bus_trim); @@ -2101,8 +2638,7 @@ int acpi_bus_register_early_device(int type) struct acpi_device *device = NULL; int result; - result = acpi_add_single_object(&device, NULL, - type, ACPI_STA_DEFAULT); + result = acpi_add_single_object(&device, NULL, type, false); if (result) return result; @@ -2111,44 +2647,33 @@ int acpi_bus_register_early_device(int type) } EXPORT_SYMBOL_GPL(acpi_bus_register_early_device); -static int acpi_bus_scan_fixed(void) +static void acpi_bus_scan_fixed(void) { - int result = 0; - - /* - * Enumerate all fixed-feature devices. - */ if (!(acpi_gbl_FADT.flags & ACPI_FADT_POWER_BUTTON)) { - struct acpi_device *device = NULL; - - result = acpi_add_single_object(&device, NULL, - ACPI_BUS_TYPE_POWER_BUTTON, - ACPI_STA_DEFAULT); - if (result) - return result; - - device->flags.match_driver = true; - result = device_attach(&device->dev); - if (result < 0) - return result; - - device_init_wakeup(&device->dev, true); + struct acpi_device *adev = NULL; + + acpi_add_single_object(&adev, NULL, ACPI_BUS_TYPE_POWER_BUTTON, + false); + if (adev) { + adev->flags.match_driver = true; + if (device_attach(&adev->dev) >= 0) + device_init_wakeup(&adev->dev, true); + else + dev_dbg(&adev->dev, "No driver\n"); + } } if (!(acpi_gbl_FADT.flags & ACPI_FADT_SLEEP_BUTTON)) { - struct acpi_device *device = NULL; - - result = acpi_add_single_object(&device, NULL, - ACPI_BUS_TYPE_SLEEP_BUTTON, - ACPI_STA_DEFAULT); - if (result) - return result; - - device->flags.match_driver = true; - result = device_attach(&device->dev); + struct acpi_device *adev = NULL; + + acpi_add_single_object(&adev, NULL, ACPI_BUS_TYPE_SLEEP_BUTTON, + false); + if (adev) { + adev->flags.match_driver = true; + if (device_attach(&adev->dev) < 0) + dev_dbg(&adev->dev, "No driver\n"); + } } - - return result < 0 ? 
result : 0; } static void __init acpi_get_spcr_uart_addr(void) @@ -2158,23 +2683,26 @@ static void __init acpi_get_spcr_uart_addr(void) status = acpi_get_table(ACPI_SIG_SPCR, 0, (struct acpi_table_header **)&spcr_ptr); - if (ACPI_SUCCESS(status)) - spcr_uart_addr = spcr_ptr->serial_port.address; - else - printk(KERN_WARNING PREFIX "STAO table present, but SPCR is missing\n"); + if (ACPI_FAILURE(status)) { + pr_warn("STAO table present, but SPCR is missing\n"); + return; + } + + spcr_uart_addr = spcr_ptr->serial_port.address; + acpi_put_table((struct acpi_table_header *)spcr_ptr); } static bool acpi_scan_initialized; -int __init acpi_scan_init(void) +void __init acpi_scan_init(void) { - int result; acpi_status status; struct acpi_table_stao *stao_ptr; acpi_pci_root_init(); acpi_pci_link_init(); acpi_processor_init(); + acpi_platform_init(); acpi_lpss_init(); acpi_apd_init(); acpi_cmos_rtc_init(); @@ -2182,8 +2710,7 @@ int __init acpi_scan_init(void) acpi_memory_hotplug_init(); acpi_watchdog_init(); acpi_pnp_init(); - acpi_int340x_thermal_init(); - acpi_amba_init(); + acpi_power_resources_init(); acpi_init_lpit(); acpi_scan_add_handler(&generic_device_handler); @@ -2196,60 +2723,62 @@ int __init acpi_scan_init(void) (struct acpi_table_header **)&stao_ptr); if (ACPI_SUCCESS(status)) { if (stao_ptr->header.length > sizeof(struct acpi_table_stao)) - printk(KERN_INFO PREFIX "STAO Name List not yet supported."); + pr_info("STAO Name List not yet supported.\n"); if (stao_ptr->ignore_uart) acpi_get_spcr_uart_addr(); + + acpi_put_table((struct acpi_table_header *)stao_ptr); } acpi_gpe_apply_masked_gpes(); acpi_update_all_gpes(); + /* + * Although we call __add_memory() that is documented to require the + * device_hotplug_lock, it is not necessary here because this is an + * early code when userspace or any other code path cannot trigger + * hotplug/hotunplug operations. + */ mutex_lock(&acpi_scan_lock); /* * Enumerate devices in the ACPI namespace. 
*/ - result = acpi_bus_scan(ACPI_ROOT_OBJECT); - if (result) - goto out; + if (acpi_bus_scan(ACPI_ROOT_OBJECT)) + goto unlock; - result = acpi_bus_get_device(ACPI_ROOT_OBJECT, &acpi_root); - if (result) - goto out; + acpi_root = acpi_fetch_acpi_dev(ACPI_ROOT_OBJECT); + if (!acpi_root) + goto unlock; /* Fixed feature devices do not exist on HW-reduced platform */ - if (!acpi_gbl_reduced_hardware) { - result = acpi_bus_scan_fixed(); - if (result) { - acpi_detach_data(acpi_root->handle, - acpi_scan_drop_device); - acpi_device_del(acpi_root); - put_device(&acpi_root->dev); - goto out; - } - } + if (!acpi_gbl_reduced_hardware) + acpi_bus_scan_fixed(); + + acpi_turn_off_unused_power_resources(); acpi_scan_initialized = true; - out: +unlock: mutex_unlock(&acpi_scan_lock); - return result; } static struct acpi_probe_entry *ape; static int acpi_probe_count; static DEFINE_MUTEX(acpi_probe_mutex); -static int __init acpi_match_madt(struct acpi_subtable_header *header, +static int __init acpi_match_madt(union acpi_subtable_headers *header, const unsigned long end) { - if (!ape->subtable_valid || ape->subtable_valid(header, ape)) + if (!ape->subtable_valid || ape->subtable_valid(&header->common, ape)) if (!ape->probe_subtbl(header, end)) acpi_probe_count++; return 0; } +void __weak arch_sort_irqchip_probe(struct acpi_probe_entry *ap_head, int nr) { } + int __init __acpi_probe_device_table(struct acpi_probe_entry *ap_head, int nr) { int count = 0; @@ -2258,8 +2787,9 @@ int __init __acpi_probe_device_table(struct acpi_probe_entry *ap_head, int nr) return 0; mutex_lock(&acpi_probe_mutex); + arch_sort_irqchip_probe(ap_head, nr); for (ape = ap_head; nr; ape++, nr--) { - if (ACPI_COMPARE_NAME(ACPI_SIG_MADT, ape->id)) { + if (ACPI_COMPARE_NAMESEG(ACPI_SIG_MADT, ape->id)) { acpi_probe_count = 0; acpi_table_parse_madt(ape->type, acpi_match_madt, 0); count += acpi_probe_count; @@ -2275,46 +2805,28 @@ int __init __acpi_probe_device_table(struct acpi_probe_entry *ap_head, int nr) return count; } -struct acpi_table_events_work { - struct work_struct work; - void *table; - u32 event; -}; - static void acpi_table_events_fn(struct work_struct *work) { - struct acpi_table_events_work *tew; + acpi_scan_lock_acquire(); + acpi_bus_scan(ACPI_ROOT_OBJECT); + acpi_scan_lock_release(); - tew = container_of(work, struct acpi_table_events_work, work); - - if (tew->event == ACPI_TABLE_EVENT_LOAD) { - acpi_scan_lock_acquire(); - acpi_bus_scan(ACPI_ROOT_OBJECT); - acpi_scan_lock_release(); - } - - kfree(tew); + kfree(work); } -void acpi_scan_table_handler(u32 event, void *table, void *context) +void acpi_scan_table_notify(void) { - struct acpi_table_events_work *tew; + struct work_struct *work; if (!acpi_scan_initialized) return; - if (event != ACPI_TABLE_EVENT_LOAD) - return; - - tew = kmalloc(sizeof(*tew), GFP_KERNEL); - if (!tew) + work = kmalloc(sizeof(*work), GFP_KERNEL); + if (!work) return; - INIT_WORK(&tew->work, acpi_table_events_fn); - tew->table = table; - tew->event = event; - - schedule_work(&tew->work); + INIT_WORK(work, acpi_table_events_fn); + schedule_work(work); } int acpi_reconfig_notifier_register(struct notifier_block *nb) diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c index 403c4ff15349..66ec81e306d4 100644 --- a/drivers/acpi/sleep.c +++ b/drivers/acpi/sleep.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * sleep.c - ACPI sleep support. 
* @@ -5,11 +6,10 @@ * Copyright (c) 2004 David Shaohua Li <shaohua.li@intel.com> * Copyright (c) 2000-2003 Patrick Mochel * Copyright (c) 2003 Open Source Development Lab - * - * This file is released under the GPLv2. - * */ +#define pr_fmt(fmt) "ACPI: PM: " fmt + #include <linux/delay.h> #include <linux/irq.h> #include <linux/dmi.h> @@ -43,7 +43,7 @@ static void acpi_sleep_tts_switch(u32 acpi_state) * OS can't evaluate the _TTS object correctly. Some warning * message will be printed. But it won't break anything. */ - printk(KERN_NOTICE "Failure in evaluating _TTS object\n"); + pr_notice("Failure in evaluating _TTS object\n"); } } @@ -60,26 +60,31 @@ static struct notifier_block tts_notifier = { .priority = 0, }; +#ifndef acpi_skip_set_wakeup_address +#define acpi_skip_set_wakeup_address() false +#endif + static int acpi_sleep_prepare(u32 acpi_state) { #ifdef CONFIG_ACPI_SLEEP + unsigned long acpi_wakeup_address; + /* do we have a wakeup address for S2 and S3? */ - if (acpi_state == ACPI_STATE_S3) { + if (acpi_state == ACPI_STATE_S3 && !acpi_skip_set_wakeup_address()) { + acpi_wakeup_address = acpi_get_wakeup_address(); if (!acpi_wakeup_address) return -EFAULT; acpi_set_waking_vector(acpi_wakeup_address); } - ACPI_FLUSH_CPU_CACHE(); #endif - printk(KERN_INFO PREFIX "Preparing to enter system sleep state S%d\n", - acpi_state); + pr_info("Preparing to enter system sleep state S%d\n", acpi_state); acpi_enable_wakeup_devices(acpi_state); acpi_enter_sleep_state_prep(acpi_state); return 0; } -static bool acpi_sleep_state_supported(u8 sleep_state) +bool acpi_sleep_state_supported(u8 sleep_state) { acpi_status status; u8 type_a, type_b; @@ -160,11 +165,11 @@ static int __init init_nvs_nosave(const struct dmi_system_id *d) return 0; } -static bool acpi_sleep_no_lps0; +bool acpi_sleep_default_s3; -static int __init init_no_lps0(const struct dmi_system_id *d) +static int __init init_default_s3(const struct dmi_system_id *d) { - acpi_sleep_no_lps0 = true; + acpi_sleep_default_s3 = true; return 0; } @@ -347,6 +352,20 @@ static const struct dmi_system_id acpisleep_dmi_table[] __initconst = { }, }, /* + * The ASUS ROG M16 from 2023 has many events which wake it from s2idle + resulting in excessive battery drain and risk of laptop overheating; + these events can be caused by the MMC or by the AniMe display if installed. + The match is valid for all of the GU604V<x> range. + */ + { + .callback = init_default_s3, + .ident = "ASUS ROG Zephyrus M16 (2023)", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "ROG Zephyrus M16 GU604V"), + }, + }, + /* * https://bugzilla.kernel.org/show_bug.cgi?id=189431 * Lenovo G50-45 is a platform later than 2012, but needs nvs memory * saving during S3. @@ -359,17 +378,12 @@ static const struct dmi_system_id acpisleep_dmi_table[] __initconst = { DMI_MATCH(DMI_PRODUCT_NAME, "80E3"), }, }, - /* - * https://bugzilla.kernel.org/show_bug.cgi?id=196907 - * Some Dell XPS13 9360 cannot do suspend-to-idle using the Low Power - * S0 Idle firmware interface. - */ { - .callback = init_no_lps0, - .ident = "Dell XPS13 9360", + .callback = init_nvs_save_s3, + .ident = "Lenovo G40-45", .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "XPS 13 9360"), + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "80E1"), }, }, /* @@ -378,7 +392,7 @@ static const struct dmi_system_id acpisleep_dmi_table[] __initconst = { * https://bugzilla.kernel.org/show_bug.cgi?id=199057).
*/ { - .callback = init_no_lps0, + .callback = init_default_s3, .ident = "ThinkPad X1 Tablet(2016)", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), @@ -418,7 +432,7 @@ static int acpi_pm_freeze(void) } /** - * acpi_pre_suspend - Enable wakeup devices, "freeze" EC and save NVS. + * acpi_pm_pre_suspend - Enable wakeup devices, "freeze" EC and save NVS. */ static int acpi_pm_pre_suspend(void) { @@ -454,14 +468,6 @@ static int acpi_pm_prepare(void) return error; } -static int find_powerf_dev(struct device *dev, void *data) -{ - struct acpi_device *device = to_acpi_device(dev); - const char *hid = acpi_device_hid(device); - - return !strcmp(hid, ACPI_BUTTON_HID_POWERF); -} - /** * acpi_pm_finish - Instruct the platform to leave a sleep state. * @@ -470,7 +476,7 @@ static int find_powerf_dev(struct device *dev, void *data) */ static void acpi_pm_finish(void) { - struct device *pwr_btn_dev; + struct acpi_device *pwr_btn_adev; u32 acpi_state = acpi_target_sleep_state; acpi_ec_unblock_transactions(); @@ -479,8 +485,7 @@ static void acpi_pm_finish(void) if (acpi_state == ACPI_STATE_S0) return; - printk(KERN_INFO PREFIX "Waking up from system sleep state S%d\n", - acpi_state); + pr_info("Waking up from system sleep state S%d\n", acpi_state); acpi_disable_wakeup_devices(acpi_state); acpi_leave_sleep_state(acpi_state); @@ -501,16 +506,17 @@ static void acpi_pm_finish(void) return; pwr_btn_event_pending = false; - pwr_btn_dev = bus_find_device(&acpi_bus_type, NULL, NULL, - find_powerf_dev); - if (pwr_btn_dev) { - pm_wakeup_event(pwr_btn_dev, 0); - put_device(pwr_btn_dev); + pwr_btn_adev = acpi_dev_get_first_match_dev(ACPI_BUTTON_HID_POWERF, + NULL, -1); + if (pwr_btn_adev) { + pm_wakeup_event(&pwr_btn_adev->dev, 0); + acpi_dev_put(pwr_btn_adev); } } /** * acpi_pm_start - Start system PM transition. + * @acpi_state: The target ACPI power state to transition to. */ static void acpi_pm_start(u32 acpi_state) { @@ -534,8 +540,9 @@ static void acpi_pm_end(void) acpi_sleep_tts_switch(acpi_target_sleep_state); } #else /* !CONFIG_ACPI_SLEEP */ +#define sleep_no_lps0 (1) #define acpi_target_sleep_state ACPI_STATE_S0 -#define acpi_sleep_no_lps0 (false) +#define acpi_sleep_default_s3 (1) static inline void acpi_sleep_dmi_check(void) {} #endif /* CONFIG_ACPI_SLEEP */ @@ -548,8 +555,9 @@ static u32 acpi_suspend_states[] = { }; /** - * acpi_suspend_begin - Set the target system sleep state to the state - * associated with given @pm_state, if supported. + * acpi_suspend_begin - Set the target system sleep state to the state + * associated with given @pm_state, if supported. + * @pm_state: The target system power management state. */ static int acpi_suspend_begin(suspend_state_t pm_state) { @@ -585,8 +593,6 @@ static int acpi_suspend_enter(suspend_state_t pm_state) u32 acpi_state = acpi_target_sleep_state; int error; - ACPI_FLUSH_CPU_CACHE(); - trace_suspend_resume(TPS("acpi_suspend"), acpi_state, true); switch (acpi_state) { case ACPI_STATE_S1: @@ -600,7 +606,7 @@ static int acpi_suspend_enter(suspend_state_t pm_state) error = acpi_suspend_lowlevel(); if (error) return error; - pr_info(PREFIX "Low-level resume complete\n"); + pr_info("Low-level resume complete\n"); pm_set_resume_via_firmware(); break; } @@ -634,11 +640,19 @@ static int acpi_suspend_enter(suspend_state_t pm_state) } /* - * Disable and clear GPE status before interrupt is enabled. Some GPEs - * (like wakeup GPE) haven't handler, this can avoid such GPE misfire. 
- * acpi_leave_sleep_state will reenable specific GPEs later + * Disable all GPE and clear their status bits before interrupts are + * enabled. Some GPEs (like wakeup GPEs) have no handlers and this can + * prevent them from producing spurious interrupts. + * + * acpi_leave_sleep_state() will reenable specific GPEs later. + * + * Because this code runs on one CPU with disabled interrupts (all of + * the other CPUs are offline at this time), it need not acquire any + * sleeping locks which may trigger an implicit preemption point even + * if there is no contention, so avoid doing that by using a low-level + * library routine here. */ - acpi_disable_all_gpes(); + acpi_hw_disable_all_gpes(); /* Allow EC transactions to happen. */ acpi_ec_unblock_transactions(); @@ -673,10 +687,11 @@ static const struct platform_suspend_ops acpi_suspend_ops = { }; /** - * acpi_suspend_begin_old - Set the target system sleep state to the - * state associated with given @pm_state, if supported, and - * execute the _PTS control method. This function is used if the - * pre-ACPI 2.0 suspend ordering has been requested. + * acpi_suspend_begin_old - Set the target system sleep state to the + * state associated with given @pm_state, if supported, and + * execute the _PTS control method. This function is used if the + * pre-ACPI 2.0 suspend ordering has been requested. + * @pm_state: The target suspend state for the system. */ static int acpi_suspend_begin_old(suspend_state_t pm_state) { @@ -701,346 +716,124 @@ static const struct platform_suspend_ops acpi_suspend_ops_old = { .recover = acpi_pm_finish, }; -static bool s2idle_in_progress; static bool s2idle_wakeup; -/* - * On platforms supporting the Low Power S0 Idle interface there is an ACPI - * device object with the PNP0D80 compatible device ID (System Power Management - * Controller) and a specific _DSM method under it. That method, if present, - * can be used to indicate to the platform that the OS is transitioning into a - * low-power state in which certain types of activity are not desirable or that - * it is leaving such a state, which allows the platform to adjust its operation - * mode accordingly. - */ -static const struct acpi_device_id lps0_device_ids[] = { - {"PNP0D80", }, - {"", }, -}; - -#define ACPI_LPS0_DSM_UUID "c4eb40a0-6cd2-11e2-bcfd-0800200c9a66" - -#define ACPI_LPS0_GET_DEVICE_CONSTRAINTS 1 -#define ACPI_LPS0_SCREEN_OFF 3 -#define ACPI_LPS0_SCREEN_ON 4 -#define ACPI_LPS0_ENTRY 5 -#define ACPI_LPS0_EXIT 6 - -static acpi_handle lps0_device_handle; -static guid_t lps0_dsm_guid; -static char lps0_dsm_func_mask; - -/* Device constraint entry structure */ -struct lpi_device_info { - char *name; - int enabled; - union acpi_object *package; -}; - -/* Constraint package structure */ -struct lpi_device_constraint { - int uid; - int min_dstate; - int function_states; -}; - -struct lpi_constraints { - acpi_handle handle; - int min_dstate; -}; - -static struct lpi_constraints *lpi_constraints_table; -static int lpi_constraints_table_size; - -static void lpi_device_get_constraints(void) +int acpi_s2idle_begin(void) { - union acpi_object *out_obj; - int i; - - out_obj = acpi_evaluate_dsm_typed(lps0_device_handle, &lps0_dsm_guid, - 1, ACPI_LPS0_GET_DEVICE_CONSTRAINTS, - NULL, ACPI_TYPE_PACKAGE); - - acpi_handle_debug(lps0_device_handle, "_DSM function 1 eval %s\n", - out_obj ? 
"successful" : "failed"); - - if (!out_obj) - return; - - lpi_constraints_table = kcalloc(out_obj->package.count, - sizeof(*lpi_constraints_table), - GFP_KERNEL); - if (!lpi_constraints_table) - goto free_acpi_buffer; - - acpi_handle_debug(lps0_device_handle, "LPI: constraints list begin:\n"); - - for (i = 0; i < out_obj->package.count; i++) { - struct lpi_constraints *constraint; - acpi_status status; - union acpi_object *package = &out_obj->package.elements[i]; - struct lpi_device_info info = { }; - int package_count = 0, j; - - if (!package) - continue; - - for (j = 0; j < package->package.count; ++j) { - union acpi_object *element = - &(package->package.elements[j]); - - switch (element->type) { - case ACPI_TYPE_INTEGER: - info.enabled = element->integer.value; - break; - case ACPI_TYPE_STRING: - info.name = element->string.pointer; - break; - case ACPI_TYPE_PACKAGE: - package_count = element->package.count; - info.package = element->package.elements; - break; - } - } - - if (!info.enabled || !info.package || !info.name) - continue; - - constraint = &lpi_constraints_table[lpi_constraints_table_size]; - - status = acpi_get_handle(NULL, info.name, &constraint->handle); - if (ACPI_FAILURE(status)) - continue; - - acpi_handle_debug(lps0_device_handle, - "index:%d Name:%s\n", i, info.name); - - constraint->min_dstate = -1; - - for (j = 0; j < package_count; ++j) { - union acpi_object *info_obj = &info.package[j]; - union acpi_object *cnstr_pkg; - union acpi_object *obj; - struct lpi_device_constraint dev_info; - - switch (info_obj->type) { - case ACPI_TYPE_INTEGER: - /* version */ - break; - case ACPI_TYPE_PACKAGE: - if (info_obj->package.count < 2) - break; - - cnstr_pkg = info_obj->package.elements; - obj = &cnstr_pkg[0]; - dev_info.uid = obj->integer.value; - obj = &cnstr_pkg[1]; - dev_info.min_dstate = obj->integer.value; - - acpi_handle_debug(lps0_device_handle, - "uid:%d min_dstate:%s\n", - dev_info.uid, - acpi_power_state_string(dev_info.min_dstate)); - - constraint->min_dstate = dev_info.min_dstate; - break; - } - } - - if (constraint->min_dstate < 0) { - acpi_handle_debug(lps0_device_handle, - "Incomplete constraint defined\n"); - continue; - } - - lpi_constraints_table_size++; - } - - acpi_handle_debug(lps0_device_handle, "LPI: constraints list end\n"); - -free_acpi_buffer: - ACPI_FREE(out_obj); + acpi_scan_lock_acquire(); + return 0; } -static void lpi_check_constraints(void) +int acpi_s2idle_prepare(void) { - int i; - - for (i = 0; i < lpi_constraints_table_size; ++i) { - acpi_handle handle = lpi_constraints_table[i].handle; - struct acpi_device *adev; - - if (!handle || acpi_bus_get_device(handle, &adev)) - continue; + if (acpi_sci_irq_valid()) { + int error; - acpi_handle_debug(handle, - "LPI: required min power state:%s current power state:%s\n", - acpi_power_state_string(lpi_constraints_table[i].min_dstate), - acpi_power_state_string(adev->power.state)); - - if (!adev->flags.power_manageable) { - acpi_handle_info(handle, "LPI: Device not power manageable\n"); - lpi_constraints_table[i].handle = NULL; - continue; - } + error = enable_irq_wake(acpi_sci_irq); + if (error) + pr_warn("Warning: Failed to enable wakeup from IRQ %d: %d\n", + acpi_sci_irq, error); - if (adev->power.state < lpi_constraints_table[i].min_dstate) - acpi_handle_info(handle, - "LPI: Constraint not met; min power state:%s current power state:%s\n", - acpi_power_state_string(lpi_constraints_table[i].min_dstate), - acpi_power_state_string(adev->power.state)); + acpi_ec_set_gpe_wake_mask(ACPI_GPE_ENABLE); } -} 
-static void acpi_sleep_run_lps0_dsm(unsigned int func) -{ - union acpi_object *out_obj; + acpi_enable_wakeup_devices(ACPI_STATE_S0); - if (!(lps0_dsm_func_mask & (1 << func))) - return; - - out_obj = acpi_evaluate_dsm(lps0_device_handle, &lps0_dsm_guid, 1, func, NULL); - ACPI_FREE(out_obj); + /* Change the configuration of GPEs to avoid spurious wakeup. */ + acpi_enable_all_wakeup_gpes(); + acpi_os_wait_events_complete(); - acpi_handle_debug(lps0_device_handle, "_DSM function %u evaluation %s\n", - func, out_obj ? "successful" : "failed"); + s2idle_wakeup = true; + return 0; } -static int lps0_device_attach(struct acpi_device *adev, - const struct acpi_device_id *not_used) +bool acpi_s2idle_wake(void) { - union acpi_object *out_obj; - - if (lps0_device_handle) - return 0; - - if (acpi_sleep_no_lps0) { - acpi_handle_info(adev->handle, - "Low Power S0 Idle interface disabled\n"); - return 0; - } + if (!acpi_sci_irq_valid()) + return pm_wakeup_pending(); - if (!(acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0)) - return 0; - - guid_parse(ACPI_LPS0_DSM_UUID, &lps0_dsm_guid); - /* Check if the _DSM is present and as expected. */ - out_obj = acpi_evaluate_dsm(adev->handle, &lps0_dsm_guid, 1, 0, NULL); - if (out_obj && out_obj->type == ACPI_TYPE_BUFFER) { - char bitmask = *(char *)out_obj->buffer.pointer; - - lps0_dsm_func_mask = bitmask; - lps0_device_handle = adev->handle; + while (pm_wakeup_pending()) { /* - * Use suspend-to-idle by default if the default - * suspend mode was not set from the command line. + * If IRQD_WAKEUP_ARMED is set for the SCI at this point, the + * SCI has not triggered while suspended, so bail out (the + * wakeup is pending anyway and the SCI is not the source of + * it). */ - if (mem_sleep_default > PM_SUSPEND_MEM) - mem_sleep_current = PM_SUSPEND_TO_IDLE; - - acpi_handle_debug(adev->handle, "_DSM function mask: 0x%x\n", - bitmask); + if (irqd_is_wakeup_armed(irq_get_irq_data(acpi_sci_irq))) { + pm_pr_dbg("Wakeup unrelated to ACPI SCI\n"); + return true; + } - acpi_ec_mark_gpe_for_wake(); - } else { - acpi_handle_debug(adev->handle, - "_DSM function 0 evaluation failed\n"); - } - ACPI_FREE(out_obj); + /* + * If the status bit of any enabled fixed event is set, the + * wakeup is regarded as valid. + */ + if (acpi_any_fixed_event_status_set()) { + pm_pr_dbg("ACPI fixed event wakeup\n"); + return true; + } - lpi_device_get_constraints(); + /* Check wakeups from drivers sharing the SCI. */ + if (acpi_check_wakeup_handlers()) { + pm_pr_dbg("ACPI custom handler wakeup\n"); + return true; + } - return 0; -} + /* + * Check non-EC GPE wakeups and if there are none, cancel the + * SCI-related wakeup and dispatch the EC GPE. + */ + if (acpi_ec_dispatch_gpe()) { + pm_pr_dbg("ACPI non-EC GPE wakeup\n"); + return true; + } -static struct acpi_scan_handler lps0_handler = { - .ids = lps0_device_ids, - .attach = lps0_device_attach, -}; + acpi_os_wait_events_complete(); -static int acpi_s2idle_begin(void) -{ - acpi_scan_lock_acquire(); - s2idle_in_progress = true; - return 0; -} + /* + * The SCI is in the "suspended" state now and it cannot produce + * new wakeup events till the rearming below, so if any of them + * are pending here, they must be resulting from the processing + * of EC events above or coming from somewhere else. 
+ */ + if (pm_wakeup_pending()) { + pm_pr_dbg("Wakeup after ACPI Notify sync\n"); + return true; + } -static int acpi_s2idle_prepare(void) -{ - if (lps0_device_handle) { - acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_OFF); - acpi_sleep_run_lps0_dsm(ACPI_LPS0_ENTRY); + pm_pr_dbg("Rearming ACPI SCI for wakeup\n"); - acpi_ec_set_gpe_wake_mask(ACPI_GPE_ENABLE); + pm_wakeup_clear(acpi_sci_irq); + rearm_wake_irq(acpi_sci_irq); } - if (acpi_sci_irq_valid()) - enable_irq_wake(acpi_sci_irq); - - /* Change the configuration of GPEs to avoid spurious wakeup. */ - acpi_enable_all_wakeup_gpes(); - acpi_os_wait_events_complete(); - return 0; + return false; } -static void acpi_s2idle_wake(void) +void acpi_s2idle_restore(void) { - if (!lps0_device_handle) - return; - - if (pm_debug_messages_on) - lpi_check_constraints(); - /* - * If IRQD_WAKEUP_ARMED is not set for the SCI at this point, it means - * that the SCI has triggered while suspended, so cancel the wakeup in - * case it has not been a wakeup event (the GPEs will be checked later). + * Drain pending events before restoring the working-state configuration + * of GPEs. */ - if (acpi_sci_irq_valid() && - !irqd_is_wakeup_armed(irq_get_irq_data(acpi_sci_irq))) { - pm_system_cancel_wakeup(); - s2idle_wakeup = true; - /* - * On some platforms with the LPS0 _DSM device noirq resume - * takes too much time for EC wakeup events to survive, so look - * for them now. - */ - acpi_ec_dispatch_gpe(); - } -} + acpi_os_wait_events_complete(); /* synchronize GPE processing */ + acpi_ec_flush_work(); /* flush the EC driver's workqueues */ + acpi_os_wait_events_complete(); /* synchronize Notify handling */ -static void acpi_s2idle_sync(void) -{ - /* - * Process all pending events in case there are any wakeup ones. - * - * The EC driver uses the system workqueue and an additional special - * one, so those need to be flushed too. - */ - acpi_os_wait_events_complete(); /* synchronize SCI IRQ handling */ - acpi_ec_flush_work(); - acpi_os_wait_events_complete(); /* synchronize Notify handling */ s2idle_wakeup = false; -} -static void acpi_s2idle_restore(void) -{ acpi_enable_all_runtime_gpes(); - if (acpi_sci_irq_valid()) - disable_irq_wake(acpi_sci_irq); + acpi_disable_wakeup_devices(ACPI_STATE_S0); - if (lps0_device_handle) { + if (acpi_sci_irq_valid()) { acpi_ec_set_gpe_wake_mask(ACPI_GPE_DISABLE); - - acpi_sleep_run_lps0_dsm(ACPI_LPS0_EXIT); - acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_ON); + disable_irq_wake(acpi_sci_irq); } } -static void acpi_s2idle_end(void) +void acpi_s2idle_end(void) { - s2idle_in_progress = false; acpi_scan_lock_release(); } @@ -1048,30 +841,38 @@ static const struct platform_s2idle_ops acpi_s2idle_ops = { .begin = acpi_s2idle_begin, .prepare = acpi_s2idle_prepare, .wake = acpi_s2idle_wake, - .sync = acpi_s2idle_sync, .restore = acpi_s2idle_restore, .end = acpi_s2idle_end, }; -static void acpi_sleep_suspend_setup(void) +void __weak acpi_s2idle_setup(void) { + if (acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0) + pr_info("Efficient low-power S0 idle declared\n"); + + s2idle_set_ops(&acpi_s2idle_ops); +} + +static void __init acpi_sleep_suspend_setup(void) +{ + bool suspend_ops_needed = false; int i; for (i = ACPI_STATE_S1; i < ACPI_STATE_S4; i++) - if (acpi_sleep_state_supported(i)) + if (acpi_sleep_state_supported(i)) { sleep_states[i] = 1; + suspend_ops_needed = true; + } - suspend_set_ops(old_suspend_ordering ? - &acpi_suspend_ops_old : &acpi_suspend_ops); + if (suspend_ops_needed) + suspend_set_ops(old_suspend_ordering ? 
+ &acpi_suspend_ops_old : &acpi_suspend_ops); - acpi_scan_add_handler(&lps0_handler); - s2idle_set_ops(&acpi_s2idle_ops); + acpi_s2idle_setup(); } #else /* !CONFIG_SUSPEND */ -#define s2idle_in_progress (false) #define s2idle_wakeup (false) -#define lps0_device_handle (NULL) static inline void acpi_sleep_suspend_setup(void) {} #endif /* !CONFIG_SUSPEND */ @@ -1080,21 +881,16 @@ bool acpi_s2idle_wakeup(void) return s2idle_wakeup; } -bool acpi_sleep_no_ec_events(void) -{ - return !s2idle_in_progress || !lps0_device_handle; -} - #ifdef CONFIG_PM_SLEEP static u32 saved_bm_rld; -static int acpi_save_bm_rld(void) +static int acpi_save_bm_rld(void *data) { acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_RLD, &saved_bm_rld); return 0; } -static void acpi_restore_bm_rld(void) +static void acpi_restore_bm_rld(void *data) { u32 resumed_bm_rld = 0; @@ -1105,14 +901,18 @@ static void acpi_restore_bm_rld(void) acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, saved_bm_rld); } -static struct syscore_ops acpi_sleep_syscore_ops = { +static const struct syscore_ops acpi_sleep_syscore_ops = { .suspend = acpi_save_bm_rld, .resume = acpi_restore_bm_rld, }; +static struct syscore acpi_sleep_syscore = { + .ops = &acpi_sleep_syscore_ops, +}; + static void acpi_sleep_syscore_init(void) { - register_syscore_ops(&acpi_sleep_syscore_ops); + register_syscore(&acpi_sleep_syscore); } #else static inline void acpi_sleep_syscore_init(void) {} @@ -1121,30 +921,27 @@ static inline void acpi_sleep_syscore_init(void) {} #ifdef CONFIG_HIBERNATION static unsigned long s4_hardware_signature; static struct acpi_table_facs *facs; -static bool nosigcheck; - -void __init acpi_no_s4_hw_signature(void) -{ - nosigcheck = true; -} +int acpi_check_s4_hw_signature = -1; /* Default behaviour is just to warn */ -static int acpi_hibernation_begin(void) +static int acpi_hibernation_begin(pm_message_t stage) { - int error; + if (!nvs_nosave) { + int error = suspend_nvs_alloc(); + if (error) + return error; + } - error = nvs_nosave ? 0 : suspend_nvs_alloc(); - if (!error) - acpi_pm_start(ACPI_STATE_S4); + if (stage.event == PM_EVENT_HIBERNATE) + pm_set_suspend_via_firmware(); - return error; + acpi_pm_start(ACPI_STATE_S4); + return 0; } static int acpi_hibernation_enter(void) { acpi_status status = AE_OK; - ACPI_FLUSH_CPU_CACHE(); - /* This shouldn't return. If it returns, we have a problem */ status = acpi_enter_sleep_state(ACPI_STATE_S4); /* Reprogram control registers */ @@ -1165,7 +962,7 @@ static void acpi_hibernation_leave(void) acpi_leave_sleep_state_prep(ACPI_STATE_S4); /* Check the hardware signature */ if (facs && s4_hardware_signature != facs->hardware_signature) - pr_crit("ACPI: Hardware changed while hibernated, success doubtful!\n"); + pr_crit("Hardware changed while hibernated, success doubtful!\n"); /* Restore the NVS memory area */ suspend_nvs_restore(); /* Allow EC transactions to happen. */ @@ -1191,12 +988,13 @@ static const struct platform_hibernation_ops acpi_hibernation_ops = { }; /** - * acpi_hibernation_begin_old - Set the target system sleep state to - * ACPI_STATE_S4 and execute the _PTS control method. This - * function is used if the pre-ACPI 2.0 suspend ordering has been - * requested. + * acpi_hibernation_begin_old - Set the target system sleep state to + * ACPI_STATE_S4 and execute the _PTS control method. This + * function is used if the pre-ACPI 2.0 suspend ordering has been + * requested. + * @stage: The power management event message. 
*/ -static int acpi_hibernation_begin_old(void) +static int acpi_hibernation_begin_old(pm_message_t stage) { int error; /* @@ -1207,16 +1005,21 @@ static int acpi_hibernation_begin_old(void) acpi_sleep_tts_switch(ACPI_STATE_S4); error = acpi_sleep_prepare(ACPI_STATE_S4); + if (error) + return error; - if (!error) { - if (!nvs_nosave) - error = suspend_nvs_alloc(); - if (!error) { - acpi_target_sleep_state = ACPI_STATE_S4; - acpi_scan_lock_acquire(); - } + if (!nvs_nosave) { + error = suspend_nvs_alloc(); + if (error) + return error; } - return error; + + if (stage.event == PM_EVENT_HIBERNATE) + pm_set_suspend_via_firmware(); + + acpi_target_sleep_state = ACPI_STATE_S4; + acpi_scan_lock_acquire(); + return 0; } /* @@ -1244,31 +1047,49 @@ static void acpi_sleep_hibernate_setup(void) hibernation_set_ops(old_suspend_ordering ? &acpi_hibernation_ops_old : &acpi_hibernation_ops); sleep_states[ACPI_STATE_S4] = 1; - if (nosigcheck) + if (!acpi_check_s4_hw_signature) return; acpi_get_table(ACPI_SIG_FACS, 1, (struct acpi_table_header **)&facs); - if (facs) + if (facs) { + /* + * s4_hardware_signature is the local variable which is just + * used to warn about mismatch after we're attempting to + * resume (in violation of the ACPI specification.) + */ s4_hardware_signature = facs->hardware_signature; + + if (acpi_check_s4_hw_signature > 0) { + /* + * If we're actually obeying the ACPI specification + * then the signature is written out as part of the + * swsusp header, in order to allow the boot kernel + * to gracefully decline to resume. + */ + swsusp_hardware_signature = facs->hardware_signature; + } + } } #else /* !CONFIG_HIBERNATION */ static inline void acpi_sleep_hibernate_setup(void) {} #endif /* !CONFIG_HIBERNATION */ -static void acpi_power_off_prepare(void) +static int acpi_power_off_prepare(struct sys_off_data *data) { /* Prepare to power off the system */ acpi_sleep_prepare(ACPI_STATE_S5); acpi_disable_all_gpes(); acpi_os_wait_events_complete(); + return NOTIFY_DONE; } -static void acpi_power_off(void) +static int acpi_power_off(struct sys_off_data *data) { /* acpi_sleep_prepare(ACPI_STATE_S5) should have already been called */ - printk(KERN_DEBUG "%s called\n", __func__); + pr_debug("%s called\n", __func__); local_irq_disable(); acpi_enter_sleep_state(ACPI_STATE_S5); + return NOTIFY_DONE; } int __init acpi_sleep_init(void) @@ -1287,8 +1108,22 @@ int __init acpi_sleep_init(void) if (acpi_sleep_state_supported(ACPI_STATE_S5)) { sleep_states[ACPI_STATE_S5] = 1; - pm_power_off_prepare = acpi_power_off_prepare; - pm_power_off = acpi_power_off; + + register_sys_off_handler(SYS_OFF_MODE_POWER_OFF_PREPARE, + SYS_OFF_PRIO_FIRMWARE, + acpi_power_off_prepare, NULL); + + register_sys_off_handler(SYS_OFF_MODE_POWER_OFF, + SYS_OFF_PRIO_FIRMWARE, + acpi_power_off, NULL); + + /* + * Windows uses S5 for reboot, so some BIOSes depend on it to + * perform proper reboot. 
+ */ + register_sys_off_handler(SYS_OFF_MODE_RESTART_PREPARE, + SYS_OFF_PRIO_FIRMWARE, + acpi_power_off_prepare, NULL); } else { acpi_no_s5 = true; } @@ -1298,7 +1133,7 @@ int __init acpi_sleep_init(void) if (sleep_states[i]) pos += sprintf(pos, " S%d", i); } - pr_info(PREFIX "(supports%s)\n", supported); + pr_info("(supports%s)\n", supported); /* * Register the tts_notifier to reboot notifier list so that the _TTS diff --git a/drivers/acpi/sleep.h b/drivers/acpi/sleep.h index 41675d24a9bc..9c3cb109c5d2 100644 --- a/drivers/acpi/sleep.h +++ b/drivers/acpi/sleep.h @@ -2,15 +2,29 @@ extern void acpi_enable_wakeup_devices(u8 sleep_state); extern void acpi_disable_wakeup_devices(u8 sleep_state); +extern bool acpi_check_wakeup_handlers(void); extern struct list_head acpi_wakeup_device_list; extern struct mutex acpi_device_lock; extern void acpi_resume_power_resources(void); -extern void acpi_turn_off_unused_power_resources(void); static inline acpi_status acpi_set_waking_vector(u32 wakeup_address) { return acpi_set_firmware_waking_vector( (acpi_physical_address)wakeup_address, 0); } + +extern int acpi_s2idle_begin(void); +extern int acpi_s2idle_prepare(void); +extern bool acpi_s2idle_wake(void); +extern void acpi_s2idle_restore(void); +extern void acpi_s2idle_end(void); + +extern void acpi_s2idle_setup(void); + +#ifdef CONFIG_ACPI_SLEEP +extern bool acpi_sleep_default_s3; +#else +#define acpi_sleep_default_s3 (1) +#endif diff --git a/drivers/acpi/spcr.c b/drivers/acpi/spcr.c index c336784d0bcb..73cb933fdc89 100644 --- a/drivers/acpi/spcr.c +++ b/drivers/acpi/spcr.c @@ -1,12 +1,8 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2012, Intel Corporation * Copyright (c) 2015, Red Hat, Inc. * Copyright (c) 2015, 2016 Linaro Ltd. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * */ #define pr_fmt(fmt) "ACPI: SPCR: " fmt @@ -28,7 +24,7 @@ EXPORT_SYMBOL(qdf2400_e44_present); /* * Some Qualcomm Datacenter Technologies SoCs have a defective UART BUSY bit. - * Detect them by examining the OEM fields in the SPCR header, similiar to PCI + * Detect them by examining the OEM fields in the SPCR header, similar to PCI * quirk detection in pci_mcfg.c. */ static bool qdf2400_erratum_44_present(struct acpi_table_header *h) @@ -75,7 +71,6 @@ static bool xgene_8250_erratum_present(struct acpi_table_spcr *tb) /** * acpi_parse_spcr() - parse ACPI SPCR table and add preferred console - * * @enable_earlycon: set up earlycon for the console specified by the table * @enable_console: setup the console specified by the table. * @@ -86,7 +81,6 @@ static bool xgene_8250_erratum_present(struct acpi_table_spcr *tb) * * When CONFIG_ACPI_SPCR_TABLE is defined, this function should be called * from arch initialization code as soon as the DT/ACPI decision is made. 
- * */ int __init acpi_parse_spcr(bool enable_earlycon, bool enable_console) { @@ -101,9 +95,7 @@ int __init acpi_parse_spcr(bool enable_earlycon, bool enable_console) if (acpi_disabled) return -ENODEV; - status = acpi_get_table(ACPI_SIG_SPCR, 0, - (struct acpi_table_header **)&table); - + status = acpi_get_table(ACPI_SIG_SPCR, 0, (struct acpi_table_header **)&table); if (ACPI_FAILURE(status)) return -ENOENT; @@ -111,11 +103,16 @@ int __init acpi_parse_spcr(bool enable_earlycon, bool enable_console) pr_info("SPCR table version %d\n", table->header.revision); if (table->serial_port.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) { - switch (ACPI_ACCESS_BIT_WIDTH(( - table->serial_port.access_width))) { + u32 bit_width = table->serial_port.access_width; + + if (bit_width > ACPI_ACCESS_BIT_MAX) { + pr_err(FW_BUG "Unacceptable wide SPCR Access Width. Defaulting to byte size\n"); + bit_width = ACPI_ACCESS_BIT_DEFAULT; + } + switch (ACPI_ACCESS_BIT_WIDTH((bit_width))) { default: - pr_err("Unexpected SPCR Access Width. Defaulting to byte size\n"); - /* fall through */ + pr_err(FW_BUG "Unexpected SPCR Access Width. Defaulting to byte size\n"); + fallthrough; case 8: iotype = "mmio"; break; @@ -132,7 +129,7 @@ int __init acpi_parse_spcr(bool enable_earlycon, bool enable_console) switch (table->interface_type) { case ACPI_DBG2_ARM_SBSA_32BIT: iotype = "mmio32"; - /* fall through */ + fallthrough; case ACPI_DBG2_ARM_PL011: case ACPI_DBG2_ARM_SBSA_GENERIC: case ACPI_DBG2_BCM2835: @@ -140,14 +137,27 @@ int __init acpi_parse_spcr(bool enable_earlycon, bool enable_console) break; case ACPI_DBG2_16550_COMPATIBLE: case ACPI_DBG2_16550_SUBSET: + case ACPI_DBG2_16550_WITH_GAS: + case ACPI_DBG2_16550_NVIDIA: uart = "uart"; break; + case ACPI_DBG2_RISCV_SBI_CON: + uart = "sbi"; + break; default: err = -ENOENT; goto done; } - switch (table->baud_rate) { + /* + * SPCR 1.09 defines the Precise Baud Rate field, which contains a specific + * non-zero baud rate that overrides the value of the Configured + * Baud Rate field. If this field is zero or not present, Configured + * Baud Rate is used. + */ + if (table->header.revision >= 4 && table->precise_baudrate) + baud_rate = table->precise_baudrate; + else switch (table->baud_rate) { case 0: /* * SPCR 1.04 defines 0 as a preconfigured state of UART.
@@ -199,7 +209,8 @@ int __init acpi_parse_spcr(bool enable_earlycon, bool enable_console) if (xgene_8250_erratum_present(table)) { iotype = "mmio32"; - /* for xgene v1 and v2 we don't know the clock rate of the + /* + * For xgene v1 and v2 we don't know the clock rate of the * UART so don't attempt to change to the baud rate state * in the table because driver cannot calculate the dividers */ diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c index 41324f0b1bee..e596224302f4 100644 --- a/drivers/acpi/sysfs.c +++ b/drivers/acpi/sysfs.c @@ -5,16 +5,15 @@ #define pr_fmt(fmt) "ACPI: " fmt +#include <linux/acpi.h> +#include <linux/bitmap.h> #include <linux/init.h> #include <linux/kernel.h> +#include <linux/kstrtox.h> #include <linux/moduleparam.h> -#include <linux/acpi.h> #include "internal.h" -#define _COMPONENT ACPI_SYSTEM_COMPONENT -ACPI_MODULE_NAME("sysfs"); - #ifdef CONFIG_ACPI_DEBUG /* * ACPI debug sysfs I/F, including: @@ -51,21 +50,6 @@ static const struct acpi_dlayer acpi_debug_layers[] = { ACPI_DEBUG_INIT(ACPI_CA_DISASSEMBLER), ACPI_DEBUG_INIT(ACPI_COMPILER), ACPI_DEBUG_INIT(ACPI_TOOLS), - - ACPI_DEBUG_INIT(ACPI_BUS_COMPONENT), - ACPI_DEBUG_INIT(ACPI_AC_COMPONENT), - ACPI_DEBUG_INIT(ACPI_BATTERY_COMPONENT), - ACPI_DEBUG_INIT(ACPI_BUTTON_COMPONENT), - ACPI_DEBUG_INIT(ACPI_SBS_COMPONENT), - ACPI_DEBUG_INIT(ACPI_FAN_COMPONENT), - ACPI_DEBUG_INIT(ACPI_PCI_COMPONENT), - ACPI_DEBUG_INIT(ACPI_POWER_COMPONENT), - ACPI_DEBUG_INIT(ACPI_CONTAINER_COMPONENT), - ACPI_DEBUG_INIT(ACPI_SYSTEM_COMPONENT), - ACPI_DEBUG_INIT(ACPI_THERMAL_COMPONENT), - ACPI_DEBUG_INIT(ACPI_MEMORY_DEVICE_COMPONENT), - ACPI_DEBUG_INIT(ACPI_VIDEO_COMPONENT), - ACPI_DEBUG_INIT(ACPI_PROCESSOR_COMPONENT), }; static const struct acpi_dlevel acpi_debug_levels[] = { @@ -214,7 +198,7 @@ static int param_set_trace_method_name(const char *val, static int param_get_trace_method_name(char *buffer, const struct kernel_param *kp) { - return scnprintf(buffer, PAGE_SIZE, "%s", acpi_gbl_trace_method_name); + return sysfs_emit(buffer, "%s\n", acpi_gbl_trace_method_name); } static const struct kernel_param_ops param_ops_trace_method = { @@ -271,17 +255,13 @@ static int param_set_trace_state(const char *val, static int param_get_trace_state(char *buffer, const struct kernel_param *kp) { if (!(acpi_gbl_trace_flags & ACPI_TRACE_ENABLED)) - return sprintf(buffer, "disable"); - else { - if (acpi_gbl_trace_method_name) { - if (acpi_gbl_trace_flags & ACPI_TRACE_ONESHOT) - return sprintf(buffer, "method-once"); - else - return sprintf(buffer, "method"); - } else - return sprintf(buffer, "enable"); - } - return 0; + return sprintf(buffer, "disable\n"); + if (!acpi_gbl_trace_method_name) + return sprintf(buffer, "enable\n"); + if (acpi_gbl_trace_flags & ACPI_TRACE_ONESHOT) + return sprintf(buffer, "method-once\n"); + else + return sprintf(buffer, "method\n"); } module_param_call(trace_state, param_set_trace_state, param_get_trace_state, @@ -302,7 +282,7 @@ static int param_get_acpica_version(char *buffer, { int result; - result = sprintf(buffer, "%x", ACPI_CA_VERSION); + result = sprintf(buffer, "%x\n", ACPI_CA_VERSION); return result; } @@ -327,9 +307,9 @@ static struct kobject *hotplug_kobj; struct acpi_table_attr { struct bin_attribute attr; - char name[ACPI_NAME_SIZE]; + char name[ACPI_NAMESEG_SIZE]; int instance; - char filename[ACPI_NAME_SIZE+ACPI_INST_SIZE]; + char filename[ACPI_NAMESEG_SIZE+ACPI_INST_SIZE]; struct list_head node; }; @@ -339,7 +319,7 @@ struct acpi_data_attr { }; static ssize_t acpi_table_show(struct file *filp, struct 
kobject *kobj, - struct bin_attribute *bin_attr, char *buf, + const struct bin_attribute *bin_attr, char *buf, loff_t offset, size_t count) { struct acpi_table_attr *table_attr = @@ -368,22 +348,21 @@ static int acpi_table_attr_init(struct kobject *tables_obj, char instance_str[ACPI_INST_SIZE]; sysfs_attr_init(&table_attr->attr.attr); - ACPI_MOVE_NAME(table_attr->name, table_header->signature); + ACPI_COPY_NAMESEG(table_attr->name, table_header->signature); list_for_each_entry(attr, &acpi_table_attr_list, node) { - if (ACPI_COMPARE_NAME(table_attr->name, attr->name)) + if (ACPI_COMPARE_NAMESEG(table_attr->name, attr->name)) if (table_attr->instance < attr->instance) table_attr->instance = attr->instance; } table_attr->instance++; if (table_attr->instance > ACPI_MAX_TABLE_INSTANCES) { - pr_warn("%4.4s: too many table instances\n", - table_attr->name); + pr_warn("%4.4s: too many table instances\n", table_attr->name); return -ERANGE; } - ACPI_MOVE_NAME(table_attr->filename, table_header->signature); - table_attr->filename[ACPI_NAME_SIZE] = '\0'; + ACPI_COPY_NAMESEG(table_attr->filename, table_header->signature); + table_attr->filename[ACPI_NAMESEG_SIZE] = '\0'; if (table_attr->instance > 1 || (table_attr->instance == 1 && !acpi_get_table (table_header->signature, 2, &header))) { @@ -406,8 +385,7 @@ acpi_status acpi_sysfs_table_handler(u32 event, void *table, void *context) switch (event) { case ACPI_TABLE_EVENT_INSTALL: - table_attr = - kzalloc(sizeof(struct acpi_table_attr), GFP_KERNEL); + table_attr = kzalloc(sizeof(*table_attr), GFP_KERNEL); if (!table_attr) return AE_NO_MEMORY; @@ -434,23 +412,34 @@ acpi_status acpi_sysfs_table_handler(u32 event, void *table, void *context) } static ssize_t acpi_data_show(struct file *filp, struct kobject *kobj, - struct bin_attribute *bin_attr, char *buf, + const struct bin_attribute *bin_attr, char *buf, loff_t offset, size_t count) { struct acpi_data_attr *data_attr; void __iomem *base; - ssize_t rc; + ssize_t size; data_attr = container_of(bin_attr, struct acpi_data_attr, attr); + size = data_attr->attr.size; + + if (offset < 0) + return -EINVAL; - base = acpi_os_map_memory(data_attr->addr, data_attr->attr.size); + if (offset >= size) + return 0; + + if (count > size - offset) + count = size - offset; + + base = acpi_os_map_iomem(data_attr->addr, size); if (!base) return -ENOMEM; - rc = memory_read_from_buffer(buf, count, &offset, base, - data_attr->attr.size); - acpi_os_unmap_memory(base, data_attr->attr.size); - return rc; + memcpy_fromio(buf, base + offset, count); + + acpi_os_unmap_iomem(base, size); + + return count; } static int acpi_bert_data_init(void *th, struct acpi_data_attr *data_attr) @@ -469,11 +458,28 @@ static int acpi_bert_data_init(void *th, struct acpi_data_attr *data_attr) return sysfs_create_bin_file(tables_data_kobj, &data_attr->attr); } +static int acpi_ccel_data_init(void *th, struct acpi_data_attr *data_attr) +{ + struct acpi_table_ccel *ccel = th; + + if (ccel->header.length < sizeof(struct acpi_table_ccel) || + !ccel->log_area_start_address || !ccel->log_area_minimum_length) { + kfree(data_attr); + return -EINVAL; + } + data_attr->addr = ccel->log_area_start_address; + data_attr->attr.size = ccel->log_area_minimum_length; + data_attr->attr.attr.name = "CCEL"; + + return sysfs_create_bin_file(tables_data_kobj, &data_attr->attr); +} + static struct acpi_data_obj { char *name; int (*fn)(void *, struct acpi_data_attr *); } acpi_data_objs[] = { { ACPI_SIG_BERT, acpi_bert_data_init }, + { ACPI_SIG_CCEL, acpi_ccel_data_init }, }; 
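For reference, the BERT and CCEL regions registered in acpi_data_objs[] above are exposed as binary sysfs attributes, and the reworked acpi_data_show() clamps the requested offset and count to the mapped region size. A minimal userspace sketch of reading such a region might look like the following; the /sys/firmware/acpi/tables/data/BERT path is assumed here and exists only when the firmware actually publishes a BERT:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Assumed path; present only when the platform provides a BERT. */
	const char *path = "/sys/firmware/acpi/tables/data/BERT";
	unsigned char buf[256];
	ssize_t n;
	int fd;

	fd = open(path, O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Short reads are fine: the kernel clamps offset and count to the region size. */
	n = read(fd, buf, sizeof(buf));
	if (n >= 0)
		printf("read %zd bytes of boot error region data\n", n);

	close(fd);
	return 0;
}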
#define NUM_ACPI_DATA_OBJS ARRAY_SIZE(acpi_data_objs) @@ -484,7 +490,7 @@ static int acpi_table_data_init(struct acpi_table_header *th) int i; for (i = 0; i < NUM_ACPI_DATA_OBJS; i++) { - if (ACPI_COMPARE_NAME(th->signature, acpi_data_objs[i].name)) { + if (ACPI_COMPARE_NAMESEG(th->signature, acpi_data_objs[i].name)) { data_attr = kzalloc(sizeof(*data_attr), GFP_KERNEL); if (!data_attr) return -ENOMEM; @@ -600,8 +606,6 @@ static void delete_gpe_attr_array(void) kfree(counter_attrs); } kfree(all_attrs); - - return; } static void gpe_count(u32 gpe_number) @@ -616,8 +620,6 @@ static void gpe_count(u32 gpe_number) else all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_ERROR].count++; - - return; } static void fixed_event_count(u32 event_number) @@ -630,8 +632,6 @@ static void fixed_event_count(u32 event_number) else all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_ERROR].count++; - - return; } static void acpi_global_event_handler(u32 event_type, acpi_handle device, @@ -648,26 +648,28 @@ static void acpi_global_event_handler(u32 event_type, acpi_handle device, } } -static int get_status(u32 index, acpi_event_status *status, +static int get_status(u32 index, acpi_event_status *ret, acpi_handle *handle) { - int result; + acpi_status status; if (index >= num_gpes + ACPI_NUM_FIXED_EVENTS) return -EINVAL; if (index < num_gpes) { - result = acpi_get_gpe_device(index, handle); - if (result) { - ACPI_EXCEPTION((AE_INFO, AE_NOT_FOUND, - "Invalid GPE 0x%x", index)); - return result; + status = acpi_get_gpe_device(index, handle); + if (ACPI_FAILURE(status)) { + pr_warn("Invalid GPE 0x%x", index); + return -ENXIO; } - result = acpi_get_gpe_status(*handle, index, status); - } else if (index < (num_gpes + ACPI_NUM_FIXED_EVENTS)) - result = acpi_get_event_status(index - num_gpes, status); + status = acpi_get_gpe_status(*handle, index, ret); + } else { + status = acpi_get_event_status(index - num_gpes, ret); + } + if (ACPI_FAILURE(status)) + return -EIO; - return result; + return 0; } static ssize_t counter_show(struct kobject *kobj, @@ -753,8 +755,7 @@ static ssize_t counter_set(struct kobject *kobj, goto end; if (!(status & ACPI_EVENT_FLAG_HAS_HANDLER)) { - printk(KERN_WARNING PREFIX - "Can not change Invalid GPE/Fixed Event status\n"); + pr_warn("Can not change Invalid GPE/Fixed Event status\n"); return -EINVAL; } @@ -812,20 +813,26 @@ end: * the GPE flooding for GPE 00, they need to specify the following boot * parameter: * acpi_mask_gpe=0x00 + * Note, the parameter can be a list (see bitmap_parselist() for the details). 
* The masking status can be modified by the following runtime controlling * interface: * echo unmask > /sys/firmware/acpi/interrupts/gpe00 */ -#define ACPI_MASKABLE_GPE_MAX 0xFF +#define ACPI_MASKABLE_GPE_MAX 0x100 static DECLARE_BITMAP(acpi_masked_gpes_map, ACPI_MASKABLE_GPE_MAX) __initdata; static int __init acpi_gpe_set_masked_gpes(char *val) { + int ret; u8 gpe; - if (kstrtou8(val, 0, &gpe) || gpe > ACPI_MASKABLE_GPE_MAX) - return -EINVAL; - set_bit(gpe, acpi_masked_gpes_map); + ret = kstrtou8(val, 0, &gpe); + if (ret) { + ret = bitmap_parselist(val, acpi_masked_gpes_map, ACPI_MASKABLE_GPE_MAX); + if (ret) + return ret; + } else + set_bit(gpe, acpi_masked_gpes_map); return 1; } @@ -835,7 +842,7 @@ void __init acpi_gpe_apply_masked_gpes(void) { acpi_handle handle; acpi_status status; - u8 gpe; + u16 gpe; for_each_set_bit(gpe, acpi_masked_gpes_map, ACPI_MASKABLE_GPE_MAX) { status = acpi_get_gpe_device(gpe, &handle); @@ -857,13 +864,11 @@ void acpi_irq_stats_init(void) num_gpes = acpi_current_gpe_count; num_counters = num_gpes + ACPI_NUM_FIXED_EVENTS + NUM_COUNTERS_EXTRA; - all_attrs = kcalloc(num_counters + 1, sizeof(struct attribute *), - GFP_KERNEL); + all_attrs = kcalloc(num_counters + 1, sizeof(*all_attrs), GFP_KERNEL); if (all_attrs == NULL) return; - all_counters = kcalloc(num_counters, sizeof(struct event_counter), - GFP_KERNEL); + all_counters = kcalloc(num_counters, sizeof(*all_counters), GFP_KERNEL); if (all_counters == NULL) goto fail; @@ -871,8 +876,7 @@ void acpi_irq_stats_init(void) if (ACPI_FAILURE(status)) goto fail; - counter_attrs = kcalloc(num_counters, sizeof(struct kobj_attribute), - GFP_KERNEL); + counter_attrs = kcalloc(num_counters, sizeof(*counter_attrs), GFP_KERNEL); if (counter_attrs == NULL) goto fail; @@ -922,7 +926,6 @@ void acpi_irq_stats_init(void) fail: delete_gpe_attr_array(); - return; } static void __exit interrupt_stats_exit(void) @@ -930,31 +933,24 @@ static void __exit interrupt_stats_exit(void) sysfs_remove_group(acpi_kobj, &interrupt_stats_attr_group); delete_gpe_attr_array(); - - return; } -static ssize_t -acpi_show_profile(struct device *dev, struct device_attribute *attr, - char *buf) +static ssize_t pm_profile_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return sprintf(buf, "%d\n", acpi_gbl_FADT.preferred_profile); } -static const struct device_attribute pm_profile_attr = - __ATTR(pm_profile, S_IRUGO, acpi_show_profile, NULL); +static const struct kobj_attribute pm_profile_attr = __ATTR_RO(pm_profile); -static ssize_t hotplug_enabled_show(struct kobject *kobj, - struct kobj_attribute *attr, char *buf) +static ssize_t enabled_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { struct acpi_hotplug_profile *hotplug = to_acpi_hotplug_profile(kobj); return sprintf(buf, "%d\n", hotplug->enabled); } -static ssize_t hotplug_enabled_store(struct kobject *kobj, - struct kobj_attribute *attr, - const char *buf, size_t size) +static ssize_t enabled_store(struct kobject *kobj, struct kobj_attribute *attr, + const char *buf, size_t size) { struct acpi_hotplug_profile *hotplug = to_acpi_hotplug_profile(kobj); unsigned int val; @@ -966,18 +962,17 @@ static ssize_t hotplug_enabled_store(struct kobject *kobj, return size; } -static struct kobj_attribute hotplug_enabled_attr = - __ATTR(enabled, S_IRUGO | S_IWUSR, hotplug_enabled_show, - hotplug_enabled_store); +static struct kobj_attribute hotplug_enabled_attr = __ATTR_RW(enabled); static struct attribute *hotplug_profile_attrs[] = { &hotplug_enabled_attr.attr, NULL }; 
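As a side note on the .default_attrs to .default_groups conversion in this hunk: the ATTRIBUTE_GROUPS(hotplug_profile) line added below generates an attribute_group wrapping hotplug_profile_attrs plus a NULL-terminated groups array, roughly equivalent to the simplified sketch here, which is what the kobj_type's .default_groups then points at:

/* Simplified view of what ATTRIBUTE_GROUPS(hotplug_profile) provides. */
static const struct attribute_group hotplug_profile_group = {
	.attrs = hotplug_profile_attrs,
};

static const struct attribute_group *hotplug_profile_groups[] = {
	&hotplug_profile_group,
	NULL,
};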
+ATTRIBUTE_GROUPS(hotplug_profile); -static struct kobj_type acpi_hotplug_profile_ktype = { +static const struct kobj_type acpi_hotplug_profile_ktype = { .sysfs_ops = &kobj_sysfs_ops, - .default_attrs = hotplug_profile_attrs, + .default_groups = hotplug_profile_groups, }; void acpi_sysfs_add_hotplug_profile(struct acpi_hotplug_profile *hotplug, @@ -990,14 +985,16 @@ void acpi_sysfs_add_hotplug_profile(struct acpi_hotplug_profile *hotplug, error = kobject_init_and_add(&hotplug->kobj, &acpi_hotplug_profile_ktype, hotplug_kobj, "%s", name); - if (error) + if (error) { + kobject_put(&hotplug->kobj); goto err_out; + } kobject_uevent(&hotplug->kobj, KOBJ_ADD); return; err_out: - pr_err(PREFIX "Unable to add hotplug profile '%s'\n", name); + pr_err("Unable to add hotplug profile '%s'\n", name); } static ssize_t force_remove_show(struct kobject *kobj, @@ -1013,7 +1010,7 @@ static ssize_t force_remove_store(struct kobject *kobj, bool val; int ret; - ret = strtobool(buf, &val); + ret = kstrtobool(buf, &val); if (ret < 0) return ret; @@ -1024,9 +1021,7 @@ static ssize_t force_remove_store(struct kobject *kobj, return size; } -static const struct kobj_attribute force_remove_attr = - __ATTR(force_remove, S_IRUGO | S_IWUSR, force_remove_show, - force_remove_store); +static const struct kobj_attribute force_remove_attr = __ATTR_RW(force_remove); int __init acpi_sysfs_init(void) { diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c index 48eabb6c2d4f..4286e4af1092 100644 --- a/drivers/acpi/tables.c +++ b/drivers/acpi/tables.c @@ -1,22 +1,8 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /* * acpi_tables.c - ACPI Boot-Time Table Parsing * * Copyright (C) 2001 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com> - * - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - * */ /* Uncomment next line to get verbose printout */ @@ -34,6 +20,8 @@ #include <linux/memblock.h> #include <linux/earlycpio.h> #include <linux/initrd.h> +#include <linux/security.h> +#include <linux/kmemleak.h> #include "internal.h" #ifdef CONFIG_ACPI_CUSTOM_DSDT @@ -47,13 +35,13 @@ static char *mps_inti_flags_trigger[] = { "dfl", "edge", "res", "level" }; static struct acpi_table_desc initial_tables[ACPI_MAX_TABLES] __initdata; -static int acpi_apic_instance __initdata; +static int acpi_apic_instance __initdata_or_acpilib; /* * Disable table checksum verification for the early stage due to the size * limitation of the current x86 early mapping implementation. */ -static bool acpi_verify_table_checksum __initdata = false; +static bool acpi_verify_table_checksum __initdata_or_acpilib = false; void acpi_table_print_madt_entry(struct acpi_subtable_header *header) { @@ -68,7 +56,7 @@ void acpi_table_print_madt_entry(struct acpi_subtable_header *header) (struct acpi_madt_local_apic *)header; pr_debug("LAPIC (acpi_id[0x%02x] lapic_id[0x%02x] %s)\n", p->processor_id, p->id, - (p->lapic_flags & ACPI_MADT_ENABLED) ? 
"enabled" : "disabled"); + str_enabled_disabled(p->lapic_flags & ACPI_MADT_ENABLED)); } break; @@ -78,7 +66,7 @@ void acpi_table_print_madt_entry(struct acpi_subtable_header *header) (struct acpi_madt_local_x2apic *)header; pr_debug("X2APIC (apic_id[0x%02x] uid[0x%02x] %s)\n", p->local_apic_id, p->uid, - (p->lapic_flags & ACPI_MADT_ENABLED) ? "enabled" : "disabled"); + str_enabled_disabled(p->lapic_flags & ACPI_MADT_ENABLED)); } break; @@ -151,8 +139,8 @@ void acpi_table_print_madt_entry(struct acpi_subtable_header *header) { struct acpi_madt_local_apic_override *p = (struct acpi_madt_local_apic_override *)header; - pr_info("LAPIC_ADDR_OVR (address[%p])\n", - (void *)(unsigned long)p->address); + pr_info("LAPIC_ADDR_OVR (address[0x%llx])\n", + p->address); } break; @@ -172,7 +160,7 @@ void acpi_table_print_madt_entry(struct acpi_subtable_header *header) (struct acpi_madt_local_sapic *)header; pr_debug("LSAPIC (acpi_id[0x%02x] lsapic_id[0x%02x] lsapic_eid[0x%02x] %s)\n", p->processor_id, p->id, p->eid, - (p->lapic_flags & ACPI_MADT_ENABLED) ? "enabled" : "disabled"); + str_enabled_disabled(p->lapic_flags & ACPI_MADT_ENABLED)); } break; @@ -195,7 +183,7 @@ void acpi_table_print_madt_entry(struct acpi_subtable_header *header) pr_debug("GICC (acpi_id[0x%04x] address[%llx] MPIDR[0x%llx] %s)\n", p->uid, p->base_address, p->arm_mpidr, - (p->flags & ACPI_MADT_ENABLED) ? "enabled" : "disabled"); + str_enabled_disabled(p->flags & ACPI_MADT_ENABLED)); } break; @@ -210,115 +198,50 @@ void acpi_table_print_madt_entry(struct acpi_subtable_header *header) } break; - default: - pr_warn("Found unsupported MADT entry (type = 0x%x)\n", - header->type); - break; - } -} - -/** - * acpi_parse_entries_array - for each proc_num find a suitable subtable - * - * @id: table id (for debugging purposes) - * @table_size: size of the root table - * @table_header: where does the table start? - * @proc: array of acpi_subtable_proc struct containing entry id - * and associated handler with it - * @proc_num: how big proc is? - * @max_entries: how many entries can we process? - * - * For each proc_num find a subtable with proc->id and run proc->handler - * on it. Assumption is that there's only single handler for particular - * entry id. - * - * The table_size is not the size of the complete ACPI table (the length - * field in the header struct), but only the size of the root table; i.e., - * the offset from the very first byte of the complete ACPI table, to the - * first byte of the very first subtable. - * - * On success returns sum of all matching entries for all proc handlers. - * Otherwise, -ENODEV or -EINVAL is returned. - */ -static int __init -acpi_parse_entries_array(char *id, unsigned long table_size, - struct acpi_table_header *table_header, - struct acpi_subtable_proc *proc, int proc_num, - unsigned int max_entries) -{ - struct acpi_subtable_header *entry; - unsigned long table_end; - int count = 0; - int errs = 0; - int i; - - if (acpi_disabled) - return -ENODEV; - - if (!id) - return -EINVAL; - - if (!table_size) - return -EINVAL; - - if (!table_header) { - pr_warn("%4.4s not present\n", id); - return -ENODEV; - } - - table_end = (unsigned long)table_header + table_header->length; - - /* Parse all entries looking for a match. 
*/ + case ACPI_MADT_TYPE_MULTIPROC_WAKEUP: + { + struct acpi_madt_multiproc_wakeup *p = + (struct acpi_madt_multiproc_wakeup *)header; + u64 reset_vector = 0; - entry = (struct acpi_subtable_header *) - ((unsigned long)table_header + table_size); + if (p->version >= ACPI_MADT_MP_WAKEUP_VERSION_V1) + reset_vector = p->reset_vector; - while (((unsigned long)entry) + sizeof(struct acpi_subtable_header) < - table_end) { - if (max_entries && count >= max_entries) - break; + pr_debug("MP Wakeup (version[%d], mailbox[%#llx], reset[%#llx])\n", + p->version, p->mailbox_address, reset_vector); + } + break; - for (i = 0; i < proc_num; i++) { - if (entry->type != proc[i].id) - continue; - if (!proc[i].handler || - (!errs && proc[i].handler(entry, table_end))) { - errs++; - continue; - } + case ACPI_MADT_TYPE_CORE_PIC: + { + struct acpi_madt_core_pic *p = (struct acpi_madt_core_pic *)header; - proc[i].count++; - break; + pr_debug("CORE PIC (processor_id[0x%02x] core_id[0x%02x] %s)\n", + p->processor_id, p->core_id, + str_enabled_disabled(p->flags & ACPI_MADT_ENABLED)); } - if (i != proc_num) - count++; + break; - /* - * If entry->length is 0, break from this loop to avoid - * infinite loop. - */ - if (entry->length == 0) { - pr_err("[%4.4s:0x%02x] Invalid zero length\n", id, proc->id); - return -EINVAL; - } + case ACPI_MADT_TYPE_RINTC: + { + struct acpi_madt_rintc *p = (struct acpi_madt_rintc *)header; - entry = (struct acpi_subtable_header *) - ((unsigned long)entry + entry->length); - } + pr_debug("RISC-V INTC (acpi_uid[0x%04x] hart_id[0x%llx] %s)\n", + p->uid, p->hart_id, + str_enabled_disabled(p->flags & ACPI_MADT_ENABLED)); + } + break; - if (max_entries && count > max_entries) { - pr_warn("[%4.4s:0x%02x] found the maximum %i entries\n", - id, proc->id, count); + default: + pr_warn("Found unsupported MADT entry (type = 0x%x)\n", + header->type); + break; } - - return errs ? 
-EINVAL : count; } -int __init -acpi_table_parse_entries_array(char *id, - unsigned long table_size, - struct acpi_subtable_proc *proc, int proc_num, - unsigned int max_entries) +int __init_or_acpilib acpi_table_parse_entries_array( + char *id, unsigned long table_size, struct acpi_subtable_proc *proc, + int proc_num, unsigned int max_entries) { struct acpi_table_header *table_header = NULL; int count; @@ -330,40 +253,62 @@ acpi_table_parse_entries_array(char *id, if (!id) return -EINVAL; + if (!table_size) + return -EINVAL; + if (!strncmp(id, ACPI_SIG_MADT, 4)) instance = acpi_apic_instance; acpi_get_table(id, instance, &table_header); if (!table_header) { - pr_warn("%4.4s not present\n", id); + pr_debug("%4.4s not present\n", id); return -ENODEV; } - count = acpi_parse_entries_array(id, table_size, table_header, - proc, proc_num, max_entries); + count = acpi_parse_entries_array(id, table_size, + (union fw_table_header *)table_header, + 0, proc, proc_num, max_entries); acpi_put_table(table_header); return count; } -int __init -acpi_table_parse_entries(char *id, - unsigned long table_size, - int entry_id, - acpi_tbl_entry_handler handler, - unsigned int max_entries) +static int __init_or_acpilib __acpi_table_parse_entries( + char *id, unsigned long table_size, int entry_id, + acpi_tbl_entry_handler handler, acpi_tbl_entry_handler_arg handler_arg, + void *arg, unsigned int max_entries) { struct acpi_subtable_proc proc = { .id = entry_id, .handler = handler, + .handler_arg = handler_arg, + .arg = arg, }; return acpi_table_parse_entries_array(id, table_size, &proc, 1, max_entries); } -int __init -acpi_table_parse_madt(enum acpi_madt_type id, +int __init_or_acpilib +acpi_table_parse_cedt(enum acpi_cedt_type id, + acpi_tbl_entry_handler_arg handler_arg, void *arg) +{ + return __acpi_table_parse_entries(ACPI_SIG_CEDT, + sizeof(struct acpi_table_cedt), id, + NULL, handler_arg, arg, 0); +} +EXPORT_SYMBOL_ACPI_LIB(acpi_table_parse_cedt); + +int __init acpi_table_parse_entries(char *id, unsigned long table_size, + int entry_id, + acpi_tbl_entry_handler handler, + unsigned int max_entries) +{ + return __acpi_table_parse_entries(id, table_size, entry_id, handler, + NULL, NULL, max_entries); +} + +int __init acpi_table_parse_madt(enum acpi_madt_type id, acpi_tbl_entry_handler handler, unsigned int max_entries) { return acpi_table_parse_entries(ACPI_SIG_MADT, @@ -451,17 +396,19 @@ static u8 __init acpi_table_checksum(u8 *buffer, u32 length) } /* All but ACPI_SIG_RSDP and ACPI_SIG_FACS: */ -static const char * const table_sigs[] = { - ACPI_SIG_BERT, ACPI_SIG_CPEP, ACPI_SIG_ECDT, ACPI_SIG_EINJ, - ACPI_SIG_ERST, ACPI_SIG_HEST, ACPI_SIG_MADT, ACPI_SIG_MSCT, - ACPI_SIG_SBST, ACPI_SIG_SLIT, ACPI_SIG_SRAT, ACPI_SIG_ASF, - ACPI_SIG_BOOT, ACPI_SIG_DBGP, ACPI_SIG_DMAR, ACPI_SIG_HPET, - ACPI_SIG_IBFT, ACPI_SIG_IVRS, ACPI_SIG_MCFG, ACPI_SIG_MCHI, - ACPI_SIG_SLIC, ACPI_SIG_SPCR, ACPI_SIG_SPMI, ACPI_SIG_TCPA, - ACPI_SIG_UEFI, ACPI_SIG_WAET, ACPI_SIG_WDAT, ACPI_SIG_WDDT, - ACPI_SIG_WDRT, ACPI_SIG_DSDT, ACPI_SIG_FADT, ACPI_SIG_PSDT, - ACPI_SIG_RSDT, ACPI_SIG_XSDT, ACPI_SIG_SSDT, ACPI_SIG_IORT, - ACPI_SIG_NFIT, ACPI_SIG_HMAT, ACPI_SIG_PPTT, NULL }; +static const char table_sigs[][ACPI_NAMESEG_SIZE] __nonstring_array __initconst = { + ACPI_SIG_BERT, ACPI_SIG_BGRT, ACPI_SIG_CPEP, ACPI_SIG_ECDT, + ACPI_SIG_EINJ, ACPI_SIG_ERST, ACPI_SIG_HEST, ACPI_SIG_MADT, + ACPI_SIG_MSCT, ACPI_SIG_SBST, ACPI_SIG_SLIT, ACPI_SIG_SRAT, + ACPI_SIG_ASF, ACPI_SIG_BOOT, ACPI_SIG_DBGP, ACPI_SIG_DMAR, + ACPI_SIG_HPET, ACPI_SIG_IBFT, ACPI_SIG_IVRS, 
ACPI_SIG_MCFG, + ACPI_SIG_MCHI, ACPI_SIG_SLIC, ACPI_SIG_SPCR, ACPI_SIG_SPMI, + ACPI_SIG_TCPA, ACPI_SIG_UEFI, ACPI_SIG_WAET, ACPI_SIG_WDAT, + ACPI_SIG_WDDT, ACPI_SIG_WDRT, ACPI_SIG_DSDT, ACPI_SIG_FADT, + ACPI_SIG_PSDT, ACPI_SIG_RSDT, ACPI_SIG_XSDT, ACPI_SIG_SSDT, + ACPI_SIG_IORT, ACPI_SIG_NFIT, ACPI_SIG_HMAT, ACPI_SIG_PPTT, + ACPI_SIG_NHLT, ACPI_SIG_AEST, ACPI_SIG_CEDT, ACPI_SIG_AGDI, + ACPI_SIG_NBFT, ACPI_SIG_SWFT, ACPI_SIG_MPAM}; #define ACPI_HEADER_SIZE sizeof(struct acpi_table_header) @@ -473,14 +420,22 @@ static DECLARE_BITMAP(acpi_initrd_installed, NR_ACPI_INITRD_TABLES); void __init acpi_table_upgrade(void) { - void *data = (void *)initrd_start; - size_t size = initrd_end - initrd_start; + void *data; + size_t size; int sig, no, table_nr = 0, total_offset = 0; long offset = 0; struct acpi_table_header *table; char cpio_path[32] = "kernel/firmware/acpi/"; struct cpio_data file; + if (IS_ENABLED(CONFIG_ACPI_TABLE_OVERRIDE_VIA_BUILTIN_INITRD)) { + data = __initramfs_start; + size = __initramfs_size; + } else { + data = (void *)initrd_start; + size = initrd_end - initrd_start; + } + if (data == NULL || size == 0) return; @@ -500,11 +455,11 @@ void __init acpi_table_upgrade(void) table = file.data; - for (sig = 0; table_sigs[sig]; sig++) + for (sig = 0; sig < ARRAY_SIZE(table_sigs); sig++) if (!memcmp(table->signature, table_sigs[sig], 4)) break; - if (!table_sigs[sig]) { + if (sig >= ARRAY_SIZE(table_sigs)) { pr_err("ACPI OVERRIDE: Unknown signature [%s%s]\n", cpio_path, file.name); continue; @@ -531,9 +486,14 @@ void __init acpi_table_upgrade(void) if (table_nr == 0) return; + if (security_locked_down(LOCKDOWN_ACPI_TABLES)) { + pr_notice("kernel is locked down, ignoring table override\n"); + return; + } + acpi_tables_addr = - memblock_find_in_range(0, ACPI_TABLE_UPGRADE_MAX_PHYS, - all_tables_size, PAGE_SIZE); + memblock_phys_alloc_range(all_tables_size, PAGE_SIZE, + 0, ACPI_TABLE_UPGRADE_MAX_PHYS); if (!acpi_tables_addr) { WARN_ON(1); return; @@ -548,9 +508,10 @@ void __init acpi_table_upgrade(void) * Both memblock_reserve and e820__range_add (via arch_reserve_mem_area) * works fine. */ - memblock_reserve(acpi_tables_addr, all_tables_size); arch_reserve_mem_area(acpi_tables_addr, all_tables_size); + kmemleak_ignore_phys(acpi_tables_addr); + /* * early_ioremap only can remap 256k one time. If we map all * tables one time, we will hit the limit. Need to map chunks @@ -662,15 +623,15 @@ static void __init acpi_table_initrd_scan(void) table_length = table->length; /* Skip RSDT/XSDT which should only be used for override */ - if (ACPI_COMPARE_NAME(table->signature, ACPI_SIG_RSDT) || - ACPI_COMPARE_NAME(table->signature, ACPI_SIG_XSDT)) { + if (ACPI_COMPARE_NAMESEG(table->signature, ACPI_SIG_RSDT) || + ACPI_COMPARE_NAMESEG(table->signature, ACPI_SIG_XSDT)) { acpi_os_unmap_memory(table, ACPI_HEADER_SIZE); goto next_table; } /* * Mark the table to avoid being used in * acpi_table_initrd_override(). Though this is not possible - * because override is disabled in acpi_install_table(). + * because override is disabled in acpi_install_physical_table(). 
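+	 * The bit is still set here as a defensive measure.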
*/ if (test_and_set_bit(table_index, acpi_initrd_installed)) { acpi_os_unmap_memory(table, ACPI_HEADER_SIZE); @@ -681,7 +642,7 @@ static void __init acpi_table_initrd_scan(void) table->signature, table->oem_id, table->oem_table_id); acpi_os_unmap_memory(table, ACPI_HEADER_SIZE); - acpi_install_table(acpi_tables_addr + table_offset, TRUE); + acpi_install_physical_table(acpi_tables_addr + table_offset); next_table: table_offset += table_length; table_index++; @@ -717,8 +678,7 @@ static void *amlcode __attribute__ ((weakref("AmlCode"))); static void *dsdt_amlcode __attribute__ ((weakref("dsdt_aml_code"))); #endif -acpi_status -acpi_os_table_override(struct acpi_table_header *existing_table, +acpi_status acpi_os_table_override(struct acpi_table_header *existing_table, struct acpi_table_header **new_table) { if (!existing_table || !new_table) @@ -739,15 +699,14 @@ acpi_os_table_override(struct acpi_table_header *existing_table, } /* - * acpi_table_init() + * acpi_locate_initial_tables() * - * find RSDP, find and checksum SDT/XSDT. - * checksum all tables, print SDT/XSDT + * Get the RSDP, then find and checksum all the ACPI tables. * - * result: sdt_entry[] is initialized + * result: initial_tables[] is initialized, and points to + * a list of ACPI tables. */ - -int __init acpi_table_init(void) +int __init acpi_locate_initial_tables(void) { acpi_status status; @@ -760,11 +719,51 @@ int __init acpi_table_init(void) } status = acpi_initialize_tables(initial_tables, ACPI_MAX_TABLES, 0); - if (ACPI_FAILURE(status)) + if (ACPI_FAILURE(status)) { + const char *msg = acpi_format_exception(status); + + pr_warn("Failed to initialize tables, status=0x%x (%s)", status, msg); return -EINVAL; - acpi_table_initrd_scan(); + } + return 0; +} + +void __init acpi_reserve_initial_tables(void) +{ + int i; + + for (i = 0; i < ACPI_MAX_TABLES; i++) { + struct acpi_table_desc *table_desc = &initial_tables[i]; + u64 start = table_desc->address; + u64 size = table_desc->length; + + if (!start || !size) + break; + + pr_info("Reserving %4s table memory at [mem 0x%llx-0x%llx]\n", + table_desc->signature.ascii, start, start + size - 1); + + memblock_reserve(start, size); + } +} + +void __init acpi_table_init_complete(void) +{ + acpi_table_initrd_scan(); check_multiple_madt(); +} + +int __init acpi_table_init(void) +{ + int ret; + + ret = acpi_locate_initial_tables(); + if (ret) + return ret; + + acpi_table_init_complete(); + return 0; } @@ -780,7 +779,6 @@ static int __init acpi_parse_apic_instance(char *str) return 0; } - early_param("acpi_apic_instance", acpi_parse_apic_instance); static int __init acpi_force_table_verification_setup(char *s) @@ -789,7 +787,6 @@ static int __init acpi_force_table_verification_setup(char *s) return 0; } - early_param("acpi_force_table_verification", acpi_force_table_verification_setup); static int __init acpi_force_32bit_fadt_addr(char *s) @@ -799,5 +796,4 @@ static int __init acpi_force_32bit_fadt_addr(char *s) return 0; } - early_param("acpi_force_32bit_fadt_addr", acpi_force_32bit_fadt_addr); diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c index 551b71a24b85..a511f9ea0267 100644 --- a/drivers/acpi/thermal.c +++ b/drivers/acpi/thermal.c @@ -1,32 +1,20 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /* * acpi_thermal.c - ACPI Thermal Zone Driver ($Revision: 41 $) * * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com> * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com> * - * 
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or (at - * your option) any later version. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - * * This driver fully implements the ACPI thermal policy as described in the * ACPI 2.0 Specification. * * TBD: 1. Implement passive cooling hysteresis. * 2. Enhance passive cooling (CPU) states/limit interface to support * concepts of 'multiple limiters', upper/lower limits, etc. - * */ +#define pr_fmt(fmt) "ACPI: thermal: " fmt + #include <linux/kernel.h> #include <linux/module.h> #include <linux/dmi.h> @@ -41,8 +29,9 @@ #include <linux/acpi.h> #include <linux/workqueue.h> #include <linux/uaccess.h> +#include <linux/units.h> -#define PREFIX "ACPI: " +#include "internal.h" #define ACPI_THERMAL_CLASS "thermal_zone" #define ACPI_THERMAL_DEVICE_NAME "Thermal Zone" @@ -53,15 +42,26 @@ #define ACPI_THERMAL_NOTIFY_HOT 0xF1 #define ACPI_THERMAL_MODE_ACTIVE 0x00 -#define ACPI_THERMAL_MAX_ACTIVE 10 -#define ACPI_THERMAL_MAX_LIMIT_STR_LEN 65 +#define ACPI_THERMAL_MAX_ACTIVE 10 +#define ACPI_THERMAL_MAX_LIMIT_STR_LEN 65 -#define _COMPONENT ACPI_THERMAL_COMPONENT -ACPI_MODULE_NAME("thermal"); +#define ACPI_THERMAL_TRIP_PASSIVE (-1) -MODULE_AUTHOR("Paul Diefenbaugh"); -MODULE_DESCRIPTION("ACPI Thermal Zone Driver"); -MODULE_LICENSE("GPL"); +#define ACPI_THERMAL_MAX_NR_TRIPS (ACPI_THERMAL_MAX_ACTIVE + 3) + +/* + * This exception is thrown out in two cases: + * 1.An invalid trip point becomes invalid or a valid trip point becomes invalid + * when re-evaluating the AML code. + * 2.TODO: Devices listed in _PSL, _ALx, _TZD may change. + * We need to re-bind the cooling devices of a thermal zone when this occurs. 
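+ * For now the macro below only logs the change and asks for a report;
+ * the cooling devices are not re-bound.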
+ */ +#define ACPI_THERMAL_TRIPS_EXCEPTION(tz, str) \ +do { \ + acpi_handle_info(tz->device->handle, \ + "ACPI thermal trip point %s changed\n" \ + "Please report to linux-acpi@vger.kernel.org\n", str); \ +} while (0) static int act; module_param(act, int, 0644); @@ -75,10 +75,6 @@ static int tzp; module_param(tzp, int, 0444); MODULE_PARM_DESC(tzp, "Thermal zone polling frequency, in 1/10 seconds."); -static int nocrt; -module_param(nocrt, int, 0); -MODULE_PARM_DESC(nocrt, "Set to take no action upon ACPI thermal zone critical trips points."); - static int off; module_param(off, int, 0); MODULE_PARM_DESC(off, "Set to disable ACPI thermal support."); @@ -89,105 +85,40 @@ MODULE_PARM_DESC(psv, "Disable or override all passive trip points."); static struct workqueue_struct *acpi_thermal_pm_queue; -static int acpi_thermal_add(struct acpi_device *device); -static int acpi_thermal_remove(struct acpi_device *device); -static void acpi_thermal_notify(struct acpi_device *device, u32 event); - -static const struct acpi_device_id thermal_device_ids[] = { - {ACPI_THERMAL_HID, 0}, - {"", 0}, -}; -MODULE_DEVICE_TABLE(acpi, thermal_device_ids); - -#ifdef CONFIG_PM_SLEEP -static int acpi_thermal_suspend(struct device *dev); -static int acpi_thermal_resume(struct device *dev); -#else -#define acpi_thermal_suspend NULL -#define acpi_thermal_resume NULL -#endif -static SIMPLE_DEV_PM_OPS(acpi_thermal_pm, acpi_thermal_suspend, acpi_thermal_resume); - -static struct acpi_driver acpi_thermal_driver = { - .name = "thermal", - .class = ACPI_THERMAL_CLASS, - .ids = thermal_device_ids, - .ops = { - .add = acpi_thermal_add, - .remove = acpi_thermal_remove, - .notify = acpi_thermal_notify, - }, - .drv.pm = &acpi_thermal_pm, -}; - -struct acpi_thermal_state { - u8 critical:1; - u8 hot:1; - u8 passive:1; - u8 active:1; - u8 reserved:4; - int active_index; -}; - -struct acpi_thermal_state_flags { - u8 valid:1; - u8 enabled:1; - u8 reserved:6; -}; - -struct acpi_thermal_critical { - struct acpi_thermal_state_flags flags; - unsigned long temperature; -}; - -struct acpi_thermal_hot { - struct acpi_thermal_state_flags flags; - unsigned long temperature; +struct acpi_thermal_trip { + unsigned long temp_dk; + struct acpi_handle_list devices; }; struct acpi_thermal_passive { - struct acpi_thermal_state_flags flags; - unsigned long temperature; + struct acpi_thermal_trip trip; unsigned long tc1; unsigned long tc2; - unsigned long tsp; - struct acpi_handle_list devices; + unsigned long delay; }; struct acpi_thermal_active { - struct acpi_thermal_state_flags flags; - unsigned long temperature; - struct acpi_handle_list devices; + struct acpi_thermal_trip trip; }; struct acpi_thermal_trips { - struct acpi_thermal_critical critical; - struct acpi_thermal_hot hot; struct acpi_thermal_passive passive; struct acpi_thermal_active active[ACPI_THERMAL_MAX_ACTIVE]; }; -struct acpi_thermal_flags { - u8 cooling_mode:1; /* _SCP */ - u8 devices:1; /* _TZD */ - u8 reserved:6; -}; - struct acpi_thermal { - struct acpi_device * device; + struct acpi_device *device; acpi_bus_id name; - unsigned long temperature; - unsigned long last_temperature; + unsigned long temp_dk; + unsigned long last_temp_dk; unsigned long polling_frequency; volatile u8 zombie; - struct acpi_thermal_flags flags; - struct acpi_thermal_state state; struct acpi_thermal_trips trips; - struct acpi_handle_list devices; struct thermal_zone_device *thermal_zone; - int tz_enabled; - int kelvin_offset; + int kelvin_offset; /* in millidegrees */ struct work_struct thermal_check_work; 
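+	/*
+	 * Used by acpi_thermal_check_fn() to coalesce queued updates and to
+	 * serialize thermal_zone_device_update() calls.
+	 */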
+ struct mutex thermal_check_lock; + refcount_t thermal_check_count; }; /* -------------------------------------------------------------------------- @@ -202,15 +133,16 @@ static int acpi_thermal_get_temperature(struct acpi_thermal *tz) if (!tz) return -EINVAL; - tz->last_temperature = tz->temperature; + tz->last_temp_dk = tz->temp_dk; status = acpi_evaluate_integer(tz->device->handle, "_TMP", NULL, &tmp); if (ACPI_FAILURE(status)) return -ENODEV; - tz->temperature = tmp; - ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Temperature is %lu dK\n", - tz->temperature)); + tz->temp_dk = tmp; + + acpi_handle_debug(tz->device->handle, "Temperature is %lu dK\n", + tz->temp_dk); return 0; } @@ -228,722 +160,507 @@ static int acpi_thermal_get_polling_frequency(struct acpi_thermal *tz) return -ENODEV; tz->polling_frequency = tmp; - ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Polling frequency is %lu dS\n", - tz->polling_frequency)); + acpi_handle_debug(tz->device->handle, "Polling frequency is %lu dS\n", + tz->polling_frequency); return 0; } -static int acpi_thermal_set_cooling_mode(struct acpi_thermal *tz, int mode) +static int acpi_thermal_temp(struct acpi_thermal *tz, int temp_deci_k) { - if (!tz) - return -EINVAL; + int temp; - if (!acpi_has_method(tz->device->handle, "_SCP")) { - ACPI_DEBUG_PRINT((ACPI_DB_INFO, "_SCP not present\n")); - return -ENODEV; - } else if (ACPI_FAILURE(acpi_execute_simple_method(tz->device->handle, - "_SCP", mode))) { - return -ENODEV; - } + if (temp_deci_k == THERMAL_TEMP_INVALID) + return THERMAL_TEMP_INVALID; - return 0; + temp = deci_kelvin_to_millicelsius_with_offset(temp_deci_k, + tz->kelvin_offset); + if (temp <= 0) + return THERMAL_TEMP_INVALID; + + return temp; } -#define ACPI_TRIPS_CRITICAL 0x01 -#define ACPI_TRIPS_HOT 0x02 -#define ACPI_TRIPS_PASSIVE 0x04 -#define ACPI_TRIPS_ACTIVE 0x08 -#define ACPI_TRIPS_DEVICES 0x10 +static bool acpi_thermal_trip_valid(struct acpi_thermal_trip *acpi_trip) +{ + return acpi_trip->temp_dk != THERMAL_TEMP_INVALID; +} -#define ACPI_TRIPS_REFRESH_THRESHOLDS (ACPI_TRIPS_PASSIVE | ACPI_TRIPS_ACTIVE) -#define ACPI_TRIPS_REFRESH_DEVICES ACPI_TRIPS_DEVICES +static int active_trip_index(struct acpi_thermal *tz, + struct acpi_thermal_trip *acpi_trip) +{ + struct acpi_thermal_active *active; -#define ACPI_TRIPS_INIT (ACPI_TRIPS_CRITICAL | ACPI_TRIPS_HOT | \ - ACPI_TRIPS_PASSIVE | ACPI_TRIPS_ACTIVE | \ - ACPI_TRIPS_DEVICES) + active = container_of(acpi_trip, struct acpi_thermal_active, trip); + return active - tz->trips.active; +} -/* - * This exception is thrown out in two cases: - * 1.An invalid trip point becomes invalid or a valid trip point becomes invalid - * when re-evaluating the AML code. - * 2.TODO: Devices listed in _PSL, _ALx, _TZD may change. - * We need to re-bind the cooling devices of a thermal zone when this occurs. 
- */ -#define ACPI_THERMAL_TRIPS_EXCEPTION(flags, str) \ -do { \ - if (flags != ACPI_TRIPS_INIT) \ - ACPI_EXCEPTION((AE_INFO, AE_ERROR, \ - "ACPI thermal trip point %s changed\n" \ - "Please send acpidump to linux-acpi@vger.kernel.org", str)); \ -} while (0) +static long get_passive_temp(struct acpi_thermal *tz) +{ + int temp; -static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag) + if (acpi_passive_trip_temp(tz->device, &temp)) + return THERMAL_TEMP_INVALID; + + return temp; +} + +static long get_active_temp(struct acpi_thermal *tz, int index) { - acpi_status status = AE_OK; - unsigned long long tmp; - struct acpi_handle_list devices; - int valid = 0; - int i; + int temp; - /* Critical Shutdown */ - if (flag & ACPI_TRIPS_CRITICAL) { - status = acpi_evaluate_integer(tz->device->handle, - "_CRT", NULL, &tmp); - tz->trips.critical.temperature = tmp; - /* - * Treat freezing temperatures as invalid as well; some - * BIOSes return really low values and cause reboots at startup. - * Below zero (Celsius) values clearly aren't right for sure.. - * ... so lets discard those as invalid. - */ - if (ACPI_FAILURE(status)) { - tz->trips.critical.flags.valid = 0; - ACPI_DEBUG_PRINT((ACPI_DB_INFO, - "No critical threshold\n")); - } else if (tmp <= 2732) { - pr_warn(FW_BUG "Invalid critical threshold (%llu)\n", - tmp); - tz->trips.critical.flags.valid = 0; - } else { - tz->trips.critical.flags.valid = 1; - ACPI_DEBUG_PRINT((ACPI_DB_INFO, - "Found critical threshold [%lu]\n", - tz->trips.critical.temperature)); - } - if (tz->trips.critical.flags.valid == 1) { - if (crt == -1) { - tz->trips.critical.flags.valid = 0; - } else if (crt > 0) { - unsigned long crt_k = CELSIUS_TO_DECI_KELVIN(crt); - /* - * Allow override critical threshold - */ - if (crt_k > tz->trips.critical.temperature) - pr_warn(PREFIX "Critical threshold %d C\n", - crt); - tz->trips.critical.temperature = crt_k; - } - } - } + if (acpi_active_trip_temp(tz->device, index, &temp)) + return THERMAL_TEMP_INVALID; - /* Critical Sleep (optional) */ - if (flag & ACPI_TRIPS_HOT) { - status = acpi_evaluate_integer(tz->device->handle, - "_HOT", NULL, &tmp); - if (ACPI_FAILURE(status)) { - tz->trips.hot.flags.valid = 0; - ACPI_DEBUG_PRINT((ACPI_DB_INFO, - "No hot threshold\n")); - } else { - tz->trips.hot.temperature = tmp; - tz->trips.hot.flags.valid = 1; - ACPI_DEBUG_PRINT((ACPI_DB_INFO, - "Found hot threshold [%lu]\n", - tz->trips.hot.temperature)); - } - } + /* + * If an override has been provided, apply it so there are no active + * trips with thresholds greater than the override. 
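+	 * For example, setting the act module parameter to 40 clamps every
+	 * _ACx value above celsius_to_deci_kelvin(40).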
+ */ + if (act > 0) { + unsigned long long override = celsius_to_deci_kelvin(act); - /* Passive (optional) */ - if (((flag & ACPI_TRIPS_PASSIVE) && tz->trips.passive.flags.valid) || - (flag == ACPI_TRIPS_INIT)) { - valid = tz->trips.passive.flags.valid; - if (psv == -1) { - status = AE_SUPPORT; - } else if (psv > 0) { - tmp = CELSIUS_TO_DECI_KELVIN(psv); - status = AE_OK; - } else { - status = acpi_evaluate_integer(tz->device->handle, - "_PSV", NULL, &tmp); - } - - if (ACPI_FAILURE(status)) - tz->trips.passive.flags.valid = 0; - else { - tz->trips.passive.temperature = tmp; - tz->trips.passive.flags.valid = 1; - if (flag == ACPI_TRIPS_INIT) { - status = acpi_evaluate_integer( - tz->device->handle, "_TC1", - NULL, &tmp); - if (ACPI_FAILURE(status)) - tz->trips.passive.flags.valid = 0; - else - tz->trips.passive.tc1 = tmp; - status = acpi_evaluate_integer( - tz->device->handle, "_TC2", - NULL, &tmp); - if (ACPI_FAILURE(status)) - tz->trips.passive.flags.valid = 0; - else - tz->trips.passive.tc2 = tmp; - status = acpi_evaluate_integer( - tz->device->handle, "_TSP", - NULL, &tmp); - if (ACPI_FAILURE(status)) - tz->trips.passive.flags.valid = 0; - else - tz->trips.passive.tsp = tmp; - } - } - } - if ((flag & ACPI_TRIPS_DEVICES) && tz->trips.passive.flags.valid) { - memset(&devices, 0, sizeof(struct acpi_handle_list)); - status = acpi_evaluate_reference(tz->device->handle, "_PSL", - NULL, &devices); - if (ACPI_FAILURE(status)) { - pr_warn(PREFIX "Invalid passive threshold\n"); - tz->trips.passive.flags.valid = 0; - } - else - tz->trips.passive.flags.valid = 1; - - if (memcmp(&tz->trips.passive.devices, &devices, - sizeof(struct acpi_handle_list))) { - memcpy(&tz->trips.passive.devices, &devices, - sizeof(struct acpi_handle_list)); - ACPI_THERMAL_TRIPS_EXCEPTION(flag, "device"); - } - } - if ((flag & ACPI_TRIPS_PASSIVE) || (flag & ACPI_TRIPS_DEVICES)) { - if (valid != tz->trips.passive.flags.valid) - ACPI_THERMAL_TRIPS_EXCEPTION(flag, "state"); + if (temp > override) + return override; } + return temp; +} - /* Active (optional) */ - for (i = 0; i < ACPI_THERMAL_MAX_ACTIVE; i++) { - char name[5] = { '_', 'A', 'C', ('0' + i), '\0' }; - valid = tz->trips.active[i].flags.valid; +static void acpi_thermal_update_trip(struct acpi_thermal *tz, + const struct thermal_trip *trip) +{ + struct acpi_thermal_trip *acpi_trip = trip->priv; - if (act == -1) - break; /* disable all active trip points */ - - if ((flag == ACPI_TRIPS_INIT) || ((flag & ACPI_TRIPS_ACTIVE) && - tz->trips.active[i].flags.valid)) { - status = acpi_evaluate_integer(tz->device->handle, - name, NULL, &tmp); - if (ACPI_FAILURE(status)) { - tz->trips.active[i].flags.valid = 0; - if (i == 0) - break; - if (act <= 0) - break; - if (i == 1) - tz->trips.active[0].temperature = - CELSIUS_TO_DECI_KELVIN(act); - else - /* - * Don't allow override higher than - * the next higher trip point - */ - tz->trips.active[i - 1].temperature = - (tz->trips.active[i - 2].temperature < - CELSIUS_TO_DECI_KELVIN(act) ? 
- tz->trips.active[i - 2].temperature : - CELSIUS_TO_DECI_KELVIN(act)); - break; - } else { - tz->trips.active[i].temperature = tmp; - tz->trips.active[i].flags.valid = 1; - } - } - - name[2] = 'L'; - if ((flag & ACPI_TRIPS_DEVICES) && tz->trips.active[i].flags.valid ) { - memset(&devices, 0, sizeof(struct acpi_handle_list)); - status = acpi_evaluate_reference(tz->device->handle, - name, NULL, &devices); - if (ACPI_FAILURE(status)) { - pr_warn(PREFIX "Invalid active%d threshold\n", - i); - tz->trips.active[i].flags.valid = 0; - } - else - tz->trips.active[i].flags.valid = 1; - - if (memcmp(&tz->trips.active[i].devices, &devices, - sizeof(struct acpi_handle_list))) { - memcpy(&tz->trips.active[i].devices, &devices, - sizeof(struct acpi_handle_list)); - ACPI_THERMAL_TRIPS_EXCEPTION(flag, "device"); - } - } - if ((flag & ACPI_TRIPS_ACTIVE) || (flag & ACPI_TRIPS_DEVICES)) - if (valid != tz->trips.active[i].flags.valid) - ACPI_THERMAL_TRIPS_EXCEPTION(flag, "state"); - - if (!tz->trips.active[i].flags.valid) - break; - } + if (trip->type == THERMAL_TRIP_PASSIVE) { + if (psv > 0) + return; + + acpi_trip->temp_dk = get_passive_temp(tz); + } else { + int index = active_trip_index(tz, acpi_trip); - if ((flag & ACPI_TRIPS_DEVICES) - && acpi_has_method(tz->device->handle, "_TZD")) { - memset(&devices, 0, sizeof(devices)); - status = acpi_evaluate_reference(tz->device->handle, "_TZD", - NULL, &devices); - if (ACPI_SUCCESS(status) - && memcmp(&tz->devices, &devices, sizeof(devices))) { - tz->devices = devices; - ACPI_THERMAL_TRIPS_EXCEPTION(flag, "device"); - } + acpi_trip->temp_dk = get_active_temp(tz, index); } - return 0; + if (!acpi_thermal_trip_valid(acpi_trip)) + ACPI_THERMAL_TRIPS_EXCEPTION(tz, "state"); } -static int acpi_thermal_get_trip_points(struct acpi_thermal *tz) +static bool update_trip_devices(struct acpi_thermal *tz, + struct acpi_thermal_trip *acpi_trip, + int index, bool compare) { - int i, valid, ret = acpi_thermal_trips_update(tz, ACPI_TRIPS_INIT); - - if (ret) - return ret; + struct acpi_handle_list devices = { 0 }; + char method[] = "_PSL"; - valid = tz->trips.critical.flags.valid | - tz->trips.hot.flags.valid | - tz->trips.passive.flags.valid; + if (index != ACPI_THERMAL_TRIP_PASSIVE) { + method[1] = 'A'; + method[2] = 'L'; + method[3] = '0' + index; + } - for (i = 0; i < ACPI_THERMAL_MAX_ACTIVE; i++) - valid |= tz->trips.active[i].flags.valid; + if (!acpi_evaluate_reference(tz->device->handle, method, NULL, &devices)) { + acpi_handle_info(tz->device->handle, "%s evaluation failure\n", method); + return false; + } - if (!valid) { - pr_warn(FW_BUG "No valid trip found\n"); - return -ENODEV; + if (acpi_handle_list_equal(&acpi_trip->devices, &devices)) { + acpi_handle_list_free(&devices); + return true; } - return 0; + + if (compare) + ACPI_THERMAL_TRIPS_EXCEPTION(tz, "device"); + + acpi_handle_list_replace(&acpi_trip->devices, &devices); + return true; } -static void acpi_thermal_check(void *data) +static void acpi_thermal_update_trip_devices(struct acpi_thermal *tz, + const struct thermal_trip *trip) { - struct acpi_thermal *tz = data; + struct acpi_thermal_trip *acpi_trip = trip->priv; + int index = trip->type == THERMAL_TRIP_PASSIVE ? 
+ ACPI_THERMAL_TRIP_PASSIVE : active_trip_index(tz, acpi_trip); - if (!tz->tz_enabled) + if (update_trip_devices(tz, acpi_trip, index, true)) return; - thermal_zone_device_update(tz->thermal_zone, - THERMAL_EVENT_UNSPECIFIED); + acpi_trip->temp_dk = THERMAL_TEMP_INVALID; + ACPI_THERMAL_TRIPS_EXCEPTION(tz, "state"); } -/* sys I/F for generic thermal sysfs support */ +struct adjust_trip_data { + struct acpi_thermal *tz; + u32 event; +}; -static int thermal_get_temp(struct thermal_zone_device *thermal, int *temp) +static int acpi_thermal_adjust_trip(struct thermal_trip *trip, void *data) { - struct acpi_thermal *tz = thermal->devdata; - int result; - - if (!tz) - return -EINVAL; + struct acpi_thermal_trip *acpi_trip = trip->priv; + struct adjust_trip_data *atd = data; + struct acpi_thermal *tz = atd->tz; + int temp; - result = acpi_thermal_get_temperature(tz); - if (result) - return result; - - *temp = DECI_KELVIN_TO_MILLICELSIUS_WITH_OFFSET(tz->temperature, - tz->kelvin_offset); - return 0; -} + if (!acpi_trip || !acpi_thermal_trip_valid(acpi_trip)) + return 0; -static int thermal_get_mode(struct thermal_zone_device *thermal, - enum thermal_device_mode *mode) -{ - struct acpi_thermal *tz = thermal->devdata; + if (atd->event == ACPI_THERMAL_NOTIFY_THRESHOLDS) + acpi_thermal_update_trip(tz, trip); + else + acpi_thermal_update_trip_devices(tz, trip); - if (!tz) - return -EINVAL; + if (acpi_thermal_trip_valid(acpi_trip)) + temp = acpi_thermal_temp(tz, acpi_trip->temp_dk); + else + temp = THERMAL_TEMP_INVALID; - *mode = tz->tz_enabled ? THERMAL_DEVICE_ENABLED : - THERMAL_DEVICE_DISABLED; + thermal_zone_set_trip_temp(tz->thermal_zone, trip, temp); return 0; } -static int thermal_set_mode(struct thermal_zone_device *thermal, - enum thermal_device_mode mode) +static void acpi_queue_thermal_check(struct acpi_thermal *tz) { - struct acpi_thermal *tz = thermal->devdata; - int enable; + if (!work_pending(&tz->thermal_check_work)) + queue_work(acpi_thermal_pm_queue, &tz->thermal_check_work); +} - if (!tz) - return -EINVAL; +static void acpi_thermal_trips_update(struct acpi_thermal *tz, u32 event) +{ + struct adjust_trip_data atd = { .tz = tz, .event = event }; + struct acpi_device *adev = tz->device; /* - * enable/disable thermal management from ACPI thermal driver + * Use thermal_zone_for_each_trip() to carry out the trip points + * update, so as to protect thermal_get_trend() from getting stale + * trip point temperatures and to prevent thermal_zone_device_update() + * invoked from acpi_thermal_check_fn() from producing inconsistent + * results. */ - if (mode == THERMAL_DEVICE_ENABLED) - enable = 1; - else if (mode == THERMAL_DEVICE_DISABLED) { - enable = 0; - pr_warn("thermal zone will be disabled\n"); - } else - return -EINVAL; - - if (enable != tz->tz_enabled) { - tz->tz_enabled = enable; - ACPI_DEBUG_PRINT((ACPI_DB_INFO, - "%s kernel ACPI thermal control\n", - tz->tz_enabled ? 
"Enable" : "Disable")); - acpi_thermal_check(tz); - } - return 0; + thermal_zone_for_each_trip(tz->thermal_zone, + acpi_thermal_adjust_trip, &atd); + acpi_queue_thermal_check(tz); + acpi_bus_generate_netlink_event(adev->pnp.device_class, + dev_name(&adev->dev), event, 0); } -static int thermal_get_trip_type(struct thermal_zone_device *thermal, - int trip, enum thermal_trip_type *type) +static int acpi_thermal_get_critical_trip(struct acpi_thermal *tz) { - struct acpi_thermal *tz = thermal->devdata; - int i; + int temp; - if (!tz || trip < 0) - return -EINVAL; - - if (tz->trips.critical.flags.valid) { - if (!trip) { - *type = THERMAL_TRIP_CRITICAL; - return 0; - } - trip--; + if (crt > 0) { + temp = celsius_to_deci_kelvin(crt); + goto set; } - - if (tz->trips.hot.flags.valid) { - if (!trip) { - *type = THERMAL_TRIP_HOT; - return 0; - } - trip--; + if (crt == -1) { + acpi_handle_debug(tz->device->handle, "Critical threshold disabled\n"); + return THERMAL_TEMP_INVALID; } - if (tz->trips.passive.flags.valid) { - if (!trip) { - *type = THERMAL_TRIP_PASSIVE; - return 0; - } - trip--; + if (acpi_critical_trip_temp(tz->device, &temp)) + return THERMAL_TEMP_INVALID; + + if (temp <= 2732) { + /* + * Below zero (Celsius) values clearly aren't right for sure, + * so discard them as invalid. + */ + pr_info(FW_BUG "Invalid critical threshold (%d)\n", temp); + return THERMAL_TEMP_INVALID; } - for (i = 0; i < ACPI_THERMAL_MAX_ACTIVE && - tz->trips.active[i].flags.valid; i++) { - if (!trip) { - *type = THERMAL_TRIP_ACTIVE; - return 0; - } - trip--; +set: + acpi_handle_debug(tz->device->handle, "Critical threshold [%d]\n", temp); + return temp; +} + +static int acpi_thermal_get_hot_trip(struct acpi_thermal *tz) +{ + int temp; + + if (acpi_hot_trip_temp(tz->device, &temp) || temp == THERMAL_TEMP_INVALID) { + acpi_handle_debug(tz->device->handle, "No hot threshold\n"); + return THERMAL_TEMP_INVALID; } - return -EINVAL; + acpi_handle_debug(tz->device->handle, "Hot threshold [%d]\n", temp); + return temp; } -static int thermal_get_trip_temp(struct thermal_zone_device *thermal, - int trip, int *temp) +static bool passive_trip_params_init(struct acpi_thermal *tz) { - struct acpi_thermal *tz = thermal->devdata; - int i; + unsigned long long tmp; + acpi_status status; - if (!tz || trip < 0) - return -EINVAL; + status = acpi_evaluate_integer(tz->device->handle, "_TC1", NULL, &tmp); + if (ACPI_FAILURE(status)) + return false; - if (tz->trips.critical.flags.valid) { - if (!trip) { - *temp = DECI_KELVIN_TO_MILLICELSIUS_WITH_OFFSET( - tz->trips.critical.temperature, - tz->kelvin_offset); - return 0; - } - trip--; - } + tz->trips.passive.tc1 = tmp; - if (tz->trips.hot.flags.valid) { - if (!trip) { - *temp = DECI_KELVIN_TO_MILLICELSIUS_WITH_OFFSET( - tz->trips.hot.temperature, - tz->kelvin_offset); - return 0; - } - trip--; - } + status = acpi_evaluate_integer(tz->device->handle, "_TC2", NULL, &tmp); + if (ACPI_FAILURE(status)) + return false; - if (tz->trips.passive.flags.valid) { - if (!trip) { - *temp = DECI_KELVIN_TO_MILLICELSIUS_WITH_OFFSET( - tz->trips.passive.temperature, - tz->kelvin_offset); - return 0; - } - trip--; - } + tz->trips.passive.tc2 = tmp; - for (i = 0; i < ACPI_THERMAL_MAX_ACTIVE && - tz->trips.active[i].flags.valid; i++) { - if (!trip) { - *temp = DECI_KELVIN_TO_MILLICELSIUS_WITH_OFFSET( - tz->trips.active[i].temperature, - tz->kelvin_offset); - return 0; - } - trip--; + status = acpi_evaluate_integer(tz->device->handle, "_TFP", NULL, &tmp); + if (ACPI_SUCCESS(status)) { + tz->trips.passive.delay = 
tmp; + return true; } - return -EINVAL; + status = acpi_evaluate_integer(tz->device->handle, "_TSP", NULL, &tmp); + if (ACPI_FAILURE(status)) + return false; + + tz->trips.passive.delay = tmp * 100; + + return true; } -static int thermal_get_crit_temp(struct thermal_zone_device *thermal, - int *temperature) +static bool acpi_thermal_init_trip(struct acpi_thermal *tz, int index) { - struct acpi_thermal *tz = thermal->devdata; + struct acpi_thermal_trip *acpi_trip; + long temp; - if (tz->trips.critical.flags.valid) { - *temperature = DECI_KELVIN_TO_MILLICELSIUS_WITH_OFFSET( - tz->trips.critical.temperature, - tz->kelvin_offset); - return 0; - } else - return -EINVAL; + if (index == ACPI_THERMAL_TRIP_PASSIVE) { + acpi_trip = &tz->trips.passive.trip; + + if (psv == -1) + goto fail; + + if (!passive_trip_params_init(tz)) + goto fail; + + temp = psv > 0 ? celsius_to_deci_kelvin(psv) : + get_passive_temp(tz); + } else { + acpi_trip = &tz->trips.active[index].trip; + + if (act == -1) + goto fail; + + temp = get_active_temp(tz, index); + } + + if (temp == THERMAL_TEMP_INVALID) + goto fail; + + if (!update_trip_devices(tz, acpi_trip, index, false)) + goto fail; + + acpi_trip->temp_dk = temp; + return true; + +fail: + acpi_trip->temp_dk = THERMAL_TEMP_INVALID; + return false; } -static int thermal_get_trend(struct thermal_zone_device *thermal, - int trip, enum thermal_trend *trend) +static void acpi_thermal_get_trip_points(struct acpi_thermal *tz) { - struct acpi_thermal *tz = thermal->devdata; - enum thermal_trip_type type; int i; - if (thermal_get_trip_type(thermal, trip, &type)) - return -EINVAL; - - if (type == THERMAL_TRIP_ACTIVE) { - int trip_temp; - int temp = DECI_KELVIN_TO_MILLICELSIUS_WITH_OFFSET( - tz->temperature, tz->kelvin_offset); - if (thermal_get_trip_temp(thermal, trip, &trip_temp)) - return -EINVAL; + acpi_thermal_init_trip(tz, ACPI_THERMAL_TRIP_PASSIVE); - if (temp > trip_temp) { - *trend = THERMAL_TREND_RAISING; - return 0; - } else { - /* Fall back on default trend */ - return -EINVAL; - } + for (i = 0; i < ACPI_THERMAL_MAX_ACTIVE; i++) { + if (!acpi_thermal_init_trip(tz, i)) + break; } - /* - * tz->temperature has already been updated by generic thermal layer, - * before this callback being invoked - */ - i = (tz->trips.passive.tc1 * (tz->temperature - tz->last_temperature)) - + (tz->trips.passive.tc2 - * (tz->temperature - tz->trips.passive.temperature)); - - if (i > 0) - *trend = THERMAL_TREND_RAISING; - else if (i < 0) - *trend = THERMAL_TREND_DROPPING; - else - *trend = THERMAL_TREND_STABLE; - return 0; + while (++i < ACPI_THERMAL_MAX_ACTIVE) + tz->trips.active[i].trip.temp_dk = THERMAL_TEMP_INVALID; } +/* sys I/F for generic thermal sysfs support */ -static int thermal_notify(struct thermal_zone_device *thermal, int trip, - enum thermal_trip_type trip_type) +static int thermal_get_temp(struct thermal_zone_device *thermal, int *temp) { - u8 type = 0; - struct acpi_thermal *tz = thermal->devdata; - - if (trip_type == THERMAL_TRIP_CRITICAL) - type = ACPI_THERMAL_NOTIFY_CRITICAL; - else if (trip_type == THERMAL_TRIP_HOT) - type = ACPI_THERMAL_NOTIFY_HOT; - else - return 0; + struct acpi_thermal *tz = thermal_zone_device_priv(thermal); + int result; - acpi_bus_generate_netlink_event(tz->device->pnp.device_class, - dev_name(&tz->device->dev), type, 1); + if (!tz) + return -EINVAL; - if (trip_type == THERMAL_TRIP_CRITICAL && nocrt) - return 1; + result = acpi_thermal_get_temperature(tz); + if (result) + return result; + *temp = deci_kelvin_to_millicelsius_with_offset(tz->temp_dk, 
+ tz->kelvin_offset); return 0; } -static int acpi_thermal_cooling_device_cb(struct thermal_zone_device *thermal, - struct thermal_cooling_device *cdev, - bool bind) +static int thermal_get_trend(struct thermal_zone_device *thermal, + const struct thermal_trip *trip, + enum thermal_trend *trend) { - struct acpi_device *device = cdev->devdata; - struct acpi_thermal *tz = thermal->devdata; - struct acpi_device *dev; - acpi_status status; - acpi_handle handle; - int i; - int j; - int trip = -1; - int result = 0; + struct acpi_thermal *tz = thermal_zone_device_priv(thermal); + struct acpi_thermal_trip *acpi_trip; + int t; - if (tz->trips.critical.flags.valid) - trip++; + if (!tz || !trip) + return -EINVAL; - if (tz->trips.hot.flags.valid) - trip++; + acpi_trip = trip->priv; + if (!acpi_trip || !acpi_thermal_trip_valid(acpi_trip)) + return -EINVAL; - if (tz->trips.passive.flags.valid) { - trip++; - for (i = 0; i < tz->trips.passive.devices.count; - i++) { - handle = tz->trips.passive.devices.handles[i]; - status = acpi_bus_get_device(handle, &dev); - if (ACPI_FAILURE(status) || dev != device) - continue; - if (bind) - result = - thermal_zone_bind_cooling_device - (thermal, trip, cdev, - THERMAL_NO_LIMIT, THERMAL_NO_LIMIT, - THERMAL_WEIGHT_DEFAULT); - else - result = - thermal_zone_unbind_cooling_device - (thermal, trip, cdev); - if (result) - goto failed; - } - } + switch (trip->type) { + case THERMAL_TRIP_PASSIVE: + t = tz->trips.passive.tc1 * (tz->temp_dk - + tz->last_temp_dk) + + tz->trips.passive.tc2 * (tz->temp_dk - + acpi_trip->temp_dk); + if (t > 0) + *trend = THERMAL_TREND_RAISING; + else if (t < 0) + *trend = THERMAL_TREND_DROPPING; + else + *trend = THERMAL_TREND_STABLE; - for (i = 0; i < ACPI_THERMAL_MAX_ACTIVE; i++) { - if (!tz->trips.active[i].flags.valid) + return 0; + + case THERMAL_TRIP_ACTIVE: + t = acpi_thermal_temp(tz, tz->temp_dk); + if (t <= trip->temperature) break; - trip++; - for (j = 0; - j < tz->trips.active[i].devices.count; - j++) { - handle = tz->trips.active[i].devices.handles[j]; - status = acpi_bus_get_device(handle, &dev); - if (ACPI_FAILURE(status) || dev != device) - continue; - if (bind) - result = thermal_zone_bind_cooling_device - (thermal, trip, cdev, - THERMAL_NO_LIMIT, THERMAL_NO_LIMIT, - THERMAL_WEIGHT_DEFAULT); - else - result = thermal_zone_unbind_cooling_device - (thermal, trip, cdev); - if (result) - goto failed; - } - } - for (i = 0; i < tz->devices.count; i++) { - handle = tz->devices.handles[i]; - status = acpi_bus_get_device(handle, &dev); - if (ACPI_SUCCESS(status) && (dev == device)) { - if (bind) - result = thermal_zone_bind_cooling_device - (thermal, THERMAL_TRIPS_NONE, - cdev, THERMAL_NO_LIMIT, - THERMAL_NO_LIMIT, - THERMAL_WEIGHT_DEFAULT); - else - result = thermal_zone_unbind_cooling_device - (thermal, THERMAL_TRIPS_NONE, - cdev); - if (result) - goto failed; - } + *trend = THERMAL_TREND_RAISING; + + return 0; + + default: + break; } -failed: - return result; + return -EINVAL; } -static int -acpi_thermal_bind_cooling_device(struct thermal_zone_device *thermal, - struct thermal_cooling_device *cdev) +static void acpi_thermal_zone_device_hot(struct thermal_zone_device *thermal) { - return acpi_thermal_cooling_device_cb(thermal, cdev, true); + struct acpi_thermal *tz = thermal_zone_device_priv(thermal); + + acpi_bus_generate_netlink_event(tz->device->pnp.device_class, + dev_name(&tz->device->dev), + ACPI_THERMAL_NOTIFY_HOT, 1); } -static int -acpi_thermal_unbind_cooling_device(struct thermal_zone_device *thermal, - struct 
thermal_cooling_device *cdev) +static void acpi_thermal_zone_device_critical(struct thermal_zone_device *thermal) { - return acpi_thermal_cooling_device_cb(thermal, cdev, false); + struct acpi_thermal *tz = thermal_zone_device_priv(thermal); + + acpi_bus_generate_netlink_event(tz->device->pnp.device_class, + dev_name(&tz->device->dev), + ACPI_THERMAL_NOTIFY_CRITICAL, 1); + + thermal_zone_device_critical(thermal); } -static struct thermal_zone_device_ops acpi_thermal_zone_ops = { - .bind = acpi_thermal_bind_cooling_device, - .unbind = acpi_thermal_unbind_cooling_device, +static bool acpi_thermal_should_bind_cdev(struct thermal_zone_device *thermal, + const struct thermal_trip *trip, + struct thermal_cooling_device *cdev, + struct cooling_spec *c) +{ + struct acpi_thermal_trip *acpi_trip = trip->priv; + struct acpi_device *cdev_adev = cdev->devdata; + int i; + + /* Skip critical and hot trips. */ + if (!acpi_trip) + return false; + + for (i = 0; i < acpi_trip->devices.count; i++) { + acpi_handle handle = acpi_trip->devices.handles[i]; + + if (acpi_fetch_acpi_dev(handle) == cdev_adev) + return true; + } + + return false; +} + +static const struct thermal_zone_device_ops acpi_thermal_zone_ops = { + .should_bind = acpi_thermal_should_bind_cdev, .get_temp = thermal_get_temp, - .get_mode = thermal_get_mode, - .set_mode = thermal_set_mode, - .get_trip_type = thermal_get_trip_type, - .get_trip_temp = thermal_get_trip_temp, - .get_crit_temp = thermal_get_crit_temp, .get_trend = thermal_get_trend, - .notify = thermal_notify, + .hot = acpi_thermal_zone_device_hot, + .critical = acpi_thermal_zone_device_critical, }; -static int acpi_thermal_register_thermal_zone(struct acpi_thermal *tz) +static int acpi_thermal_zone_sysfs_add(struct acpi_thermal *tz) { - int trips = 0; - int result; - acpi_status status; - int i; + struct device *tzdev = thermal_zone_device(tz->thermal_zone); + int ret; - if (tz->trips.critical.flags.valid) - trips++; + ret = sysfs_create_link(&tz->device->dev.kobj, + &tzdev->kobj, "thermal_zone"); + if (ret) + return ret; - if (tz->trips.hot.flags.valid) - trips++; + ret = sysfs_create_link(&tzdev->kobj, + &tz->device->dev.kobj, "device"); + if (ret) + sysfs_remove_link(&tz->device->dev.kobj, "thermal_zone"); - if (tz->trips.passive.flags.valid) - trips++; + return ret; +} - for (i = 0; i < ACPI_THERMAL_MAX_ACTIVE && - tz->trips.active[i].flags.valid; i++, trips++); +static void acpi_thermal_zone_sysfs_remove(struct acpi_thermal *tz) +{ + struct device *tzdev = thermal_zone_device(tz->thermal_zone); + + sysfs_remove_link(&tz->device->dev.kobj, "thermal_zone"); + sysfs_remove_link(&tzdev->kobj, "device"); +} + +static int acpi_thermal_register_thermal_zone(struct acpi_thermal *tz, + const struct thermal_trip *trip_table, + unsigned int trip_count, + int passive_delay) +{ + int result; - if (tz->trips.passive.flags.valid) - tz->thermal_zone = - thermal_zone_device_register("acpitz", trips, 0, tz, - &acpi_thermal_zone_ops, NULL, - tz->trips.passive.tsp*100, - tz->polling_frequency*100); + if (trip_count) + tz->thermal_zone = thermal_zone_device_register_with_trips( + "acpitz", trip_table, trip_count, tz, + &acpi_thermal_zone_ops, NULL, passive_delay, + tz->polling_frequency * 100); else - tz->thermal_zone = - thermal_zone_device_register("acpitz", trips, 0, tz, - &acpi_thermal_zone_ops, NULL, - 0, tz->polling_frequency*100); + tz->thermal_zone = thermal_tripless_zone_device_register( + "acpitz", tz, &acpi_thermal_zone_ops, NULL); + if (IS_ERR(tz->thermal_zone)) - return -ENODEV; + return 
PTR_ERR(tz->thermal_zone); - result = sysfs_create_link(&tz->device->dev.kobj, - &tz->thermal_zone->device.kobj, "thermal_zone"); + result = acpi_thermal_zone_sysfs_add(tz); if (result) - return result; + goto unregister_tzd; - result = sysfs_create_link(&tz->thermal_zone->device.kobj, - &tz->device->dev.kobj, "device"); + result = thermal_zone_device_enable(tz->thermal_zone); if (result) - return result; - - status = acpi_bus_attach_private_data(tz->device->handle, - tz->thermal_zone); - if (ACPI_FAILURE(status)) - return -ENODEV; - - tz->tz_enabled = 1; + goto remove_links; dev_info(&tz->device->dev, "registered as thermal_zone%d\n", - tz->thermal_zone->id); + thermal_zone_device_id(tz->thermal_zone)); + return 0; + +remove_links: + acpi_thermal_zone_sysfs_remove(tz); +unregister_tzd: + thermal_zone_device_unregister(tz->thermal_zone); + + return result; } static void acpi_thermal_unregister_thermal_zone(struct acpi_thermal *tz) { - sysfs_remove_link(&tz->device->dev.kobj, "thermal_zone"); - sysfs_remove_link(&tz->thermal_zone->device.kobj, "device"); + thermal_zone_device_disable(tz->thermal_zone); + acpi_thermal_zone_sysfs_remove(tz); thermal_zone_device_unregister(tz->thermal_zone); tz->thermal_zone = NULL; - acpi_bus_detach_private_data(tz->device->handle); } @@ -951,33 +668,25 @@ static void acpi_thermal_unregister_thermal_zone(struct acpi_thermal *tz) Driver Interface -------------------------------------------------------------------------- */ -static void acpi_thermal_notify(struct acpi_device *device, u32 event) +static void acpi_thermal_notify(acpi_handle handle, u32 event, void *data) { + struct acpi_device *device = data; struct acpi_thermal *tz = acpi_driver_data(device); - if (!tz) return; switch (event) { case ACPI_THERMAL_NOTIFY_TEMPERATURE: - acpi_thermal_check(tz); + acpi_queue_thermal_check(tz); break; case ACPI_THERMAL_NOTIFY_THRESHOLDS: - acpi_thermal_trips_update(tz, ACPI_TRIPS_REFRESH_THRESHOLDS); - acpi_thermal_check(tz); - acpi_bus_generate_netlink_event(device->pnp.device_class, - dev_name(&device->dev), event, 0); - break; case ACPI_THERMAL_NOTIFY_DEVICES: - acpi_thermal_trips_update(tz, ACPI_TRIPS_REFRESH_DEVICES); - acpi_thermal_check(tz); - acpi_bus_generate_netlink_event(device->pnp.device_class, - dev_name(&device->dev), event, 0); + acpi_thermal_trips_update(tz, event); break; default: - ACPI_DEBUG_PRINT((ACPI_DB_INFO, - "Unsupported event [0x%x]\n", event)); + acpi_handle_debug(device->handle, "Unsupported event [0x%x]\n", + event); break; } } @@ -1014,40 +723,6 @@ static void acpi_thermal_aml_dependency_fix(struct acpi_thermal *tz) acpi_evaluate_integer(handle, "_TMP", NULL, &value); } -static int acpi_thermal_get_info(struct acpi_thermal *tz) -{ - int result = 0; - - - if (!tz) - return -EINVAL; - - acpi_thermal_aml_dependency_fix(tz); - - /* Get trip points [_CRT, _PSV, etc.] (required) */ - result = acpi_thermal_get_trip_points(tz); - if (result) - return result; - - /* Get temperature [_TMP] (required) */ - result = acpi_thermal_get_temperature(tz); - if (result) - return result; - - /* Set the cooling mode [_SCP] to active cooling (default) */ - result = acpi_thermal_set_cooling_mode(tz, ACPI_THERMAL_MODE_ACTIVE); - if (!result) - tz->flags.cooling_mode = 1; - - /* Get default polling frequency [_TZP] (optional) */ - if (tzp) - tz->polling_frequency = tzp; - else - acpi_thermal_get_polling_frequency(tz); - - return 0; -} - /* * The exact offset between Kelvin and degree Celsius is 273.15. 
However ACPI * handles temperature values with a single decimal place. As a consequence, @@ -1058,27 +733,60 @@ static int acpi_thermal_get_info(struct acpi_thermal *tz) * The heuristic below should work for all ACPI thermal zones which have a * critical trip point with a value being a multiple of 0.5 degree Celsius. */ -static void acpi_thermal_guess_offset(struct acpi_thermal *tz) +static void acpi_thermal_guess_offset(struct acpi_thermal *tz, long crit_temp) { - if (tz->trips.critical.flags.valid && - (tz->trips.critical.temperature % 5) == 1) - tz->kelvin_offset = 2731; + if (crit_temp != THERMAL_TEMP_INVALID && crit_temp % 5 == 1) + tz->kelvin_offset = 273100; else - tz->kelvin_offset = 2732; + tz->kelvin_offset = 273200; } static void acpi_thermal_check_fn(struct work_struct *work) { struct acpi_thermal *tz = container_of(work, struct acpi_thermal, thermal_check_work); - acpi_thermal_check(tz); + + /* + * In general, it is not sufficient to check the pending bit, because + * subsequent instances of this function may be queued after one of them + * has started running (e.g. if _TMP sleeps). Avoid bailing out if just + * one of them is running, though, because it may have done the actual + * check some time ago, so allow at least one of them to block on the + * mutex while another one is running the update. + */ + if (!refcount_dec_not_one(&tz->thermal_check_count)) + return; + + mutex_lock(&tz->thermal_check_lock); + + thermal_zone_device_update(tz->thermal_zone, THERMAL_EVENT_UNSPECIFIED); + + refcount_inc(&tz->thermal_check_count); + + mutex_unlock(&tz->thermal_check_lock); } -static int acpi_thermal_add(struct acpi_device *device) +static void acpi_thermal_free_thermal_zone(struct acpi_thermal *tz) { - int result = 0; - struct acpi_thermal *tz = NULL; + int i; + + acpi_handle_list_free(&tz->trips.passive.trip.devices); + for (i = 0; i < ACPI_THERMAL_MAX_ACTIVE; i++) + acpi_handle_list_free(&tz->trips.active[i].trip.devices); + kfree(tz); +} + +static int acpi_thermal_add(struct acpi_device *device) +{ + struct thermal_trip trip_table[ACPI_THERMAL_MAX_NR_TRIPS] = { 0 }; + struct acpi_thermal_trip *acpi_trip; + struct thermal_trip *trip; + struct acpi_thermal *tz; + int crit_temp, hot_temp; + int passive_delay = 0; + int result; + int i; if (!device) return -EINVAL; @@ -1088,46 +796,121 @@ static int acpi_thermal_add(struct acpi_device *device) return -ENOMEM; tz->device = device; - strcpy(tz->name, device->pnp.bus_id); - strcpy(acpi_device_name(device), ACPI_THERMAL_DEVICE_NAME); - strcpy(acpi_device_class(device), ACPI_THERMAL_CLASS); + strscpy(tz->name, device->pnp.bus_id); + strscpy(acpi_device_name(device), ACPI_THERMAL_DEVICE_NAME); + strscpy(acpi_device_class(device), ACPI_THERMAL_CLASS); device->driver_data = tz; - result = acpi_thermal_get_info(tz); + acpi_thermal_aml_dependency_fix(tz); + + /* + * Set the cooling mode [_SCP] to active cooling. This needs to happen before + * we retrieve the trip point values. + */ + acpi_execute_simple_method(tz->device->handle, "_SCP", ACPI_THERMAL_MODE_ACTIVE); + + /* Get trip points [_ACi, _PSV, etc.] (required). */ + acpi_thermal_get_trip_points(tz); + + crit_temp = acpi_thermal_get_critical_trip(tz); + hot_temp = acpi_thermal_get_hot_trip(tz); + + /* Get temperature [_TMP] (required). */ + result = acpi_thermal_get_temperature(tz); if (result) goto free_memory; - acpi_thermal_guess_offset(tz); + /* Determine the default polling frequency [_TZP]. 
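+	 * A nonzero tzp module parameter overrides the firmware-provided value.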
*/ + if (tzp) + tz->polling_frequency = tzp; + else + acpi_thermal_get_polling_frequency(tz); + + acpi_thermal_guess_offset(tz, crit_temp); + + trip = trip_table; + + if (crit_temp != THERMAL_TEMP_INVALID) { + trip->type = THERMAL_TRIP_CRITICAL; + trip->temperature = acpi_thermal_temp(tz, crit_temp); + trip++; + } + + if (hot_temp != THERMAL_TEMP_INVALID) { + trip->type = THERMAL_TRIP_HOT; + trip->temperature = acpi_thermal_temp(tz, hot_temp); + trip++; + } + + acpi_trip = &tz->trips.passive.trip; + if (acpi_thermal_trip_valid(acpi_trip)) { + passive_delay = tz->trips.passive.delay; + + trip->type = THERMAL_TRIP_PASSIVE; + trip->temperature = acpi_thermal_temp(tz, acpi_trip->temp_dk); + trip->priv = acpi_trip; + trip++; + } + + for (i = 0; i < ACPI_THERMAL_MAX_ACTIVE; i++) { + acpi_trip = &tz->trips.active[i].trip; + + if (!acpi_thermal_trip_valid(acpi_trip)) + break; + + trip->type = THERMAL_TRIP_ACTIVE; + trip->temperature = acpi_thermal_temp(tz, acpi_trip->temp_dk); + trip->priv = acpi_trip; + trip++; + } + + if (trip == trip_table) + pr_warn(FW_BUG "No valid trip points!\n"); - result = acpi_thermal_register_thermal_zone(tz); + result = acpi_thermal_register_thermal_zone(tz, trip_table, + trip - trip_table, + passive_delay); if (result) goto free_memory; + refcount_set(&tz->thermal_check_count, 3); + mutex_init(&tz->thermal_check_lock); INIT_WORK(&tz->thermal_check_work, acpi_thermal_check_fn); - pr_info(PREFIX "%s [%s] (%ld C)\n", acpi_device_name(device), - acpi_device_bid(device), DECI_KELVIN_TO_CELSIUS(tz->temperature)); - goto end; + pr_info("%s [%s] (%ld C)\n", acpi_device_name(device), + acpi_device_bid(device), deci_kelvin_to_celsius(tz->temp_dk)); + + result = acpi_dev_install_notify_handler(device, ACPI_DEVICE_NOTIFY, + acpi_thermal_notify, device); + if (result) + goto flush_wq; + return 0; + +flush_wq: + flush_workqueue(acpi_thermal_pm_queue); + acpi_thermal_unregister_thermal_zone(tz); free_memory: - kfree(tz); -end: + acpi_thermal_free_thermal_zone(tz); + return result; } -static int acpi_thermal_remove(struct acpi_device *device) +static void acpi_thermal_remove(struct acpi_device *device) { - struct acpi_thermal *tz = NULL; + struct acpi_thermal *tz; if (!device || !acpi_driver_data(device)) - return -EINVAL; + return; - flush_workqueue(acpi_thermal_pm_queue); tz = acpi_driver_data(device); + acpi_dev_remove_notify_handler(device, ACPI_DEVICE_NOTIFY, + acpi_thermal_notify); + + flush_workqueue(acpi_thermal_pm_queue); acpi_thermal_unregister_thermal_zone(tz); - kfree(tz); - return 0; + acpi_thermal_free_thermal_zone(tz); } #ifdef CONFIG_PM_SLEEP @@ -1141,7 +924,7 @@ static int acpi_thermal_suspend(struct device *dev) static int acpi_thermal_resume(struct device *dev) { struct acpi_thermal *tz; - int i, j, power_state, result; + int i, j; if (!dev) return -EINVAL; @@ -1151,59 +934,75 @@ static int acpi_thermal_resume(struct device *dev) return -EINVAL; for (i = 0; i < ACPI_THERMAL_MAX_ACTIVE; i++) { - if (!(&tz->trips.active[i])) - break; - if (!tz->trips.active[i].flags.valid) + struct acpi_thermal_trip *acpi_trip = &tz->trips.active[i].trip; + + if (!acpi_thermal_trip_valid(acpi_trip)) break; - tz->trips.active[i].flags.enabled = 1; - for (j = 0; j < tz->trips.active[i].devices.count; j++) { - result = acpi_bus_update_power( - tz->trips.active[i].devices.handles[j], - &power_state); - if (result || (power_state != ACPI_STATE_D0)) { - tz->trips.active[i].flags.enabled = 0; - break; - } - } - tz->state.active |= tz->trips.active[i].flags.enabled; + + for (j = 0; j < 
acpi_trip->devices.count; j++) + acpi_bus_update_power(acpi_trip->devices.handles[j], NULL); } - queue_work(acpi_thermal_pm_queue, &tz->thermal_check_work); + acpi_queue_thermal_check(tz); return AE_OK; } +#else +#define acpi_thermal_suspend NULL +#define acpi_thermal_resume NULL #endif +static SIMPLE_DEV_PM_OPS(acpi_thermal_pm, acpi_thermal_suspend, acpi_thermal_resume); -static int thermal_act(const struct dmi_system_id *d) { +static const struct acpi_device_id thermal_device_ids[] = { + {ACPI_THERMAL_HID, 0}, + {"", 0}, +}; +MODULE_DEVICE_TABLE(acpi, thermal_device_ids); + +static struct acpi_driver acpi_thermal_driver = { + .name = "thermal", + .class = ACPI_THERMAL_CLASS, + .ids = thermal_device_ids, + .ops = { + .add = acpi_thermal_add, + .remove = acpi_thermal_remove, + }, + .drv.pm = &acpi_thermal_pm, +}; +static int thermal_act(const struct dmi_system_id *d) +{ if (act == 0) { - pr_notice(PREFIX "%s detected: " - "disabling all active thermal trip points\n", d->ident); + pr_notice("%s detected: disabling all active thermal trip points\n", + d->ident); act = -1; } return 0; } -static int thermal_nocrt(const struct dmi_system_id *d) { - pr_notice(PREFIX "%s detected: " - "disabling all critical thermal trip point actions.\n", d->ident); - nocrt = 1; +static int thermal_nocrt(const struct dmi_system_id *d) +{ + pr_notice("%s detected: disabling all critical thermal trip point actions.\n", + d->ident); + crt = -1; return 0; } -static int thermal_tzp(const struct dmi_system_id *d) { +static int thermal_tzp(const struct dmi_system_id *d) +{ if (tzp == 0) { - pr_notice(PREFIX "%s detected: " - "enabling thermal zone polling\n", d->ident); + pr_notice("%s detected: enabling thermal zone polling\n", + d->ident); tzp = 300; /* 300 dS = 30 Seconds */ } return 0; } -static int thermal_psv(const struct dmi_system_id *d) { +static int thermal_psv(const struct dmi_system_id *d) +{ if (psv == 0) { - pr_notice(PREFIX "%s detected: " - "disabling all passive thermal trip points\n", d->ident); + pr_notice("%s detected: disabling all passive thermal trip points\n", + d->ident); psv = -1; } return 0; @@ -1251,17 +1050,18 @@ static const struct dmi_system_id thermal_dmi_table[] __initconst = { static int __init acpi_thermal_init(void) { - int result = 0; + int result; dmi_check_system(thermal_dmi_table); if (off) { - pr_notice(PREFIX "thermal control disabled\n"); + pr_notice("thermal control disabled\n"); return -ENODEV; } acpi_thermal_pm_queue = alloc_workqueue("acpi_thermal_pm", - WQ_HIGHPRI | WQ_MEM_RECLAIM, 0); + WQ_HIGHPRI | WQ_MEM_RECLAIM | WQ_PERCPU, + 0); if (!acpi_thermal_pm_queue) return -ENODEV; @@ -1278,9 +1078,12 @@ static void __exit acpi_thermal_exit(void) { acpi_bus_unregister_driver(&acpi_thermal_driver); destroy_workqueue(acpi_thermal_pm_queue); - - return; } module_init(acpi_thermal_init); module_exit(acpi_thermal_exit); + +MODULE_IMPORT_NS("ACPI_THERMAL"); +MODULE_AUTHOR("Paul Diefenbaugh"); +MODULE_DESCRIPTION("ACPI Thermal Zone Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/acpi/thermal_lib.c b/drivers/acpi/thermal_lib.c new file mode 100644 index 000000000000..f81591927e86 --- /dev/null +++ b/drivers/acpi/thermal_lib.c @@ -0,0 +1,166 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright 2023 Linaro Limited + * Copyright 2023 Intel Corporation + * + * Library routines for retrieving trip point temperature values from the + * platform firmware via ACPI. 
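+ * The acpi_*_trip_temp() helpers return raw deci-kelvin values; the
+ * thermal_acpi_*_trip_temp() wrappers below convert them to millidegrees
+ * Celsius for the thermal core.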
+ */ +#include <linux/acpi.h> +#include <linux/units.h> +#include <linux/thermal.h> +#include "internal.h" + +/* + * Minimum temperature for full military grade is 218°K (-55°C) and + * max temperature is 448°K (175°C). We can consider those values as + * the boundaries for the [trips] temperature returned by the + * firmware. Any values out of these boundaries may be considered + * bogus and we can assume the firmware has no data to provide. + */ +#define TEMP_MIN_DECIK 2180ULL +#define TEMP_MAX_DECIK 4480ULL + +static int acpi_trip_temp(struct acpi_device *adev, char *obj_name, + int *ret_temp) +{ + unsigned long long temp; + acpi_status status; + + status = acpi_evaluate_integer(adev->handle, obj_name, NULL, &temp); + if (ACPI_FAILURE(status)) { + acpi_handle_debug(adev->handle, "%s evaluation failed\n", obj_name); + return -ENODATA; + } + + if (temp >= TEMP_MIN_DECIK && temp <= TEMP_MAX_DECIK) { + *ret_temp = temp; + } else { + acpi_handle_debug(adev->handle, "%s result %llu out of range\n", + obj_name, temp); + *ret_temp = THERMAL_TEMP_INVALID; + } + + return 0; +} + +int acpi_active_trip_temp(struct acpi_device *adev, int id, int *ret_temp) +{ + char obj_name[] = {'_', 'A', 'C', '0' + id, '\0'}; + + if (id < 0 || id > 9) + return -EINVAL; + + return acpi_trip_temp(adev, obj_name, ret_temp); +} +EXPORT_SYMBOL_NS_GPL(acpi_active_trip_temp, "ACPI_THERMAL"); + +int acpi_passive_trip_temp(struct acpi_device *adev, int *ret_temp) +{ + return acpi_trip_temp(adev, "_PSV", ret_temp); +} +EXPORT_SYMBOL_NS_GPL(acpi_passive_trip_temp, "ACPI_THERMAL"); + +int acpi_hot_trip_temp(struct acpi_device *adev, int *ret_temp) +{ + return acpi_trip_temp(adev, "_HOT", ret_temp); +} +EXPORT_SYMBOL_NS_GPL(acpi_hot_trip_temp, "ACPI_THERMAL"); + +int acpi_critical_trip_temp(struct acpi_device *adev, int *ret_temp) +{ + return acpi_trip_temp(adev, "_CRT", ret_temp); +} +EXPORT_SYMBOL_NS_GPL(acpi_critical_trip_temp, "ACPI_THERMAL"); + +static int thermal_temp(int error, int temp_decik, int *ret_temp) +{ + if (error) + return error; + + if (temp_decik == THERMAL_TEMP_INVALID) + *ret_temp = THERMAL_TEMP_INVALID; + else + *ret_temp = deci_kelvin_to_millicelsius(temp_decik); + + return 0; +} + +/** + * thermal_acpi_active_trip_temp - Retrieve active trip point temperature + * @adev: Target thermal zone ACPI device object. + * @id: Active cooling level (0 - 9). + * @ret_temp: Address to store the retrieved temperature value on success. + * + * Evaluate the _ACx object for the thermal zone represented by @adev to obtain + * the temperature of the active cooling trip point corresponding to the active + * cooling level given by @id. + * + * Return 0 on success or a negative error value on failure. + */ +int thermal_acpi_active_trip_temp(struct acpi_device *adev, int id, int *ret_temp) +{ + int temp_decik = 0; + int ret = acpi_active_trip_temp(adev, id, &temp_decik); + + return thermal_temp(ret, temp_decik, ret_temp); +} +EXPORT_SYMBOL_GPL(thermal_acpi_active_trip_temp); + +/** + * thermal_acpi_passive_trip_temp - Retrieve passive trip point temperature + * @adev: Target thermal zone ACPI device object. + * @ret_temp: Address to store the retrieved temperature value on success. + * + * Evaluate the _PSV object for the thermal zone represented by @adev to obtain + * the temperature of the passive cooling trip point. + * + * Return 0 on success or -ENODATA on failure. 
+ */ +int thermal_acpi_passive_trip_temp(struct acpi_device *adev, int *ret_temp) +{ + int temp_decik = 0; + int ret = acpi_passive_trip_temp(adev, &temp_decik); + + return thermal_temp(ret, temp_decik, ret_temp); +} +EXPORT_SYMBOL_GPL(thermal_acpi_passive_trip_temp); + +/** + * thermal_acpi_hot_trip_temp - Retrieve hot trip point temperature + * @adev: Target thermal zone ACPI device object. + * @ret_temp: Address to store the retrieved temperature value on success. + * + * Evaluate the _HOT object for the thermal zone represented by @adev to obtain + * the temperature of the trip point at which the system is expected to be put + * into the S4 sleep state. + * + * Return 0 on success or -ENODATA on failure. + */ +int thermal_acpi_hot_trip_temp(struct acpi_device *adev, int *ret_temp) +{ + int temp_decik = 0; + int ret = acpi_hot_trip_temp(adev, &temp_decik); + + return thermal_temp(ret, temp_decik, ret_temp); +} +EXPORT_SYMBOL_GPL(thermal_acpi_hot_trip_temp); + +/** + * thermal_acpi_critical_trip_temp - Retrieve critical trip point temperature + * @adev: Target thermal zone ACPI device object. + * @ret_temp: Address to store the retrieved temperature value on success. + * + * Evaluate the _CRT object for the thermal zone represented by @adev to obtain + * the temperature of the critical cooling trip point. + * + * Return 0 on success or -ENODATA on failure. + */ +int thermal_acpi_critical_trip_temp(struct acpi_device *adev, int *ret_temp) +{ + int temp_decik = 0; + int ret = acpi_critical_trip_temp(adev, &temp_decik); + + return thermal_temp(ret, temp_decik, ret_temp); +} +EXPORT_SYMBOL_GPL(thermal_acpi_critical_trip_temp); diff --git a/drivers/acpi/tiny-power-button.c b/drivers/acpi/tiny-power-button.c new file mode 100644 index 000000000000..6353be6fec69 --- /dev/null +++ b/drivers/acpi/tiny-power-button.c @@ -0,0 +1,80 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +#include <linux/module.h> +#include <linux/sched/signal.h> +#include <linux/acpi.h> +#include <acpi/button.h> + +MODULE_AUTHOR("Josh Triplett"); +MODULE_DESCRIPTION("ACPI Tiny Power Button Driver"); +MODULE_LICENSE("GPL"); + +static int power_signal __read_mostly = CONFIG_ACPI_TINY_POWER_BUTTON_SIGNAL; +module_param(power_signal, int, 0644); +MODULE_PARM_DESC(power_signal, "Power button sends this signal to init"); + +static const struct acpi_device_id tiny_power_button_device_ids[] = { + { ACPI_BUTTON_HID_POWER, 0 }, + { ACPI_BUTTON_HID_POWERF, 0 }, + { "", 0 }, +}; +MODULE_DEVICE_TABLE(acpi, tiny_power_button_device_ids); + +static void acpi_tiny_power_button_notify(acpi_handle handle, u32 event, void *data) +{ + kill_cad_pid(power_signal, 1); +} + +static void acpi_tiny_power_button_notify_run(void *not_used) +{ + acpi_tiny_power_button_notify(NULL, ACPI_FIXED_HARDWARE_EVENT, NULL); +} + +static u32 acpi_tiny_power_button_event(void *not_used) +{ + acpi_os_execute(OSL_NOTIFY_HANDLER, acpi_tiny_power_button_notify_run, NULL); + return ACPI_INTERRUPT_HANDLED; +} + +static int acpi_tiny_power_button_add(struct acpi_device *device) +{ + acpi_status status; + + if (device->device_type == ACPI_BUS_TYPE_POWER_BUTTON) { + status = acpi_install_fixed_event_handler(ACPI_EVENT_POWER_BUTTON, + acpi_tiny_power_button_event, + NULL); + } else { + status = acpi_install_notify_handler(device->handle, + ACPI_DEVICE_NOTIFY, + acpi_tiny_power_button_notify, + NULL); + } + if (ACPI_FAILURE(status)) + return -ENODEV; + + return 0; +} + +static void acpi_tiny_power_button_remove(struct acpi_device *device) +{ + if (device->device_type == 
ACPI_BUS_TYPE_POWER_BUTTON) { + acpi_remove_fixed_event_handler(ACPI_EVENT_POWER_BUTTON, + acpi_tiny_power_button_event); + } else { + acpi_remove_notify_handler(device->handle, ACPI_DEVICE_NOTIFY, + acpi_tiny_power_button_notify); + } + acpi_os_wait_events_complete(); +} + +static struct acpi_driver acpi_tiny_power_button_driver = { + .name = "tiny-power-button", + .class = "tiny-power-button", + .ids = tiny_power_button_device_ids, + .ops = { + .add = acpi_tiny_power_button_add, + .remove = acpi_tiny_power_button_remove, + }, +}; + +module_acpi_driver(acpi_tiny_power_button_driver); diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c index 78db97687f26..526563a0d188 100644 --- a/drivers/acpi/utils.c +++ b/drivers/acpi/utils.c @@ -1,24 +1,13 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /* * acpi_utils.c - ACPI Utility Functions ($Revision: 10 $) * * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com> * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com> - * - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or (at - * your option) any later version. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ +#define pr_fmt(fmt) "ACPI: utils: " fmt + #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> @@ -31,24 +20,12 @@ #include "internal.h" #include "sleep.h" -#define _COMPONENT ACPI_BUS_COMPONENT -ACPI_MODULE_NAME("utils"); - /* -------------------------------------------------------------------------- Object Evaluation Helpers -------------------------------------------------------------------------- */ -static void -acpi_util_eval_error(acpi_handle h, acpi_string p, acpi_status s) -{ -#ifdef ACPI_DEBUG_OUTPUT - char prefix[80] = {'\0'}; - struct acpi_buffer buffer = {sizeof(prefix), prefix}; - acpi_get_name(h, ACPI_FULL_PATHNAME, &buffer); - ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Evaluate [%s.%s]: %s\n", - (char *) prefix, p, acpi_format_exception(s))); -#else - return; -#endif +static void acpi_util_eval_error(acpi_handle h, acpi_string p, acpi_status s) +{ + acpi_handle_debug(h, "Evaluate [%s]: %s\n", p, acpi_format_exception(s)); } acpi_status @@ -66,25 +43,24 @@ acpi_extract_package(union acpi_object *package, if (!package || (package->type != ACPI_TYPE_PACKAGE) || (package->package.count < 1)) { - printk(KERN_WARNING PREFIX "Invalid package argument\n"); + pr_debug("Invalid package argument\n"); return AE_BAD_PARAMETER; } if (!format || !format->pointer || (format->length < 1)) { - printk(KERN_WARNING PREFIX "Invalid format argument\n"); + pr_debug("Invalid format argument\n"); return AE_BAD_PARAMETER; } if (!buffer) { - printk(KERN_WARNING PREFIX "Invalid buffer argument\n"); + pr_debug("Invalid buffer argument\n"); return AE_BAD_PARAMETER; } format_count = (format->length / sizeof(char)) - 1; if (format_count > package->package.count) { - printk(KERN_WARNING PREFIX "Format specifies more objects [%d]" - " than exist in package [%d].\n", - format_count, package->package.count); + pr_debug("Format specifies 
more objects [%d] than present [%d]\n", + format_count, package->package.count); return AE_BAD_DATA; } @@ -112,12 +88,9 @@ acpi_extract_package(union acpi_object *package, tail_offset += sizeof(char *); break; default: - printk(KERN_WARNING PREFIX "Invalid package element" - " [%d]: got number, expecting" - " [%c]\n", - i, format_string[i]); + pr_debug("Invalid package element [%d]: got number, expected [%c]\n", + i, format_string[i]); return AE_BAD_DATA; - break; } break; @@ -137,12 +110,9 @@ acpi_extract_package(union acpi_object *package, tail_offset += sizeof(u8 *); break; default: - printk(KERN_WARNING PREFIX "Invalid package element" - " [%d] got string/buffer," - " expecting [%c]\n", - i, format_string[i]); + pr_debug("Invalid package element [%d] got string/buffer, expected [%c]\n", + i, format_string[i]); return AE_BAD_DATA; - break; } break; case ACPI_TYPE_LOCAL_REFERENCE: @@ -152,23 +122,17 @@ acpi_extract_package(union acpi_object *package, tail_offset += sizeof(void *); break; default: - printk(KERN_WARNING PREFIX "Invalid package element" - " [%d] got reference," - " expecting [%c]\n", - i, format_string[i]); + pr_debug("Invalid package element [%d] got reference, expected [%c]\n", + i, format_string[i]); return AE_BAD_DATA; - break; } break; case ACPI_TYPE_PACKAGE: default: - ACPI_DEBUG_PRINT((ACPI_DB_INFO, - "Found unsupported element at index=%d\n", - i)); + pr_debug("Unsupported element at index=%d\n", i); /* TBD: handle nested packages... */ return AE_SUPPORT; - break; } } @@ -306,30 +270,88 @@ acpi_evaluate_integer(acpi_handle handle, *data = element.integer.value; - ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Return value [%llu]\n", *data)); + acpi_handle_debug(handle, "Return value [%llu]\n", *data); return AE_OK; } EXPORT_SYMBOL(acpi_evaluate_integer); -acpi_status -acpi_evaluate_reference(acpi_handle handle, - acpi_string pathname, - struct acpi_object_list *arguments, - struct acpi_handle_list *list) +int acpi_get_local_u64_address(acpi_handle handle, u64 *addr) +{ + acpi_status status; + + status = acpi_evaluate_integer(handle, METHOD_NAME__ADR, NULL, addr); + if (ACPI_FAILURE(status)) + return -ENODATA; + return 0; +} +EXPORT_SYMBOL(acpi_get_local_u64_address); + +int acpi_get_local_address(acpi_handle handle, u32 *addr) +{ + u64 adr; + int ret; + + ret = acpi_get_local_u64_address(handle, &adr); + if (ret < 0) + return ret; + *addr = (u32)adr; + return 0; +} +EXPORT_SYMBOL(acpi_get_local_address); + +#define ACPI_MAX_SUB_BUF_SIZE 9 + +const char *acpi_get_subsystem_id(acpi_handle handle) { - acpi_status status = AE_OK; - union acpi_object *package = NULL; - union acpi_object *element = NULL; struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; - u32 i = 0; + union acpi_object *obj; + acpi_status status; + const char *sub; + size_t len; + status = acpi_evaluate_object(handle, METHOD_NAME__SUB, NULL, &buffer); + if (ACPI_FAILURE(status)) { + acpi_handle_debug(handle, "Reading ACPI _SUB failed: %#x\n", status); + return ERR_PTR(-ENODATA); + } - if (!list) { - return AE_BAD_PARAMETER; + obj = buffer.pointer; + if (obj->type == ACPI_TYPE_STRING) { + len = strlen(obj->string.pointer); + if (len < ACPI_MAX_SUB_BUF_SIZE && len > 0) { + sub = kstrdup(obj->string.pointer, GFP_KERNEL); + if (!sub) + sub = ERR_PTR(-ENOMEM); + } else { + acpi_handle_err(handle, "ACPI _SUB Length %zu is Invalid\n", len); + sub = ERR_PTR(-ENODATA); + } + } else { + acpi_handle_warn(handle, "Warning ACPI _SUB did not return a string\n"); + sub = ERR_PTR(-ENODATA); } + acpi_os_free(buffer.pointer); 
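/*
 * buffer.pointer above was allocated by ACPICA (ACPI_ALLOCATE_BUFFER), hence
 * acpi_os_free(); the kstrdup()'d _SUB string (or ERR_PTR value) returned
 * below is owned by the caller, which is responsible for freeing it.
 */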
+ + return sub; +} +EXPORT_SYMBOL_GPL(acpi_get_subsystem_id); + +bool acpi_evaluate_reference(acpi_handle handle, acpi_string pathname, + struct acpi_object_list *arguments, + struct acpi_handle_list *list) +{ + struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; + union acpi_object *package; + acpi_status status; + bool ret = false; + u32 i; + + if (!list) + return false; + /* Evaluate object. */ status = acpi_evaluate_object(handle, pathname, arguments, &buffer); @@ -338,66 +360,141 @@ acpi_evaluate_reference(acpi_handle handle, package = buffer.pointer; - if ((buffer.length == 0) || !package) { - status = AE_BAD_DATA; - acpi_util_eval_error(handle, pathname, status); - goto end; - } - if (package->type != ACPI_TYPE_PACKAGE) { - status = AE_BAD_DATA; - acpi_util_eval_error(handle, pathname, status); - goto end; - } - if (!package->package.count) { - status = AE_BAD_DATA; - acpi_util_eval_error(handle, pathname, status); - goto end; - } + if (buffer.length == 0 || !package || + package->type != ACPI_TYPE_PACKAGE || !package->package.count) + goto err; - if (package->package.count > ACPI_MAX_HANDLES) { - kfree(package); - return AE_NO_MEMORY; - } list->count = package->package.count; + list->handles = kcalloc(list->count, sizeof(*list->handles), GFP_KERNEL); + if (!list->handles) + goto err_clear; /* Extract package data. */ for (i = 0; i < list->count; i++) { + union acpi_object *element = &(package->package.elements[i]); - element = &(package->package.elements[i]); - - if (element->type != ACPI_TYPE_LOCAL_REFERENCE) { - status = AE_BAD_DATA; - acpi_util_eval_error(handle, pathname, status); - break; - } + if (element->type != ACPI_TYPE_LOCAL_REFERENCE || + !element->reference.handle) + goto err_free; - if (!element->reference.handle) { - status = AE_NULL_ENTRY; - acpi_util_eval_error(handle, pathname, status); - break; - } /* Get the acpi_handle. */ list->handles[i] = element->reference.handle; - ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found reference [%p]\n", - list->handles[i])); + acpi_handle_debug(list->handles[i], "Found in reference list\n"); } - end: - if (ACPI_FAILURE(status)) { - list->count = 0; - //kfree(list->handles); - } + ret = true; +end: kfree(buffer.pointer); - return status; + return ret; + +err_free: + kfree(list->handles); + list->handles = NULL; + +err_clear: + list->count = 0; + +err: + acpi_util_eval_error(handle, pathname, status); + goto end; } EXPORT_SYMBOL(acpi_evaluate_reference); -acpi_status +/** + * acpi_handle_list_equal - Check if two ACPI handle lists are the same + * @list1: First list to compare. + * @list2: Second list to compare. + * + * Return true if the given ACPI handle lists are of the same size and + * contain the same ACPI handles in the same order. Otherwise, return false. + */ +bool acpi_handle_list_equal(struct acpi_handle_list *list1, + struct acpi_handle_list *list2) +{ + return list1->count == list2->count && + !memcmp(list1->handles, list2->handles, + list1->count * sizeof(*list1->handles)); +} +EXPORT_SYMBOL_GPL(acpi_handle_list_equal); + +/** + * acpi_handle_list_replace - Replace one ACPI handle list with another + * @dst: ACPI handle list to replace. + * @src: Source ACPI handle list. + * + * Free the handles table in @dst, move the handles table from @src to @dst, + * copy count from @src to @dst and clear @src. 
+ */ +void acpi_handle_list_replace(struct acpi_handle_list *dst, + struct acpi_handle_list *src) +{ + if (dst->count) + kfree(dst->handles); + + dst->count = src->count; + dst->handles = src->handles; + + src->handles = NULL; + src->count = 0; +} +EXPORT_SYMBOL_GPL(acpi_handle_list_replace); + +/** + * acpi_handle_list_free - Free the handles table in an ACPI handle list + * @list: ACPI handle list to free. + * + * Free the handles table in @list and clear its count field. + */ +void acpi_handle_list_free(struct acpi_handle_list *list) +{ + if (!list->count) + return; + + kfree(list->handles); + list->count = 0; +} +EXPORT_SYMBOL_GPL(acpi_handle_list_free); + +/** + * acpi_device_dep - Check ACPI device dependency + * @target: ACPI handle of the target ACPI device. + * @match: ACPI handle to look up in the target's _DEP list. + * + * Return true if @match is present in the list returned by _DEP for + * @target or false otherwise. + */ +bool acpi_device_dep(acpi_handle target, acpi_handle match) +{ + struct acpi_handle_list dep_devices; + bool ret = false; + int i; + + if (!acpi_has_method(target, "_DEP")) + return false; + + if (!acpi_evaluate_reference(target, "_DEP", NULL, &dep_devices)) { + acpi_handle_debug(target, "Failed to evaluate _DEP.\n"); + return false; + } + + for (i = 0; i < dep_devices.count; i++) { + if (dep_devices.handles[i] == match) { + ret = true; + break; + } + } + + acpi_handle_list_free(&dep_devices); + return ret; +} +EXPORT_SYMBOL_GPL(acpi_device_dep); + +bool acpi_get_physical_device_location(acpi_handle handle, struct acpi_pld_info **pld) { acpi_status status; @@ -405,9 +502,8 @@ acpi_get_physical_device_location(acpi_handle handle, struct acpi_pld_info **pld union acpi_object *output; status = acpi_evaluate_object(handle, "_PLD", NULL, &buffer); - if (ACPI_FAILURE(status)) - return status; + return false; output = buffer.pointer; @@ -426,7 +522,7 @@ acpi_get_physical_device_location(acpi_handle handle, struct acpi_pld_info **pld out: kfree(buffer.pointer); - return status; + return ACPI_SUCCESS(status); } EXPORT_SYMBOL(acpi_get_physical_device_location); @@ -468,10 +564,11 @@ EXPORT_SYMBOL(acpi_evaluate_ost); /** * acpi_handle_path: Return the object path of handle + * @handle: ACPI device handle * * Caller must free the returned buffer */ -static char *acpi_handle_path(acpi_handle handle) +char *acpi_handle_path(acpi_handle handle) { struct acpi_buffer buffer = { .length = ACPI_ALLOCATE_BUFFER, @@ -486,6 +583,9 @@ static char *acpi_handle_path(acpi_handle handle) /** * acpi_handle_printk: Print message with ACPI prefix and object path + * @level: log level + * @handle: ACPI device handle + * @fmt: format string * * This function is called through acpi_handle_<level> macros and prints * a message with ACPI prefix and object path. This function acquires @@ -504,7 +604,7 @@ acpi_handle_printk(const char *level, acpi_handle handle, const char *fmt, ...) vaf.va = &args; path = acpi_handle_path(handle); - printk("%sACPI: %s: %pV", level, path ? path : "<n/a>" , &vaf); + printk("%sACPI: %s: %pV", level, path ? path : "<n/a>", &vaf); va_end(args); kfree(path); @@ -514,6 +614,9 @@ EXPORT_SYMBOL(acpi_handle_printk); #if defined(CONFIG_DYNAMIC_DEBUG) /** * __acpi_handle_debug: pr_debug with ACPI prefix and object path + * @descriptor: Dynamic Debug descriptor + * @handle: ACPI device handle + * @fmt: format string * * This function is called through acpi_handle_debug macro and debug * prints a message with ACPI prefix and object path. 
This function @@ -542,6 +645,20 @@ EXPORT_SYMBOL(__acpi_handle_debug); #endif /** + * acpi_evaluation_failure_warn - Log evaluation failure warning. + * @handle: Parent object handle. + * @name: Name of the object whose evaluation has failed. + * @status: Status value returned by the failing object evaluation. + */ +void acpi_evaluation_failure_warn(acpi_handle handle, const char *name, + acpi_status status) +{ + acpi_handle_warn(handle, "%s evaluation failed: %s\n", name, + acpi_format_exception(status)); +} +EXPORT_SYMBOL_GPL(acpi_evaluation_failure_warn); + +/** * acpi_has_method: Check whether @handle has a method named @name * @handle: ACPI device handle * @name: name of object or method @@ -612,6 +729,31 @@ acpi_status acpi_evaluate_lck(acpi_handle handle, int lock) } /** + * acpi_evaluate_reg: Evaluate _REG method to register OpRegion presence + * @handle: ACPI device handle + * @space_id: ACPI address space id to register OpRegion presence for + * @function: Parameter to pass to _REG one of ACPI_REG_CONNECT or + * ACPI_REG_DISCONNECT + * + * Evaluate device's _REG method to register OpRegion presence. + */ +acpi_status acpi_evaluate_reg(acpi_handle handle, u8 space_id, u32 function) +{ + struct acpi_object_list arg_list; + union acpi_object params[2]; + + params[0].type = ACPI_TYPE_INTEGER; + params[0].integer.value = space_id; + params[1].type = ACPI_TYPE_INTEGER; + params[1].integer.value = function; + arg_list.count = 2; + arg_list.pointer = params; + + return acpi_evaluate_object(handle, "_REG", &arg_list, NULL); +} +EXPORT_SYMBOL(acpi_evaluate_reg); + +/** * acpi_evaluate_dsm - evaluate device's _DSM method * @handle: ACPI device handle * @guid: GUID of requested functions, should be 16 bytes @@ -658,7 +800,8 @@ acpi_evaluate_dsm(acpi_handle handle, const guid_t *guid, u64 rev, u64 func, if (ret != AE_NOT_FOUND) acpi_handle_warn(handle, - "failed to evaluate _DSM (0x%x)\n", ret); + "failed to evaluate _DSM %pUb rev:%lld func:%lld (0x%x)\n", + guid, rev, func, ret); return NULL; } @@ -708,6 +851,30 @@ bool acpi_check_dsm(acpi_handle handle, const guid_t *guid, u64 rev, u64 funcs) EXPORT_SYMBOL(acpi_check_dsm); /** + * acpi_dev_uid_to_integer - treat ACPI device _UID as integer + * @adev: ACPI device to get _UID from + * @integer: output buffer for integer + * + * Considers _UID as integer and converts it to @integer. + * + * Returns 0 on success, or negative error code otherwise. + */ +int acpi_dev_uid_to_integer(struct acpi_device *adev, u64 *integer) +{ + const char *uid; + + if (!adev) + return -ENODEV; + + uid = acpi_device_uid(adev); + if (!uid) + return -ENODATA; + + return kstrtou64(uid, 0, integer); +} +EXPORT_SYMBOL(acpi_dev_uid_to_integer); + +/** * acpi_dev_found - Detect presence of a given ACPI device in the namespace. * @hid: Hardware ID of the device. 
* @@ -738,28 +905,24 @@ bool acpi_dev_found(const char *hid) EXPORT_SYMBOL(acpi_dev_found); struct acpi_dev_match_info { - const char *dev_name; struct acpi_device_id hid[2]; const char *uid; s64 hrv; }; -static int acpi_dev_match_cb(struct device *dev, void *data) +static int acpi_dev_match_cb(struct device *dev, const void *data) { struct acpi_device *adev = to_acpi_device(dev); - struct acpi_dev_match_info *match = data; + const struct acpi_dev_match_info *match = data; unsigned long long hrv; acpi_status status; if (acpi_match_device_ids(adev, match->hid)) return 0; - if (match->uid && (!adev->pnp.unique_id || - strcmp(adev->pnp.unique_id, match->uid))) + if (match->uid && !acpi_dev_uid_match(adev, match->uid)) return 0; - match->dev_name = acpi_dev_name(adev); - if (match->hrv == -1) return 1; @@ -780,7 +943,7 @@ static int acpi_dev_match_cb(struct device *dev, void *data) * Note that if the device is pluggable, it may since have disappeared. * * Note that unlike acpi_dev_found() this function checks the status - * of the device. So for devices which are present in the dsdt, but + * of the device. So for devices which are present in the DSDT, but * which are disabled (their _STA callback returns 0) this function * will return false. * @@ -795,40 +958,79 @@ bool acpi_dev_present(const char *hid, const char *uid, s64 hrv) struct acpi_dev_match_info match = {}; struct device *dev; - strlcpy(match.hid[0].id, hid, sizeof(match.hid[0].id)); + strscpy(match.hid[0].id, hid, sizeof(match.hid[0].id)); match.uid = uid; match.hrv = hrv; dev = bus_find_device(&acpi_bus_type, NULL, &match, acpi_dev_match_cb); + put_device(dev); return !!dev; } EXPORT_SYMBOL(acpi_dev_present); /** - * acpi_dev_get_first_match_name - Return name of first match of ACPI device + * acpi_dev_get_next_match_dev - Return the next match of ACPI device + * @adev: Pointer to the previous ACPI device matching this @hid, @uid and @hrv * @hid: Hardware ID of the device. * @uid: Unique ID of the device, pass NULL to not check _UID * @hrv: Hardware Revision of the device, pass -1 to not check _HRV * - * Return device name if a matching device was present + * Return the next match of ACPI device if another matching device was present * at the moment of invocation, or NULL otherwise. * + * The caller is responsible for invoking acpi_dev_put() on the returned device. + * On the other hand the function invokes acpi_dev_put() on the given @adev + * assuming that its reference counter had been increased beforehand. + * * See additional information in acpi_dev_present() as well. */ -const char * -acpi_dev_get_first_match_name(const char *hid, const char *uid, s64 hrv) +struct acpi_device * +acpi_dev_get_next_match_dev(struct acpi_device *adev, const char *hid, const char *uid, s64 hrv) { + struct device *start = adev ? &adev->dev : NULL; struct acpi_dev_match_info match = {}; struct device *dev; - strlcpy(match.hid[0].id, hid, sizeof(match.hid[0].id)); + strscpy(match.hid[0].id, hid, sizeof(match.hid[0].id)); match.uid = uid; match.hrv = hrv; - dev = bus_find_device(&acpi_bus_type, NULL, &match, acpi_dev_match_cb); - return dev ? match.dev_name : NULL; + dev = bus_find_device(&acpi_bus_type, start, &match, acpi_dev_match_cb); + acpi_dev_put(adev); + return dev ? to_acpi_device(dev) : NULL; +} +EXPORT_SYMBOL(acpi_dev_get_next_match_dev); + +/** + * acpi_dev_get_first_match_dev - Return the first match of ACPI device + * @hid: Hardware ID of the device. 
+ * @uid: Unique ID of the device, pass NULL to not check _UID + * @hrv: Hardware Revision of the device, pass -1 to not check _HRV + * + * Return the first match of ACPI device if a matching device was present + * at the moment of invocation, or NULL otherwise. + * + * The caller is responsible for invoking acpi_dev_put() on the returned device. + * + * See additional information in acpi_dev_present() as well. + */ +struct acpi_device * +acpi_dev_get_first_match_dev(const char *hid, const char *uid, s64 hrv) +{ + return acpi_dev_get_next_match_dev(NULL, hid, uid, hrv); +} +EXPORT_SYMBOL(acpi_dev_get_first_match_dev); + +/** + * acpi_reduced_hardware - Return if this is an ACPI-reduced-hw machine + * + * Return true when running on an ACPI-reduced-hw machine, false otherwise. + */ +bool acpi_reduced_hardware(void) +{ + return acpi_gbl_reduced_hardware; } -EXPORT_SYMBOL(acpi_dev_get_first_match_name); +EXPORT_SYMBOL_GPL(acpi_reduced_hardware); /* * acpi_backlight= handling, this is done here rather then in video_detect.c @@ -839,7 +1041,7 @@ EXPORT_SYMBOL(acpi_video_backlight_string); static int __init acpi_backlight(char *str) { - strlcpy(acpi_video_backlight_string, str, + strscpy(acpi_video_backlight_string, str, sizeof(acpi_video_backlight_string)); return 1; } diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c index 43587ac680e4..4cf74f173c78 100644 --- a/drivers/acpi/video_detect.c +++ b/drivers/acpi/video_detect.c @@ -17,8 +17,9 @@ * Otherwise vendor specific drivers like thinkpad_acpi, asus-laptop, * sony_acpi,... can take care about backlight brightness. * - * Backlight drivers can use acpi_video_get_backlight_type() to determine - * which driver should handle the backlight. + * Backlight drivers can use acpi_video_get_backlight_type() to determine which + * driver should handle the backlight. RAW/GPU-driver backlight drivers must + * use the acpi_video_backlight_use_native() helper for this. 
* * If CONFIG_ACPI_VIDEO is neither set as "compiled in" (y) nor as a module (m) * this file will not be compiled and acpi_video_get_backlight_type() will @@ -27,23 +28,17 @@ #include <linux/export.h> #include <linux/acpi.h> +#include <linux/apple-gmux.h> #include <linux/backlight.h> #include <linux/dmi.h> #include <linux/module.h> #include <linux/pci.h> +#include <linux/platform_data/x86/nvidia-wmi-ec-backlight.h> +#include <linux/pnp.h> #include <linux/types.h> #include <linux/workqueue.h> #include <acpi/video.h> -ACPI_MODULE_NAME("video"); -#define _COMPONENT ACPI_VIDEO_COMPONENT - -void acpi_video_unregister_backlight(void); - -static bool backlight_notifier_registered; -static struct notifier_block backlight_nb; -static struct work_struct backlight_notify_work; - static enum acpi_backlight_type acpi_backlight_cmdline = acpi_backlight_undef; static enum acpi_backlight_type acpi_backlight_dmi = acpi_backlight_undef; @@ -55,6 +50,12 @@ static void acpi_video_parse_cmdline(void) acpi_backlight_cmdline = acpi_backlight_video; if (!strcmp("native", acpi_video_backlight_string)) acpi_backlight_cmdline = acpi_backlight_native; + if (!strcmp("nvidia_wmi_ec", acpi_video_backlight_string)) + acpi_backlight_cmdline = acpi_backlight_nvidia_wmi_ec; + if (!strcmp("apple_gmux", acpi_video_backlight_string)) + acpi_backlight_cmdline = acpi_backlight_apple_gmux; + if (!strcmp("dell_uart", acpi_video_backlight_string)) + acpi_backlight_cmdline = acpi_backlight_dell_uart; if (!strcmp("none", acpi_video_backlight_string)) acpi_backlight_cmdline = acpi_backlight_none; } @@ -62,18 +63,16 @@ static void acpi_video_parse_cmdline(void) static acpi_status find_video(acpi_handle handle, u32 lvl, void *context, void **rv) { + struct acpi_device *acpi_dev = acpi_fetch_acpi_dev(handle); long *cap = context; struct pci_dev *dev; - struct acpi_device *acpi_dev; static const struct acpi_device_id video_ids[] = { {ACPI_VIDEO_HID, 0}, {"", 0}, }; - if (acpi_bus_get_device(handle, &acpi_dev)) - return AE_OK; - if (!acpi_match_device_ids(acpi_dev, video_ids)) { + if (acpi_dev && !acpi_match_device_ids(acpi_dev, video_ids)) { dev = acpi_get_pci_dev(handle); if (!dev) return AE_OK; @@ -83,6 +82,36 @@ find_video(acpi_handle handle, u32 lvl, void *context, void **rv) return AE_OK; } +/* This depends on ACPI_WMI which is X86 only */ +#ifdef CONFIG_X86 +static bool nvidia_wmi_ec_supported(void) +{ + struct wmi_brightness_args args = { + .mode = WMI_BRIGHTNESS_MODE_GET, + .val = 0, + .ret = 0, + }; + struct acpi_buffer buf = { (acpi_size)sizeof(args), &args }; + acpi_status status; + + status = wmi_evaluate_method(WMI_BRIGHTNESS_GUID, 0, + WMI_BRIGHTNESS_METHOD_SOURCE, &buf, &buf); + if (ACPI_FAILURE(status)) + return false; + + /* + * If brightness is handled by the EC then nvidia-wmi-ec-backlight + * should be used, else the GPU driver(s) should be used. 
+ */ + return args.ret == WMI_BRIGHTNESS_SOURCE_EC; +} +#else +static bool nvidia_wmi_ec_supported(void) +{ + return false; +} +#endif + /* Force to use vendor driver when the ACPI device is known to be * buggy */ static int video_detect_force_vendor(const struct dmi_system_id *d) @@ -103,44 +132,194 @@ static int video_detect_force_native(const struct dmi_system_id *d) return 0; } -static int video_detect_force_none(const struct dmi_system_id *d) +static int video_detect_portege_r100(const struct dmi_system_id *d) { - acpi_backlight_dmi = acpi_backlight_none; + struct pci_dev *dev; + /* Search for Trident CyberBlade XP4m32 to confirm Portégé R100 */ + dev = pci_get_device(PCI_VENDOR_ID_TRIDENT, 0x2100, NULL); + if (dev) + acpi_backlight_dmi = acpi_backlight_vendor; return 0; } static const struct dmi_system_id video_detect_dmi_table[] = { - /* On Samsung X360, the BIOS will set a flag (VDRV) if generic - * ACPI backlight device is used. This flag will definitively break - * the backlight interface (even the vendor interface) untill next - * reboot. It's why we should prevent video.ko from being used here - * and we can't rely on a later call to acpi_video_unregister(). + /* + * Models which should use the vendor backlight interface, + * because of broken ACPI video backlight control. */ { + /* https://bugzilla.redhat.com/show_bug.cgi?id=1128309 */ .callback = video_detect_force_vendor, - .ident = "X360", + /* Acer KAV80 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."), - DMI_MATCH(DMI_PRODUCT_NAME, "X360"), - DMI_MATCH(DMI_BOARD_NAME, "X360"), + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "KAV80"), }, }, { - .callback = video_detect_force_vendor, - .ident = "Asus UL30VT", - .matches = { + .callback = video_detect_force_vendor, + /* Asus UL30VT */ + .matches = { DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "UL30VT"), }, }, { - .callback = video_detect_force_vendor, - .ident = "Asus UL30A", - .matches = { + .callback = video_detect_force_vendor, + /* Asus UL30A */ + .matches = { DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "UL30A"), }, }, + { + .callback = video_detect_force_vendor, + /* Asus X55U */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "X55U"), + }, + }, + { + /* https://bugs.launchpad.net/bugs/1000146 */ + .callback = video_detect_force_vendor, + /* Asus X101CH */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "X101CH"), + }, + }, + { + .callback = video_detect_force_vendor, + /* Asus X401U */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "X401U"), + }, + }, + { + .callback = video_detect_force_vendor, + /* Asus X501U */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "X501U"), + }, + }, + { + /* https://bugs.launchpad.net/bugs/1000146 */ + .callback = video_detect_force_vendor, + /* Asus 1015CX */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "1015CX"), + }, + }, + { + .callback = video_detect_force_vendor, + /* Samsung N150/N210/N220 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."), + DMI_MATCH(DMI_PRODUCT_NAME, "N150/N210/N220"), + DMI_MATCH(DMI_BOARD_NAME, "N150/N210/N220"), + }, + }, + { + .callback = video_detect_force_vendor, + /* Samsung NF110/NF210/NF310 */ + .matches 
= { + DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."), + DMI_MATCH(DMI_PRODUCT_NAME, "NF110/NF210/NF310"), + DMI_MATCH(DMI_BOARD_NAME, "NF110/NF210/NF310"), + }, + }, + { + .callback = video_detect_force_vendor, + /* Samsung NC210 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."), + DMI_MATCH(DMI_PRODUCT_NAME, "NC210/NC110"), + DMI_MATCH(DMI_BOARD_NAME, "NC210/NC110"), + }, + }, + + /* + * Models which should use the vendor backlight interface, + * because of broken native backlight control. + */ + { + .callback = video_detect_force_vendor, + /* Sony Vaio PCG-FRV35 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), + DMI_MATCH(DMI_PRODUCT_NAME, "PCG-FRV35"), + }, + }, + { + .callback = video_detect_force_vendor, + /* Panasonic Toughbook CF-18 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Matsushita Electric Industrial"), + DMI_MATCH(DMI_PRODUCT_NAME, "CF-18"), + }, + }, + + /* + * Toshiba models with Transflective display, these need to use + * the toshiba_acpi vendor driver for proper Transflective handling. + */ + { + .callback = video_detect_force_vendor, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), + DMI_MATCH(DMI_PRODUCT_NAME, "PORTEGE R500"), + }, + }, + { + .callback = video_detect_force_vendor, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), + DMI_MATCH(DMI_PRODUCT_NAME, "PORTEGE R600"), + }, + }, + + /* + * Toshiba Portégé R100 has working both acpi_video and toshiba_acpi + * vendor driver. But none of them gets activated as it has a VGA with + * no kernel driver (Trident CyberBlade XP4m32). + * The DMI strings are generic so check for the VGA chip in callback. + */ + { + .callback = video_detect_portege_r100, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), + DMI_MATCH(DMI_PRODUCT_NAME, "Portable PC"), + DMI_MATCH(DMI_PRODUCT_VERSION, "Version 1.0"), + DMI_MATCH(DMI_BOARD_NAME, "Portable PC") + }, + }, + + /* + * Models which need acpi_video backlight control where the GPU drivers + * do not call acpi_video_register_backlight() because no internal panel + * is detected. Typically these are all-in-ones (monitors with builtin + * PC) where the panel connection shows up as regular DP instead of eDP. 
+ */ + { + .callback = video_detect_force_video, + /* Apple iMac14,1 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "iMac14,1"), + }, + }, + { + .callback = video_detect_force_video, + /* Apple iMac14,2 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "iMac14,2"), + }, + }, /* * These models have a working acpi_video backlight control, and using @@ -151,7 +330,7 @@ static const struct dmi_system_id video_detect_dmi_table[] = { */ { .callback = video_detect_force_video, - .ident = "ThinkPad T420", + /* ThinkPad T420 */ .matches = { DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T420"), @@ -159,7 +338,7 @@ static const struct dmi_system_id video_detect_dmi_table[] = { }, { .callback = video_detect_force_video, - .ident = "ThinkPad T520", + /* ThinkPad T520 */ .matches = { DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T520"), @@ -167,26 +346,26 @@ static const struct dmi_system_id video_detect_dmi_table[] = { }, { .callback = video_detect_force_video, - .ident = "ThinkPad X201s", + /* ThinkPad X201s */ .matches = { DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X201s"), }, }, - { - .callback = video_detect_force_video, - .ident = "ThinkPad X201T", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), - DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X201T"), - }, - }, + { + .callback = video_detect_force_video, + /* ThinkPad X201T */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X201T"), + }, + }, /* The native backlight controls do not work on some older machines */ { /* https://bugs.freedesktop.org/show_bug.cgi?id=81515 */ .callback = video_detect_force_video, - .ident = "HP ENVY 15 Notebook", + /* HP ENVY 15 Notebook */ .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), DMI_MATCH(DMI_PRODUCT_NAME, "HP ENVY 15 Notebook PC"), @@ -194,7 +373,7 @@ static const struct dmi_system_id video_detect_dmi_table[] = { }, { .callback = video_detect_force_video, - .ident = "SAMSUNG 870Z5E/880Z5E/680Z5E", + /* SAMSUNG 870Z5E/880Z5E/680Z5E */ .matches = { DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."), DMI_MATCH(DMI_PRODUCT_NAME, "870Z5E/880Z5E/680Z5E"), @@ -202,7 +381,7 @@ static const struct dmi_system_id video_detect_dmi_table[] = { }, { .callback = video_detect_force_video, - .ident = "SAMSUNG 370R4E/370R4V/370R5E/3570RE/370R5V", + /* SAMSUNG 370R4E/370R4V/370R5E/3570RE/370R5V */ .matches = { DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."), DMI_MATCH(DMI_PRODUCT_NAME, @@ -212,7 +391,7 @@ static const struct dmi_system_id video_detect_dmi_table[] = { { /* https://bugzilla.redhat.com/show_bug.cgi?id=1186097 */ .callback = video_detect_force_video, - .ident = "SAMSUNG 3570R/370R/470R/450R/510R/4450RV", + /* SAMSUNG 3570R/370R/470R/450R/510R/4450RV */ .matches = { DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."), DMI_MATCH(DMI_PRODUCT_NAME, @@ -222,7 +401,7 @@ static const struct dmi_system_id video_detect_dmi_table[] = { { /* https://bugzilla.redhat.com/show_bug.cgi?id=1557060 */ .callback = video_detect_force_video, - .ident = "SAMSUNG 670Z5E", + /* SAMSUNG 670Z5E */ .matches = { DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."), DMI_MATCH(DMI_PRODUCT_NAME, "670Z5E"), @@ -231,7 +410,7 @@ static const struct dmi_system_id video_detect_dmi_table[] = { { /* https://bugzilla.redhat.com/show_bug.cgi?id=1094948 */ .callback = 
video_detect_force_video, - .ident = "SAMSUNG 730U3E/740U3E", + /* SAMSUNG 730U3E/740U3E */ .matches = { DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."), DMI_MATCH(DMI_PRODUCT_NAME, "730U3E/740U3E"), @@ -240,7 +419,7 @@ static const struct dmi_system_id video_detect_dmi_table[] = { { /* https://bugs.freedesktop.org/show_bug.cgi?id=87286 */ .callback = video_detect_force_video, - .ident = "SAMSUNG 900X3C/900X3D/900X3E/900X4C/900X4D", + /* SAMSUNG 900X3C/900X3D/900X3E/900X4C/900X4D */ .matches = { DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."), DMI_MATCH(DMI_PRODUCT_NAME, @@ -250,7 +429,7 @@ static const struct dmi_system_id video_detect_dmi_table[] = { { /* https://bugzilla.redhat.com/show_bug.cgi?id=1272633 */ .callback = video_detect_force_video, - .ident = "Dell XPS14 L421X", + /* Dell XPS14 L421X */ .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "XPS L421X"), @@ -259,7 +438,7 @@ static const struct dmi_system_id video_detect_dmi_table[] = { { /* https://bugzilla.redhat.com/show_bug.cgi?id=1163574 */ .callback = video_detect_force_video, - .ident = "Dell XPS15 L521X", + /* Dell XPS15 L521X */ .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "XPS L521X"), @@ -268,36 +447,144 @@ static const struct dmi_system_id video_detect_dmi_table[] = { { /* https://bugzilla.kernel.org/show_bug.cgi?id=108971 */ .callback = video_detect_force_video, - .ident = "SAMSUNG 530U4E/540U4E", + /* SAMSUNG 530U4E/540U4E */ .matches = { DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."), DMI_MATCH(DMI_PRODUCT_NAME, "530U4E/540U4E"), }, }, + { + /* https://bugs.launchpad.net/bugs/1894667 */ + .callback = video_detect_force_video, + /* HP 635 Notebook */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), + DMI_MATCH(DMI_PRODUCT_NAME, "HP 635 Notebook PC"), + }, + }, /* Non win8 machines which need native backlight nevertheless */ { /* https://bugzilla.redhat.com/show_bug.cgi?id=1201530 */ .callback = video_detect_force_native, - .ident = "Lenovo Ideapad S405", + /* Lenovo Ideapad S405 */ .matches = { DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), DMI_MATCH(DMI_BOARD_NAME, "Lenovo IdeaPad S405"), }, }, { + /* https://bugzilla.suse.com/show_bug.cgi?id=1208724 */ + .callback = video_detect_force_native, + /* Lenovo Ideapad Z470 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_VERSION, "IdeaPad Z470"), + }, + }, + { /* https://bugzilla.redhat.com/show_bug.cgi?id=1187004 */ .callback = video_detect_force_native, - .ident = "Lenovo Ideapad Z570", + /* Lenovo Ideapad Z570 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_VERSION, "Ideapad Z570"), + }, + }, + { + .callback = video_detect_force_native, + /* Lenovo E41-25 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "81FS"), + }, + }, + { + .callback = video_detect_force_native, + /* Lenovo E41-45 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "82BK"), + }, + }, + { + .callback = video_detect_force_native, + /* Lenovo Slim 7 16ARH7 */ .matches = { DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), - DMI_MATCH(DMI_PRODUCT_NAME, "102434U"), + DMI_MATCH(DMI_PRODUCT_NAME, "82UX"), + }, + }, + { + .callback = video_detect_force_native, + /* Lenovo ThinkPad X131e (3371 AMD version) */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "3371"), + }, + }, + { + .callback = video_detect_force_native, + /* Apple iMac11,3 */ + 
.matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "iMac11,3"), + }, + }, + { + /* https://gitlab.freedesktop.org/drm/amd/-/issues/1838 */ + .callback = video_detect_force_native, + /* Apple iMac12,1 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "iMac12,1"), + }, + }, + { + /* https://gitlab.freedesktop.org/drm/amd/-/issues/2753 */ + .callback = video_detect_force_native, + /* Apple iMac12,2 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "iMac12,2"), + }, + }, + { + .callback = video_detect_force_native, + /* Apple MacBook Air 7,2 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "MacBookAir7,2"), + }, + }, + { + .callback = video_detect_force_native, + /* Apple MacBook Air 9,1 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "MacBookAir9,1"), + }, + }, + { + .callback = video_detect_force_native, + /* Apple MacBook Pro 9,2 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro9,2"), + }, + }, + { + .callback = video_detect_force_native, + /* Apple MacBook Pro 11,2 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro11,2"), }, }, { /* https://bugzilla.redhat.com/show_bug.cgi?id=1217249 */ .callback = video_detect_force_native, - .ident = "Apple MacBook Pro 12,1", + /* Apple MacBook Pro 12,1 */ .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro12,1"), @@ -305,7 +592,23 @@ static const struct dmi_system_id video_detect_dmi_table[] = { }, { .callback = video_detect_force_native, - .ident = "Dell Vostro V131", + /* Apple MacBook Pro 16,2 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro16,2"), + }, + }, + { + .callback = video_detect_force_native, + /* Dell Inspiron N4010 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron N4010"), + }, + }, + { + .callback = video_detect_force_native, + /* Dell Vostro V131 */ .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "Vostro V131"), @@ -314,7 +617,7 @@ static const struct dmi_system_id video_detect_dmi_table[] = { { /* https://bugzilla.redhat.com/show_bug.cgi?id=1123661 */ .callback = video_detect_force_native, - .ident = "Dell XPS 17 L702X", + /* Dell XPS 17 L702X */ .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "Dell System XPS L702X"), @@ -322,60 +625,366 @@ static const struct dmi_system_id video_detect_dmi_table[] = { }, { .callback = video_detect_force_native, - .ident = "Dell Precision 7510", + /* Dell Precision 7510 */ .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "Precision 7510"), }, }, { - .callback = video_detect_force_none, - .ident = "Dell OptiPlex 9020M", + .callback = video_detect_force_native, + /* Dell Studio 1569 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Studio 1569"), + }, + }, + { + .callback = video_detect_force_native, + /* Acer Aspire 3830TG */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 3830TG"), + }, + }, + { + .callback = video_detect_force_native, + /* Acer Aspire 4810T */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 4810T"), + }, + }, + { + 
.callback = video_detect_force_native, + /* Acer Aspire 5738z */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5738"), + DMI_MATCH(DMI_BOARD_NAME, "JV50"), + }, + }, + { + /* https://bugzilla.redhat.com/show_bug.cgi?id=1012674 */ + .callback = video_detect_force_native, + /* Acer Aspire 5741 */ + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5741"), + }, + }, + { + /* https://bugzilla.kernel.org/show_bug.cgi?id=42993 */ + .callback = video_detect_force_native, + /* Acer Aspire 5750 */ + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5750"), + }, + }, + { + /* https://bugzilla.kernel.org/show_bug.cgi?id=42833 */ + .callback = video_detect_force_native, + /* Acer Extensa 5235 */ + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "Extensa 5235"), + }, + }, + { + .callback = video_detect_force_native, + /* Acer TravelMate 4750 */ + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 4750"), + }, + }, + { + /* https://bugzilla.kernel.org/show_bug.cgi?id=207835 */ + .callback = video_detect_force_native, + /* Acer TravelMate 5735Z */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 5735Z"), + DMI_MATCH(DMI_BOARD_NAME, "BA51_MV"), + }, + }, + { + /* https://bugzilla.kernel.org/show_bug.cgi?id=36322 */ + .callback = video_detect_force_native, + /* Acer TravelMate 5760 */ + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 5760"), + }, + }, + { + .callback = video_detect_force_native, + /* ASUSTeK COMPUTER INC. GA401 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "GA401"), + }, + }, + { + .callback = video_detect_force_native, + /* ASUSTeK COMPUTER INC. GA502 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "GA502"), + }, + }, + { + .callback = video_detect_force_native, + /* ASUSTeK COMPUTER INC. 
GA503 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "GA503"), + }, + }, + { + .callback = video_detect_force_native, + /* Asus U46E */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "U46E"), + }, + }, + { + .callback = video_detect_force_native, + /* Asus UX303UB */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "UX303UB"), + }, + }, + { + .callback = video_detect_force_native, + /* HP EliteBook 8460p */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), + DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook 8460p"), + }, + }, + { + .callback = video_detect_force_native, + /* HP Pavilion g6-1d80nr / B4U19UA */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), + DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion g6 Notebook PC"), + DMI_MATCH(DMI_PRODUCT_SKU, "B4U19UA"), + }, + }, + { + .callback = video_detect_force_native, + /* Samsung N150P */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."), + DMI_MATCH(DMI_PRODUCT_NAME, "N150P"), + DMI_MATCH(DMI_BOARD_NAME, "N150P"), + }, + }, + { + .callback = video_detect_force_native, + /* Samsung N145P/N250P/N260P */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."), + DMI_MATCH(DMI_PRODUCT_NAME, "N145P/N250P/N260P"), + DMI_MATCH(DMI_BOARD_NAME, "N145P/N250P/N260P"), + }, + }, + { + .callback = video_detect_force_native, + /* Samsung N250P */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."), + DMI_MATCH(DMI_PRODUCT_NAME, "N250P"), + DMI_MATCH(DMI_BOARD_NAME, "N250P"), + }, + }, + { + /* https://bugzilla.kernel.org/show_bug.cgi?id=202401 */ + .callback = video_detect_force_native, + /* Sony Vaio VPCEH3U1E */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), + DMI_MATCH(DMI_PRODUCT_NAME, "VPCEH3U1E"), + }, + }, + { + .callback = video_detect_force_native, + /* Sony Vaio VPCY11S1E */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), + DMI_MATCH(DMI_PRODUCT_NAME, "VPCY11S1E"), + }, + }, + + /* + * These Toshibas have a broken acpi-video interface for brightness + * control. They also have an issue where the panel is off after + * suspend until a special firmware call is made to turn it back + * on. This is handled by the toshiba_acpi kernel module, so that + * module must be enabled for these models to work correctly. + */ + { + /* https://bugzilla.kernel.org/show_bug.cgi?id=21012 */ + .callback = video_detect_force_native, + /* Toshiba Portégé R700 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), + DMI_MATCH(DMI_PRODUCT_NAME, "PORTEGE R700"), + }, + }, + { + /* Portégé: https://bugs.freedesktop.org/show_bug.cgi?id=82634 */ + /* Satellite: https://bugzilla.kernel.org/show_bug.cgi?id=21012 */ + .callback = video_detect_force_native, + /* Toshiba Satellite/Portégé R830 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), + DMI_MATCH(DMI_PRODUCT_NAME, "R830"), + }, + }, + { + .callback = video_detect_force_native, + /* Toshiba Satellite/Portégé Z830 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), + DMI_MATCH(DMI_PRODUCT_NAME, "Z830"), + }, + }, + + /* + * Dell AIO (All in Ones) which advertise an UART attached backlight + * controller board in their ACPI tables (and may even have one), but + * which need native backlight control nevertheless. 
+ */ + { + /* https://github.com/zabbly/linux/issues/26 */ + .callback = video_detect_force_native, + /* Dell OptiPlex 5480 AIO */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 5480 AIO"), + }, + }, + { + /* https://bugzilla.redhat.com/show_bug.cgi?id=2303936 */ + .callback = video_detect_force_native, + /* Dell OptiPlex 7760 AIO */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 7760 AIO"), + }, + }, + + /* + * Models which have nvidia-ec-wmi support, but should not use it. + * Note this indicates a likely firmware bug on these models and should + * be revisited if/when Linux gets support for dynamic mux mode. + */ + { + .callback = video_detect_force_native, + /* Dell G15 5515 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Dell G15 5515"), + }, + }, + { + .callback = video_detect_force_native, .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 9020M"), + DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 15 3535"), + }, + }, + + /* + * x86 android tablets which directly control the backlight through + * an external backlight controller, typically TI's LP8557. + * The backlight is directly controlled by the lp855x driver on these. + * This setup means that neither i915's native nor acpi_video backlight + * control works. Add a "vendor" quirk to disable both. Note these + * devices do not use vendor control in the typical meaning of + * vendor specific SMBIOS or ACPI calls being used. + */ + { + .callback = video_detect_force_vendor, + /* Lenovo Yoga Book X90F / X90L */ + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Intel Corporation"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "CHERRYVIEW D1 PLATFORM"), + DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "YETI-11"), + }, + }, + { + .callback = video_detect_force_vendor, + /* + * Lenovo Yoga Tablet 2 830F/L or 1050F/L (The 8" and 10" + * Lenovo Yoga Tablet 2 use the same mainboard) + */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Intel Corp."), + DMI_MATCH(DMI_PRODUCT_NAME, "VALLEYVIEW C0 PLATFORM"), + DMI_MATCH(DMI_BOARD_NAME, "BYT-T FFD8"), + /* Partial match on beginning of BIOS version */ + DMI_MATCH(DMI_BIOS_VERSION, "BLADE_21"), + }, + }, + { + .callback = video_detect_force_vendor, + /* Lenovo Yoga Tab 3 Pro YT3-X90F */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"), + DMI_MATCH(DMI_PRODUCT_VERSION, "Blade3-10A-001"), + }, + }, + { + .callback = video_detect_force_vendor, + /* Xiaomi Mi Pad 2 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Xiaomi Inc"), + DMI_MATCH(DMI_PRODUCT_NAME, "Mipad2"), + }, + }, + /* https://gitlab.freedesktop.org/drm/amd/-/issues/4512 */ + { + .callback = video_detect_force_native, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "82K8"), }, }, { }, }; -/* This uses a workqueue to avoid various locking ordering issues */ -static void acpi_video_backlight_notify_work(struct work_struct *work) +static bool google_cros_ec_present(void) { - if (acpi_video_get_backlight_type() != acpi_backlight_video) - acpi_video_unregister_backlight(); + return acpi_dev_found("GOOG0004") || acpi_dev_found("GOOG000C"); } -static int acpi_video_backlight_notify(struct notifier_block *nb, - unsigned long val, void *bd) +/* + * Windows 8 and newer no longer use the ACPI video interface, so it often + * does not work. So on win8+ systems prefer native brightness control. 
 + * Chromebooks should always prefer native backlight control. + */ +static bool prefer_native_over_acpi_video(void) { - struct backlight_device *backlight = bd; - - /* A raw bl registering may change video -> native */ - if (backlight->props.type == BACKLIGHT_RAW && - val == BACKLIGHT_REGISTERED) - schedule_work(&backlight_notify_work); - - return NOTIFY_OK; + return acpi_osi_is_win8() || google_cros_ec_present(); } /* * Determine which type of backlight interface to use on this system, * First check cmdline, then dmi quirks, then do autodetect. - * - * The autodetect order is: - * 1) Is the acpi-video backlight interface supported -> - * no, use a vendor interface - * 2) Is this a win8 "ready" BIOS and do we have a native interface -> - * yes, use a native interface - * 3) Else use the acpi-video interface - * - * Arguably the native on win8 check should be done first, but that would - * be a behavior change, which may causes issues. */ -enum acpi_backlight_type acpi_video_get_backlight_type(void) +enum acpi_backlight_type __acpi_video_get_backlight_type(bool native, bool *auto_detect) { static DEFINE_MUTEX(init_mutex); + static bool nvidia_wmi_ec_present; + static bool apple_gmux_present; + static bool dell_uart_present; + static bool native_available; static bool init_done; static long video_caps; @@ -387,48 +996,73 @@ enum acpi_backlight_type acpi_video_get_backlight_type(void) acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, ACPI_UINT32_MAX, find_video, NULL, &video_caps, NULL); - INIT_WORK(&backlight_notify_work, - acpi_video_backlight_notify_work); - backlight_nb.notifier_call = acpi_video_backlight_notify; - backlight_nb.priority = 0; - if (backlight_register_notifier(&backlight_nb) == 0) - backlight_notifier_registered = true; + nvidia_wmi_ec_present = nvidia_wmi_ec_supported(); + apple_gmux_present = apple_gmux_detect(NULL, NULL); + dell_uart_present = acpi_dev_present("DELL0501", NULL, -1); init_done = true; } + if (native) + native_available = true; mutex_unlock(&init_mutex); + if (auto_detect) + *auto_detect = false; + + /* + * The below heuristics / detection steps are in order of descending + * precedence. The commandline takes precedence over anything else. + */ if (acpi_backlight_cmdline != acpi_backlight_undef) return acpi_backlight_cmdline; + /* DMI quirks override any autodetection. */ if (acpi_backlight_dmi != acpi_backlight_undef) return acpi_backlight_dmi; - if (!(video_caps & ACPI_VIDEO_BACKLIGHT)) - return acpi_backlight_vendor; + if (auto_detect) + *auto_detect = true; - if (acpi_osi_is_win8() && backlight_device_get_by_type(BACKLIGHT_RAW)) - return acpi_backlight_native; + /* Special cases such as nvidia_wmi_ec and apple gmux. */ + if (nvidia_wmi_ec_present) + return acpi_backlight_nvidia_wmi_ec; - return acpi_backlight_video; -} -EXPORT_SYMBOL(acpi_video_get_backlight_type); + if (apple_gmux_present) + return acpi_backlight_apple_gmux; -/* - * Set the preferred backlight interface type based on DMI info. 
- * This function allows DMI blacklists to be implemented by external - * platform drivers instead of putting a big blacklist in video_detect.c - */ -void acpi_video_set_dmi_backlight_type(enum acpi_backlight_type type) -{ - acpi_backlight_dmi = type; - /* Remove acpi-video backlight interface if it is no longer desired */ - if (acpi_video_get_backlight_type() != acpi_backlight_video) - acpi_video_unregister_backlight(); -} -EXPORT_SYMBOL(acpi_video_set_dmi_backlight_type); + if (dell_uart_present) + return acpi_backlight_dell_uart; -void __exit acpi_video_detect_exit(void) -{ - if (backlight_notifier_registered) - backlight_unregister_notifier(&backlight_nb); + /* Use ACPI video if available, except when native should be preferred. */ + if ((video_caps & ACPI_VIDEO_BACKLIGHT) && + !(native_available && prefer_native_over_acpi_video())) + return acpi_backlight_video; + + /* Use native if available */ + if (native_available) + return acpi_backlight_native; + + /* + * The vendor specific BIOS interfaces are only necessary for + * laptops from before ~2008. + * + * For laptops from ~2008 till ~2023 this point is never reached + * because on those (video_caps & ACPI_VIDEO_BACKLIGHT) above is true. + * + * Laptops from after ~2023 no longer support ACPI_VIDEO_BACKLIGHT, + * if this point is reached on those, this likely means that + * the GPU kms driver which sets native_available has not loaded yet. + * + * Returning acpi_backlight_vendor in this case is known to sometimes + * cause a non working vendor specific /sys/class/backlight device to + * get registered. + * + * Return acpi_backlight_none on laptops with ACPI tables written + * for Windows 8 (laptops from after ~2012) to avoid this problem. + */ + if (acpi_osi_is_win8()) + return acpi_backlight_none; + + /* No ACPI video/native (old hw), use vendor specific fw methods. */ + return acpi_backlight_vendor; } +EXPORT_SYMBOL(__acpi_video_get_backlight_type); diff --git a/drivers/acpi/viot.c b/drivers/acpi/viot.c new file mode 100644 index 000000000000..c13a20365c2c --- /dev/null +++ b/drivers/acpi/viot.c @@ -0,0 +1,373 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Virtual I/O topology + * + * The Virtual I/O Translation Table (VIOT) describes the topology of + * para-virtual IOMMUs and the endpoints they manage. The OS uses it to + * initialize devices in the right order, preventing endpoints from issuing DMA + * before their IOMMU is ready. + * + * When binding a driver to a device, before calling the device driver's probe() + * method, the driver infrastructure calls dma_configure(). At that point the + * VIOT driver looks for an IOMMU associated to the device in the VIOT table. + * If an IOMMU exists and has been initialized, the VIOT driver initializes the + * device's IOMMU fwspec, allowing the DMA infrastructure to invoke the IOMMU + * ops when the device driver configures DMA mappings. If an IOMMU exists and + * hasn't yet been initialized, VIOT returns -EPROBE_DEFER to postpone probing + * the device until the IOMMU is available. 
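+ * + * A rough sketch of that flow (illustrative only; the dma_configure() glue is simplified here): + * + *	ret = viot_iommu_configure(dev);	// look the endpoint up in the VIOT + *	if (ret == -EPROBE_DEFER) + *		return ret;			// IOMMU not probed yet, retry later + *	// otherwise the IOMMU fwspec is initialized, or the device has no IOMMU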
+ */ +#define pr_fmt(fmt) "ACPI: VIOT: " fmt + +#include <linux/acpi_viot.h> +#include <linux/iommu.h> +#include <linux/list.h> +#include <linux/pci.h> +#include <linux/platform_device.h> +#include <linux/property.h> + +struct viot_iommu { + /* Node offset within the table */ + unsigned int offset; + struct fwnode_handle *fwnode; + struct list_head list; +}; + +struct viot_endpoint { + union { + /* PCI range */ + struct { + u16 segment_start; + u16 segment_end; + u16 bdf_start; + u16 bdf_end; + }; + /* MMIO */ + u64 address; + }; + u32 endpoint_id; + struct viot_iommu *viommu; + struct list_head list; +}; + +static struct acpi_table_viot *viot; +static LIST_HEAD(viot_iommus); +static LIST_HEAD(viot_pci_ranges); +static LIST_HEAD(viot_mmio_endpoints); + +static int __init viot_check_bounds(const struct acpi_viot_header *hdr) +{ + struct acpi_viot_header *start, *end, *hdr_end; + + start = ACPI_ADD_PTR(struct acpi_viot_header, viot, + max_t(size_t, sizeof(*viot), viot->node_offset)); + end = ACPI_ADD_PTR(struct acpi_viot_header, viot, viot->header.length); + hdr_end = ACPI_ADD_PTR(struct acpi_viot_header, hdr, sizeof(*hdr)); + + if (hdr < start || hdr_end > end) { + pr_err(FW_BUG "Node pointer overflows\n"); + return -EOVERFLOW; + } + if (hdr->length < sizeof(*hdr)) { + pr_err(FW_BUG "Empty node\n"); + return -EINVAL; + } + return 0; +} + +static int __init viot_get_pci_iommu_fwnode(struct viot_iommu *viommu, + u16 segment, u16 bdf) +{ + struct pci_dev *pdev; + struct fwnode_handle *fwnode; + + pdev = pci_get_domain_bus_and_slot(segment, PCI_BUS_NUM(bdf), + bdf & 0xff); + if (!pdev) { + pr_err("Could not find PCI IOMMU\n"); + return -ENODEV; + } + + fwnode = dev_fwnode(&pdev->dev); + if (!fwnode) { + /* + * PCI devices aren't necessarily described by ACPI. Create a + * fwnode so the IOMMU subsystem can identify this device. 
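+ * (The IOMMU core identifies an IOMMU instance by its fwnode, so a static ACPI fwnode is allocated below solely to serve as that handle.)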
+ */ + fwnode = acpi_alloc_fwnode_static(); + if (!fwnode) { + pci_dev_put(pdev); + return -ENOMEM; + } + set_primary_fwnode(&pdev->dev, fwnode); + } + viommu->fwnode = dev_fwnode(&pdev->dev); + pci_dev_put(pdev); + return 0; +} + +static int __init viot_get_mmio_iommu_fwnode(struct viot_iommu *viommu, + u64 address) +{ + struct acpi_device *adev; + struct resource res = { + .start = address, + .end = address, + .flags = IORESOURCE_MEM, + }; + + adev = acpi_resource_consumer(&res); + if (!adev) { + pr_err("Could not find MMIO IOMMU\n"); + return -EINVAL; + } + viommu->fwnode = &adev->fwnode; + return 0; +} + +static struct viot_iommu * __init viot_get_iommu(unsigned int offset) +{ + int ret; + struct viot_iommu *viommu; + struct acpi_viot_header *hdr = ACPI_ADD_PTR(struct acpi_viot_header, + viot, offset); + union { + struct acpi_viot_virtio_iommu_pci pci; + struct acpi_viot_virtio_iommu_mmio mmio; + } *node = (void *)hdr; + + list_for_each_entry(viommu, &viot_iommus, list) + if (viommu->offset == offset) + return viommu; + + if (viot_check_bounds(hdr)) + return NULL; + + viommu = kzalloc(sizeof(*viommu), GFP_KERNEL); + if (!viommu) + return NULL; + + viommu->offset = offset; + switch (hdr->type) { + case ACPI_VIOT_NODE_VIRTIO_IOMMU_PCI: + if (hdr->length < sizeof(node->pci)) + goto err_free; + + ret = viot_get_pci_iommu_fwnode(viommu, node->pci.segment, + node->pci.bdf); + break; + case ACPI_VIOT_NODE_VIRTIO_IOMMU_MMIO: + if (hdr->length < sizeof(node->mmio)) + goto err_free; + + ret = viot_get_mmio_iommu_fwnode(viommu, + node->mmio.base_address); + break; + default: + ret = -EINVAL; + } + if (ret) + goto err_free; + + list_add(&viommu->list, &viot_iommus); + return viommu; + +err_free: + kfree(viommu); + return NULL; +} + +static int __init viot_parse_node(const struct acpi_viot_header *hdr) +{ + int ret = -EINVAL; + struct list_head *list; + struct viot_endpoint *ep; + union { + struct acpi_viot_mmio mmio; + struct acpi_viot_pci_range pci; + } *node = (void *)hdr; + + if (viot_check_bounds(hdr)) + return -EINVAL; + + if (hdr->type == ACPI_VIOT_NODE_VIRTIO_IOMMU_PCI || + hdr->type == ACPI_VIOT_NODE_VIRTIO_IOMMU_MMIO) + return 0; + + ep = kzalloc(sizeof(*ep), GFP_KERNEL); + if (!ep) + return -ENOMEM; + + switch (hdr->type) { + case ACPI_VIOT_NODE_PCI_RANGE: + if (hdr->length < sizeof(node->pci)) { + pr_err(FW_BUG "Invalid PCI node size\n"); + goto err_free; + } + + ep->segment_start = node->pci.segment_start; + ep->segment_end = node->pci.segment_end; + ep->bdf_start = node->pci.bdf_start; + ep->bdf_end = node->pci.bdf_end; + ep->endpoint_id = node->pci.endpoint_start; + ep->viommu = viot_get_iommu(node->pci.output_node); + list = &viot_pci_ranges; + break; + case ACPI_VIOT_NODE_MMIO: + if (hdr->length < sizeof(node->mmio)) { + pr_err(FW_BUG "Invalid MMIO node size\n"); + goto err_free; + } + + ep->address = node->mmio.base_address; + ep->endpoint_id = node->mmio.endpoint; + ep->viommu = viot_get_iommu(node->mmio.output_node); + list = &viot_mmio_endpoints; + break; + default: + pr_warn("Unsupported node %x\n", hdr->type); + ret = 0; + goto err_free; + } + + if (!ep->viommu) { + pr_warn("No IOMMU node found\n"); + /* + * A future version of the table may use the node for other + * purposes. Keep parsing. + */ + ret = 0; + goto err_free; + } + + list_add(&ep->list, list); + return 0; + +err_free: + kfree(ep); + return ret; +} + +/** + * acpi_viot_early_init - Test the presence of VIOT and enable ACS + * + * If the VIOT does exist, ACS must be enabled. 
This cannot be + * done in acpi_viot_init() which is called after the bus scan + */ +void __init acpi_viot_early_init(void) +{ +#ifdef CONFIG_PCI + acpi_status status; + struct acpi_table_header *hdr; + + status = acpi_get_table(ACPI_SIG_VIOT, 0, &hdr); + if (ACPI_FAILURE(status)) + return; + pci_request_acs(); + acpi_put_table(hdr); +#endif +} + +/** + * acpi_viot_init - Parse the VIOT table + * + * Parse the VIOT table, prepare the list of endpoints to be used during DMA + * setup of devices. + */ +void __init acpi_viot_init(void) +{ + int i; + acpi_status status; + struct acpi_table_header *hdr; + struct acpi_viot_header *node; + + status = acpi_get_table(ACPI_SIG_VIOT, 0, &hdr); + if (ACPI_FAILURE(status)) { + if (status != AE_NOT_FOUND) { + const char *msg = acpi_format_exception(status); + + pr_err("Failed to get table, %s\n", msg); + } + return; + } + + viot = (void *)hdr; + + node = ACPI_ADD_PTR(struct acpi_viot_header, viot, viot->node_offset); + for (i = 0; i < viot->node_count; i++) { + if (viot_parse_node(node)) + return; + + node = ACPI_ADD_PTR(struct acpi_viot_header, node, + node->length); + } + + acpi_put_table(hdr); +} + +static int viot_dev_iommu_init(struct device *dev, struct viot_iommu *viommu, + u32 epid) +{ + if (!viommu || !IS_ENABLED(CONFIG_VIRTIO_IOMMU)) + return -ENODEV; + + /* We're not translating ourself */ + if (device_match_fwnode(dev, viommu->fwnode)) + return -EINVAL; + + return acpi_iommu_fwspec_init(dev, epid, viommu->fwnode); +} + +static int viot_pci_dev_iommu_init(struct pci_dev *pdev, u16 dev_id, void *data) +{ + u32 epid; + struct viot_endpoint *ep; + struct device *aliased_dev = data; + u32 domain_nr = pci_domain_nr(pdev->bus); + + list_for_each_entry(ep, &viot_pci_ranges, list) { + if (domain_nr >= ep->segment_start && + domain_nr <= ep->segment_end && + dev_id >= ep->bdf_start && + dev_id <= ep->bdf_end) { + epid = ((domain_nr - ep->segment_start) << 16) + + dev_id - ep->bdf_start + ep->endpoint_id; + + return viot_dev_iommu_init(aliased_dev, ep->viommu, + epid); + } + } + return -ENODEV; +} + +static int viot_mmio_dev_iommu_init(struct platform_device *pdev) +{ + struct resource *mem; + struct viot_endpoint *ep; + + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!mem) + return -ENODEV; + + list_for_each_entry(ep, &viot_mmio_endpoints, list) { + if (ep->address == mem->start) + return viot_dev_iommu_init(&pdev->dev, ep->viommu, + ep->endpoint_id); + } + return -ENODEV; +} + +/** + * viot_iommu_configure - Setup IOMMU ops for an endpoint described by VIOT + * @dev: the endpoint + * + * Return: 0 on success, <0 on failure + */ +int viot_iommu_configure(struct device *dev) +{ + if (dev_is_pci(dev)) + return pci_for_each_dma_alias(to_pci_dev(dev), + viot_pci_dev_iommu_init, dev); + else if (dev_is_platform(dev)) + return viot_mmio_dev_iommu_init(to_platform_device(dev)); + return -ENODEV; +} diff --git a/drivers/acpi/wakeup.c b/drivers/acpi/wakeup.c index 9614126bf56e..ff6dc957bc11 100644 --- a/drivers/acpi/wakeup.c +++ b/drivers/acpi/wakeup.c @@ -12,13 +12,20 @@ #include "internal.h" #include "sleep.h" +struct acpi_wakeup_handler { + struct list_head list_node; + bool (*wakeup)(void *context); + void *context; +}; + +static LIST_HEAD(acpi_wakeup_handler_head); +static DEFINE_MUTEX(acpi_wakeup_handler_mutex); + /* * We didn't lock acpi_device_lock in the file, because it invokes oops in * suspend/resume and isn't really required as this is called in S-state. 
At * that time, there is no device hotplug **/ -#define _COMPONENT ACPI_SYSTEM_COMPONENT -ACPI_MODULE_NAME("wakeup_devices") /** * acpi_enable_wakeup_devices - Enable wake-up device GPEs. @@ -30,16 +37,14 @@ ACPI_MODULE_NAME("wakeup_devices") */ void acpi_enable_wakeup_devices(u8 sleep_state) { - struct list_head *node, *next; - - list_for_each_safe(node, next, &acpi_wakeup_device_list) { - struct acpi_device *dev = - container_of(node, struct acpi_device, wakeup_list); + struct acpi_device *dev, *tmp; + list_for_each_entry_safe(dev, tmp, &acpi_wakeup_device_list, + wakeup_list) { if (!dev->wakeup.flags.valid - || sleep_state > (u32) dev->wakeup.sleep_state + || sleep_state > dev->wakeup.sleep_state || !(device_may_wakeup(&dev->dev) - || dev->wakeup.prepare_count)) + || dev->wakeup.prepare_count)) continue; if (device_may_wakeup(&dev->dev)) @@ -57,16 +62,14 @@ void acpi_enable_wakeup_devices(u8 sleep_state) */ void acpi_disable_wakeup_devices(u8 sleep_state) { - struct list_head *node, *next; - - list_for_each_safe(node, next, &acpi_wakeup_device_list) { - struct acpi_device *dev = - container_of(node, struct acpi_device, wakeup_list); + struct acpi_device *dev, *tmp; + list_for_each_entry_safe(dev, tmp, &acpi_wakeup_device_list, + wakeup_list) { if (!dev->wakeup.flags.valid - || sleep_state > (u32) dev->wakeup.sleep_state + || sleep_state > dev->wakeup.sleep_state || !(device_may_wakeup(&dev->dev) - || dev->wakeup.prepare_count)) + || dev->wakeup.prepare_count)) continue; acpi_set_gpe_wake_mask(dev->wakeup.gpe_device, dev->wakeup.gpe_number, @@ -79,13 +82,11 @@ void acpi_disable_wakeup_devices(u8 sleep_state) int __init acpi_wakeup_device_init(void) { - struct list_head *node, *next; + struct acpi_device *dev, *tmp; mutex_lock(&acpi_device_lock); - list_for_each_safe(node, next, &acpi_wakeup_device_list) { - struct acpi_device *dev = container_of(node, - struct acpi_device, - wakeup_list); + list_for_each_entry_safe(dev, tmp, &acpi_wakeup_device_list, + wakeup_list) { if (device_can_wakeup(&dev->dev)) { /* Button GPEs are supposed to be always enabled. */ acpi_enable_gpe(dev->wakeup.gpe_device, @@ -96,3 +97,75 @@ int __init acpi_wakeup_device_init(void) mutex_unlock(&acpi_device_lock); return 0; } + +/** + * acpi_register_wakeup_handler - Register wakeup handler + * @wake_irq: The IRQ through which the device may receive wakeups + * @wakeup: Wakeup-handler to call when the SCI has triggered a wakeup + * @context: Context to pass to the handler when calling it + * + * Drivers which may share an IRQ with the SCI can use this to register + * a handler which returns true when the device they are managing wants + * to trigger a wakeup. + */ +int acpi_register_wakeup_handler(int wake_irq, bool (*wakeup)(void *context), + void *context) +{ + struct acpi_wakeup_handler *handler; + + /* + * If the device is not sharing its IRQ with the SCI, there is no + * need to register the handler. 
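+ * + * A driver that does share the SCI IRQ would typically call this from its probe path, roughly as follows (a sketch; the foo_* names are made up): + * + *	ret = acpi_register_wakeup_handler(foo->irq, foo_wakeup, foo); + *	if (ret) + *		return ret; + * + * and pair it with acpi_unregister_wakeup_handler(foo_wakeup, foo) on removal.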
+ */ + if (!acpi_sci_irq_valid() || wake_irq != acpi_sci_irq) + return 0; + + handler = kmalloc(sizeof(*handler), GFP_KERNEL); + if (!handler) + return -ENOMEM; + + handler->wakeup = wakeup; + handler->context = context; + + mutex_lock(&acpi_wakeup_handler_mutex); + list_add(&handler->list_node, &acpi_wakeup_handler_head); + mutex_unlock(&acpi_wakeup_handler_mutex); + + return 0; +} +EXPORT_SYMBOL_GPL(acpi_register_wakeup_handler); + +/** + * acpi_unregister_wakeup_handler - Unregister wakeup handler + * @wakeup: Wakeup-handler passed to acpi_register_wakeup_handler() + * @context: Context passed to acpi_register_wakeup_handler() + */ +void acpi_unregister_wakeup_handler(bool (*wakeup)(void *context), + void *context) +{ + struct acpi_wakeup_handler *handler; + + mutex_lock(&acpi_wakeup_handler_mutex); + list_for_each_entry(handler, &acpi_wakeup_handler_head, list_node) { + if (handler->wakeup == wakeup && handler->context == context) { + list_del(&handler->list_node); + kfree(handler); + break; + } + } + mutex_unlock(&acpi_wakeup_handler_mutex); +} +EXPORT_SYMBOL_GPL(acpi_unregister_wakeup_handler); + +bool acpi_check_wakeup_handlers(void) +{ + struct acpi_wakeup_handler *handler; + + /* No need to lock, nothing else is running when we're called. */ + list_for_each_entry(handler, &acpi_wakeup_handler_head, list_node) { + if (handler->wakeup(handler->context)) + return true; + } + + return false; +} diff --git a/drivers/acpi/x86/Makefile b/drivers/acpi/x86/Makefile new file mode 100644 index 000000000000..63c99509ed9d --- /dev/null +++ b/drivers/acpi/x86/Makefile @@ -0,0 +1,8 @@ +obj-$(CONFIG_ACPI) += acpi-x86.o +acpi-x86-y += apple.o +acpi-x86-y += cmos_rtc.o +acpi-x86-$(CONFIG_PCI) += lpss.o +acpi-x86-y += s2idle.o +acpi-x86-y += utils.o + +obj-$(CONFIG_X86) += blacklist.o diff --git a/drivers/acpi/x86/apple.c b/drivers/acpi/x86/apple.c index b7c98ff82d78..45d0f16f374f 100644 --- a/drivers/acpi/x86/apple.c +++ b/drivers/acpi/x86/apple.c @@ -1,16 +1,14 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * apple.c - Apple ACPI quirks * Copyright (C) 2017 Lukas Wunner <lukas@wunner.de> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License (version 2) as - * published by the Free Software Foundation. 
*/ #include <linux/acpi.h> #include <linux/bitmap.h> #include <linux/platform_data/x86/apple.h> #include <linux/uuid.h> +#include "../internal.h" /* Apple _DSM device properties GUID */ static const guid_t apple_prp_guid = @@ -73,13 +71,16 @@ void acpi_extract_apple_properties(struct acpi_device *adev) if ( key->type != ACPI_TYPE_STRING || (val->type != ACPI_TYPE_INTEGER && - val->type != ACPI_TYPE_BUFFER)) + val->type != ACPI_TYPE_BUFFER && + val->type != ACPI_TYPE_STRING)) continue; /* skip invalid properties */ __set_bit(i, valid); newsize += key->string.length + 1; if ( val->type == ACPI_TYPE_BUFFER) newsize += val->buffer.length; + else if (val->type == ACPI_TYPE_STRING) + newsize += val->string.length + 1; } numvalid = bitmap_weight(valid, numprops); @@ -121,6 +122,12 @@ void acpi_extract_apple_properties(struct acpi_device *adev) newprops[v].type = val->type; if (val->type == ACPI_TYPE_INTEGER) { newprops[v].integer.value = val->integer.value; + } else if (val->type == ACPI_TYPE_STRING) { + newprops[v].string.length = val->string.length; + newprops[v].string.pointer = free_space; + memcpy(free_space, val->string.pointer, + val->string.length); + free_space += val->string.length + 1; } else { newprops[v].buffer.length = val->buffer.length; newprops[v].buffer.pointer = free_space; diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/x86/blacklist.c index 995c4d8922b1..55214d0a12b1 100644 --- a/drivers/acpi/blacklist.c +++ b/drivers/acpi/x86/blacklist.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /* * blacklist.c * @@ -7,30 +8,20 @@ * * Copyright (C) 2004 Len Brown <len.brown@intel.com> * Copyright (C) 2002 Andy Grover <andrew.grover@intel.com> - * - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or (at - * your option) any later version. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ +#define pr_fmt(fmt) "ACPI: " fmt + #include <linux/kernel.h> #include <linux/init.h> #include <linux/acpi.h> #include <linux/dmi.h> -#include "internal.h" +#include "../internal.h" +#ifdef CONFIG_DMI static const struct dmi_system_id acpi_rev_dmi_table[] __initconst; +#endif /* * POLICY: If *anything* doesn't work, put it on the blacklist. @@ -60,12 +51,12 @@ int __init acpi_blacklisted(void) i = acpi_match_platform_list(acpi_blacklist); if (i >= 0) { - pr_err(PREFIX "Vendor \"%6.6s\" System \"%8.8s\" Revision 0x%x has a known ACPI BIOS problem.\n", + pr_err("Vendor \"%6.6s\" System \"%8.8s\" Revision 0x%x has a known ACPI BIOS problem.\n", acpi_blacklist[i].oem_id, acpi_blacklist[i].oem_table_id, acpi_blacklist[i].oem_revision); - pr_err(PREFIX "Reason: %s. This is a %s error\n", + pr_err("Reason: %s. This is a %s error\n", acpi_blacklist[i].reason, (acpi_blacklist[i].data ? 
"non-recoverable" : "recoverable")); @@ -74,7 +65,9 @@ int __init acpi_blacklisted(void) } (void)early_acpi_osi_init(); +#ifdef CONFIG_DMI dmi_check_system(acpi_rev_dmi_table); +#endif return blacklisted; } @@ -82,8 +75,7 @@ int __init acpi_blacklisted(void) #ifdef CONFIG_ACPI_REV_OVERRIDE_POSSIBLE static int __init dmi_enable_rev_override(const struct dmi_system_id *d) { - printk(KERN_NOTICE PREFIX "DMI detected: %s (force ACPI _REV to 5)\n", - d->ident); + pr_notice("DMI detected: %s (force ACPI _REV to 5)\n", d->ident); acpi_rev_override_setup(NULL); return 0; } diff --git a/drivers/acpi/acpi_cmos_rtc.c b/drivers/acpi/x86/cmos_rtc.c index 0980a133916f..51643ff6fe5f 100644 --- a/drivers/acpi/acpi_cmos_rtc.c +++ b/drivers/acpi/x86/cmos_rtc.c @@ -1,14 +1,13 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * ACPI support for CMOS RTC Address Space access * * Copyright (C) 2013, Intel Corporation * Authors: Lan Tianyu <tianyu.lan@intel.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. */ +#define pr_fmt(fmt) "ACPI: " fmt + #include <linux/acpi.h> #include <linux/device.h> #include <linux/err.h> @@ -16,9 +15,7 @@ #include <linux/module.h> #include <linux/mc146818rtc.h> -#include "internal.h" - -ACPI_MODULE_NAME("cmos rtc"); +#include "../internal.h" static const struct acpi_device_id acpi_cmos_rtc_ids[] = { { "PNP0B00" }, @@ -54,34 +51,45 @@ acpi_cmos_rtc_space_handler(u32 function, acpi_physical_address address, return AE_OK; } -static int acpi_install_cmos_rtc_space_handler(struct acpi_device *adev, - const struct acpi_device_id *id) +int acpi_install_cmos_rtc_space_handler(acpi_handle handle) { acpi_status status; - status = acpi_install_address_space_handler(adev->handle, + status = acpi_install_address_space_handler(handle, ACPI_ADR_SPACE_CMOS, &acpi_cmos_rtc_space_handler, NULL, NULL); if (ACPI_FAILURE(status)) { - pr_err(PREFIX "Error installing CMOS-RTC region handler\n"); + pr_err("Error installing CMOS-RTC region handler\n"); return -ENODEV; } return 1; } +EXPORT_SYMBOL_GPL(acpi_install_cmos_rtc_space_handler); -static void acpi_remove_cmos_rtc_space_handler(struct acpi_device *adev) +void acpi_remove_cmos_rtc_space_handler(acpi_handle handle) { - if (ACPI_FAILURE(acpi_remove_address_space_handler(adev->handle, + if (ACPI_FAILURE(acpi_remove_address_space_handler(handle, ACPI_ADR_SPACE_CMOS, &acpi_cmos_rtc_space_handler))) - pr_err(PREFIX "Error removing CMOS-RTC region handler\n"); + pr_err("Error removing CMOS-RTC region handler\n"); +} +EXPORT_SYMBOL_GPL(acpi_remove_cmos_rtc_space_handler); + +static int acpi_cmos_rtc_attach_handler(struct acpi_device *adev, const struct acpi_device_id *id) +{ + return acpi_install_cmos_rtc_space_handler(adev->handle); +} + +static void acpi_cmos_rtc_detach_handler(struct acpi_device *adev) +{ + acpi_remove_cmos_rtc_space_handler(adev->handle); } static struct acpi_scan_handler cmos_rtc_handler = { .ids = acpi_cmos_rtc_ids, - .attach = acpi_install_cmos_rtc_space_handler, - .detach = acpi_remove_cmos_rtc_space_handler, + .attach = acpi_cmos_rtc_attach_handler, + .detach = acpi_cmos_rtc_detach_handler, }; void __init acpi_cmos_rtc_init(void) diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/x86/lpss.c index 5f94c35d165f..1dcb80ab0d23 100644 --- a/drivers/acpi/acpi_lpss.c +++ b/drivers/acpi/x86/lpss.c @@ -1,34 +1,31 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * ACPI support for Intel Lynxpoint 
LPSS. * * Copyright (C) 2013, Intel Corporation * Authors: Mika Westerberg <mika.westerberg@linux.intel.com> * Rafael J. Wysocki <rafael.j.wysocki@intel.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. */ #include <linux/acpi.h> #include <linux/clkdev.h> #include <linux/clk-provider.h> +#include <linux/dmi.h> #include <linux/err.h> #include <linux/io.h> #include <linux/mutex.h> #include <linux/pci.h> #include <linux/platform_device.h> -#include <linux/platform_data/clk-lpss.h> +#include <linux/platform_data/x86/clk-lpss.h> #include <linux/platform_data/x86/pmc_atom.h> #include <linux/pm_domain.h> #include <linux/pm_runtime.h> #include <linux/pwm.h> +#include <linux/pxa2xx_ssp.h> #include <linux/suspend.h> #include <linux/delay.h> -#include "internal.h" - -ACPI_MODULE_NAME("acpi_lpss"); +#include "../internal.h" #ifdef CONFIG_X86_INTEL_LPSS @@ -69,11 +66,15 @@ ACPI_MODULE_NAME("acpi_lpss"); #define LPSS_CLK_DIVIDER BIT(2) #define LPSS_LTR BIT(3) #define LPSS_SAVE_CTX BIT(4) -#define LPSS_NO_D3_DELAY BIT(5) - -/* Crystal Cove PMIC shares same ACPI ID between different platforms */ -#define BYT_CRC_HRV 2 -#define CHT_CRC_HRV 3 +/* + * For some devices the DSDT AML code for another device turns off the device + * before our suspend handler runs, causing us to read/save all 1-s (0xffffffff) + * as ctx register values. + * Luckily these devices always use the same ctx register values, so we can + * work around this by saving the ctx registers once on activation. + */ +#define LPSS_SAVE_CTX_ONCE BIT(5) +#define LPSS_NO_D3_DELAY BIT(6) struct lpss_private_data; @@ -82,7 +83,7 @@ struct lpss_device_desc { const char *clk_con_id; unsigned int prv_offset; size_t prv_size_override; - struct property_entry *properties; + const struct property_entry *properties; void (*setup)(struct lpss_private_data *pdata); bool resume_from_noirq; }; @@ -160,19 +161,14 @@ static void lpss_deassert_reset(struct lpss_private_data *pdata) */ static struct pwm_lookup byt_pwm_lookup[] = { PWM_LOOKUP_WITH_MODULE("80860F09:00", 0, "0000:00:02.0", - "pwm_backlight", 0, PWM_POLARITY_NORMAL, + "pwm_soc_backlight", 0, PWM_POLARITY_NORMAL, "pwm-lpss-platform"), }; static void byt_pwm_setup(struct lpss_private_data *pdata) { - struct acpi_device *adev = pdata->adev; - /* Only call pwm_add_table for the first PWM controller */ - if (!adev->pnp.unique_id || strcmp(adev->pnp.unique_id, "1")) - return; - - if (!acpi_dev_present("INT33FD", NULL, BYT_CRC_HRV)) + if (acpi_dev_uid_match(pdata->adev, 1)) pwm_add_table(byt_pwm_lookup, ARRAY_SIZE(byt_pwm_lookup)); } @@ -180,20 +176,18 @@ static void byt_pwm_setup(struct lpss_private_data *pdata) static void byt_i2c_setup(struct lpss_private_data *pdata) { - const char *uid_str = acpi_device_uid(pdata->adev); acpi_handle handle = pdata->adev->handle; unsigned long long shared_host = 0; acpi_status status; - long uid = 0; - - /* Expected to always be true, but better safe then sorry */ - if (uid_str) - uid = simple_strtol(uid_str, NULL, 10); - - /* Detect I2C bus shared with PUNIT and ignore its d3 status */ - status = acpi_evaluate_integer(handle, "_SEM", NULL, &shared_host); - if (ACPI_SUCCESS(status) && shared_host && uid) - pmc_atom_d3_mask &= ~(BIT_LPSS2_F1_I2C1 << (uid - 1)); + u64 uid; + + /* Expected to always be successful, but better safe than sorry */ + if (!acpi_dev_uid_to_integer(pdata->adev, &uid) && uid) { + /* Detect I2C bus 
shared with PUNIT and ignore its d3 status */ + status = acpi_evaluate_integer(handle, "_SEM", NULL, &shared_host); + if (ACPI_SUCCESS(status) && shared_host) + pmc_atom_d3_mask &= ~(BIT_LPSS2_F1_I2C1 << (uid - 1)); + } lpss_deassert_reset(pdata); @@ -203,31 +197,42 @@ static void byt_i2c_setup(struct lpss_private_data *pdata) writel(0, pdata->mmio_base + LPSS_I2C_ENABLE); } -/* BSW PWM used for backlight control by the i915 driver */ +/* + * BSW PWM1 is used for backlight control by the i915 driver + * BSW PWM2 is used for backlight control for fixed (etched into the glass) + * touch controls on some models. These touch-controls have specialized + * drivers which know they need the "pwm_soc_lpss_2" con-id. + */ static struct pwm_lookup bsw_pwm_lookup[] = { PWM_LOOKUP_WITH_MODULE("80862288:00", 0, "0000:00:02.0", - "pwm_backlight", 0, PWM_POLARITY_NORMAL, + "pwm_soc_backlight", 0, PWM_POLARITY_NORMAL, + "pwm-lpss-platform"), + PWM_LOOKUP_WITH_MODULE("80862289:00", 0, NULL, + "pwm_soc_lpss_2", 0, PWM_POLARITY_NORMAL, "pwm-lpss-platform"), }; static void bsw_pwm_setup(struct lpss_private_data *pdata) { - struct acpi_device *adev = pdata->adev; - /* Only call pwm_add_table for the first PWM controller */ - if (!adev->pnp.unique_id || strcmp(adev->pnp.unique_id, "1")) - return; - - pwm_add_table(bsw_pwm_lookup, ARRAY_SIZE(bsw_pwm_lookup)); + if (acpi_dev_uid_match(pdata->adev, 1)) + pwm_add_table(bsw_pwm_lookup, ARRAY_SIZE(bsw_pwm_lookup)); } -static const struct lpss_device_desc lpt_dev_desc = { - .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_LTR, +static const struct property_entry lpt_spi_properties[] = { + PROPERTY_ENTRY_U32("intel,spi-pxa2xx-type", LPSS_LPT_SSP), + { } +}; + +static const struct lpss_device_desc lpt_spi_dev_desc = { + .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_LTR + | LPSS_SAVE_CTX, .prv_offset = 0x800, + .properties = lpt_spi_properties, }; static const struct lpss_device_desc lpt_i2c_dev_desc = { - .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_LTR, + .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_LTR | LPSS_SAVE_CTX, .prv_offset = 0x800, }; @@ -239,7 +244,8 @@ static struct property_entry uart_properties[] = { }; static const struct lpss_device_desc lpt_uart_dev_desc = { - .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_LTR, + .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_LTR + | LPSS_SAVE_CTX, .clk_con_id = "baudclk", .prv_offset = 0x800, .setup = lpss_uart_setup, @@ -259,9 +265,16 @@ static const struct lpss_device_desc byt_pwm_dev_desc = { }; static const struct lpss_device_desc bsw_pwm_dev_desc = { - .flags = LPSS_SAVE_CTX | LPSS_NO_D3_DELAY, + .flags = LPSS_SAVE_CTX_ONCE | LPSS_NO_D3_DELAY, .prv_offset = 0x800, .setup = bsw_pwm_setup, + .resume_from_noirq = true, +}; + +static const struct lpss_device_desc bsw_pwm2_dev_desc = { + .flags = LPSS_SAVE_CTX_ONCE | LPSS_NO_D3_DELAY, + .prv_offset = 0x800, + .resume_from_noirq = true, }; static const struct lpss_device_desc byt_uart_dev_desc = { @@ -281,9 +294,15 @@ static const struct lpss_device_desc bsw_uart_dev_desc = { .properties = uart_properties, }; +static const struct property_entry byt_spi_properties[] = { + PROPERTY_ENTRY_U32("intel,spi-pxa2xx-type", LPSS_BYT_SSP), + { } +}; + static const struct lpss_device_desc byt_spi_dev_desc = { .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX, .prv_offset = 0x400, + .properties = byt_spi_properties, }; static const struct lpss_device_desc byt_sdio_dev_desc = { @@ -304,18 +323,23 @@ static const struct 
lpss_device_desc bsw_i2c_dev_desc = { .resume_from_noirq = true, }; +static const struct property_entry bsw_spi_properties[] = { + PROPERTY_ENTRY_U32("intel,spi-pxa2xx-type", LPSS_BSW_SSP), + PROPERTY_ENTRY_U32("num-cs", 2), + { } +}; + static const struct lpss_device_desc bsw_spi_dev_desc = { .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX | LPSS_NO_D3_DELAY, .prv_offset = 0x400, .setup = lpss_deassert_reset, + .properties = bsw_spi_properties, }; -#define ICPU(model) { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, } - static const struct x86_cpu_id lpss_cpu_ids[] = { - ICPU(INTEL_FAM6_ATOM_SILVERMONT), /* Valleyview, Bay Trail */ - ICPU(INTEL_FAM6_ATOM_AIRMONT), /* Braswell, Cherry Trail */ + X86_MATCH_VFM(INTEL_ATOM_SILVERMONT, NULL), + X86_MATCH_VFM(INTEL_ATOM_AIRMONT, NULL), {} }; @@ -330,14 +354,13 @@ static const struct acpi_device_id acpi_lpss_device_ids[] = { { "INTL9C60", LPSS_ADDR(lpss_dma_desc) }, /* Lynxpoint LPSS devices */ - { "INT33C0", LPSS_ADDR(lpt_dev_desc) }, - { "INT33C1", LPSS_ADDR(lpt_dev_desc) }, + { "INT33C0", LPSS_ADDR(lpt_spi_dev_desc) }, + { "INT33C1", LPSS_ADDR(lpt_spi_dev_desc) }, { "INT33C2", LPSS_ADDR(lpt_i2c_dev_desc) }, { "INT33C3", LPSS_ADDR(lpt_i2c_dev_desc) }, { "INT33C4", LPSS_ADDR(lpt_uart_dev_desc) }, { "INT33C5", LPSS_ADDR(lpt_uart_dev_desc) }, { "INT33C6", LPSS_ADDR(lpt_sdio_dev_desc) }, - { "INT33C7", }, /* BayTrail LPSS devices */ { "80860F09", LPSS_ADDR(byt_pwm_dev_desc) }, @@ -345,47 +368,38 @@ static const struct acpi_device_id acpi_lpss_device_ids[] = { { "80860F0E", LPSS_ADDR(byt_spi_dev_desc) }, { "80860F14", LPSS_ADDR(byt_sdio_dev_desc) }, { "80860F41", LPSS_ADDR(byt_i2c_dev_desc) }, - { "INT33B2", }, - { "INT33FC", }, /* Braswell LPSS devices */ { "80862286", LPSS_ADDR(lpss_dma_desc) }, { "80862288", LPSS_ADDR(bsw_pwm_dev_desc) }, + { "80862289", LPSS_ADDR(bsw_pwm2_dev_desc) }, { "8086228A", LPSS_ADDR(bsw_uart_dev_desc) }, { "8086228E", LPSS_ADDR(bsw_spi_dev_desc) }, { "808622C0", LPSS_ADDR(lpss_dma_desc) }, { "808622C1", LPSS_ADDR(bsw_i2c_dev_desc) }, /* Broadwell LPSS devices */ - { "INT3430", LPSS_ADDR(lpt_dev_desc) }, - { "INT3431", LPSS_ADDR(lpt_dev_desc) }, + { "INT3430", LPSS_ADDR(lpt_spi_dev_desc) }, + { "INT3431", LPSS_ADDR(lpt_spi_dev_desc) }, { "INT3432", LPSS_ADDR(lpt_i2c_dev_desc) }, { "INT3433", LPSS_ADDR(lpt_i2c_dev_desc) }, { "INT3434", LPSS_ADDR(lpt_uart_dev_desc) }, { "INT3435", LPSS_ADDR(lpt_uart_dev_desc) }, { "INT3436", LPSS_ADDR(lpt_sdio_dev_desc) }, - { "INT3437", }, - - /* Wildcat Point LPSS devices */ - { "INT3438", LPSS_ADDR(lpt_dev_desc) }, { } }; #ifdef CONFIG_X86_INTEL_LPSS -static int is_memory(struct acpi_resource *res, void *not_used) -{ - struct resource r; - return !acpi_dev_resource_memory(res, &r); -} - /* LPSS main clock device. 
*/ static struct platform_device *lpss_clk_dev; static inline void lpt_register_clock_device(void) { - lpss_clk_dev = platform_device_register_simple("clk-lpt", -1, NULL, 0); + lpss_clk_dev = platform_device_register_simple("clk-lpss-atom", + PLATFORM_DEVID_NONE, + NULL, 0); } static int register_device_clock(struct acpi_device *adev, @@ -401,6 +415,9 @@ static int register_device_clock(struct acpi_device *adev, if (!lpss_clk_dev) lpt_register_clock_device(); + if (IS_ERR(lpss_clk_dev)) + return PTR_ERR(lpss_clk_dev); + clk_data = platform_get_drvdata(lpss_clk_dev); if (!clk_data) return -ENODEV; @@ -434,8 +451,9 @@ static int register_device_clock(struct acpi_device *adev, if (!clk_name) return -ENOMEM; clk = clk_register_fractional_divider(NULL, clk_name, parent, - 0, prv_base, - 1, 15, 16, 15, 0, NULL); + 0, prv_base, 1, 15, 16, 15, + CLK_FRAC_DIVIDER_POWER_OF_TWO_PS, + NULL); parent = clk_name; clk_name = kasprintf(GFP_KERNEL, "%s-update", devname); @@ -464,6 +482,18 @@ struct lpss_device_links { const char *consumer_hid; const char *consumer_uid; u32 flags; + const struct dmi_system_id *dep_missing_ids; +}; + +/* Please keep this list sorted alphabetically by vendor and model */ +static const struct dmi_system_id i2c1_dep_missing_dmi_ids[] = { + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "T200TA"), + }, + }, + {} }; /* @@ -474,36 +504,29 @@ struct lpss_device_links { * the supplier is not enumerated until after the consumer is probed. */ static const struct lpss_device_links lpss_device_links[] = { + /* CHT External sdcard slot controller depends on PMIC I2C ctrl */ {"808622C1", "7", "80860F14", "3", DL_FLAG_PM_RUNTIME}, + /* CHT iGPU depends on PMIC I2C controller */ {"808622C1", "7", "LNXVIDEO", NULL, DL_FLAG_PM_RUNTIME}, + /* BYT iGPU depends on the Embedded Controller I2C controller (UID 1) */ + {"80860F41", "1", "LNXVIDEO", NULL, DL_FLAG_PM_RUNTIME, + i2c1_dep_missing_dmi_ids}, + /* BYT CR iGPU depends on PMIC I2C controller (UID 5 on CR) */ {"80860F41", "5", "LNXVIDEO", NULL, DL_FLAG_PM_RUNTIME}, + /* BYT iGPU depends on PMIC I2C controller (UID 7 on non CR) */ + {"80860F41", "7", "LNXVIDEO", NULL, DL_FLAG_PM_RUNTIME}, }; -static bool hid_uid_match(struct acpi_device *adev, - const char *hid2, const char *uid2) -{ - const char *hid1 = acpi_device_hid(adev); - const char *uid1 = acpi_device_uid(adev); - - if (strcmp(hid1, hid2)) - return false; - - if (!uid2) - return true; - - return uid1 && !strcmp(uid1, uid2); -} - static bool acpi_lpss_is_supplier(struct acpi_device *adev, const struct lpss_device_links *link) { - return hid_uid_match(adev, link->supplier_hid, link->supplier_uid); + return acpi_dev_hid_uid_match(adev, link->supplier_hid, link->supplier_uid); } static bool acpi_lpss_is_consumer(struct acpi_device *adev, const struct lpss_device_links *link) { - return hid_uid_match(adev, link->consumer_hid, link->consumer_uid); + return acpi_dev_hid_uid_match(adev, link->consumer_hid, link->consumer_uid); } struct hid_uid { @@ -511,15 +534,15 @@ struct hid_uid { const char *uid; }; -static int match_hid_uid(struct device *dev, void *data) +static int match_hid_uid(struct device *dev, const void *data) { struct acpi_device *adev = ACPI_COMPANION(dev); - struct hid_uid *id = data; + const struct hid_uid *id = data; if (!adev) return 0; - return hid_uid_match(adev, id->hid, id->uid); + return acpi_dev_hid_uid_match(adev, id->hid, id->uid); } static struct device *acpi_lpss_find_device(const char *hid, const char *uid) @@ 
-538,30 +561,6 @@ static struct device *acpi_lpss_find_device(const char *hid, const char *uid) return bus_find_device(&pci_bus_type, NULL, &data, match_hid_uid); } -static bool acpi_lpss_dep(struct acpi_device *adev, acpi_handle handle) -{ - struct acpi_handle_list dep_devices; - acpi_status status; - int i; - - if (!acpi_has_method(adev->handle, "_DEP")) - return false; - - status = acpi_evaluate_reference(adev->handle, "_DEP", NULL, - &dep_devices); - if (ACPI_FAILURE(status)) { - dev_dbg(&adev->dev, "Failed to evaluate _DEP.\n"); - return false; - } - - for (i = 0; i < dep_devices.count; i++) { - if (dep_devices.handles[i] == handle) - return true; - } - - return false; -} - static void acpi_lpss_link_consumer(struct device *dev1, const struct lpss_device_links *link) { @@ -571,7 +570,8 @@ static void acpi_lpss_link_consumer(struct device *dev1, if (!dev2) return; - if (acpi_lpss_dep(ACPI_COMPANION(dev2), ACPI_HANDLE(dev1))) + if ((link->dep_missing_ids && dmi_check_system(link->dep_missing_ids)) + || acpi_device_dep(ACPI_HANDLE(dev2), ACPI_HANDLE(dev1))) device_link_add(dev2, dev1, link->flags); put_device(dev2); @@ -586,7 +586,8 @@ static void acpi_lpss_link_supplier(struct device *dev1, if (!dev2) return; - if (acpi_lpss_dep(ACPI_COMPANION(dev1), ACPI_HANDLE(dev2))) + if ((link->dep_missing_ids && dmi_check_system(link->dep_missing_ids)) + || acpi_device_dep(ACPI_HANDLE(dev1), ACPI_HANDLE(dev2))) device_link_add(dev1, dev2, link->flags); put_device(dev2); @@ -619,38 +620,33 @@ static int acpi_lpss_create_device(struct acpi_device *adev, int ret; dev_desc = (const struct lpss_device_desc *)id->driver_data; - if (!dev_desc) { - pdev = acpi_create_platform_device(adev, NULL); - return IS_ERR_OR_NULL(pdev) ? PTR_ERR(pdev) : 1; - } + if (!dev_desc) + return -EINVAL; + pdata = kzalloc(sizeof(*pdata), GFP_KERNEL); if (!pdata) return -ENOMEM; INIT_LIST_HEAD(&resource_list); - ret = acpi_dev_get_resources(adev, &resource_list, is_memory, NULL); + ret = acpi_dev_get_memory_resources(adev, &resource_list); if (ret < 0) goto err_out; - list_for_each_entry(rentry, &resource_list, node) - if (resource_type(rentry->res) == IORESOURCE_MEM) { - if (dev_desc->prv_size_override) - pdata->mmio_size = dev_desc->prv_size_override; - else - pdata->mmio_size = resource_size(rentry->res); - pdata->mmio_base = ioremap(rentry->res->start, - pdata->mmio_size); - break; - } + rentry = list_first_entry_or_null(&resource_list, struct resource_entry, node); + if (rentry) { + if (dev_desc->prv_size_override) + pdata->mmio_size = dev_desc->prv_size_override; + else + pdata->mmio_size = resource_size(rentry->res); + pdata->mmio_base = ioremap(rentry->res->start, pdata->mmio_size); + } acpi_dev_free_resource_list(&resource_list); if (!pdata->mmio_base) { /* Avoid acpi_bus_attach() instantiating a pdev for this dev. */ adev->pnp.type.platform_id = 0; - /* Skip the device, but continue the namespace scan. */ - ret = 0; - goto err_out; + goto out_free; } pdata->adev = adev; @@ -661,11 +657,8 @@ static int acpi_lpss_create_device(struct acpi_device *adev, if (dev_desc->flags & LPSS_CLK) { ret = register_device_clock(adev, pdata); - if (ret) { - /* Skip the device, but continue the namespace scan. 
*/ - ret = 0; - goto err_out; - } + if (ret) + goto out_free; } /* @@ -677,15 +670,19 @@ static int acpi_lpss_create_device(struct acpi_device *adev, adev->driver_data = pdata; pdev = acpi_create_platform_device(adev, dev_desc->properties); - if (!IS_ERR_OR_NULL(pdev)) { - acpi_lpss_create_device_links(adev, pdev); - return 1; + if (IS_ERR_OR_NULL(pdev)) { + adev->driver_data = NULL; + ret = PTR_ERR(pdev); + goto err_out; } - ret = PTR_ERR(pdev); - adev->driver_data = NULL; + acpi_lpss_create_device_links(adev, pdev); + return 1; - err_out: +out_free: + /* Skip the device, but continue the namespace scan */ + ret = 0; +err_out: kfree(pdata); return ret; } @@ -703,14 +700,13 @@ static void __lpss_reg_write(u32 val, struct lpss_private_data *pdata, static int lpss_reg_read(struct device *dev, unsigned int reg, u32 *val) { - struct acpi_device *adev; + struct acpi_device *adev = ACPI_COMPANION(dev); struct lpss_private_data *pdata; unsigned long flags; int ret; - ret = acpi_bus_get_device(ACPI_HANDLE(dev), &adev); - if (WARN_ON(ret)) - return ret; + if (WARN_ON(!adev)) + return -ENODEV; spin_lock_irqsave(&dev->power.lock, flags); if (pm_runtime_suspended(dev)) { @@ -723,6 +719,7 @@ static int lpss_reg_read(struct device *dev, unsigned int reg, u32 *val) goto out; } *val = __lpss_reg_read(pdata, reg); + ret = 0; out: spin_unlock_irqrestore(&dev->power.lock, flags); @@ -741,7 +738,7 @@ static ssize_t lpss_ltr_show(struct device *dev, struct device_attribute *attr, if (ret) return ret; - return snprintf(buf, PAGE_SIZE, "%08x\n", ltr_value); + return sysfs_emit(buf, "%08x\n", ltr_value); } static ssize_t lpss_ltr_mode_show(struct device *dev, @@ -884,9 +881,12 @@ static int acpi_lpss_activate(struct device *dev) * we have to deassert reset line to be sure that ->probe() will * recognize the device. */ - if (pdata->dev_desc->flags & LPSS_SAVE_CTX) + if (pdata->dev_desc->flags & (LPSS_SAVE_CTX | LPSS_SAVE_CTX_ONCE)) lpss_deassert_reset(pdata); + if (pdata->dev_desc->flags & LPSS_SAVE_CTX_ONCE) + acpi_lpss_save_ctx(dev, pdata); + return 0; } @@ -1030,7 +1030,7 @@ static int acpi_lpss_resume(struct device *dev) acpi_lpss_d3_to_d0_delay(pdata); - if (pdata->dev_desc->flags & LPSS_SAVE_CTX) + if (pdata->dev_desc->flags & (LPSS_SAVE_CTX | LPSS_SAVE_CTX_ONCE)) acpi_lpss_restore_ctx(dev, pdata); return 0; @@ -1041,7 +1041,7 @@ static int acpi_lpss_do_suspend_late(struct device *dev) { int ret; - if (dev_pm_smart_suspend_and_suspended(dev)) + if (dev_pm_skip_suspend(dev)) return 0; ret = pm_generic_suspend_late(dev); @@ -1064,6 +1064,13 @@ static int acpi_lpss_suspend_noirq(struct device *dev) int ret; if (pdata->dev_desc->resume_from_noirq) { + /* + * The driver's ->suspend_late callback will be invoked by + * acpi_lpss_do_suspend_late(), with the assumption that the + * driver really wanted to run that code in ->suspend_noirq, but + * it could not run after acpi_dev_suspend() and the driver + * expected the latter to be called in the "late" phase. + */ ret = acpi_lpss_do_suspend_late(dev); if (ret) return ret; @@ -1086,6 +1093,9 @@ static int acpi_lpss_resume_early(struct device *dev) if (pdata->dev_desc->resume_from_noirq) return 0; + if (dev_pm_skip_resume(dev)) + return 0; + return acpi_lpss_do_resume_early(dev); } @@ -1094,16 +1104,97 @@ static int acpi_lpss_resume_noirq(struct device *dev) struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev)); int ret; - ret = acpi_subsys_resume_noirq(dev); + /* Follow acpi_subsys_resume_noirq(). 
*/ + if (dev_pm_skip_resume(dev)) + return 0; + + ret = pm_generic_resume_noirq(dev); if (ret) return ret; - if (!dev_pm_may_skip_resume(dev) && pdata->dev_desc->resume_from_noirq) - ret = acpi_lpss_do_resume_early(dev); + if (!pdata->dev_desc->resume_from_noirq) + return 0; - return ret; + /* + * The driver's ->resume_early callback will be invoked by + * acpi_lpss_do_resume_early(), with the assumption that the driver + * really wanted to run that code in ->resume_noirq, but it could not + * run before acpi_dev_resume() and the driver expected the latter to be + * called in the "early" phase. + */ + return acpi_lpss_do_resume_early(dev); +} + +static int acpi_lpss_do_restore_early(struct device *dev) +{ + int ret = acpi_lpss_resume(dev); + + return ret ? ret : pm_generic_restore_early(dev); } +static int acpi_lpss_restore_early(struct device *dev) +{ + struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev)); + + if (pdata->dev_desc->resume_from_noirq) + return 0; + + return acpi_lpss_do_restore_early(dev); +} + +static int acpi_lpss_restore_noirq(struct device *dev) +{ + struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev)); + int ret; + + ret = pm_generic_restore_noirq(dev); + if (ret) + return ret; + + if (!pdata->dev_desc->resume_from_noirq) + return 0; + + /* This is analogous to what happens in acpi_lpss_resume_noirq(). */ + return acpi_lpss_do_restore_early(dev); +} + +static int acpi_lpss_do_poweroff_late(struct device *dev) +{ + int ret = pm_generic_poweroff_late(dev); + + return ret ? ret : acpi_lpss_suspend(dev, device_may_wakeup(dev)); +} + +static int acpi_lpss_poweroff_late(struct device *dev) +{ + struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev)); + + if (dev_pm_skip_suspend(dev)) + return 0; + + if (pdata->dev_desc->resume_from_noirq) + return 0; + + return acpi_lpss_do_poweroff_late(dev); +} + +static int acpi_lpss_poweroff_noirq(struct device *dev) +{ + struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev)); + + if (dev_pm_skip_suspend(dev)) + return 0; + + if (pdata->dev_desc->resume_from_noirq) { + /* This is analogous to the acpi_lpss_suspend_noirq() case. 
*/ + int ret = acpi_lpss_do_poweroff_late(dev); + + if (ret) + return ret; + } + + return pm_generic_poweroff_noirq(dev); +} #endif /* CONFIG_PM_SLEEP */ static int acpi_lpss_runtime_suspend(struct device *dev) @@ -1137,14 +1228,11 @@ static struct dev_pm_domain acpi_lpss_pm_domain = { .resume_noirq = acpi_lpss_resume_noirq, .resume_early = acpi_lpss_resume_early, .freeze = acpi_subsys_freeze, - .freeze_late = acpi_subsys_freeze_late, - .freeze_noirq = acpi_subsys_freeze_noirq, - .thaw_noirq = acpi_subsys_thaw_noirq, - .poweroff = acpi_subsys_suspend, - .poweroff_late = acpi_lpss_suspend_late, - .poweroff_noirq = acpi_subsys_suspend_noirq, - .restore_noirq = acpi_subsys_resume_noirq, - .restore_early = acpi_lpss_resume_early, + .poweroff = acpi_subsys_poweroff, + .poweroff_late = acpi_lpss_poweroff_late, + .poweroff_noirq = acpi_lpss_poweroff_noirq, + .restore_noirq = acpi_lpss_restore_noirq, + .restore_early = acpi_lpss_restore_early, #endif .runtime_suspend = acpi_lpss_runtime_suspend, .runtime_resume = acpi_lpss_runtime_resume, @@ -1164,7 +1252,8 @@ static int acpi_lpss_platform_notify(struct notifier_block *nb, if (!id || !id->driver_data) return 0; - if (acpi_bus_get_device(ACPI_HANDLE(&pdev->dev), &adev)) + adev = ACPI_COMPANION(&pdev->dev); + if (!adev) return 0; pdata = acpi_driver_data(adev); @@ -1237,7 +1326,7 @@ void __init acpi_lpss_init(void) const struct x86_cpu_id *id; int ret; - ret = lpt_clk_init(); + ret = lpss_atom_clk_init(); if (ret) return; diff --git a/drivers/acpi/x86/s2idle.c b/drivers/acpi/x86/s2idle.c new file mode 100644 index 000000000000..6d4d06236f61 --- /dev/null +++ b/drivers/acpi/x86/s2idle.c @@ -0,0 +1,676 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Architecture-specific ACPI-based support for suspend-to-idle. + * + * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com> + * Author: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com> + * Author: Shyam Sundar S K <Shyam-sundar.S-k@amd.com> + * + * On platforms supporting the Low Power S0 Idle interface there is an ACPI + * device object with the PNP0D80 compatible device ID (System Power Management + * Controller) and a specific _DSM method under it. That method, if present, + * can be used to indicate to the platform that the OS is transitioning into a + * low-power state in which certain types of activity are not desirable or that + * it is leaving such a state, which allows the platform to adjust its operation + * mode accordingly. 
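+ * + * As a rough sketch of the call sequence used below (Intel function IDs shown; the AMD path uses its own UUID and function numbers): + * + *	suspend: acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_OFF, ...); + *		 acpi_sleep_run_lps0_dsm(ACPI_LPS0_ENTRY, ...); + *	resume:	 acpi_sleep_run_lps0_dsm(ACPI_LPS0_EXIT, ...); + *		 acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_ON, ...);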
+ */ + +#include <linux/acpi.h> +#include <linux/device.h> +#include <linux/dmi.h> +#include <linux/suspend.h> + +#include "../sleep.h" + +#ifdef CONFIG_SUSPEND + +static bool sleep_no_lps0 __read_mostly; +module_param(sleep_no_lps0, bool, 0644); +MODULE_PARM_DESC(sleep_no_lps0, "Do not use the special LPS0 device interface"); + +static const struct acpi_device_id lps0_device_ids[] = { + {"PNP0D80", }, + {"", }, +}; + +/* Microsoft platform agnostic UUID */ +#define ACPI_LPS0_DSM_UUID_MICROSOFT "11e00d56-ce64-47ce-837b-1f898f9aa461" + +#define ACPI_LPS0_DSM_UUID "c4eb40a0-6cd2-11e2-bcfd-0800200c9a66" + +#define ACPI_LPS0_GET_DEVICE_CONSTRAINTS 1 +#define ACPI_LPS0_SCREEN_OFF 3 +#define ACPI_LPS0_SCREEN_ON 4 +#define ACPI_LPS0_ENTRY 5 +#define ACPI_LPS0_EXIT 6 +#define ACPI_LPS0_MS_ENTRY 7 +#define ACPI_LPS0_MS_EXIT 8 + +/* AMD */ +#define ACPI_LPS0_DSM_UUID_AMD "e3f32452-febc-43ce-9039-932122d37721" +#define ACPI_LPS0_ENTRY_AMD 2 +#define ACPI_LPS0_EXIT_AMD 3 +#define ACPI_LPS0_SCREEN_OFF_AMD 4 +#define ACPI_LPS0_SCREEN_ON_AMD 5 + +static acpi_handle lps0_device_handle; +static guid_t lps0_dsm_guid; +static int lps0_dsm_func_mask; + +static guid_t lps0_dsm_guid_microsoft; +static int lps0_dsm_func_mask_microsoft; +static int lps0_dsm_state; + +/* Device constraint entry structure */ +struct lpi_device_info { + char *name; + int enabled; + union acpi_object *package; +}; + +/* Constraint package structure */ +struct lpi_device_constraint { + int uid; + int min_dstate; + int function_states; +}; + +struct lpi_constraints { + acpi_handle handle; + int min_dstate; +}; + +/* AMD Constraint package structure */ +struct lpi_device_constraint_amd { + char *name; + int enabled; + int function_states; + int min_dstate; +}; + +static LIST_HEAD(lps0_s2idle_devops_head); + +static struct lpi_constraints *lpi_constraints_table; +static int lpi_constraints_table_size; +static int rev_id; + +#define for_each_lpi_constraint(entry) \ + for (int i = 0; \ + entry = &lpi_constraints_table[i], i < lpi_constraints_table_size; \ + i++) + +static void lpi_device_get_constraints_amd(void) +{ + union acpi_object *out_obj; + int i, j, k; + + out_obj = acpi_evaluate_dsm_typed(lps0_device_handle, &lps0_dsm_guid, + rev_id, ACPI_LPS0_GET_DEVICE_CONSTRAINTS, + NULL, ACPI_TYPE_PACKAGE); + + acpi_handle_debug(lps0_device_handle, "_DSM function 1 eval %s\n", + out_obj ? 
"successful" : "failed"); + + if (!out_obj) + return; + + for (i = 0; i < out_obj->package.count; i++) { + union acpi_object *package = &out_obj->package.elements[i]; + + if (package->type == ACPI_TYPE_PACKAGE) { + if (lpi_constraints_table) { + acpi_handle_err(lps0_device_handle, + "Duplicate constraints list\n"); + goto free_acpi_buffer; + } + + lpi_constraints_table = kcalloc(package->package.count, + sizeof(*lpi_constraints_table), + GFP_KERNEL); + + if (!lpi_constraints_table) + goto free_acpi_buffer; + + acpi_handle_debug(lps0_device_handle, + "LPI: constraints list begin:\n"); + + for (j = 0; j < package->package.count; j++) { + union acpi_object *info_obj = &package->package.elements[j]; + struct lpi_device_constraint_amd dev_info = {}; + struct lpi_constraints *list; + acpi_status status; + + list = &lpi_constraints_table[lpi_constraints_table_size]; + + for (k = 0; k < info_obj->package.count; k++) { + union acpi_object *obj = &info_obj->package.elements[k]; + + switch (k) { + case 0: + dev_info.enabled = obj->integer.value; + break; + case 1: + dev_info.name = obj->string.pointer; + break; + case 2: + dev_info.function_states = obj->integer.value; + break; + case 3: + dev_info.min_dstate = obj->integer.value; + break; + } + } + + acpi_handle_debug(lps0_device_handle, + "Name:%s, Enabled: %d, States: %d, MinDstate: %d\n", + dev_info.name, + dev_info.enabled, + dev_info.function_states, + dev_info.min_dstate); + + if (!dev_info.enabled || !dev_info.name || + !dev_info.min_dstate) + continue; + + status = acpi_get_handle(NULL, dev_info.name, &list->handle); + if (ACPI_FAILURE(status)) + continue; + + list->min_dstate = dev_info.min_dstate; + + lpi_constraints_table_size++; + } + } + } + + acpi_handle_debug(lps0_device_handle, "LPI: constraints list end\n"); + +free_acpi_buffer: + ACPI_FREE(out_obj); +} + +static void lpi_device_get_constraints(void) +{ + union acpi_object *out_obj; + int i; + + out_obj = acpi_evaluate_dsm_typed(lps0_device_handle, &lps0_dsm_guid, + 1, ACPI_LPS0_GET_DEVICE_CONSTRAINTS, + NULL, ACPI_TYPE_PACKAGE); + + acpi_handle_debug(lps0_device_handle, "_DSM function 1 eval %s\n", + out_obj ? 
"successful" : "failed"); + + if (!out_obj) + return; + + lpi_constraints_table = kcalloc(out_obj->package.count, + sizeof(*lpi_constraints_table), + GFP_KERNEL); + if (!lpi_constraints_table) + goto free_acpi_buffer; + + acpi_handle_debug(lps0_device_handle, "LPI: constraints list begin:\n"); + + for (i = 0; i < out_obj->package.count; i++) { + struct lpi_constraints *constraint; + acpi_status status; + union acpi_object *package = &out_obj->package.elements[i]; + struct lpi_device_info info = { }; + int package_count = 0, j; + + if (!package) + continue; + + for (j = 0; j < package->package.count; j++) { + union acpi_object *element = + &(package->package.elements[j]); + + switch (element->type) { + case ACPI_TYPE_INTEGER: + info.enabled = element->integer.value; + break; + case ACPI_TYPE_STRING: + info.name = element->string.pointer; + break; + case ACPI_TYPE_PACKAGE: + package_count = element->package.count; + info.package = element->package.elements; + break; + } + } + + if (!info.enabled || !info.package || !info.name) + continue; + + constraint = &lpi_constraints_table[lpi_constraints_table_size]; + + status = acpi_get_handle(NULL, info.name, &constraint->handle); + if (ACPI_FAILURE(status)) + continue; + + acpi_handle_debug(lps0_device_handle, + "index:%d Name:%s\n", i, info.name); + + constraint->min_dstate = -1; + + for (j = 0; j < package_count; j++) { + union acpi_object *info_obj = &info.package[j]; + union acpi_object *cnstr_pkg; + union acpi_object *obj; + struct lpi_device_constraint dev_info; + + switch (info_obj->type) { + case ACPI_TYPE_INTEGER: + /* version */ + break; + case ACPI_TYPE_PACKAGE: + if (info_obj->package.count < 2) + break; + + cnstr_pkg = info_obj->package.elements; + obj = &cnstr_pkg[0]; + dev_info.uid = obj->integer.value; + obj = &cnstr_pkg[1]; + dev_info.min_dstate = obj->integer.value; + + acpi_handle_debug(lps0_device_handle, + "uid:%d min_dstate:%s\n", + dev_info.uid, + acpi_power_state_string(dev_info.min_dstate)); + + constraint->min_dstate = dev_info.min_dstate; + break; + } + } + + if (constraint->min_dstate < 0) { + acpi_handle_debug(lps0_device_handle, + "Incomplete constraint defined\n"); + continue; + } + + lpi_constraints_table_size++; + } + + acpi_handle_debug(lps0_device_handle, "LPI: constraints list end\n"); + +free_acpi_buffer: + ACPI_FREE(out_obj); +} + +static void lpi_check_constraints(void) +{ + struct lpi_constraints *entry; + + if (IS_ERR_OR_NULL(lpi_constraints_table)) + return; + + for_each_lpi_constraint(entry) { + struct acpi_device *adev = acpi_fetch_acpi_dev(entry->handle); + + if (!adev) + continue; + + acpi_handle_debug(entry->handle, + "LPI: required min power state:%s current power state:%s\n", + acpi_power_state_string(entry->min_dstate), + acpi_power_state_string(adev->power.state)); + + if (!adev->flags.power_manageable) { + acpi_handle_info(entry->handle, "LPI: Device not power manageable\n"); + entry->handle = NULL; + continue; + } + + if (adev->power.state < entry->min_dstate) + acpi_handle_info(entry->handle, + "LPI: Constraint not met; min power state:%s current power state:%s\n", + acpi_power_state_string(entry->min_dstate), + acpi_power_state_string(adev->power.state)); + } +} + +static bool acpi_s2idle_vendor_amd(void) +{ + return boot_cpu_data.x86_vendor == X86_VENDOR_AMD; +} + +static const char *acpi_sleep_dsm_state_to_str(unsigned int state) +{ + if (lps0_dsm_func_mask_microsoft || !acpi_s2idle_vendor_amd()) { + switch (state) { + case ACPI_LPS0_SCREEN_OFF: + return "screen off"; + case 
ACPI_LPS0_SCREEN_ON: + return "screen on"; + case ACPI_LPS0_ENTRY: + return "lps0 entry"; + case ACPI_LPS0_EXIT: + return "lps0 exit"; + case ACPI_LPS0_MS_ENTRY: + return "lps0 ms entry"; + case ACPI_LPS0_MS_EXIT: + return "lps0 ms exit"; + } + } else { + switch (state) { + case ACPI_LPS0_SCREEN_ON_AMD: + return "screen on"; + case ACPI_LPS0_SCREEN_OFF_AMD: + return "screen off"; + case ACPI_LPS0_ENTRY_AMD: + return "lps0 entry"; + case ACPI_LPS0_EXIT_AMD: + return "lps0 exit"; + } + } + + return "unknown"; +} + +static void acpi_sleep_run_lps0_dsm(unsigned int func, unsigned int func_mask, guid_t dsm_guid) +{ + union acpi_object *out_obj; + + if (!(func_mask & (1 << func))) + return; + + out_obj = acpi_evaluate_dsm(lps0_device_handle, &dsm_guid, + rev_id, func, NULL); + ACPI_FREE(out_obj); + + lps0_dsm_state = func; + if (pm_debug_messages_on) { + acpi_handle_info(lps0_device_handle, + "%s transitioned to state %s\n", + out_obj ? "Successfully" : "Failed to", + acpi_sleep_dsm_state_to_str(lps0_dsm_state)); + } +} + + +static int validate_dsm(acpi_handle handle, const char *uuid, int rev, guid_t *dsm_guid) +{ + union acpi_object *obj; + int ret = -EINVAL; + + guid_parse(uuid, dsm_guid); + + /* Check if the _DSM is present and as expected. */ + obj = acpi_evaluate_dsm_typed(handle, dsm_guid, rev, 0, NULL, ACPI_TYPE_BUFFER); + if (!obj || obj->buffer.length == 0 || obj->buffer.length > sizeof(u32)) { + acpi_handle_debug(handle, + "_DSM UUID %s rev %d function 0 evaluation failed\n", uuid, rev); + goto out; + } + + ret = *(int *)obj->buffer.pointer; + acpi_handle_debug(handle, "_DSM UUID %s rev %d function mask: 0x%x\n", uuid, rev, ret); + +out: + ACPI_FREE(obj); + return ret; +} + +struct amd_lps0_hid_device_data { + const bool check_off_by_one; +}; + +static const struct amd_lps0_hid_device_data amd_picasso = { + .check_off_by_one = true, +}; + +static const struct amd_lps0_hid_device_data amd_cezanne = { + .check_off_by_one = false, +}; + +static const struct acpi_device_id amd_hid_ids[] = { + {"AMD0004", (kernel_ulong_t)&amd_picasso, }, + {"AMD0005", (kernel_ulong_t)&amd_picasso, }, + {"AMDI0005", (kernel_ulong_t)&amd_picasso, }, + {"AMDI0006", (kernel_ulong_t)&amd_cezanne, }, + {} +}; + +static int lps0_device_attach(struct acpi_device *adev, + const struct acpi_device_id *not_used) +{ + if (lps0_device_handle) + return 0; + + lps0_dsm_func_mask_microsoft = validate_dsm(adev->handle, + ACPI_LPS0_DSM_UUID_MICROSOFT, 0, + &lps0_dsm_guid_microsoft); + if (acpi_s2idle_vendor_amd()) { + static const struct acpi_device_id *dev_id; + const struct amd_lps0_hid_device_data *data; + + for (dev_id = &amd_hid_ids[0]; dev_id->id[0]; dev_id++) + if (acpi_dev_hid_uid_match(adev, dev_id->id, NULL)) + break; + if (dev_id->id[0]) + data = (const struct amd_lps0_hid_device_data *) dev_id->driver_data; + else + data = &amd_cezanne; + lps0_dsm_func_mask = validate_dsm(adev->handle, + ACPI_LPS0_DSM_UUID_AMD, rev_id, &lps0_dsm_guid); + if (lps0_dsm_func_mask > 0x3 && data->check_off_by_one) { + lps0_dsm_func_mask = (lps0_dsm_func_mask << 1) | 0x1; + acpi_handle_debug(adev->handle, "_DSM UUID %s: Adjusted function mask: 0x%x\n", + ACPI_LPS0_DSM_UUID_AMD, lps0_dsm_func_mask); + } else if (lps0_dsm_func_mask_microsoft > 0 && rev_id) { + lps0_dsm_func_mask_microsoft = -EINVAL; + acpi_handle_debug(adev->handle, "_DSM Using AMD method\n"); + } + } else { + rev_id = 1; + lps0_dsm_func_mask = validate_dsm(adev->handle, + ACPI_LPS0_DSM_UUID, rev_id, &lps0_dsm_guid); + if (lps0_dsm_func_mask > 0 && 
lps0_dsm_func_mask_microsoft > 0) { + unsigned int func_mask; + + /* + * Log a message if the _DSM function sets for two + * different UUIDs overlap. + */ + func_mask = lps0_dsm_func_mask & lps0_dsm_func_mask_microsoft; + if (func_mask) + acpi_handle_info(adev->handle, + "Duplicate LPS0 _DSM functions (mask: 0x%x)\n", + func_mask); + } + } + + if (lps0_dsm_func_mask < 0 && lps0_dsm_func_mask_microsoft < 0) + return 0; //function evaluation failed + + lps0_device_handle = adev->handle; + + /* + * Use suspend-to-idle by default if ACPI_FADT_LOW_POWER_S0 is set in + * the FADT and the default suspend mode was not set from the command + * line. + */ + if ((acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0) && + mem_sleep_default > PM_SUSPEND_MEM && !acpi_sleep_default_s3) { + mem_sleep_current = PM_SUSPEND_TO_IDLE; + pr_info("Low-power S0 idle used by default for system suspend\n"); + } + + /* + * Some LPS0 systems, like ASUS Zenbook UX430UNR/i7-8550U, require the + * EC GPE to be enabled while suspended for certain wakeup devices to + * work, so mark it as wakeup-capable. + */ + acpi_ec_mark_gpe_for_wake(); + + return 0; +} + +static struct acpi_scan_handler lps0_handler = { + .ids = lps0_device_ids, + .attach = lps0_device_attach, +}; + +static int acpi_s2idle_begin_lps0(void) +{ + if (pm_debug_messages_on && !lpi_constraints_table) { + if (acpi_s2idle_vendor_amd()) + lpi_device_get_constraints_amd(); + else + lpi_device_get_constraints(); + + /* + * Try to retrieve the constraints only once because failures + * to do so usually are sticky. + */ + if (!lpi_constraints_table) + lpi_constraints_table = ERR_PTR(-ENODATA); + } + + return acpi_s2idle_begin(); +} + +static int acpi_s2idle_prepare_late_lps0(void) +{ + struct acpi_s2idle_dev_ops *handler; + + if (!lps0_device_handle || sleep_no_lps0) + return 0; + + if (pm_debug_messages_on) + lpi_check_constraints(); + + /* Screen off */ + if (lps0_dsm_func_mask > 0) + acpi_sleep_run_lps0_dsm(acpi_s2idle_vendor_amd() ? 
+ ACPI_LPS0_SCREEN_OFF_AMD : + ACPI_LPS0_SCREEN_OFF, + lps0_dsm_func_mask, lps0_dsm_guid); + + if (lps0_dsm_func_mask_microsoft > 0) + acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_OFF, + lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft); + + /* LPS0 entry */ + if (lps0_dsm_func_mask > 0 && acpi_s2idle_vendor_amd()) + acpi_sleep_run_lps0_dsm(ACPI_LPS0_ENTRY_AMD, + lps0_dsm_func_mask, lps0_dsm_guid); + + if (lps0_dsm_func_mask_microsoft > 0) { + /* Modern Standby entry */ + acpi_sleep_run_lps0_dsm(ACPI_LPS0_MS_ENTRY, + lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft); + acpi_sleep_run_lps0_dsm(ACPI_LPS0_ENTRY, + lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft); + } + + if (lps0_dsm_func_mask > 0 && !acpi_s2idle_vendor_amd()) + acpi_sleep_run_lps0_dsm(ACPI_LPS0_ENTRY, + lps0_dsm_func_mask, lps0_dsm_guid); + + list_for_each_entry(handler, &lps0_s2idle_devops_head, list_node) { + if (handler->prepare) + handler->prepare(); + } + + return 0; +} + +static void acpi_s2idle_check_lps0(void) +{ + struct acpi_s2idle_dev_ops *handler; + + if (!lps0_device_handle || sleep_no_lps0) + return; + + list_for_each_entry(handler, &lps0_s2idle_devops_head, list_node) { + if (handler->check) + handler->check(); + } +} + +static void acpi_s2idle_restore_early_lps0(void) +{ + struct acpi_s2idle_dev_ops *handler; + + if (!lps0_device_handle || sleep_no_lps0) + return; + + list_for_each_entry(handler, &lps0_s2idle_devops_head, list_node) + if (handler->restore) + handler->restore(); + + /* LPS0 exit */ + if (lps0_dsm_func_mask > 0) + acpi_sleep_run_lps0_dsm(acpi_s2idle_vendor_amd() ? + ACPI_LPS0_EXIT_AMD : + ACPI_LPS0_EXIT, + lps0_dsm_func_mask, lps0_dsm_guid); + + if (lps0_dsm_func_mask_microsoft > 0) { + acpi_sleep_run_lps0_dsm(ACPI_LPS0_EXIT, + lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft); + /* Modern Standby exit */ + acpi_sleep_run_lps0_dsm(ACPI_LPS0_MS_EXIT, + lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft); + } + + /* Screen on */ + if (lps0_dsm_func_mask_microsoft > 0) + acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_ON, + lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft); + if (lps0_dsm_func_mask > 0) + acpi_sleep_run_lps0_dsm(acpi_s2idle_vendor_amd() ? 
+ ACPI_LPS0_SCREEN_ON_AMD : + ACPI_LPS0_SCREEN_ON, + lps0_dsm_func_mask, lps0_dsm_guid); +} + +static const struct platform_s2idle_ops acpi_s2idle_ops_lps0 = { + .begin = acpi_s2idle_begin_lps0, + .prepare = acpi_s2idle_prepare, + .prepare_late = acpi_s2idle_prepare_late_lps0, + .check = acpi_s2idle_check_lps0, + .wake = acpi_s2idle_wake, + .restore_early = acpi_s2idle_restore_early_lps0, + .restore = acpi_s2idle_restore, + .end = acpi_s2idle_end, +}; + +void __init acpi_s2idle_setup(void) +{ + acpi_scan_add_handler(&lps0_handler); + s2idle_set_ops(&acpi_s2idle_ops_lps0); +} + +int acpi_register_lps0_dev(struct acpi_s2idle_dev_ops *arg) +{ + unsigned int sleep_flags; + + if (!lps0_device_handle || sleep_no_lps0) + return -ENODEV; + + sleep_flags = lock_system_sleep(); + list_add(&arg->list_node, &lps0_s2idle_devops_head); + unlock_system_sleep(sleep_flags); + + return 0; +} +EXPORT_SYMBOL_GPL(acpi_register_lps0_dev); + +void acpi_unregister_lps0_dev(struct acpi_s2idle_dev_ops *arg) +{ + unsigned int sleep_flags; + + if (!lps0_device_handle || sleep_no_lps0) + return; + + sleep_flags = lock_system_sleep(); + list_del(&arg->list_node); + unlock_system_sleep(sleep_flags); +} +EXPORT_SYMBOL_GPL(acpi_unregister_lps0_dev); + +#endif /* CONFIG_SUSPEND */ diff --git a/drivers/acpi/x86/utils.c b/drivers/acpi/x86/utils.c index 9a8e286dd86f..4ee30c2897a2 100644 --- a/drivers/acpi/x86/utils.c +++ b/drivers/acpi/x86/utils.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * X86 ACPI Utility Functions * @@ -5,14 +6,14 @@ * * Based on various non upstream patches to support the CHT Whiskey Cove PMIC: * Copyright (C) 2013-2015 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. */ +#define pr_fmt(fmt) "ACPI: " fmt + #include <linux/acpi.h> #include <linux/dmi.h> +#include <linux/pci.h> +#include <linux/platform_device.h> #include <asm/cpu_device_id.h> #include <asm/intel-family.h> #include "../internal.h" @@ -25,111 +26,630 @@ * Some BIOS-es (temporarily) hide specific APCI devices to work around Windows * driver bugs. We use DMI matching to match known cases of this. * - * We work around this by always reporting ACPI_STA_DEFAULT for these - * devices. Note this MUST only be done for devices where this is safe. + * Likewise sometimes some not-actually present devices are sometimes + * reported as present, which may cause issues. * - * This forcing of devices to be present is limited to specific CPU (SoC) - * models both to avoid potentially causing trouble on other models and - * because some HIDs are re-used on different SoCs for completely - * different devices. + * We work around this by using the below quirk list to override the status + * reported by the _STA method with a fixed value (ACPI_STA_DEFAULT or 0). + * Note this MUST only be done for devices where this is safe. + * + * This status overriding is limited to specific CPU (SoC) models both to + * avoid potentially causing trouble on other models and because some HIDs + * are re-used on different SoCs for completely different devices. 
*/ -struct always_present_id { +struct override_status_id { struct acpi_device_id hid[2]; struct x86_cpu_id cpu_ids[2]; struct dmi_system_id dmi_ids[2]; /* Optional */ const char *uid; + const char *path; + unsigned long long status; }; -#define ICPU(model) { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, } - -#define ENTRY(hid, uid, cpu_models, dmi...) { \ +#define ENTRY(status, hid, uid, path, cpu_vfm, dmi...) { \ { { hid, }, {} }, \ - { cpu_models, {} }, \ + { X86_MATCH_VFM(cpu_vfm, NULL), {} }, \ { { .matches = dmi }, {} }, \ uid, \ + path, \ + status, \ } -static const struct always_present_id always_present_ids[] = { +#define PRESENT_ENTRY_HID(hid, uid, cpu_vfm, dmi...) \ + ENTRY(ACPI_STA_DEFAULT, hid, uid, NULL, cpu_vfm, dmi) + +#define NOT_PRESENT_ENTRY_HID(hid, uid, cpu_vfm, dmi...) \ + ENTRY(0, hid, uid, NULL, cpu_vfm, dmi) + +#define PRESENT_ENTRY_PATH(path, cpu_vfm, dmi...) \ + ENTRY(ACPI_STA_DEFAULT, "", NULL, path, cpu_vfm, dmi) + +#define NOT_PRESENT_ENTRY_PATH(path, cpu_vfm, dmi...) \ + ENTRY(0, "", NULL, path, cpu_vfm, dmi) + +static const struct override_status_id override_status_ids[] = { /* * Bay / Cherry Trail PWM directly poked by GPU driver in win10, * but Linux uses a separate PWM driver, harmless if not used. */ - ENTRY("80860F09", "1", ICPU(INTEL_FAM6_ATOM_SILVERMONT), {}), - ENTRY("80862288", "1", ICPU(INTEL_FAM6_ATOM_AIRMONT), {}), + PRESENT_ENTRY_HID("80860F09", "1", INTEL_ATOM_SILVERMONT, {}), + PRESENT_ENTRY_HID("80862288", "1", INTEL_ATOM_AIRMONT, {}), + + /* The Xiaomi Mi Pad 2 uses PWM2 for touchkeys backlight control */ + PRESENT_ENTRY_HID("80862289", "2", INTEL_ATOM_AIRMONT, { + DMI_MATCH(DMI_SYS_VENDOR, "Xiaomi Inc"), + DMI_MATCH(DMI_PRODUCT_NAME, "Mipad2"), + }), + /* * The INT0002 device is necessary to clear wakeup interrupt sources * on Cherry Trail devices, without it we get nobody cared IRQ msgs. */ - ENTRY("INT0002", "1", ICPU(INTEL_FAM6_ATOM_AIRMONT), {}), + PRESENT_ENTRY_HID("INT0002", "1", INTEL_ATOM_AIRMONT, {}), /* * On the Dell Venue 11 Pro 7130 and 7139, the DSDT hides * the touchscreen ACPI device until a certain time * after _SB.PCI0.GFX0.LCD.LCD1._ON gets called has passed * *and* _STA has been called at least 3 times since. */ - ENTRY("SYNA7500", "1", ICPU(INTEL_FAM6_HASWELL_ULT), { + PRESENT_ENTRY_HID("SYNA7500", "1", INTEL_HASWELL_L, { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "Venue 11 Pro 7130"), }), - ENTRY("SYNA7500", "1", ICPU(INTEL_FAM6_HASWELL_ULT), { + PRESENT_ENTRY_HID("SYNA7500", "1", INTEL_HASWELL_L, { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "Venue 11 Pro 7139"), }), /* + * The Dell XPS 15 9550 has a SMO8110 accelerometer / + * HDD freefall sensor which is wrongly marked as not present. + */ + PRESENT_ENTRY_HID("SMO8810", "1", INTEL_SKYLAKE, { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "XPS 15 9550"), + }), + + /* * The GPD win BIOS dated 20170221 has disabled the accelerometer, the * drivers sometimes cause crashes under Windows and this is how the - * manufacturer has solved this :| Note that the the DMI data is less - * generic then it seems, a board_vendor of "AMI Corporation" is quite - * rare and a board_name of "Default String" also is rare. + * manufacturer has solved this :| The DMI match may not seem unique, + * but it is. In the 67000+ DMI decode dumps from linux-hardware.org + * only 116 have board_vendor set to "AMI Corporation" and of those 116 + * only the GPD win and pocket entries' board_name is "Default string". 
* * Unfortunately the GPD pocket also uses these strings and its BIOS * was copy-pasted from the GPD win, so it has a disabled KIOX000A * node which we should not enable, thus we also check the BIOS date. */ - ENTRY("KIOX000A", "1", ICPU(INTEL_FAM6_ATOM_AIRMONT), { + PRESENT_ENTRY_HID("KIOX000A", "1", INTEL_ATOM_AIRMONT, { DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"), DMI_MATCH(DMI_BOARD_NAME, "Default string"), DMI_MATCH(DMI_PRODUCT_NAME, "Default string"), DMI_MATCH(DMI_BIOS_DATE, "02/21/2017") }), - ENTRY("KIOX000A", "1", ICPU(INTEL_FAM6_ATOM_AIRMONT), { + PRESENT_ENTRY_HID("KIOX000A", "1", INTEL_ATOM_AIRMONT, { DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"), DMI_MATCH(DMI_BOARD_NAME, "Default string"), DMI_MATCH(DMI_PRODUCT_NAME, "Default string"), DMI_MATCH(DMI_BIOS_DATE, "03/20/2017") }), - ENTRY("KIOX000A", "1", ICPU(INTEL_FAM6_ATOM_AIRMONT), { + PRESENT_ENTRY_HID("KIOX000A", "1", INTEL_ATOM_AIRMONT, { DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"), DMI_MATCH(DMI_BOARD_NAME, "Default string"), DMI_MATCH(DMI_PRODUCT_NAME, "Default string"), DMI_MATCH(DMI_BIOS_DATE, "05/25/2017") }), + + /* + * The GPD win/pocket have a PCI wifi card, but its DSDT has the SDIO + * mmc controller enabled and that has a child-device which _PS3 + * method sets a GPIO causing the PCI wifi card to turn off. + * See above remark about uniqueness of the DMI match. + */ + NOT_PRESENT_ENTRY_PATH("\\_SB_.PCI0.SDHB.BRC1", INTEL_ATOM_AIRMONT, { + DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"), + DMI_EXACT_MATCH(DMI_BOARD_NAME, "Default string"), + DMI_EXACT_MATCH(DMI_BOARD_SERIAL, "Default string"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Default string"), + }), + + /* + * The LSM303D on the Lenovo Yoga Tablet 2 series is present + * as both ACCL0001 and MAGN0001. As we can only ever register an + * i2c client for one of them, ignore MAGN0001. + */ + NOT_PRESENT_ENTRY_HID("MAGN0001", "1", INTEL_ATOM_SILVERMONT, { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_FAMILY, "YOGATablet2"), + }), }; -bool acpi_device_always_present(struct acpi_device *adev) +bool acpi_device_override_status(struct acpi_device *adev, unsigned long long *status) { bool ret = false; unsigned int i; - for (i = 0; i < ARRAY_SIZE(always_present_ids); i++) { - if (acpi_match_device_ids(adev, always_present_ids[i].hid)) + for (i = 0; i < ARRAY_SIZE(override_status_ids); i++) { + if (!x86_match_cpu(override_status_ids[i].cpu_ids)) continue; - if (!adev->pnp.unique_id || - strcmp(adev->pnp.unique_id, always_present_ids[i].uid)) + if (override_status_ids[i].dmi_ids[0].matches[0].slot && + !dmi_check_system(override_status_ids[i].dmi_ids)) continue; - if (!x86_match_cpu(always_present_ids[i].cpu_ids)) - continue; + if (override_status_ids[i].path) { + struct acpi_buffer path = { ACPI_ALLOCATE_BUFFER, NULL }; + bool match; - if (always_present_ids[i].dmi_ids[0].matches[0].slot && - !dmi_check_system(always_present_ids[i].dmi_ids)) - continue; + if (acpi_get_name(adev->handle, ACPI_FULL_PATHNAME, &path)) + continue; + + match = strcmp((char *)path.pointer, override_status_ids[i].path) == 0; + kfree(path.pointer); + + if (!match) + continue; + } else { + if (acpi_match_device_ids(adev, override_status_ids[i].hid)) + continue; + + if (!acpi_dev_uid_match(adev, override_status_ids[i].uid)) + continue; + } + *status = override_status_ids[i].status; ret = true; break; } return ret; } + +/* + * AMD systems from Renoir onwards *require* that the NVME controller + * is put into D3 over a Modern Standby / suspend-to-idle cycle. 
+ * + * This is "typically" accomplished using the `StorageD3Enable` + * property in the _DSD that is checked via the `acpi_storage_d3` function, + * but some OEM systems still don't have it in their BIOS. + * + * The Microsoft documentation for StorageD3Enable mentions that Windows has + * a hardcoded allowlist for D3 support as well as a registry key to override + * the BIOS, which has been used for these cases. + * + * This allows quirking on Linux in a similar fashion. + * + * Cezanne systems shouldn't *normally* need this as the BIOS includes + * StorageD3Enable. But for a few reasons we have added it. + * 1) The BIOS on a number of Dell systems has ambiguity + * between the same value used for _ADR on ACPI nodes GPP1.DEV0 and GPP1.NVME. + * GPP1.NVME is needed to get StorageD3Enable node set properly. + * https://bugzilla.kernel.org/show_bug.cgi?id=216440 + * https://bugzilla.kernel.org/show_bug.cgi?id=216773 + * https://bugzilla.kernel.org/show_bug.cgi?id=217003 + * 2) On at least one HP system StorageD3Enable is missing on the second NVME + * disk in the system. + * 3) On at least one HP Rembrandt system StorageD3Enable is missing on the only + * NVME device. + */ +bool force_storage_d3(void) +{ + if (!cpu_feature_enabled(X86_FEATURE_ZEN)) + return false; + return acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0; +} + +/* + * x86 ACPI boards which ship with only Android as their factory image usually + * declare a whole bunch of bogus I2C devices in their ACPI tables and sometimes + * there are issues with serdev devices on these boards too, e.g. the resource + * points to the wrong serdev_controller. + * + * Instantiating I2C / serdev devs for these bogus devs causes various issues, + * e.g. GPIO/IRQ resource conflicts because sometimes drivers do bind to them. + * The Android x86 kernel fork shipped on these devices has some special code + * to remove the bogus I2C clients (and AFAICT serdevs are ignored completely). + * + * The acpi_quirk_skip_*_enumeration() functions below are used by the I2C or + * serdev code to skip instantiating any I2C or serdev devs on broken boards. + * + * In case of I2C an exception is made for HIDs on the i2c_acpi_known_good_ids + * list. These are known to always be correct (and in case of the audio-codecs + * the drivers heavily rely on the codec being enumerated through ACPI). + * + * Note these boards typically do actually have I2C and serdev devices, + * just different ones than the ones described in their DSDT. The devices + * which are actually present are manually instantiated by the + * drivers/platform/x86/x86-android-tablets.c kernel module. + */ +#define ACPI_QUIRK_SKIP_I2C_CLIENTS BIT(0) +#define ACPI_QUIRK_UART1_SKIP BIT(1) +#define ACPI_QUIRK_UART1_TTY_UART2_SKIP BIT(2) +#define ACPI_QUIRK_PNP_UART1_SKIP BIT(3) +#define ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY BIT(4) +#define ACPI_QUIRK_USE_ACPI_AC_AND_BATTERY BIT(5) +#define ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS BIT(6) + +static const struct dmi_system_id acpi_quirk_skip_dmi_ids[] = { + /* + * 1. Devices with only the skip / don't-skip AC and battery quirks, + * sorted alphabetically.
+ */ + { + /* ECS EF20EA, AXP288 PMIC but uses separate fuel-gauge */ + .matches = { + DMI_MATCH(DMI_PRODUCT_NAME, "EF20EA"), + }, + .driver_data = (void *)ACPI_QUIRK_USE_ACPI_AC_AND_BATTERY + }, + { + /* Lenovo Ideapad Miix 320, AXP288 PMIC, separate fuel-gauge */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "80XF"), + DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo MIIX 320-10ICR"), + }, + .driver_data = (void *)ACPI_QUIRK_USE_ACPI_AC_AND_BATTERY + }, + + /* + * 2. Devices which also have the skip i2c/serdev quirks and which + * need the x86-android-tablets module to properly work. + * Sorted alphabetically. + */ +#if IS_ENABLED(CONFIG_X86_ANDROID_TABLETS) + { + /* Acer Iconia One 7 B1-750 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Insyde"), + DMI_MATCH(DMI_PRODUCT_NAME, "VESPA2"), + }, + .driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS | + ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY | + ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS), + }, + { + /* Acer Iconia One 8 A1-840 (non FHD version) */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Insyde"), + DMI_MATCH(DMI_PRODUCT_NAME, "BayTrail"), + /* Above strings are too generic also match BIOS date */ + DMI_MATCH(DMI_BIOS_DATE, "04/01/2014"), + }, + .driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS | + ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY | + ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS), + }, + { + /* Asus ME176C tablet */ + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "ME176C"), + }, + .driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS | + ACPI_QUIRK_UART1_TTY_UART2_SKIP | + ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY | + ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS), + }, + { + /* Asus TF103C transformer 2-in-1 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "TF103C"), + }, + .driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS | + ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY | + ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS), + }, + { + /* Lenovo Yoga Book X90F/L */ + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Intel Corporation"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "CHERRYVIEW D1 PLATFORM"), + DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "YETI-11"), + }, + .driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS | + ACPI_QUIRK_UART1_SKIP | + ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY | + ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS), + }, + { + /* Lenovo Yoga Tablet 2 1050F/L */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Intel Corp."), + DMI_MATCH(DMI_PRODUCT_NAME, "VALLEYVIEW C0 PLATFORM"), + DMI_MATCH(DMI_BOARD_NAME, "BYT-T FFD8"), + /* Partial match on beginning of BIOS version */ + DMI_MATCH(DMI_BIOS_VERSION, "BLADE_21"), + }, + .driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS | + ACPI_QUIRK_PNP_UART1_SKIP | + ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY), + }, + { + /* Lenovo Yoga Tab 3 Pro X90F */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"), + DMI_MATCH(DMI_PRODUCT_VERSION, "Blade3-10A-001"), + }, + .driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS | + ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY | + ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS), + }, + { + /* Medion Lifetab S10346 */ + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"), + DMI_MATCH(DMI_BOARD_NAME, "Aptio CRB"), + /* Way too generic, also match on BIOS data */ + DMI_MATCH(DMI_BIOS_DATE, "10/22/2015"), + }, + .driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS | + ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY), + }, + { + /* Nextbook Ares 8 (BYT version)*/ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, 
"Insyde"), + DMI_MATCH(DMI_PRODUCT_NAME, "M890BAP"), + }, + .driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS | + ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY | + ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS), + }, + { + /* Nextbook Ares 8A (CHT version)*/ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Insyde"), + DMI_MATCH(DMI_PRODUCT_NAME, "CherryTrail"), + DMI_MATCH(DMI_BIOS_VERSION, "M882"), + }, + .driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS | + ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY), + }, + { + /* Vexia Edu Atla 10 tablet 5V version */ + .matches = { + /* Having all 3 of these not set is somewhat unique */ + DMI_MATCH(DMI_SYS_VENDOR, "To be filled by O.E.M."), + DMI_MATCH(DMI_PRODUCT_NAME, "To be filled by O.E.M."), + DMI_MATCH(DMI_BOARD_NAME, "To be filled by O.E.M."), + /* Above strings are too generic, also match on BIOS date */ + DMI_MATCH(DMI_BIOS_DATE, "05/14/2015"), + }, + .driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS | + ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY), + }, + { + /* Vexia Edu Atla 10 tablet 9V version */ + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"), + DMI_MATCH(DMI_BOARD_NAME, "Aptio CRB"), + /* Above strings are too generic, also match on BIOS date */ + DMI_MATCH(DMI_BIOS_DATE, "08/25/2014"), + }, + .driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS | + ACPI_QUIRK_UART1_SKIP | + ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY | + ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS), + }, + { + /* Whitelabel (sold as various brands) TM800A550L */ + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"), + DMI_MATCH(DMI_BOARD_NAME, "Aptio CRB"), + /* Above strings are too generic, also match on BIOS version */ + DMI_MATCH(DMI_BIOS_VERSION, "ZY-8-BI-PX4S70VTR400-X423B-005-D"), + }, + .driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS | + ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY), + }, +#endif + {} +}; + +#if IS_ENABLED(CONFIG_X86_ANDROID_TABLETS) +static const struct acpi_device_id i2c_acpi_known_good_ids[] = { + { "10EC5640", 0 }, /* RealTek ALC5640 audio codec */ + { "10EC5651", 0 }, /* RealTek ALC5651 audio codec */ + { "INT33F4", 0 }, /* X-Powers AXP288 PMIC */ + { "INT33F5", 0 }, /* TI Dollar Cove PMIC */ + { "INT33FD", 0 }, /* Intel Crystal Cove PMIC */ + { "INT34D3", 0 }, /* Intel Whiskey Cove PMIC */ + { "NPCE69A", 0 }, /* Asus Transformer keyboard dock */ + {} +}; + +bool acpi_quirk_skip_i2c_client_enumeration(struct acpi_device *adev) +{ + const struct dmi_system_id *dmi_id; + long quirks; + + dmi_id = dmi_first_match(acpi_quirk_skip_dmi_ids); + if (!dmi_id) + return false; + + quirks = (unsigned long)dmi_id->driver_data; + if (!(quirks & ACPI_QUIRK_SKIP_I2C_CLIENTS)) + return false; + + return acpi_match_device_ids(adev, i2c_acpi_known_good_ids); +} +EXPORT_SYMBOL_GPL(acpi_quirk_skip_i2c_client_enumeration); + +static int acpi_dmi_skip_serdev_enumeration(struct device *controller_parent, bool *skip) +{ + struct acpi_device *adev = ACPI_COMPANION(controller_parent); + const struct dmi_system_id *dmi_id; + long quirks = 0; + u64 uid = 0; + + dmi_id = dmi_first_match(acpi_quirk_skip_dmi_ids); + if (!dmi_id) + return 0; + + quirks = (unsigned long)dmi_id->driver_data; + + /* uid is left at 0 on errors and 0 is not a valid UART UID */ + acpi_dev_uid_to_integer(adev, &uid); + + /* For PCI UARTs without an UID */ + if (!uid && dev_is_pci(controller_parent)) { + struct pci_dev *pdev = to_pci_dev(controller_parent); + + /* + * Devfn values for PCI UARTs on Bay Trail SoCs, which are + * the only devices where this fallback is necessary. 
+ */ + if (pdev->devfn == PCI_DEVFN(0x1e, 3)) + uid = 1; + else if (pdev->devfn == PCI_DEVFN(0x1e, 4)) + uid = 2; + } + + if (!uid) + return 0; + + if (!dev_is_platform(controller_parent) && !dev_is_pci(controller_parent)) { + /* PNP enumerated UARTs */ + if ((quirks & ACPI_QUIRK_PNP_UART1_SKIP) && uid == 1) + *skip = true; + + return 0; + } + + if ((quirks & ACPI_QUIRK_UART1_SKIP) && uid == 1) + *skip = true; + + if (quirks & ACPI_QUIRK_UART1_TTY_UART2_SKIP) { + if (uid == 1) + return -ENODEV; /* Create tty cdev instead of serdev */ + + if (uid == 2) + *skip = true; + } + + return 0; +} + +bool acpi_quirk_skip_gpio_event_handlers(void) +{ + const struct dmi_system_id *dmi_id; + long quirks; + + dmi_id = dmi_first_match(acpi_quirk_skip_dmi_ids); + if (!dmi_id) + return false; + + quirks = (unsigned long)dmi_id->driver_data; + return (quirks & ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS); +} +EXPORT_SYMBOL_GPL(acpi_quirk_skip_gpio_event_handlers); +#else +static int acpi_dmi_skip_serdev_enumeration(struct device *controller_parent, bool *skip) +{ + return 0; +} +#endif + +int acpi_quirk_skip_serdev_enumeration(struct device *controller_parent, bool *skip) +{ + struct acpi_device *adev = ACPI_COMPANION(controller_parent); + + *skip = false; + + /* + * The DELL0501 ACPI HID represents an UART (CID is set to PNP0501) with + * a backlight-controller attached. There is no separate ACPI device with + * an UartSerialBusV2() resource to model the backlight-controller. + * Set skip to true so that the tty core creates a serdev ctrl device. + * The backlight driver will manually create the serdev client device. + */ + if (adev && acpi_dev_hid_match(adev, "DELL0501")) { + *skip = true; + /* + * Create a platform dev for dell-uart-backlight to bind to. + * This is a static device, so no need to store the result. + */ + platform_device_register_simple("dell-uart-backlight", PLATFORM_DEVID_NONE, + NULL, 0); + return 0; + } + + return acpi_dmi_skip_serdev_enumeration(controller_parent, skip); +} +EXPORT_SYMBOL_GPL(acpi_quirk_skip_serdev_enumeration); + +/* Lists of PMIC ACPI HIDs with an (often better) native charger driver */ +static const struct { + const char *hid; + int hrv; +} acpi_skip_ac_and_battery_pmic_ids[] = { + { "INT33F4", -1 }, /* X-Powers AXP288 PMIC */ + { "INT34D3", 3 }, /* Intel Cherrytrail Whiskey Cove PMIC */ +}; + +bool acpi_quirk_skip_acpi_ac_and_battery(void) +{ + const struct dmi_system_id *dmi_id; + long quirks = 0; + int i; + + dmi_id = dmi_first_match(acpi_quirk_skip_dmi_ids); + if (dmi_id) + quirks = (unsigned long)dmi_id->driver_data; + + if (quirks & ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY) + return true; + + if (quirks & ACPI_QUIRK_USE_ACPI_AC_AND_BATTERY) + return false; + + for (i = 0; i < ARRAY_SIZE(acpi_skip_ac_and_battery_pmic_ids); i++) { + if (acpi_dev_present(acpi_skip_ac_and_battery_pmic_ids[i].hid, "1", + acpi_skip_ac_and_battery_pmic_ids[i].hrv)) { + pr_info_once("found native %s PMIC, skipping ACPI AC and battery devices\n", + acpi_skip_ac_and_battery_pmic_ids[i].hid); + return true; + } + } + + return false; +} +EXPORT_SYMBOL_GPL(acpi_quirk_skip_acpi_ac_and_battery); + +/* This section provides a workaround for a specific x86 system + * which requires disabling of mwait to work correctly. 
+ */ +static int __init acpi_proc_quirk_set_no_mwait(const struct dmi_system_id *id) +{ + pr_notice("%s detected - disabling mwait for CPU C-states\n", + id->ident); + boot_option_idle_override = IDLE_NOMWAIT; + return 0; +} + +static const struct dmi_system_id acpi_proc_quirk_mwait_dmi_table[] __initconst = { + { + .callback = acpi_proc_quirk_set_no_mwait, + .ident = "Extensa 5220", + .matches = { + DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"), + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_VERSION, "0100"), + DMI_MATCH(DMI_BOARD_NAME, "Columbia"), + }, + .driver_data = NULL, + }, + {} +}; + +void __init acpi_proc_quirk_mwait_check(void) +{ + /* + * Check whether the system is in the DMI table. If yes, OSPM + * should not use mwait for CPU C-states. + */ + dmi_check_system(acpi_proc_quirk_mwait_dmi_table); +}
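
The acpi_quirk_skip_*() helpers exported from utils.c above are intended to be consulted by the I2C/serdev enumeration code and by the ACPI AC and battery drivers before they instantiate devices. The sketch below is illustrative only: the example_* wrapper functions are hypothetical, and only acpi_quirk_skip_i2c_client_enumeration() and acpi_quirk_skip_acpi_ac_and_battery() are part of the interface shown in the hunk above.

/*
 * Illustrative sketch of a caller of the quirk helpers above.
 * The example_* functions are hypothetical and not wired into any
 * driver; only the two acpi_quirk_*() calls come from utils.c.
 */
#include <linux/acpi.h>
#include <linux/errno.h>

/* Per ACPI-enumerated I2C device, before creating an i2c_client. */
static bool example_should_create_i2c_client(struct acpi_device *adev)
{
	/* Skip bogus DSDT entries on quirked Android x86 boards. */
	if (acpi_quirk_skip_i2c_client_enumeration(adev))
		return false;

	return true;
}

/* From the ACPI AC / battery drivers' init paths. */
static int example_ac_battery_init(void)
{
	/* Defer to a native PMIC fuel-gauge / charger driver when present. */
	if (acpi_quirk_skip_acpi_ac_and_battery())
		return -ENODEV;

	/* ... normal driver registration would follow here ... */
	return 0;
}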

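On the suspend-to-idle side, the s2idle.c hunk earlier adds acpi_register_lps0_dev() and acpi_unregister_lps0_dev() so that platform drivers (the AMD PMC driver, for example) can hook the LPS0 prepare/check/restore phases. A minimal sketch of such a consumer follows; the example_* names and callback bodies are invented, while struct acpi_s2idle_dev_ops and the register/unregister calls match the interface added above.

/*
 * Illustrative sketch of an acpi_s2idle_dev_ops consumer. Everything
 * named example_* is hypothetical; the ops structure and the
 * acpi_register_lps0_dev()/acpi_unregister_lps0_dev() calls are the
 * interface added in the s2idle.c hunk above (x86, CONFIG_SUSPEND).
 */
#include <linux/acpi.h>
#include <linux/module.h>

static void example_s2idle_prepare(void)
{
	/* Runs from acpi_s2idle_prepare_late_lps0(), after the LPS0 entry _DSMs. */
}

static void example_s2idle_check(void)
{
	/* Runs from acpi_s2idle_check_lps0() on each wakeup while still suspended. */
}

static void example_s2idle_restore(void)
{
	/* Runs from acpi_s2idle_restore_early_lps0(), before the LPS0 exit _DSMs. */
}

static struct acpi_s2idle_dev_ops example_s2idle_ops = {
	.prepare = example_s2idle_prepare,
	.check = example_s2idle_check,
	.restore = example_s2idle_restore,
};

static int __init example_init(void)
{
	/* Fails with -ENODEV when there is no LPS0 device or LPS0 is disabled. */
	return acpi_register_lps0_dev(&example_s2idle_ops);
}

static void __exit example_exit(void)
{
	acpi_unregister_lps0_dev(&example_s2idle_ops);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");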