-rw-r--r--  Documentation/ABI/stable/sysfs-driver-firmware-zynqmp | 141
-rw-r--r--  Documentation/ABI/testing/sysfs-bus-peci | 16
-rw-r--r--  Documentation/devicetree/bindings/gpio/delta,tn48m-gpio.yaml | 39
-rw-r--r--  Documentation/devicetree/bindings/mfd/delta,tn48m-cpld.yaml | 90
-rw-r--r--  Documentation/devicetree/bindings/nvmem/allwinner,sun4i-a10-sid.yaml | 1
-rw-r--r--  Documentation/devicetree/bindings/nvmem/fsl,layerscape-sfp.yaml | 38
-rw-r--r--  Documentation/devicetree/bindings/nvmem/sunplus,sp7021-ocotp.yaml | 86
-rw-r--r--  Documentation/devicetree/bindings/peci/peci-aspeed.yaml | 72
-rw-r--r--  Documentation/devicetree/bindings/peci/peci-controller.yaml | 33
-rw-r--r--  Documentation/devicetree/bindings/reserved-memory/google,open-dice.yaml | 46
-rw-r--r--  Documentation/devicetree/bindings/reset/delta,tn48m-reset.yaml | 35
-rw-r--r--  Documentation/driver-api/driver-model/devres.rst | 1
-rw-r--r--  Documentation/driver-api/nvmem.rst | 28
-rw-r--r--  Documentation/hwmon/index.rst | 2
-rw-r--r--  Documentation/hwmon/peci-cputemp.rst | 90
-rw-r--r--  Documentation/hwmon/peci-dimmtemp.rst | 57
-rw-r--r--  Documentation/index.rst | 1
-rw-r--r--  Documentation/peci/index.rst | 16
-rw-r--r--  Documentation/peci/peci.rst | 51
-rw-r--r--  MAINTAINERS | 42
-rw-r--r--  arch/arm/boot/dts/aspeed-g4.dtsi | 11
-rw-r--r--  arch/arm/boot/dts/aspeed-g5.dtsi | 11
-rw-r--r--  arch/arm/boot/dts/aspeed-g6.dtsi | 11
-rw-r--r--  drivers/Kconfig | 3
-rw-r--r--  drivers/Makefile | 1
-rw-r--r--  drivers/accessibility/speakup/speakup_audptr.c | 24
-rw-r--r--  drivers/accessibility/speakup/synth.c | 2
-rw-r--r--  drivers/android/binder_alloc.c | 8
-rw-r--r--  drivers/char/bsr.c | 2
-rw-r--r--  drivers/char/hpet.c | 28
-rw-r--r--  drivers/char/xilinx_hwicap/fifo_icap.c | 2
-rw-r--r--  drivers/char/xilinx_hwicap/xilinx_hwicap.c | 6
-rw-r--r--  drivers/comedi/drivers/das16.c | 4
-rw-r--r--  drivers/comedi/drivers/ni_routes.c | 6
-rw-r--r--  drivers/comedi/drivers/pcm3724.c | 1
-rw-r--r--  drivers/dio/dio.c | 140
-rw-r--r--  drivers/firmware/stratix10-svc.c | 9
-rw-r--r--  drivers/firmware/xilinx/zynqmp.c | 120
-rw-r--r--  drivers/fsi/fsi-core.c | 11
-rw-r--r--  drivers/fsi/fsi-master-aspeed.c | 19
-rw-r--r--  drivers/fsi/fsi-occ.c | 87
-rw-r--r--  drivers/fsi/fsi-sbefifo.c | 53
-rw-r--r--  drivers/fsi/fsi-scom.c | 45
-rw-r--r--  drivers/gpio/Kconfig | 12
-rw-r--r--  drivers/gpio/Makefile | 1
-rw-r--r--  drivers/gpio/gpio-tn48m.c | 100
-rw-r--r--  drivers/greybus/svc.c | 16
-rw-r--r--  drivers/hwmon/Kconfig | 2
-rw-r--r--  drivers/hwmon/Makefile | 1
-rw-r--r--  drivers/hwmon/peci/Kconfig | 31
-rw-r--r--  drivers/hwmon/peci/Makefile | 7
-rw-r--r--  drivers/hwmon/peci/common.h | 58
-rw-r--r--  drivers/hwmon/peci/cputemp.c | 592
-rw-r--r--  drivers/hwmon/peci/dimmtemp.c | 630
-rw-r--r--  drivers/mfd/Kconfig | 11
-rw-r--r--  drivers/mfd/simple-mfd-i2c.c | 1
-rw-r--r--  drivers/misc/Kconfig | 12
-rw-r--r--  drivers/misc/Makefile | 1
-rw-r--r--  drivers/misc/cardreader/alcor_pci.c | 9
-rw-r--r--  drivers/misc/cardreader/rtl8411.c | 2
-rw-r--r--  drivers/misc/cardreader/rts5209.c | 2
-rw-r--r--  drivers/misc/cardreader/rts5227.c | 47
-rw-r--r--  drivers/misc/cardreader/rts5228.c | 23
-rw-r--r--  drivers/misc/cardreader/rts5229.c | 2
-rw-r--r--  drivers/misc/cardreader/rts5249.c | 31
-rw-r--r--  drivers/misc/cardreader/rts5261.c | 35
-rw-r--r--  drivers/misc/cardreader/rtsx_pcr.c | 228
-rw-r--r--  drivers/misc/cardreader/rtsx_pcr.h | 3
-rw-r--r--  drivers/misc/eeprom/at25.c | 4
-rw-r--r--  drivers/misc/lkdtm/fortify.c | 6
-rw-r--r--  drivers/misc/mei/client.c | 1
-rw-r--r--  drivers/misc/mei/hw-me-regs.h | 1
-rw-r--r--  drivers/misc/mei/hw-me.c | 43
-rw-r--r--  drivers/misc/mei/init.c | 5
-rw-r--r--  drivers/misc/mei/pci-me.c | 10
-rw-r--r--  drivers/misc/ocxl/link.c | 2
-rw-r--r--  drivers/misc/open-dice.c | 208
-rw-r--r--  drivers/misc/sgi-gru/grutables.h | 6
-rw-r--r--  drivers/misc/vmw_vmci/vmci_guest.c | 363
-rw-r--r--  drivers/mmc/host/rtsx_pci_sdmmc.c | 26
-rw-r--r--  drivers/mtd/mtdcore.c | 13
-rw-r--r--  drivers/mux/core.c | 239
-rw-r--r--  drivers/nvmem/Kconfig | 24
-rw-r--r--  drivers/nvmem/Makefile | 4
-rw-r--r--  drivers/nvmem/core.c | 47
-rw-r--r--  drivers/nvmem/layerscape-sfp.c | 89
-rw-r--r--  drivers/nvmem/meson-mx-efuse.c | 3
-rw-r--r--  drivers/nvmem/qfprom.c | 4
-rw-r--r--  drivers/nvmem/sunplus-ocotp.c | 228
-rw-r--r--  drivers/nvmem/sunxi_sid.c | 6
-rw-r--r--  drivers/of/platform.c | 1
-rw-r--r--  drivers/peci/Kconfig | 36
-rw-r--r--  drivers/peci/Makefile | 10
-rw-r--r--  drivers/peci/controller/Kconfig | 18
-rw-r--r--  drivers/peci/controller/Makefile | 3
-rw-r--r--  drivers/peci/controller/peci-aspeed.c | 599
-rw-r--r--  drivers/peci/core.c | 236
-rw-r--r--  drivers/peci/cpu.c | 343
-rw-r--r--  drivers/peci/device.c | 252
-rw-r--r--  drivers/peci/internal.h | 136
-rw-r--r--  drivers/peci/request.c | 482
-rw-r--r--  drivers/peci/sysfs.c | 82
-rw-r--r--  drivers/pps/clients/pps-gpio.c | 2
-rw-r--r--  drivers/reset/Kconfig | 13
-rw-r--r--  drivers/reset/Makefile | 1
-rw-r--r--  drivers/reset/reset-tn48m.c | 128
-rw-r--r--  drivers/thunderbolt/nvm.c | 6
-rw-r--r--  drivers/w1/slaves/w1_therm.c | 8
-rw-r--r--  include/dt-bindings/reset/delta,tn48m-reset.h | 20
-rw-r--r--  include/linux/firmware/intel/stratix10-smc.h | 21
-rw-r--r--  include/linux/firmware/intel/stratix10-svc-client.h | 4
-rw-r--r--  include/linux/firmware/xlnx-zynqmp.h | 25
-rw-r--r--  include/linux/mux/consumer.h | 18
-rw-r--r--  include/linux/nvmem-provider.h | 8
-rw-r--r--  include/linux/peci-cpu.h | 40
-rw-r--r--  include/linux/peci.h | 112
-rw-r--r--  include/linux/rtsx_pci.h | 5
-rw-r--r--  include/linux/vmw_vmci_defs.h | 84
-rw-r--r--  include/trace/events/fsi.h | 86
-rw-r--r--  include/trace/events/fsi_master_aspeed.h | 12
-rw-r--r--  include/uapi/linux/fsi.h | 14
-rw-r--r--  kernel/configs/android-recommended.config | 2
-rw-r--r--  tools/testing/selftests/lkdtm/config | 2
123 files changed, 6897 insertions, 590 deletions
diff --git a/Documentation/ABI/stable/sysfs-driver-firmware-zynqmp b/Documentation/ABI/stable/sysfs-driver-firmware-zynqmp
index f5724bb5b462..c3fec3c835af 100644
--- a/Documentation/ABI/stable/sysfs-driver-firmware-zynqmp
+++ b/Documentation/ABI/stable/sysfs-driver-firmware-zynqmp
@@ -113,3 +113,144 @@ Description:
# echo 0 > /sys/devices/platform/firmware\:zynqmp-firmware/health_status
Users: Xilinx
+
+What: /sys/devices/platform/firmware\:zynqmp-firmware/feature_config_id
+Date: Feb 2022
+KernelVersion: 5.18
+Contact: "Ronak Jain" <ronak.jain@xilinx.com>
+Description:
+ This sysfs interface allows the user to configure features at
+ runtime: features running in the firmware can be enabled or
+ disabled, and their parameters can be configured. The supported
+ features are over temperature and external watchdog. Note that
+ the external watchdog is completely different from /dev/watchdog,
+ as it runs in the firmware and is used to monitor the health of
+ the firmware, not of the APU (Linux). The external watchdog is
+ also interfaced outside of the ZynqMP SoC.
+
+ The supported config IDs for feature configuration are:
+ 1. PM_FEATURE_OVERTEMP_STATUS = 1, the user can enable or
+ disable the over temperature feature.
+ 2. PM_FEATURE_OVERTEMP_VALUE = 2, the user can configure the
+ over temperature limit in Degree Celsius.
+ 3. PM_FEATURE_EXTWDT_STATUS = 3, the user can enable or disable
+ the external watchdog feature.
+ 4. PM_FEATURE_EXTWDT_VALUE = 4, the user can configure the
+ external watchdog timer interval.
+
+ Usage:
+
+ Select over temperature config ID to enable/disable feature
+ # echo 1 > /sys/devices/platform/firmware\:zynqmp-firmware/feature_config_id
+
+ Check whether the over temperature config ID is selected
+ # cat /sys/devices/platform/firmware\:zynqmp-firmware/feature_config_id
+ The expected result is 1.
+
+ Select over temperature config ID to configure OT limit
+ # echo 2 > /sys/devices/platform/firmware\:zynqmp-firmware/feature_config_id
+
+ Check whether the over temperature config ID is selected
+ # cat /sys/devices/platform/firmware\:zynqmp-firmware/feature_config_id
+ The expected result is 2.
+
+ Select external watchdog config ID to enable/disable feature
+ # echo 3 > /sys/devices/platform/firmware\:zynqmp-firmware/feature_config_id
+
+ Check whether the external watchdog config ID is selected
+ # cat /sys/devices/platform/firmware\:zynqmp-firmware/feature_config_id
+ The expected result is 3.
+
+ Select external watchdog config ID to configure time interval
+ # echo 4 > /sys/devices/platform/firmware\:zynqmp-firmware/feature_config_id
+
+ Check whether the external watchdog config ID is selected
+ # cat /sys/devices/platform/firmware\:zynqmp-firmware/feature_config_id
+ The expected result is 4.
+
+Users: Xilinx
+
+What: /sys/devices/platform/firmware\:zynqmp-firmware/feature_config_value
+Date: Feb 2022
+KernelVersion: 5.18
+Contact: "Ronak Jain" <ronak.jain@xilinx.com>
+Description:
+ This sysfs interface allows the user to configure features at
+ runtime: features running in the firmware can be enabled or
+ disabled, and their parameters can be configured. The supported
+ features are over temperature and external watchdog. Note that
+ the external watchdog is completely different from /dev/watchdog,
+ as it runs in the firmware and is used to monitor the health of
+ the firmware, not of the APU (Linux). The external watchdog is
+ also interfaced outside of the ZynqMP SoC.
+
+ By default the features are disabled in the firmware. The user
+ can enable a feature by selecting the appropriate config ID in
+ feature_config_id and then writing the desired value to
+ feature_config_value.
+
+ The default limit for the over temperature is 90 Degree Celsius.
+ The default timer interval for the external watchdog is 570ms.
+
+ The supported config IDs for feature configuration are:
+ 1. PM_FEATURE_OVERTEMP_STATUS = 1, the user can enable or
+ disable the over temperature feature.
+ 2. PM_FEATURE_OVERTEMP_VALUE = 2, the user can configure the
+ over temperature limit in Degree Celsius.
+ 3. PM_FEATURE_EXTWDT_STATUS = 3, the user can enable or disable
+ the external watchdog feature.
+ 4. PM_FEATURE_EXTWDT_VALUE = 4, the user can configure the
+ external watchdog timer interval.
+
+ Usage:
+
+ Enable over temperature feature
+ # echo 1 > /sys/devices/platform/firmware\:zynqmp-firmware/feature_config_id
+ # echo 1 > /sys/devices/platform/firmware\:zynqmp-firmware/feature_config_value
+
+ Check whether the over temperature feature is enabled or not
+ # cat /sys/devices/platform/firmware\:zynqmp-firmware/feature_config_value
+ The expected result is 1.
+
+ Disable over temperature feature
+ # echo 1 > /sys/devices/platform/firmware\:zynqmp-firmware/feature_config_id
+ # echo 0 > /sys/devices/platform/firmware\:zynqmp-firmware/feature_config_value
+
+ Check whether the over temperature feature is disabled or not
+ # cat /sys/devices/platform/firmware\:zynqmp-firmware/feature_config_value
+ The expected result is 0.
+
+ Configure over temperature limit to 50 Degree Celsius
+ # echo 2 > /sys/devices/platform/firmware\:zynqmp-firmware/feature_config_id
+ # echo 50 > /sys/devices/platform/firmware\:zynqmp-firmware/feature_config_value
+
+ Check whether the over temperature limit is configured or not
+ # cat /sys/devices/platform/firmware\:zynqmp-firmware/feature_config_value
+ The expected result is 50.
+
+ Enable external watchdog feature
+ # echo 3 > /sys/devices/platform/firmware\:zynqmp-firmware/feature_config_id
+ # echo 1 > /sys/devices/platform/firmware\:zynqmp-firmware/feature_config_value
+
+ Check whether the external watchdog feature is enabled or not
+ # cat /sys/devices/platform/firmware\:zynqmp-firmware/feature_config_value
+ The expected result is 1.
+
+ Disable external watchdog feature
+ # echo 3 > /sys/devices/platform/firmware\:zynqmp-firmware/feature_config_id
+ # echo 0 > /sys/devices/platform/firmware\:zynqmp-firmware/feature_config_value
+
+ Check whether the external watchdog feature is disabled or not
+ # cat /sys/devices/platform/firmware\:zynqmp-firmware/feature_config_value
+ The expected result is 0.
+
+ Configure external watchdog timer interval to 500ms
+ # echo 4 > /sys/devices/platform/firmware\:zynqmp-firmware/feature_config_id
+ # echo 500 > /sys/devices/platform/firmware\:zynqmp-firmware/feature_config_value
+
+ Check whether the external watchdog timer interval is configured or not
+ # cat /sys/devices/platform/firmware\:zynqmp-firmware/feature_config_value
+ The expected result is 500.
+
+Users: Xilinx
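The same configuration is reachable from kernel code through the firmware API added later in this patch (zynqmp_pm_set_feature_config() and zynqmp_pm_get_feature_config() in drivers/firmware/xilinx/zynqmp.c). The following is only a rough sketch of a kernel-side caller, assuming the PM_FEATURE_* config IDs documented above and PAYLOAD_ARG_CNT from include/linux/firmware/xlnx-zynqmp.h; the surrounding driver context is hypothetical:

  /* Sketch only: enable the external watchdog with a 500 ms interval
   * using the firmware API added by this patch. The calling driver
   * context (dev) is illustrative.
   */
  static int example_configure_extwdt(struct device *dev)
  {
  	u32 payload[PAYLOAD_ARG_CNT];
  	int ret;

  	ret = zynqmp_pm_set_feature_config(PM_FEATURE_EXTWDT_VALUE, 500);
  	if (ret) {
  		dev_err(dev, "setting EXTWDT interval failed: %d\n", ret);
  		return ret;
  	}

  	ret = zynqmp_pm_set_feature_config(PM_FEATURE_EXTWDT_STATUS, 1);
  	if (ret) {
  		dev_err(dev, "enabling EXTWDT failed: %d\n", ret);
  		return ret;
  	}

  	/* Read back the configured interval; the value is assumed to be
  	 * returned in payload[1], as with other ZynqMP IOCTL queries.
  	 */
  	ret = zynqmp_pm_get_feature_config(PM_FEATURE_EXTWDT_VALUE, payload);
  	if (!ret)
  		dev_info(dev, "EXTWDT interval: %u ms\n", payload[1]);

  	return ret;
  }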
diff --git a/Documentation/ABI/testing/sysfs-bus-peci b/Documentation/ABI/testing/sysfs-bus-peci
new file mode 100644
index 000000000000..87454ec5d981
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-peci
@@ -0,0 +1,16 @@
+What: /sys/bus/peci/rescan
+Date: July 2021
+KernelVersion: 5.18
+Contact: Iwona Winiarska <iwona.winiarska@intel.com>
+Description:
+ Writing a non-zero value to this attribute will
+ initiate a scan for PECI devices on all PECI controllers
+ in the system.
+
+What: /sys/bus/peci/devices/<controller_id>-<device_addr>/remove
+Date: July 2021
+KernelVersion: 5.18
+Contact: Iwona Winiarska <iwona.winiarska@intel.com>
+Description:
+ Writing a non-zero value to this attribute will
+ remove the PECI device and any of its children.
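For illustration, a userspace tool can drive the rescan attribute documented above. This is only a minimal sketch; the sysfs path comes from the entry above, everything else is boilerplate:

  /* Sketch: trigger a PECI bus rescan from userspace by writing a
   * non-zero value to the attribute documented above.
   */
  #include <fcntl.h>
  #include <stdio.h>
  #include <unistd.h>

  int main(void)
  {
  	int fd = open("/sys/bus/peci/rescan", O_WRONLY);

  	if (fd < 0) {
  		perror("/sys/bus/peci/rescan");
  		return 1;
  	}
  	if (write(fd, "1", 1) != 1)
  		perror("write");
  	close(fd);
  	return 0;
  }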
diff --git a/Documentation/devicetree/bindings/gpio/delta,tn48m-gpio.yaml b/Documentation/devicetree/bindings/gpio/delta,tn48m-gpio.yaml
new file mode 100644
index 000000000000..e3e668a12091
--- /dev/null
+++ b/Documentation/devicetree/bindings/gpio/delta,tn48m-gpio.yaml
@@ -0,0 +1,39 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/gpio/delta,tn48m-gpio.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Delta Networks TN48M CPLD GPIO controller
+
+maintainers:
+ - Robert Marko <robert.marko@sartura.hr>
+
+description: |
+ This module is part of the Delta TN48M multi-function device. For more
+ details see ../mfd/delta,tn48m-cpld.yaml.
+
+ Delta TN48M has an onboard Lattice CPLD that is used as a GPIO expander.
+ It provides 12 pins in total; they are either input-only or output-only.
+
+properties:
+ compatible:
+ enum:
+ - delta,tn48m-gpo
+ - delta,tn48m-gpi
+
+ reg:
+ maxItems: 1
+
+ "#gpio-cells":
+ const: 2
+
+ gpio-controller: true
+
+required:
+ - compatible
+ - reg
+ - "#gpio-cells"
+ - gpio-controller
+
+additionalProperties: false
diff --git a/Documentation/devicetree/bindings/mfd/delta,tn48m-cpld.yaml b/Documentation/devicetree/bindings/mfd/delta,tn48m-cpld.yaml
new file mode 100644
index 000000000000..f6967c1f6235
--- /dev/null
+++ b/Documentation/devicetree/bindings/mfd/delta,tn48m-cpld.yaml
@@ -0,0 +1,90 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mfd/delta,tn48m-cpld.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Delta Networks TN48M CPLD controller
+
+maintainers:
+ - Robert Marko <robert.marko@sartura.hr>
+
+description: |
+ The Lattice CPLD onboard the TN48M switches is used for system
+ management.
+
+ It provides information about the hardware model, revision,
+ PSU status, etc.
+
+ It is also used as a GPIO expander and reset controller
+ for the switch MACs and other peripherals.
+
+properties:
+ compatible:
+ const: delta,tn48m-cpld
+
+ reg:
+ description:
+ I2C device address.
+ maxItems: 1
+
+ "#address-cells":
+ const: 1
+
+ "#size-cells":
+ const: 0
+
+required:
+ - compatible
+ - reg
+ - "#address-cells"
+ - "#size-cells"
+
+patternProperties:
+ "^gpio(@[0-9a-f]+)?$":
+ $ref: ../gpio/delta,tn48m-gpio.yaml
+
+ "^reset-controller?$":
+ $ref: ../reset/delta,tn48m-reset.yaml
+
+additionalProperties: false
+
+examples:
+ - |
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpld@41 {
+ compatible = "delta,tn48m-cpld";
+ reg = <0x41>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ gpio@31 {
+ compatible = "delta,tn48m-gpo";
+ reg = <0x31>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ gpio@3a {
+ compatible = "delta,tn48m-gpi";
+ reg = <0x3a>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ gpio@40 {
+ compatible = "delta,tn48m-gpi";
+ reg = <0x40>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ reset-controller {
+ compatible = "delta,tn48m-reset";
+ #reset-cells = <1>;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/nvmem/allwinner,sun4i-a10-sid.yaml b/Documentation/devicetree/bindings/nvmem/allwinner,sun4i-a10-sid.yaml
index 6687ab720304..e558587ff885 100644
--- a/Documentation/devicetree/bindings/nvmem/allwinner,sun4i-a10-sid.yaml
+++ b/Documentation/devicetree/bindings/nvmem/allwinner,sun4i-a10-sid.yaml
@@ -20,6 +20,7 @@ properties:
- const: allwinner,sun7i-a20-sid
- const: allwinner,sun8i-a83t-sid
- const: allwinner,sun8i-h3-sid
+ - const: allwinner,sun20i-d1-sid
- const: allwinner,sun50i-a64-sid
- items:
- const: allwinner,sun50i-a100-sid
diff --git a/Documentation/devicetree/bindings/nvmem/fsl,layerscape-sfp.yaml b/Documentation/devicetree/bindings/nvmem/fsl,layerscape-sfp.yaml
new file mode 100644
index 000000000000..80914b93638e
--- /dev/null
+++ b/Documentation/devicetree/bindings/nvmem/fsl,layerscape-sfp.yaml
@@ -0,0 +1,38 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/nvmem/fsl,layerscape-sfp.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Freescale Layerscape Security Fuse Processor
+
+maintainers:
+ - Michael Walle <michael@walle.cc>
+
+description: |
+ SFP is the security fuse processor which, among other things, provides a
+ unique identifier per part.
+
+allOf:
+ - $ref: "nvmem.yaml#"
+
+properties:
+ compatible:
+ enum:
+ - fsl,ls1028a-sfp
+
+ reg:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ efuse@1e80000 {
+ compatible = "fsl,ls1028a-sfp";
+ reg = <0x1e80000 0x8000>;
+ };
diff --git a/Documentation/devicetree/bindings/nvmem/sunplus,sp7021-ocotp.yaml b/Documentation/devicetree/bindings/nvmem/sunplus,sp7021-ocotp.yaml
new file mode 100644
index 000000000000..4b28f37dfb67
--- /dev/null
+++ b/Documentation/devicetree/bindings/nvmem/sunplus,sp7021-ocotp.yaml
@@ -0,0 +1,86 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+# Copyright (C) Sunplus Co., Ltd. 2021
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/nvmem/sunplus,sp7021-ocotp.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: On-Chip OTP Memory for Sunplus SP7021
+
+maintainers:
+ - Vincent Shih <vincent.sunplus@gmail.com>
+
+allOf:
+ - $ref: "nvmem.yaml#"
+
+properties:
+ compatible:
+ const: sunplus,sp7021-ocotp
+
+ reg:
+ maxItems: 2
+
+ reg-names:
+ items:
+ - const: hb_gpio
+ - const: otprx
+
+ clocks:
+ maxItems: 1
+
+ "#address-cells":
+ const: 1
+
+ "#size-cells":
+ const: 1
+
+ thermal-calibration:
+ type: object
+ description: thermal calibration values
+
+ disconnect-voltage:
+ type: object
+ description: disconnect voltages of usb2 port 0 and port 1
+
+ mac-address0:
+ type: object
+ description: MAC address of ethernet port 0
+
+ mac-address1:
+ type: object
+ description: MAC address of ethernet port 1
+
+required:
+ - compatible
+ - reg
+ - reg-names
+ - clocks
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/sp-sp7021.h>
+
+ otp: otp@9c00af00 {
+ compatible = "sunplus,sp7021-ocotp";
+ reg = <0x9c00af00 0x34>, <0x9c00af80 0x58>;
+ reg-names = "hb_gpio", "otprx";
+ clocks = <&clks OTPRX>;
+
+ #address-cells = <1>;
+ #size-cells = <1>;
+ therm_calib: thermal-calibration@14 {
+ reg = <0x14 0x3>;
+ };
+ disc_vol: disconnect-voltage@18 {
+ reg = <0x18 0x2>;
+ };
+ mac_addr0: mac-address0@34 {
+ reg = <0x34 0x6>;
+ };
+ mac_addr1: mac-address1@3a {
+ reg = <0x3a 0x6>;
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/peci/peci-aspeed.yaml b/Documentation/devicetree/bindings/peci/peci-aspeed.yaml
new file mode 100644
index 000000000000..1e68a801a92a
--- /dev/null
+++ b/Documentation/devicetree/bindings/peci/peci-aspeed.yaml
@@ -0,0 +1,72 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/peci/peci-aspeed.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Aspeed PECI Bus Device Tree Bindings
+
+maintainers:
+ - Iwona Winiarska <iwona.winiarska@intel.com>
+ - Jae Hyun Yoo <jae.hyun.yoo@linux.intel.com>
+
+allOf:
+ - $ref: peci-controller.yaml#
+
+properties:
+ compatible:
+ enum:
+ - aspeed,ast2400-peci
+ - aspeed,ast2500-peci
+ - aspeed,ast2600-peci
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ description:
+ Clock source for PECI controller. Should reference the external
+ oscillator clock.
+ maxItems: 1
+
+ resets:
+ maxItems: 1
+
+ cmd-timeout-ms:
+ minimum: 1
+ maximum: 1000
+ default: 1000
+
+ clock-frequency:
+ description:
+ The desired operation frequency of the PECI controller in Hz.
+ minimum: 2000
+ maximum: 2000000
+ default: 1000000
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+ - resets
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/clock/ast2600-clock.h>
+ peci-controller@1e78b000 {
+ compatible = "aspeed,ast2600-peci";
+ reg = <0x1e78b000 0x100>;
+ interrupts = <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&syscon ASPEED_CLK_GATE_REF0CLK>;
+ resets = <&syscon ASPEED_RESET_PECI>;
+ cmd-timeout-ms = <1000>;
+ clock-frequency = <1000000>;
+ };
+...
diff --git a/Documentation/devicetree/bindings/peci/peci-controller.yaml b/Documentation/devicetree/bindings/peci/peci-controller.yaml
new file mode 100644
index 000000000000..bbc3d3f3a929
--- /dev/null
+++ b/Documentation/devicetree/bindings/peci/peci-controller.yaml
@@ -0,0 +1,33 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/peci/peci-controller.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Generic Device Tree Bindings for PECI
+
+maintainers:
+ - Iwona Winiarska <iwona.winiarska@intel.com>
+
+description:
+ PECI (Platform Environment Control Interface) is an interface that provides a
+ communication channel from Intel processors and chipset components to external
+ monitoring or control devices.
+
+properties:
+ $nodename:
+ pattern: "^peci-controller(@.*)?$"
+
+ cmd-timeout-ms:
+ description:
+ Command timeout in units of ms.
+
+additionalProperties: true
+
+examples:
+ - |
+ peci-controller@1e78b000 {
+ reg = <0x1e78b000 0x100>;
+ cmd-timeout-ms = <500>;
+ };
+...
diff --git a/Documentation/devicetree/bindings/reserved-memory/google,open-dice.yaml b/Documentation/devicetree/bindings/reserved-memory/google,open-dice.yaml
new file mode 100644
index 000000000000..257a0b51994a
--- /dev/null
+++ b/Documentation/devicetree/bindings/reserved-memory/google,open-dice.yaml
@@ -0,0 +1,46 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/reserved-memory/google,open-dice.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Open Profile for DICE Device Tree Bindings
+
+description: |
+ This binding represents a reserved memory region containing data
+ generated by the Open Profile for DICE protocol.
+
+ See https://pigweed.googlesource.com/open-dice/
+
+maintainers:
+ - David Brazdil <dbrazdil@google.com>
+
+allOf:
+ - $ref: "reserved-memory.yaml"
+
+properties:
+ compatible:
+ const: google,open-dice
+
+ reg:
+ description: page-aligned region of memory containing DICE data
+
+required:
+ - compatible
+ - reg
+ - no-map
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <1>;
+
+ dice: dice@12340000 {
+ compatible = "google,open-dice";
+ reg = <0x00 0x12340000 0x2000>;
+ no-map;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/reset/delta,tn48m-reset.yaml b/Documentation/devicetree/bindings/reset/delta,tn48m-reset.yaml
new file mode 100644
index 000000000000..0e5ee8decc0d
--- /dev/null
+++ b/Documentation/devicetree/bindings/reset/delta,tn48m-reset.yaml
@@ -0,0 +1,35 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/reset/delta,tn48m-reset.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Delta Networks TN48M CPLD reset controller
+
+maintainers:
+ - Robert Marko <robert.marko@sartura.hr>
+
+description: |
+ This module is part of the Delta TN48M multi-function device. For more
+ details see ../mfd/delta,tn48m-cpld.yaml.
+
+ The reset controller module provides resets for the following:
+ * 88F7040 SoC
+ * 88F6820 SoC
+ * 98DX3265 switch MACs
+ * 88E1680 PHYs
+ * 88E1512 PHY
+ * PoE PSE controller
+
+properties:
+ compatible:
+ const: delta,tn48m-reset
+
+ "#reset-cells":
+ const: 1
+
+required:
+ - compatible
+ - "#reset-cells"
+
+additionalProperties: false
diff --git a/Documentation/driver-api/driver-model/devres.rst b/Documentation/driver-api/driver-model/devres.rst
index 148e19381b79..5018403fe82f 100644
--- a/Documentation/driver-api/driver-model/devres.rst
+++ b/Documentation/driver-api/driver-model/devres.rst
@@ -368,6 +368,7 @@ MUX
devm_mux_chip_alloc()
devm_mux_chip_register()
devm_mux_control_get()
+ devm_mux_state_get()
NET
devm_alloc_etherdev()
diff --git a/Documentation/driver-api/nvmem.rst b/Documentation/driver-api/nvmem.rst
index 287e86819640..e3366322d46c 100644
--- a/Documentation/driver-api/nvmem.rst
+++ b/Documentation/driver-api/nvmem.rst
@@ -26,9 +26,7 @@ was a rather big abstraction leak.
This framework aims at solve these problems. It also introduces DT
representation for consumer devices to go get the data they require (MAC
-Addresses, SoC/Revision ID, part numbers, and so on) from the NVMEMs. This
-framework is based on regmap, so that most of the abstraction available in
-regmap can be reused, across multiple types of buses.
+Addresses, SoC/Revision ID, part numbers, and so on) from the NVMEMs.
NVMEM Providers
+++++++++++++++
@@ -45,23 +43,21 @@ nvmem_device pointer.
nvmem_unregister(nvmem) is used to unregister a previously registered provider.
-For example, a simple qfprom case::
+For example, a simple nvram case::
- static struct nvmem_config econfig = {
- .name = "qfprom",
- .owner = THIS_MODULE,
- };
-
- static int qfprom_probe(struct platform_device *pdev)
+ static int brcm_nvram_probe(struct platform_device *pdev)
{
+ struct nvmem_config config = {
+ .name = "brcm-nvram",
+ .reg_read = brcm_nvram_read,
+ };
...
- econfig.dev = &pdev->dev;
- nvmem = nvmem_register(&econfig);
- ...
- }
+ config.dev = &pdev->dev;
+ config.priv = priv;
+ config.size = resource_size(res);
-It is mandatory that the NVMEM provider has a regmap associated with its
-struct device. Failure to do would return error code from nvmem_register().
+ devm_nvmem_register(&config);
+ }
Users of board files can define and register nvmem cells using the
nvmem_cell_table struct::
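To round out the updated example above, a reg_read callback compatible with the nvmem_config shown in the hunk could look roughly like the sketch below. The private structure and the MMIO-backed read loop are assumptions made for illustration; only the callback prototype comes from the nvmem framework:

  /* Illustrative provider read callback matching nvmem_config.reg_read.
   * The private structure and iomem base are hypothetical.
   */
  struct brcm_nvram {
  	struct device *dev;
  	void __iomem *base;
  };

  static int brcm_nvram_read(void *context, unsigned int offset,
  			   void *val, size_t bytes)
  {
  	struct brcm_nvram *priv = context;
  	u8 *dst = val;

  	while (bytes--)
  		*dst++ = readb(priv->base + offset++);

  	return 0;
  }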
diff --git a/Documentation/hwmon/index.rst b/Documentation/hwmon/index.rst
index df20022c741f..f387f661e1d7 100644
--- a/Documentation/hwmon/index.rst
+++ b/Documentation/hwmon/index.rst
@@ -161,6 +161,8 @@ Hardware Monitoring Kernel Drivers
pcf8591
pim4328
pm6764tr
+ peci-cputemp
+ peci-dimmtemp
pmbus
powr1220
pxe1610
diff --git a/Documentation/hwmon/peci-cputemp.rst b/Documentation/hwmon/peci-cputemp.rst
new file mode 100644
index 000000000000..fe0422248dc5
--- /dev/null
+++ b/Documentation/hwmon/peci-cputemp.rst
@@ -0,0 +1,90 @@
+.. SPDX-License-Identifier: GPL-2.0-only
+
+Kernel driver peci-cputemp
+==========================
+
+Supported chips:
+ One of the Intel server CPUs listed below, connected to a PECI bus.
+ * Intel Xeon E5/E7 v3 server processors
+ Intel Xeon E5-14xx v3 family
+ Intel Xeon E5-24xx v3 family
+ Intel Xeon E5-16xx v3 family
+ Intel Xeon E5-26xx v3 family
+ Intel Xeon E5-46xx v3 family
+ Intel Xeon E7-48xx v3 family
+ Intel Xeon E7-88xx v3 family
+ * Intel Xeon E5/E7 v4 server processors
+ Intel Xeon E5-16xx v4 family
+ Intel Xeon E5-26xx v4 family
+ Intel Xeon E5-46xx v4 family
+ Intel Xeon E7-48xx v4 family
+ Intel Xeon E7-88xx v4 family
+ * Intel Xeon Scalable server processors
+ Intel Xeon D family
+ Intel Xeon Bronze family
+ Intel Xeon Silver family
+ Intel Xeon Gold family
+ Intel Xeon Platinum family
+
+ Datasheet: Available from http://www.intel.com/design/literature.htm
+
+Author: Jae Hyun Yoo <jae.hyun.yoo@linux.intel.com>
+
+Description
+-----------
+
+This driver implements a generic PECI hwmon feature which provides Digital
+Thermal Sensor (DTS) thermal readings of the CPU package and CPU cores that are
+accessible via the processor PECI interface.
+
+All temperature values are given in millidegree Celsius and will be measurable
+only when the target CPU is powered on.
+
+Sysfs interface
+-------------------
+
+======================= =======================================================
+temp1_label "Die"
+temp1_input Provides current die temperature of the CPU package.
+temp1_max Provides thermal control temperature of the CPU package
+ which is also known as Tcontrol.
+temp1_crit Provides shutdown temperature of the CPU package which
+ is also known as the maximum processor junction
+ temperature, Tjmax or Tprochot.
+temp1_crit_hyst Provides the hysteresis value from Tcontrol to Tjmax of
+ the CPU package.
+
+temp2_label "DTS"
+temp2_input Provides current temperature of the CPU package scaled
+ to match DTS thermal profile.
+temp2_max Provides thermal control temperature of the CPU package
+ which is also known as Tcontrol.
+temp2_crit Provides shutdown temperature of the CPU package which
+ is also known as the maximum processor junction
+ temperature, Tjmax or Tprochot.
+temp2_crit_hyst Provides the hysteresis value from Tcontrol to Tjmax of
+ the CPU package.
+
+temp3_label "Tcontrol"
+temp3_input Provides the current Tcontrol temperature of the CPU
+ package, also known as the Fan Temperature target.
+ Indicates the relative value from the thermal monitor
+ trip temperature at which fans should be engaged.
+temp3_crit Provides the Tcontrol critical value of the CPU
+ package, which is the same as Tjmax.
+
+temp4_label "Tthrottle"
+temp4_input Provides the current Tthrottle temperature of the CPU
+ package, used as the throttling temperature. If this
+ value is allowed and lower than Tjmax, throttling will
+ occur and be reported at a temperature lower than Tjmax.
+
+temp5_label "Tjmax"
+temp5_input Provides the maximum junction temperature, Tjmax of the
+ CPU package.
+
+temp[6-N]_label Provides string "Core X", where X is the resolved core
+ number.
+temp[6-N]_input Provides current temperature of each core.
+
+======================= =======================================================
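Reading these attributes from userspace is plain hwmon sysfs access. A minimal sketch, assuming the device landed on hwmon0 (the hwmon index varies per system):

  /* Sketch: read temp1_input (die temperature) and convert from
   * millidegree Celsius, as documented above, to degrees.
   */
  #include <stdio.h>

  int main(void)
  {
  	const char *path = "/sys/class/hwmon/hwmon0/temp1_input";
  	FILE *f = fopen(path, "r");
  	long mdeg;

  	if (!f) {
  		perror(path);
  		return 1;
  	}
  	if (fscanf(f, "%ld", &mdeg) != 1) {
  		fprintf(stderr, "failed to parse %s\n", path);
  		fclose(f);
  		return 1;
  	}
  	fclose(f);
  	printf("Die temperature: %.3f degrees Celsius\n", mdeg / 1000.0);
  	return 0;
  }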
diff --git a/Documentation/hwmon/peci-dimmtemp.rst b/Documentation/hwmon/peci-dimmtemp.rst
new file mode 100644
index 000000000000..e562aed620de
--- /dev/null
+++ b/Documentation/hwmon/peci-dimmtemp.rst
@@ -0,0 +1,57 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+Kernel driver peci-dimmtemp
+===========================
+
+Supported chips:
+ One of the Intel server CPUs listed below, connected to a PECI bus.
+ * Intel Xeon E5/E7 v3 server processors
+ Intel Xeon E5-14xx v3 family
+ Intel Xeon E5-24xx v3 family
+ Intel Xeon E5-16xx v3 family
+ Intel Xeon E5-26xx v3 family
+ Intel Xeon E5-46xx v3 family
+ Intel Xeon E7-48xx v3 family
+ Intel Xeon E7-88xx v3 family
+ * Intel Xeon E5/E7 v4 server processors
+ Intel Xeon E5-16xx v4 family
+ Intel Xeon E5-26xx v4 family
+ Intel Xeon E5-46xx v4 family
+ Intel Xeon E7-48xx v4 family
+ Intel Xeon E7-88xx v4 family
+ * Intel Xeon Scalable server processors
+ Intel Xeon D family
+ Intel Xeon Bronze family
+ Intel Xeon Silver family
+ Intel Xeon Gold family
+ Intel Xeon Platinum family
+
+ Datasheet: Available from http://www.intel.com/design/literature.htm
+
+Author: Jae Hyun Yoo <jae.hyun.yoo@linux.intel.com>
+
+Description
+-----------
+
+This driver implements a generic PECI hwmon feature which provides DIMM
+temperature readings that are accessible via the processor PECI interface.
+
+All temperature values are given in millidegree Celsius and will be measurable
+only when the target CPU is powered on.
+
+Sysfs interface
+-------------------
+
+======================= =======================================================
+
+temp[N]_label Provides string "DIMM CI", where C is the DIMM channel
+ and I is the DIMM index of the populated DIMM.
+temp[N]_input Provides current temperature of the populated DIMM.
+temp[N]_max Provides thermal control temperature of the DIMM.
+temp[N]_crit Provides shutdown temperature of the DIMM.
+
+======================= =======================================================
+
+Note:
+ DIMM temperature attributes will appear when the client CPU's BIOS
+ completes memory training and testing.
diff --git a/Documentation/index.rst b/Documentation/index.rst
index b58692d687f6..1988c19d9daf 100644
--- a/Documentation/index.rst
+++ b/Documentation/index.rst
@@ -138,6 +138,7 @@ needed).
scheduler/index
mhi/index
tty/index
+ peci/index
Architecture-agnostic documentation
-----------------------------------
diff --git a/Documentation/peci/index.rst b/Documentation/peci/index.rst
new file mode 100644
index 000000000000..989de10416e7
--- /dev/null
+++ b/Documentation/peci/index.rst
@@ -0,0 +1,16 @@
+.. SPDX-License-Identifier: GPL-2.0-only
+
+====================
+Linux PECI Subsystem
+====================
+
+.. toctree::
+
+ peci
+
+.. only:: subproject and html
+
+ Indices
+ =======
+
+ * :ref:`genindex`
diff --git a/Documentation/peci/peci.rst b/Documentation/peci/peci.rst
new file mode 100644
index 000000000000..331b1ec00e22
--- /dev/null
+++ b/Documentation/peci/peci.rst
@@ -0,0 +1,51 @@
+.. SPDX-License-Identifier: GPL-2.0-only
+
+========
+Overview
+========
+
+The Platform Environment Control Interface (PECI) is a communication
+interface between an Intel processor and management controllers
+(e.g. Baseboard Management Controller, BMC).
+PECI provides services that allow the management controller to
+configure, monitor and debug the platform by accessing various registers.
+It defines a dedicated command protocol, where the management
+controller acts as the PECI originator and the processor as the
+PECI responder.
+PECI can be used in both single-processor and multiple-processor
+systems.
+
+NOTE:
+The Intel PECI specification is not released as a dedicated document;
+instead, it is part of the External Design Specification (EDS) for a
+given Intel CPU. External Design Specifications are usually not
+publicly available.
+
+PECI Wire
+---------
+
+The PECI Wire interface uses a single wire for self-clocking and data
+transfer. It does not require any additional control lines: the
+physical layer is a self-clocked one-wire bus signal that begins each
+bit with a driven, rising edge from an idle level near zero volts. The
+duration for which the signal is driven high determines whether the bit
+value is logic '0' or logic '1'. PECI Wire also uses a variable data
+rate that is established with every message.
+
+For PECI Wire, each processor package uses a unique, fixed address
+within a defined range, and that address should have a fixed
+relationship with the processor socket ID: if one of the processors is
+removed, the addresses of the remaining processors are not affected.
+
+PECI subsystem internals
+------------------------
+
+.. kernel-doc:: include/linux/peci.h
+.. kernel-doc:: drivers/peci/internal.h
+.. kernel-doc:: drivers/peci/core.c
+.. kernel-doc:: drivers/peci/request.c
+
+PECI CPU Driver API
+-------------------
+.. kernel-doc:: drivers/peci/cpu.c
diff --git a/MAINTAINERS b/MAINTAINERS
index 1ba1e4af2cbc..37ea1436fc62 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2992,6 +2992,14 @@ S: Maintained
F: Documentation/devicetree/bindings/net/asix,ax88796c.yaml
F: drivers/net/ethernet/asix/ax88796c_*
+ASPEED PECI CONTROLLER
+M: Iwona Winiarska <iwona.winiarska@intel.com>
+L: linux-aspeed@lists.ozlabs.org (moderated for non-subscribers)
+L: openbmc@lists.ozlabs.org (moderated for non-subscribers)
+S: Supported
+F: Documentation/devicetree/bindings/peci/peci-aspeed.yaml
+F: drivers/peci/controller/peci-aspeed.c
+
ASPEED PINCTRL DRIVERS
M: Andrew Jeffery <andrew@aj.id.au>
L: linux-aspeed@lists.ozlabs.org (moderated for non-subscribers)
@@ -5494,6 +5502,15 @@ S: Maintained
F: Documentation/hwmon/dps920ab.rst
F: drivers/hwmon/pmbus/dps920ab.c
+DELTA NETWORKS TN48M CPLD DRIVERS
+M: Robert Marko <robert.marko@sartura.hr>
+S: Maintained
+F: Documentation/devicetree/bindings/gpio/delta,tn48m-gpio.yaml
+F: Documentation/devicetree/bindings/mfd/delta,tn48m-cpld.yaml
+F: Documentation/devicetree/bindings/reset/delta,tn48m-reset.yaml
+F: drivers/gpio/gpio-tn48m.c
+F: include/dt-bindings/reset/delta,tn48m-reset.h
+
DENALI NAND DRIVER
L: linux-mtd@lists.infradead.org
S: Orphan
@@ -9907,6 +9924,7 @@ F: drivers/firmware/stratix10-rsu.c
F: drivers/firmware/stratix10-svc.c
F: include/linux/firmware/intel/stratix10-smc.h
F: include/linux/firmware/intel/stratix10-svc-client.h
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/dinguyen/linux.git
INTEL TELEMETRY DRIVER
M: Rajneesh Bhardwaj <irenic.rajneesh@gmail.com>
@@ -15118,6 +15136,24 @@ L: platform-driver-x86@vger.kernel.org
S: Maintained
F: drivers/platform/x86/peaq-wmi.c
+PECI HARDWARE MONITORING DRIVERS
+M: Iwona Winiarska <iwona.winiarska@intel.com>
+L: linux-hwmon@vger.kernel.org
+S: Supported
+F: Documentation/hwmon/peci-cputemp.rst
+F: Documentation/hwmon/peci-dimmtemp.rst
+F: drivers/hwmon/peci/
+
+PECI SUBSYSTEM
+M: Iwona Winiarska <iwona.winiarska@intel.com>
+L: openbmc@lists.ozlabs.org (moderated for non-subscribers)
+S: Supported
+F: Documentation/devicetree/bindings/peci/
+F: Documentation/peci/
+F: drivers/peci/
+F: include/linux/peci-cpu.h
+F: include/linux/peci.h
+
PENSANDO ETHERNET DRIVERS
M: Shannon Nelson <snelson@pensando.io>
M: drivers@pensando.io
@@ -18542,6 +18578,12 @@ S: Maintained
F: Documentation/devicetree/bindings/rtc/sunplus,sp7021-rtc.yaml
F: drivers/rtc/rtc-sunplus.c
+SUNPLUS OCOTP DRIVER
+M: Vincent Shih <vincent.sunplus@gmail.com>
+S: Maintained
+F: Documentation/devicetree/bindings/nvmem/sunplus,sp7021-ocotp.yaml
+F: drivers/nvmem/sunplus-ocotp.c
+
SUPERH
M: Yoshinori Sato <ysato@users.sourceforge.jp>
M: Rich Felker <dalias@libc.org>
diff --git a/arch/arm/boot/dts/aspeed-g4.dtsi b/arch/arm/boot/dts/aspeed-g4.dtsi
index f14dace34c5a..fa8b581c3d6c 100644
--- a/arch/arm/boot/dts/aspeed-g4.dtsi
+++ b/arch/arm/boot/dts/aspeed-g4.dtsi
@@ -392,6 +392,17 @@
};
};
+ peci0: peci-controller@1e78b000 {
+ compatible = "aspeed,ast2400-peci";
+ reg = <0x1e78b000 0x60>;
+ interrupts = <15>;
+ clocks = <&syscon ASPEED_CLK_GATE_REFCLK>;
+ resets = <&syscon ASPEED_RESET_PECI>;
+ cmd-timeout-ms = <1000>;
+ clock-frequency = <1000000>;
+ status = "disabled";
+ };
+
uart2: serial@1e78d000 {
compatible = "ns16550a";
reg = <0x1e78d000 0x20>;
diff --git a/arch/arm/boot/dts/aspeed-g5.dtsi b/arch/arm/boot/dts/aspeed-g5.dtsi
index 7495f93c5069..4147b397c883 100644
--- a/arch/arm/boot/dts/aspeed-g5.dtsi
+++ b/arch/arm/boot/dts/aspeed-g5.dtsi
@@ -516,6 +516,17 @@
};
};
+ peci0: peci-controller@1e78b000 {
+ compatible = "aspeed,ast2500-peci";
+ reg = <0x1e78b000 0x60>;
+ interrupts = <15>;
+ clocks = <&syscon ASPEED_CLK_GATE_REFCLK>;
+ resets = <&syscon ASPEED_RESET_PECI>;
+ cmd-timeout-ms = <1000>;
+ clock-frequency = <1000000>;
+ status = "disabled";
+ };
+
uart2: serial@1e78d000 {
compatible = "ns16550a";
reg = <0x1e78d000 0x20>;
diff --git a/arch/arm/boot/dts/aspeed-g6.dtsi b/arch/arm/boot/dts/aspeed-g6.dtsi
index c32e87fad4dc..3d5ce9da42c3 100644
--- a/arch/arm/boot/dts/aspeed-g6.dtsi
+++ b/arch/arm/boot/dts/aspeed-g6.dtsi
@@ -512,6 +512,17 @@
status = "disabled";
};
+ peci0: peci-controller@1e78b000 {
+ compatible = "aspeed,ast2600-peci";
+ reg = <0x1e78b000 0x100>;
+ interrupts = <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&syscon ASPEED_CLK_GATE_REF0CLK>;
+ resets = <&syscon ASPEED_RESET_PECI>;
+ cmd-timeout-ms = <1000>;
+ clock-frequency = <1000000>;
+ status = "disabled";
+ };
+
lpc: lpc@1e789000 {
compatible = "aspeed,ast2600-lpc-v2", "simple-mfd", "syscon";
reg = <0x1e789000 0x1000>;
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 0d399ddaa185..8d6cd5d08722 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -236,4 +236,7 @@ source "drivers/interconnect/Kconfig"
source "drivers/counter/Kconfig"
source "drivers/most/Kconfig"
+
+source "drivers/peci/Kconfig"
+
endmenu
diff --git a/drivers/Makefile b/drivers/Makefile
index a110338c860c..020780b6b4d2 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -187,3 +187,4 @@ obj-$(CONFIG_GNSS) += gnss/
obj-$(CONFIG_INTERCONNECT) += interconnect/
obj-$(CONFIG_COUNTER) += counter/
obj-$(CONFIG_MOST) += most/
+obj-$(CONFIG_PECI) += peci/
diff --git a/drivers/accessibility/speakup/speakup_audptr.c b/drivers/accessibility/speakup/speakup_audptr.c
index e89fd72579e6..a0c3b8ae17a1 100644
--- a/drivers/accessibility/speakup/speakup_audptr.c
+++ b/drivers/accessibility/speakup/speakup_audptr.c
@@ -126,20 +126,22 @@ static void synth_flush(struct spk_synth *synth)
static void synth_version(struct spk_synth *synth)
{
- unsigned char test = 0;
- char synth_id[40] = "";
+ unsigned i;
+ char synth_id[33];
synth->synth_immediate(synth, "\x05[Q]");
- synth_id[test] = synth->io_ops->synth_in(synth);
- if (synth_id[test] == 'A') {
- do {
- /* read version string from synth */
- synth_id[++test] = synth->io_ops->synth_in(synth);
- } while (synth_id[test] != '\n' && test < 32);
- synth_id[++test] = 0x00;
+ synth_id[0] = synth->io_ops->synth_in(synth);
+ if (synth_id[0] != 'A')
+ return;
+
+ for (i = 1; i < sizeof(synth_id) - 1; i++) {
+ /* read version string from synth */
+ synth_id[i] = synth->io_ops->synth_in(synth);
+ if (synth_id[i] == '\n')
+ break;
}
- if (synth_id[0] == 'A')
- pr_info("%s version: %s", synth->long_name, synth_id);
+ synth_id[i] = '\0';
+ pr_info("%s version: %s", synth->long_name, synth_id);
}
static int synth_probe(struct spk_synth *synth)
diff --git a/drivers/accessibility/speakup/synth.c b/drivers/accessibility/speakup/synth.c
index 2b8699673bac..eea2a2fa4f01 100644
--- a/drivers/accessibility/speakup/synth.c
+++ b/drivers/accessibility/speakup/synth.c
@@ -348,7 +348,7 @@ struct var_t synth_time_vars[] = {
{ TRIGGER, .u.n = {NULL, 20, 10, 2000, 0, 0, NULL } },
{ JIFFY, .u.n = {NULL, 50, 20, 200, 0, 0, NULL } },
{ FULL, .u.n = {NULL, 400, 200, 60000, 0, 0, NULL } },
- { FLUSH, .u.n = {NULL, 4000, 100, 4000, 0, 0, NULL } },
+ { FLUSH, .u.n = {NULL, 4000, 10, 4000, 0, 0, NULL } },
V_LAST_VAR
};
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index 47bc74a8c7b6..2ac1008a5f39 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -1049,18 +1049,14 @@ err_get_alloc_mutex_failed:
static unsigned long
binder_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
- unsigned long ret = list_lru_count(&binder_alloc_lru);
- return ret;
+ return list_lru_count(&binder_alloc_lru);
}
static unsigned long
binder_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
- unsigned long ret;
-
- ret = list_lru_walk(&binder_alloc_lru, binder_alloc_free_page,
+ return list_lru_walk(&binder_alloc_lru, binder_alloc_free_page,
NULL, sc->nr_to_scan);
- return ret;
}
static struct shrinker binder_shrinker = {
diff --git a/drivers/char/bsr.c b/drivers/char/bsr.c
index cce2af5df7b4..d5f943938427 100644
--- a/drivers/char/bsr.c
+++ b/drivers/char/bsr.c
@@ -60,7 +60,7 @@ struct bsr_dev {
};
static unsigned total_bsr_devs;
-static struct list_head bsr_devs = LIST_HEAD_INIT(bsr_devs);
+static LIST_HEAD(bsr_devs);
static struct class *bsr_class;
static int bsr_major;
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index 563dfae3b8da..ee71376f174b 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -16,6 +16,7 @@
#include <linux/ioport.h>
#include <linux/fcntl.h>
#include <linux/init.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/poll.h>
#include <linux/mm.h>
#include <linux/proc_fs.h>
@@ -120,22 +121,6 @@ static struct hpets *hpets;
#define HPET_PERIODIC 0x0004
#define HPET_SHARED_IRQ 0x0008
-
-#ifndef readq
-static inline unsigned long long readq(void __iomem *addr)
-{
- return readl(addr) | (((unsigned long long)readl(addr + 4)) << 32LL);
-}
-#endif
-
-#ifndef writeq
-static inline void writeq(unsigned long long v, void __iomem *addr)
-{
- writel(v & 0xffffffff, addr);
- writel(v >> 32, addr + 4);
-}
-#endif
-
static irqreturn_t hpet_interrupt(int irq, void *data)
{
struct hpet_dev *devp;
@@ -268,9 +253,9 @@ static int hpet_open(struct inode *inode, struct file *file)
for (devp = NULL, hpetp = hpets; hpetp && !devp; hpetp = hpetp->hp_next)
for (i = 0; i < hpetp->hp_ntimer; i++)
- if (hpetp->hp_dev[i].hd_flags & HPET_OPEN)
+ if (hpetp->hp_dev[i].hd_flags & HPET_OPEN) {
continue;
- else {
+ } else {
devp = &hpetp->hp_dev[i];
break;
}
@@ -317,9 +302,9 @@ hpet_read(struct file *file, char __user *buf, size_t count, loff_t * ppos)
devp->hd_irqdata = 0;
spin_unlock_irq(&hpet_lock);
- if (data)
+ if (data) {
break;
- else if (file->f_flags & O_NONBLOCK) {
+ } else if (file->f_flags & O_NONBLOCK) {
retval = -EAGAIN;
goto out;
} else if (signal_pending(current)) {
@@ -982,7 +967,8 @@ static acpi_status hpet_resources(struct acpi_resource *res, void *data)
break;
irq = acpi_register_gsi(NULL, irqp->interrupts[i],
- irqp->triggering, irqp->polarity);
+ irqp->triggering,
+ irqp->polarity);
if (irq < 0)
return AE_ERROR;
diff --git a/drivers/char/xilinx_hwicap/fifo_icap.c b/drivers/char/xilinx_hwicap/fifo_icap.c
index 02225eb19cf6..619f3a30ec55 100644
--- a/drivers/char/xilinx_hwicap/fifo_icap.c
+++ b/drivers/char/xilinx_hwicap/fifo_icap.c
@@ -111,7 +111,7 @@ static inline u32 fifo_icap_fifo_read(struct hwicap_drvdata *drvdata)
}
/**
- * fifo_icap_set_read_size - Set the the size register.
+ * fifo_icap_set_read_size - Set the size register.
* @drvdata: a pointer to the drvdata.
* @data: the size of the following read transaction, in words.
**/
diff --git a/drivers/char/xilinx_hwicap/xilinx_hwicap.c b/drivers/char/xilinx_hwicap/xilinx_hwicap.c
index 067396bedf22..74a4928aea1d 100644
--- a/drivers/char/xilinx_hwicap/xilinx_hwicap.c
+++ b/drivers/char/xilinx_hwicap/xilinx_hwicap.c
@@ -241,7 +241,7 @@ static int hwicap_command_desync(struct hwicap_drvdata *drvdata)
buffer[index++] = XHI_NOOP_PACKET;
/*
- * Write the data to the FIFO and intiate the transfer of data present
+ * Write the data to the FIFO and initiate the transfer of data present
* in the FIFO to the ICAP device.
*/
return drvdata->config->set_configuration(drvdata,
@@ -297,7 +297,7 @@ static int hwicap_get_configuration_register(struct hwicap_drvdata *drvdata,
buffer[index++] = XHI_NOOP_PACKET;
/*
- * Write the data to the FIFO and intiate the transfer of data present
+ * Write the data to the FIFO and initiate the transfer of data present
* in the FIFO to the ICAP device.
*/
status = drvdata->config->set_configuration(drvdata,
@@ -384,7 +384,7 @@ hwicap_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
drvdata->read_buffer + bytes_to_read,
4 - bytes_to_read);
} else {
- /* Get new data from the ICAP, and return was was requested. */
+ /* Get new data from the ICAP, and return what was requested. */
kbuf = (u32 *) get_zeroed_page(GFP_KERNEL);
if (!kbuf) {
status = -ENOMEM;
diff --git a/drivers/comedi/drivers/das16.c b/drivers/comedi/drivers/das16.c
index 937a69ce0977..728dc02156c8 100644
--- a/drivers/comedi/drivers/das16.c
+++ b/drivers/comedi/drivers/das16.c
@@ -961,7 +961,7 @@ static const struct comedi_lrange *das16_ai_range(struct comedi_device *dev,
/* allocate single-range range table */
lrange = comedi_alloc_spriv(s,
- sizeof(*lrange) + sizeof(*krange));
+ struct_size(lrange, range, 1));
if (!lrange)
return &range_unknown;
@@ -995,7 +995,7 @@ static const struct comedi_lrange *das16_ao_range(struct comedi_device *dev,
/* allocate single-range range table */
lrange = comedi_alloc_spriv(s,
- sizeof(*lrange) + sizeof(*krange));
+ struct_size(lrange, range, 1));
if (!lrange)
return &range_unknown;
diff --git a/drivers/comedi/drivers/ni_routes.c b/drivers/comedi/drivers/ni_routes.c
index f24eeb464eba..295a3a9ee0c9 100644
--- a/drivers/comedi/drivers/ni_routes.c
+++ b/drivers/comedi/drivers/ni_routes.c
@@ -56,8 +56,7 @@ static const u8 *ni_find_route_values(const char *device_family)
int i;
for (i = 0; ni_all_route_values[i]; ++i) {
- if (memcmp(ni_all_route_values[i]->family, device_family,
- strnlen(device_family, 30)) == 0) {
+ if (!strcmp(ni_all_route_values[i]->family, device_family)) {
rv = &ni_all_route_values[i]->register_values[0][0];
break;
}
@@ -75,8 +74,7 @@ ni_find_valid_routes(const char *board_name)
int i;
for (i = 0; ni_device_routes_list[i]; ++i) {
- if (memcmp(ni_device_routes_list[i]->device, board_name,
- strnlen(board_name, 30)) == 0) {
+ if (!strcmp(ni_device_routes_list[i]->device, board_name)) {
dr = ni_device_routes_list[i];
break;
}
diff --git a/drivers/comedi/drivers/pcm3724.c b/drivers/comedi/drivers/pcm3724.c
index e4103f9eeced..ca8bef54dacc 100644
--- a/drivers/comedi/drivers/pcm3724.c
+++ b/drivers/comedi/drivers/pcm3724.c
@@ -93,7 +93,6 @@ static void do_3724_config(struct comedi_device *dev,
unsigned long port_8255_cfg;
config = I8255_CTRL_CW;
- buffer_config = 0;
/* 1 in io_bits indicates output, 1 in config indicates input */
if (!(s->io_bits & 0x0000ff))
diff --git a/drivers/dio/dio.c b/drivers/dio/dio.c
index 4c06c93c93d3..005a82f671c3 100644
--- a/drivers/dio/dio.c
+++ b/drivers/dio/dio.c
@@ -2,27 +2,27 @@
/* Code to support devices on the DIO and DIO-II bus
* Copyright (C) 05/1998 Peter Maydell <pmaydell@chiark.greenend.org.uk>
* Copyright (C) 2004 Jochen Friedrich <jochen@scram.de>
- *
+ *
* This code has basically these routines at the moment:
* int dio_find(u_int deviceid)
* Search the list of DIO devices and return the select code
* of the next unconfigured device found that matches the given device ID.
* Note that the deviceid parameter should be the encoded ID.
- * This means that framebuffers should pass it as
+ * This means that framebuffers should pass it as
* DIO_ENCODE_ID(DIO_ID_FBUFFER,DIO_ID2_TOPCAT)
* (or whatever); everybody else just uses DIO_ID_FOOBAR.
* unsigned long dio_scodetophysaddr(int scode)
* Return the physical address corresponding to the given select code.
* int dio_scodetoipl(int scode)
- * Every DIO card has a fixed interrupt priority level. This function
+ * Every DIO card has a fixed interrupt priority level. This function
* returns it, whatever it is.
* const char *dio_scodetoname(int scode)
- * Return a character string describing this board [might be "" if
+ * Return a character string describing this board [might be "" if
* not CONFIG_DIO_CONSTANTS]
* void dio_config_board(int scode) mark board as configured in the list
* void dio_unconfig_board(int scode) mark board as no longer configured
*
- * This file is based on the way the Amiga port handles Zorro II cards,
+ * This file is based on the way the Amiga port handles Zorro II cards,
* although we aren't so complicated...
*/
#include <linux/module.h>
@@ -33,7 +33,7 @@
#include <linux/dio.h>
#include <linux/slab.h> /* kmalloc() */
#include <linux/uaccess.h>
-#include <asm/io.h> /* readb() */
+#include <linux/io.h> /* readb() */
struct dio_bus dio_bus = {
.resources = {
@@ -52,38 +52,36 @@ struct dio_bus dio_bus = {
/* We associate each numeric ID with an appropriate descriptive string
* using a constant array of these structs.
* FIXME: we should be able to arrange to throw away most of the strings
- * using the initdata stuff. Then we wouldn't need to worry about
+ * using the initdata stuff. Then we wouldn't need to worry about
* carrying them around...
- * I think we do this by copying them into newly kmalloc()ed memory and
+ * I think we do this by copying them into newly kmalloc()ed memory and
* marking the names[] array as .initdata ?
*/
-struct dioname
-{
- int id;
- const char *name;
+struct dioname {
+ int id;
+ const char *name;
};
/* useful macro */
#define DIONAME(x) { DIO_ID_##x, DIO_DESC_##x }
-#define DIOFBNAME(x) { DIO_ENCODE_ID( DIO_ID_FBUFFER, DIO_ID2_##x), DIO_DESC2_##x }
-
-static struct dioname names[] =
-{
- DIONAME(DCA0), DIONAME(DCA0REM), DIONAME(DCA1), DIONAME(DCA1REM),
- DIONAME(DCM), DIONAME(DCMREM),
- DIONAME(LAN),
- DIONAME(FHPIB), DIONAME(NHPIB),
- DIONAME(SCSI0), DIONAME(SCSI1), DIONAME(SCSI2), DIONAME(SCSI3),
- DIONAME(FBUFFER),
- DIONAME(PARALLEL), DIONAME(VME), DIONAME(DCL), DIONAME(DCLREM),
- DIONAME(MISC0), DIONAME(MISC1), DIONAME(MISC2), DIONAME(MISC3),
- DIONAME(MISC4), DIONAME(MISC5), DIONAME(MISC6), DIONAME(MISC7),
- DIONAME(MISC8), DIONAME(MISC9), DIONAME(MISC10), DIONAME(MISC11),
- DIONAME(MISC12), DIONAME(MISC13),
- DIOFBNAME(GATORBOX), DIOFBNAME(TOPCAT), DIOFBNAME(RENAISSANCE),
- DIOFBNAME(LRCATSEYE), DIOFBNAME(HRCCATSEYE), DIOFBNAME(HRMCATSEYE),
- DIOFBNAME(DAVINCI), DIOFBNAME(XXXCATSEYE), DIOFBNAME(HYPERION),
- DIOFBNAME(XGENESIS), DIOFBNAME(TIGER), DIOFBNAME(YGENESIS)
+#define DIOFBNAME(x) { DIO_ENCODE_ID(DIO_ID_FBUFFER, DIO_ID2_##x), DIO_DESC2_##x }
+
+static struct dioname names[] = {
+ DIONAME(DCA0), DIONAME(DCA0REM), DIONAME(DCA1), DIONAME(DCA1REM),
+ DIONAME(DCM), DIONAME(DCMREM),
+ DIONAME(LAN),
+ DIONAME(FHPIB), DIONAME(NHPIB),
+ DIONAME(SCSI0), DIONAME(SCSI1), DIONAME(SCSI2), DIONAME(SCSI3),
+ DIONAME(FBUFFER),
+ DIONAME(PARALLEL), DIONAME(VME), DIONAME(DCL), DIONAME(DCLREM),
+ DIONAME(MISC0), DIONAME(MISC1), DIONAME(MISC2), DIONAME(MISC3),
+ DIONAME(MISC4), DIONAME(MISC5), DIONAME(MISC6), DIONAME(MISC7),
+ DIONAME(MISC8), DIONAME(MISC9), DIONAME(MISC10), DIONAME(MISC11),
+ DIONAME(MISC12), DIONAME(MISC13),
+ DIOFBNAME(GATORBOX), DIOFBNAME(TOPCAT), DIOFBNAME(RENAISSANCE),
+ DIOFBNAME(LRCATSEYE), DIOFBNAME(HRCCATSEYE), DIOFBNAME(HRMCATSEYE),
+ DIOFBNAME(DAVINCI), DIOFBNAME(XXXCATSEYE), DIOFBNAME(HYPERION),
+ DIOFBNAME(XGENESIS), DIOFBNAME(TIGER), DIOFBNAME(YGENESIS)
};
#undef DIONAME
@@ -94,13 +92,14 @@ static const char unknowndioname[]
static const char *dio_getname(int id)
{
- /* return pointer to a constant string describing the board with given ID */
+ /* return pointer to a constant string describing the board with given ID */
unsigned int i;
+
for (i = 0; i < ARRAY_SIZE(names); i++)
- if (names[i].id == id)
- return names[i].name;
+ if (names[i].id == id)
+ return names[i].name;
- return unknowndioname;
+ return unknowndioname;
}
#else
@@ -122,10 +121,10 @@ int __init dio_find(int deviceid)
void *va;
unsigned long pa;
- if (DIO_SCINHOLE(scode))
- continue;
+ if (DIO_SCINHOLE(scode))
+ continue;
- pa = dio_scodetophysaddr(scode);
+ pa = dio_scodetophysaddr(scode);
if (!pa)
continue;
@@ -139,15 +138,15 @@ int __init dio_find(int deviceid)
(unsigned char *)va + DIO_IDOFF, 1)) {
if (scode >= DIOII_SCBASE)
iounmap(va);
- continue; /* no board present at that select code */
+ continue; /* no board present at that select code */
}
prid = DIO_ID(va);
- if (DIO_NEEDSSECID(prid)) {
- secid = DIO_SECID(va);
- id = DIO_ENCODE_ID(prid, secid);
- } else
+ if (DIO_NEEDSSECID(prid)) {
+ secid = DIO_SECID(va);
+ id = DIO_ENCODE_ID(prid, secid);
+ } else
id = prid;
if (id == deviceid) {
@@ -175,7 +174,7 @@ static int __init dio_init(void)
printk(KERN_INFO "Scanning for DIO devices...\n");
- /* Initialize the DIO bus */
+ /* Initialize the DIO bus */
INIT_LIST_HEAD(&dio_bus.devices);
dev_set_name(&dio_bus.dev, "dio");
error = device_register(&dio_bus.dev);
@@ -190,14 +189,13 @@ static int __init dio_init(void)
request_resource(&iomem_resource, &dio_bus.resources[i]);
/* Register all devices */
- for (scode = 0; scode < DIO_SCMAX; ++scode)
- {
- u_char prid, secid = 0; /* primary, secondary ID bytes */
- u_char *va;
+ for (scode = 0; scode < DIO_SCMAX; ++scode) {
+ u_char prid, secid = 0; /* primary, secondary ID bytes */
+ u_char *va;
unsigned long pa;
-
- if (DIO_SCINHOLE(scode))
- continue;
+
+ if (DIO_SCINHOLE(scode))
+ continue;
pa = dio_scodetophysaddr(scode);
@@ -213,10 +211,10 @@ static int __init dio_init(void)
(unsigned char *)va + DIO_IDOFF, 1)) {
if (scode >= DIOII_SCBASE)
iounmap(va);
- continue; /* no board present at that select code */
+ continue; /* no board present at that select code */
}
- /* Found a board, allocate it an entry in the list */
+ /* Found a board, allocate it an entry in the list */
dev = kzalloc(sizeof(struct dio_dev), GFP_KERNEL);
if (!dev)
return -ENOMEM;
@@ -229,19 +227,19 @@ static int __init dio_init(void)
dev->resource.end = pa + DIO_SIZE(scode, va);
dev_set_name(&dev->dev, "%02x", scode);
- /* read the ID byte(s) and encode if necessary. */
+ /* read the ID byte(s) and encode if necessary. */
prid = DIO_ID(va);
- if (DIO_NEEDSSECID(prid)) {
- secid = DIO_SECID(va);
- dev->id = DIO_ENCODE_ID(prid, secid);
- } else
- dev->id = prid;
+ if (DIO_NEEDSSECID(prid)) {
+ secid = DIO_SECID(va);
+ dev->id = DIO_ENCODE_ID(prid, secid);
+ } else
+ dev->id = prid;
- dev->ipl = DIO_IPL(va);
- strcpy(dev->name,dio_getname(dev->id));
+ dev->ipl = DIO_IPL(va);
+ strcpy(dev->name, dio_getname(dev->id));
printk(KERN_INFO "select code %3d: ipl %d: ID %02X", dev->scode, dev->ipl, prid);
- if (DIO_NEEDSSECID(prid))
+ if (DIO_NEEDSSECID(prid))
printk(":%02X", secid);
printk(": %s\n", dev->name);
@@ -256,7 +254,7 @@ static int __init dio_init(void)
error = dio_create_sysfs_dev_files(dev);
if (error)
dev_err(&dev->dev, "Error creating sysfs files\n");
- }
+ }
return 0;
}
@@ -267,12 +265,12 @@ subsys_initcall(dio_init);
*/
unsigned long dio_scodetophysaddr(int scode)
{
- if (scode >= DIOII_SCBASE) {
- return (DIOII_BASE + (scode - 132) * DIOII_DEVSIZE);
- } else if (scode > DIO_SCMAX || scode < 0)
- return 0;
- else if (DIO_SCINHOLE(scode))
- return 0;
-
- return (DIO_BASE + scode * DIO_DEVSIZE);
+ if (scode >= DIOII_SCBASE)
+ return (DIOII_BASE + (scode - 132) * DIOII_DEVSIZE);
+ else if (scode > DIO_SCMAX || scode < 0)
+ return 0;
+ else if (DIO_SCINHOLE(scode))
+ return 0;
+
+ return (DIO_BASE + scode * DIO_DEVSIZE);
}
diff --git a/drivers/firmware/stratix10-svc.c b/drivers/firmware/stratix10-svc.c
index 29c0a616b317..4bd57a908efe 100644
--- a/drivers/firmware/stratix10-svc.c
+++ b/drivers/firmware/stratix10-svc.c
@@ -306,6 +306,7 @@ static void svc_thread_recv_status_ok(struct stratix10_svc_data *p_data,
break;
case COMMAND_RSU_RETRY:
case COMMAND_RSU_MAX_RETRY:
+ case COMMAND_FIRMWARE_VERSION:
cb_data->status = BIT(SVC_STATUS_OK);
cb_data->kaddr1 = &res.a1;
break;
@@ -422,6 +423,11 @@ static int svc_normal_to_secure_thread(void *data)
a1 = 0;
a2 = 0;
break;
+ case COMMAND_FIRMWARE_VERSION:
+ a0 = INTEL_SIP_SMC_FIRMWARE_VERSION;
+ a1 = 0;
+ a2 = 0;
+ break;
default:
pr_warn("it shouldn't happen\n");
break;
@@ -491,7 +497,8 @@ static int svc_normal_to_secure_thread(void *data)
*/
if ((pdata->command == COMMAND_RSU_RETRY) ||
(pdata->command == COMMAND_RSU_MAX_RETRY) ||
- (pdata->command == COMMAND_RSU_NOTIFY)) {
+ (pdata->command == COMMAND_RSU_NOTIFY) ||
+ (pdata->command == COMMAND_FIRMWARE_VERSION)) {
cbdata->status =
BIT(SVC_STATUS_NO_SUPPORT);
cbdata->kaddr1 = NULL;
diff --git a/drivers/firmware/xilinx/zynqmp.c b/drivers/firmware/xilinx/zynqmp.c
index 450c5f6a1cbf..7d8cb2ec6f8e 100644
--- a/drivers/firmware/xilinx/zynqmp.c
+++ b/drivers/firmware/xilinx/zynqmp.c
@@ -42,6 +42,16 @@ static DEFINE_HASHTABLE(pm_api_features_map, PM_API_FEATURE_CHECK_MAX_ORDER);
static struct platform_device *em_dev;
/**
+ * struct zynqmp_devinfo - Structure for Zynqmp device instance
+ * @dev: Device pointer
+ * @feature_conf_id: Feature configuration ID
+ */
+struct zynqmp_devinfo {
+ struct device *dev;
+ u32 feature_conf_id;
+};
+
+/**
* struct pm_api_feature_data - PM API Feature data
* @pm_api_id: PM API Id, used as key to index into hashmap
* @feature_status: status of PM API feature: valid, invalid
@@ -1157,6 +1167,33 @@ int zynqmp_pm_system_shutdown(const u32 type, const u32 subtype)
}
/**
+ * zynqmp_pm_set_feature_config - PM call to request IOCTL for feature config
+ * @id: The config ID of the feature to be configured
+ * @value: The config value of the feature to be configured
+ *
+ * Return: Returns 0 on success or error value on failure.
+ */
+int zynqmp_pm_set_feature_config(enum pm_feature_config_id id, u32 value)
+{
+ return zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_SET_FEATURE_CONFIG,
+ id, value, NULL);
+}
+
+/**
+ * zynqmp_pm_get_feature_config - PM call to get value of configured feature
+ * @id: The config id of the feature to be queried
+ * @payload: Returned value array
+ *
+ * Return: Returns 0 on success or error value on failure.
+ */
+int zynqmp_pm_get_feature_config(enum pm_feature_config_id id,
+ u32 *payload)
+{
+ return zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_GET_FEATURE_CONFIG,
+ id, 0, payload);
+}
+
+/**
* struct zynqmp_pm_shutdown_scope - Struct for shutdown scope
* @subtype: Shutdown subtype
* @name: Matching string for scope argument
@@ -1424,6 +1461,78 @@ static DEVICE_ATTR_RW(pggs1);
static DEVICE_ATTR_RW(pggs2);
static DEVICE_ATTR_RW(pggs3);
+static ssize_t feature_config_id_show(struct device *device,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct zynqmp_devinfo *devinfo = dev_get_drvdata(device);
+
+ return sysfs_emit(buf, "%d\n", devinfo->feature_conf_id);
+}
+
+static ssize_t feature_config_id_store(struct device *device,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ u32 config_id;
+ int ret;
+ struct zynqmp_devinfo *devinfo = dev_get_drvdata(device);
+
+ if (!buf)
+ return -EINVAL;
+
+ ret = kstrtou32(buf, 10, &config_id);
+ if (ret)
+ return ret;
+
+ devinfo->feature_conf_id = config_id;
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(feature_config_id);
+
+static ssize_t feature_config_value_show(struct device *device,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int ret;
+ u32 ret_payload[PAYLOAD_ARG_CNT];
+ struct zynqmp_devinfo *devinfo = dev_get_drvdata(device);
+
+ ret = zynqmp_pm_get_feature_config(devinfo->feature_conf_id,
+ ret_payload);
+ if (ret)
+ return ret;
+
+ return sysfs_emit(buf, "%d\n", ret_payload[1]);
+}
+
+static ssize_t feature_config_value_store(struct device *device,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ u32 value;
+ int ret;
+ struct zynqmp_devinfo *devinfo = dev_get_drvdata(device);
+
+ if (!buf)
+ return -EINVAL;
+
+ ret = kstrtou32(buf, 10, &value);
+ if (ret)
+ return ret;
+
+ ret = zynqmp_pm_set_feature_config(devinfo->feature_conf_id,
+ value);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(feature_config_value);
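+
+/*
+ * Usage sketch (illustrative; the exact sysfs location depends on the
+ * platform device name, typically
+ * /sys/devices/platform/firmware:zynqmp-firmware/):
+ *
+ *   echo 1 > .../feature_config_id     # select the feature config ID
+ *   cat .../feature_config_value       # query it via IOCTL_GET_FEATURE_CONFIG
+ *   echo 0 > .../feature_config_value  # set it via IOCTL_SET_FEATURE_CONFIG
+ */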
+
static struct attribute *zynqmp_firmware_attrs[] = {
&dev_attr_ggs0.attr,
&dev_attr_ggs1.attr,
@@ -1435,6 +1544,8 @@ static struct attribute *zynqmp_firmware_attrs[] = {
&dev_attr_pggs3.attr,
&dev_attr_shutdown_scope.attr,
&dev_attr_health_status.attr,
+ &dev_attr_feature_config_id.attr,
+ &dev_attr_feature_config_value.attr,
NULL,
};
@@ -1444,6 +1555,7 @@ static int zynqmp_firmware_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *np;
+ struct zynqmp_devinfo *devinfo;
int ret;
np = of_find_compatible_node(NULL, NULL, "xlnx,zynqmp");
@@ -1460,6 +1572,14 @@ static int zynqmp_firmware_probe(struct platform_device *pdev)
if (ret)
return ret;
+ devinfo = devm_kzalloc(dev, sizeof(*devinfo), GFP_KERNEL);
+ if (!devinfo)
+ return -ENOMEM;
+
+ devinfo->dev = dev;
+
+ platform_set_drvdata(pdev, devinfo);
+
/* Check PM API version number */
ret = zynqmp_pm_get_api_version(&pm_api_version);
if (ret)
diff --git a/drivers/fsi/fsi-core.c b/drivers/fsi/fsi-core.c
index 59ddc9fd5bca..3a7b78e36701 100644
--- a/drivers/fsi/fsi-core.c
+++ b/drivers/fsi/fsi-core.c
@@ -24,9 +24,6 @@
#include "fsi-master.h"
-#define CREATE_TRACE_POINTS
-#include <trace/events/fsi.h>
-
#define FSI_SLAVE_CONF_NEXT_MASK GENMASK(31, 31)
#define FSI_SLAVE_CONF_SLOTS_MASK GENMASK(23, 16)
#define FSI_SLAVE_CONF_SLOTS_SHIFT 16
@@ -95,6 +92,9 @@ struct fsi_slave {
u8 t_echo_delay;
};
+#define CREATE_TRACE_POINTS
+#include <trace/events/fsi.h>
+
#define to_fsi_master(d) container_of(d, struct fsi_master, dev)
#define to_fsi_slave(d) container_of(d, struct fsi_slave, dev)
@@ -524,6 +524,8 @@ static int fsi_slave_scan(struct fsi_slave *slave)
dev->addr = engine_addr;
dev->size = slots * engine_page_size;
+ trace_fsi_dev_init(dev);
+
dev_dbg(&slave->dev,
"engine[%i]: type %x, version %x, addr %x size %x\n",
dev->unit, dev->engine_type, version,
@@ -1006,6 +1008,7 @@ static int fsi_slave_init(struct fsi_master *master, int link, uint8_t id)
crc = crc4(0, cfam_id, 32);
if (crc) {
+ trace_fsi_slave_invalid_cfam(master, link, cfam_id);
dev_warn(&master->dev, "slave %02x:%02x invalid cfam id CRC!\n",
link, id);
return -EIO;
@@ -1080,6 +1083,8 @@ static int fsi_slave_init(struct fsi_master *master, int link, uint8_t id)
if (rc)
goto err_free;
+ trace_fsi_slave_init(slave);
+
/* Create chardev for userspace access */
cdev_init(&slave->cdev, &cfam_fops);
rc = cdev_device_add(&slave->cdev, &slave->dev);
diff --git a/drivers/fsi/fsi-master-aspeed.c b/drivers/fsi/fsi-master-aspeed.c
index 8606e55c1721..7cec1772820d 100644
--- a/drivers/fsi/fsi-master-aspeed.c
+++ b/drivers/fsi/fsi-master-aspeed.c
@@ -449,11 +449,13 @@ static ssize_t cfam_reset_store(struct device *dev, struct device_attribute *att
{
struct fsi_master_aspeed *aspeed = dev_get_drvdata(dev);
+ trace_fsi_master_aspeed_cfam_reset(true);
mutex_lock(&aspeed->lock);
gpiod_set_value(aspeed->cfam_reset_gpio, 1);
usleep_range(900, 1000);
gpiod_set_value(aspeed->cfam_reset_gpio, 0);
mutex_unlock(&aspeed->lock);
+ trace_fsi_master_aspeed_cfam_reset(false);
return count;
}
@@ -542,25 +544,28 @@ static int fsi_master_aspeed_probe(struct platform_device *pdev)
return rc;
}
- aspeed = devm_kzalloc(&pdev->dev, sizeof(*aspeed), GFP_KERNEL);
+ aspeed = kzalloc(sizeof(*aspeed), GFP_KERNEL);
if (!aspeed)
return -ENOMEM;
aspeed->dev = &pdev->dev;
aspeed->base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(aspeed->base))
- return PTR_ERR(aspeed->base);
+ if (IS_ERR(aspeed->base)) {
+ rc = PTR_ERR(aspeed->base);
+ goto err_free_aspeed;
+ }
aspeed->clk = devm_clk_get(aspeed->dev, NULL);
if (IS_ERR(aspeed->clk)) {
dev_err(aspeed->dev, "couldn't get clock\n");
- return PTR_ERR(aspeed->clk);
+ rc = PTR_ERR(aspeed->clk);
+ goto err_free_aspeed;
}
rc = clk_prepare_enable(aspeed->clk);
if (rc) {
dev_err(aspeed->dev, "couldn't enable clock\n");
- return rc;
+ goto err_free_aspeed;
}
rc = setup_cfam_reset(aspeed);
@@ -595,7 +600,7 @@ static int fsi_master_aspeed_probe(struct platform_device *pdev)
rc = opb_readl(aspeed, ctrl_base + FSI_MVER, &raw);
if (rc) {
dev_err(&pdev->dev, "failed to read hub version\n");
- return rc;
+ goto err_release;
}
reg = be32_to_cpu(raw);
@@ -634,6 +639,8 @@ static int fsi_master_aspeed_probe(struct platform_device *pdev)
err_release:
clk_disable_unprepare(aspeed->clk);
+err_free_aspeed:
+ kfree(aspeed);
return rc;
}
diff --git a/drivers/fsi/fsi-occ.c b/drivers/fsi/fsi-occ.c
index 7eaab1be0aa4..c9cc75fbdfb9 100644
--- a/drivers/fsi/fsi-occ.c
+++ b/drivers/fsi/fsi-occ.c
@@ -451,6 +451,14 @@ static int occ_trigger_attn(struct occ *occ)
return rc;
}
+static bool fsi_occ_response_not_ready(struct occ_response *resp, u8 seq_no,
+ u8 cmd_type)
+{
+ return resp->return_status == OCC_RESP_CMD_IN_PRG ||
+ resp->return_status == OCC_RESP_CRIT_INIT ||
+ resp->seq_no != seq_no || resp->cmd_type != cmd_type;
+}
+
int fsi_occ_submit(struct device *dev, const void *request, size_t req_len,
void *response, size_t *resp_len)
{
@@ -461,10 +469,11 @@ int fsi_occ_submit(struct device *dev, const void *request, size_t req_len,
struct occ_response *resp = response;
size_t user_resp_len = *resp_len;
u8 seq_no;
+ u8 cmd_type;
u16 checksum = 0;
u16 resp_data_length;
const u8 *byte_request = (const u8 *)request;
- unsigned long start;
+ unsigned long end;
int rc;
size_t i;
@@ -478,6 +487,8 @@ int fsi_occ_submit(struct device *dev, const void *request, size_t req_len,
return -EINVAL;
}
+ cmd_type = byte_request[1];
+
/* Checksum the request, ignoring first byte (sequence number). */
for (i = 1; i < req_len - 2; ++i)
checksum += byte_request[i];
@@ -509,51 +520,61 @@ int fsi_occ_submit(struct device *dev, const void *request, size_t req_len,
if (rc)
goto done;
- /* Read occ response header */
- start = jiffies;
- do {
+ end = jiffies + timeout;
+ while (true) {
+ /* Read occ response header */
rc = occ_getsram(occ, 0, resp, 8);
if (rc)
goto done;
- if (resp->return_status == OCC_RESP_CMD_IN_PRG ||
- resp->return_status == OCC_RESP_CRIT_INIT ||
- resp->seq_no != seq_no) {
- rc = -ETIMEDOUT;
-
- if (time_after(jiffies, start + timeout)) {
- dev_err(occ->dev, "resp timeout status=%02x "
- "resp seq_no=%d our seq_no=%d\n",
+ if (fsi_occ_response_not_ready(resp, seq_no, cmd_type)) {
+ if (time_after(jiffies, end)) {
+ dev_err(occ->dev,
+ "resp timeout status=%02x seq=%d cmd=%d, our seq=%d cmd=%d\n",
resp->return_status, resp->seq_no,
- seq_no);
+ resp->cmd_type, seq_no, cmd_type);
+ rc = -ETIMEDOUT;
goto done;
}
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout(wait_time);
- }
- } while (rc);
-
- /* Extract size of response data */
- resp_data_length = get_unaligned_be16(&resp->data_length);
+ } else {
+ /* Extract size of response data */
+ resp_data_length =
+ get_unaligned_be16(&resp->data_length);
+
+ /*
+ * Message size is data length + 5 bytes header + 2
+ * bytes checksum
+ */
+ if ((resp_data_length + 7) > user_resp_len) {
+ rc = -EMSGSIZE;
+ goto done;
+ }
- /* Message size is data length + 5 bytes header + 2 bytes checksum */
- if ((resp_data_length + 7) > user_resp_len) {
- rc = -EMSGSIZE;
- goto done;
+ /*
+ * Get the entire response including the header again,
+ * in case it changed
+ */
+ if (resp_data_length > 1) {
+ rc = occ_getsram(occ, 0, resp,
+ resp_data_length + 7);
+ if (rc)
+ goto done;
+
+ if (!fsi_occ_response_not_ready(resp, seq_no,
+ cmd_type))
+ break;
+ } else {
+ break;
+ }
+ }
}
dev_dbg(dev, "resp_status=%02x resp_data_len=%d\n",
resp->return_status, resp_data_length);
- /* Grab the rest */
- if (resp_data_length > 1) {
- /* already got 3 bytes resp, also need 2 bytes checksum */
- rc = occ_getsram(occ, 8, &resp->data[3], resp_data_length - 1);
- if (rc)
- goto done;
- }
-
occ->client_response_size = resp_data_length + 7;
rc = occ_verify_checksum(occ, resp, resp_data_length);
@@ -598,7 +619,11 @@ static int occ_probe(struct platform_device *pdev)
occ->version = (uintptr_t)of_device_get_match_data(dev);
occ->dev = dev;
occ->sbefifo = dev->parent;
- occ->sequence_number = 1;
+ /*
+ * Quickly derive a pseudo-random number from jiffies so that
+ * re-probing the driver doesn't accidentally overlap sequence numbers.
+ */
+ occ->sequence_number = (u8)((jiffies % 0xff) + 1);
mutex_init(&occ->occ_lock);
if (dev->of_node) {
diff --git a/drivers/fsi/fsi-sbefifo.c b/drivers/fsi/fsi-sbefifo.c
index 52328adef643..f52a912cdf16 100644
--- a/drivers/fsi/fsi-sbefifo.c
+++ b/drivers/fsi/fsi-sbefifo.c
@@ -32,6 +32,8 @@
#include <linux/vmalloc.h>
#include <linux/mm.h>
+#include <uapi/linux/fsi.h>
+
/*
* The SBEFIFO is a pipe-like FSI device for communicating with
* the self boot engine on POWER processors.
@@ -125,6 +127,7 @@ struct sbefifo {
bool dead;
bool async_ffdc;
bool timed_out;
+ u32 timeout_start_rsp_ms;
};
struct sbefifo_user {
@@ -133,6 +136,7 @@ struct sbefifo_user {
void *cmd_page;
void *pending_cmd;
size_t pending_len;
+ u32 read_timeout_ms;
};
static DEFINE_MUTEX(sbefifo_ffdc_mutex);
@@ -549,7 +553,7 @@ static int sbefifo_read_response(struct sbefifo *sbefifo, struct iov_iter *respo
dev_vdbg(dev, "reading response, buflen = %zd\n", iov_iter_count(response));
- timeout = msecs_to_jiffies(SBEFIFO_TIMEOUT_START_RSP);
+ timeout = msecs_to_jiffies(sbefifo->timeout_start_rsp_ms);
for (;;) {
/* Grab FIFO status (this will handle parity errors) */
rc = sbefifo_wait(sbefifo, false, &status, timeout);
@@ -795,6 +799,7 @@ static int sbefifo_user_open(struct inode *inode, struct file *file)
return -ENOMEM;
}
mutex_init(&user->file_lock);
+ user->read_timeout_ms = SBEFIFO_TIMEOUT_START_RSP;
return 0;
}
@@ -837,7 +842,9 @@ static ssize_t sbefifo_user_read(struct file *file, char __user *buf,
rc = mutex_lock_interruptible(&sbefifo->lock);
if (rc)
goto bail;
+ sbefifo->timeout_start_rsp_ms = user->read_timeout_ms;
rc = __sbefifo_submit(sbefifo, user->pending_cmd, cmd_len, &resp_iter);
+ sbefifo->timeout_start_rsp_ms = SBEFIFO_TIMEOUT_START_RSP;
mutex_unlock(&sbefifo->lock);
if (rc < 0)
goto bail;
@@ -927,12 +934,55 @@ static int sbefifo_user_release(struct inode *inode, struct file *file)
return 0;
}
+static int sbefifo_read_timeout(struct sbefifo_user *user, void __user *argp)
+{
+ struct device *dev = &user->sbefifo->dev;
+ u32 timeout;
+
+ if (get_user(timeout, (__u32 __user *)argp))
+ return -EFAULT;
+
+ if (timeout == 0) {
+ user->read_timeout_ms = SBEFIFO_TIMEOUT_START_RSP;
+ dev_dbg(dev, "Timeout reset to %d\n", user->read_timeout_ms);
+ return 0;
+ }
+
+ if (timeout < 10 || timeout > 120)
+ return -EINVAL;
+
+ user->read_timeout_ms = timeout * 1000; /* user timeout is in sec */
+
+ dev_dbg(dev, "Timeout set to %d\n", user->read_timeout_ms);
+
+ return 0;
+}
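+
+/*
+ * Userspace sketch (illustrative only): adjust the response timeout via the
+ * new ioctl, using FSI_SBEFIFO_READ_TIMEOUT_SECONDS from <uapi/linux/fsi.h>:
+ *
+ *   __u32 secs = 30;
+ *   ioctl(fd, FSI_SBEFIFO_READ_TIMEOUT_SECONDS, &secs);
+ *
+ * Passing 0 restores the default SBEFIFO_TIMEOUT_START_RSP.
+ */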
+
+static long sbefifo_user_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ struct sbefifo_user *user = file->private_data;
+ int rc = -ENOTTY;
+
+ if (!user)
+ return -EINVAL;
+
+ mutex_lock(&user->file_lock);
+ switch (cmd) {
+ case FSI_SBEFIFO_READ_TIMEOUT_SECONDS:
+ rc = sbefifo_read_timeout(user, (void __user *)arg);
+ break;
+ }
+ mutex_unlock(&user->file_lock);
+ return rc;
+}
+
static const struct file_operations sbefifo_fops = {
.owner = THIS_MODULE,
.open = sbefifo_user_open,
.read = sbefifo_user_read,
.write = sbefifo_user_write,
.release = sbefifo_user_release,
+ .unlocked_ioctl = sbefifo_user_ioctl,
};
static void sbefifo_free(struct device *dev)
@@ -972,6 +1022,7 @@ static int sbefifo_probe(struct device *dev)
sbefifo->fsi_dev = fsi_dev;
dev_set_drvdata(dev, sbefifo);
mutex_init(&sbefifo->lock);
+ sbefifo->timeout_start_rsp_ms = SBEFIFO_TIMEOUT_START_RSP;
/*
* Try cleaning up the FIFO. If this fails, we still register the
diff --git a/drivers/fsi/fsi-scom.c b/drivers/fsi/fsi-scom.c
index da1486bb6a14..bcb756dc9866 100644
--- a/drivers/fsi/fsi-scom.c
+++ b/drivers/fsi/fsi-scom.c
@@ -145,7 +145,7 @@ static int put_indirect_scom_form0(struct scom_device *scom, uint64_t value,
uint64_t addr, uint32_t *status)
{
uint64_t ind_data, ind_addr;
- int rc, retries, err = 0;
+ int rc, err;
if (value & ~XSCOM_DATA_IND_DATA)
return -EINVAL;
@@ -156,19 +156,14 @@ static int put_indirect_scom_form0(struct scom_device *scom, uint64_t value,
if (rc || (*status & SCOM_STATUS_ANY_ERR))
return rc;
- for (retries = 0; retries < SCOM_MAX_IND_RETRIES; retries++) {
- rc = __get_scom(scom, &ind_data, addr, status);
- if (rc || (*status & SCOM_STATUS_ANY_ERR))
- return rc;
+ rc = __get_scom(scom, &ind_data, addr, status);
+ if (rc || (*status & SCOM_STATUS_ANY_ERR))
+ return rc;
- err = (ind_data & XSCOM_DATA_IND_ERR_MASK) >> XSCOM_DATA_IND_ERR_SHIFT;
- *status = err << SCOM_STATUS_PIB_RESP_SHIFT;
- if ((ind_data & XSCOM_DATA_IND_COMPLETE) || (err != SCOM_PIB_BLOCKED))
- return 0;
+ err = (ind_data & XSCOM_DATA_IND_ERR_MASK) >> XSCOM_DATA_IND_ERR_SHIFT;
+ *status = err << SCOM_STATUS_PIB_RESP_SHIFT;
- msleep(1);
- }
- return rc;
+ return 0;
}
static int put_indirect_scom_form1(struct scom_device *scom, uint64_t value,
@@ -188,7 +183,7 @@ static int get_indirect_scom_form0(struct scom_device *scom, uint64_t *value,
uint64_t addr, uint32_t *status)
{
uint64_t ind_data, ind_addr;
- int rc, retries, err = 0;
+ int rc, err;
ind_addr = addr & XSCOM_ADDR_DIRECT_PART;
ind_data = (addr & XSCOM_ADDR_INDIRECT_PART) | XSCOM_DATA_IND_READ;
@@ -196,21 +191,15 @@ static int get_indirect_scom_form0(struct scom_device *scom, uint64_t *value,
if (rc || (*status & SCOM_STATUS_ANY_ERR))
return rc;
- for (retries = 0; retries < SCOM_MAX_IND_RETRIES; retries++) {
- rc = __get_scom(scom, &ind_data, addr, status);
- if (rc || (*status & SCOM_STATUS_ANY_ERR))
- return rc;
-
- err = (ind_data & XSCOM_DATA_IND_ERR_MASK) >> XSCOM_DATA_IND_ERR_SHIFT;
- *status = err << SCOM_STATUS_PIB_RESP_SHIFT;
- *value = ind_data & XSCOM_DATA_IND_DATA;
+ rc = __get_scom(scom, &ind_data, addr, status);
+ if (rc || (*status & SCOM_STATUS_ANY_ERR))
+ return rc;
- if ((ind_data & XSCOM_DATA_IND_COMPLETE) || (err != SCOM_PIB_BLOCKED))
- return 0;
+ err = (ind_data & XSCOM_DATA_IND_ERR_MASK) >> XSCOM_DATA_IND_ERR_SHIFT;
+ *status = err << SCOM_STATUS_PIB_RESP_SHIFT;
+ *value = ind_data & XSCOM_DATA_IND_DATA;
- msleep(1);
- }
- return rc;
+ return 0;
}
static int raw_put_scom(struct scom_device *scom, uint64_t value,
@@ -289,7 +278,7 @@ static int put_scom(struct scom_device *scom, uint64_t value,
int rc;
rc = raw_put_scom(scom, value, addr, &status);
- if (rc == -ENODEV)
+ if (rc)
return rc;
rc = handle_fsi2pib_status(scom, status);
@@ -308,7 +297,7 @@ static int get_scom(struct scom_device *scom, uint64_t *value,
int rc;
rc = raw_get_scom(scom, value, addr, &status);
- if (rc == -ENODEV)
+ if (rc)
return rc;
rc = handle_fsi2pib_status(scom, status);
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 1c211b4c63be..c822cf6146cf 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -1346,6 +1346,18 @@ config GPIO_TIMBERDALE
help
Add support for the GPIO IP in the timberdale FPGA.
+config GPIO_TN48M_CPLD
+ tristate "Delta Networks TN48M switch CPLD GPIO driver"
+ depends on MFD_TN48M_CPLD
+ select GPIO_REGMAP
+ help
+ This enables support for the GPIOs found on the Delta
+ Networks TN48M switch Lattice CPLD. It provides 12 pins in total,
+ they are input-only or output-only type.
+
+ This driver can also be built as a module. If so, the
+ module will be called gpio-tn48m.
+
config GPIO_TPS65086
tristate "TI TPS65086 GPO"
depends on MFD_TPS65086
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index edbaa3cb343c..3b68a9808154 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -148,6 +148,7 @@ obj-$(CONFIG_GPIO_TEGRA186) += gpio-tegra186.o
obj-$(CONFIG_GPIO_TEGRA) += gpio-tegra.o
obj-$(CONFIG_GPIO_THUNDERX) += gpio-thunderx.o
obj-$(CONFIG_GPIO_TIMBERDALE) += gpio-timberdale.o
+obj-$(CONFIG_GPIO_TN48M_CPLD) += gpio-tn48m.o
obj-$(CONFIG_GPIO_TPIC2810) += gpio-tpic2810.o
obj-$(CONFIG_GPIO_TPS65086) += gpio-tps65086.o
obj-$(CONFIG_GPIO_TPS65218) += gpio-tps65218.o
diff --git a/drivers/gpio/gpio-tn48m.c b/drivers/gpio/gpio-tn48m.c
new file mode 100644
index 000000000000..cd4a80b22794
--- /dev/null
+++ b/drivers/gpio/gpio-tn48m.c
@@ -0,0 +1,100 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Delta TN48M CPLD GPIO driver
+ *
+ * Copyright (C) 2021 Sartura Ltd.
+ *
+ * Author: Robert Marko <robert.marko@sartura.hr>
+ */
+
+#include <linux/device.h>
+#include <linux/gpio/driver.h>
+#include <linux/gpio/regmap.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+enum tn48m_gpio_type {
+	TN48M_GPO = 1,
+ TN48M_GPI,
+};
+
+struct tn48m_gpio_config {
+ int ngpio;
+ int ngpio_per_reg;
+ enum tn48m_gpio_type type;
+};
+
+static const struct tn48m_gpio_config tn48m_gpo_config = {
+ .ngpio = 4,
+ .ngpio_per_reg = 4,
+	.type = TN48M_GPO,
+};
+
+static const struct tn48m_gpio_config tn48m_gpi_config = {
+ .ngpio = 4,
+ .ngpio_per_reg = 4,
+ .type = TN48M_GPI,
+};
+
+static int tn48m_gpio_probe(struct platform_device *pdev)
+{
+ const struct tn48m_gpio_config *gpio_config;
+ struct gpio_regmap_config config = {};
+ struct regmap *regmap;
+ u32 base;
+ int ret;
+
+ if (!pdev->dev.parent)
+ return -ENODEV;
+
+ gpio_config = device_get_match_data(&pdev->dev);
+ if (!gpio_config)
+ return -ENODEV;
+
+ ret = device_property_read_u32(&pdev->dev, "reg", &base);
+ if (ret)
+ return ret;
+
+ regmap = dev_get_regmap(pdev->dev.parent, NULL);
+ if (!regmap)
+ return -ENODEV;
+
+ config.regmap = regmap;
+ config.parent = &pdev->dev;
+ config.ngpio = gpio_config->ngpio;
+ config.ngpio_per_reg = gpio_config->ngpio_per_reg;
+ switch (gpio_config->type) {
+	case TN48M_GPO:
+ config.reg_set_base = base;
+ break;
+ case TN48M_GPI:
+ config.reg_dat_base = base;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return PTR_ERR_OR_ZERO(devm_gpio_regmap_register(&pdev->dev, &config));
+}
+
+static const struct of_device_id tn48m_gpio_of_match[] = {
+ { .compatible = "delta,tn48m-gpo", .data = &tn48m_gpo_config },
+ { .compatible = "delta,tn48m-gpi", .data = &tn48m_gpi_config },
+ { }
+};
+MODULE_DEVICE_TABLE(of, tn48m_gpio_of_match);
+
+static struct platform_driver tn48m_gpio_driver = {
+ .driver = {
+ .name = "delta-tn48m-gpio",
+ .of_match_table = tn48m_gpio_of_match,
+ },
+ .probe = tn48m_gpio_probe,
+};
+module_platform_driver(tn48m_gpio_driver);
+
+MODULE_AUTHOR("Robert Marko <robert.marko@sartura.hr>");
+MODULE_DESCRIPTION("Delta TN48M CPLD GPIO driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/greybus/svc.c b/drivers/greybus/svc.c
index ce7740ef449b..56d2b44d6fef 100644
--- a/drivers/greybus/svc.c
+++ b/drivers/greybus/svc.c
@@ -861,16 +861,26 @@ static int gb_svc_hello(struct gb_operation *op)
ret = gb_svc_watchdog_create(svc);
if (ret) {
dev_err(&svc->dev, "failed to create watchdog: %d\n", ret);
- goto err_unregister_device;
+ goto err_deregister_svc;
}
+ /*
+ * FIXME: This is a temporary hack to reconfigure the link at HELLO
+ * (which abuses the deferred request processing mechanism).
+ */
+ ret = gb_svc_queue_deferred_request(op);
+ if (ret)
+ goto err_destroy_watchdog;
+
gb_svc_debugfs_init(svc);
- return gb_svc_queue_deferred_request(op);
+ return 0;
-err_unregister_device:
+err_destroy_watchdog:
gb_svc_watchdog_destroy(svc);
+err_deregister_svc:
device_del(&svc->dev);
+
return ret;
}
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 8df25f1079ba..af6c3bd65ebd 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -1538,6 +1538,8 @@ config SENSORS_PCF8591
These devices are hard to detect and rarely found on mainstream
hardware. If unsure, say N.
+source "drivers/hwmon/peci/Kconfig"
+
source "drivers/hwmon/pmbus/Kconfig"
config SENSORS_PWM_FAN
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index 185f946d698b..6139e5a5aa00 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -208,6 +208,7 @@ obj-$(CONFIG_SENSORS_WM8350) += wm8350-hwmon.o
obj-$(CONFIG_SENSORS_XGENE) += xgene-hwmon.o
obj-$(CONFIG_SENSORS_OCC) += occ/
+obj-$(CONFIG_SENSORS_PECI) += peci/
obj-$(CONFIG_PMBUS) += pmbus/
ccflags-$(CONFIG_HWMON_DEBUG_CHIP) := -DDEBUG
diff --git a/drivers/hwmon/peci/Kconfig b/drivers/hwmon/peci/Kconfig
new file mode 100644
index 000000000000..9d32a57badfe
--- /dev/null
+++ b/drivers/hwmon/peci/Kconfig
@@ -0,0 +1,31 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config SENSORS_PECI_CPUTEMP
+ tristate "PECI CPU temperature monitoring client"
+ depends on PECI
+ select SENSORS_PECI
+ select PECI_CPU
+ help
+ If you say yes here you get support for the generic Intel PECI
+ cputemp driver which provides Digital Thermal Sensor (DTS) thermal
+ readings of the CPU package and CPU cores that are accessible via
+ the processor PECI interface.
+
+ This driver can also be built as a module. If so, the module
+ will be called peci-cputemp.
+
+config SENSORS_PECI_DIMMTEMP
+ tristate "PECI DIMM temperature monitoring client"
+ depends on PECI
+ select SENSORS_PECI
+ select PECI_CPU
+ help
+ If you say yes here you get support for the generic Intel PECI hwmon
+	  driver which provides readings from the Temperature Sensor on DIMM
+	  that are accessible via the processor PECI interface.
+
+ This driver can also be built as a module. If so, the module
+ will be called peci-dimmtemp.
+
+config SENSORS_PECI
+ tristate
diff --git a/drivers/hwmon/peci/Makefile b/drivers/hwmon/peci/Makefile
new file mode 100644
index 000000000000..191cfa0227f3
--- /dev/null
+++ b/drivers/hwmon/peci/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+peci-cputemp-y := cputemp.o
+peci-dimmtemp-y := dimmtemp.o
+
+obj-$(CONFIG_SENSORS_PECI_CPUTEMP) += peci-cputemp.o
+obj-$(CONFIG_SENSORS_PECI_DIMMTEMP) += peci-dimmtemp.o
diff --git a/drivers/hwmon/peci/common.h b/drivers/hwmon/peci/common.h
new file mode 100644
index 000000000000..734506b0eca2
--- /dev/null
+++ b/drivers/hwmon/peci/common.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2021 Intel Corporation */
+
+#include <linux/mutex.h>
+#include <linux/types.h>
+
+#ifndef __PECI_HWMON_COMMON_H
+#define __PECI_HWMON_COMMON_H
+
+#define PECI_HWMON_UPDATE_INTERVAL HZ
+
+/**
+ * struct peci_sensor_state - PECI state information
+ * @valid: flag to indicate the sensor value is valid
+ * @last_updated: time of the last update in jiffies
+ * @lock: mutex to protect sensor access
+ */
+struct peci_sensor_state {
+ bool valid;
+ unsigned long last_updated;
+ struct mutex lock; /* protect sensor access */
+};
+
+/**
+ * struct peci_sensor_data - PECI sensor information
+ * @value: sensor value in milli units
+ * @state: sensor update state
+ */
+struct peci_sensor_data {
+ s32 value;
+ struct peci_sensor_state state;
+};
+
+/**
+ * peci_sensor_need_update() - check whether a sensor update is needed
+ * @state: pointer to the sensor state struct
+ *
+ * Return: true if an update is needed, false if not.
+ */
+static inline bool peci_sensor_need_update(struct peci_sensor_state *state)
+{
+ return !state->valid ||
+ time_after(jiffies, state->last_updated + PECI_HWMON_UPDATE_INTERVAL);
+}
+
+/**
+ * peci_sensor_mark_updated() - mark the sensor as updated
+ * @state: pointer to the sensor state struct
+ */
+static inline void peci_sensor_mark_updated(struct peci_sensor_state *state)
+{
+ state->valid = true;
+ state->last_updated = jiffies;
+}
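+
+/*
+ * Typical caller pattern (as used by the cputemp and dimmtemp drivers):
+ *
+ *   mutex_lock(&sensor->state.lock);
+ *   if (peci_sensor_need_update(&sensor->state)) {
+ *           read hardware and refresh sensor->value here;
+ *           peci_sensor_mark_updated(&sensor->state);
+ *   }
+ *   mutex_unlock(&sensor->state.lock);
+ */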
+
+#endif /* __PECI_HWMON_COMMON_H */
diff --git a/drivers/hwmon/peci/cputemp.c b/drivers/hwmon/peci/cputemp.c
new file mode 100644
index 000000000000..12156328f5cf
--- /dev/null
+++ b/drivers/hwmon/peci/cputemp.c
@@ -0,0 +1,592 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (c) 2018-2021 Intel Corporation
+
+#include <linux/auxiliary_bus.h>
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/hwmon.h>
+#include <linux/jiffies.h>
+#include <linux/module.h>
+#include <linux/peci.h>
+#include <linux/peci-cpu.h>
+#include <linux/units.h>
+
+#include "common.h"
+
+#define CORE_NUMS_MAX 64
+
+#define BASE_CHANNEL_NUMS 5
+#define CPUTEMP_CHANNEL_NUMS (BASE_CHANNEL_NUMS + CORE_NUMS_MAX)
+
+#define TEMP_TARGET_FAN_TEMP_MASK GENMASK(15, 8)
+#define TEMP_TARGET_REF_TEMP_MASK GENMASK(23, 16)
+#define TEMP_TARGET_TJ_OFFSET_MASK GENMASK(29, 24)
+
+#define DTS_MARGIN_MASK GENMASK(15, 0)
+#define PCS_MODULE_TEMP_MASK GENMASK(15, 0)
+
+struct resolved_cores_reg {
+ u8 bus;
+ u8 dev;
+ u8 func;
+ u8 offset;
+};
+
+struct cpu_info {
+ struct resolved_cores_reg *reg;
+ u8 min_peci_revision;
+ s32 (*thermal_margin_to_millidegree)(u16 val);
+};
+
+struct peci_temp_target {
+ s32 tcontrol;
+ s32 tthrottle;
+ s32 tjmax;
+ struct peci_sensor_state state;
+};
+
+enum peci_temp_target_type {
+ tcontrol_type,
+ tthrottle_type,
+ tjmax_type,
+ crit_hyst_type,
+};
+
+struct peci_cputemp {
+ struct peci_device *peci_dev;
+ struct device *dev;
+ const char *name;
+ const struct cpu_info *gen_info;
+ struct {
+ struct peci_temp_target target;
+ struct peci_sensor_data die;
+ struct peci_sensor_data dts;
+ struct peci_sensor_data core[CORE_NUMS_MAX];
+ } temp;
+ const char **coretemp_label;
+ DECLARE_BITMAP(core_mask, CORE_NUMS_MAX);
+};
+
+enum cputemp_channels {
+ channel_die,
+ channel_dts,
+ channel_tcontrol,
+ channel_tthrottle,
+ channel_tjmax,
+ channel_core,
+};
+
+static const char * const cputemp_label[BASE_CHANNEL_NUMS] = {
+ "Die",
+ "DTS",
+ "Tcontrol",
+ "Tthrottle",
+ "Tjmax",
+};
+
+static int update_temp_target(struct peci_cputemp *priv)
+{
+ s32 tthrottle_offset, tcontrol_margin;
+ u32 pcs;
+ int ret;
+
+ if (!peci_sensor_need_update(&priv->temp.target.state))
+ return 0;
+
+ ret = peci_pcs_read(priv->peci_dev, PECI_PCS_TEMP_TARGET, 0, &pcs);
+ if (ret)
+ return ret;
+
+ priv->temp.target.tjmax =
+ FIELD_GET(TEMP_TARGET_REF_TEMP_MASK, pcs) * MILLIDEGREE_PER_DEGREE;
+
+ tcontrol_margin = FIELD_GET(TEMP_TARGET_FAN_TEMP_MASK, pcs);
+ tcontrol_margin = sign_extend32(tcontrol_margin, 7) * MILLIDEGREE_PER_DEGREE;
+ priv->temp.target.tcontrol = priv->temp.target.tjmax - tcontrol_margin;
+
+ tthrottle_offset = FIELD_GET(TEMP_TARGET_TJ_OFFSET_MASK, pcs) * MILLIDEGREE_PER_DEGREE;
+ priv->temp.target.tthrottle = priv->temp.target.tjmax - tthrottle_offset;
+
+ peci_sensor_mark_updated(&priv->temp.target.state);
+
+ return 0;
+}
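+
+/*
+ * Worked example (values illustrative): pcs == 0x085e0a00 decodes to
+ * tjmax = 0x5e degC = 94000, tcontrol = 94000 - 10 * 1000 = 84000 and
+ * tthrottle = 94000 - 8 * 1000 = 86000, all in millidegrees Celsius.
+ */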
+
+static int get_temp_target(struct peci_cputemp *priv, enum peci_temp_target_type type, long *val)
+{
+ int ret;
+
+ mutex_lock(&priv->temp.target.state.lock);
+
+ ret = update_temp_target(priv);
+ if (ret)
+ goto unlock;
+
+ switch (type) {
+ case tcontrol_type:
+ *val = priv->temp.target.tcontrol;
+ break;
+ case tthrottle_type:
+ *val = priv->temp.target.tthrottle;
+ break;
+ case tjmax_type:
+ *val = priv->temp.target.tjmax;
+ break;
+ case crit_hyst_type:
+ *val = priv->temp.target.tjmax - priv->temp.target.tcontrol;
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+unlock:
+ mutex_unlock(&priv->temp.target.state.lock);
+
+ return ret;
+}
+
+/*
+ * Error codes:
+ * 0x8000: General sensor error
+ * 0x8001: Reserved
+ * 0x8002: Underflow on reading value
+ * 0x8003-0x81ff: Reserved
+ */
+static bool dts_valid(u16 val)
+{
+ return val < 0x8000 || val > 0x81ff;
+}
+
+/*
+ * Processors return a value of DTS reading in S10.6 fixed point format
+ * (16 bits: 10-bit signed magnitude, 6-bit fraction).
+ */
+static s32 dts_ten_dot_six_to_millidegree(u16 val)
+{
+ return sign_extend32(val, 15) * MILLIDEGREE_PER_DEGREE / 64;
+}
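+
+/* e.g. a raw margin of 0xffc0 (-1.0 in S10.6) becomes -1000 millidegrees */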
+
+/*
+ * For older processors, thermal margin reading is returned in S8.8 fixed
+ * point format (16 bits: 8-bit signed magnitude, 8-bit fraction).
+ */
+static s32 dts_eight_dot_eight_to_millidegree(u16 val)
+{
+ return sign_extend32(val, 15) * MILLIDEGREE_PER_DEGREE / 256;
+}
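+
+/* e.g. a raw margin of 0xff00 (-1.0 in S8.8) likewise becomes -1000 millidegrees */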
+
+static int get_die_temp(struct peci_cputemp *priv, long *val)
+{
+ int ret = 0;
+ long tjmax;
+ u16 temp;
+
+ mutex_lock(&priv->temp.die.state.lock);
+ if (!peci_sensor_need_update(&priv->temp.die.state))
+ goto skip_update;
+
+ ret = peci_temp_read(priv->peci_dev, &temp);
+ if (ret)
+ goto err_unlock;
+
+ if (!dts_valid(temp)) {
+ ret = -EIO;
+ goto err_unlock;
+ }
+
+ ret = get_temp_target(priv, tjmax_type, &tjmax);
+ if (ret)
+ goto err_unlock;
+
+ priv->temp.die.value = (s32)tjmax + dts_ten_dot_six_to_millidegree(temp);
+
+ peci_sensor_mark_updated(&priv->temp.die.state);
+
+skip_update:
+ *val = priv->temp.die.value;
+err_unlock:
+ mutex_unlock(&priv->temp.die.state.lock);
+ return ret;
+}
+
+static int get_dts(struct peci_cputemp *priv, long *val)
+{
+ int ret = 0;
+ u16 thermal_margin;
+ long tcontrol;
+ u32 pcs;
+
+ mutex_lock(&priv->temp.dts.state.lock);
+ if (!peci_sensor_need_update(&priv->temp.dts.state))
+ goto skip_update;
+
+ ret = peci_pcs_read(priv->peci_dev, PECI_PCS_THERMAL_MARGIN, 0, &pcs);
+ if (ret)
+ goto err_unlock;
+
+ thermal_margin = FIELD_GET(DTS_MARGIN_MASK, pcs);
+ if (!dts_valid(thermal_margin)) {
+ ret = -EIO;
+ goto err_unlock;
+ }
+
+ ret = get_temp_target(priv, tcontrol_type, &tcontrol);
+ if (ret)
+ goto err_unlock;
+
+	/* tcontrol has just been fetched above, so it is valid here */
+ priv->temp.dts.value =
+ (s32)tcontrol - priv->gen_info->thermal_margin_to_millidegree(thermal_margin);
+
+ peci_sensor_mark_updated(&priv->temp.dts.state);
+
+skip_update:
+ *val = priv->temp.dts.value;
+err_unlock:
+ mutex_unlock(&priv->temp.dts.state.lock);
+ return ret;
+}
+
+static int get_core_temp(struct peci_cputemp *priv, int core_index, long *val)
+{
+ int ret = 0;
+ u16 core_dts_margin;
+ long tjmax;
+ u32 pcs;
+
+ mutex_lock(&priv->temp.core[core_index].state.lock);
+ if (!peci_sensor_need_update(&priv->temp.core[core_index].state))
+ goto skip_update;
+
+ ret = peci_pcs_read(priv->peci_dev, PECI_PCS_MODULE_TEMP, core_index, &pcs);
+ if (ret)
+ goto err_unlock;
+
+ core_dts_margin = FIELD_GET(PCS_MODULE_TEMP_MASK, pcs);
+ if (!dts_valid(core_dts_margin)) {
+ ret = -EIO;
+ goto err_unlock;
+ }
+
+ ret = get_temp_target(priv, tjmax_type, &tjmax);
+ if (ret)
+ goto err_unlock;
+
+	/* tjmax has just been fetched above, so it is valid here */
+ priv->temp.core[core_index].value =
+ (s32)tjmax + dts_ten_dot_six_to_millidegree(core_dts_margin);
+
+ peci_sensor_mark_updated(&priv->temp.core[core_index].state);
+
+skip_update:
+ *val = priv->temp.core[core_index].value;
+err_unlock:
+ mutex_unlock(&priv->temp.core[core_index].state.lock);
+ return ret;
+}
+
+static int cputemp_read_string(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, const char **str)
+{
+ struct peci_cputemp *priv = dev_get_drvdata(dev);
+
+ if (attr != hwmon_temp_label)
+ return -EOPNOTSUPP;
+
+ *str = channel < channel_core ?
+ cputemp_label[channel] : priv->coretemp_label[channel - channel_core];
+
+ return 0;
+}
+
+static int cputemp_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ struct peci_cputemp *priv = dev_get_drvdata(dev);
+
+ switch (attr) {
+ case hwmon_temp_input:
+ switch (channel) {
+ case channel_die:
+ return get_die_temp(priv, val);
+ case channel_dts:
+ return get_dts(priv, val);
+ case channel_tcontrol:
+ return get_temp_target(priv, tcontrol_type, val);
+ case channel_tthrottle:
+ return get_temp_target(priv, tthrottle_type, val);
+ case channel_tjmax:
+ return get_temp_target(priv, tjmax_type, val);
+ default:
+ return get_core_temp(priv, channel - channel_core, val);
+ }
+ break;
+ case hwmon_temp_max:
+ return get_temp_target(priv, tcontrol_type, val);
+ case hwmon_temp_crit:
+ return get_temp_target(priv, tjmax_type, val);
+ case hwmon_temp_crit_hyst:
+ return get_temp_target(priv, crit_hyst_type, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static umode_t cputemp_is_visible(const void *data, enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ const struct peci_cputemp *priv = data;
+
+ if (channel > CPUTEMP_CHANNEL_NUMS)
+ return 0;
+
+ if (channel < channel_core)
+ return 0444;
+
+ if (test_bit(channel - channel_core, priv->core_mask))
+ return 0444;
+
+ return 0;
+}
+
+static int init_core_mask(struct peci_cputemp *priv)
+{
+ struct peci_device *peci_dev = priv->peci_dev;
+ struct resolved_cores_reg *reg = priv->gen_info->reg;
+ u64 core_mask;
+ u32 data;
+ int ret;
+
+ /* Get the RESOLVED_CORES register value */
+ switch (peci_dev->info.model) {
+ case INTEL_FAM6_ICELAKE_X:
+ case INTEL_FAM6_ICELAKE_D:
+ ret = peci_ep_pci_local_read(peci_dev, 0, reg->bus, reg->dev,
+ reg->func, reg->offset + 4, &data);
+ if (ret)
+ return ret;
+
+ core_mask = (u64)data << 32;
+
+ ret = peci_ep_pci_local_read(peci_dev, 0, reg->bus, reg->dev,
+ reg->func, reg->offset, &data);
+ if (ret)
+ return ret;
+
+ core_mask |= data;
+
+ break;
+ default:
+ ret = peci_pci_local_read(peci_dev, reg->bus, reg->dev,
+ reg->func, reg->offset, &data);
+ if (ret)
+ return ret;
+
+ core_mask = data;
+
+ break;
+ }
+
+ if (!core_mask)
+ return -EIO;
+
+ bitmap_from_u64(priv->core_mask, core_mask);
+
+ return 0;
+}
+
+static int create_temp_label(struct peci_cputemp *priv)
+{
+ unsigned long core_max = find_last_bit(priv->core_mask, CORE_NUMS_MAX);
+ int i;
+
+	priv->coretemp_label = devm_kzalloc(priv->dev, (core_max + 1) * sizeof(char *), GFP_KERNEL);
+ if (!priv->coretemp_label)
+ return -ENOMEM;
+
+ for_each_set_bit(i, priv->core_mask, CORE_NUMS_MAX) {
+ priv->coretemp_label[i] = devm_kasprintf(priv->dev, GFP_KERNEL, "Core %d", i);
+ if (!priv->coretemp_label[i])
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void check_resolved_cores(struct peci_cputemp *priv)
+{
+ /*
+ * Failure to resolve cores is non-critical, we're still able to
+ * provide other sensor data.
+ */
+
+ if (init_core_mask(priv))
+ return;
+
+ if (create_temp_label(priv))
+ bitmap_zero(priv->core_mask, CORE_NUMS_MAX);
+}
+
+static void sensor_init(struct peci_cputemp *priv)
+{
+ int i;
+
+ mutex_init(&priv->temp.target.state.lock);
+ mutex_init(&priv->temp.die.state.lock);
+ mutex_init(&priv->temp.dts.state.lock);
+
+ for_each_set_bit(i, priv->core_mask, CORE_NUMS_MAX)
+ mutex_init(&priv->temp.core[i].state.lock);
+}
+
+static const struct hwmon_ops peci_cputemp_ops = {
+ .is_visible = cputemp_is_visible,
+ .read_string = cputemp_read_string,
+ .read = cputemp_read,
+};
+
+static const u32 peci_cputemp_temp_channel_config[] = {
+ /* Die temperature */
+ HWMON_T_LABEL | HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_CRIT | HWMON_T_CRIT_HYST,
+ /* DTS margin */
+ HWMON_T_LABEL | HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_CRIT | HWMON_T_CRIT_HYST,
+ /* Tcontrol temperature */
+ HWMON_T_LABEL | HWMON_T_INPUT | HWMON_T_CRIT,
+ /* Tthrottle temperature */
+ HWMON_T_LABEL | HWMON_T_INPUT,
+ /* Tjmax temperature */
+ HWMON_T_LABEL | HWMON_T_INPUT,
+ /* Core temperature - for all core channels */
+ [channel_core ... CPUTEMP_CHANNEL_NUMS - 1] = HWMON_T_LABEL | HWMON_T_INPUT,
+ 0
+};
+
+static const struct hwmon_channel_info peci_cputemp_temp_channel = {
+ .type = hwmon_temp,
+ .config = peci_cputemp_temp_channel_config,
+};
+
+static const struct hwmon_channel_info *peci_cputemp_info[] = {
+ &peci_cputemp_temp_channel,
+ NULL
+};
+
+static const struct hwmon_chip_info peci_cputemp_chip_info = {
+ .ops = &peci_cputemp_ops,
+ .info = peci_cputemp_info,
+};
+
+static int peci_cputemp_probe(struct auxiliary_device *adev,
+ const struct auxiliary_device_id *id)
+{
+ struct device *dev = &adev->dev;
+ struct peci_device *peci_dev = to_peci_device(dev->parent);
+ struct peci_cputemp *priv;
+ struct device *hwmon_dev;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->name = devm_kasprintf(dev, GFP_KERNEL, "peci_cputemp.cpu%d",
+ peci_dev->info.socket_id);
+ if (!priv->name)
+ return -ENOMEM;
+
+ priv->dev = dev;
+ priv->peci_dev = peci_dev;
+ priv->gen_info = (const struct cpu_info *)id->driver_data;
+
+ /*
+ * This is just a sanity check. Since we're using commands that are
+ * guaranteed to be supported on a given platform, we should never see
+ * revision lower than expected.
+ */
+ if (peci_dev->info.peci_revision < priv->gen_info->min_peci_revision)
+ dev_warn(priv->dev,
+ "Unexpected PECI revision %#x, some features may be unavailable\n",
+ peci_dev->info.peci_revision);
+
+ check_resolved_cores(priv);
+
+ sensor_init(priv);
+
+ hwmon_dev = devm_hwmon_device_register_with_info(priv->dev, priv->name,
+ priv, &peci_cputemp_chip_info, NULL);
+
+ return PTR_ERR_OR_ZERO(hwmon_dev);
+}
+
+/*
+ * RESOLVED_CORES PCI configuration register may have different location on
+ * different platforms.
+ */
+static struct resolved_cores_reg resolved_cores_reg_hsx = {
+ .bus = 1,
+ .dev = 30,
+ .func = 3,
+ .offset = 0xb4,
+};
+
+static struct resolved_cores_reg resolved_cores_reg_icx = {
+ .bus = 14,
+ .dev = 30,
+ .func = 3,
+ .offset = 0xd0,
+};
+
+static const struct cpu_info cpu_hsx = {
+ .reg = &resolved_cores_reg_hsx,
+ .min_peci_revision = 0x33,
+ .thermal_margin_to_millidegree = &dts_eight_dot_eight_to_millidegree,
+};
+
+static const struct cpu_info cpu_icx = {
+ .reg = &resolved_cores_reg_icx,
+ .min_peci_revision = 0x40,
+ .thermal_margin_to_millidegree = &dts_ten_dot_six_to_millidegree,
+};
+
+static const struct auxiliary_device_id peci_cputemp_ids[] = {
+ {
+ .name = "peci_cpu.cputemp.hsx",
+ .driver_data = (kernel_ulong_t)&cpu_hsx,
+ },
+ {
+ .name = "peci_cpu.cputemp.bdx",
+ .driver_data = (kernel_ulong_t)&cpu_hsx,
+ },
+ {
+ .name = "peci_cpu.cputemp.bdxd",
+ .driver_data = (kernel_ulong_t)&cpu_hsx,
+ },
+ {
+ .name = "peci_cpu.cputemp.skx",
+ .driver_data = (kernel_ulong_t)&cpu_hsx,
+ },
+ {
+ .name = "peci_cpu.cputemp.icx",
+ .driver_data = (kernel_ulong_t)&cpu_icx,
+ },
+ {
+ .name = "peci_cpu.cputemp.icxd",
+ .driver_data = (kernel_ulong_t)&cpu_icx,
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(auxiliary, peci_cputemp_ids);
+
+static struct auxiliary_driver peci_cputemp_driver = {
+ .probe = peci_cputemp_probe,
+ .id_table = peci_cputemp_ids,
+};
+
+module_auxiliary_driver(peci_cputemp_driver);
+
+MODULE_AUTHOR("Jae Hyun Yoo <jae.hyun.yoo@linux.intel.com>");
+MODULE_AUTHOR("Iwona Winiarska <iwona.winiarska@intel.com>");
+MODULE_DESCRIPTION("PECI cputemp driver");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(PECI_CPU);
diff --git a/drivers/hwmon/peci/dimmtemp.c b/drivers/hwmon/peci/dimmtemp.c
new file mode 100644
index 000000000000..c8222354c005
--- /dev/null
+++ b/drivers/hwmon/peci/dimmtemp.c
@@ -0,0 +1,630 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (c) 2018-2021 Intel Corporation
+
+#include <linux/auxiliary_bus.h>
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/hwmon.h>
+#include <linux/jiffies.h>
+#include <linux/module.h>
+#include <linux/peci.h>
+#include <linux/peci-cpu.h>
+#include <linux/units.h>
+#include <linux/workqueue.h>
+
+#include "common.h"
+
+#define DIMM_MASK_CHECK_DELAY_JIFFIES msecs_to_jiffies(5000)
+
+/* Max number of channel ranks and DIMM index per channel */
+#define CHAN_RANK_MAX_ON_HSX 8
+#define DIMM_IDX_MAX_ON_HSX 3
+#define CHAN_RANK_MAX_ON_BDX 4
+#define DIMM_IDX_MAX_ON_BDX 3
+#define CHAN_RANK_MAX_ON_BDXD 2
+#define DIMM_IDX_MAX_ON_BDXD 2
+#define CHAN_RANK_MAX_ON_SKX 6
+#define DIMM_IDX_MAX_ON_SKX 2
+#define CHAN_RANK_MAX_ON_ICX 8
+#define DIMM_IDX_MAX_ON_ICX 2
+#define CHAN_RANK_MAX_ON_ICXD 4
+#define DIMM_IDX_MAX_ON_ICXD 2
+
+#define CHAN_RANK_MAX CHAN_RANK_MAX_ON_HSX
+#define DIMM_IDX_MAX DIMM_IDX_MAX_ON_HSX
+#define DIMM_NUMS_MAX (CHAN_RANK_MAX * DIMM_IDX_MAX)
+
+#define CPU_SEG_MASK GENMASK(23, 16)
+#define GET_CPU_SEG(x) (((x) & CPU_SEG_MASK) >> 16)
+#define CPU_BUS_MASK GENMASK(7, 0)
+#define GET_CPU_BUS(x) ((x) & CPU_BUS_MASK)
+
+#define DIMM_TEMP_MAX GENMASK(15, 8)
+#define DIMM_TEMP_CRIT GENMASK(23, 16)
+#define GET_TEMP_MAX(x) (((x) & DIMM_TEMP_MAX) >> 8)
+#define GET_TEMP_CRIT(x) (((x) & DIMM_TEMP_CRIT) >> 16)
+
+#define NO_DIMM_RETRY_COUNT_MAX 5
+
+struct peci_dimmtemp;
+
+struct dimm_info {
+ int chan_rank_max;
+ int dimm_idx_max;
+ u8 min_peci_revision;
+ int (*read_thresholds)(struct peci_dimmtemp *priv, int dimm_order,
+ int chan_rank, u32 *data);
+};
+
+struct peci_dimm_thresholds {
+ long temp_max;
+ long temp_crit;
+ struct peci_sensor_state state;
+};
+
+enum peci_dimm_threshold_type {
+ temp_max_type,
+ temp_crit_type,
+};
+
+struct peci_dimmtemp {
+ struct peci_device *peci_dev;
+ struct device *dev;
+ const char *name;
+ const struct dimm_info *gen_info;
+ struct delayed_work detect_work;
+ struct {
+ struct peci_sensor_data temp;
+ struct peci_dimm_thresholds thresholds;
+ } dimm[DIMM_NUMS_MAX];
+ char **dimmtemp_label;
+ DECLARE_BITMAP(dimm_mask, DIMM_NUMS_MAX);
+ u8 no_dimm_retry_count;
+};
+
+static u8 __dimm_temp(u32 reg, int dimm_order)
+{
+ return (reg >> (dimm_order * 8)) & 0xff;
+}
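+
+/* e.g. reg == 0x00332a28: dimm_order 0 -> 0x28 (40 degC), 1 -> 0x2a (42), 2 -> 0x33 (51) */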
+
+static int get_dimm_temp(struct peci_dimmtemp *priv, int dimm_no, long *val)
+{
+ int dimm_order = dimm_no % priv->gen_info->dimm_idx_max;
+ int chan_rank = dimm_no / priv->gen_info->dimm_idx_max;
+ int ret = 0;
+ u32 data;
+
+ mutex_lock(&priv->dimm[dimm_no].temp.state.lock);
+ if (!peci_sensor_need_update(&priv->dimm[dimm_no].temp.state))
+ goto skip_update;
+
+ ret = peci_pcs_read(priv->peci_dev, PECI_PCS_DDR_DIMM_TEMP, chan_rank, &data);
+ if (ret)
+ goto unlock;
+
+ priv->dimm[dimm_no].temp.value = __dimm_temp(data, dimm_order) * MILLIDEGREE_PER_DEGREE;
+
+ peci_sensor_mark_updated(&priv->dimm[dimm_no].temp.state);
+
+skip_update:
+ *val = priv->dimm[dimm_no].temp.value;
+unlock:
+ mutex_unlock(&priv->dimm[dimm_no].temp.state.lock);
+ return ret;
+}
+
+static int update_thresholds(struct peci_dimmtemp *priv, int dimm_no)
+{
+ int dimm_order = dimm_no % priv->gen_info->dimm_idx_max;
+ int chan_rank = dimm_no / priv->gen_info->dimm_idx_max;
+ u32 data;
+ int ret;
+
+ if (!peci_sensor_need_update(&priv->dimm[dimm_no].thresholds.state))
+ return 0;
+
+ ret = priv->gen_info->read_thresholds(priv, dimm_order, chan_rank, &data);
+ if (ret == -ENODATA) /* Use default or previous value */
+ return 0;
+ if (ret)
+ return ret;
+
+ priv->dimm[dimm_no].thresholds.temp_max = GET_TEMP_MAX(data) * MILLIDEGREE_PER_DEGREE;
+ priv->dimm[dimm_no].thresholds.temp_crit = GET_TEMP_CRIT(data) * MILLIDEGREE_PER_DEGREE;
+
+ peci_sensor_mark_updated(&priv->dimm[dimm_no].thresholds.state);
+
+ return 0;
+}
+
+static int get_dimm_thresholds(struct peci_dimmtemp *priv, enum peci_dimm_threshold_type type,
+ int dimm_no, long *val)
+{
+ int ret;
+
+ mutex_lock(&priv->dimm[dimm_no].thresholds.state.lock);
+ ret = update_thresholds(priv, dimm_no);
+ if (ret)
+ goto unlock;
+
+ switch (type) {
+ case temp_max_type:
+ *val = priv->dimm[dimm_no].thresholds.temp_max;
+ break;
+ case temp_crit_type:
+ *val = priv->dimm[dimm_no].thresholds.temp_crit;
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+unlock:
+ mutex_unlock(&priv->dimm[dimm_no].thresholds.state.lock);
+
+ return ret;
+}
+
+static int dimmtemp_read_string(struct device *dev,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel, const char **str)
+{
+ struct peci_dimmtemp *priv = dev_get_drvdata(dev);
+
+ if (attr != hwmon_temp_label)
+ return -EOPNOTSUPP;
+
+ *str = (const char *)priv->dimmtemp_label[channel];
+
+ return 0;
+}
+
+static int dimmtemp_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ struct peci_dimmtemp *priv = dev_get_drvdata(dev);
+
+ switch (attr) {
+ case hwmon_temp_input:
+ return get_dimm_temp(priv, channel, val);
+ case hwmon_temp_max:
+ return get_dimm_thresholds(priv, temp_max_type, channel, val);
+ case hwmon_temp_crit:
+ return get_dimm_thresholds(priv, temp_crit_type, channel, val);
+ default:
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static umode_t dimmtemp_is_visible(const void *data, enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ const struct peci_dimmtemp *priv = data;
+
+ if (test_bit(channel, priv->dimm_mask))
+ return 0444;
+
+ return 0;
+}
+
+static const struct hwmon_ops peci_dimmtemp_ops = {
+ .is_visible = dimmtemp_is_visible,
+ .read_string = dimmtemp_read_string,
+ .read = dimmtemp_read,
+};
+
+static int check_populated_dimms(struct peci_dimmtemp *priv)
+{
+ int chan_rank_max = priv->gen_info->chan_rank_max;
+ int dimm_idx_max = priv->gen_info->dimm_idx_max;
+ u32 chan_rank_empty = 0;
+ u64 dimm_mask = 0;
+ int chan_rank, dimm_idx, ret;
+ u32 pcs;
+
+ BUILD_BUG_ON(BITS_PER_TYPE(chan_rank_empty) < CHAN_RANK_MAX);
+ BUILD_BUG_ON(BITS_PER_TYPE(dimm_mask) < DIMM_NUMS_MAX);
+ if (chan_rank_max * dimm_idx_max > DIMM_NUMS_MAX) {
+ WARN_ONCE(1, "Unsupported number of DIMMs - chan_rank_max: %d, dimm_idx_max: %d",
+ chan_rank_max, dimm_idx_max);
+ return -EINVAL;
+ }
+
+ for (chan_rank = 0; chan_rank < chan_rank_max; chan_rank++) {
+ ret = peci_pcs_read(priv->peci_dev, PECI_PCS_DDR_DIMM_TEMP, chan_rank, &pcs);
+ if (ret) {
+ /*
+ * Overall, we expect either success or -EINVAL in
+			 * order to determine whether the DIMM is populated or
+			 * not. For anything else we defer the detection to a
+			 * later point in time.
+ */
+ if (ret == -EINVAL) {
+ chan_rank_empty |= BIT(chan_rank);
+ continue;
+ }
+
+ return -EAGAIN;
+ }
+
+ for (dimm_idx = 0; dimm_idx < dimm_idx_max; dimm_idx++)
+ if (__dimm_temp(pcs, dimm_idx))
+ dimm_mask |= BIT(chan_rank * dimm_idx_max + dimm_idx);
+ }
+
+ /*
+ * If we got all -EINVALs, it means that the CPU doesn't have any
+ * DIMMs. Unfortunately, it may also happen at the very start of
+ * host platform boot. Retrying a couple of times lets us make sure
+ * that the state is persistent.
+ */
+ if (chan_rank_empty == GENMASK(chan_rank_max - 1, 0)) {
+ if (priv->no_dimm_retry_count < NO_DIMM_RETRY_COUNT_MAX) {
+ priv->no_dimm_retry_count++;
+
+ return -EAGAIN;
+ }
+
+ return -ENODEV;
+ }
+
+ /*
+ * It's possible that memory training is not done yet. In this case we
+ * defer the detection to be performed at a later point in time.
+ */
+ if (!dimm_mask) {
+ priv->no_dimm_retry_count = 0;
+ return -EAGAIN;
+ }
+
+ dev_dbg(priv->dev, "Scanned populated DIMMs: %#llx\n", dimm_mask);
+
+ bitmap_from_u64(priv->dimm_mask, dimm_mask);
+
+ return 0;
+}
+
+static int create_dimm_temp_label(struct peci_dimmtemp *priv, int chan)
+{
+ int rank = chan / priv->gen_info->dimm_idx_max;
+ int idx = chan % priv->gen_info->dimm_idx_max;
+
+ priv->dimmtemp_label[chan] = devm_kasprintf(priv->dev, GFP_KERNEL,
+ "DIMM %c%d", 'A' + rank,
+ idx + 1);
+ if (!priv->dimmtemp_label[chan])
+ return -ENOMEM;
+
+ return 0;
+}
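+
+/* e.g. with dimm_idx_max == 2, channel 5 is rank 2, idx 1, labelled "DIMM C2" */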
+
+static const u32 peci_dimmtemp_temp_channel_config[] = {
+ [0 ... DIMM_NUMS_MAX - 1] = HWMON_T_LABEL | HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_CRIT,
+ 0
+};
+
+static const struct hwmon_channel_info peci_dimmtemp_temp_channel = {
+ .type = hwmon_temp,
+ .config = peci_dimmtemp_temp_channel_config,
+};
+
+static const struct hwmon_channel_info *peci_dimmtemp_temp_info[] = {
+ &peci_dimmtemp_temp_channel,
+ NULL
+};
+
+static const struct hwmon_chip_info peci_dimmtemp_chip_info = {
+ .ops = &peci_dimmtemp_ops,
+ .info = peci_dimmtemp_temp_info,
+};
+
+static int create_dimm_temp_info(struct peci_dimmtemp *priv)
+{
+ int ret, i, channels;
+ struct device *dev;
+
+ /*
+ * We expect to either find populated DIMMs and carry on with creating
+ * sensors, or find out that there are no DIMMs populated.
+	 * All other states mean that the platform has not yet reached a point
+	 * where the DIMM state can be checked, so we retry later on.
+ */
+ ret = check_populated_dimms(priv);
+ if (ret == -ENODEV) {
+ dev_dbg(priv->dev, "No DIMMs found\n");
+ return 0;
+ } else if (ret) {
+ schedule_delayed_work(&priv->detect_work, DIMM_MASK_CHECK_DELAY_JIFFIES);
+ dev_dbg(priv->dev, "Deferred populating DIMM temp info\n");
+ return ret;
+ }
+
+ channels = priv->gen_info->chan_rank_max * priv->gen_info->dimm_idx_max;
+
+ priv->dimmtemp_label = devm_kzalloc(priv->dev, channels * sizeof(char *), GFP_KERNEL);
+ if (!priv->dimmtemp_label)
+ return -ENOMEM;
+
+ for_each_set_bit(i, priv->dimm_mask, DIMM_NUMS_MAX) {
+ ret = create_dimm_temp_label(priv, i);
+ if (ret)
+ return ret;
+ mutex_init(&priv->dimm[i].thresholds.state.lock);
+ mutex_init(&priv->dimm[i].temp.state.lock);
+ }
+
+ dev = devm_hwmon_device_register_with_info(priv->dev, priv->name, priv,
+ &peci_dimmtemp_chip_info, NULL);
+ if (IS_ERR(dev)) {
+ dev_err(priv->dev, "Failed to register hwmon device\n");
+ return PTR_ERR(dev);
+ }
+
+ dev_dbg(priv->dev, "%s: sensor '%s'\n", dev_name(dev), priv->name);
+
+ return 0;
+}
+
+static void create_dimm_temp_info_delayed(struct work_struct *work)
+{
+ struct peci_dimmtemp *priv = container_of(to_delayed_work(work),
+ struct peci_dimmtemp,
+ detect_work);
+ int ret;
+
+ ret = create_dimm_temp_info(priv);
+ if (ret && ret != -EAGAIN)
+ dev_err(priv->dev, "Failed to populate DIMM temp info\n");
+}
+
+static void remove_delayed_work(void *_priv)
+{
+ struct peci_dimmtemp *priv = _priv;
+
+ cancel_delayed_work_sync(&priv->detect_work);
+}
+
+static int peci_dimmtemp_probe(struct auxiliary_device *adev, const struct auxiliary_device_id *id)
+{
+ struct device *dev = &adev->dev;
+ struct peci_device *peci_dev = to_peci_device(dev->parent);
+ struct peci_dimmtemp *priv;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->name = devm_kasprintf(dev, GFP_KERNEL, "peci_dimmtemp.cpu%d",
+ peci_dev->info.socket_id);
+ if (!priv->name)
+ return -ENOMEM;
+
+ priv->dev = dev;
+ priv->peci_dev = peci_dev;
+ priv->gen_info = (const struct dimm_info *)id->driver_data;
+
+ /*
+ * This is just a sanity check. Since we're using commands that are
+ * guaranteed to be supported on a given platform, we should never see
+ * revision lower than expected.
+ */
+ if (peci_dev->info.peci_revision < priv->gen_info->min_peci_revision)
+ dev_warn(priv->dev,
+ "Unexpected PECI revision %#x, some features may be unavailable\n",
+ peci_dev->info.peci_revision);
+
+ INIT_DELAYED_WORK(&priv->detect_work, create_dimm_temp_info_delayed);
+
+ ret = devm_add_action_or_reset(priv->dev, remove_delayed_work, priv);
+ if (ret)
+ return ret;
+
+ ret = create_dimm_temp_info(priv);
+ if (ret && ret != -EAGAIN) {
+ dev_err(dev, "Failed to populate DIMM temp info\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+read_thresholds_hsx(struct peci_dimmtemp *priv, int dimm_order, int chan_rank, u32 *data)
+{
+ u8 dev, func;
+ u16 reg;
+ int ret;
+
+ /*
+ * Device 20, Function 0: IMC 0 channel 0 -> rank 0
+ * Device 20, Function 1: IMC 0 channel 1 -> rank 1
+ * Device 21, Function 0: IMC 0 channel 2 -> rank 2
+ * Device 21, Function 1: IMC 0 channel 3 -> rank 3
+ * Device 23, Function 0: IMC 1 channel 0 -> rank 4
+ * Device 23, Function 1: IMC 1 channel 1 -> rank 5
+ * Device 24, Function 0: IMC 1 channel 2 -> rank 6
+ * Device 24, Function 1: IMC 1 channel 3 -> rank 7
+ */
+ dev = 20 + chan_rank / 2 + chan_rank / 4;
+ func = chan_rank % 2;
+ reg = 0x120 + dimm_order * 4;
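+	/* e.g. chan_rank 5: dev = 20 + 2 + 1 = 23, func = 1, matching the table above */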
+
+ ret = peci_pci_local_read(priv->peci_dev, 1, dev, func, reg, data);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int
+read_thresholds_bdxd(struct peci_dimmtemp *priv, int dimm_order, int chan_rank, u32 *data)
+{
+ u8 dev, func;
+ u16 reg;
+ int ret;
+
+ /*
+ * Device 10, Function 2: IMC 0 channel 0 -> rank 0
+ * Device 10, Function 6: IMC 0 channel 1 -> rank 1
+ * Device 12, Function 2: IMC 1 channel 0 -> rank 2
+ * Device 12, Function 6: IMC 1 channel 1 -> rank 3
+ */
+ dev = 10 + chan_rank / 2 * 2;
+ func = (chan_rank % 2) ? 6 : 2;
+ reg = 0x120 + dimm_order * 4;
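+	/* e.g. chan_rank 3: dev = 10 + 2 = 12, func = 6, matching the table above */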
+
+ ret = peci_pci_local_read(priv->peci_dev, 2, dev, func, reg, data);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int
+read_thresholds_skx(struct peci_dimmtemp *priv, int dimm_order, int chan_rank, u32 *data)
+{
+ u8 dev, func;
+ u16 reg;
+ int ret;
+
+ /*
+ * Device 10, Function 2: IMC 0 channel 0 -> rank 0
+ * Device 10, Function 6: IMC 0 channel 1 -> rank 1
+ * Device 11, Function 2: IMC 0 channel 2 -> rank 2
+ * Device 12, Function 2: IMC 1 channel 0 -> rank 3
+ * Device 12, Function 6: IMC 1 channel 1 -> rank 4
+ * Device 13, Function 2: IMC 1 channel 2 -> rank 5
+ */
+ dev = 10 + chan_rank / 3 * 2 + (chan_rank % 3 == 2 ? 1 : 0);
+ func = chan_rank % 3 == 1 ? 6 : 2;
+ reg = 0x120 + dimm_order * 4;
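+ /* e.g. chan_rank = 5, dimm_order = 1: dev = 13, func = 2, reg = 0x124 (IMC 1 channel 2 above) */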
+
+ ret = peci_pci_local_read(priv->peci_dev, 2, dev, func, reg, data);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int
+read_thresholds_icx(struct peci_dimmtemp *priv, int dimm_order, int chan_rank, u32 *data)
+{
+ u32 reg_val;
+ u64 offset;
+ int ret;
+ u8 dev;
+
+ ret = peci_ep_pci_local_read(priv->peci_dev, 0, 13, 0, 2, 0xd4, &reg_val);
+ if (ret || !(reg_val & BIT(31)))
+ return -ENODATA; /* Use default or previous value */
+
+ ret = peci_ep_pci_local_read(priv->peci_dev, 0, 13, 0, 2, 0xd0, &reg_val);
+ if (ret)
+ return -ENODATA; /* Use default or previous value */
+
+ /*
+ * Device 26, Offset 224e0: IMC 0 channel 0 -> rank 0
+ * Device 26, Offset 264e0: IMC 0 channel 1 -> rank 1
+ * Device 27, Offset 224e0: IMC 1 channel 0 -> rank 2
+ * Device 27, Offset 264e0: IMC 1 channel 1 -> rank 3
+ * Device 28, Offset 224e0: IMC 2 channel 0 -> rank 4
+ * Device 28, Offset 264e0: IMC 2 channel 1 -> rank 5
+ * Device 29, Offset 224e0: IMC 3 channel 0 -> rank 6
+ * Device 29, Offset 264e0: IMC 3 channel 1 -> rank 7
+ */
+ dev = 26 + chan_rank / 2;
+ offset = 0x224e0 + dimm_order * 4 + (chan_rank % 2) * 0x4000;
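+ /* e.g. chan_rank = 3, dimm_order = 0: dev = 27, offset = 0x264e0 (IMC 1 channel 1 above) */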
+
+ ret = peci_mmio_read(priv->peci_dev, 0, GET_CPU_SEG(reg_val), GET_CPU_BUS(reg_val),
+ dev, 0, offset, data);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static const struct dimm_info dimm_hsx = {
+ .chan_rank_max = CHAN_RANK_MAX_ON_HSX,
+ .dimm_idx_max = DIMM_IDX_MAX_ON_HSX,
+ .min_peci_revision = 0x33,
+ .read_thresholds = &read_thresholds_hsx,
+};
+
+static const struct dimm_info dimm_bdx = {
+ .chan_rank_max = CHAN_RANK_MAX_ON_BDX,
+ .dimm_idx_max = DIMM_IDX_MAX_ON_BDX,
+ .min_peci_revision = 0x33,
+ .read_thresholds = &read_thresholds_hsx,
+};
+
+static const struct dimm_info dimm_bdxd = {
+ .chan_rank_max = CHAN_RANK_MAX_ON_BDXD,
+ .dimm_idx_max = DIMM_IDX_MAX_ON_BDXD,
+ .min_peci_revision = 0x33,
+ .read_thresholds = &read_thresholds_bdxd,
+};
+
+static const struct dimm_info dimm_skx = {
+ .chan_rank_max = CHAN_RANK_MAX_ON_SKX,
+ .dimm_idx_max = DIMM_IDX_MAX_ON_SKX,
+ .min_peci_revision = 0x33,
+ .read_thresholds = &read_thresholds_skx,
+};
+
+static const struct dimm_info dimm_icx = {
+ .chan_rank_max = CHAN_RANK_MAX_ON_ICX,
+ .dimm_idx_max = DIMM_IDX_MAX_ON_ICX,
+ .min_peci_revision = 0x40,
+ .read_thresholds = &read_thresholds_icx,
+};
+
+static const struct dimm_info dimm_icxd = {
+ .chan_rank_max = CHAN_RANK_MAX_ON_ICXD,
+ .dimm_idx_max = DIMM_IDX_MAX_ON_ICXD,
+ .min_peci_revision = 0x40,
+ .read_thresholds = &read_thresholds_icx,
+};
+
+static const struct auxiliary_device_id peci_dimmtemp_ids[] = {
+ {
+ .name = "peci_cpu.dimmtemp.hsx",
+ .driver_data = (kernel_ulong_t)&dimm_hsx,
+ },
+ {
+ .name = "peci_cpu.dimmtemp.bdx",
+ .driver_data = (kernel_ulong_t)&dimm_bdx,
+ },
+ {
+ .name = "peci_cpu.dimmtemp.bdxd",
+ .driver_data = (kernel_ulong_t)&dimm_bdxd,
+ },
+ {
+ .name = "peci_cpu.dimmtemp.skx",
+ .driver_data = (kernel_ulong_t)&dimm_skx,
+ },
+ {
+ .name = "peci_cpu.dimmtemp.icx",
+ .driver_data = (kernel_ulong_t)&dimm_icx,
+ },
+ {
+ .name = "peci_cpu.dimmtemp.icxd",
+ .driver_data = (kernel_ulong_t)&dimm_icxd,
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(auxiliary, peci_dimmtemp_ids);
+
+static struct auxiliary_driver peci_dimmtemp_driver = {
+ .probe = peci_dimmtemp_probe,
+ .id_table = peci_dimmtemp_ids,
+};
+
+module_auxiliary_driver(peci_dimmtemp_driver);
+
+MODULE_AUTHOR("Jae Hyun Yoo <jae.hyun.yoo@linux.intel.com>");
+MODULE_AUTHOR("Iwona Winiarska <iwona.winiarska@intel.com>");
+MODULE_DESCRIPTION("PECI dimmtemp driver");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(PECI_CPU);
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index ba0b3eb131f1..be1ad8ce54aa 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -297,6 +297,17 @@ config MFD_ASIC3
This driver supports the ASIC3 multifunction chip found on many
PDAs (mainly iPAQ and HTC based ones)
+config MFD_TN48M_CPLD
+ tristate "Delta Networks TN48M switch CPLD driver"
+ depends on I2C
+ depends on ARCH_MVEBU || COMPILE_TEST
+ select MFD_SIMPLE_MFD_I2C
+ help
+ Select this option to enable support for Delta Networks TN48M switch
+ CPLD. It consists of reset and GPIO drivers. The CPLD provides GPIOs
+ for the SFP slots as well as power supply related information.
+ SFP support depends on the GPIO driver being selected.
+
config PMIC_DA903X
bool "Dialog Semiconductor DA9030/DA9034 PMIC Support"
depends on I2C=y
diff --git a/drivers/mfd/simple-mfd-i2c.c b/drivers/mfd/simple-mfd-i2c.c
index 51536691ad9d..0d6a51ed6286 100644
--- a/drivers/mfd/simple-mfd-i2c.c
+++ b/drivers/mfd/simple-mfd-i2c.c
@@ -64,6 +64,7 @@ static int simple_mfd_i2c_probe(struct i2c_client *i2c)
static const struct of_device_id simple_mfd_i2c_of_match[] = {
{ .compatible = "kontron,sl28cpld" },
+ { .compatible = "delta,tn48m-cpld" },
{}
};
MODULE_DEVICE_TABLE(of, simple_mfd_i2c_of_match);
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 0f5a49fc7c9e..a2b26426efba 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -470,6 +470,18 @@ config HISI_HIKEY_USB
switching between the dual-role USB-C port and the USB-A host ports
using only one USB controller.
+config OPEN_DICE
+ tristate "Open Profile for DICE driver"
+ depends on OF_RESERVED_MEM
+ help
+ This driver exposes a DICE reserved memory region to userspace via
+ a character device. The memory region contains Compound Device
+ Identifiers (CDIs) generated by firmware as an output of the DICE
+ measured boot flow. Userspace can use the CDIs for remote attestation
+ and sealing.
+
+ If unsure, say N.
+
source "drivers/misc/c2port/Kconfig"
source "drivers/misc/eeprom/Kconfig"
source "drivers/misc/cb710/Kconfig"
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index a086197af544..70e800e9127f 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -59,3 +59,4 @@ obj-$(CONFIG_UACCE) += uacce/
obj-$(CONFIG_XILINX_SDFEC) += xilinx_sdfec.o
obj-$(CONFIG_HISI_HIKEY_USB) += hisi_hikey_usb.o
obj-$(CONFIG_HI6421V600_IRQ) += hi6421v600-irq.o
+obj-$(CONFIG_OPEN_DICE) += open-dice.o
diff --git a/drivers/misc/cardreader/alcor_pci.c b/drivers/misc/cardreader/alcor_pci.c
index de6d44a158bb..3f514d77a843 100644
--- a/drivers/misc/cardreader/alcor_pci.c
+++ b/drivers/misc/cardreader/alcor_pci.c
@@ -266,7 +266,7 @@ static int alcor_pci_probe(struct pci_dev *pdev,
if (!priv)
return -ENOMEM;
- ret = ida_simple_get(&alcor_pci_idr, 0, 0, GFP_KERNEL);
+ ret = ida_alloc(&alcor_pci_idr, GFP_KERNEL);
if (ret < 0)
return ret;
priv->id = ret;
@@ -280,7 +280,8 @@ static int alcor_pci_probe(struct pci_dev *pdev,
ret = pci_request_regions(pdev, DRV_NAME_ALCOR_PCI);
if (ret) {
dev_err(&pdev->dev, "Cannot request region\n");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto error_free_ida;
}
if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) {
@@ -324,6 +325,8 @@ static int alcor_pci_probe(struct pci_dev *pdev,
error_release_regions:
pci_release_regions(pdev);
+error_free_ida:
+ ida_free(&alcor_pci_idr, priv->id);
return ret;
}
@@ -337,7 +340,7 @@ static void alcor_pci_remove(struct pci_dev *pdev)
mfd_remove_devices(&pdev->dev);
- ida_simple_remove(&alcor_pci_idr, priv->id);
+ ida_free(&alcor_pci_idr, priv->id);
pci_release_regions(pdev);
pci_set_drvdata(pdev, NULL);
diff --git a/drivers/misc/cardreader/rtl8411.c b/drivers/misc/cardreader/rtl8411.c
index 4c5621b17a6f..06457e875a90 100644
--- a/drivers/misc/cardreader/rtl8411.c
+++ b/drivers/misc/cardreader/rtl8411.c
@@ -76,7 +76,7 @@ static void rtl8411b_fetch_vendor_settings(struct rtsx_pcr *pcr)
map_sd_drive(rtl8411b_reg_to_sd30_drive_sel_3v3(reg));
}
-static void rtl8411_force_power_down(struct rtsx_pcr *pcr, u8 pm_state)
+static void rtl8411_force_power_down(struct rtsx_pcr *pcr, u8 pm_state, bool runtime)
{
rtsx_pci_write_register(pcr, FPDCTL, 0x07, 0x07);
}
diff --git a/drivers/misc/cardreader/rts5209.c b/drivers/misc/cardreader/rts5209.c
index 29f5414072bf..52b0a476ba51 100644
--- a/drivers/misc/cardreader/rts5209.c
+++ b/drivers/misc/cardreader/rts5209.c
@@ -47,7 +47,7 @@ static void rts5209_fetch_vendor_settings(struct rtsx_pcr *pcr)
}
}
-static void rts5209_force_power_down(struct rtsx_pcr *pcr, u8 pm_state)
+static void rts5209_force_power_down(struct rtsx_pcr *pcr, u8 pm_state, bool runtime)
{
rtsx_pci_write_register(pcr, FPDCTL, 0x07, 0x07);
}
diff --git a/drivers/misc/cardreader/rts5227.c b/drivers/misc/cardreader/rts5227.c
index 4bcfbc9afbac..d676cf63a966 100644
--- a/drivers/misc/cardreader/rts5227.c
+++ b/drivers/misc/cardreader/rts5227.c
@@ -72,6 +72,8 @@ static void rts5227_fetch_vendor_settings(struct rtsx_pcr *pcr)
pci_read_config_dword(pdev, PCR_SETTING_REG2, &reg);
pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG2, reg);
+ if (CHK_PCI_PID(pcr, 0x522A))
+ pcr->rtd3_en = rtsx_reg_to_rtd3(reg);
if (rtsx_check_mmc_support(reg))
pcr->extra_caps |= EXTRA_CAPS_NO_MMC;
pcr->sd30_drive_sel_3v3 = rtsx_reg_to_sd30_drive_sel_3v3(reg);
@@ -171,6 +173,28 @@ static int rts5227_extra_init_hw(struct rtsx_pcr *pcr)
else
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG, 0x30, 0x00);
+ if (CHK_PCI_PID(pcr, 0x522A))
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, RTS522A_AUTOLOAD_CFG1,
+ CD_RESUME_EN_MASK, CD_RESUME_EN_MASK);
+
+ if (pcr->rtd3_en) {
+ if (CHK_PCI_PID(pcr, 0x522A)) {
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, RTS522A_PM_CTRL3, 0x01, 0x01);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, RTS522A_PME_FORCE_CTL, 0x30, 0x30);
+ } else {
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PM_CTRL3, 0x01, 0x01);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PME_FORCE_CTL, 0xFF, 0x33);
+ }
+ } else {
+ if (CHK_PCI_PID(pcr, 0x522A)) {
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, RTS522A_PM_CTRL3, 0x01, 0x00);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, RTS522A_PME_FORCE_CTL, 0x30, 0x20);
+ } else {
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PME_FORCE_CTL, 0xFF, 0x30);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PM_CTRL3, 0x01, 0x00);
+ }
+ }
+
if (option->force_clkreq_0)
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG,
FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_LOW);
@@ -438,6 +462,28 @@ static int rts522a_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
return rtsx_pci_send_cmd(pcr, 100);
}
+static void rts522a_force_power_down(struct rtsx_pcr *pcr, u8 pm_state, bool runtime)
+{
+ /* Set relink_time to 0 */
+ rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 1, MASK_8_BIT_DEF, 0);
+ rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 2, MASK_8_BIT_DEF, 0);
+ rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 3,
+ RELINK_TIME_MASK, 0);
+
+ rtsx_pci_write_register(pcr, RTS522A_PM_CTRL3,
+ D3_DELINK_MODE_EN, D3_DELINK_MODE_EN);
+
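+ /* Runtime suspend keeps card-detect resume enabled; a full power down also clears it and the PM/PME bits below. */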
+ if (!runtime) {
+ rtsx_pci_write_register(pcr, RTS522A_AUTOLOAD_CFG1,
+ CD_RESUME_EN_MASK, 0);
+ rtsx_pci_write_register(pcr, RTS522A_PM_CTRL3, 0x01, 0x00);
+ rtsx_pci_write_register(pcr, RTS522A_PME_FORCE_CTL, 0x30, 0x20);
+ }
+
+ rtsx_pci_write_register(pcr, FPDCTL, ALL_POWER_DOWN, ALL_POWER_DOWN);
+}
+
+
static void rts522a_set_l1off_cfg_sub_d0(struct rtsx_pcr *pcr, int active)
{
struct rtsx_cr_option *option = &pcr->option;
@@ -473,6 +519,7 @@ static const struct pcr_ops rts522a_pcr_ops = {
.card_power_on = rts5227_card_power_on,
.card_power_off = rts5227_card_power_off,
.switch_output_voltage = rts522a_switch_output_voltage,
+ .force_power_down = rts522a_force_power_down,
.cd_deglitch = NULL,
.conv_clk_and_div_n = NULL,
.set_l1off_cfg_sub_d0 = rts522a_set_l1off_cfg_sub_d0,
diff --git a/drivers/misc/cardreader/rts5228.c b/drivers/misc/cardreader/rts5228.c
index ffc128278613..af581f4f74d1 100644
--- a/drivers/misc/cardreader/rts5228.c
+++ b/drivers/misc/cardreader/rts5228.c
@@ -91,7 +91,7 @@ static int rts5228_optimize_phy(struct rtsx_pcr *pcr)
return rtsx_pci_write_phy_register(pcr, 0x07, 0x8F40);
}
-static void rts5228_force_power_down(struct rtsx_pcr *pcr, u8 pm_state)
+static void rts5228_force_power_down(struct rtsx_pcr *pcr, u8 pm_state, bool runtime)
{
/* Set relink_time to 0 */
rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 1, MASK_8_BIT_DEF, 0);
@@ -102,6 +102,14 @@ static void rts5228_force_power_down(struct rtsx_pcr *pcr, u8 pm_state)
rtsx_pci_write_register(pcr, pcr->reg_pm_ctrl3,
D3_DELINK_MODE_EN, D3_DELINK_MODE_EN);
+ if (!runtime) {
+ rtsx_pci_write_register(pcr, RTS5228_AUTOLOAD_CFG1,
+ CD_RESUME_EN_MASK, 0);
+ rtsx_pci_write_register(pcr, pcr->reg_pm_ctrl3, 0x01, 0x00);
+ rtsx_pci_write_register(pcr, RTS5228_REG_PME_FORCE_CTL,
+ FORCE_PM_CONTROL | FORCE_PM_VALUE, FORCE_PM_CONTROL);
+ }
+
rtsx_pci_write_register(pcr, FPDCTL,
SSC_POWER_DOWN, SSC_POWER_DOWN);
}
@@ -480,9 +488,18 @@ static int rts5228_extra_init_hw(struct rtsx_pcr *pcr)
FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_HIGH);
rtsx_pci_write_register(pcr, PWD_SUSPEND_EN, 0xFF, 0xFB);
- rtsx_pci_write_register(pcr, pcr->reg_pm_ctrl3, 0x10, 0x00);
+
+ if (pcr->rtd3_en) {
+ rtsx_pci_write_register(pcr, pcr->reg_pm_ctrl3, 0x01, 0x01);
rtsx_pci_write_register(pcr, RTS5228_REG_PME_FORCE_CTL,
- FORCE_PM_CONTROL | FORCE_PM_VALUE, FORCE_PM_CONTROL);
+ FORCE_PM_CONTROL | FORCE_PM_VALUE,
+ FORCE_PM_CONTROL | FORCE_PM_VALUE);
+ } else {
+ rtsx_pci_write_register(pcr, pcr->reg_pm_ctrl3, 0x01, 0x00);
+ rtsx_pci_write_register(pcr, RTS5228_REG_PME_FORCE_CTL,
+ FORCE_PM_CONTROL | FORCE_PM_VALUE, FORCE_PM_CONTROL);
+ }
+ rtsx_pci_write_register(pcr, pcr->reg_pm_ctrl3, D3_DELINK_MODE_EN, 0x00);
return 0;
}
diff --git a/drivers/misc/cardreader/rts5229.c b/drivers/misc/cardreader/rts5229.c
index c748eaf1ec1f..b0edd8006d52 100644
--- a/drivers/misc/cardreader/rts5229.c
+++ b/drivers/misc/cardreader/rts5229.c
@@ -44,7 +44,7 @@ static void rts5229_fetch_vendor_settings(struct rtsx_pcr *pcr)
map_sd_drive(rtsx_reg_to_sd30_drive_sel_3v3(reg));
}
-static void rts5229_force_power_down(struct rtsx_pcr *pcr, u8 pm_state)
+static void rts5229_force_power_down(struct rtsx_pcr *pcr, u8 pm_state, bool runtime)
{
rtsx_pci_write_register(pcr, FPDCTL, 0x03, 0x03);
}
diff --git a/drivers/misc/cardreader/rts5249.c b/drivers/misc/cardreader/rts5249.c
index 53f3a1f45c4a..91d240dd68fa 100644
--- a/drivers/misc/cardreader/rts5249.c
+++ b/drivers/misc/cardreader/rts5249.c
@@ -74,7 +74,8 @@ static void rtsx_base_fetch_vendor_settings(struct rtsx_pcr *pcr)
pci_read_config_dword(pdev, PCR_SETTING_REG2, &reg);
pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG2, reg);
- pcr->rtd3_en = rtsx_reg_to_rtd3_uhsii(reg);
+ if (CHK_PCI_PID(pcr, PID_524A) || CHK_PCI_PID(pcr, PID_525A))
+ pcr->rtd3_en = rtsx_reg_to_rtd3_uhsii(reg);
if (rtsx_check_mmc_support(reg))
pcr->extra_caps |= EXTRA_CAPS_NO_MMC;
@@ -143,6 +144,27 @@ static int rts5249_init_from_hw(struct rtsx_pcr *pcr)
return 0;
}
+static void rts52xa_force_power_down(struct rtsx_pcr *pcr, u8 pm_state, bool runtime)
+{
+ /* Set relink_time to 0 */
+ rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 1, MASK_8_BIT_DEF, 0);
+ rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 2, MASK_8_BIT_DEF, 0);
+ rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 3,
+ RELINK_TIME_MASK, 0);
+
+ rtsx_pci_write_register(pcr, RTS524A_PM_CTRL3,
+ D3_DELINK_MODE_EN, D3_DELINK_MODE_EN);
+
+ if (!runtime) {
+ rtsx_pci_write_register(pcr, RTS524A_AUTOLOAD_CFG1,
+ CD_RESUME_EN_MASK, 0);
+ rtsx_pci_write_register(pcr, RTS524A_PM_CTRL3, 0x01, 0x00);
+ rtsx_pci_write_register(pcr, RTS524A_PME_FORCE_CTL, 0x30, 0x20);
+ }
+
+ rtsx_pci_write_register(pcr, FPDCTL, ALL_POWER_DOWN, ALL_POWER_DOWN);
+}
+
static void rts52xa_save_content_from_efuse(struct rtsx_pcr *pcr)
{
u8 cnt, sv;
@@ -281,8 +303,11 @@ static int rts5249_extra_init_hw(struct rtsx_pcr *pcr)
rtsx_pci_send_cmd(pcr, CMD_TIMEOUT_DEF);
- if (CHK_PCI_PID(pcr, PID_524A) || CHK_PCI_PID(pcr, PID_525A))
+ if (CHK_PCI_PID(pcr, PID_524A) || CHK_PCI_PID(pcr, PID_525A)) {
rtsx_pci_write_register(pcr, REG_VREF, PWD_SUSPND_EN, PWD_SUSPND_EN);
+ rtsx_pci_write_register(pcr, RTS524A_AUTOLOAD_CFG1,
+ CD_RESUME_EN_MASK, CD_RESUME_EN_MASK);
+ }
if (pcr->rtd3_en) {
if (CHK_PCI_PID(pcr, PID_524A) || CHK_PCI_PID(pcr, PID_525A)) {
@@ -724,6 +749,7 @@ static const struct pcr_ops rts524a_pcr_ops = {
.card_power_on = rtsx_base_card_power_on,
.card_power_off = rtsx_base_card_power_off,
.switch_output_voltage = rtsx_base_switch_output_voltage,
+ .force_power_down = rts52xa_force_power_down,
.set_l1off_cfg_sub_d0 = rts5250_set_l1off_cfg_sub_d0,
};
@@ -841,6 +867,7 @@ static const struct pcr_ops rts525a_pcr_ops = {
.card_power_on = rts525a_card_power_on,
.card_power_off = rtsx_base_card_power_off,
.switch_output_voltage = rts525a_switch_output_voltage,
+ .force_power_down = rts52xa_force_power_down,
.set_l1off_cfg_sub_d0 = rts5250_set_l1off_cfg_sub_d0,
};
diff --git a/drivers/misc/cardreader/rts5261.c b/drivers/misc/cardreader/rts5261.c
index 1fd4e0e50730..a77585ab0f30 100644
--- a/drivers/misc/cardreader/rts5261.c
+++ b/drivers/misc/cardreader/rts5261.c
@@ -91,7 +91,7 @@ static void rtsx5261_fetch_vendor_settings(struct rtsx_pcr *pcr)
pcr->sd30_drive_sel_3v3 = rts5261_reg_to_sd30_drive_sel_3v3(reg);
}
-static void rts5261_force_power_down(struct rtsx_pcr *pcr, u8 pm_state)
+static void rts5261_force_power_down(struct rtsx_pcr *pcr, u8 pm_state, bool runtime)
{
/* Set relink_time to 0 */
rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 1, MASK_8_BIT_DEF, 0);
@@ -103,6 +103,24 @@ static void rts5261_force_power_down(struct rtsx_pcr *pcr, u8 pm_state)
rtsx_pci_write_register(pcr, pcr->reg_pm_ctrl3,
D3_DELINK_MODE_EN, D3_DELINK_MODE_EN);
+ if (!runtime) {
+ rtsx_pci_write_register(pcr, RTS5261_AUTOLOAD_CFG1,
+ CD_RESUME_EN_MASK, 0);
+ rtsx_pci_write_register(pcr, pcr->reg_pm_ctrl3, 0x01, 0x00);
+ rtsx_pci_write_register(pcr, RTS5261_REG_PME_FORCE_CTL,
+ FORCE_PM_CONTROL | FORCE_PM_VALUE, FORCE_PM_CONTROL);
+
+ } else {
+ rtsx_pci_write_register(pcr, RTS5261_REG_PME_FORCE_CTL,
+ FORCE_PM_CONTROL | FORCE_PM_VALUE, 0);
+
+ rtsx_pci_write_register(pcr, RTS5261_FW_CTL,
+ RTS5261_INFORM_RTD3_COLD, RTS5261_INFORM_RTD3_COLD);
+ rtsx_pci_write_register(pcr, RTS5261_AUTOLOAD_CFG4,
+ RTS5261_FORCE_PRSNT_LOW, RTS5261_FORCE_PRSNT_LOW);
+
+ }
+
rtsx_pci_write_register(pcr, RTS5261_REG_FPDCTL,
SSC_POWER_DOWN, SSC_POWER_DOWN);
}
@@ -536,9 +554,18 @@ static int rts5261_extra_init_hw(struct rtsx_pcr *pcr)
FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_HIGH);
rtsx_pci_write_register(pcr, PWD_SUSPEND_EN, 0xFF, 0xFB);
- rtsx_pci_write_register(pcr, pcr->reg_pm_ctrl3, 0x10, 0x00);
- rtsx_pci_write_register(pcr, RTS5261_REG_PME_FORCE_CTL,
- FORCE_PM_CONTROL | FORCE_PM_VALUE, FORCE_PM_CONTROL);
+
+ if (pcr->rtd3_en) {
+ rtsx_pci_write_register(pcr, pcr->reg_pm_ctrl3, 0x01, 0x01);
+ rtsx_pci_write_register(pcr, RTS5261_REG_PME_FORCE_CTL,
+ FORCE_PM_CONTROL | FORCE_PM_VALUE,
+ FORCE_PM_CONTROL | FORCE_PM_VALUE);
+ } else {
+ rtsx_pci_write_register(pcr, pcr->reg_pm_ctrl3, 0x01, 0x00);
+ rtsx_pci_write_register(pcr, RTS5261_REG_PME_FORCE_CTL,
+ FORCE_PM_CONTROL | FORCE_PM_VALUE, FORCE_PM_CONTROL);
+ }
+ rtsx_pci_write_register(pcr, pcr->reg_pm_ctrl3, D3_DELINK_MODE_EN, 0x00);
/* Clear Enter RTD3_cold Information*/
rtsx_pci_write_register(pcr, RTS5261_FW_CTL,
diff --git a/drivers/misc/cardreader/rtsx_pcr.c b/drivers/misc/cardreader/rtsx_pcr.c
index 6ac509c1821c..2a2619e3c72c 100644
--- a/drivers/misc/cardreader/rtsx_pcr.c
+++ b/drivers/misc/cardreader/rtsx_pcr.c
@@ -152,20 +152,12 @@ void rtsx_pci_start_run(struct rtsx_pcr *pcr)
if (pcr->remove_pci)
return;
- if (pcr->rtd3_en)
- if (pcr->is_runtime_suspended) {
- pm_runtime_get(&(pcr->pci->dev));
- pcr->is_runtime_suspended = false;
- }
-
if (pcr->state != PDEV_STAT_RUN) {
pcr->state = PDEV_STAT_RUN;
if (pcr->ops->enable_auto_blink)
pcr->ops->enable_auto_blink(pcr);
rtsx_pm_full_on(pcr);
}
-
- mod_delayed_work(system_wq, &pcr->idle_work, msecs_to_jiffies(200));
}
EXPORT_SYMBOL_GPL(rtsx_pci_start_run);
@@ -1062,73 +1054,7 @@ static int rtsx_pci_acquire_irq(struct rtsx_pcr *pcr)
return 0;
}
-static void rtsx_enable_aspm(struct rtsx_pcr *pcr)
-{
- if (pcr->ops->set_aspm)
- pcr->ops->set_aspm(pcr, true);
- else
- rtsx_comm_set_aspm(pcr, true);
-}
-
-static void rtsx_comm_pm_power_saving(struct rtsx_pcr *pcr)
-{
- struct rtsx_cr_option *option = &pcr->option;
-
- if (option->ltr_enabled) {
- u32 latency = option->ltr_l1off_latency;
-
- if (rtsx_check_dev_flag(pcr, L1_SNOOZE_TEST_EN))
- mdelay(option->l1_snooze_delay);
-
- rtsx_set_ltr_latency(pcr, latency);
- }
-
- if (rtsx_check_dev_flag(pcr, LTR_L1SS_PWR_GATE_EN))
- rtsx_set_l1off_sub_cfg_d0(pcr, 0);
-
- rtsx_enable_aspm(pcr);
-}
-
-static void rtsx_pm_power_saving(struct rtsx_pcr *pcr)
-{
- rtsx_comm_pm_power_saving(pcr);
-}
-
-static void rtsx_pci_rtd3_work(struct work_struct *work)
-{
- struct delayed_work *dwork = to_delayed_work(work);
- struct rtsx_pcr *pcr = container_of(dwork, struct rtsx_pcr, rtd3_work);
-
- pcr_dbg(pcr, "--> %s\n", __func__);
- if (!pcr->is_runtime_suspended)
- pm_runtime_put(&(pcr->pci->dev));
-}
-
-static void rtsx_pci_idle_work(struct work_struct *work)
-{
- struct delayed_work *dwork = to_delayed_work(work);
- struct rtsx_pcr *pcr = container_of(dwork, struct rtsx_pcr, idle_work);
-
- pcr_dbg(pcr, "--> %s\n", __func__);
-
- mutex_lock(&pcr->pcr_mutex);
-
- pcr->state = PDEV_STAT_IDLE;
-
- if (pcr->ops->disable_auto_blink)
- pcr->ops->disable_auto_blink(pcr);
- if (pcr->ops->turn_off_led)
- pcr->ops->turn_off_led(pcr);
-
- rtsx_pm_power_saving(pcr);
-
- mutex_unlock(&pcr->pcr_mutex);
-
- if (pcr->rtd3_en)
- mod_delayed_work(system_wq, &pcr->rtd3_work, msecs_to_jiffies(10000));
-}
-
-static void rtsx_base_force_power_down(struct rtsx_pcr *pcr, u8 pm_state)
+static void rtsx_base_force_power_down(struct rtsx_pcr *pcr)
{
/* Set relink_time to 0 */
rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 1, MASK_8_BIT_DEF, 0);
@@ -1142,7 +1068,7 @@ static void rtsx_base_force_power_down(struct rtsx_pcr *pcr, u8 pm_state)
rtsx_pci_write_register(pcr, FPDCTL, ALL_POWER_DOWN, ALL_POWER_DOWN);
}
-static void __maybe_unused rtsx_pci_power_off(struct rtsx_pcr *pcr, u8 pm_state)
+static void __maybe_unused rtsx_pci_power_off(struct rtsx_pcr *pcr, u8 pm_state, bool runtime)
{
if (pcr->ops->turn_off_led)
pcr->ops->turn_off_led(pcr);
@@ -1154,9 +1080,9 @@ static void __maybe_unused rtsx_pci_power_off(struct rtsx_pcr *pcr, u8 pm_state)
rtsx_pci_write_register(pcr, HOST_SLEEP_STATE, 0x03, pm_state);
if (pcr->ops->force_power_down)
- pcr->ops->force_power_down(pcr, pm_state);
+ pcr->ops->force_power_down(pcr, pm_state, runtime);
else
- rtsx_base_force_power_down(pcr, pm_state);
+ rtsx_base_force_power_down(pcr);
}
void rtsx_pci_enable_ocp(struct rtsx_pcr *pcr)
@@ -1598,7 +1524,6 @@ static int rtsx_pci_probe(struct pci_dev *pcidev,
pcr->card_inserted = 0;
pcr->card_removed = 0;
INIT_DELAYED_WORK(&pcr->carddet_work, rtsx_pci_card_detect);
- INIT_DELAYED_WORK(&pcr->idle_work, rtsx_pci_idle_work);
pcr->msi_en = msi_en;
if (pcr->msi_en) {
@@ -1623,20 +1548,14 @@ static int rtsx_pci_probe(struct pci_dev *pcidev,
rtsx_pcr_cells[i].pdata_size = sizeof(*handle);
}
- if (pcr->rtd3_en) {
- INIT_DELAYED_WORK(&pcr->rtd3_work, rtsx_pci_rtd3_work);
- pm_runtime_allow(&pcidev->dev);
- pm_runtime_enable(&pcidev->dev);
- pcr->is_runtime_suspended = false;
- }
-
ret = mfd_add_devices(&pcidev->dev, pcr->id, rtsx_pcr_cells,
ARRAY_SIZE(rtsx_pcr_cells), NULL, 0, NULL);
if (ret < 0)
goto free_slots;
- schedule_delayed_work(&pcr->idle_work, msecs_to_jiffies(200));
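+ /* Setup is complete; allow runtime PM so the reader can be suspended when idle. */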
+ pm_runtime_allow(&pcidev->dev);
+ pm_runtime_put(&pcidev->dev);
return 0;
@@ -1668,11 +1587,11 @@ static void rtsx_pci_remove(struct pci_dev *pcidev)
struct pcr_handle *handle = pci_get_drvdata(pcidev);
struct rtsx_pcr *pcr = handle->pcr;
- if (pcr->rtd3_en)
- pm_runtime_get_noresume(&pcr->pci->dev);
-
pcr->remove_pci = true;
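+ /* Resume the device and block further runtime PM before tearing it down. */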
+ pm_runtime_get_sync(&pcidev->dev);
+ pm_runtime_forbid(&pcidev->dev);
+
/* Disable interrupts at the pcr level */
spin_lock_irq(&pcr->lock);
rtsx_pci_writel(pcr, RTSX_BIER, 0);
@@ -1680,9 +1599,6 @@ static void rtsx_pci_remove(struct pci_dev *pcidev)
spin_unlock_irq(&pcr->lock);
cancel_delayed_work_sync(&pcr->carddet_work);
- cancel_delayed_work_sync(&pcr->idle_work);
- if (pcr->rtd3_en)
- cancel_delayed_work_sync(&pcr->rtd3_work);
mfd_remove_devices(&pcidev->dev);
@@ -1700,11 +1616,6 @@ static void rtsx_pci_remove(struct pci_dev *pcidev)
idr_remove(&rtsx_pci_idr, pcr->id);
spin_unlock(&rtsx_pci_lock);
- if (pcr->rtd3_en) {
- pm_runtime_disable(&pcr->pci->dev);
- pm_runtime_put_noidle(&pcr->pci->dev);
- }
-
kfree(pcr->slots);
kfree(pcr);
kfree(handle);
@@ -1717,22 +1628,16 @@ static void rtsx_pci_remove(struct pci_dev *pcidev)
static int __maybe_unused rtsx_pci_suspend(struct device *dev_d)
{
struct pci_dev *pcidev = to_pci_dev(dev_d);
- struct pcr_handle *handle;
- struct rtsx_pcr *pcr;
+ struct pcr_handle *handle = pci_get_drvdata(pcidev);
+ struct rtsx_pcr *pcr = handle->pcr;
dev_dbg(&(pcidev->dev), "--> %s\n", __func__);
- handle = pci_get_drvdata(pcidev);
- pcr = handle->pcr;
-
- cancel_delayed_work(&pcr->carddet_work);
- cancel_delayed_work(&pcr->idle_work);
+ cancel_delayed_work_sync(&pcr->carddet_work);
mutex_lock(&pcr->pcr_mutex);
- rtsx_pci_power_off(pcr, HOST_ENTER_S3);
-
- device_wakeup_disable(dev_d);
+ rtsx_pci_power_off(pcr, HOST_ENTER_S3, false);
mutex_unlock(&pcr->pcr_mutex);
return 0;
@@ -1741,15 +1646,12 @@ static int __maybe_unused rtsx_pci_suspend(struct device *dev_d)
static int __maybe_unused rtsx_pci_resume(struct device *dev_d)
{
struct pci_dev *pcidev = to_pci_dev(dev_d);
- struct pcr_handle *handle;
- struct rtsx_pcr *pcr;
+ struct pcr_handle *handle = pci_get_drvdata(pcidev);
+ struct rtsx_pcr *pcr = handle->pcr;
int ret = 0;
dev_dbg(&(pcidev->dev), "--> %s\n", __func__);
- handle = pci_get_drvdata(pcidev);
- pcr = handle->pcr;
-
mutex_lock(&pcr->pcr_mutex);
ret = rtsx_pci_write_register(pcr, HOST_SLEEP_STATE, 0x03, 0x00);
@@ -1760,8 +1662,6 @@ static int __maybe_unused rtsx_pci_resume(struct device *dev_d)
if (ret)
goto out;
- schedule_delayed_work(&pcr->idle_work, msecs_to_jiffies(200));
-
out:
mutex_unlock(&pcr->pcr_mutex);
return ret;
@@ -1769,16 +1669,46 @@ out:
#ifdef CONFIG_PM
+static void rtsx_enable_aspm(struct rtsx_pcr *pcr)
+{
+ if (pcr->ops->set_aspm)
+ pcr->ops->set_aspm(pcr, true);
+ else
+ rtsx_comm_set_aspm(pcr, true);
+}
+
+static void rtsx_comm_pm_power_saving(struct rtsx_pcr *pcr)
+{
+ struct rtsx_cr_option *option = &pcr->option;
+
+ if (option->ltr_enabled) {
+ u32 latency = option->ltr_l1off_latency;
+
+ if (rtsx_check_dev_flag(pcr, L1_SNOOZE_TEST_EN))
+ mdelay(option->l1_snooze_delay);
+
+ rtsx_set_ltr_latency(pcr, latency);
+ }
+
+ if (rtsx_check_dev_flag(pcr, LTR_L1SS_PWR_GATE_EN))
+ rtsx_set_l1off_sub_cfg_d0(pcr, 0);
+
+ rtsx_enable_aspm(pcr);
+}
+
+static void rtsx_pm_power_saving(struct rtsx_pcr *pcr)
+{
+ rtsx_comm_pm_power_saving(pcr);
+}
+
static void rtsx_pci_shutdown(struct pci_dev *pcidev)
{
- struct pcr_handle *handle;
- struct rtsx_pcr *pcr;
+ struct pcr_handle *handle = pci_get_drvdata(pcidev);
+ struct rtsx_pcr *pcr = handle->pcr;
dev_dbg(&(pcidev->dev), "--> %s\n", __func__);
- handle = pci_get_drvdata(pcidev);
- pcr = handle->pcr;
- rtsx_pci_power_off(pcr, HOST_ENTER_S1);
+ rtsx_pci_power_off(pcr, HOST_ENTER_S1, false);
pci_disable_device(pcidev);
free_irq(pcr->irq, (void *)pcr);
@@ -1786,47 +1716,63 @@ static void rtsx_pci_shutdown(struct pci_dev *pcidev)
pci_disable_msi(pcr->pci);
}
+static int rtsx_pci_runtime_idle(struct device *device)
+{
+ struct pci_dev *pcidev = to_pci_dev(device);
+ struct pcr_handle *handle = pci_get_drvdata(pcidev);
+ struct rtsx_pcr *pcr = handle->pcr;
+
+ dev_dbg(device, "--> %s\n", __func__);
+
+ mutex_lock(&pcr->pcr_mutex);
+
+ pcr->state = PDEV_STAT_IDLE;
+
+ if (pcr->ops->disable_auto_blink)
+ pcr->ops->disable_auto_blink(pcr);
+ if (pcr->ops->turn_off_led)
+ pcr->ops->turn_off_led(pcr);
+
+ rtsx_pm_power_saving(pcr);
+
+ mutex_unlock(&pcr->pcr_mutex);
+
+ if (pcr->rtd3_en)
+ pm_schedule_suspend(device, 10000);
+
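+ /* Return -EBUSY so the PM core does not suspend right away; with RTD3 enabled, the pm_schedule_suspend() above queues a suspend 10 seconds later. */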
+ return -EBUSY;
+}
+
static int rtsx_pci_runtime_suspend(struct device *device)
{
struct pci_dev *pcidev = to_pci_dev(device);
- struct pcr_handle *handle;
- struct rtsx_pcr *pcr;
+ struct pcr_handle *handle = pci_get_drvdata(pcidev);
+ struct rtsx_pcr *pcr = handle->pcr;
- handle = pci_get_drvdata(pcidev);
- pcr = handle->pcr;
- dev_dbg(&(pcidev->dev), "--> %s\n", __func__);
+ dev_dbg(device, "--> %s\n", __func__);
- cancel_delayed_work(&pcr->carddet_work);
- cancel_delayed_work(&pcr->rtd3_work);
- cancel_delayed_work(&pcr->idle_work);
+ cancel_delayed_work_sync(&pcr->carddet_work);
mutex_lock(&pcr->pcr_mutex);
- rtsx_pci_power_off(pcr, HOST_ENTER_S3);
+ rtsx_pci_power_off(pcr, HOST_ENTER_S3, true);
mutex_unlock(&pcr->pcr_mutex);
- pcr->is_runtime_suspended = true;
-
return 0;
}
static int rtsx_pci_runtime_resume(struct device *device)
{
struct pci_dev *pcidev = to_pci_dev(device);
- struct pcr_handle *handle;
- struct rtsx_pcr *pcr;
+ struct pcr_handle *handle = pci_get_drvdata(pcidev);
+ struct rtsx_pcr *pcr = handle->pcr;
- handle = pci_get_drvdata(pcidev);
- pcr = handle->pcr;
- dev_dbg(&(pcidev->dev), "--> %s\n", __func__);
+ dev_dbg(device, "--> %s\n", __func__);
mutex_lock(&pcr->pcr_mutex);
rtsx_pci_write_register(pcr, HOST_SLEEP_STATE, 0x03, 0x00);
- if (pcr->ops->fetch_vendor_settings)
- pcr->ops->fetch_vendor_settings(pcr);
-
rtsx_pci_init_hw(pcr);
if (pcr->slots[RTSX_SD_CARD].p_dev != NULL) {
@@ -1834,8 +1780,6 @@ static int rtsx_pci_runtime_resume(struct device *device)
pcr->slots[RTSX_SD_CARD].p_dev);
}
- schedule_delayed_work(&pcr->idle_work, msecs_to_jiffies(200));
-
mutex_unlock(&pcr->pcr_mutex);
return 0;
}
@@ -1850,7 +1794,7 @@ static int rtsx_pci_runtime_resume(struct device *device)
static const struct dev_pm_ops rtsx_pci_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(rtsx_pci_suspend, rtsx_pci_resume)
- SET_RUNTIME_PM_OPS(rtsx_pci_runtime_suspend, rtsx_pci_runtime_resume, NULL)
+ SET_RUNTIME_PM_OPS(rtsx_pci_runtime_suspend, rtsx_pci_runtime_resume, rtsx_pci_runtime_idle)
};
static struct pci_driver rtsx_pci_driver = {
diff --git a/drivers/misc/cardreader/rtsx_pcr.h b/drivers/misc/cardreader/rtsx_pcr.h
index daf057c4eea6..37d1f316ae17 100644
--- a/drivers/misc/cardreader/rtsx_pcr.h
+++ b/drivers/misc/cardreader/rtsx_pcr.h
@@ -15,6 +15,8 @@
#define MIN_DIV_N_PCR 80
#define MAX_DIV_N_PCR 208
+#define RTS522A_PME_FORCE_CTL 0xFF78
+#define RTS522A_AUTOLOAD_CFG1 0xFF7C
#define RTS522A_PM_CTRL3 0xFF7E
#define RTS524A_PME_FORCE_CTL 0xFF78
@@ -25,6 +27,7 @@
#define REG_EFUSE_POWEROFF 0x00
#define RTS5250_CLK_CFG3 0xFF79
#define RTS525A_CFG_MEM_PD 0xF0
+#define RTS524A_AUTOLOAD_CFG1 0xFF7C
#define RTS524A_PM_CTRL3 0xFF7E
#define RTS525A_BIOS_CFG 0xFF2D
#define RTS525A_LOAD_BIOS_FLAG 0x01
diff --git a/drivers/misc/eeprom/at25.c b/drivers/misc/eeprom/at25.c
index bee727ed98db..91f96abbb3f9 100644
--- a/drivers/misc/eeprom/at25.c
+++ b/drivers/misc/eeprom/at25.c
@@ -309,7 +309,7 @@ static int at25_fw_to_chip(struct device *dev, struct spi_eeprom *chip)
u32 val;
int err;
- strncpy(chip->name, "at25", sizeof(chip->name));
+ strscpy(chip->name, "at25", sizeof(chip->name));
err = device_property_read_u32(dev, "size", &val);
if (err)
@@ -370,7 +370,7 @@ static int at25_fram_to_chip(struct device *dev, struct spi_eeprom *chip)
u8 id[FM25_ID_LEN];
int i;
- strncpy(chip->name, "fm25", sizeof(chip->name));
+ strscpy(chip->name, "fm25", sizeof(chip->name));
/* Get ID of chip */
fm25_aux_read(at25, id, FM25_RDID, FM25_ID_LEN);
diff --git a/drivers/misc/lkdtm/fortify.c b/drivers/misc/lkdtm/fortify.c
index d06458a4858e..ab33bb5e2e7a 100644
--- a/drivers/misc/lkdtm/fortify.c
+++ b/drivers/misc/lkdtm/fortify.c
@@ -44,14 +44,14 @@ void lkdtm_FORTIFIED_SUBOBJECT(void)
strscpy(src, "over ten bytes", size);
size = strlen(src) + 1;
- pr_info("trying to strcpy past the end of a member of a struct\n");
+ pr_info("trying to strncpy past the end of a member of a struct\n");
/*
- * memcpy(target.a, src, 20); will hit a compile error because the
+ * strncpy(target.a, src, 20); will hit a compile error because the
* compiler knows at build time that target.a < 20 bytes. Use a
* volatile to force a runtime error.
*/
- memcpy(target.a, src, size);
+ strncpy(target.a, src, size);
/* Store result to global to prevent the code from being eliminated */
fortify_scratch_space = target.a[3];
diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c
index 06734670a732..31264ab2eb13 100644
--- a/drivers/misc/mei/client.c
+++ b/drivers/misc/mei/client.c
@@ -2148,6 +2148,7 @@ void mei_cl_all_disconnect(struct mei_device *dev)
list_for_each_entry(cl, &dev->file_list, link)
mei_cl_set_disconnected(cl);
}
+EXPORT_SYMBOL_GPL(mei_cl_all_disconnect);
static struct mei_cl *mei_cl_dma_map_find(struct mei_device *dev, u8 buffer_id)
{
diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
index 67bb6a25fd0a..888c27bc3f1a 100644
--- a/drivers/misc/mei/hw-me-regs.h
+++ b/drivers/misc/mei/hw-me-regs.h
@@ -120,6 +120,7 @@
#define PCI_CFG_HFS_2 0x48
#define PCI_CFG_HFS_3 0x60
# define PCI_CFG_HFS_3_FW_SKU_MSK 0x00000070
+# define PCI_CFG_HFS_3_FW_SKU_IGN 0x00000000
# define PCI_CFG_HFS_3_FW_SKU_SPS 0x00000060
#define PCI_CFG_HFS_4 0x64
#define PCI_CFG_HFS_5 0x68
diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c
index d3a6c0728645..719fee9af156 100644
--- a/drivers/misc/mei/hw-me.c
+++ b/drivers/misc/mei/hw-me.c
@@ -1257,7 +1257,11 @@ irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
/* check if ME wants a reset */
if (!mei_hw_is_ready(dev) && dev->dev_state != MEI_DEV_RESETTING) {
dev_warn(dev->dev, "FW not ready: resetting.\n");
- schedule_work(&dev->reset_work);
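+ /* If the device is powering down, just disconnect the clients; skip the reset entirely once it is disabled. */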
+ if (dev->dev_state == MEI_DEV_POWERING_DOWN ||
+ dev->dev_state == MEI_DEV_POWER_DOWN)
+ mei_cl_all_disconnect(dev);
+ else if (dev->dev_state != MEI_DEV_DISABLED)
+ schedule_work(&dev->reset_work);
goto end;
}
@@ -1289,12 +1293,14 @@ irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
if (rets == -ENODATA)
break;
- if (rets &&
- (dev->dev_state != MEI_DEV_RESETTING &&
- dev->dev_state != MEI_DEV_POWER_DOWN)) {
- dev_err(dev->dev, "mei_irq_read_handler ret = %d.\n",
- rets);
- schedule_work(&dev->reset_work);
+ if (rets) {
+ dev_err(dev->dev, "mei_irq_read_handler ret = %d, state = %d.\n",
+ rets, dev->dev_state);
+ if (dev->dev_state != MEI_DEV_RESETTING &&
+ dev->dev_state != MEI_DEV_DISABLED &&
+ dev->dev_state != MEI_DEV_POWERING_DOWN &&
+ dev->dev_state != MEI_DEV_POWER_DOWN)
+ schedule_work(&dev->reset_work);
goto end;
}
}
@@ -1405,16 +1411,16 @@ static bool mei_me_fw_type_sps_4(const struct pci_dev *pdev)
.quirk_probe = mei_me_fw_type_sps_4
/**
- * mei_me_fw_type_sps() - check for sps sku
+ * mei_me_fw_type_sps_ign() - check for sps or ign sku
*
- * Read ME FW Status register to check for SPS Firmware.
- * The SPS FW is only signaled in pci function 0
+ * Read ME FW Status register to check for SPS or IGN Firmware.
+ * The SPS/IGN FW is only signaled in pci function 0
*
* @pdev: pci device
*
- * Return: true in case of SPS firmware
+ * Return: true in case of SPS/IGN firmware
*/
-static bool mei_me_fw_type_sps(const struct pci_dev *pdev)
+static bool mei_me_fw_type_sps_ign(const struct pci_dev *pdev)
{
u32 reg;
u32 fw_type;
@@ -1427,14 +1433,15 @@ static bool mei_me_fw_type_sps(const struct pci_dev *pdev)
dev_dbg(&pdev->dev, "fw type is %d\n", fw_type);
- return fw_type == PCI_CFG_HFS_3_FW_SKU_SPS;
+ return fw_type == PCI_CFG_HFS_3_FW_SKU_IGN ||
+ fw_type == PCI_CFG_HFS_3_FW_SKU_SPS;
}
#define MEI_CFG_KIND_ITOUCH \
.kind = "itouch"
-#define MEI_CFG_FW_SPS \
- .quirk_probe = mei_me_fw_type_sps
+#define MEI_CFG_FW_SPS_IGN \
+ .quirk_probe = mei_me_fw_type_sps_ign
#define MEI_CFG_FW_VER_SUPP \
.fw_ver_supported = 1
@@ -1535,7 +1542,7 @@ static const struct mei_cfg mei_me_pch12_sps_cfg = {
MEI_CFG_PCH8_HFS,
MEI_CFG_FW_VER_SUPP,
MEI_CFG_DMA_128,
- MEI_CFG_FW_SPS,
+ MEI_CFG_FW_SPS_IGN,
};
/* Cannon Lake itouch with quirk for SPS 5.0 and newer Firmware exclusion
@@ -1545,7 +1552,7 @@ static const struct mei_cfg mei_me_pch12_itouch_sps_cfg = {
MEI_CFG_KIND_ITOUCH,
MEI_CFG_PCH8_HFS,
MEI_CFG_FW_VER_SUPP,
- MEI_CFG_FW_SPS,
+ MEI_CFG_FW_SPS_IGN,
};
/* Tiger Lake and newer devices */
@@ -1562,7 +1569,7 @@ static const struct mei_cfg mei_me_pch15_sps_cfg = {
MEI_CFG_FW_VER_SUPP,
MEI_CFG_DMA_128,
MEI_CFG_TRC,
- MEI_CFG_FW_SPS,
+ MEI_CFG_FW_SPS_IGN,
};
/*
diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c
index f79076c67256..eb052005ca86 100644
--- a/drivers/misc/mei/init.c
+++ b/drivers/misc/mei/init.c
@@ -161,6 +161,11 @@ int mei_reset(struct mei_device *dev)
return ret;
}
+ if (dev->dev_state != MEI_DEV_RESETTING) {
+ dev_dbg(dev->dev, "wrong state = %d on link start\n", dev->dev_state);
+ return 0;
+ }
+
dev_dbg(dev->dev, "link is established start sending messages.\n");
mei_set_devstate(dev, MEI_DEV_INIT_CLIENTS);
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index 3a45aaf002ac..a05cdb25d0c4 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -10,6 +10,7 @@
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/pci.h>
+#include <linux/dma-mapping.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
@@ -192,14 +193,7 @@ static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto end;
}
- if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) ||
- dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
-
- err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
- if (err)
- err = dma_set_coherent_mask(&pdev->dev,
- DMA_BIT_MASK(32));
- }
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (err) {
dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
goto end;
diff --git a/drivers/misc/ocxl/link.c b/drivers/misc/ocxl/link.c
index ab039c115381..9670d02c927f 100644
--- a/drivers/misc/ocxl/link.c
+++ b/drivers/misc/ocxl/link.c
@@ -94,7 +94,7 @@ struct ocxl_link {
struct spa *spa;
void *platform_data;
};
-static struct list_head links_list = LIST_HEAD_INIT(links_list);
+static LIST_HEAD(links_list);
static DEFINE_MUTEX(links_list_lock);
enum xsl_response {
diff --git a/drivers/misc/open-dice.c b/drivers/misc/open-dice.c
new file mode 100644
index 000000000000..c61be3404c6f
--- /dev/null
+++ b/drivers/misc/open-dice.c
@@ -0,0 +1,208 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2021 - Google LLC
+ * Author: David Brazdil <dbrazdil@google.com>
+ *
+ * Driver for Open Profile for DICE.
+ *
+ * This driver takes ownership of a reserved memory region containing data
+ * generated by the Open Profile for DICE measured boot protocol. The memory
+ * contents are not interpreted by the kernel but can be mapped into a userspace
+ * process via a misc device. Userspace can also request a wipe of the memory.
+ *
+ * Userspace can access the data with (w/o error handling):
+ *
+ * fd = open("/dev/open-dice0", O_RDWR);
+ * read(fd, &size, sizeof(unsigned long));
+ * data = mmap(NULL, size, PROT_READ, MAP_PRIVATE, fd, 0);
+ * write(fd, NULL, 0); // wipe
+ * close(fd);
+ */
+
+#include <linux/io.h>
+#include <linux/miscdevice.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/platform_device.h>
+
+#define DRIVER_NAME "open-dice"
+
+struct open_dice_drvdata {
+ struct mutex lock;
+ char name[16];
+ struct reserved_mem *rmem;
+ struct miscdevice misc;
+};
+
+static inline struct open_dice_drvdata *to_open_dice_drvdata(struct file *filp)
+{
+ return container_of(filp->private_data, struct open_dice_drvdata, misc);
+}
+
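+/*
+ * Zeroes the reserved memory region. The memory is mapped only for the
+ * duration of the wipe and the mutex serializes concurrent requests.
+ */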
+static int open_dice_wipe(struct open_dice_drvdata *drvdata)
+{
+ void *kaddr;
+
+ mutex_lock(&drvdata->lock);
+ kaddr = devm_memremap(drvdata->misc.this_device, drvdata->rmem->base,
+ drvdata->rmem->size, MEMREMAP_WC);
+ if (IS_ERR(kaddr)) {
+ mutex_unlock(&drvdata->lock);
+ return PTR_ERR(kaddr);
+ }
+
+ memset(kaddr, 0, drvdata->rmem->size);
+ devm_memunmap(drvdata->misc.this_device, kaddr);
+ mutex_unlock(&drvdata->lock);
+ return 0;
+}
+
+/*
+ * Copies the size of the reserved memory region to the user-provided buffer.
+ */
+static ssize_t open_dice_read(struct file *filp, char __user *ptr, size_t len,
+ loff_t *off)
+{
+ unsigned long val = to_open_dice_drvdata(filp)->rmem->size;
+
+ return simple_read_from_buffer(ptr, len, off, &val, sizeof(val));
+}
+
+/*
+ * Triggers a wipe of the reserved memory region. The user-provided pointer
+ * is never dereferenced.
+ */
+static ssize_t open_dice_write(struct file *filp, const char __user *ptr,
+ size_t len, loff_t *off)
+{
+ if (open_dice_wipe(to_open_dice_drvdata(filp)))
+ return -EIO;
+
+ /* Consume the input buffer. */
+ return len;
+}
+
+/*
+ * Creates a mapping of the reserved memory region in user address space.
+ */
+static int open_dice_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct open_dice_drvdata *drvdata = to_open_dice_drvdata(filp);
+
+ /* Do not allow userspace to modify the underlying data. */
+ if ((vma->vm_flags & VM_WRITE) && (vma->vm_flags & VM_SHARED))
+ return -EPERM;
+
+ /* Ensure userspace cannot acquire VM_WRITE + VM_SHARED later. */
+ if (vma->vm_flags & VM_WRITE)
+ vma->vm_flags &= ~VM_MAYSHARE;
+ else if (vma->vm_flags & VM_SHARED)
+ vma->vm_flags &= ~VM_MAYWRITE;
+
+ /* Create write-combine mapping so all clients observe a wipe. */
+ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+ vma->vm_flags |= VM_DONTCOPY | VM_DONTDUMP;
+ return vm_iomap_memory(vma, drvdata->rmem->base, drvdata->rmem->size);
+}
+
+static const struct file_operations open_dice_fops = {
+ .owner = THIS_MODULE,
+ .read = open_dice_read,
+ .write = open_dice_write,
+ .mmap = open_dice_mmap,
+};
+
+static int __init open_dice_probe(struct platform_device *pdev)
+{
+ static unsigned int dev_idx;
+ struct device *dev = &pdev->dev;
+ struct reserved_mem *rmem;
+ struct open_dice_drvdata *drvdata;
+ int ret;
+
+ rmem = of_reserved_mem_lookup(dev->of_node);
+ if (!rmem) {
+ dev_err(dev, "failed to lookup reserved memory\n");
+ return -EINVAL;
+ }
+
+ if (!rmem->size || (rmem->size > ULONG_MAX)) {
+ dev_err(dev, "invalid memory region size\n");
+ return -EINVAL;
+ }
+
+ if (!PAGE_ALIGNED(rmem->base) || !PAGE_ALIGNED(rmem->size)) {
+ dev_err(dev, "memory region must be page-aligned\n");
+ return -EINVAL;
+ }
+
+ drvdata = devm_kmalloc(dev, sizeof(*drvdata), GFP_KERNEL);
+ if (!drvdata)
+ return -ENOMEM;
+
+ *drvdata = (struct open_dice_drvdata){
+ .lock = __MUTEX_INITIALIZER(drvdata->lock),
+ .rmem = rmem,
+ .misc = (struct miscdevice){
+ .parent = dev,
+ .name = drvdata->name,
+ .minor = MISC_DYNAMIC_MINOR,
+ .fops = &open_dice_fops,
+ .mode = 0600,
+ },
+ };
+
+ /* Index overflow check not needed, misc_register() will fail. */
+ snprintf(drvdata->name, sizeof(drvdata->name), DRIVER_NAME"%u", dev_idx++);
+
+ ret = misc_register(&drvdata->misc);
+ if (ret) {
+ dev_err(dev, "failed to register misc device '%s': %d\n",
+ drvdata->name, ret);
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, drvdata);
+ return 0;
+}
+
+static int open_dice_remove(struct platform_device *pdev)
+{
+ struct open_dice_drvdata *drvdata = platform_get_drvdata(pdev);
+
+ misc_deregister(&drvdata->misc);
+ return 0;
+}
+
+static const struct of_device_id open_dice_of_match[] = {
+ { .compatible = "google,open-dice" },
+ {},
+};
+
+static struct platform_driver open_dice_driver = {
+ .remove = open_dice_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = open_dice_of_match,
+ },
+};
+
+static int __init open_dice_init(void)
+{
+ int ret = platform_driver_probe(&open_dice_driver, open_dice_probe);
+
+ /* DICE regions are optional. Succeed even with zero instances. */
+ return (ret == -ENODEV) ? 0 : ret;
+}
+
+static void __exit open_dice_exit(void)
+{
+ platform_driver_unregister(&open_dice_driver);
+}
+
+module_init(open_dice_init);
+module_exit(open_dice_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("David Brazdil <dbrazdil@google.com>");
diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
index e4c067c61251..5efc869fe59a 100644
--- a/drivers/misc/sgi-gru/grutables.h
+++ b/drivers/misc/sgi-gru/grutables.h
@@ -530,12 +530,6 @@ struct gru_blade_state {
for ((i) = (k)*GRU_CBR_AU_SIZE; \
(i) < ((k) + 1) * GRU_CBR_AU_SIZE; (i)++)
-/* Scan each DSR in a DSR bitmap. Note: multiple DSRs in an allocation unit */
-#define for_each_dsr_in_allocation_map(i, map, k) \
- for_each_set_bit((k), (const unsigned long *)(map), GRU_DSR_AU) \
- for ((i) = (k) * GRU_DSR_AU_CL; \
- (i) < ((k) + 1) * GRU_DSR_AU_CL; (i)++)
-
#define gseg_physical_address(gru, ctxnum) \
((gru)->gs_gru_base_paddr + ctxnum * GRU_GSEG_STRIDE)
#define gseg_virtual_address(gru, ctxnum) \
diff --git a/drivers/misc/vmw_vmci/vmci_guest.c b/drivers/misc/vmw_vmci/vmci_guest.c
index 1018dc77269d..981b19308e6f 100644
--- a/drivers/misc/vmw_vmci/vmci_guest.c
+++ b/drivers/misc/vmw_vmci/vmci_guest.c
@@ -13,6 +13,7 @@
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
+#include <linux/processor.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/init.h>
@@ -31,6 +32,12 @@
#define VMCI_UTIL_NUM_RESOURCES 1
+/*
+ * Datagram buffers for DMA send/receive must accommodate at least
+ * a maximum sized datagram and the header.
+ */
+#define VMCI_DMA_DG_BUFFER_SIZE (VMCI_MAX_DG_SIZE + PAGE_SIZE)
+
static bool vmci_disable_msi;
module_param_named(disable_msi, vmci_disable_msi, bool, 0);
MODULE_PARM_DESC(disable_msi, "Disable MSI use in driver - (default=0)");
@@ -45,13 +52,18 @@ static u32 vm_context_id = VMCI_INVALID_ID;
struct vmci_guest_device {
struct device *dev; /* PCI device we are attached to */
void __iomem *iobase;
+ void __iomem *mmio_base;
bool exclusive_vectors;
struct tasklet_struct datagram_tasklet;
struct tasklet_struct bm_tasklet;
+ struct wait_queue_head inout_wq;
void *data_buffer;
+ dma_addr_t data_buffer_base;
+ void *tx_buffer;
+ dma_addr_t tx_buffer_base;
void *notification_bitmap;
dma_addr_t notification_base;
};
@@ -89,6 +101,92 @@ u32 vmci_get_vm_context_id(void)
return vm_context_id;
}
+static unsigned int vmci_read_reg(struct vmci_guest_device *dev, u32 reg)
+{
+ if (dev->mmio_base != NULL)
+ return readl(dev->mmio_base + reg);
+ return ioread32(dev->iobase + reg);
+}
+
+static void vmci_write_reg(struct vmci_guest_device *dev, u32 val, u32 reg)
+{
+ if (dev->mmio_base != NULL)
+ writel(val, dev->mmio_base + reg);
+ else
+ iowrite32(val, dev->iobase + reg);
+}
+
+static void vmci_read_data(struct vmci_guest_device *vmci_dev,
+ void *dest, size_t size)
+{
+ if (vmci_dev->mmio_base == NULL)
+ ioread8_rep(vmci_dev->iobase + VMCI_DATA_IN_ADDR,
+ dest, size);
+ else {
+ /*
+ * For DMA datagrams, the data_buffer will contain the header on the
+ * first page, followed by the incoming datagram(s) on the following
+ * pages. An S/G element immediately following the header on the
+ * first page points to the data area.
+ */
+ struct vmci_data_in_out_header *buffer_header = vmci_dev->data_buffer;
+ struct vmci_sg_elem *sg_array = (struct vmci_sg_elem *)(buffer_header + 1);
+ size_t buffer_offset = dest - vmci_dev->data_buffer;
+
+ buffer_header->opcode = 1;
+ buffer_header->size = 1;
+ buffer_header->busy = 0;
+ sg_array[0].addr = vmci_dev->data_buffer_base + buffer_offset;
+ sg_array[0].size = size;
+
+ vmci_write_reg(vmci_dev, lower_32_bits(vmci_dev->data_buffer_base),
+ VMCI_DATA_IN_LOW_ADDR);
+
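+ /* The device sets busy to 1 once the data has been written; the DMA datagram interrupt wakes this queue. */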
+ wait_event(vmci_dev->inout_wq, buffer_header->busy == 1);
+ }
+}
+
+static int vmci_write_data(struct vmci_guest_device *dev,
+ struct vmci_datagram *dg)
+{
+ int result;
+
+ if (dev->mmio_base != NULL) {
+ struct vmci_data_in_out_header *buffer_header = dev->tx_buffer;
+ u8 *dg_out_buffer = (u8 *)(buffer_header + 1);
+
+ if (VMCI_DG_SIZE(dg) > VMCI_MAX_DG_SIZE)
+ return VMCI_ERROR_INVALID_ARGS;
+
+ /*
+ * Initialize send buffer with outgoing datagram
+ * and set up header for inline data. Device will
+ * not access buffer asynchronously - only after
+ * the write to VMCI_DATA_OUT_LOW_ADDR.
+ */
+ memcpy(dg_out_buffer, dg, VMCI_DG_SIZE(dg));
+ buffer_header->opcode = 0;
+ buffer_header->size = VMCI_DG_SIZE(dg);
+ buffer_header->busy = 1;
+
+ vmci_write_reg(dev, lower_32_bits(dev->tx_buffer_base),
+ VMCI_DATA_OUT_LOW_ADDR);
+
+ /* Caller holds a spinlock, so cannot block. */
+ spin_until_cond(buffer_header->busy == 0);
+
+ result = vmci_read_reg(vmci_dev_g, VMCI_RESULT_LOW_ADDR);
+ if (result == VMCI_SUCCESS)
+ result = (int)buffer_header->result;
+ } else {
+ iowrite8_rep(dev->iobase + VMCI_DATA_OUT_ADDR,
+ dg, VMCI_DG_SIZE(dg));
+ result = vmci_read_reg(vmci_dev_g, VMCI_RESULT_LOW_ADDR);
+ }
+
+ return result;
+}
+
/*
* VM to hypervisor call mechanism. We use the standard VMware naming
* convention since shared code is calling this function as well.
@@ -114,9 +212,8 @@ int vmci_send_datagram(struct vmci_datagram *dg)
spin_lock_irqsave(&vmci_dev_spinlock, flags);
if (vmci_dev_g) {
- iowrite8_rep(vmci_dev_g->iobase + VMCI_DATA_OUT_ADDR,
- dg, VMCI_DG_SIZE(dg));
- result = ioread32(vmci_dev_g->iobase + VMCI_RESULT_LOW_ADDR);
+ vmci_write_data(vmci_dev_g, dg);
+ result = vmci_read_reg(vmci_dev_g, VMCI_RESULT_LOW_ADDR);
} else {
result = VMCI_ERROR_UNAVAILABLE;
}
@@ -156,9 +253,9 @@ static void vmci_guest_cid_update(u32 sub_id,
/*
* Verify that the host supports the hypercalls we need. If it does not,
- * try to find fallback hypercalls and use those instead. Returns
- * true if required hypercalls (or fallback hypercalls) are
- * supported by the host, false otherwise.
+ * try to find fallback hypercalls and use those instead. Returns 0 if
+ * required hypercalls (or fallback hypercalls) are supported by the host,
+ * an error code otherwise.
*/
static int vmci_check_host_caps(struct pci_dev *pdev)
{
@@ -195,15 +292,17 @@ static int vmci_check_host_caps(struct pci_dev *pdev)
}
/*
- * Reads datagrams from the data in port and dispatches them. We
- * always start reading datagrams into only the first page of the
- * datagram buffer. If the datagrams don't fit into one page, we
- * use the maximum datagram buffer size for the remainder of the
- * invocation. This is a simple heuristic for not penalizing
- * small datagrams.
+ * Reads datagrams from the device and dispatches them. For IO port
+ * based access to the device, we always start reading datagrams into
+ * only the first page of the datagram buffer. If the datagrams don't
+ * fit into one page, we use the maximum datagram buffer size for the
+ * remainder of the invocation. This is a simple heuristic for not
+ * penalizing small datagrams. For DMA-based datagrams, we always
+ * use the maximum datagram buffer size, since there is no performance
+ * penalty for doing so.
*
* This function assumes that it has exclusive access to the data
- * in port for the duration of the call.
+ * in register(s) for the duration of the call.
*/
static void vmci_dispatch_dgs(unsigned long data)
{
@@ -211,23 +310,41 @@ static void vmci_dispatch_dgs(unsigned long data)
u8 *dg_in_buffer = vmci_dev->data_buffer;
struct vmci_datagram *dg;
size_t dg_in_buffer_size = VMCI_MAX_DG_SIZE;
- size_t current_dg_in_buffer_size = PAGE_SIZE;
+ size_t current_dg_in_buffer_size;
size_t remaining_bytes;
+ bool is_io_port = vmci_dev->mmio_base == NULL;
BUILD_BUG_ON(VMCI_MAX_DG_SIZE < PAGE_SIZE);
- ioread8_rep(vmci_dev->iobase + VMCI_DATA_IN_ADDR,
- vmci_dev->data_buffer, current_dg_in_buffer_size);
+ if (!is_io_port) {
+ /* For mmio, the first page is used for the header. */
+ dg_in_buffer += PAGE_SIZE;
+
+ /*
+ * For DMA-based datagram operations, there is no performance
+ * penalty for reading the maximum buffer size.
+ */
+ current_dg_in_buffer_size = VMCI_MAX_DG_SIZE;
+ } else {
+ current_dg_in_buffer_size = PAGE_SIZE;
+ }
+ vmci_read_data(vmci_dev, dg_in_buffer, current_dg_in_buffer_size);
dg = (struct vmci_datagram *)dg_in_buffer;
remaining_bytes = current_dg_in_buffer_size;
+ /*
+ * Read through the buffer until an invalid datagram header is
+ * encountered. The exit condition for datagrams read through
+ * VMCI_DATA_IN_ADDR is a bit more complicated, since a datagram
+ * can start on any page boundary in the buffer.
+ */
while (dg->dst.resource != VMCI_INVALID_ID ||
- remaining_bytes > PAGE_SIZE) {
+ (is_io_port && remaining_bytes > PAGE_SIZE)) {
unsigned dg_in_size;
/*
- * When the input buffer spans multiple pages, a datagram can
- * start on any page boundary in the buffer.
+ * If using VMCI_DATA_IN_ADDR, skip to the next page
+ * as a datagram can start on any page boundary.
*/
if (dg->dst.resource == VMCI_INVALID_ID) {
dg = (struct vmci_datagram *)roundup(
@@ -277,11 +394,10 @@ static void vmci_dispatch_dgs(unsigned long data)
current_dg_in_buffer_size =
dg_in_buffer_size;
- ioread8_rep(vmci_dev->iobase +
- VMCI_DATA_IN_ADDR,
- vmci_dev->data_buffer +
+ vmci_read_data(vmci_dev,
+ dg_in_buffer +
remaining_bytes,
- current_dg_in_buffer_size -
+ current_dg_in_buffer_size -
remaining_bytes);
}
@@ -319,10 +435,8 @@ static void vmci_dispatch_dgs(unsigned long data)
current_dg_in_buffer_size = dg_in_buffer_size;
for (;;) {
- ioread8_rep(vmci_dev->iobase +
- VMCI_DATA_IN_ADDR,
- vmci_dev->data_buffer,
- current_dg_in_buffer_size);
+ vmci_read_data(vmci_dev, dg_in_buffer,
+ current_dg_in_buffer_size);
if (bytes_to_skip <= current_dg_in_buffer_size)
break;
@@ -339,8 +453,7 @@ static void vmci_dispatch_dgs(unsigned long data)
if (remaining_bytes < VMCI_DG_HEADERSIZE) {
/* Get the next batch of datagrams. */
- ioread8_rep(vmci_dev->iobase + VMCI_DATA_IN_ADDR,
- vmci_dev->data_buffer,
+ vmci_read_data(vmci_dev, dg_in_buffer,
current_dg_in_buffer_size);
dg = (struct vmci_datagram *)dg_in_buffer;
remaining_bytes = current_dg_in_buffer_size;
@@ -384,7 +497,7 @@ static irqreturn_t vmci_interrupt(int irq, void *_dev)
unsigned int icr;
/* Acknowledge interrupt and determine what needs doing. */
- icr = ioread32(dev->iobase + VMCI_ICR_ADDR);
+ icr = vmci_read_reg(dev, VMCI_ICR_ADDR);
if (icr == 0 || icr == ~0)
return IRQ_NONE;
@@ -398,6 +511,12 @@ static irqreturn_t vmci_interrupt(int irq, void *_dev)
icr &= ~VMCI_ICR_NOTIFICATION;
}
+
+ if (icr & VMCI_ICR_DMA_DATAGRAM) {
+ wake_up_all(&dev->inout_wq);
+ icr &= ~VMCI_ICR_DMA_DATAGRAM;
+ }
+
if (icr != 0)
dev_warn(dev->dev,
"Ignoring unknown interrupt cause (%d)\n",
@@ -423,13 +542,47 @@ static irqreturn_t vmci_interrupt_bm(int irq, void *_dev)
}
/*
+ * Interrupt handler for MSI-X interrupt vector VMCI_INTR_DMA_DATAGRAM,
+ * which is for the completion of a DMA datagram send or receive operation.
+ * Will only get called if we are using MSI-X with exclusive vectors.
+ */
+static irqreturn_t vmci_interrupt_dma_datagram(int irq, void *_dev)
+{
+ struct vmci_guest_device *dev = _dev;
+
+ wake_up_all(&dev->inout_wq);
+
+ return IRQ_HANDLED;
+}
+
+static void vmci_free_dg_buffers(struct vmci_guest_device *vmci_dev)
+{
+ if (vmci_dev->mmio_base != NULL) {
+ if (vmci_dev->tx_buffer != NULL)
+ dma_free_coherent(vmci_dev->dev,
+ VMCI_DMA_DG_BUFFER_SIZE,
+ vmci_dev->tx_buffer,
+ vmci_dev->tx_buffer_base);
+ if (vmci_dev->data_buffer != NULL)
+ dma_free_coherent(vmci_dev->dev,
+ VMCI_DMA_DG_BUFFER_SIZE,
+ vmci_dev->data_buffer,
+ vmci_dev->data_buffer_base);
+ } else {
+ vfree(vmci_dev->data_buffer);
+ }
+}
+
+/*
* Most of the initialization at module load time is done here.
*/
static int vmci_guest_probe_device(struct pci_dev *pdev,
const struct pci_device_id *id)
{
struct vmci_guest_device *vmci_dev;
- void __iomem *iobase;
+ void __iomem *iobase = NULL;
+ void __iomem *mmio_base = NULL;
+ unsigned int num_irq_vectors;
unsigned int capabilities;
unsigned int caps_in_use;
unsigned long cmd;
@@ -445,16 +598,29 @@ static int vmci_guest_probe_device(struct pci_dev *pdev,
return error;
}
- error = pcim_iomap_regions(pdev, 1 << 0, KBUILD_MODNAME);
- if (error) {
- dev_err(&pdev->dev, "Failed to reserve/map IO regions\n");
- return error;
- }
+ /*
+ * The VMCI device with mmio access to registers requests 256KB
+ * for BAR1. If present, the driver will use the new VMCI device
+ * functionality for register access and datagram send/recv.
+ */
- iobase = pcim_iomap_table(pdev)[0];
+ if (pci_resource_len(pdev, 1) == VMCI_WITH_MMIO_ACCESS_BAR_SIZE) {
+ dev_info(&pdev->dev, "MMIO register access is available\n");
+ mmio_base = pci_iomap_range(pdev, 1, VMCI_MMIO_ACCESS_OFFSET,
+ VMCI_MMIO_ACCESS_SIZE);
+ /* If the map fails, we fall back to IOIO access. */
+ if (!mmio_base)
+ dev_warn(&pdev->dev, "Failed to map MMIO register access\n");
+ }
- dev_info(&pdev->dev, "Found VMCI PCI device at %#lx, irq %u\n",
- (unsigned long)iobase, pdev->irq);
+ if (!mmio_base) {
+ error = pcim_iomap_regions(pdev, BIT(0), KBUILD_MODNAME);
+ if (error) {
+ dev_err(&pdev->dev, "Failed to reserve/map IO regions\n");
+ return error;
+ }
+ iobase = pcim_iomap_table(pdev)[0];
+ }
vmci_dev = devm_kzalloc(&pdev->dev, sizeof(*vmci_dev), GFP_KERNEL);
if (!vmci_dev) {
@@ -466,17 +632,35 @@ static int vmci_guest_probe_device(struct pci_dev *pdev,
vmci_dev->dev = &pdev->dev;
vmci_dev->exclusive_vectors = false;
vmci_dev->iobase = iobase;
+ vmci_dev->mmio_base = mmio_base;
tasklet_init(&vmci_dev->datagram_tasklet,
vmci_dispatch_dgs, (unsigned long)vmci_dev);
tasklet_init(&vmci_dev->bm_tasklet,
vmci_process_bitmap, (unsigned long)vmci_dev);
+ init_waitqueue_head(&vmci_dev->inout_wq);
- vmci_dev->data_buffer = vmalloc(VMCI_MAX_DG_SIZE);
+ if (mmio_base != NULL) {
+ vmci_dev->tx_buffer = dma_alloc_coherent(&pdev->dev, VMCI_DMA_DG_BUFFER_SIZE,
+ &vmci_dev->tx_buffer_base,
+ GFP_KERNEL);
+ if (!vmci_dev->tx_buffer) {
+ dev_err(&pdev->dev,
+ "Can't allocate memory for datagram tx buffer\n");
+ return -ENOMEM;
+ }
+
+ vmci_dev->data_buffer = dma_alloc_coherent(&pdev->dev, VMCI_DMA_DG_BUFFER_SIZE,
+ &vmci_dev->data_buffer_base,
+ GFP_KERNEL);
+ } else {
+ vmci_dev->data_buffer = vmalloc(VMCI_MAX_DG_SIZE);
+ }
if (!vmci_dev->data_buffer) {
dev_err(&pdev->dev,
"Can't allocate memory for datagram buffer\n");
- return -ENOMEM;
+ error = -ENOMEM;
+ goto err_free_data_buffers;
}
pci_set_master(pdev); /* To enable queue_pair functionality. */
@@ -490,11 +674,11 @@ static int vmci_guest_probe_device(struct pci_dev *pdev,
*
* Right now, we need datagrams. There are no fallbacks.
*/
- capabilities = ioread32(vmci_dev->iobase + VMCI_CAPS_ADDR);
+ capabilities = vmci_read_reg(vmci_dev, VMCI_CAPS_ADDR);
if (!(capabilities & VMCI_CAPS_DATAGRAM)) {
dev_err(&pdev->dev, "Device does not support datagrams\n");
error = -ENXIO;
- goto err_free_data_buffer;
+ goto err_free_data_buffers;
}
caps_in_use = VMCI_CAPS_DATAGRAM;
@@ -522,19 +706,39 @@ static int vmci_guest_probe_device(struct pci_dev *pdev,
vmci_dev->notification_bitmap = dma_alloc_coherent(
&pdev->dev, PAGE_SIZE, &vmci_dev->notification_base,
GFP_KERNEL);
- if (!vmci_dev->notification_bitmap) {
+ if (!vmci_dev->notification_bitmap)
dev_warn(&pdev->dev,
"Unable to allocate notification bitmap\n");
- } else {
- memset(vmci_dev->notification_bitmap, 0, PAGE_SIZE);
+ else
caps_in_use |= VMCI_CAPS_NOTIFICATIONS;
+ }
+
+ if (mmio_base != NULL) {
+ if (capabilities & VMCI_CAPS_DMA_DATAGRAM) {
+ caps_in_use |= VMCI_CAPS_DMA_DATAGRAM;
+ } else {
+ dev_err(&pdev->dev,
+ "Missing capability: VMCI_CAPS_DMA_DATAGRAM\n");
+ error = -ENXIO;
+ goto err_free_data_buffers;
}
}
dev_info(&pdev->dev, "Using capabilities 0x%x\n", caps_in_use);
/* Let the host know which capabilities we intend to use. */
- iowrite32(caps_in_use, vmci_dev->iobase + VMCI_CAPS_ADDR);
+ vmci_write_reg(vmci_dev, caps_in_use, VMCI_CAPS_ADDR);
+
+ if (caps_in_use & VMCI_CAPS_DMA_DATAGRAM) {
+ /* Let the device know the size for pages passed down. */
+ vmci_write_reg(vmci_dev, PAGE_SHIFT, VMCI_GUEST_PAGE_SHIFT);
+
+ /* Configure the high order parts of the data in/out buffers. */
+ vmci_write_reg(vmci_dev, upper_32_bits(vmci_dev->data_buffer_base),
+ VMCI_DATA_IN_HIGH_ADDR);
+ vmci_write_reg(vmci_dev, upper_32_bits(vmci_dev->tx_buffer_base),
+ VMCI_DATA_OUT_HIGH_ADDR);
+ }
/* Set up global device so that we can start sending datagrams */
spin_lock_irq(&vmci_dev_spinlock);
@@ -561,7 +765,7 @@ static int vmci_guest_probe_device(struct pci_dev *pdev,
/* Check host capabilities. */
error = vmci_check_host_caps(pdev);
if (error)
- goto err_remove_bitmap;
+ goto err_remove_vmci_dev_g;
/* Enable device. */
@@ -581,13 +785,17 @@ static int vmci_guest_probe_device(struct pci_dev *pdev,
* Enable interrupts. Try MSI-X first, then MSI, and then fallback on
* legacy interrupts.
*/
- error = pci_alloc_irq_vectors(pdev, VMCI_MAX_INTRS, VMCI_MAX_INTRS,
- PCI_IRQ_MSIX);
+ if (vmci_dev->mmio_base != NULL)
+ num_irq_vectors = VMCI_MAX_INTRS;
+ else
+ num_irq_vectors = VMCI_MAX_INTRS_NOTIFICATION;
+ error = pci_alloc_irq_vectors(pdev, num_irq_vectors, num_irq_vectors,
+ PCI_IRQ_MSIX);
if (error < 0) {
error = pci_alloc_irq_vectors(pdev, 1, 1,
PCI_IRQ_MSIX | PCI_IRQ_MSI | PCI_IRQ_LEGACY);
if (error < 0)
- goto err_remove_bitmap;
+ goto err_unsubscribe_event;
} else {
vmci_dev->exclusive_vectors = true;
}
@@ -620,6 +828,17 @@ static int vmci_guest_probe_device(struct pci_dev *pdev,
pci_irq_vector(pdev, 1), error);
goto err_free_irq;
}
+ if (caps_in_use & VMCI_CAPS_DMA_DATAGRAM) {
+ error = request_irq(pci_irq_vector(pdev, 2),
+ vmci_interrupt_dma_datagram,
+ 0, KBUILD_MODNAME, vmci_dev);
+ if (error) {
+ dev_err(&pdev->dev,
+ "Failed to allocate irq %u: %d\n",
+ pci_irq_vector(pdev, 2), error);
+ goto err_free_bm_irq;
+ }
+ }
}
dev_dbg(&pdev->dev, "Registered device\n");
@@ -630,17 +849,20 @@ static int vmci_guest_probe_device(struct pci_dev *pdev,
cmd = VMCI_IMR_DATAGRAM;
if (caps_in_use & VMCI_CAPS_NOTIFICATIONS)
cmd |= VMCI_IMR_NOTIFICATION;
- iowrite32(cmd, vmci_dev->iobase + VMCI_IMR_ADDR);
+ if (caps_in_use & VMCI_CAPS_DMA_DATAGRAM)
+ cmd |= VMCI_IMR_DMA_DATAGRAM;
+ vmci_write_reg(vmci_dev, cmd, VMCI_IMR_ADDR);
/* Enable interrupts. */
- iowrite32(VMCI_CONTROL_INT_ENABLE,
- vmci_dev->iobase + VMCI_CONTROL_ADDR);
+ vmci_write_reg(vmci_dev, VMCI_CONTROL_INT_ENABLE, VMCI_CONTROL_ADDR);
pci_set_drvdata(pdev, vmci_dev);
vmci_call_vsock_callback(false);
return 0;
+err_free_bm_irq:
+ free_irq(pci_irq_vector(pdev, 1), vmci_dev);
err_free_irq:
free_irq(pci_irq_vector(pdev, 0), vmci_dev);
tasklet_kill(&vmci_dev->datagram_tasklet);
@@ -649,29 +871,28 @@ err_free_irq:
err_disable_msi:
pci_free_irq_vectors(pdev);
+err_unsubscribe_event:
vmci_err = vmci_event_unsubscribe(ctx_update_sub_id);
if (vmci_err < VMCI_SUCCESS)
dev_warn(&pdev->dev,
"Failed to unsubscribe from event (type=%d) with subscriber (ID=0x%x): %d\n",
VMCI_EVENT_CTX_ID_UPDATE, ctx_update_sub_id, vmci_err);
-err_remove_bitmap:
- if (vmci_dev->notification_bitmap) {
- iowrite32(VMCI_CONTROL_RESET,
- vmci_dev->iobase + VMCI_CONTROL_ADDR);
- dma_free_coherent(&pdev->dev, PAGE_SIZE,
- vmci_dev->notification_bitmap,
- vmci_dev->notification_base);
- }
-
err_remove_vmci_dev_g:
spin_lock_irq(&vmci_dev_spinlock);
vmci_pdev = NULL;
vmci_dev_g = NULL;
spin_unlock_irq(&vmci_dev_spinlock);
-err_free_data_buffer:
- vfree(vmci_dev->data_buffer);
+ if (vmci_dev->notification_bitmap) {
+ vmci_write_reg(vmci_dev, VMCI_CONTROL_RESET, VMCI_CONTROL_ADDR);
+ dma_free_coherent(&pdev->dev, PAGE_SIZE,
+ vmci_dev->notification_bitmap,
+ vmci_dev->notification_base);
+ }
+
+err_free_data_buffers:
+ vmci_free_dg_buffers(vmci_dev);
/* The rest are managed resources and will be freed by PCI core */
return error;
@@ -700,15 +921,18 @@ static void vmci_guest_remove_device(struct pci_dev *pdev)
spin_unlock_irq(&vmci_dev_spinlock);
dev_dbg(&pdev->dev, "Resetting vmci device\n");
- iowrite32(VMCI_CONTROL_RESET, vmci_dev->iobase + VMCI_CONTROL_ADDR);
+ vmci_write_reg(vmci_dev, VMCI_CONTROL_RESET, VMCI_CONTROL_ADDR);
/*
* Free IRQ and then disable MSI/MSI-X as appropriate. For
* MSI-X, we might have multiple vectors, each with their own
* IRQ, which we must free too.
*/
- if (vmci_dev->exclusive_vectors)
+ if (vmci_dev->exclusive_vectors) {
free_irq(pci_irq_vector(pdev, 1), vmci_dev);
+ if (vmci_dev->mmio_base != NULL)
+ free_irq(pci_irq_vector(pdev, 2), vmci_dev);
+ }
free_irq(pci_irq_vector(pdev, 0), vmci_dev);
pci_free_irq_vectors(pdev);
@@ -726,7 +950,10 @@ static void vmci_guest_remove_device(struct pci_dev *pdev)
vmci_dev->notification_base);
}
- vfree(vmci_dev->data_buffer);
+ vmci_free_dg_buffers(vmci_dev);
+
+ if (vmci_dev->mmio_base != NULL)
+ pci_iounmap(pdev, vmci_dev->mmio_base);
/* The rest are managed resources and will be freed by PCI core */
}
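
The hunks above funnel every register access through vmci_read_reg()/vmci_write_reg() so that one code path serves both the legacy port I/O BAR and the new MMIO BAR. The accessor definitions are outside this excerpt; a minimal sketch of what they presumably look like, based only on the fields and calls visible above (the readl/ioread32 split is an assumption):

/* Illustrative sketch, not part of the patch above. */
static u32 vmci_read_reg(struct vmci_guest_device *dev, u32 reg)
{
	if (dev->mmio_base != NULL)
		return readl(dev->mmio_base + reg);	/* new MMIO window */
	return ioread32(dev->iobase + reg);		/* legacy port I/O */
}

static void vmci_write_reg(struct vmci_guest_device *dev, u32 val, u32 reg)
{
	if (dev->mmio_base != NULL)
		writel(val, dev->mmio_base + reg);
	else
		iowrite32(val, dev->iobase + reg);
}
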
diff --git a/drivers/mmc/host/rtsx_pci_sdmmc.c b/drivers/mmc/host/rtsx_pci_sdmmc.c
index 58cfaffa3c2d..265b3889f9d7 100644
--- a/drivers/mmc/host/rtsx_pci_sdmmc.c
+++ b/drivers/mmc/host/rtsx_pci_sdmmc.c
@@ -806,6 +806,7 @@ static void sd_request(struct work_struct *work)
struct mmc_request *mrq = host->mrq;
struct mmc_command *cmd = mrq->cmd;
struct mmc_data *data = mrq->data;
+ struct device *dev = &host->pdev->dev;
unsigned int data_size = 0;
int err;
@@ -1080,6 +1081,7 @@ static void sdmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
struct realtek_pci_sdmmc *host = mmc_priv(mmc);
struct rtsx_pcr *pcr = host->pcr;
+ struct device *dev = &host->pdev->dev;
if (host->eject)
return;
@@ -1128,6 +1130,7 @@ static int sdmmc_get_ro(struct mmc_host *mmc)
{
struct realtek_pci_sdmmc *host = mmc_priv(mmc);
struct rtsx_pcr *pcr = host->pcr;
+ struct device *dev = &host->pdev->dev;
int ro = 0;
u32 val;
@@ -1153,6 +1156,7 @@ static int sdmmc_get_cd(struct mmc_host *mmc)
{
struct realtek_pci_sdmmc *host = mmc_priv(mmc);
struct rtsx_pcr *pcr = host->pcr;
+ struct device *dev = &host->pdev->dev;
int cd = 0;
u32 val;
@@ -1251,6 +1255,7 @@ static int sdmmc_switch_voltage(struct mmc_host *mmc, struct mmc_ios *ios)
{
struct realtek_pci_sdmmc *host = mmc_priv(mmc);
struct rtsx_pcr *pcr = host->pcr;
+ struct device *dev = &host->pdev->dev;
int err = 0;
u8 voltage;
@@ -1303,6 +1308,7 @@ static int sdmmc_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
struct realtek_pci_sdmmc *host = mmc_priv(mmc);
struct rtsx_pcr *pcr = host->pcr;
+ struct device *dev = &host->pdev->dev;
int err = 0;
if (host->eject)
@@ -1495,12 +1501,12 @@ static int rtsx_pci_sdmmc_drv_probe(struct platform_device *pdev)
realtek_init_host(host);
- if (pcr->rtd3_en) {
- pm_runtime_set_autosuspend_delay(&pdev->dev, 5000);
- pm_runtime_use_autosuspend(&pdev->dev);
- pm_runtime_enable(&pdev->dev);
- }
-
+ pm_runtime_no_callbacks(&pdev->dev);
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_set_autosuspend_delay(&pdev->dev, 200);
+ pm_runtime_mark_last_busy(&pdev->dev);
+ pm_runtime_use_autosuspend(&pdev->dev);
mmc_add_host(mmc);
@@ -1521,11 +1527,6 @@ static int rtsx_pci_sdmmc_drv_remove(struct platform_device *pdev)
pcr->slots[RTSX_SD_CARD].card_event = NULL;
mmc = host->mmc;
- if (pcr->rtd3_en) {
- pm_runtime_dont_use_autosuspend(&pdev->dev);
- pm_runtime_disable(&pdev->dev);
- }
-
cancel_work_sync(&host->work);
mutex_lock(&host->host_mutex);
@@ -1548,6 +1549,9 @@ static int rtsx_pci_sdmmc_drv_remove(struct platform_device *pdev)
flush_work(&host->work);
+ pm_runtime_dont_use_autosuspend(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
mmc_free_host(mmc);
dev_dbg(&(pdev->dev),
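
The new struct device *dev locals added in the hunks above presumably pair with runtime-PM get/put calls in parts of these functions that this excerpt does not show, matching the autosuspend setup done in probe. A minimal sketch of that usual bracketing pattern (the helper name is made up for illustration):

/* Illustrative sketch only; the real call sites are not shown here. */
static void rtsx_sdmmc_pm_bracket_sketch(struct device *dev)
{
	pm_runtime_get_sync(dev);		/* resume the card reader */

	/* ... issue the SD/MMC request ... */

	pm_runtime_mark_last_busy(dev);		/* restart the autosuspend timer */
	pm_runtime_put_autosuspend(dev);	/* suspend again after 200 ms idle */
}
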
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index eef87b28d6c8..fc6090366684 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -743,8 +743,7 @@ int del_mtd_device(struct mtd_info *mtd)
debugfs_remove_recursive(mtd->dbg.dfs_dir);
/* Try to remove the NVMEM provider */
- if (mtd->nvmem)
- nvmem_unregister(mtd->nvmem);
+ nvmem_unregister(mtd->nvmem);
device_unregister(&mtd->dev);
@@ -923,8 +922,7 @@ static int mtd_otp_nvmem_add(struct mtd_info *mtd)
return 0;
err:
- if (mtd->otp_user_nvmem)
- nvmem_unregister(mtd->otp_user_nvmem);
+ nvmem_unregister(mtd->otp_user_nvmem);
return err;
}
@@ -1028,11 +1026,8 @@ int mtd_device_unregister(struct mtd_info *master)
memset(&master->reboot_notifier, 0, sizeof(master->reboot_notifier));
}
- if (master->otp_user_nvmem)
- nvmem_unregister(master->otp_user_nvmem);
-
- if (master->otp_factory_nvmem)
- nvmem_unregister(master->otp_factory_nvmem);
+ nvmem_unregister(master->otp_user_nvmem);
+ nvmem_unregister(master->otp_factory_nvmem);
err = del_mtd_partitions(master);
if (err)
diff --git a/drivers/mux/core.c b/drivers/mux/core.c
index 22f4709768d1..49bedbe6316c 100644
--- a/drivers/mux/core.c
+++ b/drivers/mux/core.c
@@ -29,6 +29,20 @@
*/
#define MUX_CACHE_UNKNOWN MUX_IDLE_AS_IS
+/**
+ * struct mux_state - Represents a mux controller state specific to a given
+ * consumer.
+ * @mux: Pointer to a mux controller.
+ * @state: State of the mux to be selected.
+ *
+ * This structure is specific to the consumer that acquires it and has
+ * information specific to that consumer.
+ */
+struct mux_state {
+ struct mux_control *mux;
+ unsigned int state;
+};
+
static struct class mux_class = {
.name = "mux",
.owner = THIS_MODULE,
@@ -341,7 +355,8 @@ static void mux_control_delay(struct mux_control *mux, unsigned int delay_us)
* On successfully selecting the mux-control state, it will be locked until
* there is a call to mux_control_deselect(). If the mux-control is already
* selected when mux_control_select() is called, the caller will be blocked
- * until mux_control_deselect() is called (by someone else).
+ * until mux_control_deselect() or mux_state_deselect() is called (by someone
+ * else).
*
* Therefore, make sure to call mux_control_deselect() when the operation is
* complete and the mux-control is free for others to use, but do not call
@@ -371,13 +386,37 @@ int mux_control_select_delay(struct mux_control *mux, unsigned int state,
EXPORT_SYMBOL_GPL(mux_control_select_delay);
/**
+ * mux_state_select_delay() - Select the given multiplexer state.
+ * @mstate: The mux-state to select.
+ * @delay_us: The time to delay (in microseconds) if the mux state is changed.
+ *
+ * On successfully selecting the mux-state, its mux-control will be locked
+ * until there is a call to mux_state_deselect(). If the mux-control is already
+ * selected when mux_state_select() is called, the caller will be blocked
+ * until mux_state_deselect() or mux_control_deselect() is called (by someone
+ * else).
+ *
+ * Therefore, make sure to call mux_state_deselect() when the operation is
+ * complete and the mux-control is free for others to use, but do not call
+ * mux_state_deselect() if mux_state_select() fails.
+ *
+ * Return: 0 when the mux-state has been selected or a negative
+ * errno on error.
+ */
+int mux_state_select_delay(struct mux_state *mstate, unsigned int delay_us)
+{
+ return mux_control_select_delay(mstate->mux, mstate->state, delay_us);
+}
+EXPORT_SYMBOL_GPL(mux_state_select_delay);
+
+/**
* mux_control_try_select_delay() - Try to select the given multiplexer state.
* @mux: The mux-control to request a change of state from.
* @state: The new requested state.
* @delay_us: The time to delay (in microseconds) if the mux state is changed.
*
* On successfully selecting the mux-control state, it will be locked until
- * mux_control_deselect() called.
+ * mux_control_deselect() is called.
*
* Therefore, make sure to call mux_control_deselect() when the operation is
* complete and the mux-control is free for others to use, but do not call
@@ -406,6 +445,27 @@ int mux_control_try_select_delay(struct mux_control *mux, unsigned int state,
EXPORT_SYMBOL_GPL(mux_control_try_select_delay);
/**
+ * mux_state_try_select_delay() - Try to select the given multiplexer state.
+ * @mstate: The mux-state to select.
+ * @delay_us: The time to delay (in microseconds) if the mux state is changed.
+ *
+ * On successfully selecting the mux-state, its mux-control will be locked
+ * until mux_state_deselect() is called.
+ *
+ * Therefore, make sure to call mux_state_deselect() when the operation is
+ * complete and the mux-control is free for others to use, but do not call
+ * mux_state_deselect() if mux_state_try_select() fails.
+ *
+ * Return: 0 when the mux-state has been selected or a negative errno on
+ * error. Specifically -EBUSY if the mux-control is contended.
+ */
+int mux_state_try_select_delay(struct mux_state *mstate, unsigned int delay_us)
+{
+ return mux_control_try_select_delay(mstate->mux, mstate->state, delay_us);
+}
+EXPORT_SYMBOL_GPL(mux_state_try_select_delay);
+
+/**
* mux_control_deselect() - Deselect the previously selected multiplexer state.
* @mux: The mux-control to deselect.
*
@@ -431,6 +491,24 @@ int mux_control_deselect(struct mux_control *mux)
}
EXPORT_SYMBOL_GPL(mux_control_deselect);
+/**
+ * mux_state_deselect() - Deselect the previously selected multiplexer state.
+ * @mstate: The mux-state to deselect.
+ *
+ * It is required that a single call is made to mux_state_deselect() for
+ * each and every successful call made to either of mux_state_select() or
+ * mux_state_try_select().
+ *
+ * Return: 0 on success and a negative errno on error. An error can only
+ * occur if the mux has an idle state. Note that even if an error occurs, the
+ * mux-control is unlocked and is thus free for the next access.
+ */
+int mux_state_deselect(struct mux_state *mstate)
+{
+ return mux_control_deselect(mstate->mux);
+}
+EXPORT_SYMBOL_GPL(mux_state_deselect);
+
/* Note this function returns a reference to the mux_chip dev. */
static struct mux_chip *of_find_mux_chip_by_node(struct device_node *np)
{
@@ -441,14 +519,17 @@ static struct mux_chip *of_find_mux_chip_by_node(struct device_node *np)
return dev ? to_mux_chip(dev) : NULL;
}
-/**
- * mux_control_get() - Get the mux-control for a device.
+/*
+ * mux_get() - Get the mux-control for a device.
* @dev: The device that needs a mux-control.
* @mux_name: The name identifying the mux-control.
+ * @state: Pointer to where the requested state is returned, or NULL when
+ * the required multiplexer states are handled by other means.
*
* Return: A pointer to the mux-control, or an ERR_PTR with a negative errno.
*/
-struct mux_control *mux_control_get(struct device *dev, const char *mux_name)
+static struct mux_control *mux_get(struct device *dev, const char *mux_name,
+ unsigned int *state)
{
struct device_node *np = dev->of_node;
struct of_phandle_args args;
@@ -458,8 +539,12 @@ struct mux_control *mux_control_get(struct device *dev, const char *mux_name)
int ret;
if (mux_name) {
- index = of_property_match_string(np, "mux-control-names",
- mux_name);
+ if (state)
+ index = of_property_match_string(np, "mux-state-names",
+ mux_name);
+ else
+ index = of_property_match_string(np, "mux-control-names",
+ mux_name);
if (index < 0) {
dev_err(dev, "mux controller '%s' not found\n",
mux_name);
@@ -467,12 +552,17 @@ struct mux_control *mux_control_get(struct device *dev, const char *mux_name)
}
}
- ret = of_parse_phandle_with_args(np,
- "mux-controls", "#mux-control-cells",
- index, &args);
+ if (state)
+ ret = of_parse_phandle_with_args(np,
+ "mux-states", "#mux-state-cells",
+ index, &args);
+ else
+ ret = of_parse_phandle_with_args(np,
+ "mux-controls", "#mux-control-cells",
+ index, &args);
if (ret) {
- dev_err(dev, "%pOF: failed to get mux-control %s(%i)\n",
- np, mux_name ?: "", index);
+ dev_err(dev, "%pOF: failed to get mux-%s %s(%i)\n",
+ np, state ? "state" : "control", mux_name ?: "", index);
return ERR_PTR(ret);
}
@@ -481,17 +571,35 @@ struct mux_control *mux_control_get(struct device *dev, const char *mux_name)
if (!mux_chip)
return ERR_PTR(-EPROBE_DEFER);
- if (args.args_count > 1 ||
- (!args.args_count && (mux_chip->controllers > 1))) {
- dev_err(dev, "%pOF: wrong #mux-control-cells for %pOF\n",
- np, args.np);
- put_device(&mux_chip->dev);
- return ERR_PTR(-EINVAL);
- }
-
controller = 0;
- if (args.args_count)
- controller = args.args[0];
+ if (state) {
+ if (args.args_count > 2 || args.args_count == 0 ||
+ (args.args_count < 2 && mux_chip->controllers > 1)) {
+ dev_err(dev, "%pOF: wrong #mux-state-cells for %pOF\n",
+ np, args.np);
+ put_device(&mux_chip->dev);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (args.args_count == 2) {
+ controller = args.args[0];
+ *state = args.args[1];
+ } else {
+ *state = args.args[0];
+ }
+
+ } else {
+ if (args.args_count > 1 ||
+ (!args.args_count && mux_chip->controllers > 1)) {
+ dev_err(dev, "%pOF: wrong #mux-control-cells for %pOF\n",
+ np, args.np);
+ put_device(&mux_chip->dev);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (args.args_count)
+ controller = args.args[0];
+ }
if (controller >= mux_chip->controllers) {
dev_err(dev, "%pOF: bad mux controller %u specified in %pOF\n",
@@ -502,6 +610,18 @@ struct mux_control *mux_control_get(struct device *dev, const char *mux_name)
return &mux_chip->mux[controller];
}
+
+/**
+ * mux_control_get() - Get the mux-control for a device.
+ * @dev: The device that needs a mux-control.
+ * @mux_name: The name identifying the mux-control.
+ *
+ * Return: A pointer to the mux-control, or an ERR_PTR with a negative errno.
+ */
+struct mux_control *mux_control_get(struct device *dev, const char *mux_name)
+{
+ return mux_get(dev, mux_name, NULL);
+}
EXPORT_SYMBOL_GPL(mux_control_get);
/**
@@ -554,6 +674,81 @@ struct mux_control *devm_mux_control_get(struct device *dev,
EXPORT_SYMBOL_GPL(devm_mux_control_get);
/*
+ * mux_state_get() - Get the mux-state for a device.
+ * @dev: The device that needs a mux-state.
+ * @mux_name: The name identifying the mux-state.
+ *
+ * Return: A pointer to the mux-state, or an ERR_PTR with a negative errno.
+ */
+static struct mux_state *mux_state_get(struct device *dev, const char *mux_name)
+{
+ struct mux_state *mstate;
+
+ mstate = kzalloc(sizeof(*mstate), GFP_KERNEL);
+ if (!mstate)
+ return ERR_PTR(-ENOMEM);
+
+ mstate->mux = mux_get(dev, mux_name, &mstate->state);
+ if (IS_ERR(mstate->mux)) {
+ int err = PTR_ERR(mstate->mux);
+
+ kfree(mstate);
+ return ERR_PTR(err);
+ }
+
+ return mstate;
+}
+
+/*
+ * mux_state_put() - Put away the mux-state for good.
+ * @mstate: The mux-state to put away.
+ *
+ * mux_state_put() reverses the effects of mux_state_get().
+ */
+static void mux_state_put(struct mux_state *mstate)
+{
+ mux_control_put(mstate->mux);
+ kfree(mstate);
+}
+
+static void devm_mux_state_release(struct device *dev, void *res)
+{
+ struct mux_state *mstate = *(struct mux_state **)res;
+
+ mux_state_put(mstate);
+}
+
+/**
+ * devm_mux_state_get() - Get the mux-state for a device, with resource
+ * management.
+ * @dev: The device that needs a mux-state.
+ * @mux_name: The name identifying the mux-state.
+ *
+ * Return: Pointer to the mux-state, or an ERR_PTR with a negative errno.
+ */
+struct mux_state *devm_mux_state_get(struct device *dev,
+ const char *mux_name)
+{
+ struct mux_state **ptr, *mstate;
+
+ ptr = devres_alloc(devm_mux_state_release, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return ERR_PTR(-ENOMEM);
+
+ mstate = mux_state_get(dev, mux_name);
+ if (IS_ERR(mstate)) {
+ devres_free(ptr);
+ return mstate;
+ }
+
+ *ptr = mstate;
+ devres_add(dev, ptr);
+
+ return mstate;
+}
+EXPORT_SYMBOL_GPL(devm_mux_state_get);
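
A minimal consumer sketch for the new mux-state API above, assuming the declarations are exposed via linux/mux/consumer.h and that the consumer's device tree node has a mux-states entry named "tx" (both the header location and the name are assumptions for illustration):

/* Illustrative consumer sketch, not part of the patch above. */
#include <linux/mux/consumer.h>

static int demo_select_tx_path(struct device *dev)
{
	struct mux_state *mstate;
	int ret;

	mstate = devm_mux_state_get(dev, "tx");	/* resolves mux-states/"tx" */
	if (IS_ERR(mstate))
		return PTR_ERR(mstate);

	/* Locks the underlying mux-control and applies the encoded state. */
	ret = mux_state_select_delay(mstate, 0);
	if (ret)
		return ret;

	/* ... use the selected signal path ... */

	return mux_state_deselect(mstate);	/* unlock for other users */
}
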
+
+/*
* Using subsys_initcall instead of module_init here to try to ensure - for
* the non-modular case - that the subsystem is initialized when mux consumers
* and mux controllers start to use it.
diff --git a/drivers/nvmem/Kconfig b/drivers/nvmem/Kconfig
index da414617a54d..555aa77a574d 100644
--- a/drivers/nvmem/Kconfig
+++ b/drivers/nvmem/Kconfig
@@ -300,4 +300,28 @@ config NVMEM_BRCM_NVRAM
This driver provides support for Broadcom's NVRAM that can be accessed
using I/O mapping.
+config NVMEM_LAYERSCAPE_SFP
+ tristate "Layerscape SFP (Security Fuse Processor) support"
+ depends on ARCH_LAYERSCAPE || COMPILE_TEST
+ depends on HAS_IOMEM
+ help
+ This driver provides support to read the eFuses on Freescale
+	  This driver provides support for reading the eFuses on Freescale
+	  Layerscape SoCs. For example, the vendor provides a per-part
+	  unique ID there.
+ This driver can also be built as a module. If so, the module
+ will be called layerscape-sfp.
+
+config NVMEM_SUNPLUS_OCOTP
+ tristate "Sunplus SoC OTP support"
+ depends on SOC_SP7021 || COMPILE_TEST
+ depends on HAS_IOMEM
+ help
+ This is a driver for the On-chip OTP controller (OCOTP) available
+	  on Sunplus SoCs. It provides access to 128 bytes of one-time
+	  programmable eFuse memory.
+
+ This driver can also be built as a module. If so, the module
+ will be called nvmem-sunplus-ocotp.
+
endif
diff --git a/drivers/nvmem/Makefile b/drivers/nvmem/Makefile
index dcbbde35b6a8..891958e29d25 100644
--- a/drivers/nvmem/Makefile
+++ b/drivers/nvmem/Makefile
@@ -61,3 +61,7 @@ obj-$(CONFIG_NVMEM_RMEM) += nvmem-rmem.o
nvmem-rmem-y := rmem.o
obj-$(CONFIG_NVMEM_BRCM_NVRAM) += nvmem_brcm_nvram.o
nvmem_brcm_nvram-y := brcm_nvram.o
+obj-$(CONFIG_NVMEM_LAYERSCAPE_SFP) += nvmem-layerscape-sfp.o
+nvmem-layerscape-sfp-y := layerscape-sfp.o
+obj-$(CONFIG_NVMEM_SUNPLUS_OCOTP) += nvmem_sunplus_ocotp.o
+nvmem_sunplus_ocotp-y := sunplus-ocotp.o
diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
index 9fd1602b539d..f58d9bc7aa08 100644
--- a/drivers/nvmem/core.c
+++ b/drivers/nvmem/core.c
@@ -903,13 +903,14 @@ static void nvmem_device_release(struct kref *kref)
*/
void nvmem_unregister(struct nvmem_device *nvmem)
{
- kref_put(&nvmem->refcnt, nvmem_device_release);
+ if (nvmem)
+ kref_put(&nvmem->refcnt, nvmem_device_release);
}
EXPORT_SYMBOL_GPL(nvmem_unregister);
-static void devm_nvmem_release(struct device *dev, void *res)
+static void devm_nvmem_unregister(void *nvmem)
{
- nvmem_unregister(*(struct nvmem_device **)res);
+ nvmem_unregister(nvmem);
}
/**
@@ -926,47 +927,21 @@ static void devm_nvmem_release(struct device *dev, void *res)
struct nvmem_device *devm_nvmem_register(struct device *dev,
const struct nvmem_config *config)
{
- struct nvmem_device **ptr, *nvmem;
-
- ptr = devres_alloc(devm_nvmem_release, sizeof(*ptr), GFP_KERNEL);
- if (!ptr)
- return ERR_PTR(-ENOMEM);
+ struct nvmem_device *nvmem;
+ int ret;
nvmem = nvmem_register(config);
+ if (IS_ERR(nvmem))
+ return nvmem;
- if (!IS_ERR(nvmem)) {
- *ptr = nvmem;
- devres_add(dev, ptr);
- } else {
- devres_free(ptr);
- }
+ ret = devm_add_action_or_reset(dev, devm_nvmem_unregister, nvmem);
+ if (ret)
+ return ERR_PTR(ret);
return nvmem;
}
EXPORT_SYMBOL_GPL(devm_nvmem_register);
-static int devm_nvmem_match(struct device *dev, void *res, void *data)
-{
- struct nvmem_device **r = res;
-
- return *r == data;
-}
-
-/**
- * devm_nvmem_unregister() - Unregister previously registered managed nvmem
- * device.
- *
- * @dev: Device that uses the nvmem device.
- * @nvmem: Pointer to previously registered nvmem device.
- *
- * Return: Will be negative on error or zero on success.
- */
-int devm_nvmem_unregister(struct device *dev, struct nvmem_device *nvmem)
-{
- return devres_release(dev, devm_nvmem_release, devm_nvmem_match, nvmem);
-}
-EXPORT_SYMBOL(devm_nvmem_unregister);
-
static struct nvmem_device *__nvmem_device_get(void *data,
int (*match)(struct device *dev, const void *data))
{
diff --git a/drivers/nvmem/layerscape-sfp.c b/drivers/nvmem/layerscape-sfp.c
new file mode 100644
index 000000000000..e591c1511e33
--- /dev/null
+++ b/drivers/nvmem/layerscape-sfp.c
@@ -0,0 +1,89 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Layerscape SFP driver
+ *
+ * Copyright (c) 2022 Michael Walle <michael@walle.cc>
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/nvmem-provider.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+
+#define LAYERSCAPE_SFP_OTP_OFFSET 0x0200
+
+struct layerscape_sfp_priv {
+ void __iomem *base;
+};
+
+struct layerscape_sfp_data {
+ int size;
+};
+
+static int layerscape_sfp_read(void *context, unsigned int offset, void *val,
+ size_t bytes)
+{
+ struct layerscape_sfp_priv *priv = context;
+
+ memcpy_fromio(val, priv->base + LAYERSCAPE_SFP_OTP_OFFSET + offset,
+ bytes);
+
+ return 0;
+}
+
+static struct nvmem_config layerscape_sfp_nvmem_config = {
+ .name = "fsl-sfp",
+ .reg_read = layerscape_sfp_read,
+};
+
+static int layerscape_sfp_probe(struct platform_device *pdev)
+{
+ const struct layerscape_sfp_data *data;
+ struct layerscape_sfp_priv *priv;
+ struct nvmem_device *nvmem;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ data = device_get_match_data(&pdev->dev);
+
+ layerscape_sfp_nvmem_config.size = data->size;
+ layerscape_sfp_nvmem_config.dev = &pdev->dev;
+ layerscape_sfp_nvmem_config.priv = priv;
+
+ nvmem = devm_nvmem_register(&pdev->dev, &layerscape_sfp_nvmem_config);
+
+ return PTR_ERR_OR_ZERO(nvmem);
+}
+
+static const struct layerscape_sfp_data ls1028a_data = {
+ .size = 0x88,
+};
+
+static const struct of_device_id layerscape_sfp_dt_ids[] = {
+ { .compatible = "fsl,ls1028a-sfp", .data = &ls1028a_data },
+ {},
+};
+MODULE_DEVICE_TABLE(of, layerscape_sfp_dt_ids);
+
+static struct platform_driver layerscape_sfp_driver = {
+ .probe = layerscape_sfp_probe,
+ .driver = {
+ .name = "layerscape_sfp",
+ .of_match_table = layerscape_sfp_dt_ids,
+ },
+};
+module_platform_driver(layerscape_sfp_driver);
+
+MODULE_AUTHOR("Michael Walle <michael@walle.cc>");
+MODULE_DESCRIPTION("Layerscape Security Fuse Processor driver");
+MODULE_LICENSE("GPL");
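
Reading the fuse area exposed by this provider goes through the in-kernel nvmem consumer API; a minimal sketch, assuming the consumer can resolve the device by the "fsl-sfp" name set in the nvmem_config above (the lookup name and the 8-byte read size are assumptions for illustration):

/* Illustrative consumer sketch, not part of the driver above. */
#include <linux/nvmem-consumer.h>

static int demo_read_sfp_fuses(struct device *dev)
{
	struct nvmem_device *nvmem;
	u8 buf[8];
	int ret;

	nvmem = devm_nvmem_device_get(dev, "fsl-sfp");
	if (IS_ERR(nvmem))
		return PTR_ERR(nvmem);

	/* Read 8 bytes starting at offset 0 of the exposed OTP window. */
	ret = nvmem_device_read(nvmem, 0, sizeof(buf), buf);

	return ret < 0 ? ret : 0;
}
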
diff --git a/drivers/nvmem/meson-mx-efuse.c b/drivers/nvmem/meson-mx-efuse.c
index 07c9f38c1c60..13eb14316f46 100644
--- a/drivers/nvmem/meson-mx-efuse.c
+++ b/drivers/nvmem/meson-mx-efuse.c
@@ -209,8 +209,7 @@ static int meson_mx_efuse_probe(struct platform_device *pdev)
if (IS_ERR(efuse->base))
return PTR_ERR(efuse->base);
- efuse->config.name = devm_kstrdup(&pdev->dev, drvdata->name,
- GFP_KERNEL);
+ efuse->config.name = drvdata->name;
efuse->config.owner = THIS_MODULE;
efuse->config.dev = &pdev->dev;
efuse->config.priv = efuse;
diff --git a/drivers/nvmem/qfprom.c b/drivers/nvmem/qfprom.c
index c500d6235bf6..162132c7dab9 100644
--- a/drivers/nvmem/qfprom.c
+++ b/drivers/nvmem/qfprom.c
@@ -22,7 +22,7 @@
/* Amount of time required to hold charge to blow fuse in micro-seconds */
#define QFPROM_FUSE_BLOW_POLL_US 100
-#define QFPROM_FUSE_BLOW_TIMEOUT_US 1000
+#define QFPROM_FUSE_BLOW_TIMEOUT_US 10000
#define QFPROM_BLOW_STATUS_OFFSET 0x048
#define QFPROM_BLOW_STATUS_BUSY 0x1
@@ -244,7 +244,7 @@ err_clk_prepared:
}
/**
- * qfprom_efuse_reg_write() - Write to fuses.
+ * qfprom_reg_write() - Write to fuses.
* @context: Our driver data.
* @reg: The offset to write at.
* @_val: Pointer to data to write.
diff --git a/drivers/nvmem/sunplus-ocotp.c b/drivers/nvmem/sunplus-ocotp.c
new file mode 100644
index 000000000000..2dc59c22eb55
--- /dev/null
+++ b/drivers/nvmem/sunplus-ocotp.c
@@ -0,0 +1,228 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * The OCOTP driver for Sunplus SP7021
+ *
+ * Copyright (C) 2019 Sunplus Technology Inc., All rights reserved.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/nvmem-provider.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+/*
+ * OTP memory
+ * Each bank contains 4 words (32 bits each).
+ * Bank 0 starts at offset 0 from the base.
+ */
+
+#define OTP_WORDS_PER_BANK 4
+#define OTP_WORD_SIZE sizeof(u32)
+#define OTP_BIT_ADDR_OF_BANK (8 * OTP_WORD_SIZE * OTP_WORDS_PER_BANK)
+#define QAC628_OTP_NUM_BANKS 8
+#define QAC628_OTP_SIZE (QAC628_OTP_NUM_BANKS * OTP_WORDS_PER_BANK * OTP_WORD_SIZE)
+#define OTP_READ_TIMEOUT_US 200000
+
+/* HB_GPIO */
+#define ADDRESS_8_DATA 0x20
+
+/* OTP_RX */
+#define OTP_CONTROL_2 0x48
+#define OTP_RD_PERIOD GENMASK(15, 8)
+#define OTP_RD_PERIOD_MASK ~GENMASK(15, 8)
+#define CPU_CLOCK FIELD_PREP(OTP_RD_PERIOD, 30)
+#define SEL_BAK_KEY2 BIT(5)
+#define SEL_BAK_KEY2_MASK ~BIT(5)
+#define SW_TRIM_EN BIT(4)
+#define SW_TRIM_EN_MASK ~BIT(4)
+#define SEL_BAK_KEY BIT(3)
+#define SEL_BAK_KEY_MASK ~BIT(3)
+#define OTP_READ BIT(2)
+#define OTP_LOAD_SECURE_DATA BIT(1)
+#define OTP_LOAD_SECURE_DATA_MASK ~BIT(1)
+#define OTP_DO_CRC BIT(0)
+#define OTP_DO_CRC_MASK ~BIT(0)
+#define OTP_STATUS 0x4c
+#define OTP_READ_DONE BIT(4)
+#define OTP_READ_DONE_MASK ~BIT(4)
+#define OTP_LOAD_SECURE_DONE_MASK ~BIT(2)
+#define OTP_READ_ADDRESS 0x50
+
+enum base_type {
+ HB_GPIO,
+ OTPRX,
+ BASEMAX,
+};
+
+struct sp_ocotp_priv {
+ struct device *dev;
+ void __iomem *base[BASEMAX];
+ struct clk *clk;
+};
+
+struct sp_ocotp_data {
+ int size;
+};
+
+const struct sp_ocotp_data sp_otp_v0 = {
+ .size = QAC628_OTP_SIZE,
+};
+
+static int sp_otp_read_real(struct sp_ocotp_priv *otp, int addr, char *value)
+{
+ unsigned int addr_data;
+ unsigned int byte_shift;
+ unsigned int status;
+ int ret;
+
+ addr_data = addr % (OTP_WORD_SIZE * OTP_WORDS_PER_BANK);
+ addr_data = addr_data / OTP_WORD_SIZE;
+
+ byte_shift = addr % (OTP_WORD_SIZE * OTP_WORDS_PER_BANK);
+ byte_shift = byte_shift % OTP_WORD_SIZE;
+
+ addr = addr / (OTP_WORD_SIZE * OTP_WORDS_PER_BANK);
+ addr = addr * OTP_BIT_ADDR_OF_BANK;
+
+ writel(readl(otp->base[OTPRX] + OTP_STATUS) & OTP_READ_DONE_MASK &
+ OTP_LOAD_SECURE_DONE_MASK, otp->base[OTPRX] + OTP_STATUS);
+ writel(addr, otp->base[OTPRX] + OTP_READ_ADDRESS);
+ writel(readl(otp->base[OTPRX] + OTP_CONTROL_2) | OTP_READ,
+ otp->base[OTPRX] + OTP_CONTROL_2);
+ writel(readl(otp->base[OTPRX] + OTP_CONTROL_2) & SEL_BAK_KEY2_MASK & SW_TRIM_EN_MASK
+ & SEL_BAK_KEY_MASK & OTP_LOAD_SECURE_DATA_MASK & OTP_DO_CRC_MASK,
+ otp->base[OTPRX] + OTP_CONTROL_2);
+ writel((readl(otp->base[OTPRX] + OTP_CONTROL_2) & OTP_RD_PERIOD_MASK) | CPU_CLOCK,
+ otp->base[OTPRX] + OTP_CONTROL_2);
+
+ ret = readl_poll_timeout(otp->base[OTPRX] + OTP_STATUS, status,
+ status & OTP_READ_DONE, 10, OTP_READ_TIMEOUT_US);
+
+ if (ret < 0)
+ return ret;
+
+ *value = (readl(otp->base[HB_GPIO] + ADDRESS_8_DATA + addr_data * OTP_WORD_SIZE)
+ >> (8 * byte_shift)) & 0xff;
+
+ return ret;
+}
+
+static int sp_ocotp_read(void *priv, unsigned int offset, void *value, size_t bytes)
+{
+ struct sp_ocotp_priv *otp = priv;
+ unsigned int addr;
+ char *buf = value;
+ char val[4];
+ int ret;
+
+ ret = clk_enable(otp->clk);
+ if (ret)
+ return ret;
+
+ *buf = 0;
+ for (addr = offset; addr < (offset + bytes); addr++) {
+ ret = sp_otp_read_real(otp, addr, val);
+ if (ret < 0) {
+ dev_err(otp->dev, "OTP read fail:%d at %d", ret, addr);
+ goto disable_clk;
+ }
+
+ *buf++ = *val;
+ }
+
+disable_clk:
+ clk_disable(otp->clk);
+
+ return ret;
+}
+
+static struct nvmem_config sp_ocotp_nvmem_config = {
+ .name = "sp-ocotp",
+ .read_only = true,
+ .word_size = 1,
+ .size = QAC628_OTP_SIZE,
+ .stride = 1,
+ .reg_read = sp_ocotp_read,
+ .owner = THIS_MODULE,
+};
+
+static int sp_ocotp_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct nvmem_device *nvmem;
+ struct sp_ocotp_priv *otp;
+ struct resource *res;
+ int ret;
+
+ otp = devm_kzalloc(dev, sizeof(*otp), GFP_KERNEL);
+ if (!otp)
+ return -ENOMEM;
+
+ otp->dev = dev;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hb_gpio");
+ otp->base[HB_GPIO] = devm_ioremap_resource(dev, res);
+ if (IS_ERR(otp->base[HB_GPIO]))
+ return PTR_ERR(otp->base[HB_GPIO]);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "otprx");
+ otp->base[OTPRX] = devm_ioremap_resource(dev, res);
+ if (IS_ERR(otp->base[OTPRX]))
+ return PTR_ERR(otp->base[OTPRX]);
+
+ otp->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(otp->clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(otp->clk),
+ "devm_clk_get fail\n");
+
+ ret = clk_prepare(otp->clk);
+ if (ret < 0) {
+ dev_err(dev, "failed to prepare clk: %d\n", ret);
+ return ret;
+ }
+
+ sp_ocotp_nvmem_config.priv = otp;
+ sp_ocotp_nvmem_config.dev = dev;
+
+ nvmem = devm_nvmem_register(dev, &sp_ocotp_nvmem_config);
+ if (IS_ERR(nvmem))
+ return dev_err_probe(&pdev->dev, PTR_ERR(nvmem),
+ "register nvmem device fail\n");
+
+ platform_set_drvdata(pdev, nvmem);
+
+ dev_dbg(dev, "banks:%d x wpb:%d x wsize:%d = %d",
+ (int)QAC628_OTP_NUM_BANKS, (int)OTP_WORDS_PER_BANK,
+ (int)OTP_WORD_SIZE, (int)QAC628_OTP_SIZE);
+
+ dev_info(dev, "by Sunplus (C) 2020");
+
+ return 0;
+}
+
+static const struct of_device_id sp_ocotp_dt_ids[] = {
+ { .compatible = "sunplus,sp7021-ocotp", .data = &sp_otp_v0 },
+ { }
+};
+MODULE_DEVICE_TABLE(of, sp_ocotp_dt_ids);
+
+static struct platform_driver sp_otp_driver = {
+ .probe = sp_ocotp_probe,
+ .driver = {
+ .name = "sunplus,sp7021-ocotp",
+ .of_match_table = sp_ocotp_dt_ids,
+ }
+};
+module_platform_driver(sp_otp_driver);
+
+MODULE_AUTHOR("Vincent Shih <vincent.sunplus@gmail.com>");
+MODULE_DESCRIPTION("Sunplus On-Chip OTP driver");
+MODULE_LICENSE("GPL");
+
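
The address arithmetic in sp_otp_read_real() above splits a flat byte offset into a word index within a bank, a byte lane inside that word, and a bit address of the bank itself. A worked example, written as a comment so the numbers can be checked against the macros above:

/*
 * Worked example for byte offset addr = 21, with OTP_WORD_SIZE = 4 and
 * OTP_WORDS_PER_BANK = 4 (i.e. 16 bytes per bank):
 *
 *   addr_data  = (21 % 16) / 4 = 1                  second word of the bank
 *   byte_shift = (21 % 16) % 4 = 1                  second byte of that word
 *   addr       = (21 / 16) * OTP_BIT_ADDR_OF_BANK = 1 * 128 = 128
 *                                                   bit address of bank 1
 *
 * The byte is then extracted from the HB_GPIO data window with
 * (readl(...) >> (8 * byte_shift)) & 0xff.
 */
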
diff --git a/drivers/nvmem/sunxi_sid.c b/drivers/nvmem/sunxi_sid.c
index 275b9155e473..5750e1f4bcdb 100644
--- a/drivers/nvmem/sunxi_sid.c
+++ b/drivers/nvmem/sunxi_sid.c
@@ -184,6 +184,11 @@ static const struct sunxi_sid_cfg sun8i_h3_cfg = {
.need_register_readout = true,
};
+static const struct sunxi_sid_cfg sun20i_d1_cfg = {
+ .value_offset = 0x200,
+ .size = 0x100,
+};
+
static const struct sunxi_sid_cfg sun50i_a64_cfg = {
.value_offset = 0x200,
.size = 0x100,
@@ -200,6 +205,7 @@ static const struct of_device_id sunxi_sid_of_match[] = {
{ .compatible = "allwinner,sun7i-a20-sid", .data = &sun7i_a20_cfg },
{ .compatible = "allwinner,sun8i-a83t-sid", .data = &sun50i_a64_cfg },
{ .compatible = "allwinner,sun8i-h3-sid", .data = &sun8i_h3_cfg },
+ { .compatible = "allwinner,sun20i-d1-sid", .data = &sun20i_d1_cfg },
{ .compatible = "allwinner,sun50i-a64-sid", .data = &sun50i_a64_cfg },
{ .compatible = "allwinner,sun50i-h5-sid", .data = &sun50i_a64_cfg },
{ .compatible = "allwinner,sun50i-h6-sid", .data = &sun50i_h6_cfg },
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index 793350028906..a16b74f32aa9 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -514,6 +514,7 @@ static const struct of_device_id reserved_mem_matches[] = {
{ .compatible = "qcom,smem" },
{ .compatible = "ramoops" },
{ .compatible = "nvmem-rmem" },
+ { .compatible = "google,open-dice" },
{}
};
diff --git a/drivers/peci/Kconfig b/drivers/peci/Kconfig
new file mode 100644
index 000000000000..89872ad83320
--- /dev/null
+++ b/drivers/peci/Kconfig
@@ -0,0 +1,36 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+menuconfig PECI
+ tristate "PECI support"
+ help
+ The Platform Environment Control Interface (PECI) is an interface
+ that provides a communication channel to Intel processors and
+ chipset components from external monitoring or control devices.
+
+ If you are building a Baseboard Management Controller (BMC) kernel
+	  for an Intel platform, say Y here and also to the specific driver
+	  for your adapter(s) below. If unsure, say N.
+
+ This support is also available as a module. If so, the module
+ will be called peci.
+
+if PECI
+
+config PECI_CPU
+ tristate "PECI CPU"
+ select AUXILIARY_BUS
+ help
+	  This option enables the peci-cpu driver for Intel processors. It is
+	  responsible for creating auxiliary devices that can subsequently
+	  be used by other drivers to perform various functions such as
+	  temperature monitoring.
+
+ Additional drivers must be enabled in order to use the functionality
+ of the device.
+
+ This driver can also be built as a module. If so, the module
+ will be called peci-cpu.
+
+source "drivers/peci/controller/Kconfig"
+
+endif # PECI
diff --git a/drivers/peci/Makefile b/drivers/peci/Makefile
new file mode 100644
index 000000000000..7de18137e738
--- /dev/null
+++ b/drivers/peci/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+# Core functionality
+peci-y := core.o request.o device.o sysfs.o
+obj-$(CONFIG_PECI) += peci.o
+peci-cpu-y := cpu.o
+obj-$(CONFIG_PECI_CPU) += peci-cpu.o
+
+# Hardware specific bus drivers
+obj-y += controller/
diff --git a/drivers/peci/controller/Kconfig b/drivers/peci/controller/Kconfig
new file mode 100644
index 000000000000..2fc5e2abb74a
--- /dev/null
+++ b/drivers/peci/controller/Kconfig
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config PECI_ASPEED
+ tristate "ASPEED PECI support"
+ depends on ARCH_ASPEED || COMPILE_TEST
+ depends on OF
+ depends on HAS_IOMEM
+ depends on COMMON_CLK
+ help
+	  This option enables the PECI controller driver for ASPEED AST2400,
+	  AST2500 and AST2600 SoCs. It allows the BMC to discover devices
+	  connected to it and communicate with them using the PECI protocol.
+
+	  Say Y here if your system runs on an ASPEED SoC and you are using
+	  it as the BMC for an Intel platform.
+
+ This driver can also be built as a module. If so, the module will
+ be called peci-aspeed.
diff --git a/drivers/peci/controller/Makefile b/drivers/peci/controller/Makefile
new file mode 100644
index 000000000000..022c28ef1bf0
--- /dev/null
+++ b/drivers/peci/controller/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+obj-$(CONFIG_PECI_ASPEED) += peci-aspeed.o
diff --git a/drivers/peci/controller/peci-aspeed.c b/drivers/peci/controller/peci-aspeed.c
new file mode 100644
index 000000000000..1925ddc13f00
--- /dev/null
+++ b/drivers/peci/controller/peci-aspeed.c
@@ -0,0 +1,599 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (c) 2012-2017 ASPEED Technology Inc.
+// Copyright (c) 2018-2021 Intel Corporation
+
+#include <asm/unaligned.h>
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/jiffies.h>
+#include <linux/math.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/peci.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+
+/* ASPEED PECI Registers */
+/* Control Register */
+#define ASPEED_PECI_CTRL 0x00
+#define ASPEED_PECI_CTRL_SAMPLING_MASK GENMASK(19, 16)
+#define ASPEED_PECI_CTRL_RD_MODE_MASK GENMASK(13, 12)
+#define ASPEED_PECI_CTRL_RD_MODE_DBG BIT(13)
+#define ASPEED_PECI_CTRL_RD_MODE_COUNT BIT(12)
+#define ASPEED_PECI_CTRL_CLK_SRC_HCLK BIT(11)
+#define ASPEED_PECI_CTRL_CLK_DIV_MASK GENMASK(10, 8)
+#define ASPEED_PECI_CTRL_INVERT_OUT BIT(7)
+#define ASPEED_PECI_CTRL_INVERT_IN BIT(6)
+#define ASPEED_PECI_CTRL_BUS_CONTENTION_EN BIT(5)
+#define ASPEED_PECI_CTRL_PECI_EN BIT(4)
+#define ASPEED_PECI_CTRL_PECI_CLK_EN BIT(0)
+
+/* Timing Negotiation Register */
+#define ASPEED_PECI_TIMING_NEGOTIATION 0x04
+#define ASPEED_PECI_T_NEGO_MSG_MASK GENMASK(15, 8)
+#define ASPEED_PECI_T_NEGO_ADDR_MASK GENMASK(7, 0)
+
+/* Command Register */
+#define ASPEED_PECI_CMD 0x08
+#define ASPEED_PECI_CMD_PIN_MONITORING BIT(31)
+#define ASPEED_PECI_CMD_STS_MASK GENMASK(27, 24)
+#define ASPEED_PECI_CMD_STS_ADDR_T_NEGO 0x3
+#define ASPEED_PECI_CMD_IDLE_MASK \
+ (ASPEED_PECI_CMD_STS_MASK | ASPEED_PECI_CMD_PIN_MONITORING)
+#define ASPEED_PECI_CMD_FIRE BIT(0)
+
+/* Read/Write Length Register */
+#define ASPEED_PECI_RW_LENGTH 0x0c
+#define ASPEED_PECI_AW_FCS_EN BIT(31)
+#define ASPEED_PECI_RD_LEN_MASK GENMASK(23, 16)
+#define ASPEED_PECI_WR_LEN_MASK GENMASK(15, 8)
+#define ASPEED_PECI_TARGET_ADDR_MASK GENMASK(7, 0)
+
+/* Expected FCS Data Register */
+#define ASPEED_PECI_EXPECTED_FCS 0x10
+#define ASPEED_PECI_EXPECTED_RD_FCS_MASK GENMASK(23, 16)
+#define ASPEED_PECI_EXPECTED_AW_FCS_AUTO_MASK GENMASK(15, 8)
+#define ASPEED_PECI_EXPECTED_WR_FCS_MASK GENMASK(7, 0)
+
+/* Captured FCS Data Register */
+#define ASPEED_PECI_CAPTURED_FCS 0x14
+#define ASPEED_PECI_CAPTURED_RD_FCS_MASK GENMASK(23, 16)
+#define ASPEED_PECI_CAPTURED_WR_FCS_MASK GENMASK(7, 0)
+
+/* Interrupt Register */
+#define ASPEED_PECI_INT_CTRL 0x18
+#define ASPEED_PECI_TIMING_NEGO_SEL_MASK GENMASK(31, 30)
+#define ASPEED_PECI_1ST_BIT_OF_ADDR_NEGO 0
+#define ASPEED_PECI_2ND_BIT_OF_ADDR_NEGO 1
+#define ASPEED_PECI_MESSAGE_NEGO 2
+#define ASPEED_PECI_INT_MASK GENMASK(4, 0)
+#define ASPEED_PECI_INT_BUS_TIMEOUT BIT(4)
+#define ASPEED_PECI_INT_BUS_CONTENTION BIT(3)
+#define ASPEED_PECI_INT_WR_FCS_BAD BIT(2)
+#define ASPEED_PECI_INT_WR_FCS_ABORT BIT(1)
+#define ASPEED_PECI_INT_CMD_DONE BIT(0)
+
+/* Interrupt Status Register */
+#define ASPEED_PECI_INT_STS 0x1c
+#define ASPEED_PECI_INT_TIMING_RESULT_MASK GENMASK(29, 16)
+ /* bits[4..0]: Same bit fields in the 'Interrupt Register' */
+
+/* Rx/Tx Data Buffer Registers */
+#define ASPEED_PECI_WR_DATA0 0x20
+#define ASPEED_PECI_WR_DATA1 0x24
+#define ASPEED_PECI_WR_DATA2 0x28
+#define ASPEED_PECI_WR_DATA3 0x2c
+#define ASPEED_PECI_RD_DATA0 0x30
+#define ASPEED_PECI_RD_DATA1 0x34
+#define ASPEED_PECI_RD_DATA2 0x38
+#define ASPEED_PECI_RD_DATA3 0x3c
+#define ASPEED_PECI_WR_DATA4 0x40
+#define ASPEED_PECI_WR_DATA5 0x44
+#define ASPEED_PECI_WR_DATA6 0x48
+#define ASPEED_PECI_WR_DATA7 0x4c
+#define ASPEED_PECI_RD_DATA4 0x50
+#define ASPEED_PECI_RD_DATA5 0x54
+#define ASPEED_PECI_RD_DATA6 0x58
+#define ASPEED_PECI_RD_DATA7 0x5c
+#define ASPEED_PECI_DATA_BUF_SIZE_MAX 32
+
+/* Timing Negotiation */
+#define ASPEED_PECI_CLK_FREQUENCY_MIN 2000
+#define ASPEED_PECI_CLK_FREQUENCY_DEFAULT 1000000
+#define ASPEED_PECI_CLK_FREQUENCY_MAX 2000000
+#define ASPEED_PECI_RD_SAMPLING_POINT_DEFAULT 8
+/* Timeout */
+#define ASPEED_PECI_IDLE_CHECK_TIMEOUT_US (50 * USEC_PER_MSEC)
+#define ASPEED_PECI_IDLE_CHECK_INTERVAL_US (10 * USEC_PER_MSEC)
+#define ASPEED_PECI_CMD_TIMEOUT_MS_DEFAULT 1000
+#define ASPEED_PECI_CMD_TIMEOUT_MS_MAX 1000
+
+#define ASPEED_PECI_CLK_DIV1(msg_timing) (4 * (msg_timing) + 1)
+#define ASPEED_PECI_CLK_DIV2(clk_div_exp) BIT(clk_div_exp)
+#define ASPEED_PECI_CLK_DIV(msg_timing, clk_div_exp) \
+ (4 * ASPEED_PECI_CLK_DIV1(msg_timing) * ASPEED_PECI_CLK_DIV2(clk_div_exp))
+
+struct aspeed_peci {
+ struct peci_controller *controller;
+ struct device *dev;
+ void __iomem *base;
+ struct reset_control *rst;
+ int irq;
+ spinlock_t lock; /* to sync completion status handling */
+ struct completion xfer_complete;
+ struct clk *clk;
+ u32 clk_frequency;
+ u32 status;
+ u32 cmd_timeout_ms;
+};
+
+struct clk_aspeed_peci {
+ struct clk_hw hw;
+ struct aspeed_peci *aspeed_peci;
+};
+
+static void aspeed_peci_controller_enable(struct aspeed_peci *priv)
+{
+ u32 val = readl(priv->base + ASPEED_PECI_CTRL);
+
+ val |= ASPEED_PECI_CTRL_PECI_CLK_EN;
+ val |= ASPEED_PECI_CTRL_PECI_EN;
+
+ writel(val, priv->base + ASPEED_PECI_CTRL);
+}
+
+static void aspeed_peci_init_regs(struct aspeed_peci *priv)
+{
+ u32 val;
+
+ /* Clear interrupts */
+ writel(ASPEED_PECI_INT_MASK, priv->base + ASPEED_PECI_INT_STS);
+
+ /* Set timing negotiation mode and enable interrupts */
+ val = FIELD_PREP(ASPEED_PECI_TIMING_NEGO_SEL_MASK, ASPEED_PECI_1ST_BIT_OF_ADDR_NEGO);
+ val |= ASPEED_PECI_INT_MASK;
+ writel(val, priv->base + ASPEED_PECI_INT_CTRL);
+
+ val = FIELD_PREP(ASPEED_PECI_CTRL_SAMPLING_MASK, ASPEED_PECI_RD_SAMPLING_POINT_DEFAULT);
+ writel(val, priv->base + ASPEED_PECI_CTRL);
+}
+
+static int aspeed_peci_check_idle(struct aspeed_peci *priv)
+{
+ u32 cmd_sts = readl(priv->base + ASPEED_PECI_CMD);
+ int ret;
+
+ /*
+ * Under normal circumstances, we expect to be idle here.
+	 * If any errors or timeouts have left the hardware in a non-idle
+	 * state, we need to reset and reinitialize it to avoid a potential
+	 * controller hang.
+ */
+ if (FIELD_GET(ASPEED_PECI_CMD_STS_MASK, cmd_sts)) {
+ ret = reset_control_assert(priv->rst);
+ if (ret) {
+ dev_err(priv->dev, "cannot assert reset control\n");
+ return ret;
+ }
+
+ ret = reset_control_deassert(priv->rst);
+ if (ret) {
+ dev_err(priv->dev, "cannot deassert reset control\n");
+ return ret;
+ }
+
+ aspeed_peci_init_regs(priv);
+
+ ret = clk_set_rate(priv->clk, priv->clk_frequency);
+ if (ret < 0) {
+ dev_err(priv->dev, "cannot set clock frequency\n");
+ return ret;
+ }
+
+ aspeed_peci_controller_enable(priv);
+ }
+
+ return readl_poll_timeout(priv->base + ASPEED_PECI_CMD,
+ cmd_sts,
+ !(cmd_sts & ASPEED_PECI_CMD_IDLE_MASK),
+ ASPEED_PECI_IDLE_CHECK_INTERVAL_US,
+ ASPEED_PECI_IDLE_CHECK_TIMEOUT_US);
+}
+
+static int aspeed_peci_xfer(struct peci_controller *controller,
+ u8 addr, struct peci_request *req)
+{
+ struct aspeed_peci *priv = dev_get_drvdata(controller->dev.parent);
+ unsigned long timeout = msecs_to_jiffies(priv->cmd_timeout_ms);
+ u32 peci_head;
+ int ret, i;
+
+ if (req->tx.len > ASPEED_PECI_DATA_BUF_SIZE_MAX ||
+ req->rx.len > ASPEED_PECI_DATA_BUF_SIZE_MAX)
+ return -EINVAL;
+
+ /* Check command sts and bus idle state */
+ ret = aspeed_peci_check_idle(priv);
+ if (ret)
+ return ret; /* -ETIMEDOUT */
+
+ spin_lock_irq(&priv->lock);
+ reinit_completion(&priv->xfer_complete);
+
+ peci_head = FIELD_PREP(ASPEED_PECI_TARGET_ADDR_MASK, addr) |
+ FIELD_PREP(ASPEED_PECI_WR_LEN_MASK, req->tx.len) |
+ FIELD_PREP(ASPEED_PECI_RD_LEN_MASK, req->rx.len);
+
+ writel(peci_head, priv->base + ASPEED_PECI_RW_LENGTH);
+
+ for (i = 0; i < req->tx.len; i += 4) {
+ u32 reg = (i < 16 ? ASPEED_PECI_WR_DATA0 : ASPEED_PECI_WR_DATA4) + i % 16;
+
+ writel(get_unaligned_le32(&req->tx.buf[i]), priv->base + reg);
+ }
+
+#if IS_ENABLED(CONFIG_DYNAMIC_DEBUG)
+ dev_dbg(priv->dev, "HEAD : %#08x\n", peci_head);
+ print_hex_dump_bytes("TX : ", DUMP_PREFIX_NONE, req->tx.buf, req->tx.len);
+#endif
+
+ priv->status = 0;
+ writel(ASPEED_PECI_CMD_FIRE, priv->base + ASPEED_PECI_CMD);
+ spin_unlock_irq(&priv->lock);
+
+ ret = wait_for_completion_interruptible_timeout(&priv->xfer_complete, timeout);
+ if (ret < 0)
+ return ret;
+
+ if (ret == 0) {
+ dev_dbg(priv->dev, "timeout waiting for a response\n");
+ return -ETIMEDOUT;
+ }
+
+ spin_lock_irq(&priv->lock);
+
+ if (priv->status != ASPEED_PECI_INT_CMD_DONE) {
+ spin_unlock_irq(&priv->lock);
+ dev_dbg(priv->dev, "no valid response, status: %#02x\n", priv->status);
+ return -EIO;
+ }
+
+ spin_unlock_irq(&priv->lock);
+
+ /*
+	 * We need to use dword reads for register access, so make sure that
+	 * the buffer size is a multiple of 4 bytes.
+ */
+ BUILD_BUG_ON(PECI_REQUEST_MAX_BUF_SIZE % 4);
+
+ for (i = 0; i < req->rx.len; i += 4) {
+ u32 reg = (i < 16 ? ASPEED_PECI_RD_DATA0 : ASPEED_PECI_RD_DATA4) + i % 16;
+ u32 rx_data = readl(priv->base + reg);
+
+ put_unaligned_le32(rx_data, &req->rx.buf[i]);
+ }
+
+#if IS_ENABLED(CONFIG_DYNAMIC_DEBUG)
+ print_hex_dump_bytes("RX : ", DUMP_PREFIX_NONE, req->rx.buf, req->rx.len);
+#endif
+ return 0;
+}
+
+static irqreturn_t aspeed_peci_irq_handler(int irq, void *arg)
+{
+ struct aspeed_peci *priv = arg;
+ u32 status;
+
+ spin_lock(&priv->lock);
+ status = readl(priv->base + ASPEED_PECI_INT_STS);
+ writel(status, priv->base + ASPEED_PECI_INT_STS);
+ priv->status |= (status & ASPEED_PECI_INT_MASK);
+
+ /*
+	 * All commands should end with the ASPEED_PECI_INT_CMD_DONE bit
+	 * set, even in an error case.
+ */
+ if (status & ASPEED_PECI_INT_CMD_DONE)
+ complete(&priv->xfer_complete);
+
+ writel(0, priv->base + ASPEED_PECI_CMD);
+
+ spin_unlock(&priv->lock);
+
+ return IRQ_HANDLED;
+}
+
+static void clk_aspeed_peci_find_div_values(unsigned long rate, int *msg_timing, int *clk_div_exp)
+{
+ unsigned long best_diff = ~0ul, diff;
+ int msg_timing_temp, clk_div_exp_temp, i, j;
+
+ for (i = 1; i <= 255; i++)
+ for (j = 0; j < 8; j++) {
+ diff = abs(rate - ASPEED_PECI_CLK_DIV1(i) * ASPEED_PECI_CLK_DIV2(j));
+ if (diff < best_diff) {
+ msg_timing_temp = i;
+ clk_div_exp_temp = j;
+ best_diff = diff;
+ }
+ }
+
+ *msg_timing = msg_timing_temp;
+ *clk_div_exp = clk_div_exp_temp;
+}
+
+static int clk_aspeed_peci_get_div(unsigned long rate, const unsigned long *prate)
+{
+ unsigned long this_rate = *prate / (4 * rate);
+ int msg_timing, clk_div_exp;
+
+ clk_aspeed_peci_find_div_values(this_rate, &msg_timing, &clk_div_exp);
+
+ return ASPEED_PECI_CLK_DIV(msg_timing, clk_div_exp);
+}
+
+static int clk_aspeed_peci_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long prate)
+{
+ struct clk_aspeed_peci *peci_clk = container_of(hw, struct clk_aspeed_peci, hw);
+ struct aspeed_peci *aspeed_peci = peci_clk->aspeed_peci;
+ unsigned long this_rate = prate / (4 * rate);
+ int clk_div_exp, msg_timing;
+ u32 val;
+
+ clk_aspeed_peci_find_div_values(this_rate, &msg_timing, &clk_div_exp);
+
+ val = readl(aspeed_peci->base + ASPEED_PECI_CTRL);
+ val |= FIELD_PREP(ASPEED_PECI_CTRL_CLK_DIV_MASK, clk_div_exp);
+ writel(val, aspeed_peci->base + ASPEED_PECI_CTRL);
+
+ val = FIELD_PREP(ASPEED_PECI_T_NEGO_MSG_MASK, msg_timing);
+ val |= FIELD_PREP(ASPEED_PECI_T_NEGO_ADDR_MASK, msg_timing);
+ writel(val, aspeed_peci->base + ASPEED_PECI_TIMING_NEGOTIATION);
+
+ return 0;
+}
+
+static long clk_aspeed_peci_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ int div = clk_aspeed_peci_get_div(rate, prate);
+
+ return DIV_ROUND_UP_ULL(*prate, div);
+}
+
+static unsigned long clk_aspeed_peci_recalc_rate(struct clk_hw *hw, unsigned long prate)
+{
+ struct clk_aspeed_peci *peci_clk = container_of(hw, struct clk_aspeed_peci, hw);
+ struct aspeed_peci *aspeed_peci = peci_clk->aspeed_peci;
+ int div, msg_timing, addr_timing, clk_div_exp;
+ u32 reg;
+
+ reg = readl(aspeed_peci->base + ASPEED_PECI_TIMING_NEGOTIATION);
+ msg_timing = FIELD_GET(ASPEED_PECI_T_NEGO_MSG_MASK, reg);
+ addr_timing = FIELD_GET(ASPEED_PECI_T_NEGO_ADDR_MASK, reg);
+
+ if (msg_timing != addr_timing)
+ return 0;
+
+ reg = readl(aspeed_peci->base + ASPEED_PECI_CTRL);
+ clk_div_exp = FIELD_GET(ASPEED_PECI_CTRL_CLK_DIV_MASK, reg);
+
+ div = ASPEED_PECI_CLK_DIV(msg_timing, clk_div_exp);
+
+ return DIV_ROUND_UP_ULL(prate, div);
+}
+
+static const struct clk_ops clk_aspeed_peci_ops = {
+ .set_rate = clk_aspeed_peci_set_rate,
+ .round_rate = clk_aspeed_peci_round_rate,
+ .recalc_rate = clk_aspeed_peci_recalc_rate,
+};
+
+/*
+ * PECI HW contains a clock divider which is a combination of:
+ * div0: 4 (fixed divider)
+ * div1: x + 1
+ * div2: 1 << y
+ * In other words, out_clk = in_clk / (div0 * div1 * div2)
+ * The resulting frequency is used by the PECI controller to drive the PECI
+ * bus and negotiate the optimal transfer rate.
+ */
+static struct clk *devm_aspeed_peci_register_clk_div(struct device *dev, struct clk *parent,
+ struct aspeed_peci *priv)
+{
+ struct clk_aspeed_peci *peci_clk;
+ struct clk_init_data init;
+ const char *parent_name;
+ char name[32];
+ int ret;
+
+ snprintf(name, sizeof(name), "%s_div", dev_name(dev));
+
+ parent_name = __clk_get_name(parent);
+
+ init.ops = &clk_aspeed_peci_ops;
+ init.name = name;
+ init.parent_names = (const char* []) { parent_name };
+ init.num_parents = 1;
+ init.flags = 0;
+
+ peci_clk = devm_kzalloc(dev, sizeof(struct clk_aspeed_peci), GFP_KERNEL);
+ if (!peci_clk)
+ return ERR_PTR(-ENOMEM);
+
+ peci_clk->hw.init = &init;
+ peci_clk->aspeed_peci = priv;
+
+ ret = devm_clk_hw_register(dev, &peci_clk->hw);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return peci_clk->hw.clk;
+}
+
+static void aspeed_peci_property_sanitize(struct device *dev, const char *propname,
+ u32 min, u32 max, u32 default_val, u32 *propval)
+{
+ u32 val;
+ int ret;
+
+ ret = device_property_read_u32(dev, propname, &val);
+ if (ret) {
+ val = default_val;
+ } else if (val > max || val < min) {
+ dev_warn(dev, "invalid %s: %u, falling back to: %u\n",
+ propname, val, default_val);
+
+ val = default_val;
+ }
+
+ *propval = val;
+}
+
+static void aspeed_peci_property_setup(struct aspeed_peci *priv)
+{
+ aspeed_peci_property_sanitize(priv->dev, "clock-frequency",
+ ASPEED_PECI_CLK_FREQUENCY_MIN, ASPEED_PECI_CLK_FREQUENCY_MAX,
+ ASPEED_PECI_CLK_FREQUENCY_DEFAULT, &priv->clk_frequency);
+ aspeed_peci_property_sanitize(priv->dev, "cmd-timeout-ms",
+ 1, ASPEED_PECI_CMD_TIMEOUT_MS_MAX,
+ ASPEED_PECI_CMD_TIMEOUT_MS_DEFAULT, &priv->cmd_timeout_ms);
+}
+
+static struct peci_controller_ops aspeed_ops = {
+ .xfer = aspeed_peci_xfer,
+};
+
+static void aspeed_peci_reset_control_release(void *data)
+{
+ reset_control_assert(data);
+}
+
+static int devm_aspeed_peci_reset_control_deassert(struct device *dev, struct reset_control *rst)
+{
+ int ret;
+
+ ret = reset_control_deassert(rst);
+ if (ret)
+ return ret;
+
+ return devm_add_action_or_reset(dev, aspeed_peci_reset_control_release, rst);
+}
+
+static void aspeed_peci_clk_release(void *data)
+{
+ clk_disable_unprepare(data);
+}
+
+static int devm_aspeed_peci_clk_enable(struct device *dev, struct clk *clk)
+{
+ int ret;
+
+ ret = clk_prepare_enable(clk);
+ if (ret)
+ return ret;
+
+ return devm_add_action_or_reset(dev, aspeed_peci_clk_release, clk);
+}
+
+static int aspeed_peci_probe(struct platform_device *pdev)
+{
+ struct peci_controller *controller;
+ struct aspeed_peci *priv;
+ struct clk *ref_clk;
+ int ret;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->dev = &pdev->dev;
+ dev_set_drvdata(priv->dev, priv);
+
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ priv->irq = platform_get_irq(pdev, 0);
+ if (!priv->irq)
+ return priv->irq;
+
+ ret = devm_request_irq(&pdev->dev, priv->irq, aspeed_peci_irq_handler,
+ 0, "peci-aspeed", priv);
+ if (ret)
+ return ret;
+
+ init_completion(&priv->xfer_complete);
+ spin_lock_init(&priv->lock);
+
+ priv->rst = devm_reset_control_get(&pdev->dev, NULL);
+ if (IS_ERR(priv->rst))
+ return dev_err_probe(priv->dev, PTR_ERR(priv->rst),
+ "failed to get reset control\n");
+
+ ret = devm_aspeed_peci_reset_control_deassert(priv->dev, priv->rst);
+ if (ret)
+ return dev_err_probe(priv->dev, ret, "cannot deassert reset control\n");
+
+ aspeed_peci_property_setup(priv);
+
+ aspeed_peci_init_regs(priv);
+
+ ref_clk = devm_clk_get(priv->dev, NULL);
+ if (IS_ERR(ref_clk))
+ return dev_err_probe(priv->dev, PTR_ERR(ref_clk), "failed to get ref clock\n");
+
+ priv->clk = devm_aspeed_peci_register_clk_div(priv->dev, ref_clk, priv);
+ if (IS_ERR(priv->clk))
+ return dev_err_probe(priv->dev, PTR_ERR(priv->clk), "cannot register clock\n");
+
+ ret = clk_set_rate(priv->clk, priv->clk_frequency);
+ if (ret < 0)
+ return dev_err_probe(priv->dev, ret, "cannot set clock frequency\n");
+
+ ret = devm_aspeed_peci_clk_enable(priv->dev, priv->clk);
+ if (ret)
+ return dev_err_probe(priv->dev, ret, "failed to enable clock\n");
+
+ aspeed_peci_controller_enable(priv);
+
+ controller = devm_peci_controller_add(priv->dev, &aspeed_ops);
+ if (IS_ERR(controller))
+ return dev_err_probe(priv->dev, PTR_ERR(controller),
+ "failed to add aspeed peci controller\n");
+
+ priv->controller = controller;
+
+ return 0;
+}
+
+static const struct of_device_id aspeed_peci_of_table[] = {
+ { .compatible = "aspeed,ast2400-peci", },
+ { .compatible = "aspeed,ast2500-peci", },
+ { .compatible = "aspeed,ast2600-peci", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, aspeed_peci_of_table);
+
+static struct platform_driver aspeed_peci_driver = {
+ .probe = aspeed_peci_probe,
+ .driver = {
+ .name = "peci-aspeed",
+ .of_match_table = aspeed_peci_of_table,
+ },
+};
+module_platform_driver(aspeed_peci_driver);
+
+MODULE_AUTHOR("Ryan Chen <ryan_chen@aspeedtech.com>");
+MODULE_AUTHOR("Jae Hyun Yoo <jae.hyun.yoo@linux.intel.com>");
+MODULE_DESCRIPTION("ASPEED PECI driver");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(PECI);
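A note on the property handling above: aspeed_peci_property_setup() funnels both optional properties through aspeed_peci_property_sanitize(), so out-of-range firmware values never reach the hardware. A short walkthrough of that behaviour for "cmd-timeout-ms", using hypothetical property values, is sketched below; the limits and default are the driver's own ASPEED_PECI_CMD_TIMEOUT_MS_* constants.

/*
 * Behaviour of aspeed_peci_property_sanitize() for "cmd-timeout-ms",
 * illustrated with hypothetical firmware values:
 *
 *   property absent              -> cmd_timeout_ms = DEFAULT (silently)
 *   property = 0 (below min 1)   -> dev_warn() logged, cmd_timeout_ms = DEFAULT
 *   property within [1, MAX]     -> cmd_timeout_ms = property value
 */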
diff --git a/drivers/peci/core.c b/drivers/peci/core.c
new file mode 100644
index 000000000000..9c8cf07e51c7
--- /dev/null
+++ b/drivers/peci/core.c
@@ -0,0 +1,236 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (c) 2018-2021 Intel Corporation
+
+#include <linux/bug.h>
+#include <linux/device.h>
+#include <linux/export.h>
+#include <linux/idr.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/peci.h>
+#include <linux/pm_runtime.h>
+#include <linux/property.h>
+#include <linux/slab.h>
+
+#include "internal.h"
+
+static DEFINE_IDA(peci_controller_ida);
+
+static void peci_controller_dev_release(struct device *dev)
+{
+ struct peci_controller *controller = to_peci_controller(dev);
+
+ mutex_destroy(&controller->bus_lock);
+ ida_free(&peci_controller_ida, controller->id);
+ kfree(controller);
+}
+
+struct device_type peci_controller_type = {
+ .release = peci_controller_dev_release,
+};
+
+int peci_controller_scan_devices(struct peci_controller *controller)
+{
+ int ret;
+ u8 addr;
+
+ for (addr = PECI_BASE_ADDR; addr < PECI_BASE_ADDR + PECI_DEVICE_NUM_MAX; addr++) {
+ ret = peci_device_create(controller, addr);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static struct peci_controller *peci_controller_alloc(struct device *dev,
+ struct peci_controller_ops *ops)
+{
+ struct peci_controller *controller;
+ int ret;
+
+ if (!ops->xfer)
+ return ERR_PTR(-EINVAL);
+
+ controller = kzalloc(sizeof(*controller), GFP_KERNEL);
+ if (!controller)
+ return ERR_PTR(-ENOMEM);
+
+ ret = ida_alloc_max(&peci_controller_ida, U8_MAX, GFP_KERNEL);
+ if (ret < 0)
+ goto err;
+ controller->id = ret;
+
+ controller->ops = ops;
+
+ controller->dev.parent = dev;
+ controller->dev.bus = &peci_bus_type;
+ controller->dev.type = &peci_controller_type;
+
+ device_initialize(&controller->dev);
+
+ mutex_init(&controller->bus_lock);
+
+ return controller;
+
+err:
+ kfree(controller);
+ return ERR_PTR(ret);
+}
+
+static int unregister_child(struct device *dev, void *dummy)
+{
+ peci_device_destroy(to_peci_device(dev));
+
+ return 0;
+}
+
+static void unregister_controller(void *_controller)
+{
+ struct peci_controller *controller = _controller;
+
+ /*
+ * Detach any active PECI devices. This can't fail, thus we do not
+ * check the returned value.
+ */
+ device_for_each_child_reverse(&controller->dev, NULL, unregister_child);
+
+ device_unregister(&controller->dev);
+
+ fwnode_handle_put(controller->dev.fwnode);
+
+ pm_runtime_disable(&controller->dev);
+}
+
+/**
+ * devm_peci_controller_add() - add PECI controller
+ * @dev: device for devm operations
+ * @ops: pointer to controller specific methods
+ *
+ * In the final stage of its probe(), a PECI controller driver calls
+ * devm_peci_controller_add() to register itself with the PECI bus.
+ *
+ * Return: Pointer to the newly allocated controller or ERR_PTR() in case of failure.
+ */
+struct peci_controller *devm_peci_controller_add(struct device *dev,
+ struct peci_controller_ops *ops)
+{
+ struct peci_controller *controller;
+ int ret;
+
+ controller = peci_controller_alloc(dev, ops);
+ if (IS_ERR(controller))
+ return controller;
+
+ ret = dev_set_name(&controller->dev, "peci-%d", controller->id);
+ if (ret)
+ goto err_put;
+
+ pm_runtime_no_callbacks(&controller->dev);
+ pm_suspend_ignore_children(&controller->dev, true);
+ pm_runtime_enable(&controller->dev);
+
+ device_set_node(&controller->dev, fwnode_handle_get(dev_fwnode(dev)));
+
+ ret = device_add(&controller->dev);
+ if (ret)
+ goto err_fwnode;
+
+ ret = devm_add_action_or_reset(dev, unregister_controller, controller);
+ if (ret)
+ return ERR_PTR(ret);
+
+ /*
+	 * Ignoring the return value since failures during scan are
+	 * non-critical for the controller itself.
+ */
+ peci_controller_scan_devices(controller);
+
+ return controller;
+
+err_fwnode:
+ fwnode_handle_put(controller->dev.fwnode);
+
+ pm_runtime_disable(&controller->dev);
+
+err_put:
+ put_device(&controller->dev);
+
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_NS_GPL(devm_peci_controller_add, PECI);
+
+static const struct peci_device_id *
+peci_bus_match_device_id(const struct peci_device_id *id, struct peci_device *device)
+{
+ while (id->family != 0) {
+ if (id->family == device->info.family &&
+ id->model == device->info.model)
+ return id;
+ id++;
+ }
+
+ return NULL;
+}
+
+static int peci_bus_device_match(struct device *dev, struct device_driver *drv)
+{
+ struct peci_device *device = to_peci_device(dev);
+ struct peci_driver *peci_drv = to_peci_driver(drv);
+
+ if (dev->type != &peci_device_type)
+ return 0;
+
+ return !!peci_bus_match_device_id(peci_drv->id_table, device);
+}
+
+static int peci_bus_device_probe(struct device *dev)
+{
+ struct peci_device *device = to_peci_device(dev);
+ struct peci_driver *driver = to_peci_driver(dev->driver);
+
+ return driver->probe(device, peci_bus_match_device_id(driver->id_table, device));
+}
+
+static void peci_bus_device_remove(struct device *dev)
+{
+ struct peci_device *device = to_peci_device(dev);
+ struct peci_driver *driver = to_peci_driver(dev->driver);
+
+ if (driver->remove)
+ driver->remove(device);
+}
+
+struct bus_type peci_bus_type = {
+ .name = "peci",
+ .match = peci_bus_device_match,
+ .probe = peci_bus_device_probe,
+ .remove = peci_bus_device_remove,
+ .bus_groups = peci_bus_groups,
+};
+
+static int __init peci_init(void)
+{
+ int ret;
+
+ ret = bus_register(&peci_bus_type);
+ if (ret < 0) {
+ pr_err("peci: failed to register PECI bus type!\n");
+ return ret;
+ }
+
+ return 0;
+}
+module_init(peci_init);
+
+static void __exit peci_exit(void)
+{
+ bus_unregister(&peci_bus_type);
+}
+module_exit(peci_exit);
+
+MODULE_AUTHOR("Jason M Bills <jason.m.bills@linux.intel.com>");
+MODULE_AUTHOR("Jae Hyun Yoo <jae.hyun.yoo@linux.intel.com>");
+MODULE_AUTHOR("Iwona Winiarska <iwona.winiarska@intel.com>");
+MODULE_DESCRIPTION("PECI bus core module");
+MODULE_LICENSE("GPL");
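The kerneldoc for devm_peci_controller_add() above describes the registration contract from the controller side. Below is a minimal sketch of how a hypothetical controller driver would use it, assuming only the interfaces introduced in this patch; the "my_" names are illustrative and not part of the patch, and real drivers (see peci-aspeed.c above) of course do real work in .xfer.

/* Hypothetical controller glue; "my_" names are illustrative only. */
#include <linux/module.h>
#include <linux/peci.h>
#include <linux/platform_device.h>

static int my_xfer(struct peci_controller *controller, u8 addr,
		   struct peci_request *req)
{
	/* Drive the hardware here: send req->tx.buf/tx.len, fill req->rx. */
	return 0;
}

static struct peci_controller_ops my_ops = {
	.xfer = my_xfer,
};

static int my_probe(struct platform_device *pdev)
{
	struct peci_controller *controller;

	controller = devm_peci_controller_add(&pdev->dev, &my_ops);
	if (IS_ERR(controller))
		return PTR_ERR(controller);

	/* The core scans for devices at 0x30-0x37; nothing more to do here. */
	return 0;
}

MODULE_IMPORT_NS(PECI);	/* devm_peci_controller_add() is exported in the PECI namespace */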
diff --git a/drivers/peci/cpu.c b/drivers/peci/cpu.c
new file mode 100644
index 000000000000..68eb61c65d34
--- /dev/null
+++ b/drivers/peci/cpu.c
@@ -0,0 +1,343 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (c) 2021 Intel Corporation
+
+#include <linux/auxiliary_bus.h>
+#include <linux/module.h>
+#include <linux/peci.h>
+#include <linux/peci-cpu.h>
+#include <linux/slab.h>
+
+#include "internal.h"
+
+/**
+ * peci_temp_read() - read the maximum die temperature from PECI target device
+ * @device: PECI device to which request is going to be sent
+ * @temp_raw: where to store the read temperature
+ *
+ * It uses the GetTemp PECI command.
+ *
+ * Return: 0 if succeeded, other values in case of errors.
+ */
+int peci_temp_read(struct peci_device *device, s16 *temp_raw)
+{
+ struct peci_request *req;
+
+ req = peci_xfer_get_temp(device);
+ if (IS_ERR(req))
+ return PTR_ERR(req);
+
+ *temp_raw = peci_request_temp_read(req);
+
+ peci_request_free(req);
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(peci_temp_read, PECI_CPU);
+
+/**
+ * peci_pcs_read() - read PCS register
+ * @device: PECI device to which request is going to be sent
+ * @index: PCS index
+ * @param: PCS parameter
+ * @data: where to store the read data
+ *
+ * It uses the RdPkgConfig PECI command.
+ *
+ * Return: 0 if succeeded, other values in case of errors.
+ */
+int peci_pcs_read(struct peci_device *device, u8 index, u16 param, u32 *data)
+{
+ struct peci_request *req;
+ int ret;
+
+ req = peci_xfer_pkg_cfg_readl(device, index, param);
+ if (IS_ERR(req))
+ return PTR_ERR(req);
+
+ ret = peci_request_status(req);
+ if (ret)
+ goto out_req_free;
+
+ *data = peci_request_data_readl(req);
+out_req_free:
+ peci_request_free(req);
+
+ return ret;
+}
+EXPORT_SYMBOL_NS_GPL(peci_pcs_read, PECI_CPU);
+
+/**
+ * peci_pci_local_read() - read 32-bit memory location using raw address
+ * @device: PECI device to which request is going to be sent
+ * @bus: bus
+ * @dev: device
+ * @func: function
+ * @reg: register
+ * @data: where to store the read data
+ *
+ * It uses the RdPCIConfigLocal PECI command.
+ *
+ * Return: 0 if succeeded, other values in case of errors.
+ */
+int peci_pci_local_read(struct peci_device *device, u8 bus, u8 dev, u8 func,
+ u16 reg, u32 *data)
+{
+ struct peci_request *req;
+ int ret;
+
+ req = peci_xfer_pci_cfg_local_readl(device, bus, dev, func, reg);
+ if (IS_ERR(req))
+ return PTR_ERR(req);
+
+ ret = peci_request_status(req);
+ if (ret)
+ goto out_req_free;
+
+ *data = peci_request_data_readl(req);
+out_req_free:
+ peci_request_free(req);
+
+ return ret;
+}
+EXPORT_SYMBOL_NS_GPL(peci_pci_local_read, PECI_CPU);
+
+/**
+ * peci_ep_pci_local_read() - read 32-bit memory location using raw address
+ * @device: PECI device to which request is going to be sent
+ * @seg: PCI segment
+ * @bus: bus
+ * @dev: device
+ * @func: function
+ * @reg: register
+ * @data: where to store the read data
+ *
+ * Like &peci_pci_local_read, but it uses the RdEndpointConfig PECI command.
+ *
+ * Return: 0 if succeeded, other values in case of errors.
+ */
+int peci_ep_pci_local_read(struct peci_device *device, u8 seg,
+ u8 bus, u8 dev, u8 func, u16 reg, u32 *data)
+{
+ struct peci_request *req;
+ int ret;
+
+ req = peci_xfer_ep_pci_cfg_local_readl(device, seg, bus, dev, func, reg);
+ if (IS_ERR(req))
+ return PTR_ERR(req);
+
+ ret = peci_request_status(req);
+ if (ret)
+ goto out_req_free;
+
+ *data = peci_request_data_readl(req);
+out_req_free:
+ peci_request_free(req);
+
+ return ret;
+}
+EXPORT_SYMBOL_NS_GPL(peci_ep_pci_local_read, PECI_CPU);
+
+/**
+ * peci_mmio_read() - read 32-bit memory location using 64-bit bar offset address
+ * @device: PECI device to which request is going to be sent
+ * @bar: PCI bar
+ * @seg: PCI segment
+ * @bus: bus
+ * @dev: device
+ * @func: function
+ * @address: 64-bit MMIO address
+ * @data: where to store the read data
+ *
+ * It uses the RdEndpointConfig PECI command.
+ *
+ * Return: 0 if succeeded, other values in case of errors.
+ */
+int peci_mmio_read(struct peci_device *device, u8 bar, u8 seg,
+ u8 bus, u8 dev, u8 func, u64 address, u32 *data)
+{
+ struct peci_request *req;
+ int ret;
+
+ req = peci_xfer_ep_mmio64_readl(device, bar, seg, bus, dev, func, address);
+ if (IS_ERR(req))
+ return PTR_ERR(req);
+
+ ret = peci_request_status(req);
+ if (ret)
+ goto out_req_free;
+
+ *data = peci_request_data_readl(req);
+out_req_free:
+ peci_request_free(req);
+
+ return ret;
+}
+EXPORT_SYMBOL_NS_GPL(peci_mmio_read, PECI_CPU);
+
+static const char * const peci_adev_types[] = {
+ "cputemp",
+ "dimmtemp",
+};
+
+struct peci_cpu {
+ struct peci_device *device;
+ const struct peci_device_id *id;
+};
+
+static void adev_release(struct device *dev)
+{
+ struct auxiliary_device *adev = to_auxiliary_dev(dev);
+
+ auxiliary_device_uninit(adev);
+
+ kfree(adev->name);
+ kfree(adev);
+}
+
+static struct auxiliary_device *adev_alloc(struct peci_cpu *priv, int idx)
+{
+ struct peci_controller *controller = to_peci_controller(priv->device->dev.parent);
+ struct auxiliary_device *adev;
+ const char *name;
+ int ret;
+
+ adev = kzalloc(sizeof(*adev), GFP_KERNEL);
+ if (!adev)
+ return ERR_PTR(-ENOMEM);
+
+ name = kasprintf(GFP_KERNEL, "%s.%s", peci_adev_types[idx], (const char *)priv->id->data);
+ if (!name) {
+ ret = -ENOMEM;
+ goto free_adev;
+ }
+
+ adev->name = name;
+ adev->dev.parent = &priv->device->dev;
+ adev->dev.release = adev_release;
+ adev->id = (controller->id << 16) | (priv->device->addr);
+
+ ret = auxiliary_device_init(adev);
+ if (ret)
+ goto free_name;
+
+ return adev;
+
+free_name:
+ kfree(name);
+free_adev:
+ kfree(adev);
+ return ERR_PTR(ret);
+}
+
+static void unregister_adev(void *_adev)
+{
+ struct auxiliary_device *adev = _adev;
+
+ auxiliary_device_delete(adev);
+}
+
+static int devm_adev_add(struct device *dev, int idx)
+{
+ struct peci_cpu *priv = dev_get_drvdata(dev);
+ struct auxiliary_device *adev;
+ int ret;
+
+ adev = adev_alloc(priv, idx);
+ if (IS_ERR(adev))
+ return PTR_ERR(adev);
+
+ ret = auxiliary_device_add(adev);
+ if (ret) {
+ auxiliary_device_uninit(adev);
+ return ret;
+ }
+
+ ret = devm_add_action_or_reset(&priv->device->dev, unregister_adev, adev);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void peci_cpu_add_adevices(struct peci_cpu *priv)
+{
+ struct device *dev = &priv->device->dev;
+ int ret, i;
+
+ for (i = 0; i < ARRAY_SIZE(peci_adev_types); i++) {
+ ret = devm_adev_add(dev, i);
+ if (ret) {
+ dev_warn(dev, "Failed to register PECI auxiliary: %s, ret = %d\n",
+ peci_adev_types[i], ret);
+ continue;
+ }
+ }
+}
+
+static int
+peci_cpu_probe(struct peci_device *device, const struct peci_device_id *id)
+{
+ struct device *dev = &device->dev;
+ struct peci_cpu *priv;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ dev_set_drvdata(dev, priv);
+ priv->device = device;
+ priv->id = id;
+
+ peci_cpu_add_adevices(priv);
+
+ return 0;
+}
+
+static const struct peci_device_id peci_cpu_device_ids[] = {
+ { /* Haswell Xeon */
+ .family = 6,
+ .model = INTEL_FAM6_HASWELL_X,
+ .data = "hsx",
+ },
+ { /* Broadwell Xeon */
+ .family = 6,
+ .model = INTEL_FAM6_BROADWELL_X,
+ .data = "bdx",
+ },
+ { /* Broadwell Xeon D */
+ .family = 6,
+ .model = INTEL_FAM6_BROADWELL_D,
+ .data = "bdxd",
+ },
+ { /* Skylake Xeon */
+ .family = 6,
+ .model = INTEL_FAM6_SKYLAKE_X,
+ .data = "skx",
+ },
+ { /* Icelake Xeon */
+ .family = 6,
+ .model = INTEL_FAM6_ICELAKE_X,
+ .data = "icx",
+ },
+ { /* Icelake Xeon D */
+ .family = 6,
+ .model = INTEL_FAM6_ICELAKE_D,
+ .data = "icxd",
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(peci, peci_cpu_device_ids);
+
+static struct peci_driver peci_cpu_driver = {
+ .probe = peci_cpu_probe,
+ .id_table = peci_cpu_device_ids,
+ .driver = {
+ .name = "peci-cpu",
+ },
+};
+module_peci_driver(peci_cpu_driver);
+
+MODULE_AUTHOR("Iwona Winiarska <iwona.winiarska@intel.com>");
+MODULE_DESCRIPTION("PECI CPU driver");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(PECI);
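To show how the exported helpers above are consumed, here is a minimal sketch built around peci_temp_read(). The "example_" function is hypothetical, and the 1/64 degree Celsius scaling of the GetTemp value is an assumption based on how the PECI hwmon consumers treat it, not something this file defines.

/*
 * Hypothetical consumer of peci_temp_read(); "device" is a bound
 * struct peci_device. The GetTemp value is a signed offset below Tjmax;
 * assuming the usual 1/64 degree Celsius unit, it converts to
 * millidegrees as shown.
 */
static int example_read_margin(struct peci_device *device, long *margin_mC)
{
	s16 temp_raw;
	int ret;

	ret = peci_temp_read(device, &temp_raw);
	if (ret)
		return ret;

	/* e.g. raw -1280 -> -20000 mC, i.e. 20 degrees below Tjmax */
	*margin_mC = (long)temp_raw * 1000 / 64;

	return 0;
}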
diff --git a/drivers/peci/device.c b/drivers/peci/device.c
new file mode 100644
index 000000000000..e6b0bffb14f4
--- /dev/null
+++ b/drivers/peci/device.c
@@ -0,0 +1,252 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (c) 2018-2021 Intel Corporation
+
+#include <linux/bitfield.h>
+#include <linux/peci.h>
+#include <linux/peci-cpu.h>
+#include <linux/slab.h>
+
+#include "internal.h"
+
+/*
+ * A PECI device can be removed using sysfs, but the removal can also happen
+ * as a result of the controller being removed.
+ * The mutex is used to protect a PECI device from being double-deleted.
+ */
+static DEFINE_MUTEX(peci_device_del_lock);
+
+#define REVISION_NUM_MASK GENMASK(15, 8)
+static int peci_get_revision(struct peci_device *device, u8 *revision)
+{
+ struct peci_request *req;
+ u64 dib;
+
+ req = peci_xfer_get_dib(device);
+ if (IS_ERR(req))
+ return PTR_ERR(req);
+
+ /*
+	 * A PECI device may be in a state where it is unable to return a proper
+	 * DIB, in which case it returns 0 as the DIB value.
+	 * Let's treat this as an error to avoid carrying on with the detection
+	 * using an invalid revision.
+ */
+ dib = peci_request_dib_read(req);
+ if (dib == 0) {
+ peci_request_free(req);
+ return -EIO;
+ }
+
+ *revision = FIELD_GET(REVISION_NUM_MASK, dib);
+
+ peci_request_free(req);
+
+ return 0;
+}
+
+static int peci_get_cpu_id(struct peci_device *device, u32 *cpu_id)
+{
+ struct peci_request *req;
+ int ret;
+
+ req = peci_xfer_pkg_cfg_readl(device, PECI_PCS_PKG_ID, PECI_PKG_ID_CPU_ID);
+ if (IS_ERR(req))
+ return PTR_ERR(req);
+
+ ret = peci_request_status(req);
+ if (ret)
+ goto out_req_free;
+
+ *cpu_id = peci_request_data_readl(req);
+out_req_free:
+ peci_request_free(req);
+
+ return ret;
+}
+
+static unsigned int peci_x86_cpu_family(unsigned int sig)
+{
+ unsigned int x86;
+
+ x86 = (sig >> 8) & 0xf;
+
+ if (x86 == 0xf)
+ x86 += (sig >> 20) & 0xff;
+
+ return x86;
+}
+
+static unsigned int peci_x86_cpu_model(unsigned int sig)
+{
+ unsigned int fam, model;
+
+ fam = peci_x86_cpu_family(sig);
+
+ model = (sig >> 4) & 0xf;
+
+ if (fam >= 0x6)
+ model += ((sig >> 16) & 0xf) << 4;
+
+ return model;
+}
+
+static int peci_device_info_init(struct peci_device *device)
+{
+ u8 revision;
+ u32 cpu_id;
+ int ret;
+
+ ret = peci_get_cpu_id(device, &cpu_id);
+ if (ret)
+ return ret;
+
+ device->info.family = peci_x86_cpu_family(cpu_id);
+ device->info.model = peci_x86_cpu_model(cpu_id);
+
+ ret = peci_get_revision(device, &revision);
+ if (ret)
+ return ret;
+ device->info.peci_revision = revision;
+
+ device->info.socket_id = device->addr - PECI_BASE_ADDR;
+
+ return 0;
+}
+
+static int peci_detect(struct peci_controller *controller, u8 addr)
+{
+ /*
+	 * PECI Ping is a command encoded as tx_len = 0, rx_len = 0.
+	 * We expect a correct Write FCS if the device at the target address
+ * is able to respond.
+ */
+ struct peci_request req = { 0 };
+ int ret;
+
+ mutex_lock(&controller->bus_lock);
+ ret = controller->ops->xfer(controller, addr, &req);
+ mutex_unlock(&controller->bus_lock);
+
+ return ret;
+}
+
+static bool peci_addr_valid(u8 addr)
+{
+ return addr >= PECI_BASE_ADDR && addr < PECI_BASE_ADDR + PECI_DEVICE_NUM_MAX;
+}
+
+static int peci_dev_exists(struct device *dev, void *data)
+{
+ struct peci_device *device = to_peci_device(dev);
+ u8 *addr = data;
+
+ if (device->addr == *addr)
+ return -EBUSY;
+
+ return 0;
+}
+
+int peci_device_create(struct peci_controller *controller, u8 addr)
+{
+ struct peci_device *device;
+ int ret;
+
+ if (!peci_addr_valid(addr))
+ return -EINVAL;
+
+ /* Check if we have already detected this device before. */
+ ret = device_for_each_child(&controller->dev, &addr, peci_dev_exists);
+ if (ret)
+ return 0;
+
+ ret = peci_detect(controller, addr);
+ if (ret) {
+ /*
+ * Device not present or host state doesn't allow successful
+ * detection at this time.
+ */
+ if (ret == -EIO || ret == -ETIMEDOUT)
+ return 0;
+
+ return ret;
+ }
+
+ device = kzalloc(sizeof(*device), GFP_KERNEL);
+ if (!device)
+ return -ENOMEM;
+
+ device_initialize(&device->dev);
+
+ device->addr = addr;
+ device->dev.parent = &controller->dev;
+ device->dev.bus = &peci_bus_type;
+ device->dev.type = &peci_device_type;
+
+ ret = peci_device_info_init(device);
+ if (ret)
+ goto err_put;
+
+ ret = dev_set_name(&device->dev, "%d-%02x", controller->id, device->addr);
+ if (ret)
+ goto err_put;
+
+ ret = device_add(&device->dev);
+ if (ret)
+ goto err_put;
+
+ return 0;
+
+err_put:
+ put_device(&device->dev);
+
+ return ret;
+}
+
+void peci_device_destroy(struct peci_device *device)
+{
+ mutex_lock(&peci_device_del_lock);
+ if (!device->deleted) {
+ device_unregister(&device->dev);
+ device->deleted = true;
+ }
+ mutex_unlock(&peci_device_del_lock);
+}
+
+int __peci_driver_register(struct peci_driver *driver, struct module *owner,
+ const char *mod_name)
+{
+ driver->driver.bus = &peci_bus_type;
+ driver->driver.owner = owner;
+ driver->driver.mod_name = mod_name;
+
+ if (!driver->probe) {
+ pr_err("peci: trying to register driver without probe callback\n");
+ return -EINVAL;
+ }
+
+ if (!driver->id_table) {
+ pr_err("peci: trying to register driver without device id table\n");
+ return -EINVAL;
+ }
+
+ return driver_register(&driver->driver);
+}
+EXPORT_SYMBOL_NS_GPL(__peci_driver_register, PECI);
+
+void peci_driver_unregister(struct peci_driver *driver)
+{
+ driver_unregister(&driver->driver);
+}
+EXPORT_SYMBOL_NS_GPL(peci_driver_unregister, PECI);
+
+static void peci_device_release(struct device *dev)
+{
+ struct peci_device *device = to_peci_device(dev);
+
+ kfree(device);
+}
+
+struct device_type peci_device_type = {
+ .groups = peci_device_groups,
+ .release = peci_device_release,
+};
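peci_x86_cpu_family() and peci_x86_cpu_model() above implement the standard x86 CPUID signature decode. A worked example, assuming a typical Ice Lake Xeon signature of 0x606a6:

/*
 * Worked example of the signature decode above, for sig = 0x606a6
 * (e.g. an Ice Lake Xeon):
 *
 *   peci_x86_cpu_family(0x606a6) = (0x606a6 >> 8) & 0xf = 6
 *   peci_x86_cpu_model(0x606a6)  = ((0x606a6 >> 4) & 0xf)          = 0x0a
 *                                + (((0x606a6 >> 16) & 0xf) << 4)  = 0x60
 *                                                                  = 0x6a
 *
 * which matches { .family = 6, .model = INTEL_FAM6_ICELAKE_X } in the
 * peci_cpu_device_ids table of drivers/peci/cpu.c.
 */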
diff --git a/drivers/peci/internal.h b/drivers/peci/internal.h
new file mode 100644
index 000000000000..9d75ea54504c
--- /dev/null
+++ b/drivers/peci/internal.h
@@ -0,0 +1,136 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2018-2021 Intel Corporation */
+
+#ifndef __PECI_INTERNAL_H
+#define __PECI_INTERNAL_H
+
+#include <linux/device.h>
+#include <linux/types.h>
+
+struct peci_controller;
+struct attribute_group;
+struct peci_device;
+struct peci_request;
+
+/* PECI CPU address range 0x30-0x37 */
+#define PECI_BASE_ADDR 0x30
+#define PECI_DEVICE_NUM_MAX 8
+
+struct peci_request *peci_request_alloc(struct peci_device *device, u8 tx_len, u8 rx_len);
+void peci_request_free(struct peci_request *req);
+
+int peci_request_status(struct peci_request *req);
+
+u64 peci_request_dib_read(struct peci_request *req);
+s16 peci_request_temp_read(struct peci_request *req);
+
+u8 peci_request_data_readb(struct peci_request *req);
+u16 peci_request_data_readw(struct peci_request *req);
+u32 peci_request_data_readl(struct peci_request *req);
+u64 peci_request_data_readq(struct peci_request *req);
+
+struct peci_request *peci_xfer_get_dib(struct peci_device *device);
+struct peci_request *peci_xfer_get_temp(struct peci_device *device);
+
+struct peci_request *peci_xfer_pkg_cfg_readb(struct peci_device *device, u8 index, u16 param);
+struct peci_request *peci_xfer_pkg_cfg_readw(struct peci_device *device, u8 index, u16 param);
+struct peci_request *peci_xfer_pkg_cfg_readl(struct peci_device *device, u8 index, u16 param);
+struct peci_request *peci_xfer_pkg_cfg_readq(struct peci_device *device, u8 index, u16 param);
+
+struct peci_request *peci_xfer_pci_cfg_local_readb(struct peci_device *device,
+ u8 bus, u8 dev, u8 func, u16 reg);
+struct peci_request *peci_xfer_pci_cfg_local_readw(struct peci_device *device,
+ u8 bus, u8 dev, u8 func, u16 reg);
+struct peci_request *peci_xfer_pci_cfg_local_readl(struct peci_device *device,
+ u8 bus, u8 dev, u8 func, u16 reg);
+
+struct peci_request *peci_xfer_ep_pci_cfg_local_readb(struct peci_device *device, u8 seg,
+ u8 bus, u8 dev, u8 func, u16 reg);
+struct peci_request *peci_xfer_ep_pci_cfg_local_readw(struct peci_device *device, u8 seg,
+ u8 bus, u8 dev, u8 func, u16 reg);
+struct peci_request *peci_xfer_ep_pci_cfg_local_readl(struct peci_device *device, u8 seg,
+ u8 bus, u8 dev, u8 func, u16 reg);
+
+struct peci_request *peci_xfer_ep_pci_cfg_readb(struct peci_device *device, u8 seg,
+ u8 bus, u8 dev, u8 func, u16 reg);
+struct peci_request *peci_xfer_ep_pci_cfg_readw(struct peci_device *device, u8 seg,
+ u8 bus, u8 dev, u8 func, u16 reg);
+struct peci_request *peci_xfer_ep_pci_cfg_readl(struct peci_device *device, u8 seg,
+ u8 bus, u8 dev, u8 func, u16 reg);
+
+struct peci_request *peci_xfer_ep_mmio32_readl(struct peci_device *device, u8 bar, u8 seg,
+ u8 bus, u8 dev, u8 func, u64 offset);
+
+struct peci_request *peci_xfer_ep_mmio64_readl(struct peci_device *device, u8 bar, u8 seg,
+ u8 bus, u8 dev, u8 func, u64 offset);
+/**
+ * struct peci_device_id - PECI device data to match
+ * @data: pointer to driver private data specific to device
+ * @family: device family
+ * @model: device model
+ */
+struct peci_device_id {
+ const void *data;
+ u16 family;
+ u8 model;
+};
+
+extern struct device_type peci_device_type;
+extern const struct attribute_group *peci_device_groups[];
+
+int peci_device_create(struct peci_controller *controller, u8 addr);
+void peci_device_destroy(struct peci_device *device);
+
+extern struct bus_type peci_bus_type;
+extern const struct attribute_group *peci_bus_groups[];
+
+/**
+ * struct peci_driver - PECI driver
+ * @driver: inherit device driver
+ * @probe: probe callback
+ * @remove: remove callback
+ * @id_table: PECI device match table to decide which device to bind
+ */
+struct peci_driver {
+ struct device_driver driver;
+ int (*probe)(struct peci_device *device, const struct peci_device_id *id);
+ void (*remove)(struct peci_device *device);
+ const struct peci_device_id *id_table;
+};
+
+static inline struct peci_driver *to_peci_driver(struct device_driver *d)
+{
+ return container_of(d, struct peci_driver, driver);
+}
+
+int __peci_driver_register(struct peci_driver *driver, struct module *owner,
+ const char *mod_name);
+/**
+ * peci_driver_register() - register PECI driver
+ * @driver: the driver to be registered
+ *
+ * PECI drivers that don't need to do anything special in module init should
+ * use the convenience "module_peci_driver" macro instead.
+ *
+ * Return: zero on success, else a negative error code.
+ */
+#define peci_driver_register(driver) \
+ __peci_driver_register(driver, THIS_MODULE, KBUILD_MODNAME)
+void peci_driver_unregister(struct peci_driver *driver);
+
+/**
+ * module_peci_driver() - helper macro for registering a modular PECI driver
+ * @__peci_driver: peci_driver struct
+ *
+ * Helper macro for PECI drivers which do not do anything special in module
+ * init/exit. This eliminates a lot of boilerplate. Each module may only
+ * use this macro once, and calling it replaces module_init() and module_exit().
+ */
+#define module_peci_driver(__peci_driver) \
+ module_driver(__peci_driver, peci_driver_register, peci_driver_unregister)
+
+extern struct device_type peci_controller_type;
+
+int peci_controller_scan_devices(struct peci_controller *controller);
+
+#endif /* __PECI_INTERNAL_H */
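The driver-side interface declared above (struct peci_driver, peci_driver_register(), module_peci_driver()) is private to drivers/peci; drivers/peci/cpu.c consumes it via the same #include "internal.h". For illustration only, a minimal hypothetical driver would look roughly like this ("example_" names are not part of the patch):

/* Hypothetical PECI device driver sketch. */
#include <linux/module.h>
#include <linux/peci.h>

#include "internal.h"	/* struct peci_driver, peci_device_id, module_peci_driver */

static int example_probe(struct peci_device *device,
			 const struct peci_device_id *id)
{
	dev_info(&device->dev, "bound to CPU family %u, model %#x\n",
		 device->info.family, device->info.model);
	return 0;
}

static const struct peci_device_id example_ids[] = {
	{ .family = 6, .model = 0x6a },	/* example entry only */
	{ }
};

static struct peci_driver example_driver = {
	.probe = example_probe,
	.id_table = example_ids,
	.driver = {
		.name = "peci-example",
	},
};
module_peci_driver(example_driver);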
diff --git a/drivers/peci/request.c b/drivers/peci/request.c
new file mode 100644
index 000000000000..8d6dd7b6b559
--- /dev/null
+++ b/drivers/peci/request.c
@@ -0,0 +1,482 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (c) 2021 Intel Corporation
+
+#include <linux/bug.h>
+#include <linux/export.h>
+#include <linux/pci.h>
+#include <linux/peci.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include <asm/unaligned.h>
+
+#include "internal.h"
+
+#define PECI_GET_DIB_CMD 0xf7
+#define PECI_GET_DIB_WR_LEN 1
+#define PECI_GET_DIB_RD_LEN 8
+
+#define PECI_GET_TEMP_CMD 0x01
+#define PECI_GET_TEMP_WR_LEN 1
+#define PECI_GET_TEMP_RD_LEN 2
+
+#define PECI_RDPKGCFG_CMD 0xa1
+#define PECI_RDPKGCFG_WR_LEN 5
+#define PECI_RDPKGCFG_RD_LEN_BASE 1
+#define PECI_WRPKGCFG_CMD 0xa5
+#define PECI_WRPKGCFG_WR_LEN_BASE 6
+#define PECI_WRPKGCFG_RD_LEN 1
+
+#define PECI_RDIAMSR_CMD 0xb1
+#define PECI_RDIAMSR_WR_LEN 5
+#define PECI_RDIAMSR_RD_LEN 9
+#define PECI_WRIAMSR_CMD 0xb5
+#define PECI_RDIAMSREX_CMD 0xd1
+#define PECI_RDIAMSREX_WR_LEN 6
+#define PECI_RDIAMSREX_RD_LEN 9
+
+#define PECI_RDPCICFG_CMD 0x61
+#define PECI_RDPCICFG_WR_LEN 6
+#define PECI_RDPCICFG_RD_LEN 5
+#define PECI_RDPCICFG_RD_LEN_MAX 24
+#define PECI_WRPCICFG_CMD 0x65
+
+#define PECI_RDPCICFGLOCAL_CMD 0xe1
+#define PECI_RDPCICFGLOCAL_WR_LEN 5
+#define PECI_RDPCICFGLOCAL_RD_LEN_BASE 1
+#define PECI_WRPCICFGLOCAL_CMD 0xe5
+#define PECI_WRPCICFGLOCAL_WR_LEN_BASE 6
+#define PECI_WRPCICFGLOCAL_RD_LEN 1
+
+#define PECI_ENDPTCFG_TYPE_LOCAL_PCI 0x03
+#define PECI_ENDPTCFG_TYPE_PCI 0x04
+#define PECI_ENDPTCFG_TYPE_MMIO 0x05
+#define PECI_ENDPTCFG_ADDR_TYPE_PCI 0x04
+#define PECI_ENDPTCFG_ADDR_TYPE_MMIO_D 0x05
+#define PECI_ENDPTCFG_ADDR_TYPE_MMIO_Q 0x06
+#define PECI_RDENDPTCFG_CMD 0xc1
+#define PECI_RDENDPTCFG_PCI_WR_LEN 12
+#define PECI_RDENDPTCFG_MMIO_WR_LEN_BASE 10
+#define PECI_RDENDPTCFG_MMIO_D_WR_LEN 14
+#define PECI_RDENDPTCFG_MMIO_Q_WR_LEN 18
+#define PECI_RDENDPTCFG_RD_LEN_BASE 1
+#define PECI_WRENDPTCFG_CMD 0xc5
+#define PECI_WRENDPTCFG_PCI_WR_LEN_BASE 13
+#define PECI_WRENDPTCFG_MMIO_D_WR_LEN_BASE 15
+#define PECI_WRENDPTCFG_MMIO_Q_WR_LEN_BASE 19
+#define PECI_WRENDPTCFG_RD_LEN 1
+
+/* Device Specific Completion Code (CC) Definition */
+#define PECI_CC_SUCCESS 0x40
+#define PECI_CC_NEED_RETRY 0x80
+#define PECI_CC_OUT_OF_RESOURCE 0x81
+#define PECI_CC_UNAVAIL_RESOURCE 0x82
+#define PECI_CC_INVALID_REQ 0x90
+#define PECI_CC_MCA_ERROR 0x91
+#define PECI_CC_CATASTROPHIC_MCA_ERROR 0x93
+#define PECI_CC_FATAL_MCA_ERROR 0x94
+#define PECI_CC_PARITY_ERR_GPSB_OR_PMSB 0x98
+#define PECI_CC_PARITY_ERR_GPSB_OR_PMSB_IERR 0x9B
+#define PECI_CC_PARITY_ERR_GPSB_OR_PMSB_MCA 0x9C
+
+#define PECI_RETRY_BIT BIT(0)
+
+#define PECI_RETRY_TIMEOUT msecs_to_jiffies(700)
+#define PECI_RETRY_INTERVAL_MIN msecs_to_jiffies(1)
+#define PECI_RETRY_INTERVAL_MAX msecs_to_jiffies(128)
+
+static u8 peci_request_data_cc(struct peci_request *req)
+{
+ return req->rx.buf[0];
+}
+
+/**
+ * peci_request_status() - return -errno based on PECI completion code
+ * @req: the PECI request that contains response data with completion code
+ *
+ * It can't be used for Ping(), GetDIB() and GetTemp() - for those commands we
+ * don't expect a completion code in the response.
+ *
+ * Return: -errno
+ */
+int peci_request_status(struct peci_request *req)
+{
+ u8 cc = peci_request_data_cc(req);
+
+ if (cc != PECI_CC_SUCCESS)
+ dev_dbg(&req->device->dev, "ret: %#02x\n", cc);
+
+ switch (cc) {
+ case PECI_CC_SUCCESS:
+ return 0;
+ case PECI_CC_NEED_RETRY:
+ case PECI_CC_OUT_OF_RESOURCE:
+ case PECI_CC_UNAVAIL_RESOURCE:
+ return -EAGAIN;
+ case PECI_CC_INVALID_REQ:
+ return -EINVAL;
+ case PECI_CC_MCA_ERROR:
+ case PECI_CC_CATASTROPHIC_MCA_ERROR:
+ case PECI_CC_FATAL_MCA_ERROR:
+ case PECI_CC_PARITY_ERR_GPSB_OR_PMSB:
+ case PECI_CC_PARITY_ERR_GPSB_OR_PMSB_IERR:
+ case PECI_CC_PARITY_ERR_GPSB_OR_PMSB_MCA:
+ return -EIO;
+ }
+
+ WARN_ONCE(1, "Unknown PECI completion code: %#02x\n", cc);
+
+ return -EIO;
+}
+EXPORT_SYMBOL_NS_GPL(peci_request_status, PECI);
+
+static int peci_request_xfer(struct peci_request *req)
+{
+ struct peci_device *device = req->device;
+ struct peci_controller *controller = to_peci_controller(device->dev.parent);
+ int ret;
+
+ mutex_lock(&controller->bus_lock);
+ ret = controller->ops->xfer(controller, device->addr, req);
+ mutex_unlock(&controller->bus_lock);
+
+ return ret;
+}
+
+static int peci_request_xfer_retry(struct peci_request *req)
+{
+ long wait_interval = PECI_RETRY_INTERVAL_MIN;
+ struct peci_device *device = req->device;
+ struct peci_controller *controller = to_peci_controller(device->dev.parent);
+ unsigned long start = jiffies;
+ int ret;
+
+ /* Don't try to use it for ping */
+ if (WARN_ON(req->tx.len == 0))
+ return 0;
+
+ do {
+ ret = peci_request_xfer(req);
+ if (ret) {
+ dev_dbg(&controller->dev, "xfer error: %d\n", ret);
+ return ret;
+ }
+
+ if (peci_request_status(req) != -EAGAIN)
+ return 0;
+
+ /* Set the retry bit to indicate a retry attempt */
+ req->tx.buf[1] |= PECI_RETRY_BIT;
+
+ if (schedule_timeout_interruptible(wait_interval))
+ return -ERESTARTSYS;
+
+ wait_interval = min_t(long, wait_interval * 2, PECI_RETRY_INTERVAL_MAX);
+ } while (time_before(jiffies, start + PECI_RETRY_TIMEOUT));
+
+ dev_dbg(&controller->dev, "request timed out\n");
+
+ return -ETIMEDOUT;
+}
+
+/**
+ * peci_request_alloc() - allocate &struct peci_request
+ * @device: PECI device to which request is going to be sent
+ * @tx_len: TX length
+ * @rx_len: RX length
+ *
+ * Return: A pointer to a newly allocated &struct peci_request on success or NULL otherwise.
+ */
+struct peci_request *peci_request_alloc(struct peci_device *device, u8 tx_len, u8 rx_len)
+{
+ struct peci_request *req;
+
+ /*
+	 * TX and RX buffers are fixed-length members of peci_request; this is
+	 * just a warning for developers to make sure to expand the buffers (or
+	 * change the allocation method) if we go over the current limit.
+ */
+ if (WARN_ON_ONCE(tx_len > PECI_REQUEST_MAX_BUF_SIZE || rx_len > PECI_REQUEST_MAX_BUF_SIZE))
+ return NULL;
+ /*
+	 * The PECI controllers that we are using now don't support DMA. This
+	 * should be converted to the DMA API, to avoid an extra copy, once
+	 * support for controllers that do allow it is added.
+ */
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return NULL;
+
+ req->device = device;
+ req->tx.len = tx_len;
+ req->rx.len = rx_len;
+
+ return req;
+}
+EXPORT_SYMBOL_NS_GPL(peci_request_alloc, PECI);
+
+/**
+ * peci_request_free() - free peci_request
+ * @req: the PECI request to be freed
+ */
+void peci_request_free(struct peci_request *req)
+{
+ kfree(req);
+}
+EXPORT_SYMBOL_NS_GPL(peci_request_free, PECI);
+
+struct peci_request *peci_xfer_get_dib(struct peci_device *device)
+{
+ struct peci_request *req;
+ int ret;
+
+ req = peci_request_alloc(device, PECI_GET_DIB_WR_LEN, PECI_GET_DIB_RD_LEN);
+ if (!req)
+ return ERR_PTR(-ENOMEM);
+
+ req->tx.buf[0] = PECI_GET_DIB_CMD;
+
+ ret = peci_request_xfer(req);
+ if (ret) {
+ peci_request_free(req);
+ return ERR_PTR(ret);
+ }
+
+ return req;
+}
+EXPORT_SYMBOL_NS_GPL(peci_xfer_get_dib, PECI);
+
+struct peci_request *peci_xfer_get_temp(struct peci_device *device)
+{
+ struct peci_request *req;
+ int ret;
+
+ req = peci_request_alloc(device, PECI_GET_TEMP_WR_LEN, PECI_GET_TEMP_RD_LEN);
+ if (!req)
+ return ERR_PTR(-ENOMEM);
+
+ req->tx.buf[0] = PECI_GET_TEMP_CMD;
+
+ ret = peci_request_xfer(req);
+ if (ret) {
+ peci_request_free(req);
+ return ERR_PTR(ret);
+ }
+
+ return req;
+}
+EXPORT_SYMBOL_NS_GPL(peci_xfer_get_temp, PECI);
+
+static struct peci_request *
+__pkg_cfg_read(struct peci_device *device, u8 index, u16 param, u8 len)
+{
+ struct peci_request *req;
+ int ret;
+
+ req = peci_request_alloc(device, PECI_RDPKGCFG_WR_LEN, PECI_RDPKGCFG_RD_LEN_BASE + len);
+ if (!req)
+ return ERR_PTR(-ENOMEM);
+
+ req->tx.buf[0] = PECI_RDPKGCFG_CMD;
+ req->tx.buf[1] = 0;
+ req->tx.buf[2] = index;
+ put_unaligned_le16(param, &req->tx.buf[3]);
+
+ ret = peci_request_xfer_retry(req);
+ if (ret) {
+ peci_request_free(req);
+ return ERR_PTR(ret);
+ }
+
+ return req;
+}
+
+static u32 __get_pci_addr(u8 bus, u8 dev, u8 func, u16 reg)
+{
+ return reg | PCI_DEVID(bus, PCI_DEVFN(dev, func)) << 12;
+}
+
+static struct peci_request *
+__pci_cfg_local_read(struct peci_device *device, u8 bus, u8 dev, u8 func, u16 reg, u8 len)
+{
+ struct peci_request *req;
+ u32 pci_addr;
+ int ret;
+
+ req = peci_request_alloc(device, PECI_RDPCICFGLOCAL_WR_LEN,
+ PECI_RDPCICFGLOCAL_RD_LEN_BASE + len);
+ if (!req)
+ return ERR_PTR(-ENOMEM);
+
+ pci_addr = __get_pci_addr(bus, dev, func, reg);
+
+ req->tx.buf[0] = PECI_RDPCICFGLOCAL_CMD;
+ req->tx.buf[1] = 0;
+ put_unaligned_le24(pci_addr, &req->tx.buf[2]);
+
+ ret = peci_request_xfer_retry(req);
+ if (ret) {
+ peci_request_free(req);
+ return ERR_PTR(ret);
+ }
+
+ return req;
+}
+
+static struct peci_request *
+__ep_pci_cfg_read(struct peci_device *device, u8 msg_type, u8 seg,
+ u8 bus, u8 dev, u8 func, u16 reg, u8 len)
+{
+ struct peci_request *req;
+ u32 pci_addr;
+ int ret;
+
+ req = peci_request_alloc(device, PECI_RDENDPTCFG_PCI_WR_LEN,
+ PECI_RDENDPTCFG_RD_LEN_BASE + len);
+ if (!req)
+ return ERR_PTR(-ENOMEM);
+
+ pci_addr = __get_pci_addr(bus, dev, func, reg);
+
+ req->tx.buf[0] = PECI_RDENDPTCFG_CMD;
+ req->tx.buf[1] = 0;
+ req->tx.buf[2] = msg_type;
+ req->tx.buf[3] = 0;
+ req->tx.buf[4] = 0;
+ req->tx.buf[5] = 0;
+ req->tx.buf[6] = PECI_ENDPTCFG_ADDR_TYPE_PCI;
+ req->tx.buf[7] = seg; /* PCI Segment */
+ put_unaligned_le32(pci_addr, &req->tx.buf[8]);
+
+ ret = peci_request_xfer_retry(req);
+ if (ret) {
+ peci_request_free(req);
+ return ERR_PTR(ret);
+ }
+
+ return req;
+}
+
+static struct peci_request *
+__ep_mmio_read(struct peci_device *device, u8 bar, u8 addr_type, u8 seg,
+ u8 bus, u8 dev, u8 func, u64 offset, u8 tx_len, u8 len)
+{
+ struct peci_request *req;
+ int ret;
+
+ req = peci_request_alloc(device, tx_len, PECI_RDENDPTCFG_RD_LEN_BASE + len);
+ if (!req)
+ return ERR_PTR(-ENOMEM);
+
+ req->tx.buf[0] = PECI_RDENDPTCFG_CMD;
+ req->tx.buf[1] = 0;
+ req->tx.buf[2] = PECI_ENDPTCFG_TYPE_MMIO;
+ req->tx.buf[3] = 0; /* Endpoint ID */
+ req->tx.buf[4] = 0; /* Reserved */
+ req->tx.buf[5] = bar;
+ req->tx.buf[6] = addr_type;
+ req->tx.buf[7] = seg; /* PCI Segment */
+ req->tx.buf[8] = PCI_DEVFN(dev, func);
+ req->tx.buf[9] = bus; /* PCI Bus */
+
+ if (addr_type == PECI_ENDPTCFG_ADDR_TYPE_MMIO_D)
+ put_unaligned_le32(offset, &req->tx.buf[10]);
+ else
+ put_unaligned_le64(offset, &req->tx.buf[10]);
+
+ ret = peci_request_xfer_retry(req);
+ if (ret) {
+ peci_request_free(req);
+ return ERR_PTR(ret);
+ }
+
+ return req;
+}
+
+u8 peci_request_data_readb(struct peci_request *req)
+{
+ return req->rx.buf[1];
+}
+EXPORT_SYMBOL_NS_GPL(peci_request_data_readb, PECI);
+
+u16 peci_request_data_readw(struct peci_request *req)
+{
+ return get_unaligned_le16(&req->rx.buf[1]);
+}
+EXPORT_SYMBOL_NS_GPL(peci_request_data_readw, PECI);
+
+u32 peci_request_data_readl(struct peci_request *req)
+{
+ return get_unaligned_le32(&req->rx.buf[1]);
+}
+EXPORT_SYMBOL_NS_GPL(peci_request_data_readl, PECI);
+
+u64 peci_request_data_readq(struct peci_request *req)
+{
+ return get_unaligned_le64(&req->rx.buf[1]);
+}
+EXPORT_SYMBOL_NS_GPL(peci_request_data_readq, PECI);
+
+u64 peci_request_dib_read(struct peci_request *req)
+{
+ return get_unaligned_le64(&req->rx.buf[0]);
+}
+EXPORT_SYMBOL_NS_GPL(peci_request_dib_read, PECI);
+
+s16 peci_request_temp_read(struct peci_request *req)
+{
+ return get_unaligned_le16(&req->rx.buf[0]);
+}
+EXPORT_SYMBOL_NS_GPL(peci_request_temp_read, PECI);
+
+#define __read_pkg_config(x, type) \
+struct peci_request *peci_xfer_pkg_cfg_##x(struct peci_device *device, u8 index, u16 param) \
+{ \
+ return __pkg_cfg_read(device, index, param, sizeof(type)); \
+} \
+EXPORT_SYMBOL_NS_GPL(peci_xfer_pkg_cfg_##x, PECI)
+
+__read_pkg_config(readb, u8);
+__read_pkg_config(readw, u16);
+__read_pkg_config(readl, u32);
+__read_pkg_config(readq, u64);
+
+#define __read_pci_config_local(x, type) \
+struct peci_request * \
+peci_xfer_pci_cfg_local_##x(struct peci_device *device, u8 bus, u8 dev, u8 func, u16 reg) \
+{ \
+ return __pci_cfg_local_read(device, bus, dev, func, reg, sizeof(type)); \
+} \
+EXPORT_SYMBOL_NS_GPL(peci_xfer_pci_cfg_local_##x, PECI)
+
+__read_pci_config_local(readb, u8);
+__read_pci_config_local(readw, u16);
+__read_pci_config_local(readl, u32);
+
+#define __read_ep_pci_config(x, msg_type, type) \
+struct peci_request * \
+peci_xfer_ep_pci_cfg_##x(struct peci_device *device, u8 seg, u8 bus, u8 dev, u8 func, u16 reg) \
+{ \
+ return __ep_pci_cfg_read(device, msg_type, seg, bus, dev, func, reg, sizeof(type)); \
+} \
+EXPORT_SYMBOL_NS_GPL(peci_xfer_ep_pci_cfg_##x, PECI)
+
+__read_ep_pci_config(local_readb, PECI_ENDPTCFG_TYPE_LOCAL_PCI, u8);
+__read_ep_pci_config(local_readw, PECI_ENDPTCFG_TYPE_LOCAL_PCI, u16);
+__read_ep_pci_config(local_readl, PECI_ENDPTCFG_TYPE_LOCAL_PCI, u32);
+__read_ep_pci_config(readb, PECI_ENDPTCFG_TYPE_PCI, u8);
+__read_ep_pci_config(readw, PECI_ENDPTCFG_TYPE_PCI, u16);
+__read_ep_pci_config(readl, PECI_ENDPTCFG_TYPE_PCI, u32);
+
+#define __read_ep_mmio(x, y, addr_type, type1, type2) \
+struct peci_request *peci_xfer_ep_mmio##y##_##x(struct peci_device *device, u8 bar, u8 seg, \
+ u8 bus, u8 dev, u8 func, u64 offset) \
+{ \
+ return __ep_mmio_read(device, bar, addr_type, seg, bus, dev, func, \
+ offset, PECI_RDENDPTCFG_MMIO_WR_LEN_BASE + sizeof(type1), \
+ sizeof(type2)); \
+} \
+EXPORT_SYMBOL_NS_GPL(peci_xfer_ep_mmio##y##_##x, PECI)
+
+__read_ep_mmio(readl, 32, PECI_ENDPTCFG_ADDR_TYPE_MMIO_D, u32, u32);
+__read_ep_mmio(readl, 64, PECI_ENDPTCFG_ADDR_TYPE_MMIO_Q, u64, u32);
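A worked example of the __get_pci_addr() packing used by the PCI configuration requests above, assuming only the standard PCI_DEVFN()/PCI_DEVID() encodings from <linux/pci.h>; the bus/device/function/register values are hypothetical:

/*
 * __get_pci_addr() packing: the register offset occupies the low 12 bits,
 * and PCI_DEVID(bus, PCI_DEVFN(dev, func)) = (bus << 8) | (dev << 3) | func
 * is shifted into the bits above it. For bus 0x1e, dev 0, func 1, reg 0x84:
 *
 *   PCI_DEVFN(0, 1)               = 0x01
 *   PCI_DEVID(0x1e, 0x01)         = 0x1e01
 *   __get_pci_addr(0x1e, 0, 1, 0x84) = 0x84 | (0x1e01 << 12) = 0x01e01084
 */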
diff --git a/drivers/peci/sysfs.c b/drivers/peci/sysfs.c
new file mode 100644
index 000000000000..db9ef05776e3
--- /dev/null
+++ b/drivers/peci/sysfs.c
@@ -0,0 +1,82 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (c) 2021 Intel Corporation
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/peci.h>
+
+#include "internal.h"
+
+static int rescan_controller(struct device *dev, void *data)
+{
+ if (dev->type != &peci_controller_type)
+ return 0;
+
+ return peci_controller_scan_devices(to_peci_controller(dev));
+}
+
+static ssize_t rescan_store(struct bus_type *bus, const char *buf, size_t count)
+{
+ bool res;
+ int ret;
+
+ ret = kstrtobool(buf, &res);
+ if (ret)
+ return ret;
+
+ if (!res)
+ return count;
+
+ ret = bus_for_each_dev(&peci_bus_type, NULL, NULL, rescan_controller);
+ if (ret)
+ return ret;
+
+ return count;
+}
+static BUS_ATTR_WO(rescan);
+
+static struct attribute *peci_bus_attrs[] = {
+ &bus_attr_rescan.attr,
+ NULL
+};
+
+static const struct attribute_group peci_bus_group = {
+ .attrs = peci_bus_attrs,
+};
+
+const struct attribute_group *peci_bus_groups[] = {
+ &peci_bus_group,
+ NULL
+};
+
+static ssize_t remove_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct peci_device *device = to_peci_device(dev);
+ bool res;
+ int ret;
+
+ ret = kstrtobool(buf, &res);
+ if (ret)
+ return ret;
+
+ if (res && device_remove_file_self(dev, attr))
+ peci_device_destroy(device);
+
+ return count;
+}
+static DEVICE_ATTR_IGNORE_LOCKDEP(remove, 0200, NULL, remove_store);
+
+static struct attribute *peci_device_attrs[] = {
+ &dev_attr_remove.attr,
+ NULL
+};
+
+static const struct attribute_group peci_device_group = {
+ .attrs = peci_device_attrs,
+};
+
+const struct attribute_group *peci_device_groups[] = {
+ &peci_device_group,
+ NULL
+};
diff --git a/drivers/pps/clients/pps-gpio.c b/drivers/pps/clients/pps-gpio.c
index 35799e6401c9..2f4b11b4dfcd 100644
--- a/drivers/pps/clients/pps-gpio.c
+++ b/drivers/pps/clients/pps-gpio.c
@@ -169,7 +169,7 @@ static int pps_gpio_probe(struct platform_device *pdev)
/* GPIO setup */
ret = pps_gpio_setup(dev);
if (ret)
- return -EINVAL;
+ return ret;
/* IRQ setup */
ret = gpiod_to_irq(data->gpio_pin);
diff --git a/drivers/reset/Kconfig b/drivers/reset/Kconfig
index 6f8ba0ddc05f..b496028b6bfa 100644
--- a/drivers/reset/Kconfig
+++ b/drivers/reset/Kconfig
@@ -256,6 +256,19 @@ config RESET_TI_SYSCON
you wish to use the reset framework for such memory-mapped devices,
say Y here. Otherwise, say N.
+config RESET_TN48M_CPLD
+ tristate "Delta Networks TN48M switch CPLD reset controller"
+ depends on MFD_TN48M_CPLD || COMPILE_TEST
+ default MFD_TN48M_CPLD
+ help
+ This enables the reset controller driver for the Delta TN48M CPLD.
+	  It provides reset signals for the Armada 7040 and 385 SoCs, the
+	  Alleycat 3X switch MACs, the Alaska OOB Ethernet PHY, the quad Alaska
+	  Ethernet PHYs and the Microchip PD69200 PoE PSE controller.
+
+ This driver can also be built as a module. If so, the module will be
+ called reset-tn48m.
+
config RESET_UNIPHIER
tristate "Reset controller driver for UniPhier SoCs"
depends on ARCH_UNIPHIER || COMPILE_TEST
diff --git a/drivers/reset/Makefile b/drivers/reset/Makefile
index bd0a97be18b5..a80a9c4008a7 100644
--- a/drivers/reset/Makefile
+++ b/drivers/reset/Makefile
@@ -33,6 +33,7 @@ obj-$(CONFIG_RESET_STARFIVE_JH7100) += reset-starfive-jh7100.o
obj-$(CONFIG_RESET_SUNXI) += reset-sunxi.o
obj-$(CONFIG_RESET_TI_SCI) += reset-ti-sci.o
obj-$(CONFIG_RESET_TI_SYSCON) += reset-ti-syscon.o
+obj-$(CONFIG_RESET_TN48M_CPLD) += reset-tn48m.o
obj-$(CONFIG_RESET_UNIPHIER) += reset-uniphier.o
obj-$(CONFIG_RESET_UNIPHIER_GLUE) += reset-uniphier-glue.o
obj-$(CONFIG_RESET_ZYNQ) += reset-zynq.o
diff --git a/drivers/reset/reset-tn48m.c b/drivers/reset/reset-tn48m.c
new file mode 100644
index 000000000000..130027291b6e
--- /dev/null
+++ b/drivers/reset/reset-tn48m.c
@@ -0,0 +1,128 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Delta TN48M CPLD reset driver
+ *
+ * Copyright (C) 2021 Sartura Ltd.
+ *
+ * Author: Robert Marko <robert.marko@sartura.hr>
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/reset-controller.h>
+
+#include <dt-bindings/reset/delta,tn48m-reset.h>
+
+#define TN48M_RESET_REG 0x10
+
+#define TN48M_RESET_TIMEOUT_US 125000
+#define TN48M_RESET_SLEEP_US 10
+
+struct tn48_reset_map {
+ u8 bit;
+};
+
+struct tn48_reset_data {
+ struct reset_controller_dev rcdev;
+ struct regmap *regmap;
+};
+
+static const struct tn48_reset_map tn48m_resets[] = {
+ [CPU_88F7040_RESET] = {0},
+ [CPU_88F6820_RESET] = {1},
+ [MAC_98DX3265_RESET] = {2},
+ [PHY_88E1680_RESET] = {4},
+ [PHY_88E1512_RESET] = {6},
+ [POE_RESET] = {7},
+};
+
+static inline struct tn48_reset_data *to_tn48_reset_data(
+ struct reset_controller_dev *rcdev)
+{
+ return container_of(rcdev, struct tn48_reset_data, rcdev);
+}
+
+static int tn48m_control_reset(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct tn48_reset_data *data = to_tn48_reset_data(rcdev);
+ unsigned int val;
+
+ regmap_update_bits(data->regmap, TN48M_RESET_REG,
+ BIT(tn48m_resets[id].bit), 0);
+
+ return regmap_read_poll_timeout(data->regmap,
+ TN48M_RESET_REG,
+ val,
+ val & BIT(tn48m_resets[id].bit),
+ TN48M_RESET_SLEEP_US,
+ TN48M_RESET_TIMEOUT_US);
+}
+
+static int tn48m_control_status(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct tn48_reset_data *data = to_tn48_reset_data(rcdev);
+ unsigned int regval;
+ int ret;
+
+ ret = regmap_read(data->regmap, TN48M_RESET_REG, &regval);
+ if (ret < 0)
+ return ret;
+
+ if (BIT(tn48m_resets[id].bit) & regval)
+ return 0;
+ else
+ return 1;
+}
+
+static const struct reset_control_ops tn48_reset_ops = {
+ .reset = tn48m_control_reset,
+ .status = tn48m_control_status,
+};
+
+static int tn48m_reset_probe(struct platform_device *pdev)
+{
+ struct tn48_reset_data *data;
+ struct regmap *regmap;
+
+ regmap = dev_get_regmap(pdev->dev.parent, NULL);
+ if (!regmap)
+ return -ENODEV;
+
+ data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->regmap = regmap;
+
+ data->rcdev.owner = THIS_MODULE;
+ data->rcdev.ops = &tn48_reset_ops;
+ data->rcdev.nr_resets = ARRAY_SIZE(tn48m_resets);
+ data->rcdev.of_node = pdev->dev.of_node;
+
+ return devm_reset_controller_register(&pdev->dev, &data->rcdev);
+}
+
+static const struct of_device_id tn48m_reset_of_match[] = {
+ { .compatible = "delta,tn48m-reset" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, tn48m_reset_of_match);
+
+static struct platform_driver tn48m_reset_driver = {
+ .driver = {
+ .name = "delta-tn48m-reset",
+ .of_match_table = tn48m_reset_of_match,
+ },
+ .probe = tn48m_reset_probe,
+};
+module_platform_driver(tn48m_reset_driver);
+
+MODULE_AUTHOR("Robert Marko <robert.marko@sartura.hr>");
+MODULE_DESCRIPTION("Delta TN48M CPLD reset driver");
+MODULE_LICENSE("GPL");
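From the consumer side, the reset lines exposed above are used through the generic reset consumer API; since the driver implements only .reset and .status, reset_control_reset() is the call that ends up in tn48m_control_reset(). A minimal sketch, where the "example_" function is hypothetical and the actual line is selected by the board device tree using the delta,tn48m-reset.h IDs:

/* Hypothetical consumer of a TN48M reset line. */
#include <linux/device.h>
#include <linux/err.h>
#include <linux/reset.h>

static int example_reset_peer(struct device *dev)
{
	struct reset_control *rst;

	rst = devm_reset_control_get_exclusive(dev, NULL);
	if (IS_ERR(rst))
		return PTR_ERR(rst);

	/* Clears the reset bit and polls until it reads back as set. */
	return reset_control_reset(rst);
}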
diff --git a/drivers/thunderbolt/nvm.c b/drivers/thunderbolt/nvm.c
index 3a5336913cca..b3f310389378 100644
--- a/drivers/thunderbolt/nvm.c
+++ b/drivers/thunderbolt/nvm.c
@@ -154,10 +154,8 @@ int tb_nvm_add_non_active(struct tb_nvm *nvm, size_t size,
void tb_nvm_free(struct tb_nvm *nvm)
{
if (nvm) {
- if (nvm->non_active)
- nvmem_unregister(nvm->non_active);
- if (nvm->active)
- nvmem_unregister(nvm->active);
+ nvmem_unregister(nvm->non_active);
+ nvmem_unregister(nvm->active);
vfree(nvm->buf);
ida_simple_remove(&nvm_ida, nvm->id);
}
diff --git a/drivers/w1/slaves/w1_therm.c b/drivers/w1/slaves/w1_therm.c
index 565578002d79..c7b8a8e787e2 100644
--- a/drivers/w1/slaves/w1_therm.c
+++ b/drivers/w1/slaves/w1_therm.c
@@ -2089,16 +2089,20 @@ static ssize_t w1_seq_show(struct device *device,
if (sl->reg_num.id == reg_num->id)
seq = i;
+ if (w1_reset_bus(sl->master))
+ goto error;
+
+ /* Put the device into chain DONE state */
+ w1_write_8(sl->master, W1_MATCH_ROM);
+ w1_write_block(sl->master, (u8 *)&rn, 8);
w1_write_8(sl->master, W1_42_CHAIN);
w1_write_8(sl->master, W1_42_CHAIN_DONE);
w1_write_8(sl->master, W1_42_CHAIN_DONE_INV);
- w1_read_block(sl->master, &ack, sizeof(ack));
/* check for acknowledgment */
ack = w1_read_8(sl->master);
if (ack != W1_42_SUCCESS_CONFIRM_BYTE)
goto error;
-
}
/* Exit from CHAIN state */
diff --git a/include/dt-bindings/reset/delta,tn48m-reset.h b/include/dt-bindings/reset/delta,tn48m-reset.h
new file mode 100644
index 000000000000..d4e9ed12de3e
--- /dev/null
+++ b/include/dt-bindings/reset/delta,tn48m-reset.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Delta TN48M CPLD reset driver
+ *
+ * Copyright (C) 2021 Sartura Ltd.
+ *
+ * Author: Robert Marko <robert.marko@sartura.hr>
+ */
+
+#ifndef _DT_BINDINGS_RESET_TN48M_H
+#define _DT_BINDINGS_RESET_TN48M_H
+
+#define CPU_88F7040_RESET 0
+#define CPU_88F6820_RESET 1
+#define MAC_98DX3265_RESET 2
+#define PHY_88E1680_RESET 3
+#define PHY_88E1512_RESET 4
+#define POE_RESET 5
+
+#endif /* _DT_BINDINGS_RESET_TN48M_H */
diff --git a/include/linux/firmware/intel/stratix10-smc.h b/include/linux/firmware/intel/stratix10-smc.h
index c3e5ab014caf..aad497a9ad8b 100644
--- a/include/linux/firmware/intel/stratix10-smc.h
+++ b/include/linux/firmware/intel/stratix10-smc.h
@@ -321,8 +321,6 @@ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_COMPLETED_WRITE)
#define INTEL_SIP_SMC_ECC_DBE \
INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_ECC_DBE)
-#endif
-
/**
* Request INTEL_SIP_SMC_RSU_NOTIFY
*
@@ -404,3 +402,22 @@ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_COMPLETED_WRITE)
#define INTEL_SIP_SMC_FUNCID_RSU_MAX_RETRY 18
#define INTEL_SIP_SMC_RSU_MAX_RETRY \
INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_RSU_MAX_RETRY)
+
+/**
+ * Request INTEL_SIP_SMC_FIRMWARE_VERSION
+ *
+ * Sync call used to query the version of running firmware
+ *
+ * Call register usage:
+ * a0 INTEL_SIP_SMC_FIRMWARE_VERSION
+ * a1-a7 not used
+ *
+ * Return status:
+ * a0 INTEL_SIP_SMC_STATUS_OK or INTEL_SIP_SMC_STATUS_ERROR
+ * a1 running firmware version
+ */
+#define INTEL_SIP_SMC_FUNCID_FIRMWARE_VERSION 31
+#define INTEL_SIP_SMC_FIRMWARE_VERSION \
+ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FIRMWARE_VERSION)
+
+#endif
diff --git a/include/linux/firmware/intel/stratix10-svc-client.h b/include/linux/firmware/intel/stratix10-svc-client.h
index 19781b0f6429..18c1841fdb1f 100644
--- a/include/linux/firmware/intel/stratix10-svc-client.h
+++ b/include/linux/firmware/intel/stratix10-svc-client.h
@@ -104,6 +104,9 @@ struct stratix10_svc_chan;
*
* @COMMAND_RSU_DCMF_VERSION: query firmware for the DCMF version, return status
* is SVC_STATUS_OK or SVC_STATUS_ERROR
+ *
+ * @COMMAND_FIRMWARE_VERSION: query running firmware version, return status
+ * is SVC_STATUS_OK or SVC_STATUS_ERROR
*/
enum stratix10_svc_command_code {
COMMAND_NOOP = 0,
@@ -117,6 +120,7 @@ enum stratix10_svc_command_code {
COMMAND_RSU_RETRY,
COMMAND_RSU_MAX_RETRY,
COMMAND_RSU_DCMF_VERSION,
+ COMMAND_FIRMWARE_VERSION,
};
/**
diff --git a/include/linux/firmware/xlnx-zynqmp.h b/include/linux/firmware/xlnx-zynqmp.h
index 907cb01890cf..cf557fbeb8c7 100644
--- a/include/linux/firmware/xlnx-zynqmp.h
+++ b/include/linux/firmware/xlnx-zynqmp.h
@@ -143,6 +143,9 @@ enum pm_ioctl_id {
IOCTL_OSPI_MUX_SELECT = 21,
/* Register SGI to ATF */
IOCTL_REGISTER_SGI = 25,
+ /* Runtime feature configuration */
+ IOCTL_SET_FEATURE_CONFIG = 26,
+ IOCTL_GET_FEATURE_CONFIG = 27,
};
enum pm_query_id {
@@ -376,6 +379,14 @@ enum ospi_mux_select_type {
PM_OSPI_MUX_SEL_LINEAR = 1,
};
+enum pm_feature_config_id {
+ PM_FEATURE_INVALID = 0,
+ PM_FEATURE_OVERTEMP_STATUS = 1,
+ PM_FEATURE_OVERTEMP_VALUE = 2,
+ PM_FEATURE_EXTWDT_STATUS = 3,
+ PM_FEATURE_EXTWDT_VALUE = 4,
+};
+
/**
* struct zynqmp_pm_query_data - PM query data
* @qid: query ID
@@ -447,6 +458,8 @@ int zynqmp_pm_load_pdi(const u32 src, const u64 address);
int zynqmp_pm_register_notifier(const u32 node, const u32 event,
const u32 wake, const u32 enable);
int zynqmp_pm_feature(const u32 api_id);
+int zynqmp_pm_set_feature_config(enum pm_feature_config_id id, u32 value);
+int zynqmp_pm_get_feature_config(enum pm_feature_config_id id, u32 *payload);
#else
static inline int zynqmp_pm_get_api_version(u32 *version)
{
@@ -689,6 +702,18 @@ static inline int zynqmp_pm_feature(const u32 api_id)
{
return -ENODEV;
}
+
+static inline int zynqmp_pm_set_feature_config(enum pm_feature_config_id id,
+ u32 value)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_get_feature_config(enum pm_feature_config_id id,
+ u32 *payload)
+{
+ return -ENODEV;
+}
#endif
#endif /* __FIRMWARE_ZYNQMP_H__ */
diff --git a/include/linux/mux/consumer.h b/include/linux/mux/consumer.h
index 7a09b040ac39..2e25c838f831 100644
--- a/include/linux/mux/consumer.h
+++ b/include/linux/mux/consumer.h
@@ -14,14 +14,19 @@
struct device;
struct mux_control;
+struct mux_state;
unsigned int mux_control_states(struct mux_control *mux);
int __must_check mux_control_select_delay(struct mux_control *mux,
unsigned int state,
unsigned int delay_us);
+int __must_check mux_state_select_delay(struct mux_state *mstate,
+ unsigned int delay_us);
int __must_check mux_control_try_select_delay(struct mux_control *mux,
unsigned int state,
unsigned int delay_us);
+int __must_check mux_state_try_select_delay(struct mux_state *mstate,
+ unsigned int delay_us);
static inline int __must_check mux_control_select(struct mux_control *mux,
unsigned int state)
@@ -29,18 +34,31 @@ static inline int __must_check mux_control_select(struct mux_control *mux,
return mux_control_select_delay(mux, state, 0);
}
+static inline int __must_check mux_state_select(struct mux_state *mstate)
+{
+ return mux_state_select_delay(mstate, 0);
+}
+
static inline int __must_check mux_control_try_select(struct mux_control *mux,
unsigned int state)
{
return mux_control_try_select_delay(mux, state, 0);
}
+static inline int __must_check mux_state_try_select(struct mux_state *mstate)
+{
+ return mux_state_try_select_delay(mstate, 0);
+}
+
int mux_control_deselect(struct mux_control *mux);
+int mux_state_deselect(struct mux_state *mstate);
struct mux_control *mux_control_get(struct device *dev, const char *mux_name);
void mux_control_put(struct mux_control *mux);
struct mux_control *devm_mux_control_get(struct device *dev,
const char *mux_name);
+struct mux_state *devm_mux_state_get(struct device *dev,
+ const char *mux_name);
#endif /* _LINUX_MUX_CONSUMER_H */
diff --git a/include/linux/nvmem-provider.h b/include/linux/nvmem-provider.h
index c9a3ac9efeaa..50caa117cb62 100644
--- a/include/linux/nvmem-provider.h
+++ b/include/linux/nvmem-provider.h
@@ -135,8 +135,6 @@ void nvmem_unregister(struct nvmem_device *nvmem);
struct nvmem_device *devm_nvmem_register(struct device *dev,
const struct nvmem_config *cfg);
-int devm_nvmem_unregister(struct device *dev, struct nvmem_device *nvmem);
-
void nvmem_add_cell_table(struct nvmem_cell_table *table);
void nvmem_del_cell_table(struct nvmem_cell_table *table);
@@ -155,12 +153,6 @@ devm_nvmem_register(struct device *dev, const struct nvmem_config *c)
return nvmem_register(c);
}
-static inline int
-devm_nvmem_unregister(struct device *dev, struct nvmem_device *nvmem)
-{
- return -EOPNOTSUPP;
-}
-
static inline void nvmem_add_cell_table(struct nvmem_cell_table *table) {}
static inline void nvmem_del_cell_table(struct nvmem_cell_table *table) {}
diff --git a/include/linux/peci-cpu.h b/include/linux/peci-cpu.h
new file mode 100644
index 000000000000..ff8ae9c26c80
--- /dev/null
+++ b/include/linux/peci-cpu.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2021 Intel Corporation */
+
+#ifndef __LINUX_PECI_CPU_H
+#define __LINUX_PECI_CPU_H
+
+#include <linux/types.h>
+
+#include "../../arch/x86/include/asm/intel-family.h"
+
+#define PECI_PCS_PKG_ID 0 /* Package Identifier Read */
+#define PECI_PKG_ID_CPU_ID 0x0000 /* CPUID Info */
+#define PECI_PKG_ID_PLATFORM_ID 0x0001 /* Platform ID */
+#define PECI_PKG_ID_DEVICE_ID 0x0002 /* Uncore Device ID */
+#define PECI_PKG_ID_MAX_THREAD_ID 0x0003 /* Max Thread ID */
+#define PECI_PKG_ID_MICROCODE_REV 0x0004 /* CPU Microcode Update Revision */
+#define PECI_PKG_ID_MCA_ERROR_LOG 0x0005 /* Machine Check Status */
+#define PECI_PCS_MODULE_TEMP 9 /* Per Core DTS Temperature Read */
+#define PECI_PCS_THERMAL_MARGIN 10 /* DTS thermal margin */
+#define PECI_PCS_DDR_DIMM_TEMP 14 /* DDR DIMM Temperature */
+#define PECI_PCS_TEMP_TARGET 16 /* Temperature Target Read */
+#define PECI_PCS_TDP_UNITS 30 /* Units for power/energy registers */
+
+struct peci_device;
+
+int peci_temp_read(struct peci_device *device, s16 *temp_raw);
+
+int peci_pcs_read(struct peci_device *device, u8 index,
+ u16 param, u32 *data);
+
+int peci_pci_local_read(struct peci_device *device, u8 bus, u8 dev,
+ u8 func, u16 reg, u32 *data);
+
+int peci_ep_pci_local_read(struct peci_device *device, u8 seg,
+ u8 bus, u8 dev, u8 func, u16 reg, u32 *data);
+
+int peci_mmio_read(struct peci_device *device, u8 bar, u8 seg,
+ u8 bus, u8 dev, u8 func, u64 address, u32 *data);
+
+#endif /* __LINUX_PECI_CPU_H */
diff --git a/include/linux/peci.h b/include/linux/peci.h
new file mode 100644
index 000000000000..06e6ef935297
--- /dev/null
+++ b/include/linux/peci.h
@@ -0,0 +1,112 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2018-2021 Intel Corporation */
+
+#ifndef __LINUX_PECI_H
+#define __LINUX_PECI_H
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+
+/*
+ * Currently we don't support any PECI command over 32 bytes.
+ */
+#define PECI_REQUEST_MAX_BUF_SIZE 32
+
+struct peci_controller;
+struct peci_request;
+
+/**
+ * struct peci_controller_ops - PECI controller specific methods
+ * @xfer: PECI transfer function
+ *
+ * PECI controllers may have different hardware interfaces - the drivers
+ * implementing PECI controllers can use this structure to abstract away those
+ * differences by exposing a common interface to the PECI core.
+ */
+struct peci_controller_ops {
+ int (*xfer)(struct peci_controller *controller, u8 addr, struct peci_request *req);
+};
+
+/**
+ * struct peci_controller - PECI controller
+ * @dev: device object to register PECI controller to the device model
+ * @ops: pointer to device specific controller operations
+ * @bus_lock: lock used to protect multiple callers
+ * @id: PECI controller ID
+ *
+ * PECI controllers usually connect to their drivers over a non-PECI bus,
+ * such as the platform bus.
+ * Each PECI controller can communicate with one or more PECI devices.
+ */
+struct peci_controller {
+ struct device dev;
+ struct peci_controller_ops *ops;
+ struct mutex bus_lock; /* held for the duration of xfer */
+ u8 id;
+};
+
+struct peci_controller *devm_peci_controller_add(struct device *parent,
+ struct peci_controller_ops *ops);
+
+static inline struct peci_controller *to_peci_controller(void *d)
+{
+ return container_of(d, struct peci_controller, dev);
+}
+
+/**
+ * struct peci_device - PECI device
+ * @dev: device object to register PECI device to the device model
+ * @controller: manages the bus segment hosting this PECI device
+ * @info: PECI device characteristics
+ * @info.family: device family
+ * @info.model: device model
+ * @info.peci_revision: PECI revision supported by the PECI device
+ * @info.socket_id: the socket ID represented by the PECI device
+ * @addr: address used on the PECI bus connected to the parent controller
+ * @deleted: indicates that PECI device was already deleted
+ *
+ * A peci_device identifies a single device (i.e. CPU) connected to a PECI bus.
+ * The behaviour exposed to the rest of the system is defined by the PECI driver
+ * managing the device.
+ */
+struct peci_device {
+ struct device dev;
+ struct {
+ u16 family;
+ u8 model;
+ u8 peci_revision;
+ u8 socket_id;
+ } info;
+ u8 addr;
+ bool deleted;
+};
+
+static inline struct peci_device *to_peci_device(struct device *d)
+{
+ return container_of(d, struct peci_device, dev);
+}
+
+/**
+ * struct peci_request - PECI request
+ * @device: PECI device to which the request is sent
+ * @tx: TX buffer specific data
+ * @tx.buf: TX buffer
+ * @tx.len: transfer data length in bytes
+ * @rx: RX buffer specific data
+ * @rx.buf: RX buffer
+ * @rx.len: received data length in bytes
+ *
+ * A peci_request represents a request issued by a PECI originator (TX) and
+ * a response received from a PECI responder (RX).
+ */
+struct peci_request {
+ struct peci_device *device;
+ struct {
+ u8 buf[PECI_REQUEST_MAX_BUF_SIZE];
+ u8 len;
+ } rx, tx;
+};
+
+#endif /* __LINUX_PECI_H */
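A hedged sketch of the provider side, registering a controller with the ops and
helper declared above. The platform driver, its "demo-peci" name and the empty
transfer body are illustrative assumptions only.

#include <linux/err.h>
#include <linux/module.h>
#include <linux/peci.h>
#include <linux/platform_device.h>

static int demo_xfer(struct peci_controller *controller, u8 addr,
		     struct peci_request *req)
{
	/*
	 * Push req->tx.buf (req->tx.len bytes) to the wire for target 'addr'
	 * and fill req->rx.buf / req->rx.len with the response.
	 */
	return -EIO;	/* placeholder: no hardware access in this sketch */
}

static struct peci_controller_ops demo_ops = {
	.xfer = demo_xfer,
};

static int demo_probe(struct platform_device *pdev)
{
	struct peci_controller *controller;

	controller = devm_peci_controller_add(&pdev->dev, &demo_ops);

	return PTR_ERR_OR_ZERO(controller);
}

static struct platform_driver demo_peci_driver = {
	.probe = demo_probe,
	.driver = {
		.name = "demo-peci",	/* hypothetical */
	},
};
module_platform_driver(demo_peci_driver);

MODULE_DESCRIPTION("Illustrative PECI controller registration sketch");
MODULE_LICENSE("GPL");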
diff --git a/include/linux/rtsx_pci.h b/include/linux/rtsx_pci.h
index 4ab7bfc675f1..3d780b44e678 100644
--- a/include/linux/rtsx_pci.h
+++ b/include/linux/rtsx_pci.h
@@ -1095,7 +1095,7 @@ struct pcr_ops {
unsigned int (*cd_deglitch)(struct rtsx_pcr *pcr);
int (*conv_clk_and_div_n)(int clk, int dir);
void (*fetch_vendor_settings)(struct rtsx_pcr *pcr);
- void (*force_power_down)(struct rtsx_pcr *pcr, u8 pm_state);
+ void (*force_power_down)(struct rtsx_pcr *pcr, u8 pm_state, bool runtime);
void (*stop_cmd)(struct rtsx_pcr *pcr);
void (*set_aspm)(struct rtsx_pcr *pcr, bool enable);
@@ -1201,8 +1201,6 @@ struct rtsx_pcr {
unsigned int card_exist;
struct delayed_work carddet_work;
- struct delayed_work idle_work;
- struct delayed_work rtd3_work;
spinlock_t lock;
struct mutex pcr_mutex;
@@ -1212,7 +1210,6 @@ struct rtsx_pcr {
unsigned int cur_clock;
bool remove_pci;
bool msi_en;
- bool is_runtime_suspended;
#define EXTRA_CAPS_SD_SDR50 (1 << 0)
#define EXTRA_CAPS_SD_SDR104 (1 << 1)
diff --git a/include/linux/vmw_vmci_defs.h b/include/linux/vmw_vmci_defs.h
index e36cb114c188..6fb663b36f72 100644
--- a/include/linux/vmw_vmci_defs.h
+++ b/include/linux/vmw_vmci_defs.h
@@ -12,15 +12,20 @@
#include <linux/bits.h>
/* Register offsets. */
-#define VMCI_STATUS_ADDR 0x00
-#define VMCI_CONTROL_ADDR 0x04
-#define VMCI_ICR_ADDR 0x08
-#define VMCI_IMR_ADDR 0x0c
-#define VMCI_DATA_OUT_ADDR 0x10
-#define VMCI_DATA_IN_ADDR 0x14
-#define VMCI_CAPS_ADDR 0x18
-#define VMCI_RESULT_LOW_ADDR 0x1c
-#define VMCI_RESULT_HIGH_ADDR 0x20
+#define VMCI_STATUS_ADDR 0x00
+#define VMCI_CONTROL_ADDR 0x04
+#define VMCI_ICR_ADDR 0x08
+#define VMCI_IMR_ADDR 0x0c
+#define VMCI_DATA_OUT_ADDR 0x10
+#define VMCI_DATA_IN_ADDR 0x14
+#define VMCI_CAPS_ADDR 0x18
+#define VMCI_RESULT_LOW_ADDR 0x1c
+#define VMCI_RESULT_HIGH_ADDR 0x20
+#define VMCI_DATA_OUT_LOW_ADDR 0x24
+#define VMCI_DATA_OUT_HIGH_ADDR 0x28
+#define VMCI_DATA_IN_LOW_ADDR 0x2c
+#define VMCI_DATA_IN_HIGH_ADDR 0x30
+#define VMCI_GUEST_PAGE_SHIFT 0x34
/* Max number of devices. */
#define VMCI_MAX_DEVICES 1
@@ -39,17 +44,27 @@
#define VMCI_CAPS_DATAGRAM BIT(2)
#define VMCI_CAPS_NOTIFICATIONS BIT(3)
#define VMCI_CAPS_PPN64 BIT(4)
+#define VMCI_CAPS_DMA_DATAGRAM BIT(5)
/* Interrupt Cause register bits. */
#define VMCI_ICR_DATAGRAM BIT(0)
#define VMCI_ICR_NOTIFICATION BIT(1)
+#define VMCI_ICR_DMA_DATAGRAM BIT(2)
/* Interrupt Mask register bits. */
#define VMCI_IMR_DATAGRAM BIT(0)
#define VMCI_IMR_NOTIFICATION BIT(1)
+#define VMCI_IMR_DMA_DATAGRAM BIT(2)
-/* Maximum MSI/MSI-X interrupt vectors in the device. */
-#define VMCI_MAX_INTRS 2
+/*
+ * Maximum MSI/MSI-X interrupt vectors in the device.
+ * If VMCI_CAPS_DMA_DATAGRAM is supported by the device,
+ * VMCI_MAX_INTRS_DMA_DATAGRAM vectors are available,
+ * otherwise only VMCI_MAX_INTRS_NOTIFICATION.
+ */
+#define VMCI_MAX_INTRS_NOTIFICATION 2
+#define VMCI_MAX_INTRS_DMA_DATAGRAM 3
+#define VMCI_MAX_INTRS VMCI_MAX_INTRS_DMA_DATAGRAM
/*
* Supported interrupt vectors. There is one for each ICR value above,
@@ -58,6 +73,7 @@
enum {
VMCI_INTR_DATAGRAM = 0,
VMCI_INTR_NOTIFICATION = 1,
+ VMCI_INTR_DMA_DATAGRAM = 2,
};
/*
@@ -83,6 +99,52 @@ enum {
#define VMCI_MAX_PINNED_QP_MEMORY ((size_t)(32 * 1024))
/*
+ * The version of the VMCI device that supports MMIO access to registers
+ * requests 256KB for BAR1 whereas the version of VMCI that supports
+ * MSI/MSI-X only requests 8KB. The layout of the larger 256KB region is:
+ * - the first 128KB are used for MSI/MSI-X.
+ * - the following 64KB are used for MMIO register access.
+ * - the remaining 64KB are unused.
+ */
+#define VMCI_WITH_MMIO_ACCESS_BAR_SIZE ((size_t)(256 * 1024))
+#define VMCI_MMIO_ACCESS_OFFSET ((size_t)(128 * 1024))
+#define VMCI_MMIO_ACCESS_SIZE ((size_t)(64 * 1024))
+
+/*
+ * For VMCI devices supporting the VMCI_CAPS_DMA_DATAGRAM capability, the
+ * sending and receiving of datagrams can be performed using DMA to/from
+ * a driver allocated buffer.
+ * Sending and receiving will be handled as follows:
+ * - when sending datagrams, the driver initializes the buffer where the
+ * data part will refer to the outgoing VMCI datagram, sets the busy flag
+ * to 1 and writes the address of the buffer to VMCI_DATA_OUT_HIGH_ADDR
+ * and VMCI_DATA_OUT_LOW_ADDR. Writing to VMCI_DATA_OUT_LOW_ADDR triggers
+ * the device processing of the buffer. When the device has processed the
+ * buffer, it will write the result value to the buffer and then clear the
+ * busy flag.
+ * - when receiving datagrams, the driver initializes the buffer where the
+ * data part will describe the receive buffer, clears the busy flag and
+ * writes the address of the buffer to VMCI_DATA_IN_HIGH_ADDR and
+ * VMCI_DATA_IN_LOW_ADDR. Writing to VMCI_DATA_IN_LOW_ADDR triggers the
+ * device processing of the buffer. The device will copy as many available
+ * datagrams into the buffer as possible, and then set the busy flag.
+ * When the busy flag is set, the driver will process the datagrams in the
+ * buffer.
+ */
+struct vmci_data_in_out_header {
+ uint32_t busy;
+ uint32_t opcode;
+ uint32_t size;
+ uint32_t rsvd;
+ uint64_t result;
+};
+
+struct vmci_sg_elem {
+ uint64_t addr;
+ uint64_t size;
+};
+
+/*
* We have a fixed set of resource IDs available in the VMX.
* This allows us to have a very simple implementation since we statically
* know how many will create datagram handles. If a new caller arrives and
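To make the busy/address handshake described above more concrete, here is a
hedged send-side sketch; the register base, the DMA buffer preparation and the
completion wait are assumptions, not the in-tree vmw_vmci driver code.

#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/vmw_vmci_defs.h>

static void demo_vmci_dma_send(void __iomem *iobase,
			       struct vmci_data_in_out_header *hdr,
			       dma_addr_t buf_pa)
{
	/* Datagram payload and hdr->opcode/size setup omitted (device specific). */
	hdr->busy = 1;			/* the device clears this when done */

	/* High half first; the write to the low register triggers processing. */
	iowrite32(upper_32_bits(buf_pa), iobase + VMCI_DATA_OUT_HIGH_ADDR);
	iowrite32(lower_32_bits(buf_pa), iobase + VMCI_DATA_OUT_LOW_ADDR);

	/*
	 * ... poll or take the VMCI_ICR_DMA_DATAGRAM interrupt and wait until
	 * hdr->busy reads back 0, then consume hdr->result ...
	 */
}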
diff --git a/include/trace/events/fsi.h b/include/trace/events/fsi.h
index 9832cb8e0eb0..c9a72e8432b8 100644
--- a/include/trace/events/fsi.h
+++ b/include/trace/events/fsi.h
@@ -122,6 +122,92 @@ TRACE_EVENT(fsi_master_break,
)
);
+TRACE_EVENT(fsi_slave_init,
+ TP_PROTO(const struct fsi_slave *slave),
+ TP_ARGS(slave),
+ TP_STRUCT__entry(
+ __field(int, master_idx)
+ __field(int, master_n_links)
+ __field(int, idx)
+ __field(int, link)
+ __field(int, chip_id)
+ __field(__u32, cfam_id)
+ __field(__u32, size)
+ ),
+ TP_fast_assign(
+ __entry->master_idx = slave->master->idx;
+ __entry->master_n_links = slave->master->n_links;
+ __entry->idx = slave->cdev_idx;
+ __entry->link = slave->link;
+ __entry->chip_id = slave->chip_id;
+ __entry->cfam_id = slave->cfam_id;
+ __entry->size = slave->size;
+ ),
+ TP_printk("fsi%d: idx:%d link:%d/%d cid:%d cfam:%08x %08x",
+ __entry->master_idx,
+ __entry->idx,
+ __entry->link,
+ __entry->master_n_links,
+ __entry->chip_id,
+ __entry->cfam_id,
+ __entry->size
+ )
+);
+
+TRACE_EVENT(fsi_slave_invalid_cfam,
+ TP_PROTO(const struct fsi_master *master, int link, uint32_t cfam_id),
+ TP_ARGS(master, link, cfam_id),
+ TP_STRUCT__entry(
+ __field(int, master_idx)
+ __field(int, master_n_links)
+ __field(int, link)
+ __field(__u32, cfam_id)
+ ),
+ TP_fast_assign(
+ __entry->master_idx = master->idx;
+ __entry->master_n_links = master->n_links;
+ __entry->link = link;
+ __entry->cfam_id = cfam_id;
+ ),
+ TP_printk("fsi%d: cfam:%08x link:%d/%d",
+ __entry->master_idx,
+ __entry->cfam_id,
+ __entry->link,
+ __entry->master_n_links
+ )
+);
+
+TRACE_EVENT(fsi_dev_init,
+ TP_PROTO(const struct fsi_device *dev),
+ TP_ARGS(dev),
+ TP_STRUCT__entry(
+ __field(int, master_idx)
+ __field(int, link)
+ __field(int, type)
+ __field(int, unit)
+ __field(int, version)
+ __field(__u32, addr)
+ __field(__u32, size)
+ ),
+ TP_fast_assign(
+ __entry->master_idx = dev->slave->master->idx;
+ __entry->link = dev->slave->link;
+ __entry->type = dev->engine_type;
+ __entry->unit = dev->unit;
+ __entry->version = dev->version;
+ __entry->addr = dev->addr;
+ __entry->size = dev->size;
+ ),
+ TP_printk("fsi%d: slv%d: t:%02x u:%02x v:%02x %08x@%08x",
+ __entry->master_idx,
+ __entry->link,
+ __entry->type,
+ __entry->unit,
+ __entry->version,
+ __entry->size,
+ __entry->addr
+ )
+);
#endif /* _TRACE_FSI_H */
diff --git a/include/trace/events/fsi_master_aspeed.h b/include/trace/events/fsi_master_aspeed.h
index a355ceacc33f..0fff873775f1 100644
--- a/include/trace/events/fsi_master_aspeed.h
+++ b/include/trace/events/fsi_master_aspeed.h
@@ -72,6 +72,18 @@ TRACE_EVENT(fsi_master_aspeed_opb_error,
)
);
+TRACE_EVENT(fsi_master_aspeed_cfam_reset,
+ TP_PROTO(bool start),
+ TP_ARGS(start),
+ TP_STRUCT__entry(
+ __field(bool, start)
+ ),
+ TP_fast_assign(
+ __entry->start = start;
+ ),
+ TP_printk("%s", __entry->start ? "start" : "end")
+);
+
#endif
#include <trace/define_trace.h>
diff --git a/include/uapi/linux/fsi.h b/include/uapi/linux/fsi.h
index da577ecd90e7..b2f1977378c7 100644
--- a/include/uapi/linux/fsi.h
+++ b/include/uapi/linux/fsi.h
@@ -55,4 +55,18 @@ struct scom_access {
#define FSI_SCOM_WRITE _IOWR('s', 0x02, struct scom_access)
#define FSI_SCOM_RESET _IOW('s', 0x03, __u32)
+/*
+ * /dev/sbefifo* ioctl interface
+ */
+
+/**
+ * FSI_SBEFIFO_READ_TIMEOUT_SECONDS sets the read timeout for responses
+ * from the SBE.
+ *
+ * The read timeout is specified in seconds. The minimum value of read
+ * timeout is 10 seconds (default) and the maximum value of read timeout is
+ * 120 seconds. A read timeout of 0 will reset the value to the default
+ * (10 seconds).
+ */
+#define FSI_SBEFIFO_READ_TIMEOUT_SECONDS _IOW('s', 0x00, __u32)
+
#endif /* _UAPI_LINUX_FSI_H */
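A hedged userspace example of the new ioctl, raising the SBEFIFO response
timeout to 30 seconds; the device node path is an assumption.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/fsi.h>

int main(void)
{
	__u32 timeout = 30;	/* seconds; 0 restores the 10 s default */
	int fd = open("/dev/sbefifo1", O_RDWR);	/* hypothetical node */

	if (fd < 0 || ioctl(fd, FSI_SBEFIFO_READ_TIMEOUT_SECONDS, &timeout)) {
		perror("FSI_SBEFIFO_READ_TIMEOUT_SECONDS");
		return 1;
	}

	close(fd);
	return 0;
}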
diff --git a/kernel/configs/android-recommended.config b/kernel/configs/android-recommended.config
index eb0029c9a6a6..e400fbbc8aba 100644
--- a/kernel/configs/android-recommended.config
+++ b/kernel/configs/android-recommended.config
@@ -1,5 +1,5 @@
# KEEP ALPHABETICALLY SORTED
-# CONFIG_AIO is not set
+# CONFIG_BPF_UNPRIV_DEFAULT_OFF is not set
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
# CONFIG_INPUT_MOUSE is not set
# CONFIG_LEGACY_PTYS is not set
diff --git a/tools/testing/selftests/lkdtm/config b/tools/testing/selftests/lkdtm/config
index a26a3fa9e925..46f39ee76208 100644
--- a/tools/testing/selftests/lkdtm/config
+++ b/tools/testing/selftests/lkdtm/config
@@ -3,9 +3,9 @@ CONFIG_DEBUG_LIST=y
CONFIG_SLAB_FREELIST_HARDENED=y
CONFIG_FORTIFY_SOURCE=y
CONFIG_HARDENED_USERCOPY=y
-# CONFIG_HARDENED_USERCOPY_FALLBACK is not set
CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT=y
CONFIG_INIT_ON_ALLOC_DEFAULT_ON=y
+CONFIG_UBSAN=y
CONFIG_UBSAN_BOUNDS=y
CONFIG_UBSAN_TRAP=y
CONFIG_STACKPROTECTOR_STRONG=y