Diffstat (limited to 'drivers/platform')
-rw-r--r--  drivers/platform/Kconfig | 4
-rw-r--r--  drivers/platform/Makefile | 2
-rw-r--r--  drivers/platform/arm64/lenovo-thinkpad-t14s.c | 58
-rw-r--r--  drivers/platform/chrome/cros_ec_ishtp.c | 1
-rw-r--r--  drivers/platform/chrome/cros_ec_lightbar.c | 16
-rw-r--r--  drivers/platform/chrome/cros_ec_sensorhub_ring.c | 11
-rw-r--r--  drivers/platform/chrome/cros_usbpd_notify.c | 17
-rw-r--r--  drivers/platform/raspberrypi/Kconfig | 52
-rw-r--r--  drivers/platform/raspberrypi/Makefile | 15
-rw-r--r--  drivers/platform/raspberrypi/vchiq-interface/TESTING | 125
-rw-r--r--  drivers/platform/raspberrypi/vchiq-interface/TODO | 4
-rw-r--r--  drivers/platform/raspberrypi/vchiq-interface/vchiq_arm.c | 1477
-rw-r--r--  drivers/platform/raspberrypi/vchiq-interface/vchiq_bus.c | 112
-rw-r--r--  drivers/platform/raspberrypi/vchiq-interface/vchiq_core.c | 4013
-rw-r--r--  drivers/platform/raspberrypi/vchiq-interface/vchiq_debugfs.c | 157
-rw-r--r--  drivers/platform/raspberrypi/vchiq-interface/vchiq_dev.c | 1355
-rw-r--r--  drivers/platform/raspberrypi/vchiq-interface/vchiq_ioctl.h | 112
-rw-r--r--  drivers/platform/raspberrypi/vchiq-mmal/Kconfig | 7
-rw-r--r--  drivers/platform/raspberrypi/vchiq-mmal/Makefile | 4
-rw-r--r--  drivers/platform/raspberrypi/vchiq-mmal/mmal-common.h | 65
-rw-r--r--  drivers/platform/raspberrypi/vchiq-mmal/mmal-encodings.h | 124
-rw-r--r--  drivers/platform/raspberrypi/vchiq-mmal/mmal-msg-common.h | 45
-rw-r--r--  drivers/platform/raspberrypi/vchiq-mmal/mmal-msg-format.h | 108
-rw-r--r--  drivers/platform/raspberrypi/vchiq-mmal/mmal-msg-port.h | 109
-rw-r--r--  drivers/platform/raspberrypi/vchiq-mmal/mmal-msg.h | 406
-rw-r--r--  drivers/platform/raspberrypi/vchiq-mmal/mmal-parameters.h | 752
-rw-r--r--  drivers/platform/raspberrypi/vchiq-mmal/mmal-vchiq.c | 1949
-rw-r--r--  drivers/platform/raspberrypi/vchiq-mmal/mmal-vchiq.h | 162
-rw-r--r--  drivers/platform/surface/aggregator/core.c | 2
-rw-r--r--  drivers/platform/surface/aggregator/ssh_packet_layer.c | 2
-rw-r--r--  drivers/platform/surface/aggregator/ssh_request_layer.c | 2
-rw-r--r--  drivers/platform/surface/surface_acpi_notify.c | 2
-rw-r--r--  drivers/platform/surface/surface_aggregator_registry.c | 13
-rw-r--r--  drivers/platform/wmi/Kconfig | 34
-rw-r--r--  drivers/platform/wmi/Makefile | 8
-rw-r--r--  drivers/platform/wmi/core.c (renamed from drivers/platform/x86/wmi.c) | 34
-rw-r--r--  drivers/platform/x86/Kconfig | 72
-rw-r--r--  drivers/platform/x86/Makefile | 8
-rw-r--r--  drivers/platform/x86/acer-wmi.c | 290
-rw-r--r--  drivers/platform/x86/amd/hfi/hfi.c | 11
-rw-r--r--  drivers/platform/x86/amd/hsmp/acpi.c | 9
-rw-r--r--  drivers/platform/x86/amd/pmf/auto-mode.c | 14
-rw-r--r--  drivers/platform/x86/amd/pmf/cnqf.c | 14
-rw-r--r--  drivers/platform/x86/amd/pmf/core.c | 23
-rw-r--r--  drivers/platform/x86/amd/pmf/pmf.h | 27
-rw-r--r--  drivers/platform/x86/amd/pmf/spc.c | 2
-rw-r--r--  drivers/platform/x86/amd/pmf/sps.c | 38
-rw-r--r--  drivers/platform/x86/amd/pmf/tee-if.c | 92
-rw-r--r--  drivers/platform/x86/asus-armoury.c | 1161
-rw-r--r--  drivers/platform/x86/asus-armoury.h | 1541
-rw-r--r--  drivers/platform/x86/asus-wmi.c | 185
-rw-r--r--  drivers/platform/x86/ayaneo-ec.c | 593
-rw-r--r--  drivers/platform/x86/dell/alienware-wmi-wmax.c | 124
-rw-r--r--  drivers/platform/x86/gpd-pocket-fan.c | 4
-rw-r--r--  drivers/platform/x86/hp/hp-wmi.c | 24
-rw-r--r--  drivers/platform/x86/intel/Kconfig | 13
-rw-r--r--  drivers/platform/x86/intel/Makefile | 1
-rw-r--r--  drivers/platform/x86/intel/chtwc_int33fe.c | 29
-rw-r--r--  drivers/platform/x86/intel/ehl_pse_io.c | 86
-rw-r--r--  drivers/platform/x86/intel/hid.c | 12
-rw-r--r--  drivers/platform/x86/intel/pmc/arl.c | 12
-rw-r--r--  drivers/platform/x86/intel/pmc/core.c | 149
-rw-r--r--  drivers/platform/x86/intel/pmc/core.h | 16
-rw-r--r--  drivers/platform/x86/intel/pmc/lnl.c | 2
-rw-r--r--  drivers/platform/x86/intel/pmc/mtl.c | 9
-rw-r--r--  drivers/platform/x86/intel/pmc/ptl.c | 3
-rw-r--r--  drivers/platform/x86/intel/pmc/wcl.c | 18
-rw-r--r--  drivers/platform/x86/intel/vsec.c | 2
-rw-r--r--  drivers/platform/x86/lenovo/ideapad-laptop.c | 218
-rw-r--r--  drivers/platform/x86/lenovo/wmi-gamezone.c | 35
-rw-r--r--  drivers/platform/x86/lg-laptop.c | 11
-rw-r--r--  drivers/platform/x86/oxpec.c | 115
-rw-r--r--  drivers/platform/x86/serial-multi-instantiate.c | 13
-rw-r--r--  drivers/platform/x86/uniwill/Kconfig | 38
-rw-r--r--  drivers/platform/x86/uniwill/Makefile | 8
-rw-r--r--  drivers/platform/x86/uniwill/uniwill-acpi.c | 1912
-rw-r--r--  drivers/platform/x86/uniwill/uniwill-wmi.c | 92
-rw-r--r--  drivers/platform/x86/uniwill/uniwill-wmi.h | 129
-rw-r--r--  drivers/platform/x86/x86-android-tablets/lenovo.c | 2
-rw-r--r--  drivers/platform/x86/x86-android-tablets/vexia_atla10_ec.c | 2
80 files changed, 17900 insertions(+), 618 deletions(-)
diff --git a/drivers/platform/Kconfig b/drivers/platform/Kconfig
index 960fd6a82450..312788f249c9 100644
--- a/drivers/platform/Kconfig
+++ b/drivers/platform/Kconfig
@@ -18,3 +18,7 @@ source "drivers/platform/surface/Kconfig"
source "drivers/platform/x86/Kconfig"
source "drivers/platform/arm64/Kconfig"
+
+source "drivers/platform/raspberrypi/Kconfig"
+
+source "drivers/platform/wmi/Kconfig"
diff --git a/drivers/platform/Makefile b/drivers/platform/Makefile
index 19ac54648586..fa322e7f8716 100644
--- a/drivers/platform/Makefile
+++ b/drivers/platform/Makefile
@@ -13,3 +13,5 @@ obj-$(CONFIG_CHROME_PLATFORMS) += chrome/
obj-$(CONFIG_CZNIC_PLATFORMS) += cznic/
obj-$(CONFIG_SURFACE_PLATFORMS) += surface/
obj-$(CONFIG_ARM64_PLATFORM_DEVICES) += arm64/
+obj-$(CONFIG_BCM2835_VCHIQ) += raspberrypi/
+obj-$(CONFIG_ACPI_WMI) += wmi/
diff --git a/drivers/platform/arm64/lenovo-thinkpad-t14s.c b/drivers/platform/arm64/lenovo-thinkpad-t14s.c
index cf6a1d3b2617..5590302a5694 100644
--- a/drivers/platform/arm64/lenovo-thinkpad-t14s.c
+++ b/drivers/platform/arm64/lenovo-thinkpad-t14s.c
@@ -20,19 +20,23 @@
#include <linux/module.h>
#include <linux/regmap.h>
#include <linux/slab.h>
+#include <linux/pm.h>
#define T14S_EC_CMD_ECRD 0x02
#define T14S_EC_CMD_ECWR 0x03
#define T14S_EC_CMD_EVT 0xf0
-#define T14S_EC_REG_LED 0x0c
-#define T14S_EC_REG_KBD_BL1 0x0d
-#define T14S_EC_REG_KBD_BL2 0xe1
-#define T14S_EC_KBD_BL1_MASK GENMASK_U8(7, 6)
-#define T14S_EC_KBD_BL2_MASK GENMASK_U8(3, 2)
-#define T14S_EC_REG_AUD 0x30
-#define T14S_EC_MIC_MUTE_LED BIT(5)
-#define T14S_EC_SPK_MUTE_LED BIT(6)
+#define T14S_EC_REG_LED 0x0c
+#define T14S_EC_REG_KBD_BL1 0x0d
+#define T14S_EC_REG_MODERN_STANDBY 0xe0
+#define T14S_EC_MODERN_STANDBY_ENTRY BIT(1)
+#define T14S_EC_MODERN_STANDBY_EXIT BIT(0)
+#define T14S_EC_REG_KBD_BL2 0xe1
+#define T14S_EC_KBD_BL1_MASK GENMASK_U8(7, 6)
+#define T14S_EC_KBD_BL2_MASK GENMASK_U8(3, 2)
+#define T14S_EC_REG_AUD 0x30
+#define T14S_EC_MIC_MUTE_LED BIT(5)
+#define T14S_EC_SPK_MUTE_LED BIT(6)
#define T14S_EC_EVT_NONE 0x00
#define T14S_EC_EVT_KEY_FN_4 0x13
@@ -202,6 +206,14 @@ out:
return ret;
}
+static void t14s_ec_write_sequence(struct t14s_ec *ec, u8 reg, u8 val, u8 cnt)
+{
+ int i;
+
+ for (i = 0; i < cnt; i++)
+ regmap_write(ec->regmap, reg, val);
+}
+
static int t14s_led_set_status(struct t14s_ec *ec,
struct t14s_ec_led_classdev *led,
const enum t14s_ec_led_status_t ledstatus)
@@ -554,6 +566,7 @@ static int t14s_ec_probe(struct i2c_client *client)
return -ENOMEM;
ec->dev = dev;
+ i2c_set_clientdata(client, ec);
ec->regmap = devm_regmap_init(dev, &t14s_ec_regmap_bus,
ec, &t14s_ec_regmap_config);
@@ -593,6 +606,30 @@ static int t14s_ec_probe(struct i2c_client *client)
return 0;
}
+static int t14s_ec_suspend(struct device *dev)
+{
+ struct t14s_ec *ec = dev_get_drvdata(dev);
+
+ led_classdev_suspend(&ec->kbd_backlight);
+
+ t14s_ec_write_sequence(ec, T14S_EC_REG_MODERN_STANDBY,
+ T14S_EC_MODERN_STANDBY_ENTRY, 3);
+
+ return 0;
+}
+
+static int t14s_ec_resume(struct device *dev)
+{
+ struct t14s_ec *ec = dev_get_drvdata(dev);
+
+ t14s_ec_write_sequence(ec, T14S_EC_REG_MODERN_STANDBY,
+ T14S_EC_MODERN_STANDBY_EXIT, 3);
+
+ led_classdev_resume(&ec->kbd_backlight);
+
+ return 0;
+}
+
static const struct of_device_id t14s_ec_of_match[] = {
{ .compatible = "lenovo,thinkpad-t14s-ec" },
{}
@@ -605,10 +642,15 @@ static const struct i2c_device_id t14s_ec_i2c_id_table[] = {
};
MODULE_DEVICE_TABLE(i2c, t14s_ec_i2c_id_table);
+static const struct dev_pm_ops t14s_ec_pm_ops = {
+ SYSTEM_SLEEP_PM_OPS(t14s_ec_suspend, t14s_ec_resume)
+};
+
static struct i2c_driver t14s_ec_i2c_driver = {
.driver = {
.name = "thinkpad-t14s-ec",
.of_match_table = t14s_ec_of_match,
+ .pm = &t14s_ec_pm_ops,
},
.probe = t14s_ec_probe,
.id_table = t14s_ec_i2c_id_table,
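
As an aside on the change above: a minimal, hedged sketch of the same system-sleep
wiring for a hypothetical I2C EC driver (the demo_* names and the 0xe0 register are
illustrative assumptions, not part of this patch):

#include <linux/bits.h>
#include <linux/i2c.h>
#include <linux/pm.h>

struct demo_ec {
	struct i2c_client *client;
};

static int demo_ec_suspend(struct device *dev)
{
	struct demo_ec *ec = dev_get_drvdata(dev);

	/* Tell the hypothetical EC firmware we are entering sleep. */
	return i2c_smbus_write_byte_data(ec->client, 0xe0, BIT(1));
}

static int demo_ec_resume(struct device *dev)
{
	struct demo_ec *ec = dev_get_drvdata(dev);

	return i2c_smbus_write_byte_data(ec->client, 0xe0, BIT(0));
}

/*
 * SYSTEM_SLEEP_PM_OPS wraps the callbacks in pm_sleep_ptr(), so they are
 * discarded when CONFIG_PM_SLEEP=n without any #ifdef guards - which is
 * why the t14s patch above needs none either.
 */
static const struct dev_pm_ops demo_ec_pm_ops = {
	SYSTEM_SLEEP_PM_OPS(demo_ec_suspend, demo_ec_resume)
};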
diff --git a/drivers/platform/chrome/cros_ec_ishtp.c b/drivers/platform/chrome/cros_ec_ishtp.c
index 4e74e702c5a2..3766cef81fe8 100644
--- a/drivers/platform/chrome/cros_ec_ishtp.c
+++ b/drivers/platform/chrome/cros_ec_ishtp.c
@@ -667,6 +667,7 @@ static void cros_ec_ishtp_remove(struct ishtp_cl_device *cl_device)
cancel_work_sync(&client_data->work_ishtp_reset);
cancel_work_sync(&client_data->work_ec_evt);
+ cros_ec_unregister(client_data->ec_dev);
cros_ish_deinit(cros_ish_cl);
ishtp_put_device(cl_device);
}
diff --git a/drivers/platform/chrome/cros_ec_lightbar.c b/drivers/platform/chrome/cros_ec_lightbar.c
index 87634f6921b7..8352e9732791 100644
--- a/drivers/platform/chrome/cros_ec_lightbar.c
+++ b/drivers/platform/chrome/cros_ec_lightbar.c
@@ -30,6 +30,13 @@ static unsigned long lb_interval_jiffies = 50 * HZ / 1000;
*/
static bool userspace_control;
+/*
+ * Whether or not the lightbar supports the manual suspend commands.
+ * The Pixel 2013 (Link) does not while all other devices with a
+ * lightbar do.
+ */
+static bool has_manual_suspend;
+
static ssize_t interval_msec_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -550,7 +557,7 @@ static int cros_ec_lightbar_probe(struct platform_device *pd)
return -ENODEV;
/* Take control of the lightbar from the EC. */
- lb_manual_suspend_ctrl(ec_dev, 1);
+ has_manual_suspend = (lb_manual_suspend_ctrl(ec_dev, 1) != -EINVAL);
ret = sysfs_create_group(&ec_dev->class_dev.kobj,
&cros_ec_lightbar_attr_group);
@@ -569,14 +576,15 @@ static void cros_ec_lightbar_remove(struct platform_device *pd)
&cros_ec_lightbar_attr_group);
/* Let the EC take over the lightbar again. */
- lb_manual_suspend_ctrl(ec_dev, 0);
+ if (has_manual_suspend)
+ lb_manual_suspend_ctrl(ec_dev, 0);
}
static int __maybe_unused cros_ec_lightbar_resume(struct device *dev)
{
struct cros_ec_dev *ec_dev = dev_get_drvdata(dev->parent);
- if (userspace_control)
+ if (userspace_control || !has_manual_suspend)
return 0;
return lb_send_empty_cmd(ec_dev, LIGHTBAR_CMD_RESUME);
@@ -586,7 +594,7 @@ static int __maybe_unused cros_ec_lightbar_suspend(struct device *dev)
{
struct cros_ec_dev *ec_dev = dev_get_drvdata(dev->parent);
- if (userspace_control)
+ if (userspace_control || !has_manual_suspend)
return 0;
return lb_send_empty_cmd(ec_dev, LIGHTBAR_CMD_SUSPEND);
diff --git a/drivers/platform/chrome/cros_ec_sensorhub_ring.c b/drivers/platform/chrome/cros_ec_sensorhub_ring.c
index 1205219515d6..a10579144c34 100644
--- a/drivers/platform/chrome/cros_ec_sensorhub_ring.c
+++ b/drivers/platform/chrome/cros_ec_sensorhub_ring.c
@@ -129,6 +129,17 @@ int cros_ec_sensorhub_ring_fifo_enable(struct cros_ec_sensorhub *sensorhub,
/* We expect to receive a payload of 4 bytes, ignore. */
if (ret > 0)
ret = 0;
+ /*
+ * Some platforms (such as Smaug) don't support the FIFO_INT_ENABLE
+ * command and the interrupt is always enabled. In that case, it
+ * returns -EINVAL.
+ *
+ * N.B.: there is no danger of -EINVAL meaning any other invalid
+ * parameter since fifo_int_enable.enable is a bool and can never
+ * be in an invalid range.
+ */
+ else if (ret == -EINVAL)
+ ret = 0;
return ret;
}
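
The hunk above encodes a reusable pattern: when older firmware lacks a command and
the fallback behaviour is acceptable, translate the "unsupported" error into
success. A hedged, stand-alone sketch (demo_* names are illustrative, not the
cros_ec API):

#include <linux/errno.h>
#include <linux/types.h>

struct demo_dev;
int demo_send_cmd(struct demo_dev *dev, int cmd, bool arg); /* hypothetical */

static int demo_enable_fifo_int(struct demo_dev *dev, bool enable)
{
	int ret = demo_send_cmd(dev, /* DEMO_CMD_FIFO_INT_ENABLE */ 0x2a, enable);

	/*
	 * Old firmware without the command reports -EINVAL. Since the
	 * parameter is a bool, -EINVAL cannot mean a bad argument here,
	 * and the interrupt is simply always enabled on such devices.
	 */
	if (ret == -EINVAL)
		return 0;

	return ret;
}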
diff --git a/drivers/platform/chrome/cros_usbpd_notify.c b/drivers/platform/chrome/cros_usbpd_notify.c
index 313d2bcd577b..c90174360004 100644
--- a/drivers/platform/chrome/cros_usbpd_notify.c
+++ b/drivers/platform/chrome/cros_usbpd_notify.c
@@ -6,6 +6,7 @@
*/
#include <linux/acpi.h>
+#include <linux/fwnode.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_data/cros_ec_proto.h>
@@ -15,6 +16,7 @@
#define DRV_NAME "cros-usbpd-notify"
#define DRV_NAME_PLAT_ACPI "cros-usbpd-notify-acpi"
#define ACPI_DRV_NAME "GOOG0003"
+#define CREC_DRV_NAME "GOOG0004"
static BLOCKING_NOTIFIER_HEAD(cros_usbpd_notifier_list);
@@ -98,8 +100,9 @@ static int cros_usbpd_notify_probe_acpi(struct platform_device *pdev)
{
struct cros_usbpd_notify_data *pdnotify;
struct device *dev = &pdev->dev;
- struct acpi_device *adev;
+ struct acpi_device *adev, *parent_adev;
struct cros_ec_device *ec_dev;
+ struct fwnode_handle *parent_fwnode;
acpi_status status;
adev = ACPI_COMPANION(dev);
@@ -114,8 +117,18 @@ static int cros_usbpd_notify_probe_acpi(struct platform_device *pdev)
/*
* We continue even for older devices which don't have the
* correct device hierarchy, namely, GOOG0003 is a child
- * of GOOG0004.
+ * of GOOG0004. If GOOG0003 is a child of GOOG0004 and we
+ * can't get a pointer to the Chrome EC device, defer the
+ * probe function.
*/
+ parent_fwnode = fwnode_get_parent(dev->fwnode);
+ if (parent_fwnode) {
+ parent_adev = to_acpi_device_node(parent_fwnode);
+ if (parent_adev &&
+ acpi_dev_hid_match(parent_adev, CREC_DRV_NAME)) {
+ return -EPROBE_DEFER;
+ }
+ }
dev_warn(dev, "Couldn't get Chrome EC device pointer.\n");
}
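
The deferral added above follows a common idiom: inspect the ACPI parent and
return -EPROBE_DEFER until the parent's driver has bound. A hedged sketch of
just that step in isolation (the helper name is illustrative; GOOG0004 is the
Chrome EC HID from the patch; unlike the hunk, this sketch also drops the
fwnode reference it takes):

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/fwnode.h>

static int demo_defer_until_parent_ec(struct device *dev)
{
	struct fwnode_handle *parent = fwnode_get_parent(dev_fwnode(dev));
	struct acpi_device *parent_adev;
	int ret = 0;

	if (!parent)
		return 0;

	parent_adev = to_acpi_device_node(parent);
	/* Parent is the Chrome EC, but its driver has not bound yet. */
	if (parent_adev && acpi_dev_hid_match(parent_adev, "GOOG0004"))
		ret = -EPROBE_DEFER;

	fwnode_handle_put(parent);
	return ret;
}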
diff --git a/drivers/platform/raspberrypi/Kconfig b/drivers/platform/raspberrypi/Kconfig
new file mode 100644
index 000000000000..2c928440a47c
--- /dev/null
+++ b/drivers/platform/raspberrypi/Kconfig
@@ -0,0 +1,52 @@
+# SPDX-License-Identifier: GPL-2.0
+
+menuconfig BCM_VIDEOCORE
+ tristate "Broadcom VideoCore support"
+ depends on OF
+ depends on RASPBERRYPI_FIRMWARE || (COMPILE_TEST && !RASPBERRYPI_FIRMWARE)
+ default y
+ help
+ Support for Broadcom VideoCore services including
+ the BCM2835 family of products which is used
+ by the Raspberry Pi.
+
+if BCM_VIDEOCORE
+
+config BCM2835_VCHIQ
+ tristate "BCM2835 VCHIQ"
+ depends on HAS_DMA
+ imply VCHIQ_CDEV
+ help
+ Broadcom BCM2835 and similar SoCs have a VPU called VideoCore.
+ This config enables the VCHIQ driver, which implements a
+ messaging interface between the kernel and the firmware running
+ on VideoCore. Other drivers use this interface to communicate with
+ the VPU. More specifically, the VCHIQ driver is used by
+ audio/video and camera drivers as well as for implementing MMAL
+ API, which is in turn used by several multimedia services on the
+ BCM2835 family of SoCs.
+
+ Defaults to Y when the Broadcom VideoCore services are included
+ in the build, N otherwise.
+
+if BCM2835_VCHIQ
+
+config VCHIQ_CDEV
+ bool "VCHIQ Character Driver"
+ help
+ Enable the creation of the VCHIQ character driver. The cdev
+ exposes ioctls used by userspace libraries and testing tools to
+ interact with VideoCore, via the VCHIQ core driver (check
+ BCM2835_VCHIQ for more info).
+
+ This can be set to 'N' if the VideoCore communication is not
+ needed by userspace but only by other kernel modules
+ (like bcm2835-audio).
+
+ If unsure, set this to 'Y'.
+
+endif
+
+source "drivers/platform/raspberrypi/vchiq-mmal/Kconfig"
+
+endif
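
To make the help text above concrete, here is a hedged sketch of how an
in-kernel consumer (such as bcm2835-audio) would hook into the VCHIQ stack once
it is connected, using vchiq_add_connected_callback() from this series; the
demo_* names are illustrative and the headers are those introduced by the
patch:

#include <linux/module.h>
#include <linux/raspberrypi/vchiq_arm.h>
#include <linux/raspberrypi/vchiq_bus.h>

static void demo_on_vchiq_connected(void)
{
	/* Safe point to open services: both sides are connected. */
	pr_info("demo: VCHIQ connected\n");
}

static int demo_vchiq_probe(struct vchiq_device *device)
{
	/*
	 * Runs the callback immediately if VCHIQ is already connected,
	 * otherwise defers it until vchiq_call_connected_callbacks().
	 */
	vchiq_add_connected_callback(device, demo_on_vchiq_connected);
	return 0;
}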
diff --git a/drivers/platform/raspberrypi/Makefile b/drivers/platform/raspberrypi/Makefile
new file mode 100644
index 000000000000..2a7c9511e5d8
--- /dev/null
+++ b/drivers/platform/raspberrypi/Makefile
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_BCM2835_VCHIQ) += vchiq.o
+
+vchiq-objs := \
+ vchiq-interface/vchiq_core.o \
+ vchiq-interface/vchiq_arm.o \
+ vchiq-interface/vchiq_bus.o \
+ vchiq-interface/vchiq_debugfs.o
+
+ifdef CONFIG_VCHIQ_CDEV
+vchiq-objs += vchiq-interface/vchiq_dev.o
+endif
+
+obj-$(CONFIG_BCM2835_VCHIQ_MMAL) += vchiq-mmal/
diff --git a/drivers/platform/raspberrypi/vchiq-interface/TESTING b/drivers/platform/raspberrypi/vchiq-interface/TESTING
new file mode 100644
index 000000000000..c98f688b07e0
--- /dev/null
+++ b/drivers/platform/raspberrypi/vchiq-interface/TESTING
@@ -0,0 +1,125 @@
+This document contains some hints for testing the functionality of the VCHIQ
+driver without additional hardware beyond the Raspberry Pi.
+
+* Requirements & limitations
+
+Testing the VCHIQ driver requires a Raspberry Pi with one of the following SoCs:
+ - BCM2835 ( e.g. Raspberry Pi Zero W )
+ - BCM2836 ( e.g. Raspberry Pi 2 )
+ - BCM2837 ( e.g. Raspberry Pi 3 B+ )
+
+The BCM2711 used in the Raspberry Pi 4 is currently not supported in the
+mainline kernel.
+
+There are no specific requirements on the VideoCore firmware to get VCHIQ
+working.
+
+The test scenarios described in this document are based on the tool vchiq_test.
+Its source code is available here: https://github.com/raspberrypi/userland
+
+* Configuration
+
+Here are the most common kernel configurations:
+
+ 1. BCM2835 target SoC (ARM 32 bit)
+
+ Just use bcm2835_defconfig which already has VCHIQ enabled.
+
+ 2. BCM2836/7 target SoC (ARM 32 bit)
+
+ Use the multi_v7_defconfig as a base and then enable all VCHIQ options.
+
+ 3. BCM2837 target SoC (ARM 64 bit)
+
+ Use the defconfig which has most of the VCHIQ options enabled.
+
+* Scenarios
+
+ * Initial test
+
+ Check that the driver is probed and that /dev/vchiq is created
+
+ * Functional test
+
+ Command: vchiq_test -f 10
+
+ Expected output:
+ Functional test - iters:10
+ ======== iteration 1 ========
+ Testing bulk transfer for alignment.
+ Testing bulk transfer at PAGE_SIZE.
+ ...
+
+ * Ping test
+
+ Command: vchiq_test -p
+
+ Expected output:
+ Ping test - service:echo, iters:1000, version 3
+ vchi ping (size 0) -> 57.000000us
+ vchi ping (size 0, 0 async, 0 oneway) -> 122.000000us
+ vchi bulk (size 0, 0 async, 0 oneway) -> 546.000000us
+ vchi bulk (size 0, 0 oneway) -> 230.000000us
+ vchi ping (size 0) -> 49.000000us
+ vchi ping (size 0, 0 async, 0 oneway) -> 70.000000us
+ vchi bulk (size 0, 0 async, 0 oneway) -> 296.000000us
+ vchi bulk (size 0, 0 oneway) -> 266.000000us
+ vchi ping (size 0, 1 async, 0 oneway) -> 65.000000us
+ vchi bulk (size 0, 0 oneway) -> 456.000000us
+ vchi ping (size 0, 2 async, 0 oneway) -> 74.000000us
+ vchi bulk (size 0, 0 oneway) -> 640.000000us
+ vchi ping (size 0, 10 async, 0 oneway) -> 125.000000us
+ vchi bulk (size 0, 0 oneway) -> 2309.000000us
+ vchi ping (size 0, 0 async, 1 oneway) -> 70.000000us
+ vchi ping (size 0, 0 async, 2 oneway) -> 76.000000us
+ vchi ping (size 0, 0 async, 10 oneway) -> 105.000000us
+ vchi ping (size 0, 10 async, 10 oneway) -> 165.000000us
+ vchi ping (size 0, 100 async, 0 oneway) -> nanus
+ vchi bulk (size 0, 0 oneway) -> nanus
+ vchi ping (size 0, 0 async, 100 oneway) -> nanus
+ vchi ping (size 0, 100 async, 100 oneway) -> infus
+ vchi ping (size 0, 200 async, 0 oneway) -> infus
+ ...
+
+ * Debugfs test
+
+ Command: cat /sys/kernel/debug/vchiq/state
+
+ Example output:
+ State 0: CONNECTED
+ tx_pos=0x1e8(@43b0acda), rx_pos=0x170(@05493af8)
+ Version: 8 (min 3)
+ Stats: ctrl_tx_count=7, ctrl_rx_count=7, error_count=0
+ Slots: 30 available (29 data), 0 recyclable, 0 stalls (0 data)
+ Platform: 2835 (VC master)
+ Local: slots 34-64 tx_pos=0x1e8 recycle=0x1f
+ Slots claimed:
+ DEBUG: SLOT_HANDLER_COUNT = 20(0x14)
+ DEBUG: SLOT_HANDLER_LINE = 1937(0x791)
+ DEBUG: PARSE_LINE = 1864(0x748)
+ DEBUG: PARSE_HEADER = -249155224(0xf1263168)
+ DEBUG: PARSE_MSGID = 67362817(0x403e001)
+ DEBUG: AWAIT_COMPLETION_LINE = 0(0x0)
+ DEBUG: DEQUEUE_MESSAGE_LINE = 0(0x0)
+ DEBUG: SERVICE_CALLBACK_LINE = 0(0x0)
+ DEBUG: MSG_QUEUE_FULL_COUNT = 0(0x0)
+ DEBUG: COMPLETION_QUEUE_FULL_COUNT = 0(0x0)
+ Remote: slots 2-32 tx_pos=0x170 recycle=0x1f
+ Slots claimed:
+ 2: 10/9
+ DEBUG: SLOT_HANDLER_COUNT = 20(0x14)
+ DEBUG: SLOT_HANDLER_LINE = 1851(0x73b)
+ DEBUG: PARSE_LINE = 1827(0x723)
+ DEBUG: PARSE_HEADER = -150330912(0xf70a21e0)
+ DEBUG: PARSE_MSGID = 67113022(0x400103e)
+ DEBUG: AWAIT_COMPLETION_LINE = 0(0x0)
+ DEBUG: DEQUEUE_MESSAGE_LINE = 0(0x0)
+ DEBUG: SERVICE_CALLBACK_LINE = 0(0x0)
+ DEBUG: MSG_QUEUE_FULL_COUNT = 0(0x0)
+ DEBUG: COMPLETION_QUEUE_FULL_COUNT = 0(0x0)
+ Service 0: LISTENING (ref 1) 'PEEK little-endian (0x4b454550)' remote n/a (msg use 0/3840, slot use 0/15)
+ Bulk: tx_pending=0 (size 0), rx_pending=0 (size 0)
+ Ctrl: tx_count=0, tx_bytes=0, rx_count=0, rx_bytes=0
+ Bulk: tx_count=0, tx_bytes=0, rx_count=0, rx_bytes=0
+ 0 quota stalls, 0 slot stalls, 0 bulk stalls, 0 aborted, 0 errors
+ instance b511f60b
diff --git a/drivers/platform/raspberrypi/vchiq-interface/TODO b/drivers/platform/raspberrypi/vchiq-interface/TODO
new file mode 100644
index 000000000000..2357dae413f1
--- /dev/null
+++ b/drivers/platform/raspberrypi/vchiq-interface/TODO
@@ -0,0 +1,4 @@
+* Documentation
+
+A short top-down description of this driver's architecture (function of
+kthreads, userspace, limitations) could be very helpful for reviewers.
diff --git a/drivers/platform/raspberrypi/vchiq-interface/vchiq_arm.c b/drivers/platform/raspberrypi/vchiq-interface/vchiq_arm.c
new file mode 100644
index 000000000000..6a7b96d3dae6
--- /dev/null
+++ b/drivers/platform/raspberrypi/vchiq-interface/vchiq_arm.c
@@ -0,0 +1,1477 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (c) 2014 Raspberry Pi (Trading) Ltd. All rights reserved.
+ * Copyright (c) 2010-2012 Broadcom. All rights reserved.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sched/signal.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/cdev.h>
+#include <linux/fs.h>
+#include <linux/device.h>
+#include <linux/device/bus.h>
+#include <linux/mm.h>
+#include <linux/pagemap.h>
+#include <linux/bug.h>
+#include <linux/completion.h>
+#include <linux/list.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/compat.h>
+#include <linux/dma-mapping.h>
+#include <linux/rcupdate.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/uaccess.h>
+#include <soc/bcm2835/raspberrypi-firmware.h>
+
+#include <linux/raspberrypi/vchiq_core.h>
+#include <linux/raspberrypi/vchiq_arm.h>
+#include <linux/raspberrypi/vchiq_bus.h>
+#include <linux/raspberrypi/vchiq_debugfs.h>
+
+#include "vchiq_ioctl.h"
+
+#define DEVICE_NAME "vchiq"
+
+#define TOTAL_SLOTS (VCHIQ_SLOT_ZERO_SLOTS + 2 * 32)
+
+#define MAX_FRAGMENTS (VCHIQ_NUM_CURRENT_BULKS * 2)
+
+#define VCHIQ_PLATFORM_FRAGMENTS_OFFSET_IDX 0
+#define VCHIQ_PLATFORM_FRAGMENTS_COUNT_IDX 1
+
+#define BELL0 0x00
+
+#define ARM_DS_ACTIVE BIT(2)
+
+/* Override the default prefix, which would be vchiq_arm (from the filename) */
+#undef MODULE_PARAM_PREFIX
+#define MODULE_PARAM_PREFIX DEVICE_NAME "."
+
+#define KEEPALIVE_VER 1
+#define KEEPALIVE_VER_MIN KEEPALIVE_VER
+
+/*
+ * The devices implemented in the VCHIQ firmware are not discoverable,
+ * so we need to maintain a list of them in order to register them with
+ * the interface.
+ */
+static struct vchiq_device *bcm2835_audio;
+
+static const struct vchiq_platform_info bcm2835_info = {
+ .cache_line_size = 32,
+};
+
+static const struct vchiq_platform_info bcm2836_info = {
+ .cache_line_size = 64,
+};
+
+struct vchiq_arm_state {
+ /*
+ * Keepalive-related data
+ *
+ * The keepalive mechanism was retro-fitted to VCHIQ to allow active
+ * services to prevent the system from suspending.
+ * This feature is not used on Raspberry Pi devices.
+ */
+ struct task_struct *ka_thread;
+ struct completion ka_evt;
+ atomic_t ka_use_count;
+ atomic_t ka_use_ack_count;
+ atomic_t ka_release_count;
+
+ rwlock_t susp_res_lock;
+
+ struct vchiq_state *state;
+
+ /*
+ * Global use count for videocore.
+ * This is equal to the sum of the use counts for all services. When
+ * this hits zero the videocore suspend procedure will be initiated.
+ */
+ int videocore_use_count;
+
+ /*
+ * Use count to track requests from videocore peer.
+ * This use count is not associated with a service, so needs to be
+ * tracked separately with the state.
+ */
+ int peer_use_count;
+
+ /*
+ * Flag to indicate that the first vchiq connect has made it through.
+ * This means that both sides should be fully ready, and we should
+ * be able to suspend after this point.
+ */
+ int first_connect;
+};
+
+static int
+vchiq_blocking_bulk_transfer(struct vchiq_instance *instance, unsigned int handle,
+ struct vchiq_bulk *bulk_params);
+
+static irqreturn_t
+vchiq_doorbell_irq(int irq, void *dev_id)
+{
+ struct vchiq_state *state = dev_id;
+ struct vchiq_drv_mgmt *mgmt;
+ irqreturn_t ret = IRQ_NONE;
+ unsigned int status;
+
+ mgmt = dev_get_drvdata(state->dev);
+
+ /* Read (and clear) the doorbell */
+ status = readl(mgmt->regs + BELL0);
+
+ if (status & ARM_DS_ACTIVE) { /* Was the doorbell rung? */
+ remote_event_pollall(state);
+ ret = IRQ_HANDLED;
+ }
+
+ return ret;
+}
+
+/*
+ * This function is called by the vchiq stack once it has been connected to
+ * the videocore and clients can start to use the stack.
+ */
+static void vchiq_call_connected_callbacks(struct vchiq_drv_mgmt *drv_mgmt)
+{
+ int i;
+
+ if (mutex_lock_killable(&drv_mgmt->connected_mutex))
+ return;
+
+ for (i = 0; i < drv_mgmt->num_deferred_callbacks; i++)
+ drv_mgmt->deferred_callback[i]();
+
+ drv_mgmt->num_deferred_callbacks = 0;
+ drv_mgmt->connected = true;
+ mutex_unlock(&drv_mgmt->connected_mutex);
+}
+
+/*
+ * This function is used to defer initialization until the vchiq stack is
+ * initialized. If the stack is already initialized, then the callback will
+ * be made immediately, otherwise it will be deferred until
+ * vchiq_call_connected_callbacks is called.
+ */
+void vchiq_add_connected_callback(struct vchiq_device *device, void (*callback)(void))
+{
+ struct vchiq_drv_mgmt *drv_mgmt = device->drv_mgmt;
+
+ if (mutex_lock_killable(&drv_mgmt->connected_mutex))
+ return;
+
+ if (drv_mgmt->connected) {
+ /* We're already connected. Call the callback immediately. */
+ callback();
+ } else {
+ if (drv_mgmt->num_deferred_callbacks >= VCHIQ_DRV_MAX_CALLBACKS) {
+ dev_err(&device->dev,
+ "core: deferred callbacks(%d) exceeded the maximum limit(%d)\n",
+ drv_mgmt->num_deferred_callbacks, VCHIQ_DRV_MAX_CALLBACKS);
+ } else {
+ drv_mgmt->deferred_callback[drv_mgmt->num_deferred_callbacks] =
+ callback;
+ drv_mgmt->num_deferred_callbacks++;
+ }
+ }
+ mutex_unlock(&drv_mgmt->connected_mutex);
+}
+EXPORT_SYMBOL(vchiq_add_connected_callback);
+
+static int vchiq_platform_init(struct platform_device *pdev, struct vchiq_state *state)
+{
+ struct device *dev = &pdev->dev;
+ struct vchiq_drv_mgmt *drv_mgmt = platform_get_drvdata(pdev);
+ struct rpi_firmware *fw = drv_mgmt->fw;
+ struct vchiq_slot_zero *vchiq_slot_zero;
+ void *slot_mem;
+ dma_addr_t slot_phys;
+ u32 channelbase;
+ int slot_mem_size, frag_mem_size;
+ int err, irq, i;
+
+ /*
+ * VCHI messages between the CPU and firmware use
+ * 32-bit bus addresses.
+ */
+ err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+
+ if (err < 0)
+ return err;
+
+ drv_mgmt->fragments_size = 2 * drv_mgmt->info->cache_line_size;
+
+ /* Allocate space for the channels in coherent memory */
+ slot_mem_size = PAGE_ALIGN(TOTAL_SLOTS * VCHIQ_SLOT_SIZE);
+ frag_mem_size = PAGE_ALIGN(drv_mgmt->fragments_size * MAX_FRAGMENTS);
+
+ slot_mem = dmam_alloc_coherent(dev, slot_mem_size + frag_mem_size,
+ &slot_phys, GFP_KERNEL);
+ if (!slot_mem) {
+ dev_err(dev, "could not allocate DMA memory\n");
+ return -ENOMEM;
+ }
+
+ WARN_ON(((unsigned long)slot_mem & (PAGE_SIZE - 1)) != 0);
+
+ vchiq_slot_zero = vchiq_init_slots(dev, slot_mem, slot_mem_size);
+ if (!vchiq_slot_zero)
+ return -ENOMEM;
+
+ vchiq_slot_zero->platform_data[VCHIQ_PLATFORM_FRAGMENTS_OFFSET_IDX] =
+ (int)slot_phys + slot_mem_size;
+ vchiq_slot_zero->platform_data[VCHIQ_PLATFORM_FRAGMENTS_COUNT_IDX] =
+ MAX_FRAGMENTS;
+
+ drv_mgmt->fragments_base = (char *)slot_mem + slot_mem_size;
+
+ drv_mgmt->free_fragments = drv_mgmt->fragments_base;
+ for (i = 0; i < (MAX_FRAGMENTS - 1); i++) {
+ *(char **)&drv_mgmt->fragments_base[i * drv_mgmt->fragments_size] =
+ &drv_mgmt->fragments_base[(i + 1) * drv_mgmt->fragments_size];
+ }
+ *(char **)&drv_mgmt->fragments_base[i * drv_mgmt->fragments_size] = NULL;
+ sema_init(&drv_mgmt->free_fragments_sema, MAX_FRAGMENTS);
+ sema_init(&drv_mgmt->free_fragments_mutex, 1);
+
+ err = vchiq_init_state(state, vchiq_slot_zero, dev);
+ if (err)
+ return err;
+
+ drv_mgmt->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(drv_mgmt->regs))
+ return PTR_ERR(drv_mgmt->regs);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq <= 0)
+ return irq;
+
+ err = devm_request_irq(dev, irq, vchiq_doorbell_irq, IRQF_IRQPOLL,
+ "VCHIQ doorbell", state);
+ if (err) {
+ dev_err(dev, "failed to register irq=%d\n", irq);
+ return err;
+ }
+
+ /* Send the base address of the slots to VideoCore */
+ channelbase = slot_phys;
+ err = rpi_firmware_property(fw, RPI_FIRMWARE_VCHIQ_INIT,
+ &channelbase, sizeof(channelbase));
+ if (err) {
+ dev_err(dev, "failed to send firmware property: %d\n", err);
+ return err;
+ }
+
+ if (channelbase) {
+ dev_err(dev, "failed to set channelbase (response: %x)\n",
+ channelbase);
+ return -ENXIO;
+ }
+
+ dev_dbg(&pdev->dev, "arm: vchiq_init - done (slots %p, phys %pad)\n",
+ vchiq_slot_zero, &slot_phys);
+
+ mutex_init(&drv_mgmt->connected_mutex);
+ vchiq_call_connected_callbacks(drv_mgmt);
+
+ return 0;
+}
+
+int
+vchiq_platform_init_state(struct vchiq_state *state)
+{
+ struct vchiq_arm_state *platform_state;
+
+ platform_state = devm_kzalloc(state->dev, sizeof(*platform_state), GFP_KERNEL);
+ if (!platform_state)
+ return -ENOMEM;
+
+ rwlock_init(&platform_state->susp_res_lock);
+
+ init_completion(&platform_state->ka_evt);
+ atomic_set(&platform_state->ka_use_count, 0);
+ atomic_set(&platform_state->ka_use_ack_count, 0);
+ atomic_set(&platform_state->ka_release_count, 0);
+
+ platform_state->state = state;
+
+ state->platform_state = (struct opaque_platform_state *)platform_state;
+
+ return 0;
+}
+
+static struct vchiq_arm_state *vchiq_platform_get_arm_state(struct vchiq_state *state)
+{
+ return (struct vchiq_arm_state *)state->platform_state;
+}
+
+static void
+vchiq_platform_uninit(struct vchiq_drv_mgmt *mgmt)
+{
+ struct vchiq_arm_state *arm_state;
+
+ kthread_stop(mgmt->state.sync_thread);
+ kthread_stop(mgmt->state.recycle_thread);
+ kthread_stop(mgmt->state.slot_handler_thread);
+
+ arm_state = vchiq_platform_get_arm_state(&mgmt->state);
+ if (!IS_ERR_OR_NULL(arm_state->ka_thread))
+ kthread_stop(arm_state->ka_thread);
+}
+
+void vchiq_dump_platform_state(struct seq_file *f)
+{
+ seq_puts(f, " Platform: 2835 (VC master)\n");
+}
+
+#define VCHIQ_INIT_RETRIES 10
+int vchiq_initialise(struct vchiq_state *state, struct vchiq_instance **instance_out)
+{
+ struct vchiq_instance *instance = NULL;
+ int i, ret;
+
+ /*
+ * VideoCore may not be ready due to boot up timing.
+ * It may never be ready if kernel and firmware are mismatched, so don't
+ * block forever.
+ */
+ for (i = 0; i < VCHIQ_INIT_RETRIES; i++) {
+ if (vchiq_remote_initialised(state))
+ break;
+ usleep_range(500, 600);
+ }
+ if (i == VCHIQ_INIT_RETRIES) {
+ dev_err(state->dev, "core: %s: Videocore not initialized\n", __func__);
+ ret = -ENOTCONN;
+ goto failed;
+ } else if (i > 0) {
+ dev_warn(state->dev, "core: %s: videocore initialized after %d retries\n",
+ __func__, i);
+ }
+
+ instance = kzalloc(sizeof(*instance), GFP_KERNEL);
+ if (!instance) {
+ ret = -ENOMEM;
+ goto failed;
+ }
+
+ instance->connected = 0;
+ instance->state = state;
+ mutex_init(&instance->bulk_waiter_list_mutex);
+ INIT_LIST_HEAD(&instance->bulk_waiter_list);
+
+ *instance_out = instance;
+
+ ret = 0;
+
+failed:
+ dev_dbg(state->dev, "core: (%p): returning %d\n", instance, ret);
+
+ return ret;
+}
+EXPORT_SYMBOL(vchiq_initialise);
+
+void free_bulk_waiter(struct vchiq_instance *instance)
+{
+ struct bulk_waiter_node *waiter, *next;
+
+ list_for_each_entry_safe(waiter, next,
+ &instance->bulk_waiter_list, list) {
+ list_del(&waiter->list);
+ dev_dbg(instance->state->dev,
+ "arm: bulk_waiter - cleaned up %p for pid %d\n",
+ waiter, waiter->pid);
+ kfree(waiter);
+ }
+}
+
+int vchiq_shutdown(struct vchiq_instance *instance)
+{
+ struct vchiq_state *state = instance->state;
+ int ret = 0;
+
+ mutex_lock(&state->mutex);
+
+ /* Remove all services */
+ vchiq_shutdown_internal(state, instance);
+
+ mutex_unlock(&state->mutex);
+
+ dev_dbg(state->dev, "core: (%p): returning %d\n", instance, ret);
+
+ free_bulk_waiter(instance);
+ kfree(instance);
+
+ return ret;
+}
+EXPORT_SYMBOL(vchiq_shutdown);
+
+static int vchiq_is_connected(struct vchiq_instance *instance)
+{
+ return instance->connected;
+}
+
+int vchiq_connect(struct vchiq_instance *instance)
+{
+ struct vchiq_state *state = instance->state;
+ int ret;
+
+ if (mutex_lock_killable(&state->mutex)) {
+ dev_dbg(state->dev,
+ "core: call to mutex_lock failed\n");
+ ret = -EAGAIN;
+ goto failed;
+ }
+ ret = vchiq_connect_internal(state, instance);
+
+ if (!ret)
+ instance->connected = 1;
+
+ mutex_unlock(&state->mutex);
+
+failed:
+ dev_dbg(state->dev, "core: (%p): returning %d\n", instance, ret);
+
+ return ret;
+}
+EXPORT_SYMBOL(vchiq_connect);
+
+static int
+vchiq_add_service(struct vchiq_instance *instance,
+ const struct vchiq_service_params_kernel *params,
+ unsigned int *phandle)
+{
+ struct vchiq_state *state = instance->state;
+ struct vchiq_service *service = NULL;
+ int srvstate, ret;
+
+ *phandle = VCHIQ_SERVICE_HANDLE_INVALID;
+
+ srvstate = vchiq_is_connected(instance)
+ ? VCHIQ_SRVSTATE_LISTENING
+ : VCHIQ_SRVSTATE_HIDDEN;
+
+ service = vchiq_add_service_internal(state, params, srvstate, instance, NULL);
+
+ if (service) {
+ *phandle = service->handle;
+ ret = 0;
+ } else {
+ ret = -EINVAL;
+ }
+
+ dev_dbg(state->dev, "core: (%p): returning %d\n", instance, ret);
+
+ return ret;
+}
+
+int
+vchiq_open_service(struct vchiq_instance *instance,
+ const struct vchiq_service_params_kernel *params,
+ unsigned int *phandle)
+{
+ struct vchiq_state *state = instance->state;
+ struct vchiq_service *service = NULL;
+ int ret = -EINVAL;
+
+ *phandle = VCHIQ_SERVICE_HANDLE_INVALID;
+
+ if (!vchiq_is_connected(instance))
+ goto failed;
+
+ service = vchiq_add_service_internal(state, params, VCHIQ_SRVSTATE_OPENING, instance, NULL);
+
+ if (service) {
+ *phandle = service->handle;
+ ret = vchiq_open_service_internal(service, current->pid);
+ if (ret) {
+ vchiq_remove_service(instance, service->handle);
+ *phandle = VCHIQ_SERVICE_HANDLE_INVALID;
+ }
+ }
+
+failed:
+ dev_dbg(state->dev, "core: (%p): returning %d\n", instance, ret);
+
+ return ret;
+}
+EXPORT_SYMBOL(vchiq_open_service);
+
+int
+vchiq_bulk_transmit(struct vchiq_instance *instance, unsigned int handle, const void *data,
+ unsigned int size, void *userdata, enum vchiq_bulk_mode mode)
+{
+ struct vchiq_bulk bulk_params = {};
+ int ret;
+
+ switch (mode) {
+ case VCHIQ_BULK_MODE_NOCALLBACK:
+ case VCHIQ_BULK_MODE_CALLBACK:
+
+ bulk_params.offset = (void *)data;
+ bulk_params.mode = mode;
+ bulk_params.size = size;
+ bulk_params.cb_data = userdata;
+ bulk_params.dir = VCHIQ_BULK_TRANSMIT;
+
+ ret = vchiq_bulk_xfer_callback(instance, handle, &bulk_params);
+ break;
+ case VCHIQ_BULK_MODE_BLOCKING:
+ bulk_params.offset = (void *)data;
+ bulk_params.mode = mode;
+ bulk_params.size = size;
+ bulk_params.dir = VCHIQ_BULK_TRANSMIT;
+
+ ret = vchiq_blocking_bulk_transfer(instance, handle, &bulk_params);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(vchiq_bulk_transmit);
+
+int vchiq_bulk_receive(struct vchiq_instance *instance, unsigned int handle,
+ void *data, unsigned int size, void *userdata,
+ enum vchiq_bulk_mode mode)
+{
+ struct vchiq_bulk bulk_params = {};
+ int ret;
+
+ switch (mode) {
+ case VCHIQ_BULK_MODE_NOCALLBACK:
+ case VCHIQ_BULK_MODE_CALLBACK:
+
+ bulk_params.offset = (void *)data;
+ bulk_params.mode = mode;
+ bulk_params.size = size;
+ bulk_params.cb_data = userdata;
+ bulk_params.dir = VCHIQ_BULK_RECEIVE;
+
+ ret = vchiq_bulk_xfer_callback(instance, handle, &bulk_params);
+ break;
+ case VCHIQ_BULK_MODE_BLOCKING:
+ bulk_params.offset = (void *)data;
+ bulk_params.mode = mode;
+ bulk_params.size = size;
+ bulk_params.dir = VCHIQ_BULK_RECEIVE;
+
+ ret = vchiq_blocking_bulk_transfer(instance, handle, &bulk_params);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(vchiq_bulk_receive);
+
+static int
+vchiq_blocking_bulk_transfer(struct vchiq_instance *instance, unsigned int handle,
+ struct vchiq_bulk *bulk_params)
+{
+ struct vchiq_service *service;
+ struct bulk_waiter_node *waiter = NULL, *iter;
+ int ret;
+
+ service = find_service_by_handle(instance, handle);
+ if (!service)
+ return -EINVAL;
+
+ vchiq_service_put(service);
+
+ mutex_lock(&instance->bulk_waiter_list_mutex);
+ list_for_each_entry(iter, &instance->bulk_waiter_list, list) {
+ if (iter->pid == current->pid) {
+ list_del(&iter->list);
+ waiter = iter;
+ break;
+ }
+ }
+ mutex_unlock(&instance->bulk_waiter_list_mutex);
+
+ if (waiter) {
+ struct vchiq_bulk *bulk = waiter->bulk_waiter.bulk;
+
+ if (bulk) {
+ /* This thread has an outstanding bulk transfer. */
+ /* FIXME: why compare a dma address to a pointer? */
+ if ((bulk->dma_addr != (dma_addr_t)(uintptr_t)bulk_params->dma_addr) ||
+ (bulk->size != bulk_params->size)) {
+ /*
+ * This is not a retry of the previous one.
+ * Cancel the signal when the transfer completes.
+ */
+ spin_lock(&service->state->bulk_waiter_spinlock);
+ bulk->waiter = NULL;
+ spin_unlock(&service->state->bulk_waiter_spinlock);
+ }
+ }
+ } else {
+ waiter = kzalloc(sizeof(*waiter), GFP_KERNEL);
+ if (!waiter)
+ return -ENOMEM;
+ }
+
+ bulk_params->waiter = &waiter->bulk_waiter;
+
+ ret = vchiq_bulk_xfer_blocking(instance, handle, bulk_params);
+ if ((ret != -EAGAIN) || fatal_signal_pending(current) || !waiter->bulk_waiter.bulk) {
+ struct vchiq_bulk *bulk = waiter->bulk_waiter.bulk;
+
+ if (bulk) {
+ /* Cancel the signal when the transfer completes. */
+ spin_lock(&service->state->bulk_waiter_spinlock);
+ bulk->waiter = NULL;
+ spin_unlock(&service->state->bulk_waiter_spinlock);
+ }
+ kfree(waiter);
+ } else {
+ waiter->pid = current->pid;
+ mutex_lock(&instance->bulk_waiter_list_mutex);
+ list_add(&waiter->list, &instance->bulk_waiter_list);
+ mutex_unlock(&instance->bulk_waiter_list_mutex);
+ dev_dbg(instance->state->dev, "arm: saved bulk_waiter %p for pid %d\n",
+ waiter, current->pid);
+ }
+
+ return ret;
+}
+
+static int
+add_completion(struct vchiq_instance *instance, enum vchiq_reason reason,
+ struct vchiq_header *header, struct user_service *user_service,
+ void *cb_data, void __user *cb_userdata)
+{
+ struct vchiq_completion_data_kernel *completion;
+ struct vchiq_drv_mgmt *mgmt = dev_get_drvdata(instance->state->dev);
+ int insert;
+
+ DEBUG_INITIALISE(mgmt->state.local);
+
+ insert = instance->completion_insert;
+ while ((insert - instance->completion_remove) >= MAX_COMPLETIONS) {
+ /* Out of space - wait for the client */
+ DEBUG_TRACE(SERVICE_CALLBACK_LINE);
+ dev_dbg(instance->state->dev, "core: completion queue full\n");
+ DEBUG_COUNT(COMPLETION_QUEUE_FULL_COUNT);
+ if (wait_for_completion_interruptible(&instance->remove_event)) {
+ dev_dbg(instance->state->dev, "arm: service_callback interrupted\n");
+ return -EAGAIN;
+ } else if (instance->closing) {
+ dev_dbg(instance->state->dev, "arm: service_callback closing\n");
+ return 0;
+ }
+ DEBUG_TRACE(SERVICE_CALLBACK_LINE);
+ }
+
+ completion = &instance->completions[insert & (MAX_COMPLETIONS - 1)];
+
+ completion->header = header;
+ completion->reason = reason;
+ /* N.B. service_userdata is updated while processing AWAIT_COMPLETION */
+ completion->service_userdata = user_service->service;
+ completion->cb_data = cb_data;
+ completion->cb_userdata = cb_userdata;
+
+ if (reason == VCHIQ_SERVICE_CLOSED) {
+ /*
+ * Take an extra reference, to be held until
+ * this CLOSED notification is delivered.
+ */
+ vchiq_service_get(user_service->service);
+ if (instance->use_close_delivered)
+ user_service->close_pending = 1;
+ }
+
+ /*
+ * A write barrier is needed here to ensure that the entire completion
+ * record is written out before the insert point.
+ */
+ wmb();
+
+ if (reason == VCHIQ_MESSAGE_AVAILABLE)
+ user_service->message_available_pos = insert;
+
+ insert++;
+ instance->completion_insert = insert;
+
+ complete(&instance->insert_event);
+
+ return 0;
+}
+
+static int
+service_single_message(struct vchiq_instance *instance,
+ enum vchiq_reason reason, struct vchiq_service *service,
+ void *cb_data, void __user *cb_userdata)
+{
+ struct user_service *user_service;
+
+ user_service = (struct user_service *)service->base.userdata;
+
+ dev_dbg(service->state->dev, "arm: msg queue full\n");
+ /*
+ * If there is no MESSAGE_AVAILABLE in the completion
+ * queue, add one
+ */
+ if ((user_service->message_available_pos -
+ instance->completion_remove) < 0) {
+ int ret;
+
+ dev_dbg(instance->state->dev,
+ "arm: Inserting extra MESSAGE_AVAILABLE\n");
+ ret = add_completion(instance, reason, NULL, user_service,
+ cb_data, cb_userdata);
+ if (ret)
+ return ret;
+ }
+
+ if (wait_for_completion_interruptible(&user_service->remove_event)) {
+ dev_dbg(instance->state->dev, "arm: interrupted\n");
+ return -EAGAIN;
+ } else if (instance->closing) {
+ dev_dbg(instance->state->dev, "arm: closing\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int
+service_callback(struct vchiq_instance *instance, enum vchiq_reason reason,
+ struct vchiq_header *header, unsigned int handle,
+ void *cb_data, void __user *cb_userdata)
+{
+ /*
+ * How do we ensure the callback goes to the right client?
+ * The service_user data points to a user_service record
+ * containing the original callback and the user state structure, which
+ * contains a circular buffer for completion records.
+ */
+ struct vchiq_drv_mgmt *mgmt = dev_get_drvdata(instance->state->dev);
+ struct user_service *user_service;
+ struct vchiq_service *service;
+ bool skip_completion = false;
+
+ DEBUG_INITIALISE(mgmt->state.local);
+
+ DEBUG_TRACE(SERVICE_CALLBACK_LINE);
+
+ rcu_read_lock();
+ service = handle_to_service(instance, handle);
+ if (WARN_ON(!service)) {
+ rcu_read_unlock();
+ return 0;
+ }
+
+ user_service = (struct user_service *)service->base.userdata;
+
+ if (instance->closing) {
+ rcu_read_unlock();
+ return 0;
+ }
+
+ /*
+ * As we hop between different synchronization mechanisms,
+ * taking an extra reference results in a simpler implementation.
+ */
+ vchiq_service_get(service);
+ rcu_read_unlock();
+
+ dev_dbg(service->state->dev,
+ "arm: service %p(%d,%p), reason %d, header %p, instance %p, cb_data %p, cb_userdata %p\n",
+ user_service, service->localport, user_service->userdata,
+ reason, header, instance, cb_data, cb_userdata);
+
+ if (header && user_service->is_vchi) {
+ spin_lock(&service->state->msg_queue_spinlock);
+ while (user_service->msg_insert ==
+ (user_service->msg_remove + MSG_QUEUE_SIZE)) {
+ int ret;
+
+ spin_unlock(&service->state->msg_queue_spinlock);
+ DEBUG_TRACE(SERVICE_CALLBACK_LINE);
+ DEBUG_COUNT(MSG_QUEUE_FULL_COUNT);
+
+ ret = service_single_message(instance, reason, service,
+ cb_data, cb_userdata);
+ if (ret) {
+ DEBUG_TRACE(SERVICE_CALLBACK_LINE);
+ vchiq_service_put(service);
+ return ret;
+ }
+ DEBUG_TRACE(SERVICE_CALLBACK_LINE);
+ spin_lock(&service->state->msg_queue_spinlock);
+ }
+
+ user_service->msg_queue[user_service->msg_insert &
+ (MSG_QUEUE_SIZE - 1)] = header;
+ user_service->msg_insert++;
+
+ /*
+ * If there is a thread waiting in DEQUEUE_MESSAGE, or if
+ * there is a MESSAGE_AVAILABLE in the completion queue then
+ * bypass the completion queue.
+ */
+ if (((user_service->message_available_pos -
+ instance->completion_remove) >= 0) ||
+ user_service->dequeue_pending) {
+ user_service->dequeue_pending = 0;
+ skip_completion = true;
+ }
+
+ spin_unlock(&service->state->msg_queue_spinlock);
+ complete(&user_service->insert_event);
+
+ header = NULL;
+ }
+ DEBUG_TRACE(SERVICE_CALLBACK_LINE);
+ vchiq_service_put(service);
+
+ if (skip_completion)
+ return 0;
+
+ return add_completion(instance, reason, header, user_service,
+ cb_data, cb_userdata);
+}
+
+void vchiq_dump_platform_instances(struct vchiq_state *state, struct seq_file *f)
+{
+ int i;
+
+ if (!vchiq_remote_initialised(state))
+ return;
+
+ /*
+ * There is no list of instances, so instead scan all services,
+ * marking those that have been dumped.
+ */
+
+ rcu_read_lock();
+ for (i = 0; i < state->unused_service; i++) {
+ struct vchiq_service *service;
+ struct vchiq_instance *instance;
+
+ service = rcu_dereference(state->services[i]);
+ if (!service || service->base.callback != service_callback)
+ continue;
+
+ instance = service->instance;
+ if (instance)
+ instance->mark = 0;
+ }
+ rcu_read_unlock();
+
+ for (i = 0; i < state->unused_service; i++) {
+ struct vchiq_service *service;
+ struct vchiq_instance *instance;
+
+ rcu_read_lock();
+ service = rcu_dereference(state->services[i]);
+ if (!service || service->base.callback != service_callback) {
+ rcu_read_unlock();
+ continue;
+ }
+
+ instance = service->instance;
+ if (!instance || instance->mark) {
+ rcu_read_unlock();
+ continue;
+ }
+ rcu_read_unlock();
+
+ seq_printf(f, "Instance %pK: pid %d,%s completions %d/%d\n",
+ instance, instance->pid,
+ instance->connected ? " connected, " :
+ "",
+ instance->completion_insert -
+ instance->completion_remove,
+ MAX_COMPLETIONS);
+ instance->mark = 1;
+ }
+}
+
+void vchiq_dump_platform_service_state(struct seq_file *f,
+ struct vchiq_service *service)
+{
+ struct user_service *user_service =
+ (struct user_service *)service->base.userdata;
+
+ seq_printf(f, " instance %pK", service->instance);
+
+ if ((service->base.callback == service_callback) && user_service->is_vchi) {
+ seq_printf(f, ", %d/%d messages",
+ user_service->msg_insert - user_service->msg_remove,
+ MSG_QUEUE_SIZE);
+
+ if (user_service->dequeue_pending)
+ seq_puts(f, " (dequeue pending)");
+ }
+
+ seq_puts(f, "\n");
+}
+
+/*
+ * Autosuspend related functionality
+ */
+
+static int
+vchiq_keepalive_vchiq_callback(struct vchiq_instance *instance,
+ enum vchiq_reason reason,
+ struct vchiq_header *header,
+ unsigned int service_user,
+ void *cb_data, void __user *cb_userdata)
+{
+ dev_err(instance->state->dev, "suspend: %s: callback reason %d\n",
+ __func__, reason);
+ return 0;
+}
+
+static int
+vchiq_keepalive_thread_func(void *v)
+{
+ struct vchiq_state *state = (struct vchiq_state *)v;
+ struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
+ struct vchiq_instance *instance;
+ unsigned int ka_handle;
+ int ret;
+
+ struct vchiq_service_params_kernel params = {
+ .fourcc = VCHIQ_MAKE_FOURCC('K', 'E', 'E', 'P'),
+ .callback = vchiq_keepalive_vchiq_callback,
+ .version = KEEPALIVE_VER,
+ .version_min = KEEPALIVE_VER_MIN
+ };
+
+ ret = vchiq_initialise(state, &instance);
+ if (ret) {
+ dev_err(state->dev, "suspend: %s: vchiq_initialise failed %d\n", __func__, ret);
+ goto exit;
+ }
+
+ ret = vchiq_connect(instance);
+ if (ret) {
+ dev_err(state->dev, "suspend: %s: vchiq_connect failed %d\n", __func__, ret);
+ goto shutdown;
+ }
+
+ ret = vchiq_add_service(instance, &params, &ka_handle);
+ if (ret) {
+ dev_err(state->dev, "suspend: %s: vchiq_open_service failed %d\n",
+ __func__, ret);
+ goto shutdown;
+ }
+
+ while (!kthread_should_stop()) {
+ long rc = 0, uc = 0;
+
+ if (wait_for_completion_interruptible(&arm_state->ka_evt)) {
+ dev_dbg(state->dev, "suspend: %s: interrupted\n", __func__);
+ flush_signals(current);
+ continue;
+ }
+
+ /*
+ * read and clear counters. Do release_count then use_count to
+ * prevent getting more releases than uses
+ */
+ rc = atomic_xchg(&arm_state->ka_release_count, 0);
+ uc = atomic_xchg(&arm_state->ka_use_count, 0);
+
+ /*
+ * Call use/release service the requisite number of times.
+ * Process use before release so use counts don't go negative
+ */
+ while (uc--) {
+ atomic_inc(&arm_state->ka_use_ack_count);
+ ret = vchiq_use_service(instance, ka_handle);
+ if (ret) {
+ dev_err(state->dev, "suspend: %s: vchiq_use_service error %d\n",
+ __func__, ret);
+ }
+ }
+ while (rc--) {
+ ret = vchiq_release_service(instance, ka_handle);
+ if (ret) {
+ dev_err(state->dev, "suspend: %s: vchiq_release_service error %d\n",
+ __func__, ret);
+ }
+ }
+ }
+
+shutdown:
+ vchiq_shutdown(instance);
+exit:
+ return 0;
+}
+
+int
+vchiq_use_internal(struct vchiq_state *state, struct vchiq_service *service,
+ enum USE_TYPE_E use_type)
+{
+ struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
+ int ret = 0;
+ char entity[64];
+ int *entity_uc;
+ int local_uc;
+
+ if (!arm_state) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (use_type == USE_TYPE_VCHIQ) {
+ snprintf(entity, sizeof(entity), "VCHIQ: ");
+ entity_uc = &arm_state->peer_use_count;
+ } else if (service) {
+ snprintf(entity, sizeof(entity), "%p4cc:%03d",
+ &service->base.fourcc,
+ service->client_id);
+ entity_uc = &service->service_use_count;
+ } else {
+ dev_err(state->dev, "suspend: %s: null service ptr\n", __func__);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ write_lock_bh(&arm_state->susp_res_lock);
+ local_uc = ++arm_state->videocore_use_count;
+ ++(*entity_uc);
+
+ dev_dbg(state->dev, "suspend: %s count %d, state count %d\n",
+ entity, *entity_uc, local_uc);
+
+ write_unlock_bh(&arm_state->susp_res_lock);
+
+ if (!ret) {
+ int ret = 0;
+ long ack_cnt = atomic_xchg(&arm_state->ka_use_ack_count, 0);
+
+ while (ack_cnt && !ret) {
+ /* Send the use notify to videocore */
+ ret = vchiq_send_remote_use_active(state);
+ if (!ret)
+ ack_cnt--;
+ else
+ atomic_add(ack_cnt, &arm_state->ka_use_ack_count);
+ }
+ }
+
+out:
+ dev_dbg(state->dev, "suspend: exit %d\n", ret);
+ return ret;
+}
+
+int
+vchiq_release_internal(struct vchiq_state *state, struct vchiq_service *service)
+{
+ struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
+ int ret = 0;
+ char entity[64];
+ int *entity_uc;
+
+ if (!arm_state) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (service) {
+ snprintf(entity, sizeof(entity), "%p4cc:%03d",
+ &service->base.fourcc,
+ service->client_id);
+ entity_uc = &service->service_use_count;
+ } else {
+ snprintf(entity, sizeof(entity), "PEER: ");
+ entity_uc = &arm_state->peer_use_count;
+ }
+
+ write_lock_bh(&arm_state->susp_res_lock);
+ if (!arm_state->videocore_use_count || !(*entity_uc)) {
+ WARN_ON(!arm_state->videocore_use_count);
+ WARN_ON(!(*entity_uc));
+ ret = -EINVAL;
+ goto unlock;
+ }
+ --arm_state->videocore_use_count;
+ --(*entity_uc);
+
+ dev_dbg(state->dev, "suspend: %s count %d, state count %d\n",
+ entity, *entity_uc, arm_state->videocore_use_count);
+
+unlock:
+ write_unlock_bh(&arm_state->susp_res_lock);
+
+out:
+ dev_dbg(state->dev, "suspend: exit %d\n", ret);
+ return ret;
+}
+
+void
+vchiq_on_remote_use(struct vchiq_state *state)
+{
+ struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
+
+ atomic_inc(&arm_state->ka_use_count);
+ complete(&arm_state->ka_evt);
+}
+
+void
+vchiq_on_remote_release(struct vchiq_state *state)
+{
+ struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
+
+ atomic_inc(&arm_state->ka_release_count);
+ complete(&arm_state->ka_evt);
+}
+
+int
+vchiq_use_service_internal(struct vchiq_service *service)
+{
+ return vchiq_use_internal(service->state, service, USE_TYPE_SERVICE);
+}
+
+int
+vchiq_release_service_internal(struct vchiq_service *service)
+{
+ return vchiq_release_internal(service->state, service);
+}
+
+struct vchiq_debugfs_node *
+vchiq_instance_get_debugfs_node(struct vchiq_instance *instance)
+{
+ return &instance->debugfs_node;
+}
+
+int
+vchiq_instance_get_use_count(struct vchiq_instance *instance)
+{
+ struct vchiq_service *service;
+ int use_count = 0, i;
+
+ i = 0;
+ rcu_read_lock();
+ while ((service = __next_service_by_instance(instance->state,
+ instance, &i)))
+ use_count += service->service_use_count;
+ rcu_read_unlock();
+ return use_count;
+}
+
+int
+vchiq_instance_get_pid(struct vchiq_instance *instance)
+{
+ return instance->pid;
+}
+
+int
+vchiq_instance_get_trace(struct vchiq_instance *instance)
+{
+ return instance->trace;
+}
+
+void
+vchiq_instance_set_trace(struct vchiq_instance *instance, int trace)
+{
+ struct vchiq_service *service;
+ int i;
+
+ i = 0;
+ rcu_read_lock();
+ while ((service = __next_service_by_instance(instance->state,
+ instance, &i)))
+ service->trace = trace;
+ rcu_read_unlock();
+ instance->trace = (trace != 0);
+}
+
+int
+vchiq_use_service(struct vchiq_instance *instance, unsigned int handle)
+{
+ int ret = -EINVAL;
+ struct vchiq_service *service = find_service_by_handle(instance, handle);
+
+ if (service) {
+ ret = vchiq_use_internal(service->state, service, USE_TYPE_SERVICE);
+ vchiq_service_put(service);
+ }
+ return ret;
+}
+EXPORT_SYMBOL(vchiq_use_service);
+
+int
+vchiq_release_service(struct vchiq_instance *instance, unsigned int handle)
+{
+ int ret = -EINVAL;
+ struct vchiq_service *service = find_service_by_handle(instance, handle);
+
+ if (service) {
+ ret = vchiq_release_internal(service->state, service);
+ vchiq_service_put(service);
+ }
+ return ret;
+}
+EXPORT_SYMBOL(vchiq_release_service);
+
+struct service_data_struct {
+ int fourcc;
+ int clientid;
+ int use_count;
+};
+
+void
+vchiq_dump_service_use_state(struct vchiq_state *state)
+{
+ struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
+ struct service_data_struct *service_data;
+ int i, found = 0;
+ /*
+ * If there's more than 64 services, only dump ones with
+ * non-zero counts
+ */
+ int only_nonzero = 0;
+ static const char *nz = "<-- preventing suspend";
+
+ int peer_count;
+ int vc_use_count;
+ int active_services;
+
+ if (!arm_state)
+ return;
+
+ service_data = kmalloc_array(MAX_SERVICES, sizeof(*service_data),
+ GFP_KERNEL);
+ if (!service_data)
+ return;
+
+ read_lock_bh(&arm_state->susp_res_lock);
+ peer_count = arm_state->peer_use_count;
+ vc_use_count = arm_state->videocore_use_count;
+ active_services = state->unused_service;
+ if (active_services > MAX_SERVICES)
+ only_nonzero = 1;
+
+ rcu_read_lock();
+ for (i = 0; i < active_services; i++) {
+ struct vchiq_service *service_ptr =
+ rcu_dereference(state->services[i]);
+
+ if (!service_ptr)
+ continue;
+
+ if (only_nonzero && !service_ptr->service_use_count)
+ continue;
+
+ if (service_ptr->srvstate == VCHIQ_SRVSTATE_FREE)
+ continue;
+
+ service_data[found].fourcc = service_ptr->base.fourcc;
+ service_data[found].clientid = service_ptr->client_id;
+ service_data[found].use_count = service_ptr->service_use_count;
+ found++;
+ if (found >= MAX_SERVICES)
+ break;
+ }
+ rcu_read_unlock();
+
+ read_unlock_bh(&arm_state->susp_res_lock);
+
+ if (only_nonzero)
+ dev_warn(state->dev,
+ "suspend: Too many active services (%d). Only dumping up to first %d services with non-zero use-count\n",
+ active_services, found);
+
+ for (i = 0; i < found; i++) {
+ dev_warn(state->dev,
+ "suspend: %p4cc:%d service count %d %s\n",
+ &service_data[i].fourcc,
+ service_data[i].clientid, service_data[i].use_count,
+ service_data[i].use_count ? nz : "");
+ }
+ dev_warn(state->dev, "suspend: VCHIQ use count %d\n", peer_count);
+ dev_warn(state->dev, "suspend: Overall vchiq instance use count %d\n", vc_use_count);
+
+ kfree(service_data);
+}
+
+int
+vchiq_check_service(struct vchiq_service *service)
+{
+ struct vchiq_arm_state *arm_state;
+ int ret = -EINVAL;
+
+ if (!service || !service->state)
+ goto out;
+
+ arm_state = vchiq_platform_get_arm_state(service->state);
+
+ read_lock_bh(&arm_state->susp_res_lock);
+ if (service->service_use_count)
+ ret = 0;
+ read_unlock_bh(&arm_state->susp_res_lock);
+
+ if (ret) {
+ dev_err(service->state->dev,
+ "suspend: %s: %p4cc:%d service count %d, state count %d\n",
+ __func__, &service->base.fourcc, service->client_id,
+ service->service_use_count, arm_state->videocore_use_count);
+ vchiq_dump_service_use_state(service->state);
+ }
+out:
+ return ret;
+}
+
+void vchiq_platform_conn_state_changed(struct vchiq_state *state,
+ enum vchiq_connstate oldstate,
+ enum vchiq_connstate newstate)
+{
+ struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
+ char threadname[16];
+
+ dev_dbg(state->dev, "suspend: %d: %s->%s\n",
+ state->id, get_conn_state_name(oldstate), get_conn_state_name(newstate));
+ if (state->conn_state != VCHIQ_CONNSTATE_CONNECTED)
+ return;
+
+ write_lock_bh(&arm_state->susp_res_lock);
+ if (arm_state->first_connect) {
+ write_unlock_bh(&arm_state->susp_res_lock);
+ return;
+ }
+
+ arm_state->first_connect = 1;
+ write_unlock_bh(&arm_state->susp_res_lock);
+ snprintf(threadname, sizeof(threadname), "vchiq-keep/%d",
+ state->id);
+ arm_state->ka_thread = kthread_create(&vchiq_keepalive_thread_func,
+ (void *)state,
+ threadname);
+ if (IS_ERR(arm_state->ka_thread)) {
+ dev_err(state->dev, "suspend: Couldn't create thread %s\n",
+ threadname);
+ } else {
+ wake_up_process(arm_state->ka_thread);
+ }
+}
+
+static const struct of_device_id vchiq_of_match[] = {
+ { .compatible = "brcm,bcm2835-vchiq", .data = &bcm2835_info },
+ { .compatible = "brcm,bcm2836-vchiq", .data = &bcm2836_info },
+ {},
+};
+MODULE_DEVICE_TABLE(of, vchiq_of_match);
+
+static int vchiq_probe(struct platform_device *pdev)
+{
+ const struct vchiq_platform_info *info;
+ struct vchiq_drv_mgmt *mgmt;
+ int ret;
+
+ info = of_device_get_match_data(&pdev->dev);
+ if (!info)
+ return -EINVAL;
+
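+	/*
+	 * The __free(device_node) annotation drops the node reference
+	 * automatically when fw_node goes out of scope.
+	 */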
+ struct device_node *fw_node __free(device_node) =
+ of_find_compatible_node(NULL, NULL, "raspberrypi,bcm2835-firmware");
+ if (!fw_node) {
+ dev_err(&pdev->dev, "Missing firmware node\n");
+ return -ENOENT;
+ }
+
+ mgmt = devm_kzalloc(&pdev->dev, sizeof(*mgmt), GFP_KERNEL);
+ if (!mgmt)
+ return -ENOMEM;
+
+ mgmt->fw = devm_rpi_firmware_get(&pdev->dev, fw_node);
+ if (!mgmt->fw)
+ return -EPROBE_DEFER;
+
+ mgmt->info = info;
+ platform_set_drvdata(pdev, mgmt);
+
+ ret = vchiq_platform_init(pdev, &mgmt->state);
+ if (ret) {
+ dev_err(&pdev->dev, "arm: Could not initialize vchiq platform\n");
+ return ret;
+ }
+
+ dev_dbg(&pdev->dev, "arm: platform initialised - version %d (min %d)\n",
+ VCHIQ_VERSION, VCHIQ_VERSION_MIN);
+
+	/*
+	 * vchiq_register_chrdev() cleans up after itself on failure, so
+	 * only the platform state needs unwinding here.
+	 */
+ ret = vchiq_register_chrdev(&pdev->dev);
+ if (ret) {
+ dev_err(&pdev->dev, "arm: Failed to initialize vchiq cdev\n");
+ vchiq_platform_uninit(mgmt);
+ return ret;
+ }
+
+ vchiq_debugfs_init(&mgmt->state);
+
+ bcm2835_audio = vchiq_device_register(&pdev->dev, "bcm2835-audio");
+
+ return 0;
+}
+
+static void vchiq_remove(struct platform_device *pdev)
+{
+ struct vchiq_drv_mgmt *mgmt = dev_get_drvdata(&pdev->dev);
+
+ vchiq_device_unregister(bcm2835_audio);
+ vchiq_debugfs_deinit();
+ vchiq_deregister_chrdev();
+ vchiq_platform_uninit(mgmt);
+}
+
+static struct platform_driver vchiq_driver = {
+ .driver = {
+ .name = "bcm2835_vchiq",
+ .of_match_table = vchiq_of_match,
+ },
+ .probe = vchiq_probe,
+ .remove = vchiq_remove,
+};
+
+static int __init vchiq_driver_init(void)
+{
+ int ret;
+
+ ret = bus_register(&vchiq_bus_type);
+ if (ret) {
+ pr_err("Failed to register %s\n", vchiq_bus_type.name);
+ return ret;
+ }
+
+ ret = platform_driver_register(&vchiq_driver);
+ if (ret) {
+ pr_err("Failed to register vchiq driver\n");
+ bus_unregister(&vchiq_bus_type);
+ }
+
+ return ret;
+}
+module_init(vchiq_driver_init);
+
+static void __exit vchiq_driver_exit(void)
+{
+	/* Tear down in the reverse order of vchiq_driver_init(). */
+	platform_driver_unregister(&vchiq_driver);
+	bus_unregister(&vchiq_bus_type);
+}
+module_exit(vchiq_driver_exit);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("Videocore VCHIQ driver");
+MODULE_AUTHOR("Broadcom Corporation");
diff --git a/drivers/platform/raspberrypi/vchiq-interface/vchiq_bus.c b/drivers/platform/raspberrypi/vchiq-interface/vchiq_bus.c
new file mode 100644
index 000000000000..f50e637d505c
--- /dev/null
+++ b/drivers/platform/raspberrypi/vchiq-interface/vchiq_bus.c
@@ -0,0 +1,112 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * vchiq_bus.c - VCHIQ generic device and bus-type
+ *
+ * Copyright (c) 2023 Ideas On Board Oy
+ */
+
+#include <linux/device/bus.h>
+#include <linux/dma-mapping.h>
+#include <linux/of_device.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+
+#include <linux/raspberrypi/vchiq_arm.h>
+#include <linux/raspberrypi/vchiq_bus.h>
+
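+/* A vchiq device matches a driver purely by name. */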
+static int vchiq_bus_type_match(struct device *dev, const struct device_driver *drv)
+{
+ if (dev->bus == &vchiq_bus_type &&
+ strcmp(dev_name(dev), drv->name) == 0)
+ return true;
+
+ return false;
+}
+
+static int vchiq_bus_uevent(const struct device *dev, struct kobj_uevent_env *env)
+{
+ const struct vchiq_device *device = container_of_const(dev, struct vchiq_device, dev);
+
+ return add_uevent_var(env, "MODALIAS=vchiq:%s", dev_name(&device->dev));
+}
+
+static int vchiq_bus_probe(struct device *dev)
+{
+ struct vchiq_device *device = to_vchiq_device(dev);
+ struct vchiq_driver *driver = to_vchiq_driver(dev->driver);
+
+ return driver->probe(device);
+}
+
+static void vchiq_bus_remove(struct device *dev)
+{
+ struct vchiq_device *device = to_vchiq_device(dev);
+ struct vchiq_driver *driver = to_vchiq_driver(dev->driver);
+
+ if (driver->remove)
+ driver->remove(device);
+}
+
+const struct bus_type vchiq_bus_type = {
+ .name = "vchiq-bus",
+ .match = vchiq_bus_type_match,
+ .uevent = vchiq_bus_uevent,
+ .probe = vchiq_bus_probe,
+ .remove = vchiq_bus_remove,
+};
+
+static void vchiq_device_release(struct device *dev)
+{
+ struct vchiq_device *device = to_vchiq_device(dev);
+
+ kfree(device);
+}
+
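+/* Returns the registered device, or NULL on failure. */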
+struct vchiq_device *
+vchiq_device_register(struct device *parent, const char *name)
+{
+ struct vchiq_device *device;
+ int ret;
+
+ device = kzalloc(sizeof(*device), GFP_KERNEL);
+ if (!device)
+ return NULL;
+
+ device->dev.init_name = name;
+ device->dev.parent = parent;
+ device->dev.bus = &vchiq_bus_type;
+ device->dev.dma_mask = &device->dev.coherent_dma_mask;
+ device->dev.release = vchiq_device_release;
+
+ device->drv_mgmt = dev_get_drvdata(parent);
+
+ of_dma_configure(&device->dev, parent->of_node, true);
+
+ ret = device_register(&device->dev);
+ if (ret) {
+ dev_err(parent, "Cannot register %s: %d\n", name, ret);
+ put_device(&device->dev);
+ return NULL;
+ }
+
+ return device;
+}
+
+void vchiq_device_unregister(struct vchiq_device *vchiq_dev)
+{
+ device_unregister(&vchiq_dev->dev);
+}
+
+int vchiq_driver_register(struct vchiq_driver *vchiq_drv)
+{
+ vchiq_drv->driver.bus = &vchiq_bus_type;
+
+ return driver_register(&vchiq_drv->driver);
+}
+EXPORT_SYMBOL_GPL(vchiq_driver_register);
+
+void vchiq_driver_unregister(struct vchiq_driver *vchiq_drv)
+{
+ driver_unregister(&vchiq_drv->driver);
+}
+EXPORT_SYMBOL_GPL(vchiq_driver_unregister);
diff --git a/drivers/platform/raspberrypi/vchiq-interface/vchiq_core.c b/drivers/platform/raspberrypi/vchiq-interface/vchiq_core.c
new file mode 100644
index 000000000000..83de27cfd469
--- /dev/null
+++ b/drivers/platform/raspberrypi/vchiq-interface/vchiq_core.c
@@ -0,0 +1,4013 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright (c) 2010-2012 Broadcom. All rights reserved. */
+
+#include <linux/types.h>
+#include <linux/completion.h>
+#include <linux/mutex.h>
+#include <linux/bitops.h>
+#include <linux/io.h>
+#include <linux/highmem.h>
+#include <linux/kthread.h>
+#include <linux/wait.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/kref.h>
+#include <linux/rcupdate.h>
+#include <linux/sched/signal.h>
+
+#include <linux/raspberrypi/vchiq_arm.h>
+#include <linux/raspberrypi/vchiq_core.h>
+
+#define VCHIQ_SLOT_HANDLER_STACK 8192
+
+#define VCHIQ_MSG_PADDING 0 /* - */
+#define VCHIQ_MSG_CONNECT 1 /* - */
+#define VCHIQ_MSG_OPEN 2 /* + (srcport, -), fourcc, client_id */
+#define VCHIQ_MSG_OPENACK 3 /* + (srcport, dstport) */
+#define VCHIQ_MSG_CLOSE 4 /* + (srcport, dstport) */
+#define VCHIQ_MSG_DATA 5 /* + (srcport, dstport) */
+#define VCHIQ_MSG_BULK_RX 6 /* + (srcport, dstport), data, size */
+#define VCHIQ_MSG_BULK_TX 7 /* + (srcport, dstport), data, size */
+#define VCHIQ_MSG_BULK_RX_DONE 8 /* + (srcport, dstport), actual */
+#define VCHIQ_MSG_BULK_TX_DONE 9 /* + (srcport, dstport), actual */
+#define VCHIQ_MSG_PAUSE 10 /* - */
+#define VCHIQ_MSG_RESUME 11 /* - */
+#define VCHIQ_MSG_REMOTE_USE 12 /* - */
+#define VCHIQ_MSG_REMOTE_RELEASE 13 /* - */
+#define VCHIQ_MSG_REMOTE_USE_ACTIVE 14 /* - */
+
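+/*
+ * A message id packs the message type and both port numbers into a
+ * single 32-bit word: type in bits 31..24, source port in bits 23..12
+ * and destination port in bits 11..0.
+ */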
+#define TYPE_SHIFT 24
+
+#define VCHIQ_PORT_MAX (VCHIQ_MAX_SERVICES - 1)
+#define VCHIQ_PORT_FREE 0x1000
+#define VCHIQ_PORT_IS_VALID(port) ((port) < VCHIQ_PORT_FREE)
+#define VCHIQ_MAKE_MSG(type, srcport, dstport) \
+ (((type) << TYPE_SHIFT) | ((srcport) << 12) | ((dstport) << 0))
+#define VCHIQ_MSG_TYPE(msgid) ((unsigned int)(msgid) >> TYPE_SHIFT)
+#define VCHIQ_MSG_SRCPORT(msgid) \
+ ((unsigned short)(((unsigned int)(msgid) >> 12) & 0xfff))
+#define VCHIQ_MSG_DSTPORT(msgid) \
+ ((unsigned short)(msgid) & 0xfff)
+
+#define MAKE_CONNECT (VCHIQ_MSG_CONNECT << TYPE_SHIFT)
+#define MAKE_OPEN(srcport) \
+ ((VCHIQ_MSG_OPEN << TYPE_SHIFT) | ((srcport) << 12))
+#define MAKE_OPENACK(srcport, dstport) \
+ ((VCHIQ_MSG_OPENACK << TYPE_SHIFT) | ((srcport) << 12) | ((dstport) << 0))
+#define MAKE_CLOSE(srcport, dstport) \
+ ((VCHIQ_MSG_CLOSE << TYPE_SHIFT) | ((srcport) << 12) | ((dstport) << 0))
+#define MAKE_DATA(srcport, dstport) \
+ ((VCHIQ_MSG_DATA << TYPE_SHIFT) | ((srcport) << 12) | ((dstport) << 0))
+#define MAKE_PAUSE (VCHIQ_MSG_PAUSE << TYPE_SHIFT)
+#define MAKE_RESUME (VCHIQ_MSG_RESUME << TYPE_SHIFT)
+#define MAKE_REMOTE_USE (VCHIQ_MSG_REMOTE_USE << TYPE_SHIFT)
+#define MAKE_REMOTE_USE_ACTIVE (VCHIQ_MSG_REMOTE_USE_ACTIVE << TYPE_SHIFT)
+
+#define PAGELIST_WRITE 0
+#define PAGELIST_READ 1
+#define PAGELIST_READ_WITH_FRAGMENTS 2
+
+#define BELL2 0x08
+
+/* Ensure the fields are wide enough */
+static_assert(VCHIQ_MSG_SRCPORT(VCHIQ_MAKE_MSG(0, 0, VCHIQ_PORT_MAX)) == 0);
+static_assert(VCHIQ_MSG_TYPE(VCHIQ_MAKE_MSG(0, VCHIQ_PORT_MAX, 0)) == 0);
+static_assert((unsigned int)VCHIQ_PORT_MAX < (unsigned int)VCHIQ_PORT_FREE);
+
+#define VCHIQ_MSGID_PADDING VCHIQ_MAKE_MSG(VCHIQ_MSG_PADDING, 0, 0)
+#define VCHIQ_MSGID_CLAIMED 0x40000000
+
+#define VCHIQ_FOURCC_INVALID 0x00000000
+#define VCHIQ_FOURCC_IS_LEGAL(fourcc) ((fourcc) != VCHIQ_FOURCC_INVALID)
+
+#define VCHIQ_BULK_ACTUAL_ABORTED -1
+
+#if VCHIQ_ENABLE_STATS
+#define VCHIQ_STATS_INC(state, stat) (state->stats. stat++)
+#define VCHIQ_SERVICE_STATS_INC(service, stat) (service->stats. stat++)
+#define VCHIQ_SERVICE_STATS_ADD(service, stat, addend) \
+ (service->stats. stat += addend)
+#else
+#define VCHIQ_STATS_INC(state, stat) ((void)0)
+#define VCHIQ_SERVICE_STATS_INC(service, stat) ((void)0)
+#define VCHIQ_SERVICE_STATS_ADD(service, stat, addend) ((void)0)
+#endif
+
+#define HANDLE_STATE_SHIFT 12
+
+#define SLOT_INFO_FROM_INDEX(state, index) (state->slot_info + (index))
+#define SLOT_DATA_FROM_INDEX(state, index) (state->slot_data + (index))
+#define SLOT_INDEX_FROM_DATA(state, data) \
+ (((unsigned int)((char *)data - (char *)state->slot_data)) / \
+ VCHIQ_SLOT_SIZE)
+#define SLOT_INDEX_FROM_INFO(state, info) \
+ ((unsigned int)(info - state->slot_info))
+#define SLOT_QUEUE_INDEX_FROM_POS(pos) \
+ ((int)((unsigned int)(pos) / VCHIQ_SLOT_SIZE))
+#define SLOT_QUEUE_INDEX_FROM_POS_MASKED(pos) \
+ (SLOT_QUEUE_INDEX_FROM_POS(pos) & VCHIQ_SLOT_QUEUE_MASK)
+
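+/* Map a free-running bulk position onto the power-of-two bulks[] ring. */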
+#define BULK_INDEX(x) ((x) & (VCHIQ_NUM_SERVICE_BULKS - 1))
+
+#define NO_CLOSE_RECVD 0
+#define CLOSE_RECVD 1
+
+#define NO_RETRY_POLL 0
+#define RETRY_POLL 1
+
+struct vchiq_open_payload {
+ int fourcc;
+ int client_id;
+ short version;
+ short version_min;
+};
+
+struct vchiq_openack_payload {
+ short version;
+};
+
+enum {
+ QMFLAGS_IS_BLOCKING = BIT(0),
+ QMFLAGS_NO_MUTEX_LOCK = BIT(1),
+ QMFLAGS_NO_MUTEX_UNLOCK = BIT(2)
+};
+
+enum {
+ VCHIQ_POLL_TERMINATE,
+ VCHIQ_POLL_REMOVE,
+ VCHIQ_POLL_TXNOTIFY,
+ VCHIQ_POLL_RXNOTIFY,
+ VCHIQ_POLL_COUNT
+};
+
+/* we require this for consistency between endpoints */
+static_assert(sizeof(struct vchiq_header) == 8);
+static_assert(VCHIQ_VERSION >= VCHIQ_VERSION_MIN);
+
+static inline void check_sizes(void)
+{
+ BUILD_BUG_ON_NOT_POWER_OF_2(VCHIQ_SLOT_SIZE);
+ BUILD_BUG_ON_NOT_POWER_OF_2(VCHIQ_MAX_SLOTS);
+ BUILD_BUG_ON_NOT_POWER_OF_2(VCHIQ_MAX_SLOTS_PER_SIDE);
+ BUILD_BUG_ON_NOT_POWER_OF_2(sizeof(struct vchiq_header));
+ BUILD_BUG_ON_NOT_POWER_OF_2(VCHIQ_NUM_CURRENT_BULKS);
+ BUILD_BUG_ON_NOT_POWER_OF_2(VCHIQ_NUM_SERVICE_BULKS);
+ BUILD_BUG_ON_NOT_POWER_OF_2(VCHIQ_MAX_SERVICES);
+}
+
+static unsigned int handle_seq;
+
+static const char *const srvstate_names[] = {
+ "FREE",
+ "HIDDEN",
+ "LISTENING",
+ "OPENING",
+ "OPEN",
+ "OPENSYNC",
+ "CLOSESENT",
+ "CLOSERECVD",
+ "CLOSEWAIT",
+ "CLOSED"
+};
+
+static const char *const reason_names[] = {
+ "SERVICE_OPENED",
+ "SERVICE_CLOSED",
+ "MESSAGE_AVAILABLE",
+ "BULK_TRANSMIT_DONE",
+ "BULK_RECEIVE_DONE",
+ "BULK_TRANSMIT_ABORTED",
+ "BULK_RECEIVE_ABORTED"
+};
+
+static const char *const conn_state_names[] = {
+ "DISCONNECTED",
+ "CONNECTING",
+ "CONNECTED",
+ "PAUSING",
+ "PAUSE_SENT",
+ "PAUSED",
+ "RESUMING",
+ "PAUSE_TIMEOUT",
+ "RESUME_TIMEOUT"
+};
+
+static void
+release_message_sync(struct vchiq_state *state, struct vchiq_header *header);
+
+static const char *msg_type_str(unsigned int msg_type)
+{
+ switch (msg_type) {
+ case VCHIQ_MSG_PADDING: return "PADDING";
+ case VCHIQ_MSG_CONNECT: return "CONNECT";
+ case VCHIQ_MSG_OPEN: return "OPEN";
+ case VCHIQ_MSG_OPENACK: return "OPENACK";
+ case VCHIQ_MSG_CLOSE: return "CLOSE";
+ case VCHIQ_MSG_DATA: return "DATA";
+ case VCHIQ_MSG_BULK_RX: return "BULK_RX";
+ case VCHIQ_MSG_BULK_TX: return "BULK_TX";
+ case VCHIQ_MSG_BULK_RX_DONE: return "BULK_RX_DONE";
+ case VCHIQ_MSG_BULK_TX_DONE: return "BULK_TX_DONE";
+ case VCHIQ_MSG_PAUSE: return "PAUSE";
+ case VCHIQ_MSG_RESUME: return "RESUME";
+ case VCHIQ_MSG_REMOTE_USE: return "REMOTE_USE";
+ case VCHIQ_MSG_REMOTE_RELEASE: return "REMOTE_RELEASE";
+ case VCHIQ_MSG_REMOTE_USE_ACTIVE: return "REMOTE_USE_ACTIVE";
+ }
+ return "???";
+}
+
+static inline void
+set_service_state(struct vchiq_service *service, int newstate)
+{
+ dev_dbg(service->state->dev, "core: %d: srv:%d %s->%s\n",
+ service->state->id, service->localport,
+ srvstate_names[service->srvstate],
+ srvstate_names[newstate]);
+ service->srvstate = newstate;
+}
+
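+/* The low bits of a service handle index directly into the services array. */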
+struct vchiq_service *handle_to_service(struct vchiq_instance *instance, unsigned int handle)
+{
+ int idx = handle & (VCHIQ_MAX_SERVICES - 1);
+
+ return rcu_dereference(instance->state->services[idx]);
+}
+
+struct vchiq_service *
+find_service_by_handle(struct vchiq_instance *instance, unsigned int handle)
+{
+ struct vchiq_service *service;
+
+ rcu_read_lock();
+ service = handle_to_service(instance, handle);
+ if (service && service->srvstate != VCHIQ_SRVSTATE_FREE &&
+ service->handle == handle &&
+ kref_get_unless_zero(&service->ref_count)) {
+ service = rcu_pointer_handoff(service);
+ rcu_read_unlock();
+ return service;
+ }
+ rcu_read_unlock();
+ dev_dbg(instance->state->dev, "core: Invalid service handle 0x%x\n", handle);
+ return NULL;
+}
+
+struct vchiq_service *
+find_service_by_port(struct vchiq_state *state, unsigned int localport)
+{
+ if (localport <= VCHIQ_PORT_MAX) {
+ struct vchiq_service *service;
+
+ rcu_read_lock();
+ service = rcu_dereference(state->services[localport]);
+ if (service && service->srvstate != VCHIQ_SRVSTATE_FREE &&
+ kref_get_unless_zero(&service->ref_count)) {
+ service = rcu_pointer_handoff(service);
+ rcu_read_unlock();
+ return service;
+ }
+ rcu_read_unlock();
+ }
+ dev_dbg(state->dev, "core: Invalid port %u\n", localport);
+ return NULL;
+}
+
+struct vchiq_service *
+find_service_for_instance(struct vchiq_instance *instance, unsigned int handle)
+{
+ struct vchiq_service *service;
+
+ rcu_read_lock();
+ service = handle_to_service(instance, handle);
+ if (service && service->srvstate != VCHIQ_SRVSTATE_FREE &&
+ service->handle == handle &&
+ service->instance == instance &&
+ kref_get_unless_zero(&service->ref_count)) {
+ service = rcu_pointer_handoff(service);
+ rcu_read_unlock();
+ return service;
+ }
+ rcu_read_unlock();
+ dev_dbg(instance->state->dev, "core: Invalid service handle 0x%x\n", handle);
+ return NULL;
+}
+
+struct vchiq_service *
+find_closed_service_for_instance(struct vchiq_instance *instance, unsigned int handle)
+{
+ struct vchiq_service *service;
+
+ rcu_read_lock();
+ service = handle_to_service(instance, handle);
+ if (service &&
+ (service->srvstate == VCHIQ_SRVSTATE_FREE ||
+ service->srvstate == VCHIQ_SRVSTATE_CLOSED) &&
+ service->handle == handle &&
+ service->instance == instance &&
+ kref_get_unless_zero(&service->ref_count)) {
+ service = rcu_pointer_handoff(service);
+ rcu_read_unlock();
+ return service;
+ }
+ rcu_read_unlock();
+ dev_dbg(instance->state->dev, "core: Invalid service handle 0x%x\n", handle);
+	/* No reference was taken, so don't return the stale pointer. */
+	return NULL;
+}
+
+struct vchiq_service *
+__next_service_by_instance(struct vchiq_state *state,
+ struct vchiq_instance *instance,
+ int *pidx)
+{
+ struct vchiq_service *service = NULL;
+ int idx = *pidx;
+
+ while (idx < state->unused_service) {
+ struct vchiq_service *srv;
+
+ srv = rcu_dereference(state->services[idx]);
+ idx++;
+ if (srv && srv->srvstate != VCHIQ_SRVSTATE_FREE &&
+ srv->instance == instance) {
+ service = srv;
+ break;
+ }
+ }
+
+ *pidx = idx;
+ return service;
+}
+
+struct vchiq_service *
+next_service_by_instance(struct vchiq_state *state,
+ struct vchiq_instance *instance,
+ int *pidx)
+{
+ struct vchiq_service *service;
+
+ rcu_read_lock();
+ while (1) {
+ service = __next_service_by_instance(state, instance, pidx);
+ if (!service)
+ break;
+ if (kref_get_unless_zero(&service->ref_count)) {
+ service = rcu_pointer_handoff(service);
+ break;
+ }
+ }
+ rcu_read_unlock();
+ return service;
+}
+
+void
+vchiq_service_get(struct vchiq_service *service)
+{
+ if (!service) {
+ WARN(1, "%s service is NULL\n", __func__);
+ return;
+ }
+ kref_get(&service->ref_count);
+}
+
+static void service_release(struct kref *kref)
+{
+ struct vchiq_service *service =
+ container_of(kref, struct vchiq_service, ref_count);
+ struct vchiq_state *state = service->state;
+
+ WARN_ON(service->srvstate != VCHIQ_SRVSTATE_FREE);
+ rcu_assign_pointer(state->services[service->localport], NULL);
+ if (service->userdata_term)
+ service->userdata_term(service->base.userdata);
+ kfree_rcu(service, rcu);
+}
+
+void
+vchiq_service_put(struct vchiq_service *service)
+{
+ if (!service) {
+ WARN(1, "%s: service is NULL\n", __func__);
+ return;
+ }
+ kref_put(&service->ref_count, service_release);
+}
+
+int
+vchiq_get_client_id(struct vchiq_instance *instance, unsigned int handle)
+{
+ struct vchiq_service *service;
+ int id;
+
+ rcu_read_lock();
+ service = handle_to_service(instance, handle);
+ id = service ? service->client_id : 0;
+ rcu_read_unlock();
+ return id;
+}
+
+void *
+vchiq_get_service_userdata(struct vchiq_instance *instance, unsigned int handle)
+{
+ void *userdata;
+ struct vchiq_service *service;
+
+ rcu_read_lock();
+ service = handle_to_service(instance, handle);
+ userdata = service ? service->base.userdata : NULL;
+ rcu_read_unlock();
+ return userdata;
+}
+EXPORT_SYMBOL(vchiq_get_service_userdata);
+
+static void
+mark_service_closing_internal(struct vchiq_service *service, int sh_thread)
+{
+ struct vchiq_state *state = service->state;
+ struct vchiq_service_quota *quota;
+
+ service->closing = 1;
+
+ /* Synchronise with other threads. */
+ mutex_lock(&state->recycle_mutex);
+ mutex_unlock(&state->recycle_mutex);
+ if (!sh_thread || (state->conn_state != VCHIQ_CONNSTATE_PAUSE_SENT)) {
+ /*
+ * If we're pausing then the slot_mutex is held until resume
+ * by the slot handler. Therefore don't try to acquire this
+ * mutex if we're the slot handler and in the pause sent state.
+ * We don't need to in this case anyway.
+ */
+ mutex_lock(&state->slot_mutex);
+ mutex_unlock(&state->slot_mutex);
+ }
+
+ /* Unblock any sending thread. */
+ quota = &state->service_quotas[service->localport];
+ complete(&quota->quota_event);
+}
+
+static void
+mark_service_closing(struct vchiq_service *service)
+{
+ mark_service_closing_internal(service, 0);
+}
+
+static inline int
+make_service_callback(struct vchiq_service *service, enum vchiq_reason reason,
+ struct vchiq_header *header, struct vchiq_bulk *bulk)
+{
+ void *cb_data = NULL;
+ void __user *cb_userdata = NULL;
+ int status;
+
+ /*
+ * If a bulk transfer is in progress, pass bulk->cb_*data to the
+ * callback function.
+ */
+ if (bulk) {
+ cb_data = bulk->cb_data;
+ cb_userdata = bulk->cb_userdata;
+ }
+
+ dev_dbg(service->state->dev, "core: %d: callback:%d (%s, %p, %p %p)\n",
+ service->state->id, service->localport, reason_names[reason],
+ header, cb_data, cb_userdata);
+ status = service->base.callback(service->instance, reason, header, service->handle,
+ cb_data, cb_userdata);
+ if (status && (status != -EAGAIN)) {
+ dev_warn(service->state->dev,
+ "core: %d: ignoring ERROR from callback to service %x\n",
+ service->state->id, service->handle);
+ status = 0;
+ }
+
+ if (reason != VCHIQ_MESSAGE_AVAILABLE)
+ vchiq_release_message(service->instance, service->handle, header);
+
+ return status;
+}
+
+inline void
+vchiq_set_conn_state(struct vchiq_state *state, enum vchiq_connstate newstate)
+{
+ enum vchiq_connstate oldstate = state->conn_state;
+
+ dev_dbg(state->dev, "core: %d: %s->%s\n",
+ state->id, conn_state_names[oldstate], conn_state_names[newstate]);
+ state->conn_state = newstate;
+ vchiq_platform_conn_state_changed(state, oldstate, newstate);
+}
+
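+/*
+ * Remote events implement a doorbell handshake between the two sides:
+ * a waiter sets 'armed' before sleeping on 'fired', and the signaller
+ * only rings the hardware bell when it sees 'armed', so an idle peer
+ * is not interrupted needlessly.
+ */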
+/* This initialises a single remote_event, and the associated wait_queue. */
+static inline void
+remote_event_create(wait_queue_head_t *wq, struct remote_event *event)
+{
+ event->armed = 0;
+ /*
+ * Don't clear the 'fired' flag because it may already have been set
+ * by the other side.
+ */
+ init_waitqueue_head(wq);
+}
+
+/*
+ * All the event waiting routines in VCHIQ used a custom semaphore
+ * implementation that filtered most signals. This achieved a behaviour
+ * similar to the "killable" family of functions. While cleaning up this
+ * code, all the routines were switched to the "interruptible" family of
+ * functions, as the former was deemed unjustified and the use of
+ * "killable" put all of VCHIQ's threads in D state.
+ *
+ * Returns: 0 on success, a negative error code on failure
+ */
+static inline int
+remote_event_wait(wait_queue_head_t *wq, struct remote_event *event)
+{
+ int ret = 0;
+
+ if (!event->fired) {
+ event->armed = 1;
+ dsb(sy);
+ ret = wait_event_interruptible(*wq, event->fired);
+ if (ret) {
+ event->armed = 0;
+ return ret;
+ }
+ event->armed = 0;
+ /* Ensure that the peer sees that we are not waiting (armed == 0). */
+ wmb();
+ }
+
+ event->fired = 0;
+ return ret;
+}
+
+static void
+remote_event_signal(struct vchiq_state *state, struct remote_event *event)
+{
+ struct vchiq_drv_mgmt *mgmt = dev_get_drvdata(state->dev);
+
+ /*
+ * Ensure that all writes to shared data structures have completed
+ * before signalling the peer.
+ */
+ wmb();
+
+ event->fired = 1;
+
+	dsb(sy); /* ensure 'fired' is visible before the doorbell is rung */
+
+ if (event->armed)
+ writel(0, mgmt->regs + BELL2); /* trigger vc interrupt */
+}
+
+/*
+ * Acknowledge that the event has been signalled, and wake any waiters. Usually
+ * called as a result of the doorbell being rung.
+ */
+static inline void
+remote_event_signal_local(wait_queue_head_t *wq, struct remote_event *event)
+{
+ event->fired = 1;
+ event->armed = 0;
+ wake_up_all(wq);
+}
+
+/* Check if a single event has been signalled, waking the waiters if it has. */
+static inline void
+remote_event_poll(wait_queue_head_t *wq, struct remote_event *event)
+{
+ if (event->fired && event->armed)
+ remote_event_signal_local(wq, event);
+}
+
+/*
+ * VCHIQ uses a small, fixed number of remote events. It is simplest to
+ * enumerate them here for polling.
+ */
+void
+remote_event_pollall(struct vchiq_state *state)
+{
+ remote_event_poll(&state->sync_trigger_event, &state->local->sync_trigger);
+ remote_event_poll(&state->sync_release_event, &state->local->sync_release);
+ remote_event_poll(&state->trigger_event, &state->local->trigger);
+ remote_event_poll(&state->recycle_event, &state->local->recycle);
+}
+
+/*
+ * Round up message sizes so that any space at the end of a slot is always big
+ * enough for a header. This relies on header size being a power of two, which
+ * has been verified earlier by a static assertion.
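+ * For example, with the 8-byte header a 5-byte payload strides to
+ * calc_stride(5) = 16 bytes.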
+ */
+
+static inline size_t
+calc_stride(size_t size)
+{
+ /* Allow room for the header */
+ size += sizeof(struct vchiq_header);
+
+ /* Round up */
+ return (size + sizeof(struct vchiq_header) - 1) &
+ ~(sizeof(struct vchiq_header) - 1);
+}
+
+/* Called by the slot handler thread */
+static struct vchiq_service *
+get_listening_service(struct vchiq_state *state, int fourcc)
+{
+ int i;
+
+ WARN_ON(fourcc == VCHIQ_FOURCC_INVALID);
+
+ rcu_read_lock();
+ for (i = 0; i < state->unused_service; i++) {
+ struct vchiq_service *service;
+
+ service = rcu_dereference(state->services[i]);
+ if (service &&
+ service->public_fourcc == fourcc &&
+ (service->srvstate == VCHIQ_SRVSTATE_LISTENING ||
+ (service->srvstate == VCHIQ_SRVSTATE_OPEN &&
+ service->remoteport == VCHIQ_PORT_FREE)) &&
+ kref_get_unless_zero(&service->ref_count)) {
+ service = rcu_pointer_handoff(service);
+ rcu_read_unlock();
+ return service;
+ }
+ }
+ rcu_read_unlock();
+ return NULL;
+}
+
+/* Called by the slot handler thread */
+static struct vchiq_service *
+get_connected_service(struct vchiq_state *state, unsigned int port)
+{
+ int i;
+
+ rcu_read_lock();
+ for (i = 0; i < state->unused_service; i++) {
+ struct vchiq_service *service =
+ rcu_dereference(state->services[i]);
+
+ if (service && service->srvstate == VCHIQ_SRVSTATE_OPEN &&
+ service->remoteport == port &&
+ kref_get_unless_zero(&service->ref_count)) {
+ service = rcu_pointer_handoff(service);
+ rcu_read_unlock();
+ return service;
+ }
+ }
+ rcu_read_unlock();
+ return NULL;
+}
+
+inline void
+request_poll(struct vchiq_state *state, struct vchiq_service *service,
+ int poll_type)
+{
+ u32 value;
+ int index;
+
+ if (!service)
+ goto skip_service;
+
+ do {
+ value = atomic_read(&service->poll_flags);
+ } while (atomic_cmpxchg(&service->poll_flags, value,
+ value | BIT(poll_type)) != value);
+
+ index = BITSET_WORD(service->localport);
+ do {
+ value = atomic_read(&state->poll_services[index]);
+ } while (atomic_cmpxchg(&state->poll_services[index],
+ value, value | BIT(service->localport & 0x1f)) != value);
+
+skip_service:
+ state->poll_needed = 1;
+ /* Ensure the slot handler thread sees the poll_needed flag. */
+ wmb();
+
+ /* ... and ensure the slot handler runs. */
+ remote_event_signal_local(&state->trigger_event, &state->local->trigger);
+}
+
+/*
+ * Called from queue_message, by the slot handler and application threads,
+ * with slot_mutex held
+ */
+static struct vchiq_header *
+reserve_space(struct vchiq_state *state, size_t space, int is_blocking)
+{
+ struct vchiq_shared_state *local = state->local;
+ int tx_pos = state->local_tx_pos;
+ int slot_space = VCHIQ_SLOT_SIZE - (tx_pos & VCHIQ_SLOT_MASK);
+
+ if (space > slot_space) {
+ struct vchiq_header *header;
+ /* Fill the remaining space with padding */
+ WARN_ON(!state->tx_data);
+ header = (struct vchiq_header *)
+ (state->tx_data + (tx_pos & VCHIQ_SLOT_MASK));
+ header->msgid = VCHIQ_MSGID_PADDING;
+ header->size = slot_space - sizeof(struct vchiq_header);
+
+ tx_pos += slot_space;
+ }
+
+ /* If necessary, get the next slot. */
+ if ((tx_pos & VCHIQ_SLOT_MASK) == 0) {
+ int slot_index;
+
+ /* If there is no free slot... */
+
+ if (!try_wait_for_completion(&state->slot_available_event)) {
+ /* ...wait for one. */
+
+ VCHIQ_STATS_INC(state, slot_stalls);
+
+ /* But first, flush through the last slot. */
+ state->local_tx_pos = tx_pos;
+ local->tx_pos = tx_pos;
+ remote_event_signal(state, &state->remote->trigger);
+
+ if (!is_blocking ||
+ (wait_for_completion_interruptible(&state->slot_available_event)))
+ return NULL; /* No space available */
+ }
+
+ if (tx_pos == (state->slot_queue_available * VCHIQ_SLOT_SIZE)) {
+ complete(&state->slot_available_event);
+ dev_warn(state->dev, "%s: invalid tx_pos: %d\n",
+ __func__, tx_pos);
+ return NULL;
+ }
+
+ slot_index = local->slot_queue[SLOT_QUEUE_INDEX_FROM_POS_MASKED(tx_pos)];
+ state->tx_data =
+ (char *)SLOT_DATA_FROM_INDEX(state, slot_index);
+ }
+
+ state->local_tx_pos = tx_pos + space;
+
+ return (struct vchiq_header *)(state->tx_data +
+ (tx_pos & VCHIQ_SLOT_MASK));
+}
+
+static void
+process_free_data_message(struct vchiq_state *state, u32 *service_found,
+ struct vchiq_header *header)
+{
+ int msgid = header->msgid;
+ int port = VCHIQ_MSG_SRCPORT(msgid);
+ struct vchiq_service_quota *quota = &state->service_quotas[port];
+ int count;
+
+ spin_lock(&state->quota_spinlock);
+ count = quota->message_use_count;
+ if (count > 0)
+ quota->message_use_count = count - 1;
+ spin_unlock(&state->quota_spinlock);
+
+ if (count == quota->message_quota) {
+ /*
+ * Signal the service that it
+ * has dropped below its quota
+ */
+ complete(&quota->quota_event);
+ } else if (count == 0) {
+ dev_err(state->dev,
+ "core: service %d message_use_count=%d (header %p, msgid %x, header->msgid %x, header->size %x)\n",
+ port, quota->message_use_count, header, msgid,
+ header->msgid, header->size);
+ WARN(1, "invalid message use count\n");
+ }
+ if (!BITSET_IS_SET(service_found, port)) {
+ /* Set the found bit for this service */
+ BITSET_SET(service_found, port);
+
+ spin_lock(&state->quota_spinlock);
+ count = quota->slot_use_count;
+ if (count > 0)
+ quota->slot_use_count = count - 1;
+ spin_unlock(&state->quota_spinlock);
+
+ if (count > 0) {
+ /*
+ * Signal the service in case
+ * it has dropped below its quota
+ */
+ complete(&quota->quota_event);
+ dev_dbg(state->dev, "core: %d: pfq:%d %x@%p - slot_use->%d\n",
+ state->id, port, header->size, header, count - 1);
+ } else {
+ dev_err(state->dev,
+ "core: service %d slot_use_count=%d (header %p, msgid %x, header->msgid %x, header->size %x)\n",
+ port, count, header, msgid, header->msgid, header->size);
+ WARN(1, "bad slot use count\n");
+ }
+ }
+}
+
+/* Called by the recycle thread. */
+static void
+process_free_queue(struct vchiq_state *state, u32 *service_found,
+ size_t length)
+{
+ struct vchiq_shared_state *local = state->local;
+ int slot_queue_available;
+
+ /*
+ * Find slots which have been freed by the other side, and return them
+ * to the available queue.
+ */
+ slot_queue_available = state->slot_queue_available;
+
+ /*
+ * Use a memory barrier to ensure that any state that may have been
+ * modified by another thread is not masked by stale prefetched
+ * values.
+ */
+ mb();
+
+ while (slot_queue_available != local->slot_queue_recycle) {
+ unsigned int pos;
+ int slot_index = local->slot_queue[slot_queue_available &
+ VCHIQ_SLOT_QUEUE_MASK];
+ char *data = (char *)SLOT_DATA_FROM_INDEX(state, slot_index);
+ int data_found = 0;
+
+ slot_queue_available++;
+ /*
+ * Beware of the address dependency - data is calculated
+ * using an index written by the other side.
+ */
+ rmb();
+
+ dev_dbg(state->dev, "core: %d: pfq %d=%p %x %x\n",
+ state->id, slot_index, data, local->slot_queue_recycle,
+ slot_queue_available);
+
+ /* Initialise the bitmask for services which have used this slot */
+ memset(service_found, 0, length);
+
+ pos = 0;
+
+ while (pos < VCHIQ_SLOT_SIZE) {
+ struct vchiq_header *header =
+ (struct vchiq_header *)(data + pos);
+ int msgid = header->msgid;
+
+ if (VCHIQ_MSG_TYPE(msgid) == VCHIQ_MSG_DATA) {
+ process_free_data_message(state, service_found,
+ header);
+ data_found = 1;
+ }
+
+ pos += calc_stride(header->size);
+ if (pos > VCHIQ_SLOT_SIZE) {
+ dev_err(state->dev,
+ "core: pfq - pos %x: header %p, msgid %x, header->msgid %x, header->size %x\n",
+ pos, header, msgid, header->msgid, header->size);
+ WARN(1, "invalid slot position\n");
+ }
+ }
+
+ if (data_found) {
+ int count;
+
+ spin_lock(&state->quota_spinlock);
+ count = state->data_use_count;
+ if (count > 0)
+ state->data_use_count = count - 1;
+ spin_unlock(&state->quota_spinlock);
+ if (count == state->data_quota)
+ complete(&state->data_quota_event);
+ }
+
+ /*
+ * Don't allow the slot to be reused until we are no
+ * longer interested in it.
+ */
+ mb();
+
+ state->slot_queue_available = slot_queue_available;
+ complete(&state->slot_available_event);
+ }
+}
+
+static ssize_t
+memcpy_copy_callback(void *context, void *dest, size_t offset, size_t maxsize)
+{
+ memcpy(dest + offset, context + offset, maxsize);
+ return maxsize;
+}
+
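+/*
+ * Drive a copy callback until 'size' bytes have reached 'dest',
+ * treating a zero or over-long result as an I/O error.
+ */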
+static ssize_t
+copy_message_data(ssize_t (*copy_callback)(void *context, void *dest, size_t offset,
+ size_t maxsize),
+ void *context,
+ void *dest,
+ size_t size)
+{
+ size_t pos = 0;
+
+ while (pos < size) {
+ ssize_t callback_result;
+ size_t max_bytes = size - pos;
+
+ callback_result = copy_callback(context, dest + pos, pos,
+ max_bytes);
+
+ if (callback_result < 0)
+ return callback_result;
+
+ if (!callback_result)
+ return -EIO;
+
+ if (callback_result > max_bytes)
+ return -EIO;
+
+ pos += callback_result;
+ }
+
+ return size;
+}
+
+/* Called by the slot handler and application threads */
+static int
+queue_message(struct vchiq_state *state, struct vchiq_service *service,
+ int msgid,
+ ssize_t (*copy_callback)(void *context, void *dest,
+ size_t offset, size_t maxsize),
+ void *context, size_t size, int flags)
+{
+ struct vchiq_shared_state *local;
+ struct vchiq_service_quota *quota = NULL;
+ struct vchiq_header *header;
+ int type = VCHIQ_MSG_TYPE(msgid);
+ int svc_fourcc;
+
+ size_t stride;
+
+ local = state->local;
+
+ stride = calc_stride(size);
+
+ WARN_ON(stride > VCHIQ_SLOT_SIZE);
+
+ if (!(flags & QMFLAGS_NO_MUTEX_LOCK) &&
+ mutex_lock_killable(&state->slot_mutex))
+ return -EINTR;
+
+ if (type == VCHIQ_MSG_DATA) {
+ int tx_end_index;
+
+ if (!service) {
+ WARN(1, "%s: service is NULL\n", __func__);
+ mutex_unlock(&state->slot_mutex);
+ return -EINVAL;
+ }
+
+ WARN_ON(flags & (QMFLAGS_NO_MUTEX_LOCK |
+ QMFLAGS_NO_MUTEX_UNLOCK));
+
+ if (service->closing) {
+ /* The service has been closed */
+ mutex_unlock(&state->slot_mutex);
+ return -EHOSTDOWN;
+ }
+
+ quota = &state->service_quotas[service->localport];
+
+ spin_lock(&state->quota_spinlock);
+
+ /*
+ * Ensure this service doesn't use more than its quota of
+ * messages or slots
+ */
+ tx_end_index = SLOT_QUEUE_INDEX_FROM_POS(state->local_tx_pos + stride - 1);
+
+ /*
+ * Ensure data messages don't use more than their quota of
+ * slots
+ */
+ while ((tx_end_index != state->previous_data_index) &&
+ (state->data_use_count == state->data_quota)) {
+ VCHIQ_STATS_INC(state, data_stalls);
+ spin_unlock(&state->quota_spinlock);
+ mutex_unlock(&state->slot_mutex);
+
+ if (wait_for_completion_killable(&state->data_quota_event))
+ return -EINTR;
+
+ mutex_lock(&state->slot_mutex);
+ spin_lock(&state->quota_spinlock);
+ tx_end_index = SLOT_QUEUE_INDEX_FROM_POS(state->local_tx_pos + stride - 1);
+ if ((tx_end_index == state->previous_data_index) ||
+ (state->data_use_count < state->data_quota)) {
+ /* Pass the signal on to other waiters */
+ complete(&state->data_quota_event);
+ break;
+ }
+ }
+
+ while ((quota->message_use_count == quota->message_quota) ||
+ ((tx_end_index != quota->previous_tx_index) &&
+ (quota->slot_use_count == quota->slot_quota))) {
+ spin_unlock(&state->quota_spinlock);
+ dev_dbg(state->dev,
+ "core: %d: qm:%d %s,%zx - quota stall (msg %d, slot %d)\n",
+ state->id, service->localport, msg_type_str(type), size,
+ quota->message_use_count, quota->slot_use_count);
+ VCHIQ_SERVICE_STATS_INC(service, quota_stalls);
+ mutex_unlock(&state->slot_mutex);
+ if (wait_for_completion_killable(&quota->quota_event))
+ return -EINTR;
+ if (service->closing)
+ return -EHOSTDOWN;
+ if (mutex_lock_killable(&state->slot_mutex))
+ return -EINTR;
+ if (service->srvstate != VCHIQ_SRVSTATE_OPEN) {
+ /* The service has been closed */
+ mutex_unlock(&state->slot_mutex);
+ return -EHOSTDOWN;
+ }
+ spin_lock(&state->quota_spinlock);
+ tx_end_index = SLOT_QUEUE_INDEX_FROM_POS(state->local_tx_pos + stride - 1);
+ }
+
+ spin_unlock(&state->quota_spinlock);
+ }
+
+ header = reserve_space(state, stride, flags & QMFLAGS_IS_BLOCKING);
+
+ if (!header) {
+ if (service)
+ VCHIQ_SERVICE_STATS_INC(service, slot_stalls);
+ /*
+ * In the event of a failure, return the mutex to the
+ * state it was in
+ */
+ if (!(flags & QMFLAGS_NO_MUTEX_LOCK))
+ mutex_unlock(&state->slot_mutex);
+ return -EAGAIN;
+ }
+
+ if (type == VCHIQ_MSG_DATA) {
+ ssize_t callback_result;
+ int tx_end_index;
+ int slot_use_count;
+
+ dev_dbg(state->dev, "core: %d: qm %s@%p,%zx (%d->%d)\n",
+ state->id, msg_type_str(VCHIQ_MSG_TYPE(msgid)), header, size,
+ VCHIQ_MSG_SRCPORT(msgid), VCHIQ_MSG_DSTPORT(msgid));
+
+ WARN_ON(flags & (QMFLAGS_NO_MUTEX_LOCK |
+ QMFLAGS_NO_MUTEX_UNLOCK));
+
+ callback_result =
+ copy_message_data(copy_callback, context,
+ header->data, size);
+
+ if (callback_result < 0) {
+ mutex_unlock(&state->slot_mutex);
+ VCHIQ_SERVICE_STATS_INC(service, error_count);
+ return -EINVAL;
+ }
+
+ vchiq_log_dump_mem(state->dev, "Sent", 0,
+ header->data,
+ min_t(size_t, 16, callback_result));
+
+ spin_lock(&state->quota_spinlock);
+ quota->message_use_count++;
+
+ tx_end_index =
+ SLOT_QUEUE_INDEX_FROM_POS(state->local_tx_pos - 1);
+
+ /*
+ * If this transmission can't fit in the last slot used by any
+ * service, the data_use_count must be increased.
+ */
+ if (tx_end_index != state->previous_data_index) {
+ state->previous_data_index = tx_end_index;
+ state->data_use_count++;
+ }
+
+ /*
+ * If this isn't the same slot last used by this service,
+ * the service's slot_use_count must be increased.
+ */
+ if (tx_end_index != quota->previous_tx_index) {
+ quota->previous_tx_index = tx_end_index;
+ slot_use_count = ++quota->slot_use_count;
+ } else {
+ slot_use_count = 0;
+ }
+
+ spin_unlock(&state->quota_spinlock);
+
+ if (slot_use_count)
+ dev_dbg(state->dev, "core: %d: qm:%d %s,%zx - slot_use->%d (hdr %p)\n",
+ state->id, service->localport, msg_type_str(VCHIQ_MSG_TYPE(msgid)),
+ size, slot_use_count, header);
+
+ VCHIQ_SERVICE_STATS_INC(service, ctrl_tx_count);
+ VCHIQ_SERVICE_STATS_ADD(service, ctrl_tx_bytes, size);
+ } else {
+ dev_dbg(state->dev, "core: %d: qm %s@%p,%zx (%d->%d)\n",
+ state->id, msg_type_str(VCHIQ_MSG_TYPE(msgid)), header, size,
+ VCHIQ_MSG_SRCPORT(msgid), VCHIQ_MSG_DSTPORT(msgid));
+ if (size != 0) {
+ /*
+ * It is assumed for now that this code path
+ * only happens from calls inside this file.
+ *
+ * External callers are through the vchiq_queue_message
+ * path which always sets the type to be VCHIQ_MSG_DATA
+ *
+ * At first glance this appears to be correct but
+ * more review is needed.
+ */
+ copy_message_data(copy_callback, context,
+ header->data, size);
+ }
+ VCHIQ_STATS_INC(state, ctrl_tx_count);
+ }
+
+ header->msgid = msgid;
+ header->size = size;
+
+ svc_fourcc = service ? service->base.fourcc
+ : VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
+
+ dev_dbg(state->dev, "core_msg: Sent Msg %s(%u) to %p4cc s:%u d:%d len:%zu\n",
+ msg_type_str(VCHIQ_MSG_TYPE(msgid)),
+ VCHIQ_MSG_TYPE(msgid), &svc_fourcc,
+ VCHIQ_MSG_SRCPORT(msgid), VCHIQ_MSG_DSTPORT(msgid), size);
+
+ /* Make sure the new header is visible to the peer. */
+ wmb();
+
+ /* Make the new tx_pos visible to the peer. */
+ local->tx_pos = state->local_tx_pos;
+ wmb();
+
+ if (service && (type == VCHIQ_MSG_CLOSE))
+ set_service_state(service, VCHIQ_SRVSTATE_CLOSESENT);
+
+ if (!(flags & QMFLAGS_NO_MUTEX_UNLOCK))
+ mutex_unlock(&state->slot_mutex);
+
+ remote_event_signal(state, &state->remote->trigger);
+
+ return 0;
+}
+
+/* Called by the slot handler and application threads */
+static int
+queue_message_sync(struct vchiq_state *state, struct vchiq_service *service,
+ int msgid,
+ ssize_t (*copy_callback)(void *context, void *dest,
+ size_t offset, size_t maxsize),
+ void *context, int size)
+{
+ struct vchiq_shared_state *local;
+ struct vchiq_header *header;
+ ssize_t callback_result;
+ int svc_fourcc;
+ int ret;
+
+ local = state->local;
+
+ if (VCHIQ_MSG_TYPE(msgid) != VCHIQ_MSG_RESUME &&
+ mutex_lock_killable(&state->sync_mutex))
+ return -EAGAIN;
+
+ ret = remote_event_wait(&state->sync_release_event, &local->sync_release);
+ if (ret)
+ return ret;
+
+ /* Ensure that reads don't overtake the remote_event_wait. */
+ rmb();
+
+ header = (struct vchiq_header *)SLOT_DATA_FROM_INDEX(state,
+ local->slot_sync);
+
+ {
+ int oldmsgid = header->msgid;
+
+ if (oldmsgid != VCHIQ_MSGID_PADDING)
+ dev_err(state->dev, "core: %d: qms - msgid %x, not PADDING\n",
+ state->id, oldmsgid);
+ }
+
+ dev_dbg(state->dev, "sync: %d: qms %s@%p,%x (%d->%d)\n",
+ state->id, msg_type_str(VCHIQ_MSG_TYPE(msgid)), header, size,
+ VCHIQ_MSG_SRCPORT(msgid), VCHIQ_MSG_DSTPORT(msgid));
+
+ callback_result = copy_message_data(copy_callback, context,
+ header->data, size);
+
+ if (callback_result < 0) {
+ mutex_unlock(&state->slot_mutex);
+ VCHIQ_SERVICE_STATS_INC(service, error_count);
+ return -EINVAL;
+ }
+
+ if (service) {
+ vchiq_log_dump_mem(state->dev, "Sent", 0,
+ header->data,
+ min_t(size_t, 16, callback_result));
+
+ VCHIQ_SERVICE_STATS_INC(service, ctrl_tx_count);
+ VCHIQ_SERVICE_STATS_ADD(service, ctrl_tx_bytes, size);
+ } else {
+ VCHIQ_STATS_INC(state, ctrl_tx_count);
+ }
+
+ header->size = size;
+ header->msgid = msgid;
+
+ svc_fourcc = service ? service->base.fourcc
+ : VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
+
+ dev_dbg(state->dev,
+ "sync: Sent Sync Msg %s(%u) to %p4cc s:%u d:%d len:%d\n",
+ msg_type_str(VCHIQ_MSG_TYPE(msgid)), VCHIQ_MSG_TYPE(msgid),
+ &svc_fourcc, VCHIQ_MSG_SRCPORT(msgid),
+ VCHIQ_MSG_DSTPORT(msgid), size);
+
+ remote_event_signal(state, &state->remote->sync_trigger);
+
+ if (VCHIQ_MSG_TYPE(msgid) != VCHIQ_MSG_PAUSE)
+ mutex_unlock(&state->sync_mutex);
+
+ return 0;
+}
+
+static inline void
+claim_slot(struct vchiq_slot_info *slot)
+{
+ slot->use_count++;
+}
+
+static void
+release_slot(struct vchiq_state *state, struct vchiq_slot_info *slot_info,
+ struct vchiq_header *header, struct vchiq_service *service)
+{
+ mutex_lock(&state->recycle_mutex);
+
+ if (header) {
+ int msgid = header->msgid;
+
+ if (((msgid & VCHIQ_MSGID_CLAIMED) == 0) || (service && service->closing)) {
+ mutex_unlock(&state->recycle_mutex);
+ return;
+ }
+
+ /* Rewrite the message header to prevent a double release */
+ header->msgid = msgid & ~VCHIQ_MSGID_CLAIMED;
+ }
+
+ slot_info->release_count++;
+
+ if (slot_info->release_count == slot_info->use_count) {
+ int slot_queue_recycle;
+ /* Add to the freed queue */
+
+ /*
+ * A read barrier is necessary here to prevent speculative
+ * fetches of remote->slot_queue_recycle from overtaking the
+ * mutex.
+ */
+ rmb();
+
+ slot_queue_recycle = state->remote->slot_queue_recycle;
+ state->remote->slot_queue[slot_queue_recycle &
+ VCHIQ_SLOT_QUEUE_MASK] =
+ SLOT_INDEX_FROM_INFO(state, slot_info);
+ state->remote->slot_queue_recycle = slot_queue_recycle + 1;
+ dev_dbg(state->dev, "core: %d: %d - recycle->%x\n",
+ state->id, SLOT_INDEX_FROM_INFO(state, slot_info),
+ state->remote->slot_queue_recycle);
+
+ /*
+ * A write barrier is necessary, but remote_event_signal
+ * contains one.
+ */
+ remote_event_signal(state, &state->remote->recycle);
+ }
+
+ mutex_unlock(&state->recycle_mutex);
+}
+
+static inline enum vchiq_reason
+get_bulk_reason(struct vchiq_bulk *bulk)
+{
+ if (bulk->dir == VCHIQ_BULK_TRANSMIT) {
+ if (bulk->actual == VCHIQ_BULK_ACTUAL_ABORTED)
+ return VCHIQ_BULK_TRANSMIT_ABORTED;
+
+ return VCHIQ_BULK_TRANSMIT_DONE;
+ }
+
+ if (bulk->actual == VCHIQ_BULK_ACTUAL_ABORTED)
+ return VCHIQ_BULK_RECEIVE_ABORTED;
+
+ return VCHIQ_BULK_RECEIVE_DONE;
+}
+
+static int service_notify_bulk(struct vchiq_service *service,
+ struct vchiq_bulk *bulk)
+{
+ if (bulk->actual != VCHIQ_BULK_ACTUAL_ABORTED) {
+ if (bulk->dir == VCHIQ_BULK_TRANSMIT) {
+ VCHIQ_SERVICE_STATS_INC(service, bulk_tx_count);
+ VCHIQ_SERVICE_STATS_ADD(service, bulk_tx_bytes,
+ bulk->actual);
+ } else {
+ VCHIQ_SERVICE_STATS_INC(service, bulk_rx_count);
+ VCHIQ_SERVICE_STATS_ADD(service, bulk_rx_bytes,
+ bulk->actual);
+ }
+ } else {
+ VCHIQ_SERVICE_STATS_INC(service, bulk_aborted_count);
+ }
+
+ if (bulk->mode == VCHIQ_BULK_MODE_BLOCKING) {
+ struct bulk_waiter *waiter;
+
+ spin_lock(&service->state->bulk_waiter_spinlock);
+ waiter = bulk->waiter;
+ if (waiter) {
+ waiter->actual = bulk->actual;
+ complete(&waiter->event);
+ }
+ spin_unlock(&service->state->bulk_waiter_spinlock);
+ } else if (bulk->mode == VCHIQ_BULK_MODE_CALLBACK) {
+ enum vchiq_reason reason = get_bulk_reason(bulk);
+
+ return make_service_callback(service, reason, NULL, bulk);
+ }
+
+ return 0;
+}
+
+/* Called by the slot handler - don't hold the bulk mutex */
+static int
+notify_bulks(struct vchiq_service *service, struct vchiq_bulk_queue *queue,
+ int retry_poll)
+{
+ int status = 0;
+
+ dev_dbg(service->state->dev,
+ "core: %d: nb:%d %cx - p=%x rn=%x r=%x\n",
+ service->state->id, service->localport,
+ (queue == &service->bulk_tx) ? 't' : 'r',
+ queue->process, queue->remote_notify, queue->remove);
+
+ queue->remote_notify = queue->process;
+
+ while (queue->remove != queue->remote_notify) {
+ struct vchiq_bulk *bulk =
+ &queue->bulks[BULK_INDEX(queue->remove)];
+
+ /*
+ * Only generate callbacks for non-dummy bulk
+ * requests, and non-terminated services
+ */
+ if (bulk->dma_addr && service->instance) {
+ status = service_notify_bulk(service, bulk);
+ if (status == -EAGAIN)
+ break;
+ }
+
+ queue->remove++;
+ complete(&service->bulk_remove_event);
+ }
+ if (!retry_poll)
+ status = 0;
+
+ if (status == -EAGAIN)
+ request_poll(service->state, service, (queue == &service->bulk_tx) ?
+ VCHIQ_POLL_TXNOTIFY : VCHIQ_POLL_RXNOTIFY);
+
+ return status;
+}
+
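+/* Each group covers one 32-bit word of the per-service poll bitset. */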
+static void
+poll_services_of_group(struct vchiq_state *state, int group)
+{
+ u32 flags = atomic_xchg(&state->poll_services[group], 0);
+ int i;
+
+ for (i = 0; flags; i++) {
+ struct vchiq_service *service;
+ u32 service_flags;
+
+ if ((flags & BIT(i)) == 0)
+ continue;
+
+ service = find_service_by_port(state, (group << 5) + i);
+ flags &= ~BIT(i);
+
+ if (!service)
+ continue;
+
+ service_flags = atomic_xchg(&service->poll_flags, 0);
+ if (service_flags & BIT(VCHIQ_POLL_REMOVE)) {
+ dev_dbg(state->dev, "core: %d: ps - remove %d<->%d\n",
+ state->id, service->localport, service->remoteport);
+
+ /*
+ * Make it look like a client, because
+ * it must be removed and not left in
+ * the LISTENING state.
+ */
+ service->public_fourcc = VCHIQ_FOURCC_INVALID;
+
+ if (vchiq_close_service_internal(service, NO_CLOSE_RECVD))
+ request_poll(state, service, VCHIQ_POLL_REMOVE);
+ } else if (service_flags & BIT(VCHIQ_POLL_TERMINATE)) {
+ dev_dbg(state->dev, "core: %d: ps - terminate %d<->%d\n",
+ state->id, service->localport, service->remoteport);
+ if (vchiq_close_service_internal(service, NO_CLOSE_RECVD))
+ request_poll(state, service, VCHIQ_POLL_TERMINATE);
+ }
+ if (service_flags & BIT(VCHIQ_POLL_TXNOTIFY))
+ notify_bulks(service, &service->bulk_tx, RETRY_POLL);
+ if (service_flags & BIT(VCHIQ_POLL_RXNOTIFY))
+ notify_bulks(service, &service->bulk_rx, RETRY_POLL);
+ vchiq_service_put(service);
+ }
+}
+
+/* Called by the slot handler thread */
+static void
+poll_services(struct vchiq_state *state)
+{
+ int group;
+
+ for (group = 0; group < BITSET_SIZE(state->unused_service); group++)
+ poll_services_of_group(state, group);
+}
+
+static void
+cleanup_pagelistinfo(struct vchiq_instance *instance, struct vchiq_pagelist_info *pagelistinfo)
+{
+ if (pagelistinfo->scatterlist_mapped) {
+ dma_unmap_sg(instance->state->dev, pagelistinfo->scatterlist,
+ pagelistinfo->num_pages, pagelistinfo->dma_dir);
+ }
+
+ if (pagelistinfo->pages_need_release)
+ unpin_user_pages(pagelistinfo->pages, pagelistinfo->num_pages);
+
+ dma_free_coherent(instance->state->dev, pagelistinfo->pagelist_buffer_size,
+ pagelistinfo->pagelist, pagelistinfo->dma_addr);
+}
+
+static inline bool
+is_adjacent_block(u32 *addrs, dma_addr_t addr, unsigned int k)
+{
+ u32 tmp;
+
+ if (!k)
+ return false;
+
+ tmp = (addrs[k - 1] & PAGE_MASK) +
+ (((addrs[k - 1] & ~PAGE_MASK) + 1) << PAGE_SHIFT);
+
+ return tmp == (addr & PAGE_MASK);
+}
+
+/* There is a potential problem with partial cache lines (pages?)
+ * at the ends of the block when reading. If the CPU accessed anything in
+ * the same line (page?) then it may have pulled old data into the cache,
+ * obscuring the new data underneath. We can solve this by transferring the
+ * partial cache lines separately, and allowing the ARM to copy into the
+ * cached area.
+ */
+static struct vchiq_pagelist_info *
+create_pagelist(struct vchiq_instance *instance, struct vchiq_bulk *bulk)
+{
+ struct vchiq_drv_mgmt *drv_mgmt;
+ struct pagelist *pagelist;
+ struct vchiq_pagelist_info *pagelistinfo;
+ struct page **pages;
+ u32 *addrs;
+ unsigned int num_pages, offset, i, k;
+ int actual_pages;
+ size_t pagelist_size;
+ struct scatterlist *scatterlist, *sg;
+ int dma_buffers;
+ unsigned int cache_line_size;
+ dma_addr_t dma_addr;
+ size_t count = bulk->size;
+ unsigned short type = (bulk->dir == VCHIQ_BULK_RECEIVE)
+ ? PAGELIST_READ : PAGELIST_WRITE;
+
+ if (count >= INT_MAX - PAGE_SIZE)
+ return NULL;
+
+ drv_mgmt = dev_get_drvdata(instance->state->dev);
+
+ if (bulk->offset)
+ offset = (uintptr_t)bulk->offset & (PAGE_SIZE - 1);
+ else
+ offset = (uintptr_t)bulk->uoffset & (PAGE_SIZE - 1);
+ num_pages = DIV_ROUND_UP(count + offset, PAGE_SIZE);
+
+ if ((size_t)num_pages > (SIZE_MAX - sizeof(struct pagelist) -
+ sizeof(struct vchiq_pagelist_info)) /
+ (sizeof(u32) + sizeof(pages[0]) +
+ sizeof(struct scatterlist)))
+ return NULL;
+
+ pagelist_size = sizeof(struct pagelist) +
+ (num_pages * sizeof(u32)) +
+ (num_pages * sizeof(pages[0]) +
+ (num_pages * sizeof(struct scatterlist))) +
+ sizeof(struct vchiq_pagelist_info);
+
+ /* Allocate enough storage to hold the page pointers and the page
+ * list
+ */
+ pagelist = dma_alloc_coherent(instance->state->dev, pagelist_size, &dma_addr,
+ GFP_KERNEL);
+
+ dev_dbg(instance->state->dev, "arm: %p\n", pagelist);
+
+ if (!pagelist)
+ return NULL;
+
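+	/*
+	 * The single coherent allocation is laid out as the pagelist itself
+	 * (with its addrs[] array), then the page pointer array, the
+	 * scatterlist and finally the pagelistinfo bookkeeping struct.
+	 */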
+ addrs = pagelist->addrs;
+ pages = (struct page **)(addrs + num_pages);
+ scatterlist = (struct scatterlist *)(pages + num_pages);
+ pagelistinfo = (struct vchiq_pagelist_info *)
+ (scatterlist + num_pages);
+
+ pagelist->length = count;
+ pagelist->type = type;
+ pagelist->offset = offset;
+
+ /* Populate the fields of the pagelistinfo structure */
+ pagelistinfo->pagelist = pagelist;
+ pagelistinfo->pagelist_buffer_size = pagelist_size;
+ pagelistinfo->dma_addr = dma_addr;
+ pagelistinfo->dma_dir = (type == PAGELIST_WRITE) ?
+ DMA_TO_DEVICE : DMA_FROM_DEVICE;
+ pagelistinfo->num_pages = num_pages;
+ pagelistinfo->pages_need_release = 0;
+ pagelistinfo->pages = pages;
+ pagelistinfo->scatterlist = scatterlist;
+ pagelistinfo->scatterlist_mapped = 0;
+
+ if (bulk->offset) {
+ unsigned long length = count;
+ unsigned int off = offset;
+
+ for (actual_pages = 0; actual_pages < num_pages;
+ actual_pages++) {
+ struct page *pg =
+				vmalloc_to_page((char *)bulk->offset +
+						(actual_pages * PAGE_SIZE));
+ size_t bytes = PAGE_SIZE - off;
+
+ if (!pg) {
+ cleanup_pagelistinfo(instance, pagelistinfo);
+ return NULL;
+ }
+
+ if (bytes > length)
+ bytes = length;
+ pages[actual_pages] = pg;
+ length -= bytes;
+ off = 0;
+ }
+ /* do not try and release vmalloc pages */
+ } else {
+ actual_pages =
+ pin_user_pages_fast((unsigned long)bulk->uoffset & PAGE_MASK, num_pages,
+ type == PAGELIST_READ, pages);
+
+ if (actual_pages != num_pages) {
+ dev_dbg(instance->state->dev, "arm: Only %d/%d pages locked\n",
+ actual_pages, num_pages);
+
+ /* This is probably due to the process being killed */
+ if (actual_pages > 0)
+ unpin_user_pages(pages, actual_pages);
+ cleanup_pagelistinfo(instance, pagelistinfo);
+ return NULL;
+ }
+ /* release user pages */
+ pagelistinfo->pages_need_release = 1;
+ }
+
+ /*
+ * Initialize the scatterlist so that the magic cookie
+ * is filled if debugging is enabled
+ */
+ sg_init_table(scatterlist, num_pages);
+ /* Now set the pages for each scatterlist */
+ for (i = 0; i < num_pages; i++) {
+ unsigned int len = PAGE_SIZE - offset;
+
+ if (len > count)
+ len = count;
+ sg_set_page(scatterlist + i, pages[i], len, offset);
+ offset = 0;
+ count -= len;
+ }
+
+ dma_buffers = dma_map_sg(instance->state->dev,
+ scatterlist,
+ num_pages,
+ pagelistinfo->dma_dir);
+
+ if (dma_buffers == 0) {
+ cleanup_pagelistinfo(instance, pagelistinfo);
+ return NULL;
+ }
+
+ pagelistinfo->scatterlist_mapped = 1;
+
+ /* Combine adjacent blocks for performance */
+ k = 0;
+ for_each_sg(scatterlist, sg, dma_buffers, i) {
+ unsigned int len = sg_dma_len(sg);
+ dma_addr_t addr = sg_dma_address(sg);
+
+		/* Note: each addrs[] entry is the page-aligned address OR'd
+		 * with (page count - 1) in the low bits.
+		 * The firmware expects blocks after the first to be page-
+		 * aligned and a multiple of the page size.
+		 */
+ WARN_ON(len == 0);
+ WARN_ON(i && (i != (dma_buffers - 1)) && (len & ~PAGE_MASK));
+ WARN_ON(i && (addr & ~PAGE_MASK));
+ if (is_adjacent_block(addrs, addr, k))
+ addrs[k - 1] += ((len + PAGE_SIZE - 1) >> PAGE_SHIFT);
+ else
+ addrs[k++] = (addr & PAGE_MASK) |
+ (((len + PAGE_SIZE - 1) >> PAGE_SHIFT) - 1);
+ }
+
+ /* Partial cache lines (fragments) require special measures */
+ cache_line_size = drv_mgmt->info->cache_line_size;
+ if ((type == PAGELIST_READ) &&
+ ((pagelist->offset & (cache_line_size - 1)) ||
+ ((pagelist->offset + pagelist->length) & (cache_line_size - 1)))) {
+ char *fragments;
+
+ if (down_interruptible(&drv_mgmt->free_fragments_sema)) {
+ cleanup_pagelistinfo(instance, pagelistinfo);
+ return NULL;
+ }
+
+ WARN_ON(!drv_mgmt->free_fragments);
+
+ down(&drv_mgmt->free_fragments_mutex);
+ fragments = drv_mgmt->free_fragments;
+ WARN_ON(!fragments);
+ drv_mgmt->free_fragments = *(char **)drv_mgmt->free_fragments;
+ up(&drv_mgmt->free_fragments_mutex);
+ pagelist->type = PAGELIST_READ_WITH_FRAGMENTS +
+ (fragments - drv_mgmt->fragments_base) / drv_mgmt->fragments_size;
+ }
+
+ return pagelistinfo;
+}
+
+static void
+free_pagelist(struct vchiq_instance *instance, struct vchiq_pagelist_info *pagelistinfo,
+ int actual)
+{
+ struct vchiq_drv_mgmt *drv_mgmt;
+ struct pagelist *pagelist = pagelistinfo->pagelist;
+ struct page **pages = pagelistinfo->pages;
+ unsigned int num_pages = pagelistinfo->num_pages;
+ unsigned int cache_line_size;
+
+ dev_dbg(instance->state->dev, "arm: %p, %d\n", pagelistinfo->pagelist, actual);
+
+ drv_mgmt = dev_get_drvdata(instance->state->dev);
+
+ /*
+ * NOTE: dma_unmap_sg must be called before the
+ * cpu can touch any of the data/pages.
+ */
+ dma_unmap_sg(instance->state->dev, pagelistinfo->scatterlist,
+ pagelistinfo->num_pages, pagelistinfo->dma_dir);
+ pagelistinfo->scatterlist_mapped = 0;
+
+ /* Deal with any partial cache lines (fragments) */
+ cache_line_size = drv_mgmt->info->cache_line_size;
+ if (pagelist->type >= PAGELIST_READ_WITH_FRAGMENTS && drv_mgmt->fragments_base) {
+ char *fragments = drv_mgmt->fragments_base +
+ (pagelist->type - PAGELIST_READ_WITH_FRAGMENTS) *
+ drv_mgmt->fragments_size;
+ int head_bytes, tail_bytes;
+
+ head_bytes = (cache_line_size - pagelist->offset) &
+ (cache_line_size - 1);
+ tail_bytes = (pagelist->offset + actual) &
+ (cache_line_size - 1);
+
+ if ((actual >= 0) && (head_bytes != 0)) {
+ if (head_bytes > actual)
+ head_bytes = actual;
+
+ memcpy_to_page(pages[0], pagelist->offset,
+ fragments, head_bytes);
+ }
+ if ((actual >= 0) && (head_bytes < actual) &&
+ (tail_bytes != 0))
+ memcpy_to_page(pages[num_pages - 1],
+ (pagelist->offset + actual) &
+ (PAGE_SIZE - 1) & ~(cache_line_size - 1),
+ fragments + cache_line_size,
+ tail_bytes);
+
+ down(&drv_mgmt->free_fragments_mutex);
+ *(char **)fragments = drv_mgmt->free_fragments;
+ drv_mgmt->free_fragments = fragments;
+ up(&drv_mgmt->free_fragments_mutex);
+ up(&drv_mgmt->free_fragments_sema);
+ }
+
+ /* Need to mark all the pages dirty. */
+ if (pagelist->type != PAGELIST_WRITE &&
+ pagelistinfo->pages_need_release) {
+ unsigned int i;
+
+ for (i = 0; i < num_pages; i++)
+ set_page_dirty(pages[i]);
+ }
+
+ cleanup_pagelistinfo(instance, pagelistinfo);
+}
+
+static int
+vchiq_prepare_bulk_data(struct vchiq_instance *instance, struct vchiq_bulk *bulk)
+{
+ struct vchiq_pagelist_info *pagelistinfo;
+
+ pagelistinfo = create_pagelist(instance, bulk);
+
+ if (!pagelistinfo)
+ return -ENOMEM;
+
+ bulk->dma_addr = pagelistinfo->dma_addr;
+
+ /*
+ * Store the pagelistinfo address in remote_data,
+ * which isn't used by the slave.
+ */
+ bulk->remote_data = pagelistinfo;
+
+ return 0;
+}
+
+static void
+vchiq_complete_bulk(struct vchiq_instance *instance, struct vchiq_bulk *bulk)
+{
+ if (bulk && bulk->remote_data && bulk->actual)
+ free_pagelist(instance, (struct vchiq_pagelist_info *)bulk->remote_data,
+ bulk->actual);
+}
+
+/* Called with the bulk_mutex held */
+static void
+abort_outstanding_bulks(struct vchiq_service *service,
+ struct vchiq_bulk_queue *queue)
+{
+ int is_tx = (queue == &service->bulk_tx);
+
+ dev_dbg(service->state->dev,
+ "core: %d: aob:%d %cx - li=%x ri=%x p=%x\n",
+ service->state->id, service->localport,
+ is_tx ? 't' : 'r', queue->local_insert,
+ queue->remote_insert, queue->process);
+
+ WARN_ON((int)(queue->local_insert - queue->process) < 0);
+ WARN_ON((int)(queue->remote_insert - queue->process) < 0);
+
+ while ((queue->process != queue->local_insert) ||
+ (queue->process != queue->remote_insert)) {
+ struct vchiq_bulk *bulk = &queue->bulks[BULK_INDEX(queue->process)];
+
+ if (queue->process == queue->remote_insert) {
+ /* fabricate a matching dummy bulk */
+ bulk->remote_data = NULL;
+ bulk->remote_size = 0;
+ queue->remote_insert++;
+ }
+
+ if (queue->process != queue->local_insert) {
+ vchiq_complete_bulk(service->instance, bulk);
+
+ dev_dbg(service->state->dev,
+ "core_msg: %s %p4cc d:%d ABORTED - tx len:%d, rx len:%d\n",
+ is_tx ? "Send Bulk to" : "Recv Bulk from",
+ &service->base.fourcc,
+ service->remoteport, bulk->size, bulk->remote_size);
+ } else {
+ /* fabricate a matching dummy bulk */
+ bulk->dma_addr = 0;
+ bulk->size = 0;
+ bulk->actual = VCHIQ_BULK_ACTUAL_ABORTED;
+ bulk->dir = is_tx ? VCHIQ_BULK_TRANSMIT :
+ VCHIQ_BULK_RECEIVE;
+ queue->local_insert++;
+ }
+
+ queue->process++;
+ }
+}
+
+static int
+parse_open(struct vchiq_state *state, struct vchiq_header *header)
+{
+ const struct vchiq_open_payload *payload;
+ struct vchiq_openack_payload ack_payload;
+ struct vchiq_service *service = NULL;
+ int msgid, size;
+ int openack_id;
+ unsigned int localport, remoteport, fourcc;
+ short version, version_min;
+
+ msgid = header->msgid;
+ size = header->size;
+ localport = VCHIQ_MSG_DSTPORT(msgid);
+ remoteport = VCHIQ_MSG_SRCPORT(msgid);
+ if (size < sizeof(struct vchiq_open_payload))
+ goto fail_open;
+
+ payload = (struct vchiq_open_payload *)header->data;
+ fourcc = payload->fourcc;
+ dev_dbg(state->dev, "core: %d: prs OPEN@%p (%d->'%p4cc')\n",
+ state->id, header, localport, &fourcc);
+
+ service = get_listening_service(state, fourcc);
+ if (!service)
+ goto fail_open;
+
+ /* A matching service exists */
+ version = payload->version;
+ version_min = payload->version_min;
+
+ if ((service->version < version_min) || (version < service->version_min)) {
+ /* Version mismatch */
+ dev_err(state->dev, "%d: service %d (%p4cc) version mismatch - local (%d, min %d) vs. remote (%d, min %d)",
+ state->id, service->localport, &fourcc,
+ service->version, service->version_min, version, version_min);
+ vchiq_service_put(service);
+ service = NULL;
+ goto fail_open;
+ }
+ service->peer_version = version;
+
+ if (service->srvstate != VCHIQ_SRVSTATE_LISTENING)
+ goto done;
+
+ ack_payload.version = service->version;
+ openack_id = MAKE_OPENACK(service->localport, remoteport);
+
+ if (state->version_common < VCHIQ_VERSION_SYNCHRONOUS_MODE)
+ service->sync = 0;
+
+ /* Acknowledge the OPEN */
+ if (service->sync) {
+ if (queue_message_sync(state, NULL, openack_id,
+ memcpy_copy_callback,
+ &ack_payload,
+ sizeof(ack_payload)) == -EAGAIN)
+ goto bail_not_ready;
+
+ /* The service is now open */
+ set_service_state(service, VCHIQ_SRVSTATE_OPENSYNC);
+ } else {
+ if (queue_message(state, NULL, openack_id,
+ memcpy_copy_callback, &ack_payload,
+ sizeof(ack_payload), 0) == -EINTR)
+ goto bail_not_ready;
+
+ /* The service is now open */
+ set_service_state(service, VCHIQ_SRVSTATE_OPEN);
+ }
+
+done:
+ /* Success - the message has been dealt with */
+ vchiq_service_put(service);
+ return 1;
+
+fail_open:
+ /* No available service, or an invalid request - send a CLOSE */
+ if (queue_message(state, NULL, MAKE_CLOSE(0, VCHIQ_MSG_SRCPORT(msgid)),
+ NULL, NULL, 0, 0) == -EINTR)
+ goto bail_not_ready;
+
+ return 1;
+
+bail_not_ready:
+ if (service)
+ vchiq_service_put(service);
+
+ return 0;
+}
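+
+/*
+ * Resulting handshake, as implemented above (trace is illustrative;
+ * payloads follow struct vchiq_open_payload/vchiq_openack_payload):
+ *
+ *	peer  -> local  OPEN    { fourcc, client_id, version, version_min }
+ *	local -> peer   OPENACK { version }	service enters OPEN/OPENSYNC
+ *	local -> peer   CLOSE			no listening service, short
+ *						payload or version mismatch
+ */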
+
+/**
+ * parse_message() - parses a single message from the rx slot
+ * @state: vchiq state struct
+ * @header: message header
+ *
+ * Context: Process context
+ *
+ * Return:
+ * * >= 0 - size of the parsed message payload (without header)
+ * * -EINVAL - a fatal error occurred and the caller must bail out
+ */
+static int
+parse_message(struct vchiq_state *state, struct vchiq_header *header)
+{
+ struct vchiq_service *service = NULL;
+ unsigned int localport, remoteport;
+ int msgid, size, type, ret = -EINVAL;
+ int svc_fourcc;
+
+ DEBUG_INITIALISE(state->local);
+
+ DEBUG_VALUE(PARSE_HEADER, (int)(long)header);
+ msgid = header->msgid;
+ DEBUG_VALUE(PARSE_MSGID, msgid);
+ size = header->size;
+ type = VCHIQ_MSG_TYPE(msgid);
+ localport = VCHIQ_MSG_DSTPORT(msgid);
+ remoteport = VCHIQ_MSG_SRCPORT(msgid);
+
+ if (type != VCHIQ_MSG_DATA)
+ VCHIQ_STATS_INC(state, ctrl_rx_count);
+
+ switch (type) {
+ case VCHIQ_MSG_OPENACK:
+ case VCHIQ_MSG_CLOSE:
+ case VCHIQ_MSG_DATA:
+ case VCHIQ_MSG_BULK_RX:
+ case VCHIQ_MSG_BULK_TX:
+ case VCHIQ_MSG_BULK_RX_DONE:
+ case VCHIQ_MSG_BULK_TX_DONE:
+ service = find_service_by_port(state, localport);
+ if ((!service ||
+ ((service->remoteport != remoteport) &&
+ (service->remoteport != VCHIQ_PORT_FREE))) &&
+ (localport == 0) &&
+ (type == VCHIQ_MSG_CLOSE)) {
+ /*
+ * This could be a CLOSE from a client which
+ * hadn't yet received the OPENACK - look for
+ * the connected service
+ */
+ if (service)
+ vchiq_service_put(service);
+ service = get_connected_service(state, remoteport);
+ if (service)
+ dev_warn(state->dev,
+ "core: %d: prs %s@%p (%d->%d) - found connected service %d\n",
+ state->id, msg_type_str(type), header,
+ remoteport, localport, service->localport);
+ }
+
+ if (!service) {
+ dev_err(state->dev,
+ "core: %d: prs %s@%p (%d->%d) - invalid/closed service %d\n",
+ state->id, msg_type_str(type), header, remoteport,
+ localport, localport);
+ goto skip_message;
+ }
+ break;
+ default:
+ break;
+ }
+
+ svc_fourcc = service ? service->base.fourcc
+ : VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
+
+ dev_dbg(state->dev, "core_msg: Rcvd Msg %s(%u) from %p4cc s:%d d:%d len:%d\n",
+ msg_type_str(type), type, &svc_fourcc, remoteport, localport, size);
+ if (size > 0)
+ vchiq_log_dump_mem(state->dev, "Rcvd", 0, header->data, min(16, size));
+
+ if (((unsigned long)header & VCHIQ_SLOT_MASK) +
+ calc_stride(size) > VCHIQ_SLOT_SIZE) {
+ dev_err(state->dev, "core: header %p (msgid %x) - size %x too big for slot\n",
+ header, (unsigned int)msgid, (unsigned int)size);
+ WARN(1, "oversized for slot\n");
+ }
+
+ switch (type) {
+ case VCHIQ_MSG_OPEN:
+ WARN_ON(VCHIQ_MSG_DSTPORT(msgid));
+ if (!parse_open(state, header))
+ goto bail_not_ready;
+ break;
+ case VCHIQ_MSG_OPENACK:
+ if (size >= sizeof(struct vchiq_openack_payload)) {
+ const struct vchiq_openack_payload *payload =
+ (struct vchiq_openack_payload *)
+ header->data;
+ service->peer_version = payload->version;
+ }
+ dev_dbg(state->dev,
+ "core: %d: prs OPENACK@%p,%x (%d->%d) v:%d\n",
+ state->id, header, size, remoteport, localport,
+ service->peer_version);
+ if (service->srvstate == VCHIQ_SRVSTATE_OPENING) {
+ service->remoteport = remoteport;
+ set_service_state(service, VCHIQ_SRVSTATE_OPEN);
+ complete(&service->remove_event);
+ } else {
+ dev_err(state->dev, "core: OPENACK received in state %s\n",
+ srvstate_names[service->srvstate]);
+ }
+ break;
+ case VCHIQ_MSG_CLOSE:
+ WARN_ON(size); /* There should be no data */
+
+ dev_dbg(state->dev, "core: %d: prs CLOSE@%p (%d->%d)\n",
+ state->id, header, remoteport, localport);
+
+ mark_service_closing_internal(service, 1);
+
+ if (vchiq_close_service_internal(service, CLOSE_RECVD) == -EAGAIN)
+ goto bail_not_ready;
+
+ dev_dbg(state->dev, "core: Close Service %p4cc s:%u d:%d\n",
+ &service->base.fourcc, service->localport, service->remoteport);
+ break;
+ case VCHIQ_MSG_DATA:
+ dev_dbg(state->dev, "core: %d: prs DATA@%p,%x (%d->%d)\n",
+ state->id, header, size, remoteport, localport);
+
+ if ((service->remoteport == remoteport) &&
+ (service->srvstate == VCHIQ_SRVSTATE_OPEN)) {
+ header->msgid = msgid | VCHIQ_MSGID_CLAIMED;
+ claim_slot(state->rx_info);
+ DEBUG_TRACE(PARSE_LINE);
+ if (make_service_callback(service, VCHIQ_MESSAGE_AVAILABLE, header,
+ NULL) == -EAGAIN) {
+ DEBUG_TRACE(PARSE_LINE);
+ goto bail_not_ready;
+ }
+ VCHIQ_SERVICE_STATS_INC(service, ctrl_rx_count);
+ VCHIQ_SERVICE_STATS_ADD(service, ctrl_rx_bytes, size);
+ } else {
+ VCHIQ_STATS_INC(state, error_count);
+ }
+ break;
+ case VCHIQ_MSG_CONNECT:
+ dev_dbg(state->dev, "core: %d: prs CONNECT@%p\n",
+ state->id, header);
+ state->version_common = ((struct vchiq_slot_zero *)
+ state->slot_data)->version;
+ complete(&state->connect);
+ break;
+ case VCHIQ_MSG_BULK_RX:
+ case VCHIQ_MSG_BULK_TX:
+ /*
+ * We should never receive a bulk request from the
+	 * other side, since we're not set up to perform as the
+ * master.
+ */
+ WARN_ON(1);
+ break;
+ case VCHIQ_MSG_BULK_RX_DONE:
+ case VCHIQ_MSG_BULK_TX_DONE:
+ if ((service->remoteport == remoteport) &&
+ (service->srvstate != VCHIQ_SRVSTATE_FREE)) {
+ struct vchiq_bulk_queue *queue;
+ struct vchiq_bulk *bulk;
+
+ queue = (type == VCHIQ_MSG_BULK_RX_DONE) ?
+ &service->bulk_rx : &service->bulk_tx;
+
+ DEBUG_TRACE(PARSE_LINE);
+ if (mutex_lock_killable(&service->bulk_mutex)) {
+ DEBUG_TRACE(PARSE_LINE);
+ goto bail_not_ready;
+ }
+ if ((int)(queue->remote_insert -
+ queue->local_insert) >= 0) {
+ dev_err(state->dev,
+ "core: %d: prs %s@%p (%d->%d) unexpected (ri=%d,li=%d)\n",
+ state->id, msg_type_str(type), header, remoteport,
+ localport, queue->remote_insert, queue->local_insert);
+ mutex_unlock(&service->bulk_mutex);
+ break;
+ }
+ if (queue->process != queue->remote_insert) {
+ dev_err(state->dev, "%s: p %x != ri %x\n",
+ __func__, queue->process,
+ queue->remote_insert);
+ mutex_unlock(&service->bulk_mutex);
+ goto bail_not_ready;
+ }
+
+ bulk = &queue->bulks[BULK_INDEX(queue->remote_insert)];
+ bulk->actual = *(int *)header->data;
+ queue->remote_insert++;
+
+ dev_dbg(state->dev, "core: %d: prs %s@%p (%d->%d) %x@%pad\n",
+ state->id, msg_type_str(type), header, remoteport,
+ localport, bulk->actual, &bulk->dma_addr);
+
+ dev_dbg(state->dev, "core: %d: prs:%d %cx li=%x ri=%x p=%x\n",
+ state->id, localport,
+ (type == VCHIQ_MSG_BULK_RX_DONE) ? 'r' : 't',
+ queue->local_insert, queue->remote_insert, queue->process);
+
+ DEBUG_TRACE(PARSE_LINE);
+ WARN_ON(queue->process == queue->local_insert);
+ vchiq_complete_bulk(service->instance, bulk);
+ queue->process++;
+ mutex_unlock(&service->bulk_mutex);
+ DEBUG_TRACE(PARSE_LINE);
+ notify_bulks(service, queue, RETRY_POLL);
+ DEBUG_TRACE(PARSE_LINE);
+ }
+ break;
+ case VCHIQ_MSG_PADDING:
+ dev_dbg(state->dev, "core: %d: prs PADDING@%p,%x\n",
+ state->id, header, size);
+ break;
+ case VCHIQ_MSG_PAUSE:
+ /* If initiated, signal the application thread */
+ dev_dbg(state->dev, "core: %d: prs PAUSE@%p,%x\n",
+ state->id, header, size);
+ if (state->conn_state == VCHIQ_CONNSTATE_PAUSED) {
+ dev_err(state->dev, "core: %d: PAUSE received in state PAUSED\n",
+ state->id);
+ break;
+ }
+ if (state->conn_state != VCHIQ_CONNSTATE_PAUSE_SENT) {
+ /* Send a PAUSE in response */
+ if (queue_message(state, NULL, MAKE_PAUSE, NULL, NULL, 0,
+ QMFLAGS_NO_MUTEX_UNLOCK) == -EINTR)
+ goto bail_not_ready;
+ }
+ /* At this point slot_mutex is held */
+ vchiq_set_conn_state(state, VCHIQ_CONNSTATE_PAUSED);
+ break;
+ case VCHIQ_MSG_RESUME:
+ dev_dbg(state->dev, "core: %d: prs RESUME@%p,%x\n",
+ state->id, header, size);
+ /* Release the slot mutex */
+ mutex_unlock(&state->slot_mutex);
+ vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTED);
+ break;
+
+ case VCHIQ_MSG_REMOTE_USE:
+ vchiq_on_remote_use(state);
+ break;
+ case VCHIQ_MSG_REMOTE_RELEASE:
+ vchiq_on_remote_release(state);
+ break;
+ case VCHIQ_MSG_REMOTE_USE_ACTIVE:
+ break;
+
+ default:
+ dev_err(state->dev, "core: %d: prs invalid msgid %x@%p,%x\n",
+ state->id, msgid, header, size);
+ WARN(1, "invalid message\n");
+ break;
+ }
+
+skip_message:
+ ret = size;
+
+bail_not_ready:
+ if (service)
+ vchiq_service_put(service);
+
+ return ret;
+}
+
+/* Called by the slot handler thread */
+static void
+parse_rx_slots(struct vchiq_state *state)
+{
+ struct vchiq_shared_state *remote = state->remote;
+ int tx_pos;
+
+ DEBUG_INITIALISE(state->local);
+
+ tx_pos = remote->tx_pos;
+
+ while (state->rx_pos != tx_pos) {
+ struct vchiq_header *header;
+ int size;
+
+ DEBUG_TRACE(PARSE_LINE);
+ if (!state->rx_data) {
+ int rx_index;
+
+ WARN_ON(state->rx_pos & VCHIQ_SLOT_MASK);
+ rx_index = remote->slot_queue[
+ SLOT_QUEUE_INDEX_FROM_POS_MASKED(state->rx_pos)];
+ state->rx_data = (char *)SLOT_DATA_FROM_INDEX(state,
+ rx_index);
+ state->rx_info = SLOT_INFO_FROM_INDEX(state, rx_index);
+
+ /*
+ * Initialise use_count to one, and increment
+ * release_count at the end of the slot to avoid
+ * releasing the slot prematurely.
+ */
+ state->rx_info->use_count = 1;
+ state->rx_info->release_count = 0;
+ }
+
+ header = (struct vchiq_header *)(state->rx_data +
+ (state->rx_pos & VCHIQ_SLOT_MASK));
+ size = parse_message(state, header);
+ if (size < 0)
+ return;
+
+ state->rx_pos += calc_stride(size);
+
+ DEBUG_TRACE(PARSE_LINE);
+ /*
+ * Perform some housekeeping when the end of the slot is
+ * reached.
+ */
+ if ((state->rx_pos & VCHIQ_SLOT_MASK) == 0) {
+ /* Remove the extra reference count. */
+ release_slot(state, state->rx_info, NULL, NULL);
+ state->rx_data = NULL;
+ }
+ }
+}
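+
+/*
+ * Worked example of the slot accounting above (counts illustrative):
+ * a freshly entered slot starts with use_count = 1 - the extra
+ * reference taken here - and release_count = 0. Each claimed DATA
+ * message bumps use_count via claim_slot(); each release bumps
+ * release_count. Stepping past the end of the slot drops the extra
+ * reference, and the slot is only recycled once
+ * release_count == use_count.
+ */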
+
+/**
+ * handle_poll() - handle service polling and other rare conditions
+ * @state: vchiq state struct
+ *
+ * Context: Process context
+ *
+ * Return:
+ * * 0 - poll handled successfully
+ * * -EAGAIN - retry later
+ */
+static int
+handle_poll(struct vchiq_state *state)
+{
+ switch (state->conn_state) {
+ case VCHIQ_CONNSTATE_CONNECTED:
+ /* Poll the services as requested */
+ poll_services(state);
+ break;
+
+ case VCHIQ_CONNSTATE_PAUSING:
+ if (queue_message(state, NULL, MAKE_PAUSE, NULL, NULL, 0,
+ QMFLAGS_NO_MUTEX_UNLOCK) != -EINTR) {
+ vchiq_set_conn_state(state, VCHIQ_CONNSTATE_PAUSE_SENT);
+ } else {
+ /* Retry later */
+ return -EAGAIN;
+ }
+ break;
+
+ case VCHIQ_CONNSTATE_RESUMING:
+ if (queue_message(state, NULL, MAKE_RESUME, NULL, NULL, 0,
+ QMFLAGS_NO_MUTEX_LOCK) != -EINTR) {
+ vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTED);
+ } else {
+ /*
+ * This should really be impossible,
+ * since the PAUSE should have flushed
+ * through outstanding messages.
+ */
+ dev_err(state->dev, "core: Failed to send RESUME message\n");
+ }
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
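+
+/*
+ * Connection-state flow driven by handle_poll() and parse_message()
+ * (sketch):
+ *
+ *	CONNECTED --PAUSING--> PAUSE_SENT --peer PAUSE--> PAUSED
+ *	PAUSED --RESUMING--> CONNECTED	(a RESUME is queued to the peer)
+ */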
+
+/* Called by the slot handler thread */
+static int
+slot_handler_func(void *v)
+{
+ struct vchiq_state *state = v;
+ struct vchiq_shared_state *local = state->local;
+ int ret;
+
+ DEBUG_INITIALISE(local);
+
+ while (!kthread_should_stop()) {
+ DEBUG_COUNT(SLOT_HANDLER_COUNT);
+ DEBUG_TRACE(SLOT_HANDLER_LINE);
+ ret = remote_event_wait(&state->trigger_event, &local->trigger);
+ if (ret)
+ return ret;
+
+ /* Ensure that reads don't overtake the remote_event_wait. */
+ rmb();
+
+ DEBUG_TRACE(SLOT_HANDLER_LINE);
+ if (state->poll_needed) {
+ state->poll_needed = 0;
+
+ /*
+ * Handle service polling and other rare conditions here
+ * out of the mainline code
+ */
+ if (handle_poll(state) == -EAGAIN)
+ state->poll_needed = 1;
+ }
+
+ DEBUG_TRACE(SLOT_HANDLER_LINE);
+ parse_rx_slots(state);
+ }
+ return 0;
+}
+
+/* Called by the recycle thread */
+static int
+recycle_func(void *v)
+{
+ struct vchiq_state *state = v;
+ struct vchiq_shared_state *local = state->local;
+ u32 *found;
+ size_t length;
+ int ret;
+
+ length = sizeof(*found) * BITSET_SIZE(VCHIQ_MAX_SERVICES);
+
+ found = kmalloc_array(BITSET_SIZE(VCHIQ_MAX_SERVICES), sizeof(*found),
+ GFP_KERNEL);
+ if (!found)
+ return -ENOMEM;
+
+ while (!kthread_should_stop()) {
+ ret = remote_event_wait(&state->recycle_event, &local->recycle);
+ if (ret)
+ return ret;
+
+ process_free_queue(state, found, length);
+ }
+ return 0;
+}
+
+/* Called by the sync thread */
+static int
+sync_func(void *v)
+{
+ struct vchiq_state *state = v;
+ struct vchiq_shared_state *local = state->local;
+ struct vchiq_header *header =
+ (struct vchiq_header *)SLOT_DATA_FROM_INDEX(state,
+ state->remote->slot_sync);
+ int svc_fourcc;
+ int ret;
+
+ while (!kthread_should_stop()) {
+ struct vchiq_service *service;
+ int msgid, size;
+ int type;
+ unsigned int localport, remoteport;
+
+ ret = remote_event_wait(&state->sync_trigger_event, &local->sync_trigger);
+ if (ret)
+ return ret;
+
+ /* Ensure that reads don't overtake the remote_event_wait. */
+ rmb();
+
+ msgid = header->msgid;
+ size = header->size;
+ type = VCHIQ_MSG_TYPE(msgid);
+ localport = VCHIQ_MSG_DSTPORT(msgid);
+ remoteport = VCHIQ_MSG_SRCPORT(msgid);
+
+ service = find_service_by_port(state, localport);
+
+ if (!service) {
+ dev_err(state->dev,
+ "sync: %d: sf %s@%p (%d->%d) - invalid/closed service %d\n",
+ state->id, msg_type_str(type), header, remoteport,
+ localport, localport);
+ release_message_sync(state, header);
+ continue;
+ }
+
+ svc_fourcc = service->base.fourcc;
+
+ dev_dbg(state->dev, "sync: Rcvd Msg %s from %p4cc s:%d d:%d len:%d\n",
+ msg_type_str(type), &svc_fourcc, remoteport, localport, size);
+ if (size > 0)
+ vchiq_log_dump_mem(state->dev, "Rcvd", 0, header->data, min(16, size));
+
+ switch (type) {
+ case VCHIQ_MSG_OPENACK:
+ if (size >= sizeof(struct vchiq_openack_payload)) {
+ const struct vchiq_openack_payload *payload =
+ (struct vchiq_openack_payload *)
+ header->data;
+ service->peer_version = payload->version;
+ }
+ dev_err(state->dev, "sync: %d: sf OPENACK@%p,%x (%d->%d) v:%d\n",
+ state->id, header, size, remoteport, localport,
+ service->peer_version);
+ if (service->srvstate == VCHIQ_SRVSTATE_OPENING) {
+ service->remoteport = remoteport;
+ set_service_state(service, VCHIQ_SRVSTATE_OPENSYNC);
+ service->sync = 1;
+ complete(&service->remove_event);
+ }
+ release_message_sync(state, header);
+ break;
+
+ case VCHIQ_MSG_DATA:
+ dev_dbg(state->dev, "sync: %d: sf DATA@%p,%x (%d->%d)\n",
+ state->id, header, size, remoteport, localport);
+
+ if ((service->remoteport == remoteport) &&
+ (service->srvstate == VCHIQ_SRVSTATE_OPENSYNC)) {
+ if (make_service_callback(service, VCHIQ_MESSAGE_AVAILABLE, header,
+ NULL) == -EAGAIN)
+ dev_err(state->dev,
+ "sync: error: synchronous callback to service %d returns -EAGAIN\n",
+ localport);
+ }
+ break;
+
+ default:
+ dev_err(state->dev, "sync: error: %d: sf unexpected msgid %x@%p,%x\n",
+ state->id, msgid, header, size);
+ release_message_sync(state, header);
+ break;
+ }
+
+ vchiq_service_put(service);
+ }
+
+ return 0;
+}
+
+inline const char *
+get_conn_state_name(enum vchiq_connstate conn_state)
+{
+ return conn_state_names[conn_state];
+}
+
+struct vchiq_slot_zero *
+vchiq_init_slots(struct device *dev, void *mem_base, int mem_size)
+{
+ int mem_align =
+ (int)((VCHIQ_SLOT_SIZE - (long)mem_base) & VCHIQ_SLOT_MASK);
+ struct vchiq_slot_zero *slot_zero =
+ (struct vchiq_slot_zero *)(mem_base + mem_align);
+ int num_slots = (mem_size - mem_align) / VCHIQ_SLOT_SIZE;
+ int first_data_slot = VCHIQ_SLOT_ZERO_SLOTS;
+
+ check_sizes();
+
+	/* Ensure there is enough memory to run an absolutely minimal system */
+ num_slots -= first_data_slot;
+
+ if (num_slots < 4) {
+ dev_err(dev, "core: %s: Insufficient memory %x bytes\n",
+ __func__, mem_size);
+ return NULL;
+ }
+
+ memset(slot_zero, 0, sizeof(struct vchiq_slot_zero));
+
+ slot_zero->magic = VCHIQ_MAGIC;
+ slot_zero->version = VCHIQ_VERSION;
+ slot_zero->version_min = VCHIQ_VERSION_MIN;
+ slot_zero->slot_zero_size = sizeof(struct vchiq_slot_zero);
+ slot_zero->slot_size = VCHIQ_SLOT_SIZE;
+ slot_zero->max_slots = VCHIQ_MAX_SLOTS;
+ slot_zero->max_slots_per_side = VCHIQ_MAX_SLOTS_PER_SIDE;
+
+ slot_zero->master.slot_sync = first_data_slot;
+ slot_zero->master.slot_first = first_data_slot + 1;
+ slot_zero->master.slot_last = first_data_slot + (num_slots / 2) - 1;
+ slot_zero->slave.slot_sync = first_data_slot + (num_slots / 2);
+ slot_zero->slave.slot_first = first_data_slot + (num_slots / 2) + 1;
+ slot_zero->slave.slot_last = first_data_slot + num_slots - 1;
+
+ return slot_zero;
+}
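+
+/*
+ * Example of the split above, assuming - for illustration only - that
+ * VCHIQ_SLOT_ZERO_SLOTS is 4 and the memory holds 36 slots: 32 data
+ * slots remain, the master gets sync slot 4 plus data slots 5-19, and
+ * the slave gets sync slot 20 plus data slots 21-35.
+ */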
+
+int
+vchiq_init_state(struct vchiq_state *state, struct vchiq_slot_zero *slot_zero, struct device *dev)
+{
+ struct vchiq_shared_state *local;
+ struct vchiq_shared_state *remote;
+ char threadname[16];
+ int i, ret;
+
+ local = &slot_zero->slave;
+ remote = &slot_zero->master;
+
+ if (local->initialised) {
+ if (remote->initialised)
+ dev_err(dev, "local state has already been initialised\n");
+ else
+ dev_err(dev, "master/slave mismatch two slaves\n");
+
+ return -EINVAL;
+ }
+
+ memset(state, 0, sizeof(struct vchiq_state));
+
+ state->dev = dev;
+
+ /*
+ * initialize shared state pointers
+ */
+
+ state->local = local;
+ state->remote = remote;
+ state->slot_data = (struct vchiq_slot *)slot_zero;
+
+ /*
+ * initialize events and mutexes
+ */
+
+ init_completion(&state->connect);
+ mutex_init(&state->mutex);
+ mutex_init(&state->slot_mutex);
+ mutex_init(&state->recycle_mutex);
+ mutex_init(&state->sync_mutex);
+
+ spin_lock_init(&state->msg_queue_spinlock);
+ spin_lock_init(&state->bulk_waiter_spinlock);
+ spin_lock_init(&state->quota_spinlock);
+
+ init_completion(&state->slot_available_event);
+ init_completion(&state->data_quota_event);
+
+ state->slot_queue_available = 0;
+
+ for (i = 0; i < VCHIQ_MAX_SERVICES; i++) {
+ struct vchiq_service_quota *quota = &state->service_quotas[i];
+
+ init_completion(&quota->quota_event);
+ }
+
+ for (i = local->slot_first; i <= local->slot_last; i++) {
+ local->slot_queue[state->slot_queue_available] = i;
+ state->slot_queue_available++;
+ complete(&state->slot_available_event);
+ }
+
+ state->default_slot_quota = state->slot_queue_available / 2;
+ state->default_message_quota =
+ min_t(unsigned short, state->default_slot_quota * 256, ~0);
+
+ state->previous_data_index = -1;
+ state->data_use_count = 0;
+ state->data_quota = state->slot_queue_available - 1;
+
+ remote_event_create(&state->trigger_event, &local->trigger);
+ local->tx_pos = 0;
+ remote_event_create(&state->recycle_event, &local->recycle);
+ local->slot_queue_recycle = state->slot_queue_available;
+ remote_event_create(&state->sync_trigger_event, &local->sync_trigger);
+ remote_event_create(&state->sync_release_event, &local->sync_release);
+
+ /* At start-of-day, the slot is empty and available */
+ ((struct vchiq_header *)
+ SLOT_DATA_FROM_INDEX(state, local->slot_sync))->msgid =
+ VCHIQ_MSGID_PADDING;
+ remote_event_signal_local(&state->sync_release_event, &local->sync_release);
+
+ local->debug[DEBUG_ENTRIES] = DEBUG_MAX;
+
+ ret = vchiq_platform_init_state(state);
+ if (ret)
+ return ret;
+
+ /*
+ * bring up slot handler thread
+ */
+ snprintf(threadname, sizeof(threadname), "vchiq-slot/%d", state->id);
+ state->slot_handler_thread = kthread_create(&slot_handler_func, (void *)state, threadname);
+
+ if (IS_ERR(state->slot_handler_thread)) {
+ dev_err(state->dev, "couldn't create thread %s\n", threadname);
+ return PTR_ERR(state->slot_handler_thread);
+ }
+ set_user_nice(state->slot_handler_thread, -19);
+
+ snprintf(threadname, sizeof(threadname), "vchiq-recy/%d", state->id);
+ state->recycle_thread = kthread_create(&recycle_func, (void *)state, threadname);
+ if (IS_ERR(state->recycle_thread)) {
+ dev_err(state->dev, "couldn't create thread %s\n", threadname);
+ ret = PTR_ERR(state->recycle_thread);
+ goto fail_free_handler_thread;
+ }
+ set_user_nice(state->recycle_thread, -19);
+
+ snprintf(threadname, sizeof(threadname), "vchiq-sync/%d", state->id);
+ state->sync_thread = kthread_create(&sync_func, (void *)state, threadname);
+ if (IS_ERR(state->sync_thread)) {
+ dev_err(state->dev, "couldn't create thread %s\n", threadname);
+ ret = PTR_ERR(state->sync_thread);
+ goto fail_free_recycle_thread;
+ }
+ set_user_nice(state->sync_thread, -20);
+
+ wake_up_process(state->slot_handler_thread);
+ wake_up_process(state->recycle_thread);
+ wake_up_process(state->sync_thread);
+
+ /* Indicate readiness to the other side */
+ local->initialised = 1;
+
+ return 0;
+
+fail_free_recycle_thread:
+ kthread_stop(state->recycle_thread);
+fail_free_handler_thread:
+ kthread_stop(state->slot_handler_thread);
+
+ return ret;
+}
+
+void vchiq_msg_queue_push(struct vchiq_instance *instance, unsigned int handle,
+ struct vchiq_header *header)
+{
+ struct vchiq_service *service = find_service_by_handle(instance, handle);
+ int pos;
+
+ if (!service)
+ return;
+
+ while (service->msg_queue_write == service->msg_queue_read +
+ VCHIQ_MAX_SLOTS) {
+ if (wait_for_completion_interruptible(&service->msg_queue_pop))
+ flush_signals(current);
+ }
+
+ pos = service->msg_queue_write & (VCHIQ_MAX_SLOTS - 1);
+ service->msg_queue_write++;
+ service->msg_queue[pos] = header;
+
+ complete(&service->msg_queue_push);
+}
+EXPORT_SYMBOL(vchiq_msg_queue_push);
+
+struct vchiq_header *vchiq_msg_hold(struct vchiq_instance *instance, unsigned int handle)
+{
+ struct vchiq_service *service = find_service_by_handle(instance, handle);
+ struct vchiq_header *header;
+ int pos;
+
+ if (!service)
+ return NULL;
+
+ if (service->msg_queue_write == service->msg_queue_read)
+ return NULL;
+
+ pos = service->msg_queue_read & (VCHIQ_MAX_SLOTS - 1);
+ service->msg_queue_read++;
+ header = service->msg_queue[pos];
+
+ complete(&service->msg_queue_pop);
+
+ return header;
+}
+EXPORT_SYMBOL(vchiq_msg_hold);
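+
+/*
+ * vchiq_msg_queue_push()/vchiq_msg_hold() form a power-of-two ring of
+ * VCHIQ_MAX_SLOTS held headers. Minimal consumer sketch (names are
+ * hypothetical):
+ *
+ *	struct vchiq_header *header = vchiq_msg_hold(instance, handle);
+ *
+ *	if (header) {
+ *		process_reply(header->data, header->size);
+ *		vchiq_release_message(instance, handle, header);
+ *	}
+ */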
+
+static int vchiq_validate_params(struct vchiq_state *state,
+ const struct vchiq_service_params_kernel *params)
+{
+ if (!params->callback || !params->fourcc) {
+ dev_err(state->dev, "Can't add service, invalid params\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Called from application thread when a client or server service is created. */
+struct vchiq_service *
+vchiq_add_service_internal(struct vchiq_state *state,
+ const struct vchiq_service_params_kernel *params,
+ int srvstate, struct vchiq_instance *instance,
+ void (*userdata_term)(void *userdata))
+{
+ struct vchiq_service *service;
+ struct vchiq_service __rcu **pservice = NULL;
+ struct vchiq_service_quota *quota;
+ int ret;
+ int i;
+
+ ret = vchiq_validate_params(state, params);
+ if (ret)
+ return NULL;
+
+ service = kzalloc(sizeof(*service), GFP_KERNEL);
+ if (!service)
+		return NULL;
+
+ service->base.fourcc = params->fourcc;
+ service->base.callback = params->callback;
+ service->base.userdata = params->userdata;
+ service->handle = VCHIQ_SERVICE_HANDLE_INVALID;
+ kref_init(&service->ref_count);
+ service->srvstate = VCHIQ_SRVSTATE_FREE;
+ service->userdata_term = userdata_term;
+ service->localport = VCHIQ_PORT_FREE;
+ service->remoteport = VCHIQ_PORT_FREE;
+
+ service->public_fourcc = (srvstate == VCHIQ_SRVSTATE_OPENING) ?
+ VCHIQ_FOURCC_INVALID : params->fourcc;
+ service->auto_close = 1;
+ atomic_set(&service->poll_flags, 0);
+ service->version = params->version;
+ service->version_min = params->version_min;
+ service->state = state;
+ service->instance = instance;
+ init_completion(&service->remove_event);
+ init_completion(&service->bulk_remove_event);
+ init_completion(&service->msg_queue_pop);
+ init_completion(&service->msg_queue_push);
+ mutex_init(&service->bulk_mutex);
+
+ /*
+ * Although it is perfectly possible to use a spinlock
+ * to protect the creation of services, it is overkill as it
+ * disables interrupts while the array is searched.
+ * The only danger is of another thread trying to create a
+ * service - service deletion is safe.
+ * Therefore it is preferable to use state->mutex which,
+ * although slower to claim, doesn't block interrupts while
+ * it is held.
+ */
+
+ mutex_lock(&state->mutex);
+
+ /* Prepare to use a previously unused service */
+ if (state->unused_service < VCHIQ_MAX_SERVICES)
+ pservice = &state->services[state->unused_service];
+
+ if (srvstate == VCHIQ_SRVSTATE_OPENING) {
+ for (i = 0; i < state->unused_service; i++) {
+ if (!rcu_access_pointer(state->services[i])) {
+ pservice = &state->services[i];
+ break;
+ }
+ }
+ } else {
+ rcu_read_lock();
+ for (i = (state->unused_service - 1); i >= 0; i--) {
+ struct vchiq_service *srv;
+
+ srv = rcu_dereference(state->services[i]);
+ if (!srv) {
+ pservice = &state->services[i];
+ } else if ((srv->public_fourcc == params->fourcc) &&
+ ((srv->instance != instance) ||
+ (srv->base.callback != params->callback))) {
+ /*
+ * There is another server using this
+ * fourcc which doesn't match.
+ */
+ pservice = NULL;
+ break;
+ }
+ }
+ rcu_read_unlock();
+ }
+
+ if (pservice) {
+ service->localport = (pservice - state->services);
+ if (!handle_seq)
+ handle_seq = VCHIQ_MAX_STATES *
+ VCHIQ_MAX_SERVICES;
+ service->handle = handle_seq |
+ (state->id * VCHIQ_MAX_SERVICES) |
+ service->localport;
+ handle_seq += VCHIQ_MAX_STATES * VCHIQ_MAX_SERVICES;
+ rcu_assign_pointer(*pservice, service);
+ if (pservice == &state->services[state->unused_service])
+ state->unused_service++;
+ }
+
+ mutex_unlock(&state->mutex);
+
+ if (!pservice) {
+ kfree(service);
+ return NULL;
+ }
+
+ quota = &state->service_quotas[service->localport];
+ quota->slot_quota = state->default_slot_quota;
+ quota->message_quota = state->default_message_quota;
+ if (quota->slot_use_count == 0)
+ quota->previous_tx_index =
+ SLOT_QUEUE_INDEX_FROM_POS(state->local_tx_pos)
+ - 1;
+
+ /* Bring this service online */
+ set_service_state(service, srvstate);
+
+ dev_dbg(state->dev, "core_msg: %s Service %p4cc SrcPort:%d\n",
+ (srvstate == VCHIQ_SRVSTATE_OPENING) ? "Open" : "Add",
+ &params->fourcc, service->localport);
+
+ /* Don't unlock the service - leave it with a ref_count of 1. */
+
+ return service;
+}
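+
+/*
+ * Handle encoding used above, with illustrative values: assuming
+ * VCHIQ_MAX_SERVICES == 4096, state 0 and local port 7, the first
+ * allocation yields handle = (VCHIQ_MAX_STATES * 4096) | 7. handle_seq
+ * then advances by VCHIQ_MAX_STATES * VCHIQ_MAX_SERVICES per service,
+ * so a stale handle never aliases a recycled port.
+ */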
+
+int
+vchiq_open_service_internal(struct vchiq_service *service, int client_id)
+{
+ struct vchiq_open_payload payload = {
+ service->base.fourcc,
+ client_id,
+ service->version,
+ service->version_min
+ };
+ int status = 0;
+
+ service->client_id = client_id;
+ vchiq_use_service_internal(service);
+ status = queue_message(service->state,
+ NULL, MAKE_OPEN(service->localport),
+ memcpy_copy_callback,
+ &payload,
+ sizeof(payload),
+ QMFLAGS_IS_BLOCKING);
+
+ if (status)
+ return status;
+
+ /* Wait for the ACK/NAK */
+ if (wait_for_completion_interruptible(&service->remove_event)) {
+ status = -EAGAIN;
+ vchiq_release_service_internal(service);
+ } else if ((service->srvstate != VCHIQ_SRVSTATE_OPEN) &&
+ (service->srvstate != VCHIQ_SRVSTATE_OPENSYNC)) {
+ if (service->srvstate != VCHIQ_SRVSTATE_CLOSEWAIT)
+ dev_err(service->state->dev,
+ "core: %d: osi - srvstate = %s (ref %u)\n",
+ service->state->id, srvstate_names[service->srvstate],
+ kref_read(&service->ref_count));
+ status = -EINVAL;
+ VCHIQ_SERVICE_STATS_INC(service, error_count);
+ vchiq_release_service_internal(service);
+ }
+
+ return status;
+}
+
+static void
+release_service_messages(struct vchiq_service *service)
+{
+ struct vchiq_state *state = service->state;
+ int slot_last = state->remote->slot_last;
+ int i;
+
+ /* Release any claimed messages aimed at this service */
+
+ if (service->sync) {
+ struct vchiq_header *header =
+ (struct vchiq_header *)SLOT_DATA_FROM_INDEX(state,
+ state->remote->slot_sync);
+ if (VCHIQ_MSG_DSTPORT(header->msgid) == service->localport)
+ release_message_sync(state, header);
+
+ return;
+ }
+
+ for (i = state->remote->slot_first; i <= slot_last; i++) {
+ struct vchiq_slot_info *slot_info =
+ SLOT_INFO_FROM_INDEX(state, i);
+ unsigned int pos, end;
+ char *data;
+
+ if (slot_info->release_count == slot_info->use_count)
+ continue;
+
+ data = (char *)SLOT_DATA_FROM_INDEX(state, i);
+ end = VCHIQ_SLOT_SIZE;
+ if (data == state->rx_data)
+ /*
+ * This buffer is still being read from - stop
+ * at the current read position
+ */
+ end = state->rx_pos & VCHIQ_SLOT_MASK;
+
+ pos = 0;
+
+ while (pos < end) {
+ struct vchiq_header *header =
+ (struct vchiq_header *)(data + pos);
+ int msgid = header->msgid;
+ int port = VCHIQ_MSG_DSTPORT(msgid);
+
+ if ((port == service->localport) && (msgid & VCHIQ_MSGID_CLAIMED)) {
+ dev_dbg(state->dev, "core: fsi - hdr %p\n", header);
+ release_slot(state, slot_info, header, NULL);
+ }
+ pos += calc_stride(header->size);
+ if (pos > VCHIQ_SLOT_SIZE) {
+ dev_err(state->dev,
+ "core: fsi - pos %x: header %p, msgid %x, header->msgid %x, header->size %x\n",
+ pos, header, msgid, header->msgid, header->size);
+ WARN(1, "invalid slot position\n");
+ }
+ }
+ }
+}
+
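+/* Returns nonzero on success; zero means the abort must be retried. */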
+static int
+do_abort_bulks(struct vchiq_service *service)
+{
+ int status;
+
+ /* Abort any outstanding bulk transfers */
+ if (mutex_lock_killable(&service->bulk_mutex))
+ return 0;
+ abort_outstanding_bulks(service, &service->bulk_tx);
+ abort_outstanding_bulks(service, &service->bulk_rx);
+ mutex_unlock(&service->bulk_mutex);
+
+ status = notify_bulks(service, &service->bulk_tx, NO_RETRY_POLL);
+ if (status)
+ return 0;
+
+ status = notify_bulks(service, &service->bulk_rx, NO_RETRY_POLL);
+ return !status;
+}
+
+static int
+close_service_complete(struct vchiq_service *service, int failstate)
+{
+ int status;
+ int is_server = (service->public_fourcc != VCHIQ_FOURCC_INVALID);
+ int newstate;
+
+ switch (service->srvstate) {
+ case VCHIQ_SRVSTATE_OPEN:
+ case VCHIQ_SRVSTATE_CLOSESENT:
+ case VCHIQ_SRVSTATE_CLOSERECVD:
+ if (is_server) {
+ if (service->auto_close) {
+ service->client_id = 0;
+ service->remoteport = VCHIQ_PORT_FREE;
+ newstate = VCHIQ_SRVSTATE_LISTENING;
+ } else {
+ newstate = VCHIQ_SRVSTATE_CLOSEWAIT;
+ }
+ } else {
+ newstate = VCHIQ_SRVSTATE_CLOSED;
+ }
+ set_service_state(service, newstate);
+ break;
+ case VCHIQ_SRVSTATE_LISTENING:
+ break;
+ default:
+ dev_err(service->state->dev, "core: (%x) called in state %s\n",
+ service->handle, srvstate_names[service->srvstate]);
+ WARN(1, "%s in unexpected state\n", __func__);
+ return -EINVAL;
+ }
+
+ status = make_service_callback(service, VCHIQ_SERVICE_CLOSED, NULL, NULL);
+
+ if (status != -EAGAIN) {
+ int uc = service->service_use_count;
+ int i;
+ /* Complete the close process */
+ for (i = 0; i < uc; i++)
+ /*
+ * cater for cases where close is forced and the
+			 * client may not close all its handles
+ */
+ vchiq_release_service_internal(service);
+
+ service->client_id = 0;
+ service->remoteport = VCHIQ_PORT_FREE;
+
+ if (service->srvstate == VCHIQ_SRVSTATE_CLOSED) {
+ vchiq_free_service_internal(service);
+ } else if (service->srvstate != VCHIQ_SRVSTATE_CLOSEWAIT) {
+ if (is_server)
+ service->closing = 0;
+
+ complete(&service->remove_event);
+ }
+ } else {
+ set_service_state(service, failstate);
+ }
+
+ return status;
+}
+
+/*
+ * Prepares a bulk transfer to be queued. The function is killable and is
+ * intended to be called from user threads. It may return -EINTR to indicate
+ * that a signal has been received and the call should be retried after being
+ * returned to user context.
+ */
+static int
+vchiq_bulk_xfer_queue_msg_killable(struct vchiq_service *service,
+ struct vchiq_bulk *bulk_params)
+{
+ struct vchiq_bulk_queue *queue;
+ struct bulk_waiter *bulk_waiter = NULL;
+ struct vchiq_bulk *bulk;
+ struct vchiq_state *state = service->state;
+ const char dir_char = (bulk_params->dir == VCHIQ_BULK_TRANSMIT) ? 't' : 'r';
+ const int dir_msgtype = (bulk_params->dir == VCHIQ_BULK_TRANSMIT) ?
+ VCHIQ_MSG_BULK_TX : VCHIQ_MSG_BULK_RX;
+ int status = -EINVAL;
+ int payload[2];
+
+ if (bulk_params->mode == VCHIQ_BULK_MODE_BLOCKING) {
+ bulk_waiter = bulk_params->waiter;
+ init_completion(&bulk_waiter->event);
+ bulk_waiter->actual = 0;
+ bulk_waiter->bulk = NULL;
+ }
+
+ queue = (bulk_params->dir == VCHIQ_BULK_TRANSMIT) ?
+ &service->bulk_tx : &service->bulk_rx;
+
+ if (mutex_lock_killable(&service->bulk_mutex))
+ return -EINTR;
+
+ if (queue->local_insert == queue->remove + VCHIQ_NUM_SERVICE_BULKS) {
+ VCHIQ_SERVICE_STATS_INC(service, bulk_stalls);
+ do {
+ mutex_unlock(&service->bulk_mutex);
+ if (wait_for_completion_killable(&service->bulk_remove_event))
+ return -EINTR;
+ if (mutex_lock_killable(&service->bulk_mutex))
+ return -EINTR;
+ } while (queue->local_insert == queue->remove +
+ VCHIQ_NUM_SERVICE_BULKS);
+ }
+
+ bulk = &queue->bulks[BULK_INDEX(queue->local_insert)];
+
+	/* Initialize the 'bulk' slot with the bulk parameters passed in. */
+ bulk->mode = bulk_params->mode;
+ bulk->dir = bulk_params->dir;
+ bulk->waiter = bulk_params->waiter;
+ bulk->cb_data = bulk_params->cb_data;
+ bulk->cb_userdata = bulk_params->cb_userdata;
+ bulk->size = bulk_params->size;
+ bulk->offset = bulk_params->offset;
+ bulk->uoffset = bulk_params->uoffset;
+ bulk->actual = VCHIQ_BULK_ACTUAL_ABORTED;
+
+ if (vchiq_prepare_bulk_data(service->instance, bulk))
+ goto unlock_error_exit;
+
+ /*
+ * Ensure that the bulk data record is visible to the peer
+ * before proceeding.
+ */
+ wmb();
+
+ dev_dbg(state->dev, "core: %d: bt (%d->%d) %cx %x@%pad %p\n",
+ state->id, service->localport, service->remoteport,
+ dir_char, bulk->size, &bulk->dma_addr, bulk->cb_data);
+
+ /*
+ * The slot mutex must be held when the service is being closed, so
+ * claim it here to ensure that isn't happening
+ */
+ if (mutex_lock_killable(&state->slot_mutex)) {
+ status = -EINTR;
+ goto cancel_bulk_error_exit;
+ }
+
+ if (service->srvstate != VCHIQ_SRVSTATE_OPEN)
+ goto unlock_both_error_exit;
+
+ payload[0] = lower_32_bits(bulk->dma_addr);
+ payload[1] = bulk->size;
+ status = queue_message(state,
+ NULL,
+ VCHIQ_MAKE_MSG(dir_msgtype,
+ service->localport,
+ service->remoteport),
+ memcpy_copy_callback,
+ &payload,
+ sizeof(payload),
+ QMFLAGS_IS_BLOCKING |
+ QMFLAGS_NO_MUTEX_LOCK |
+ QMFLAGS_NO_MUTEX_UNLOCK);
+ if (status)
+ goto unlock_both_error_exit;
+
+ queue->local_insert++;
+
+ mutex_unlock(&state->slot_mutex);
+ mutex_unlock(&service->bulk_mutex);
+
+ dev_dbg(state->dev, "core: %d: bt:%d %cx li=%x ri=%x p=%x\n",
+ state->id, service->localport, dir_char, queue->local_insert,
+ queue->remote_insert, queue->process);
+
+ if (bulk_waiter) {
+ bulk_waiter->bulk = bulk;
+ if (wait_for_completion_killable(&bulk_waiter->event))
+ status = -EINTR;
+ else if (bulk_waiter->actual == VCHIQ_BULK_ACTUAL_ABORTED)
+ status = -EINVAL;
+ }
+
+ return status;
+
+unlock_both_error_exit:
+ mutex_unlock(&state->slot_mutex);
+cancel_bulk_error_exit:
+ vchiq_complete_bulk(service->instance, bulk);
+unlock_error_exit:
+ mutex_unlock(&service->bulk_mutex);
+
+ return status;
+}
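+
+/*
+ * Wire format of the request queued above: a BULK_TX/BULK_RX message
+ * whose payload is two 32-bit words - the low 32 bits of the mapped
+ * DMA address and the transfer size. The peer replies with
+ * BULK_TX_DONE/BULK_RX_DONE carrying the actual byte count, which
+ * parse_message() feeds back through vchiq_complete_bulk().
+ */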
+
+/* Called by the slot handler */
+int
+vchiq_close_service_internal(struct vchiq_service *service, int close_recvd)
+{
+ struct vchiq_state *state = service->state;
+ int status = 0;
+ int is_server = (service->public_fourcc != VCHIQ_FOURCC_INVALID);
+ int close_id = MAKE_CLOSE(service->localport,
+ VCHIQ_MSG_DSTPORT(service->remoteport));
+
+ dev_dbg(state->dev, "core: %d: csi:%d,%d (%s)\n",
+ service->state->id, service->localport, close_recvd,
+ srvstate_names[service->srvstate]);
+
+ switch (service->srvstate) {
+ case VCHIQ_SRVSTATE_CLOSED:
+ case VCHIQ_SRVSTATE_HIDDEN:
+ case VCHIQ_SRVSTATE_LISTENING:
+ case VCHIQ_SRVSTATE_CLOSEWAIT:
+ if (close_recvd) {
+ dev_err(state->dev, "core: (1) called in state %s\n",
+ srvstate_names[service->srvstate]);
+ break;
+ } else if (!is_server) {
+ vchiq_free_service_internal(service);
+ break;
+ }
+
+ if (service->srvstate == VCHIQ_SRVSTATE_LISTENING) {
+ status = -EINVAL;
+ } else {
+ service->client_id = 0;
+ service->remoteport = VCHIQ_PORT_FREE;
+ if (service->srvstate == VCHIQ_SRVSTATE_CLOSEWAIT)
+ set_service_state(service, VCHIQ_SRVSTATE_LISTENING);
+ }
+ complete(&service->remove_event);
+ break;
+ case VCHIQ_SRVSTATE_OPENING:
+ if (close_recvd) {
+ /* The open was rejected - tell the user */
+ set_service_state(service, VCHIQ_SRVSTATE_CLOSEWAIT);
+ complete(&service->remove_event);
+ } else {
+ /* Shutdown mid-open - let the other side know */
+ status = queue_message(state, service, close_id, NULL, NULL, 0, 0);
+ }
+ break;
+
+ case VCHIQ_SRVSTATE_OPENSYNC:
+ mutex_lock(&state->sync_mutex);
+ fallthrough;
+ case VCHIQ_SRVSTATE_OPEN:
+ if (close_recvd) {
+ if (!do_abort_bulks(service))
+ status = -EAGAIN;
+ }
+
+ release_service_messages(service);
+
+ if (!status)
+ status = queue_message(state, service, close_id, NULL,
+ NULL, 0, QMFLAGS_NO_MUTEX_UNLOCK);
+
+ if (status) {
+ if (service->srvstate == VCHIQ_SRVSTATE_OPENSYNC)
+ mutex_unlock(&state->sync_mutex);
+ break;
+ }
+
+ if (!close_recvd) {
+ /* Change the state while the mutex is still held */
+ set_service_state(service, VCHIQ_SRVSTATE_CLOSESENT);
+ mutex_unlock(&state->slot_mutex);
+ if (service->sync)
+ mutex_unlock(&state->sync_mutex);
+ break;
+ }
+
+ /* Change the state while the mutex is still held */
+ set_service_state(service, VCHIQ_SRVSTATE_CLOSERECVD);
+ mutex_unlock(&state->slot_mutex);
+ if (service->sync)
+ mutex_unlock(&state->sync_mutex);
+
+ status = close_service_complete(service, VCHIQ_SRVSTATE_CLOSERECVD);
+ break;
+
+ case VCHIQ_SRVSTATE_CLOSESENT:
+ if (!close_recvd)
+ /* This happens when a process is killed mid-close */
+ break;
+
+ if (!do_abort_bulks(service)) {
+ status = -EAGAIN;
+ break;
+ }
+
+ if (!status)
+ status = close_service_complete(service, VCHIQ_SRVSTATE_CLOSERECVD);
+ break;
+
+ case VCHIQ_SRVSTATE_CLOSERECVD:
+ if (!close_recvd && is_server)
+ /* Force into LISTENING mode */
+ set_service_state(service, VCHIQ_SRVSTATE_LISTENING);
+ status = close_service_complete(service, VCHIQ_SRVSTATE_CLOSERECVD);
+ break;
+
+ default:
+ dev_err(state->dev, "core: (%d) called in state %s\n",
+ close_recvd, srvstate_names[service->srvstate]);
+ break;
+ }
+
+ return status;
+}
+
+/* Called from the application process upon process death */
+void
+vchiq_terminate_service_internal(struct vchiq_service *service)
+{
+ struct vchiq_state *state = service->state;
+
+ dev_dbg(state->dev, "core: %d: tsi - (%d<->%d)\n",
+ state->id, service->localport, service->remoteport);
+
+ mark_service_closing(service);
+
+ /* Mark the service for removal by the slot handler */
+ request_poll(state, service, VCHIQ_POLL_REMOVE);
+}
+
+/* Called from the slot handler */
+void
+vchiq_free_service_internal(struct vchiq_service *service)
+{
+ struct vchiq_state *state = service->state;
+
+ dev_dbg(state->dev, "core: %d: fsi - (%d)\n", state->id, service->localport);
+
+ switch (service->srvstate) {
+ case VCHIQ_SRVSTATE_OPENING:
+ case VCHIQ_SRVSTATE_CLOSED:
+ case VCHIQ_SRVSTATE_HIDDEN:
+ case VCHIQ_SRVSTATE_LISTENING:
+ case VCHIQ_SRVSTATE_CLOSEWAIT:
+ break;
+ default:
+ dev_err(state->dev, "core: %d: fsi - (%d) in state %s\n",
+ state->id, service->localport, srvstate_names[service->srvstate]);
+ return;
+ }
+
+ set_service_state(service, VCHIQ_SRVSTATE_FREE);
+
+ complete(&service->remove_event);
+
+ /* Release the initial lock */
+ vchiq_service_put(service);
+}
+
+int
+vchiq_connect_internal(struct vchiq_state *state, struct vchiq_instance *instance)
+{
+ struct vchiq_service *service;
+ int status = 0;
+ int i;
+
+ /* Find all services registered to this client and enable them. */
+ i = 0;
+ while ((service = next_service_by_instance(state, instance, &i)) != NULL) {
+ if (service->srvstate == VCHIQ_SRVSTATE_HIDDEN)
+ set_service_state(service, VCHIQ_SRVSTATE_LISTENING);
+ vchiq_service_put(service);
+ }
+
+ if (state->conn_state == VCHIQ_CONNSTATE_DISCONNECTED) {
+ status = queue_message(state, NULL, MAKE_CONNECT, NULL, NULL, 0,
+ QMFLAGS_IS_BLOCKING);
+ if (status)
+ return status;
+
+ vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTING);
+ }
+
+ if (state->conn_state == VCHIQ_CONNSTATE_CONNECTING) {
+ if (wait_for_completion_interruptible(&state->connect))
+ return -EAGAIN;
+
+ vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTED);
+ complete(&state->connect);
+ }
+
+ return status;
+}
+
+void
+vchiq_shutdown_internal(struct vchiq_state *state, struct vchiq_instance *instance)
+{
+ struct vchiq_service *service;
+ int i;
+
+ /* Find all services registered to this client and remove them. */
+ i = 0;
+ while ((service = next_service_by_instance(state, instance, &i)) != NULL) {
+ (void)vchiq_remove_service(instance, service->handle);
+ vchiq_service_put(service);
+ }
+}
+
+int
+vchiq_close_service(struct vchiq_instance *instance, unsigned int handle)
+{
+ /* Unregister the service */
+ struct vchiq_service *service = find_service_by_handle(instance, handle);
+ int status = 0;
+
+ if (!service)
+ return -EINVAL;
+
+ dev_dbg(service->state->dev, "core: %d: close_service:%d\n",
+ service->state->id, service->localport);
+
+ if ((service->srvstate == VCHIQ_SRVSTATE_FREE) ||
+ (service->srvstate == VCHIQ_SRVSTATE_LISTENING) ||
+ (service->srvstate == VCHIQ_SRVSTATE_HIDDEN)) {
+ vchiq_service_put(service);
+ return -EINVAL;
+ }
+
+ mark_service_closing(service);
+
+ if (current == service->state->slot_handler_thread) {
+ status = vchiq_close_service_internal(service, NO_CLOSE_RECVD);
+ WARN_ON(status == -EAGAIN);
+ } else {
+ /* Mark the service for termination by the slot handler */
+ request_poll(service->state, service, VCHIQ_POLL_TERMINATE);
+ }
+
+ while (1) {
+ if (wait_for_completion_interruptible(&service->remove_event)) {
+ status = -EAGAIN;
+ break;
+ }
+
+ if ((service->srvstate == VCHIQ_SRVSTATE_FREE) ||
+ (service->srvstate == VCHIQ_SRVSTATE_LISTENING) ||
+ (service->srvstate == VCHIQ_SRVSTATE_OPEN))
+ break;
+
+ dev_warn(service->state->dev,
+ "core: %d: close_service:%d - waiting in state %s\n",
+ service->state->id, service->localport,
+ srvstate_names[service->srvstate]);
+ }
+
+ if (!status &&
+ (service->srvstate != VCHIQ_SRVSTATE_FREE) &&
+ (service->srvstate != VCHIQ_SRVSTATE_LISTENING))
+ status = -EINVAL;
+
+ vchiq_service_put(service);
+
+ return status;
+}
+EXPORT_SYMBOL(vchiq_close_service);
+
+int
+vchiq_remove_service(struct vchiq_instance *instance, unsigned int handle)
+{
+ /* Unregister the service */
+ struct vchiq_service *service = find_service_by_handle(instance, handle);
+ int status = 0;
+
+ if (!service)
+ return -EINVAL;
+
+ dev_dbg(service->state->dev, "core: %d: remove_service:%d\n",
+ service->state->id, service->localport);
+
+ if (service->srvstate == VCHIQ_SRVSTATE_FREE) {
+ vchiq_service_put(service);
+ return -EINVAL;
+ }
+
+ mark_service_closing(service);
+
+ if ((service->srvstate == VCHIQ_SRVSTATE_HIDDEN) ||
+ (current == service->state->slot_handler_thread)) {
+ /*
+ * Make it look like a client, because it must be removed and
+ * not left in the LISTENING state.
+ */
+ service->public_fourcc = VCHIQ_FOURCC_INVALID;
+
+ status = vchiq_close_service_internal(service, NO_CLOSE_RECVD);
+ WARN_ON(status == -EAGAIN);
+ } else {
+ /* Mark the service for removal by the slot handler */
+ request_poll(service->state, service, VCHIQ_POLL_REMOVE);
+ }
+ while (1) {
+ if (wait_for_completion_interruptible(&service->remove_event)) {
+ status = -EAGAIN;
+ break;
+ }
+
+ if ((service->srvstate == VCHIQ_SRVSTATE_FREE) ||
+ (service->srvstate == VCHIQ_SRVSTATE_OPEN))
+ break;
+
+ dev_warn(service->state->dev,
+ "core: %d: remove_service:%d - waiting in state %s\n",
+ service->state->id, service->localport,
+ srvstate_names[service->srvstate]);
+ }
+
+ if (!status && (service->srvstate != VCHIQ_SRVSTATE_FREE))
+ status = -EINVAL;
+
+ vchiq_service_put(service);
+
+ return status;
+}
+
+int
+vchiq_bulk_xfer_blocking(struct vchiq_instance *instance, unsigned int handle,
+ struct vchiq_bulk *bulk_params)
+{
+ struct vchiq_service *service = find_service_by_handle(instance, handle);
+ int status = -EINVAL;
+
+ if (!service)
+ return -EINVAL;
+
+ if (service->srvstate != VCHIQ_SRVSTATE_OPEN)
+ goto error_exit;
+
+ if (!bulk_params->offset && !bulk_params->uoffset)
+ goto error_exit;
+
+ if (vchiq_check_service(service))
+ goto error_exit;
+
+ status = vchiq_bulk_xfer_queue_msg_killable(service, bulk_params);
+
+error_exit:
+ vchiq_service_put(service);
+
+ return status;
+}
+
+int
+vchiq_bulk_xfer_callback(struct vchiq_instance *instance, unsigned int handle,
+ struct vchiq_bulk *bulk_params)
+{
+ struct vchiq_service *service = find_service_by_handle(instance, handle);
+ int status = -EINVAL;
+
+ if (!service)
+ return -EINVAL;
+
+ if (bulk_params->mode != VCHIQ_BULK_MODE_CALLBACK &&
+ bulk_params->mode != VCHIQ_BULK_MODE_NOCALLBACK)
+ goto error_exit;
+
+ if (service->srvstate != VCHIQ_SRVSTATE_OPEN)
+ goto error_exit;
+
+ if (!bulk_params->offset && !bulk_params->uoffset)
+ goto error_exit;
+
+ if (vchiq_check_service(service))
+ goto error_exit;
+
+ status = vchiq_bulk_xfer_queue_msg_killable(service, bulk_params);
+
+error_exit:
+ vchiq_service_put(service);
+
+ return status;
+}
+
+/*
+ * This function is called by the VCHIQ ioctl interface and is killable.
+ * It may return -EINTR to indicate that a signal has been received and
+ * the call should be retried after being returned to user context.
+ */
+int
+vchiq_bulk_xfer_waiting(struct vchiq_instance *instance,
+ unsigned int handle, struct bulk_waiter *waiter)
+{
+ struct vchiq_service *service = find_service_by_handle(instance, handle);
+ struct bulk_waiter *bulk_waiter;
+ int status = -EINVAL;
+
+ if (!service)
+ return -EINVAL;
+
+ if (!waiter)
+ goto error_exit;
+
+ if (service->srvstate != VCHIQ_SRVSTATE_OPEN)
+ goto error_exit;
+
+ if (vchiq_check_service(service))
+ goto error_exit;
+
+ bulk_waiter = waiter;
+
+ vchiq_service_put(service);
+
+ status = 0;
+
+ if (wait_for_completion_killable(&bulk_waiter->event))
+ return -EINTR;
+ else if (bulk_waiter->actual == VCHIQ_BULK_ACTUAL_ABORTED)
+ return -EINVAL;
+
+ return status;
+
+error_exit:
+ vchiq_service_put(service);
+
+ return status;
+}
+
+int
+vchiq_queue_message(struct vchiq_instance *instance, unsigned int handle,
+ ssize_t (*copy_callback)(void *context, void *dest,
+ size_t offset, size_t maxsize),
+ void *context,
+ size_t size)
+{
+ struct vchiq_service *service = find_service_by_handle(instance, handle);
+ int status = -EINVAL;
+ int data_id;
+
+ if (!service)
+ goto error_exit;
+
+ if (vchiq_check_service(service))
+ goto error_exit;
+
+ if (!size) {
+ VCHIQ_SERVICE_STATS_INC(service, error_count);
+ goto error_exit;
+ }
+
+ if (size > VCHIQ_MAX_MSG_SIZE) {
+ VCHIQ_SERVICE_STATS_INC(service, error_count);
+ goto error_exit;
+ }
+
+ data_id = MAKE_DATA(service->localport, service->remoteport);
+
+ switch (service->srvstate) {
+ case VCHIQ_SRVSTATE_OPEN:
+ status = queue_message(service->state, service, data_id,
+ copy_callback, context, size,
+ QMFLAGS_IS_BLOCKING);
+ break;
+ case VCHIQ_SRVSTATE_OPENSYNC:
+ status = queue_message_sync(service->state, service, data_id,
+ copy_callback, context, size);
+ break;
+ default:
+ status = -EINVAL;
+ break;
+ }
+
+error_exit:
+ if (service)
+ vchiq_service_put(service);
+
+ return status;
+}
+
+int vchiq_queue_kernel_message(struct vchiq_instance *instance, unsigned int handle, void *data,
+ unsigned int size)
+{
+ return vchiq_queue_message(instance, handle, memcpy_copy_callback,
+ data, size);
+}
+EXPORT_SYMBOL(vchiq_queue_kernel_message);
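+
+/*
+ * Minimal kernel-client sketch for the helper above (the handle and
+ * buffer are hypothetical; a real client obtains the handle when the
+ * service is opened):
+ *
+ *	u8 buf[64];
+ *
+ *	build_request(buf);
+ *	ret = vchiq_queue_kernel_message(instance, handle, buf,
+ *					 sizeof(buf));
+ */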
+
+void
+vchiq_release_message(struct vchiq_instance *instance, unsigned int handle,
+ struct vchiq_header *header)
+{
+ struct vchiq_service *service = find_service_by_handle(instance, handle);
+ struct vchiq_shared_state *remote;
+ struct vchiq_state *state;
+ int slot_index;
+
+ if (!service)
+ return;
+
+ state = service->state;
+ remote = state->remote;
+
+ slot_index = SLOT_INDEX_FROM_DATA(state, (void *)header);
+
+ if ((slot_index >= remote->slot_first) &&
+ (slot_index <= remote->slot_last)) {
+ int msgid = header->msgid;
+
+ if (msgid & VCHIQ_MSGID_CLAIMED) {
+ struct vchiq_slot_info *slot_info =
+ SLOT_INFO_FROM_INDEX(state, slot_index);
+
+ release_slot(state, slot_info, header, service);
+ }
+ } else if (slot_index == remote->slot_sync) {
+ release_message_sync(state, header);
+ }
+
+ vchiq_service_put(service);
+}
+EXPORT_SYMBOL(vchiq_release_message);
+
+static void
+release_message_sync(struct vchiq_state *state, struct vchiq_header *header)
+{
+ header->msgid = VCHIQ_MSGID_PADDING;
+ remote_event_signal(state, &state->remote->sync_release);
+}
+
+int
+vchiq_get_peer_version(struct vchiq_instance *instance, unsigned int handle, short *peer_version)
+{
+ int status = -EINVAL;
+ struct vchiq_service *service = find_service_by_handle(instance, handle);
+
+ if (!service)
+ goto exit;
+
+ if (vchiq_check_service(service))
+ goto exit;
+
+ if (!peer_version)
+ goto exit;
+
+ *peer_version = service->peer_version;
+ status = 0;
+
+exit:
+ if (service)
+ vchiq_service_put(service);
+ return status;
+}
+EXPORT_SYMBOL(vchiq_get_peer_version);
+
+void vchiq_get_config(struct vchiq_config *config)
+{
+ config->max_msg_size = VCHIQ_MAX_MSG_SIZE;
+ config->bulk_threshold = VCHIQ_MAX_MSG_SIZE;
+ config->max_outstanding_bulks = VCHIQ_NUM_SERVICE_BULKS;
+ config->max_services = VCHIQ_MAX_SERVICES;
+ config->version = VCHIQ_VERSION;
+ config->version_min = VCHIQ_VERSION_MIN;
+}
+
+int
+vchiq_set_service_option(struct vchiq_instance *instance, unsigned int handle,
+ enum vchiq_service_option option, int value)
+{
+ struct vchiq_service *service = find_service_by_handle(instance, handle);
+ struct vchiq_service_quota *quota;
+ int ret = -EINVAL;
+
+ if (!service)
+ return -EINVAL;
+
+ switch (option) {
+ case VCHIQ_SERVICE_OPTION_AUTOCLOSE:
+ service->auto_close = value;
+ ret = 0;
+ break;
+
+ case VCHIQ_SERVICE_OPTION_SLOT_QUOTA:
+ quota = &service->state->service_quotas[service->localport];
+ if (value == 0)
+ value = service->state->default_slot_quota;
+ if ((value >= quota->slot_use_count) &&
+ (value < (unsigned short)~0)) {
+ quota->slot_quota = value;
+ if ((value >= quota->slot_use_count) &&
+ (quota->message_quota >= quota->message_use_count))
+ /*
+ * Signal the service that it may have
+ * dropped below its quota
+ */
+ complete(&quota->quota_event);
+ ret = 0;
+ }
+ break;
+
+ case VCHIQ_SERVICE_OPTION_MESSAGE_QUOTA:
+ quota = &service->state->service_quotas[service->localport];
+ if (value == 0)
+ value = service->state->default_message_quota;
+ if ((value >= quota->message_use_count) &&
+ (value < (unsigned short)~0)) {
+ quota->message_quota = value;
+ if ((value >= quota->message_use_count) &&
+ (quota->slot_quota >= quota->slot_use_count))
+ /*
+ * Signal the service that it may have
+ * dropped below its quota
+ */
+ complete(&quota->quota_event);
+ ret = 0;
+ }
+ break;
+
+ case VCHIQ_SERVICE_OPTION_SYNCHRONOUS:
+ if ((service->srvstate == VCHIQ_SRVSTATE_HIDDEN) ||
+ (service->srvstate == VCHIQ_SRVSTATE_LISTENING)) {
+ service->sync = value;
+ ret = 0;
+ }
+ break;
+
+ case VCHIQ_SERVICE_OPTION_TRACE:
+ service->trace = value;
+ ret = 0;
+ break;
+
+ default:
+ break;
+ }
+ vchiq_service_put(service);
+
+ return ret;
+}
+
+static void
+vchiq_dump_shared_state(struct seq_file *f, struct vchiq_state *state,
+ struct vchiq_shared_state *shared, const char *label)
+{
+ static const char *const debug_names[] = {
+ "<entries>",
+ "SLOT_HANDLER_COUNT",
+ "SLOT_HANDLER_LINE",
+ "PARSE_LINE",
+ "PARSE_HEADER",
+ "PARSE_MSGID",
+ "AWAIT_COMPLETION_LINE",
+ "DEQUEUE_MESSAGE_LINE",
+ "SERVICE_CALLBACK_LINE",
+ "MSG_QUEUE_FULL_COUNT",
+ "COMPLETION_QUEUE_FULL_COUNT"
+ };
+ int i;
+
+ seq_printf(f, " %s: slots %d-%d tx_pos=0x%x recycle=0x%x\n",
+ label, shared->slot_first, shared->slot_last,
+ shared->tx_pos, shared->slot_queue_recycle);
+
+ seq_puts(f, " Slots claimed:\n");
+
+ for (i = shared->slot_first; i <= shared->slot_last; i++) {
+ struct vchiq_slot_info slot_info =
+ *SLOT_INFO_FROM_INDEX(state, i);
+ if (slot_info.use_count != slot_info.release_count) {
+ seq_printf(f, " %d: %d/%d\n", i, slot_info.use_count,
+ slot_info.release_count);
+ }
+ }
+
+ for (i = 1; i < shared->debug[DEBUG_ENTRIES]; i++) {
+ seq_printf(f, " DEBUG: %s = %d(0x%x)\n",
+ debug_names[i], shared->debug[i], shared->debug[i]);
+ }
+}
+
+static void
+vchiq_dump_service_state(struct seq_file *f, struct vchiq_service *service)
+{
+ unsigned int ref_count;
+
+	/* Don't include the reference just taken */
+ ref_count = kref_read(&service->ref_count) - 1;
+ seq_printf(f, "Service %u: %s (ref %u)", service->localport,
+ srvstate_names[service->srvstate], ref_count);
+
+ if (service->srvstate != VCHIQ_SRVSTATE_FREE) {
+ char remoteport[30];
+ struct vchiq_service_quota *quota =
+ &service->state->service_quotas[service->localport];
+ int fourcc = service->base.fourcc;
+ int tx_pending, rx_pending, tx_size = 0, rx_size = 0;
+
+ if (service->remoteport != VCHIQ_PORT_FREE) {
+ int len2 = scnprintf(remoteport, sizeof(remoteport),
+ "%u", service->remoteport);
+
+ if (service->public_fourcc != VCHIQ_FOURCC_INVALID)
+ scnprintf(remoteport + len2, sizeof(remoteport) - len2,
+ " (client 0x%x)", service->client_id);
+ } else {
+ strscpy(remoteport, "n/a", sizeof(remoteport));
+ }
+
+ seq_printf(f, " '%p4cc' remote %s (msg use %d/%d, slot use %d/%d)\n",
+ &fourcc, remoteport,
+ quota->message_use_count, quota->message_quota,
+ quota->slot_use_count, quota->slot_quota);
+
+ tx_pending = service->bulk_tx.local_insert -
+ service->bulk_tx.remote_insert;
+ if (tx_pending) {
+ unsigned int i = BULK_INDEX(service->bulk_tx.remove);
+
+ tx_size = service->bulk_tx.bulks[i].size;
+ }
+
+ rx_pending = service->bulk_rx.local_insert -
+ service->bulk_rx.remote_insert;
+ if (rx_pending) {
+ unsigned int i = BULK_INDEX(service->bulk_rx.remove);
+
+ rx_size = service->bulk_rx.bulks[i].size;
+ }
+
+ seq_printf(f, " Bulk: tx_pending=%d (size %d), rx_pending=%d (size %d)\n",
+ tx_pending, tx_size, rx_pending, rx_size);
+
+ if (VCHIQ_ENABLE_STATS) {
+ seq_printf(f, " Ctrl: tx_count=%d, tx_bytes=%llu, rx_count=%d, rx_bytes=%llu\n",
+ service->stats.ctrl_tx_count,
+ service->stats.ctrl_tx_bytes,
+ service->stats.ctrl_rx_count,
+ service->stats.ctrl_rx_bytes);
+
+ seq_printf(f, " Bulk: tx_count=%d, tx_bytes=%llu, rx_count=%d, rx_bytes=%llu\n",
+ service->stats.bulk_tx_count,
+ service->stats.bulk_tx_bytes,
+ service->stats.bulk_rx_count,
+ service->stats.bulk_rx_bytes);
+
+ seq_printf(f, " %d quota stalls, %d slot stalls, %d bulk stalls, %d aborted, %d errors\n",
+ service->stats.quota_stalls,
+ service->stats.slot_stalls,
+ service->stats.bulk_stalls,
+ service->stats.bulk_aborted_count,
+ service->stats.error_count);
+ }
+ }
+
+ vchiq_dump_platform_service_state(f, service);
+}
+
+void vchiq_dump_state(struct seq_file *f, struct vchiq_state *state)
+{
+ int i;
+
+ seq_printf(f, "State %d: %s\n", state->id,
+ conn_state_names[state->conn_state]);
+
+ seq_printf(f, " tx_pos=0x%x(@%pK), rx_pos=0x%x(@%pK)\n",
+ state->local->tx_pos,
+ state->tx_data + (state->local_tx_pos & VCHIQ_SLOT_MASK),
+ state->rx_pos,
+ state->rx_data + (state->rx_pos & VCHIQ_SLOT_MASK));
+
+ seq_printf(f, " Version: %d (min %d)\n", VCHIQ_VERSION,
+ VCHIQ_VERSION_MIN);
+
+ if (VCHIQ_ENABLE_STATS) {
+ seq_printf(f, " Stats: ctrl_tx_count=%d, ctrl_rx_count=%d, error_count=%d\n",
+ state->stats.ctrl_tx_count, state->stats.ctrl_rx_count,
+ state->stats.error_count);
+ }
+
+ seq_printf(f, " Slots: %d available (%d data), %d recyclable, %d stalls (%d data)\n",
+ ((state->slot_queue_available * VCHIQ_SLOT_SIZE) -
+ state->local_tx_pos) / VCHIQ_SLOT_SIZE,
+ state->data_quota - state->data_use_count,
+ state->local->slot_queue_recycle - state->slot_queue_available,
+ state->stats.slot_stalls, state->stats.data_stalls);
+
+ vchiq_dump_platform_state(f);
+
+ vchiq_dump_shared_state(f, state, state->local, "Local");
+
+ vchiq_dump_shared_state(f, state, state->remote, "Remote");
+
+ vchiq_dump_platform_instances(state, f);
+
+ for (i = 0; i < state->unused_service; i++) {
+ struct vchiq_service *service = find_service_by_port(state, i);
+
+ if (service) {
+ vchiq_dump_service_state(f, service);
+ vchiq_service_put(service);
+ }
+ }
+}
+
+int vchiq_send_remote_use(struct vchiq_state *state)
+{
+ if (state->conn_state == VCHIQ_CONNSTATE_DISCONNECTED)
+ return -ENOTCONN;
+
+ return queue_message(state, NULL, MAKE_REMOTE_USE, NULL, NULL, 0, 0);
+}
+
+int vchiq_send_remote_use_active(struct vchiq_state *state)
+{
+ if (state->conn_state == VCHIQ_CONNSTATE_DISCONNECTED)
+ return -ENOTCONN;
+
+ return queue_message(state, NULL, MAKE_REMOTE_USE_ACTIVE,
+ NULL, NULL, 0, 0);
+}
+
+void vchiq_log_dump_mem(struct device *dev, const char *label, u32 addr,
+ const void *void_mem, size_t num_bytes)
+{
+ const u8 *mem = void_mem;
+ size_t offset;
+ char line_buf[100];
+ char *s;
+
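+	/* Emit 16 bytes per line: hex values on the left, printable ASCII on the right */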
+ while (num_bytes > 0) {
+ s = line_buf;
+
+ for (offset = 0; offset < 16; offset++) {
+ if (offset < num_bytes)
+ s += scnprintf(s, 4, "%02x ", mem[offset]);
+ else
+ s += scnprintf(s, 4, " ");
+ }
+
+ for (offset = 0; offset < 16; offset++) {
+ if (offset < num_bytes) {
+ u8 ch = mem[offset];
+
+ if ((ch < ' ') || (ch > '~'))
+ ch = '.';
+ *s++ = (char)ch;
+ }
+ }
+ *s++ = '\0';
+
+ dev_dbg(dev, "core: %s: %08x: %s\n", label, addr, line_buf);
+
+ addr += 16;
+ mem += 16;
+ if (num_bytes > 16)
+ num_bytes -= 16;
+ else
+ num_bytes = 0;
+ }
+}
diff --git a/drivers/platform/raspberrypi/vchiq-interface/vchiq_debugfs.c b/drivers/platform/raspberrypi/vchiq-interface/vchiq_debugfs.c
new file mode 100644
index 000000000000..c82326a9b6d9
--- /dev/null
+++ b/drivers/platform/raspberrypi/vchiq-interface/vchiq_debugfs.c
@@ -0,0 +1,157 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (c) 2014 Raspberry Pi (Trading) Ltd. All rights reserved.
+ * Copyright (c) 2010-2012 Broadcom. All rights reserved.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/raspberrypi/vchiq_core.h>
+#include <linux/raspberrypi/vchiq_arm.h>
+#include <linux/raspberrypi/vchiq_debugfs.h>
+
+#ifdef CONFIG_DEBUG_FS
+
+#define DEBUGFS_WRITE_BUF_SIZE 256
+
+/* Global 'vchiq' debugfs and clients entry used by all instances */
+static struct dentry *vchiq_dbg_dir;
+static struct dentry *vchiq_dbg_clients;
+
+static int debugfs_usecount_show(struct seq_file *f, void *offset)
+{
+ struct vchiq_instance *instance = f->private;
+ int use_count;
+
+ use_count = vchiq_instance_get_use_count(instance);
+ seq_printf(f, "%d\n", use_count);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(debugfs_usecount);
+
+static int debugfs_trace_show(struct seq_file *f, void *offset)
+{
+ struct vchiq_instance *instance = f->private;
+ int trace;
+
+ trace = vchiq_instance_get_trace(instance);
+ seq_printf(f, "%s\n", trace ? "Y" : "N");
+
+ return 0;
+}
+
+static int vchiq_dump_show(struct seq_file *f, void *offset)
+{
+ struct vchiq_state *state = f->private;
+
+ vchiq_dump_state(f, state);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(vchiq_dump);
+
+static int debugfs_trace_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, debugfs_trace_show, inode->i_private);
+}
+
+static ssize_t debugfs_trace_write(struct file *file,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *f = (struct seq_file *)file->private_data;
+ struct vchiq_instance *instance = f->private;
+ char firstchar;
+
+ if (copy_from_user(&firstchar, buffer, 1))
+ return -EFAULT;
+
+ switch (firstchar) {
+ case 'Y':
+ case 'y':
+ case '1':
+ vchiq_instance_set_trace(instance, 1);
+ break;
+ case 'N':
+ case 'n':
+ case '0':
+ vchiq_instance_set_trace(instance, 0);
+ break;
+ default:
+ break;
+ }
+
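+	/* Consume the whole write - only the first character is significant */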
+ *ppos += count;
+
+ return count;
+}
+
+static const struct file_operations debugfs_trace_fops = {
+ .owner = THIS_MODULE,
+ .open = debugfs_trace_open,
+ .write = debugfs_trace_write,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+/* add an instance (process) to the debugfs entries */
+void vchiq_debugfs_add_instance(struct vchiq_instance *instance)
+{
+ char pidstr[16];
+ struct dentry *top;
+
+ snprintf(pidstr, sizeof(pidstr), "%d",
+ vchiq_instance_get_pid(instance));
+
+ top = debugfs_create_dir(pidstr, vchiq_dbg_clients);
+
+ debugfs_create_file("use_count", 0444, top, instance,
+ &debugfs_usecount_fops);
+ debugfs_create_file("trace", 0644, top, instance, &debugfs_trace_fops);
+
+ vchiq_instance_get_debugfs_node(instance)->dentry = top;
+}
+
+void vchiq_debugfs_remove_instance(struct vchiq_instance *instance)
+{
+ struct vchiq_debugfs_node *node =
+ vchiq_instance_get_debugfs_node(instance);
+
+ debugfs_remove_recursive(node->dentry);
+}
+
+void vchiq_debugfs_init(struct vchiq_state *state)
+{
+ vchiq_dbg_dir = debugfs_create_dir("vchiq", NULL);
+ vchiq_dbg_clients = debugfs_create_dir("clients", vchiq_dbg_dir);
+
+ debugfs_create_file("state", S_IFREG | 0444, vchiq_dbg_dir, state,
+ &vchiq_dump_fops);
+}
+
+/* remove all the debugfs entries */
+void vchiq_debugfs_deinit(void)
+{
+ debugfs_remove_recursive(vchiq_dbg_dir);
+}
+
+#else /* CONFIG_DEBUG_FS */
+
+void vchiq_debugfs_init(struct vchiq_state *state)
+{
+}
+
+void vchiq_debugfs_deinit(void)
+{
+}
+
+void vchiq_debugfs_add_instance(struct vchiq_instance *instance)
+{
+}
+
+void vchiq_debugfs_remove_instance(struct vchiq_instance *instance)
+{
+}
+
+#endif /* CONFIG_DEBUG_FS */
diff --git a/drivers/platform/raspberrypi/vchiq-interface/vchiq_dev.c b/drivers/platform/raspberrypi/vchiq-interface/vchiq_dev.c
new file mode 100644
index 000000000000..0f3dde2657d6
--- /dev/null
+++ b/drivers/platform/raspberrypi/vchiq-interface/vchiq_dev.c
@@ -0,0 +1,1355 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (c) 2014 Raspberry Pi (Trading) Ltd. All rights reserved.
+ * Copyright (c) 2010-2012 Broadcom. All rights reserved.
+ */
+
+#include <linux/cdev.h>
+#include <linux/fs.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/compat.h>
+#include <linux/miscdevice.h>
+
+#include <linux/raspberrypi/vchiq_core.h>
+#include <linux/raspberrypi/vchiq_arm.h>
+#include <linux/raspberrypi/vchiq_debugfs.h>
+
+#include "vchiq_ioctl.h"
+
+static const char *const ioctl_names[] = {
+ "CONNECT",
+ "SHUTDOWN",
+ "CREATE_SERVICE",
+ "REMOVE_SERVICE",
+ "QUEUE_MESSAGE",
+ "QUEUE_BULK_TRANSMIT",
+ "QUEUE_BULK_RECEIVE",
+ "AWAIT_COMPLETION",
+ "DEQUEUE_MESSAGE",
+ "GET_CLIENT_ID",
+ "GET_CONFIG",
+ "CLOSE_SERVICE",
+ "USE_SERVICE",
+ "RELEASE_SERVICE",
+ "SET_SERVICE_OPTION",
+ "DUMP_PHYS_MEM",
+ "LIB_VERSION",
+ "CLOSE_DELIVERED"
+};
+
+static_assert(ARRAY_SIZE(ioctl_names) == (VCHIQ_IOC_MAX + 1));
+
+static void
+user_service_free(void *userdata)
+{
+ kfree(userdata);
+}
+
+static void close_delivered(struct user_service *user_service)
+{
+ dev_dbg(user_service->service->state->dev,
+ "arm: (handle=%x)\n", user_service->service->handle);
+
+ if (user_service->close_pending) {
+ /* Allow the underlying service to be culled */
+ vchiq_service_put(user_service->service);
+
+ /* Wake the user-thread blocked in close_ or remove_service */
+ complete(&user_service->close_event);
+
+ user_service->close_pending = 0;
+ }
+}
+
+struct vchiq_io_copy_callback_context {
+ struct vchiq_element *element;
+ size_t element_offset;
+ unsigned long elements_to_go;
+};
+
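+/*
+ * Copy callback passed to vchiq_queue_message(). Each call copies up to
+ * maxsize bytes into the destination from the user-supplied element
+ * array, with the context resuming where the previous call stopped.
+ */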
+static ssize_t vchiq_ioc_copy_element_data(void *context, void *dest,
+ size_t offset, size_t maxsize)
+{
+ struct vchiq_io_copy_callback_context *cc = context;
+ size_t total_bytes_copied = 0;
+ size_t bytes_this_round;
+
+ while (total_bytes_copied < maxsize) {
+ if (!cc->elements_to_go)
+ return total_bytes_copied;
+
+ if (!cc->element->size) {
+ cc->elements_to_go--;
+ cc->element++;
+ cc->element_offset = 0;
+ continue;
+ }
+
+ bytes_this_round = min(cc->element->size - cc->element_offset,
+ maxsize - total_bytes_copied);
+
+ if (copy_from_user(dest + total_bytes_copied,
+ cc->element->data + cc->element_offset,
+ bytes_this_round))
+ return -EFAULT;
+
+ cc->element_offset += bytes_this_round;
+ total_bytes_copied += bytes_this_round;
+
+ if (cc->element_offset == cc->element->size) {
+ cc->elements_to_go--;
+ cc->element++;
+ cc->element_offset = 0;
+ }
+ }
+
+ return maxsize;
+}
+
+static int
+vchiq_ioc_queue_message(struct vchiq_instance *instance, unsigned int handle,
+ struct vchiq_element *elements, unsigned long count)
+{
+ struct vchiq_io_copy_callback_context context;
+ int status = 0;
+ unsigned long i;
+ size_t total_size = 0;
+
+ context.element = elements;
+ context.element_offset = 0;
+ context.elements_to_go = count;
+
+ for (i = 0; i < count; i++) {
+ if (!elements[i].data && elements[i].size != 0)
+ return -EFAULT;
+
+ total_size += elements[i].size;
+ }
+
+ status = vchiq_queue_message(instance, handle, vchiq_ioc_copy_element_data,
+ &context, total_size);
+
+ if (status == -EINVAL)
+ return -EIO;
+ else if (status == -EAGAIN)
+ return -EINTR;
+ return 0;
+}
+
+static int vchiq_ioc_create_service(struct vchiq_instance *instance,
+ struct vchiq_create_service *args)
+{
+ struct user_service *user_service = NULL;
+ struct vchiq_service *service;
+ int status = 0;
+ struct vchiq_service_params_kernel params;
+ int srvstate;
+
+ if (args->is_open && !instance->connected)
+ return -ENOTCONN;
+
+ user_service = kmalloc(sizeof(*user_service), GFP_KERNEL);
+ if (!user_service)
+ return -ENOMEM;
+
+ if (args->is_open) {
+ srvstate = VCHIQ_SRVSTATE_OPENING;
+ } else {
+ srvstate = instance->connected ?
+ VCHIQ_SRVSTATE_LISTENING : VCHIQ_SRVSTATE_HIDDEN;
+ }
+
+ params = (struct vchiq_service_params_kernel) {
+ .fourcc = args->params.fourcc,
+ .callback = service_callback,
+ .userdata = user_service,
+ .version = args->params.version,
+ .version_min = args->params.version_min,
+ };
+ service = vchiq_add_service_internal(instance->state, &params,
+ srvstate, instance,
+ user_service_free);
+ if (!service) {
+ kfree(user_service);
+ return -EEXIST;
+ }
+
+ user_service->service = service;
+ user_service->userdata = args->params.userdata;
+ user_service->instance = instance;
+ user_service->is_vchi = (args->is_vchi != 0);
+ user_service->dequeue_pending = 0;
+ user_service->close_pending = 0;
+ user_service->message_available_pos = instance->completion_remove - 1;
+ user_service->msg_insert = 0;
+ user_service->msg_remove = 0;
+ init_completion(&user_service->insert_event);
+ init_completion(&user_service->remove_event);
+ init_completion(&user_service->close_event);
+
+ if (args->is_open) {
+ status = vchiq_open_service_internal(service, instance->pid);
+ if (status) {
+ vchiq_remove_service(instance, service->handle);
+			return (status == -EAGAIN) ? -EINTR : -EIO;
+ }
+ }
+ args->handle = service->handle;
+
+ return 0;
+}
+
+static int vchiq_ioc_dequeue_message(struct vchiq_instance *instance,
+ struct vchiq_dequeue_message *args)
+{
+ struct user_service *user_service;
+ struct vchiq_service *service;
+ struct vchiq_header *header;
+ int ret;
+
+ DEBUG_INITIALISE(instance->state->local);
+ DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
+ service = find_service_for_instance(instance, args->handle);
+ if (!service)
+ return -EINVAL;
+
+ user_service = (struct user_service *)service->base.userdata;
+ if (user_service->is_vchi == 0) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ spin_lock(&service->state->msg_queue_spinlock);
+ if (user_service->msg_remove == user_service->msg_insert) {
+ if (!args->blocking) {
+ spin_unlock(&service->state->msg_queue_spinlock);
+ DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
+ ret = -EWOULDBLOCK;
+ goto out;
+ }
+ user_service->dequeue_pending = 1;
+ ret = 0;
+ do {
+ spin_unlock(&service->state->msg_queue_spinlock);
+ DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
+ if (wait_for_completion_interruptible(&user_service->insert_event)) {
+ dev_dbg(service->state->dev, "arm: DEQUEUE_MESSAGE interrupted\n");
+ ret = -EINTR;
+ break;
+ }
+ spin_lock(&service->state->msg_queue_spinlock);
+ } while (user_service->msg_remove == user_service->msg_insert);
+
+ if (ret)
+ goto out;
+ }
+
+ if (WARN_ON_ONCE((int)(user_service->msg_insert -
+ user_service->msg_remove) < 0)) {
+ spin_unlock(&service->state->msg_queue_spinlock);
+ ret = -EINVAL;
+ goto out;
+ }
+
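+	/* MSG_QUEUE_SIZE is a power of two, so the mask wraps the ring index */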
+ header = user_service->msg_queue[user_service->msg_remove &
+ (MSG_QUEUE_SIZE - 1)];
+ user_service->msg_remove++;
+ spin_unlock(&service->state->msg_queue_spinlock);
+
+ complete(&user_service->remove_event);
+ if (!header) {
+ ret = -ENOTCONN;
+ } else if (header->size <= args->bufsize) {
+ /* Copy to user space if msgbuf is not NULL */
+ if (!args->buf || (copy_to_user(args->buf, header->data, header->size) == 0)) {
+ ret = header->size;
+ vchiq_release_message(instance, service->handle, header);
+ } else {
+ ret = -EFAULT;
+ }
+ } else {
+ dev_err(service->state->dev,
+ "arm: header %p: bufsize %x < size %x\n",
+ header, args->bufsize, header->size);
+ WARN(1, "invalid size\n");
+ ret = -EMSGSIZE;
+ }
+ DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
+out:
+ vchiq_service_put(service);
+ return ret;
+}
+
+static int vchiq_irq_queue_bulk_tx_rx(struct vchiq_instance *instance,
+ struct vchiq_queue_bulk_transfer *args,
+ enum vchiq_bulk_dir dir,
+ enum vchiq_bulk_mode __user *mode)
+{
+ struct vchiq_service *service;
+ struct bulk_waiter_node *waiter = NULL, *iter;
+ struct vchiq_bulk bulk_params = {};
+ int status = 0;
+ int ret;
+
+ service = find_service_for_instance(instance, args->handle);
+ if (!service)
+ return -EINVAL;
+
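+	/*
+	 * BLOCKING waits for completion here; WAITING resumes a transfer
+	 * saved by an earlier call from this pid; other modes complete
+	 * through the service callback.
+	 */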
+ if (args->mode == VCHIQ_BULK_MODE_BLOCKING) {
+ waiter = kzalloc(sizeof(*waiter), GFP_KERNEL);
+ if (!waiter) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ bulk_params.uoffset = args->data;
+ bulk_params.mode = args->mode;
+ bulk_params.size = args->size;
+ bulk_params.dir = dir;
+ bulk_params.waiter = &waiter->bulk_waiter;
+
+ status = vchiq_bulk_xfer_blocking(instance, args->handle,
+ &bulk_params);
+ } else if (args->mode == VCHIQ_BULK_MODE_WAITING) {
+ mutex_lock(&instance->bulk_waiter_list_mutex);
+ list_for_each_entry(iter, &instance->bulk_waiter_list,
+ list) {
+ if (iter->pid == current->pid) {
+ list_del(&iter->list);
+ waiter = iter;
+ break;
+ }
+ }
+ mutex_unlock(&instance->bulk_waiter_list_mutex);
+ if (!waiter) {
+ dev_err(service->state->dev,
+ "arm: no bulk_waiter found for pid %d\n", current->pid);
+ ret = -ESRCH;
+ goto out;
+ }
+ dev_dbg(service->state->dev, "arm: found bulk_waiter %p for pid %d\n",
+ waiter, current->pid);
+
+ status = vchiq_bulk_xfer_waiting(instance, args->handle,
+ &waiter->bulk_waiter);
+ } else {
+ bulk_params.uoffset = args->data;
+ bulk_params.mode = args->mode;
+ bulk_params.size = args->size;
+ bulk_params.dir = dir;
+ bulk_params.cb_userdata = args->userdata;
+
+ status = vchiq_bulk_xfer_callback(instance, args->handle,
+ &bulk_params);
+ }
+
+ if (!waiter) {
+ ret = 0;
+ goto out;
+ }
+
+ if ((status != -EAGAIN) || fatal_signal_pending(current) ||
+ !waiter->bulk_waiter.bulk) {
+ if (waiter->bulk_waiter.bulk) {
+ /* Cancel the signal when the transfer completes. */
+ spin_lock(&service->state->bulk_waiter_spinlock);
+ waiter->bulk_waiter.bulk->waiter = NULL;
+ spin_unlock(&service->state->bulk_waiter_spinlock);
+ }
+ kfree(waiter);
+ ret = 0;
+ } else {
+ const enum vchiq_bulk_mode mode_waiting =
+ VCHIQ_BULK_MODE_WAITING;
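+		/* Transfer still in flight - userspace must reclaim it with MODE_WAITING */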
+ waiter->pid = current->pid;
+ mutex_lock(&instance->bulk_waiter_list_mutex);
+ list_add(&waiter->list, &instance->bulk_waiter_list);
+ mutex_unlock(&instance->bulk_waiter_list_mutex);
+ dev_dbg(service->state->dev, "arm: saved bulk_waiter %p for pid %d\n",
+ waiter, current->pid);
+
+ ret = put_user(mode_waiting, mode);
+ }
+out:
+ vchiq_service_put(service);
+ if (ret)
+ return ret;
+ else if (status == -EINVAL)
+ return -EIO;
+ else if (status == -EAGAIN)
+ return -EINTR;
+ return 0;
+}
+
+/* read a user pointer value from an array of pointers in user space */
+static inline int vchiq_get_user_ptr(void __user **buf, void __user *ubuf, int index)
+{
+ int ret;
+
+ if (in_compat_syscall()) {
+ compat_uptr_t ptr32;
+ compat_uptr_t __user *uptr = ubuf;
+
+ ret = get_user(ptr32, uptr + index);
+ if (ret)
+ return ret;
+
+ *buf = compat_ptr(ptr32);
+ } else {
+ uintptr_t ptr, __user *uptr = ubuf;
+
+ ret = get_user(ptr, uptr + index);
+
+ if (ret)
+ return ret;
+
+ *buf = (void __user *)ptr;
+ }
+
+ return 0;
+}
+
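+/* 32-bit layout of struct vchiq_completion_data, used when the caller is in a compat syscall */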
+struct vchiq_completion_data32 {
+ enum vchiq_reason reason;
+ compat_uptr_t header;
+ compat_uptr_t service_userdata;
+ compat_uptr_t cb_data;
+};
+
+static int vchiq_put_completion(struct vchiq_completion_data __user *buf,
+ struct vchiq_completion_data *completion,
+ int index)
+{
+ struct vchiq_completion_data32 __user *buf32 = (void __user *)buf;
+
+ if (in_compat_syscall()) {
+ struct vchiq_completion_data32 tmp = {
+ .reason = completion->reason,
+ .header = ptr_to_compat(completion->header),
+ .service_userdata = ptr_to_compat(completion->service_userdata),
+ .cb_data = ptr_to_compat(completion->cb_userdata),
+ };
+ if (copy_to_user(&buf32[index], &tmp, sizeof(tmp)))
+ return -EFAULT;
+ } else {
+ if (copy_to_user(&buf[index], completion, sizeof(*completion)))
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int vchiq_ioc_await_completion(struct vchiq_instance *instance,
+ struct vchiq_await_completion *args,
+ int __user *msgbufcountp)
+{
+ int msgbufcount;
+ int remove;
+ int ret;
+
+ DEBUG_INITIALISE(instance->state->local);
+
+ DEBUG_TRACE(AWAIT_COMPLETION_LINE);
+ if (!instance->connected)
+ return -ENOTCONN;
+
+ mutex_lock(&instance->completion_mutex);
+
+ DEBUG_TRACE(AWAIT_COMPLETION_LINE);
+ while ((instance->completion_remove == instance->completion_insert) && !instance->closing) {
+ int rc;
+
+ DEBUG_TRACE(AWAIT_COMPLETION_LINE);
+ mutex_unlock(&instance->completion_mutex);
+ rc = wait_for_completion_interruptible(&instance->insert_event);
+ mutex_lock(&instance->completion_mutex);
+ if (rc) {
+ DEBUG_TRACE(AWAIT_COMPLETION_LINE);
+ dev_dbg(instance->state->dev, "arm: AWAIT_COMPLETION interrupted\n");
+ ret = -EINTR;
+ goto out;
+ }
+ }
+ DEBUG_TRACE(AWAIT_COMPLETION_LINE);
+
+ msgbufcount = args->msgbufcount;
+ remove = instance->completion_remove;
+
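+	/* Drain up to args->count completions; ret counts those copied so far */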
+ for (ret = 0; ret < args->count; ret++) {
+ struct vchiq_completion_data_kernel *completion;
+ struct vchiq_completion_data user_completion;
+ struct vchiq_service *service;
+ struct user_service *user_service;
+ struct vchiq_header *header;
+
+ if (remove == instance->completion_insert)
+ break;
+
+ completion = &instance->completions[remove & (MAX_COMPLETIONS - 1)];
+
+ /*
+ * A read memory barrier is needed to stop
+ * prefetch of a stale completion record
+ */
+ rmb();
+
+ service = completion->service_userdata;
+ user_service = service->base.userdata;
+
+ memset(&user_completion, 0, sizeof(user_completion));
+ user_completion = (struct vchiq_completion_data) {
+ .reason = completion->reason,
+ .service_userdata = user_service->userdata,
+ };
+
+ header = completion->header;
+ if (header) {
+ void __user *msgbuf;
+ int msglen;
+
+ msglen = header->size + sizeof(struct vchiq_header);
+ /* This must be a VCHIQ-style service */
+ if (args->msgbufsize < msglen) {
+ dev_err(service->state->dev,
+ "arm: header %p: msgbufsize %x < msglen %x\n",
+ header, args->msgbufsize, msglen);
+ WARN(1, "invalid message size\n");
+ if (ret == 0)
+ ret = -EMSGSIZE;
+ break;
+ }
+ if (msgbufcount <= 0)
+ /* Stall here for lack of a buffer for the message. */
+ break;
+ /* Get the pointer from user space */
+ msgbufcount--;
+ if (vchiq_get_user_ptr(&msgbuf, args->msgbufs,
+ msgbufcount)) {
+ if (ret == 0)
+ ret = -EFAULT;
+ break;
+ }
+
+ /* Copy the message to user space */
+ if (copy_to_user(msgbuf, header, msglen)) {
+ if (ret == 0)
+ ret = -EFAULT;
+ break;
+ }
+
+ /* Now it has been copied, the message can be released. */
+ vchiq_release_message(instance, service->handle, header);
+
+ /* The completion must point to the msgbuf. */
+ user_completion.header = msgbuf;
+ }
+
+ if ((completion->reason == VCHIQ_SERVICE_CLOSED) &&
+ !instance->use_close_delivered)
+ vchiq_service_put(service);
+
+ user_completion.cb_userdata = completion->cb_userdata;
+
+ if (vchiq_put_completion(args->buf, &user_completion, ret)) {
+ if (ret == 0)
+ ret = -EFAULT;
+ break;
+ }
+
+ /*
+ * Ensure that the above copy has completed
+ * before advancing the remove pointer.
+ */
+ mb();
+ remove++;
+ instance->completion_remove = remove;
+ }
+
+ if (msgbufcount != args->msgbufcount) {
+ if (put_user(msgbufcount, msgbufcountp))
+ ret = -EFAULT;
+ }
+out:
+ if (ret)
+ complete(&instance->remove_event);
+ mutex_unlock(&instance->completion_mutex);
+ DEBUG_TRACE(AWAIT_COMPLETION_LINE);
+
+ return ret;
+}
+
+static long
+vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ struct vchiq_instance *instance = file->private_data;
+ int status = 0;
+ struct vchiq_service *service = NULL;
+ long ret = 0;
+ int i, rc;
+
+ dev_dbg(instance->state->dev, "arm: instance %p, cmd %s, arg %lx\n", instance,
+ ((_IOC_TYPE(cmd) == VCHIQ_IOC_MAGIC) && (_IOC_NR(cmd) <= VCHIQ_IOC_MAX)) ?
+ ioctl_names[_IOC_NR(cmd)] : "<invalid>", arg);
+
+ switch (cmd) {
+ case VCHIQ_IOC_SHUTDOWN:
+ if (!instance->connected)
+ break;
+
+ /* Remove all services */
+ i = 0;
+ while ((service = next_service_by_instance(instance->state,
+ instance, &i))) {
+ status = vchiq_remove_service(instance, service->handle);
+ vchiq_service_put(service);
+ if (status)
+ break;
+ }
+ service = NULL;
+
+ if (!status) {
+ /* Wake the completion thread and ask it to exit */
+ instance->closing = 1;
+ complete(&instance->insert_event);
+ }
+
+ break;
+
+ case VCHIQ_IOC_CONNECT:
+ if (instance->connected) {
+ ret = -EINVAL;
+ break;
+ }
+ rc = mutex_lock_killable(&instance->state->mutex);
+ if (rc) {
+ dev_err(instance->state->dev,
+ "arm: vchiq: connect: could not lock mutex for state %d: %d\n",
+ instance->state->id, rc);
+ ret = -EINTR;
+ break;
+ }
+ status = vchiq_connect_internal(instance->state, instance);
+ mutex_unlock(&instance->state->mutex);
+
+ if (!status)
+ instance->connected = 1;
+ else
+ dev_err(instance->state->dev,
+ "arm: vchiq: could not connect: %d\n", status);
+ break;
+
+ case VCHIQ_IOC_CREATE_SERVICE: {
+ struct vchiq_create_service __user *argp;
+ struct vchiq_create_service args;
+
+ argp = (void __user *)arg;
+ if (copy_from_user(&args, argp, sizeof(args))) {
+ ret = -EFAULT;
+ break;
+ }
+
+ ret = vchiq_ioc_create_service(instance, &args);
+ if (ret < 0)
+ break;
+
+ if (put_user(args.handle, &argp->handle)) {
+ vchiq_remove_service(instance, args.handle);
+ ret = -EFAULT;
+ }
+ } break;
+
+ case VCHIQ_IOC_CLOSE_SERVICE:
+ case VCHIQ_IOC_REMOVE_SERVICE: {
+ unsigned int handle = (unsigned int)arg;
+ struct user_service *user_service;
+
+ service = find_service_for_instance(instance, handle);
+ if (!service) {
+ ret = -EINVAL;
+ break;
+ }
+
+ user_service = service->base.userdata;
+
+ /*
+ * close_pending is false on first entry, and when the
+ * wait in vchiq_close_service has been interrupted.
+ */
+ if (!user_service->close_pending) {
+ status = (cmd == VCHIQ_IOC_CLOSE_SERVICE) ?
+ vchiq_close_service(instance, service->handle) :
+ vchiq_remove_service(instance, service->handle);
+ if (status)
+ break;
+ }
+
+ /*
+ * close_pending is true once the underlying service
+ * has been closed until the client library calls the
+ * CLOSE_DELIVERED ioctl, signalling close_event.
+ */
+ if (user_service->close_pending &&
+ wait_for_completion_interruptible(&user_service->close_event))
+ status = -EAGAIN;
+ break;
+ }
+
+ case VCHIQ_IOC_USE_SERVICE:
+ case VCHIQ_IOC_RELEASE_SERVICE: {
+ unsigned int handle = (unsigned int)arg;
+
+ service = find_service_for_instance(instance, handle);
+ if (service) {
+ ret = (cmd == VCHIQ_IOC_USE_SERVICE) ?
+ vchiq_use_service_internal(service) :
+ vchiq_release_service_internal(service);
+ if (ret) {
+ dev_err(instance->state->dev,
+ "suspend: cmd %s returned error %ld for service %p4cc:%03d\n",
+ (cmd == VCHIQ_IOC_USE_SERVICE) ?
+ "VCHIQ_IOC_USE_SERVICE" :
+ "VCHIQ_IOC_RELEASE_SERVICE",
+ ret, &service->base.fourcc,
+ service->client_id);
+ }
+ } else {
+ ret = -EINVAL;
+ }
+ } break;
+
+ case VCHIQ_IOC_QUEUE_MESSAGE: {
+ struct vchiq_queue_message args;
+
+ if (copy_from_user(&args, (const void __user *)arg,
+ sizeof(args))) {
+ ret = -EFAULT;
+ break;
+ }
+
+ service = find_service_for_instance(instance, args.handle);
+
+ if (service && (args.count <= MAX_ELEMENTS)) {
+ /* Copy elements into kernel space */
+ struct vchiq_element elements[MAX_ELEMENTS];
+
+ if (copy_from_user(elements, args.elements,
+ args.count * sizeof(struct vchiq_element)) == 0)
+ ret = vchiq_ioc_queue_message(instance, args.handle, elements,
+ args.count);
+ else
+ ret = -EFAULT;
+ } else {
+ ret = -EINVAL;
+ }
+ } break;
+
+ case VCHIQ_IOC_QUEUE_BULK_TRANSMIT:
+ case VCHIQ_IOC_QUEUE_BULK_RECEIVE: {
+ struct vchiq_queue_bulk_transfer args;
+ struct vchiq_queue_bulk_transfer __user *argp;
+
+ enum vchiq_bulk_dir dir =
+ (cmd == VCHIQ_IOC_QUEUE_BULK_TRANSMIT) ?
+ VCHIQ_BULK_TRANSMIT : VCHIQ_BULK_RECEIVE;
+
+ argp = (void __user *)arg;
+ if (copy_from_user(&args, argp, sizeof(args))) {
+ ret = -EFAULT;
+ break;
+ }
+
+ ret = vchiq_irq_queue_bulk_tx_rx(instance, &args,
+ dir, &argp->mode);
+ } break;
+
+ case VCHIQ_IOC_AWAIT_COMPLETION: {
+ struct vchiq_await_completion args;
+ struct vchiq_await_completion __user *argp;
+
+ argp = (void __user *)arg;
+ if (copy_from_user(&args, argp, sizeof(args))) {
+ ret = -EFAULT;
+ break;
+ }
+
+ ret = vchiq_ioc_await_completion(instance, &args,
+ &argp->msgbufcount);
+ } break;
+
+ case VCHIQ_IOC_DEQUEUE_MESSAGE: {
+ struct vchiq_dequeue_message args;
+
+ if (copy_from_user(&args, (const void __user *)arg,
+ sizeof(args))) {
+ ret = -EFAULT;
+ break;
+ }
+
+ ret = vchiq_ioc_dequeue_message(instance, &args);
+ } break;
+
+ case VCHIQ_IOC_GET_CLIENT_ID: {
+ unsigned int handle = (unsigned int)arg;
+
+ ret = vchiq_get_client_id(instance, handle);
+ } break;
+
+ case VCHIQ_IOC_GET_CONFIG: {
+ struct vchiq_get_config args;
+ struct vchiq_config config;
+
+ if (copy_from_user(&args, (const void __user *)arg,
+ sizeof(args))) {
+ ret = -EFAULT;
+ break;
+ }
+ if (args.config_size > sizeof(config)) {
+ ret = -EINVAL;
+ break;
+ }
+
+ vchiq_get_config(&config);
+ if (copy_to_user(args.pconfig, &config, args.config_size)) {
+ ret = -EFAULT;
+ break;
+ }
+ } break;
+
+ case VCHIQ_IOC_SET_SERVICE_OPTION: {
+ struct vchiq_set_service_option args;
+
+ if (copy_from_user(&args, (const void __user *)arg,
+ sizeof(args))) {
+ ret = -EFAULT;
+ break;
+ }
+
+ service = find_service_for_instance(instance, args.handle);
+ if (!service) {
+ ret = -EINVAL;
+ break;
+ }
+
+ ret = vchiq_set_service_option(instance, args.handle, args.option,
+ args.value);
+ } break;
+
+ case VCHIQ_IOC_LIB_VERSION: {
+ unsigned int lib_version = (unsigned int)arg;
+
+ if (lib_version < VCHIQ_VERSION_MIN)
+ ret = -EINVAL;
+ else if (lib_version >= VCHIQ_VERSION_CLOSE_DELIVERED)
+ instance->use_close_delivered = 1;
+ } break;
+
+ case VCHIQ_IOC_CLOSE_DELIVERED: {
+ unsigned int handle = (unsigned int)arg;
+
+ service = find_closed_service_for_instance(instance, handle);
+ if (service) {
+ struct user_service *user_service =
+ (struct user_service *)service->base.userdata;
+ close_delivered(user_service);
+ } else {
+ ret = -EINVAL;
+ }
+ } break;
+
+ default:
+ ret = -ENOTTY;
+ break;
+ }
+
+ if (service)
+ vchiq_service_put(service);
+
+ if (ret == 0) {
+ if (status == -EINVAL)
+ ret = -EIO;
+ else if (status == -EAGAIN)
+ ret = -EINTR;
+ }
+
+ if (!status && (ret < 0) && (ret != -EINTR) && (ret != -EWOULDBLOCK)) {
+ dev_dbg(instance->state->dev,
+ "arm: ioctl instance %p, cmd %s -> status %d, %ld\n",
+ instance, (_IOC_NR(cmd) <= VCHIQ_IOC_MAX) ?
+ ioctl_names[_IOC_NR(cmd)] : "<invalid>", status, ret);
+ } else {
+ dev_dbg(instance->state->dev,
+ "arm: ioctl instance %p, cmd %s -> status %d\n, %ld\n",
+ instance, (_IOC_NR(cmd) <= VCHIQ_IOC_MAX) ?
+ ioctl_names[_IOC_NR(cmd)] : "<invalid>", status, ret);
+ }
+
+ return ret;
+}
+
+#if defined(CONFIG_COMPAT)
+
+struct vchiq_service_params32 {
+ int fourcc;
+ compat_uptr_t callback;
+ compat_uptr_t userdata;
+ short version; /* Increment for non-trivial changes */
+ short version_min; /* Update for incompatible changes */
+};
+
+struct vchiq_create_service32 {
+ struct vchiq_service_params32 params;
+ int is_open;
+ int is_vchi;
+ unsigned int handle; /* OUT */
+};
+
+#define VCHIQ_IOC_CREATE_SERVICE32 \
+ _IOWR(VCHIQ_IOC_MAGIC, 2, struct vchiq_create_service32)
+
+static long
+vchiq_compat_ioctl_create_service(struct file *file, unsigned int cmd,
+ struct vchiq_create_service32 __user *ptrargs32)
+{
+ struct vchiq_create_service args;
+ struct vchiq_create_service32 args32;
+ struct vchiq_instance *instance = file->private_data;
+ long ret;
+
+ if (copy_from_user(&args32, ptrargs32, sizeof(args32)))
+ return -EFAULT;
+
+ args = (struct vchiq_create_service) {
+ .params = {
+ .fourcc = args32.params.fourcc,
+ .callback = compat_ptr(args32.params.callback),
+ .userdata = compat_ptr(args32.params.userdata),
+ .version = args32.params.version,
+ .version_min = args32.params.version_min,
+ },
+ .is_open = args32.is_open,
+ .is_vchi = args32.is_vchi,
+ .handle = args32.handle,
+ };
+
+ ret = vchiq_ioc_create_service(instance, &args);
+ if (ret < 0)
+ return ret;
+
+ if (put_user(args.handle, &ptrargs32->handle)) {
+ vchiq_remove_service(instance, args.handle);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+struct vchiq_element32 {
+ compat_uptr_t data;
+ unsigned int size;
+};
+
+struct vchiq_queue_message32 {
+ unsigned int handle;
+ unsigned int count;
+ compat_uptr_t elements;
+};
+
+#define VCHIQ_IOC_QUEUE_MESSAGE32 \
+ _IOW(VCHIQ_IOC_MAGIC, 4, struct vchiq_queue_message32)
+
+static long
+vchiq_compat_ioctl_queue_message(struct file *file,
+ unsigned int cmd,
+ struct vchiq_queue_message32 __user *arg)
+{
+ struct vchiq_queue_message args;
+ struct vchiq_queue_message32 args32;
+ struct vchiq_service *service;
+ struct vchiq_instance *instance = file->private_data;
+ int ret;
+
+ if (copy_from_user(&args32, arg, sizeof(args32)))
+ return -EFAULT;
+
+ args = (struct vchiq_queue_message) {
+ .handle = args32.handle,
+ .count = args32.count,
+ .elements = compat_ptr(args32.elements),
+ };
+
+ if (args32.count > MAX_ELEMENTS)
+ return -EINVAL;
+
+ service = find_service_for_instance(instance, args.handle);
+ if (!service)
+ return -EINVAL;
+
+ if (args32.elements && args32.count) {
+ struct vchiq_element32 element32[MAX_ELEMENTS];
+ struct vchiq_element elements[MAX_ELEMENTS];
+ unsigned int count;
+
+ if (copy_from_user(&element32, args.elements,
+ sizeof(element32))) {
+ vchiq_service_put(service);
+ return -EFAULT;
+ }
+
+ for (count = 0; count < args32.count; count++) {
+ elements[count].data =
+ compat_ptr(element32[count].data);
+ elements[count].size = element32[count].size;
+ }
+ ret = vchiq_ioc_queue_message(instance, args.handle, elements,
+ args.count);
+ } else {
+ ret = -EINVAL;
+ }
+ vchiq_service_put(service);
+
+ return ret;
+}
+
+struct vchiq_queue_bulk_transfer32 {
+ unsigned int handle;
+ compat_uptr_t data;
+ unsigned int size;
+ compat_uptr_t userdata;
+ enum vchiq_bulk_mode mode;
+};
+
+#define VCHIQ_IOC_QUEUE_BULK_TRANSMIT32 \
+ _IOWR(VCHIQ_IOC_MAGIC, 5, struct vchiq_queue_bulk_transfer32)
+#define VCHIQ_IOC_QUEUE_BULK_RECEIVE32 \
+ _IOWR(VCHIQ_IOC_MAGIC, 6, struct vchiq_queue_bulk_transfer32)
+
+static long
+vchiq_compat_ioctl_queue_bulk(struct file *file,
+ unsigned int cmd,
+ struct vchiq_queue_bulk_transfer32 __user *argp)
+{
+ struct vchiq_queue_bulk_transfer32 args32;
+ struct vchiq_queue_bulk_transfer args;
+ enum vchiq_bulk_dir dir = (cmd == VCHIQ_IOC_QUEUE_BULK_TRANSMIT32) ?
+ VCHIQ_BULK_TRANSMIT : VCHIQ_BULK_RECEIVE;
+
+ if (copy_from_user(&args32, argp, sizeof(args32)))
+ return -EFAULT;
+
+ args = (struct vchiq_queue_bulk_transfer) {
+ .handle = args32.handle,
+ .data = compat_ptr(args32.data),
+ .size = args32.size,
+ .userdata = compat_ptr(args32.userdata),
+ .mode = args32.mode,
+ };
+
+ return vchiq_irq_queue_bulk_tx_rx(file->private_data, &args,
+ dir, &argp->mode);
+}
+
+struct vchiq_await_completion32 {
+ unsigned int count;
+ compat_uptr_t buf;
+ unsigned int msgbufsize;
+ unsigned int msgbufcount; /* IN/OUT */
+ compat_uptr_t msgbufs;
+};
+
+#define VCHIQ_IOC_AWAIT_COMPLETION32 \
+ _IOWR(VCHIQ_IOC_MAGIC, 7, struct vchiq_await_completion32)
+
+static long
+vchiq_compat_ioctl_await_completion(struct file *file,
+ unsigned int cmd,
+ struct vchiq_await_completion32 __user *argp)
+{
+ struct vchiq_await_completion args;
+ struct vchiq_await_completion32 args32;
+
+ if (copy_from_user(&args32, argp, sizeof(args32)))
+ return -EFAULT;
+
+ args = (struct vchiq_await_completion) {
+ .count = args32.count,
+ .buf = compat_ptr(args32.buf),
+ .msgbufsize = args32.msgbufsize,
+ .msgbufcount = args32.msgbufcount,
+ .msgbufs = compat_ptr(args32.msgbufs),
+ };
+
+ return vchiq_ioc_await_completion(file->private_data, &args,
+ &argp->msgbufcount);
+}
+
+struct vchiq_dequeue_message32 {
+ unsigned int handle;
+ int blocking;
+ unsigned int bufsize;
+ compat_uptr_t buf;
+};
+
+#define VCHIQ_IOC_DEQUEUE_MESSAGE32 \
+ _IOWR(VCHIQ_IOC_MAGIC, 8, struct vchiq_dequeue_message32)
+
+static long
+vchiq_compat_ioctl_dequeue_message(struct file *file,
+ unsigned int cmd,
+ struct vchiq_dequeue_message32 __user *arg)
+{
+ struct vchiq_dequeue_message32 args32;
+ struct vchiq_dequeue_message args;
+
+ if (copy_from_user(&args32, arg, sizeof(args32)))
+ return -EFAULT;
+
+ args = (struct vchiq_dequeue_message) {
+ .handle = args32.handle,
+ .blocking = args32.blocking,
+ .bufsize = args32.bufsize,
+ .buf = compat_ptr(args32.buf),
+ };
+
+ return vchiq_ioc_dequeue_message(file->private_data, &args);
+}
+
+struct vchiq_get_config32 {
+ unsigned int config_size;
+ compat_uptr_t pconfig;
+};
+
+#define VCHIQ_IOC_GET_CONFIG32 \
+ _IOWR(VCHIQ_IOC_MAGIC, 10, struct vchiq_get_config32)
+
+static long
+vchiq_compat_ioctl_get_config(struct file *file,
+ unsigned int cmd,
+ struct vchiq_get_config32 __user *arg)
+{
+ struct vchiq_get_config32 args32;
+ struct vchiq_config config;
+ void __user *ptr;
+
+ if (copy_from_user(&args32, arg, sizeof(args32)))
+ return -EFAULT;
+ if (args32.config_size > sizeof(config))
+ return -EINVAL;
+
+ vchiq_get_config(&config);
+ ptr = compat_ptr(args32.pconfig);
+ if (copy_to_user(ptr, &config, args32.config_size))
+ return -EFAULT;
+
+ return 0;
+}
+
+static long
+vchiq_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ void __user *argp = compat_ptr(arg);
+
+ switch (cmd) {
+ case VCHIQ_IOC_CREATE_SERVICE32:
+ return vchiq_compat_ioctl_create_service(file, cmd, argp);
+ case VCHIQ_IOC_QUEUE_MESSAGE32:
+ return vchiq_compat_ioctl_queue_message(file, cmd, argp);
+ case VCHIQ_IOC_QUEUE_BULK_TRANSMIT32:
+ case VCHIQ_IOC_QUEUE_BULK_RECEIVE32:
+ return vchiq_compat_ioctl_queue_bulk(file, cmd, argp);
+ case VCHIQ_IOC_AWAIT_COMPLETION32:
+ return vchiq_compat_ioctl_await_completion(file, cmd, argp);
+ case VCHIQ_IOC_DEQUEUE_MESSAGE32:
+ return vchiq_compat_ioctl_dequeue_message(file, cmd, argp);
+ case VCHIQ_IOC_GET_CONFIG32:
+ return vchiq_compat_ioctl_get_config(file, cmd, argp);
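+	/* The remaining ioctls share identical 32-bit and 64-bit layouts */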
+ default:
+ return vchiq_ioctl(file, cmd, (unsigned long)argp);
+ }
+}
+
+#endif
+
+static int vchiq_open(struct inode *inode, struct file *file)
+{
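+	/*
+	 * The misc core set private_data to the miscdevice at open time;
+	 * it is replaced below with the per-process instance.
+	 */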
+ struct miscdevice *vchiq_miscdev = file->private_data;
+ struct vchiq_drv_mgmt *mgmt = dev_get_drvdata(vchiq_miscdev->parent);
+ struct vchiq_state *state = &mgmt->state;
+ struct vchiq_instance *instance;
+
+ dev_dbg(state->dev, "arm: vchiq open\n");
+
+ if (!vchiq_remote_initialised(state)) {
+ dev_dbg(state->dev, "arm: vchiq has no connection to VideoCore\n");
+ return -ENOTCONN;
+ }
+
+ instance = kzalloc(sizeof(*instance), GFP_KERNEL);
+ if (!instance)
+ return -ENOMEM;
+
+ instance->state = state;
+ instance->pid = current->tgid;
+
+ vchiq_debugfs_add_instance(instance);
+
+ init_completion(&instance->insert_event);
+ init_completion(&instance->remove_event);
+ mutex_init(&instance->completion_mutex);
+ mutex_init(&instance->bulk_waiter_list_mutex);
+ INIT_LIST_HEAD(&instance->bulk_waiter_list);
+
+ file->private_data = instance;
+
+ return 0;
+}
+
+static int vchiq_release(struct inode *inode, struct file *file)
+{
+ struct vchiq_instance *instance = file->private_data;
+ struct vchiq_state *state = instance->state;
+ struct vchiq_service *service;
+ int ret = 0;
+ int i;
+
+ dev_dbg(state->dev, "arm: instance=%p\n", instance);
+
+ if (!vchiq_remote_initialised(state)) {
+ ret = -EPERM;
+ goto out;
+ }
+
+ /* Ensure videocore is awake to allow termination. */
+ vchiq_use_internal(instance->state, NULL, USE_TYPE_VCHIQ);
+
+ mutex_lock(&instance->completion_mutex);
+
+ /* Wake the completion thread and ask it to exit */
+ instance->closing = 1;
+ complete(&instance->insert_event);
+
+ mutex_unlock(&instance->completion_mutex);
+
+ /* Wake the slot handler if the completion queue is full. */
+ complete(&instance->remove_event);
+
+ /* Mark all services for termination... */
+ i = 0;
+ while ((service = next_service_by_instance(state, instance, &i))) {
+ struct user_service *user_service = service->base.userdata;
+
+ /* Wake the slot handler if the msg queue is full. */
+ complete(&user_service->remove_event);
+
+ vchiq_terminate_service_internal(service);
+ vchiq_service_put(service);
+ }
+
+ /* ...and wait for them to die */
+ i = 0;
+ while ((service = next_service_by_instance(state, instance, &i))) {
+ struct user_service *user_service = service->base.userdata;
+
+ wait_for_completion(&service->remove_event);
+
+ if (WARN_ON(service->srvstate != VCHIQ_SRVSTATE_FREE)) {
+ vchiq_service_put(service);
+ break;
+ }
+
+ spin_lock(&service->state->msg_queue_spinlock);
+
+ while (user_service->msg_remove != user_service->msg_insert) {
+ struct vchiq_header *header;
+ int m = user_service->msg_remove & (MSG_QUEUE_SIZE - 1);
+
+ header = user_service->msg_queue[m];
+ user_service->msg_remove++;
+ spin_unlock(&service->state->msg_queue_spinlock);
+
+ if (header)
+ vchiq_release_message(instance, service->handle, header);
+ spin_lock(&service->state->msg_queue_spinlock);
+ }
+
+ spin_unlock(&service->state->msg_queue_spinlock);
+
+ vchiq_service_put(service);
+ }
+
+ /* Release any closed services */
+ while (instance->completion_remove != instance->completion_insert) {
+ struct vchiq_completion_data_kernel *completion;
+ struct vchiq_service *service;
+
+ completion = &instance->completions[instance->completion_remove
+ & (MAX_COMPLETIONS - 1)];
+ service = completion->service_userdata;
+ if (completion->reason == VCHIQ_SERVICE_CLOSED) {
+ struct user_service *user_service =
+ service->base.userdata;
+
+ /* Wake any blocked user-thread */
+ if (instance->use_close_delivered)
+ complete(&user_service->close_event);
+ vchiq_service_put(service);
+ }
+ instance->completion_remove++;
+ }
+
+ /* Release the PEER service count. */
+ vchiq_release_internal(instance->state, NULL);
+
+ free_bulk_waiter(instance);
+
+ vchiq_debugfs_remove_instance(instance);
+
+ kfree(instance);
+ file->private_data = NULL;
+
+out:
+ return ret;
+}
+
+static const struct file_operations
+vchiq_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = vchiq_ioctl,
+#if defined(CONFIG_COMPAT)
+ .compat_ioctl = vchiq_compat_ioctl,
+#endif
+ .open = vchiq_open,
+ .release = vchiq_release,
+};
+
+static struct miscdevice vchiq_miscdev = {
+ .fops = &vchiq_fops,
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "vchiq",
+};
+
+/**
+ * vchiq_register_chrdev - Register the char driver for vchiq
+ * @parent: The parent of the char device.
+ *
+ * Create the necessary class and device files in userspace.
+ *
+ * Returns 0 on success, else an error code.
+ */
+int vchiq_register_chrdev(struct device *parent)
+{
+ vchiq_miscdev.parent = parent;
+
+ return misc_register(&vchiq_miscdev);
+}
+
+/**
+ * vchiq_deregister_chrdev - Deregister and clean up the vchiq char
+ * driver and device files
+ */
+void vchiq_deregister_chrdev(void)
+{
+ misc_deregister(&vchiq_miscdev);
+}
diff --git a/drivers/platform/raspberrypi/vchiq-interface/vchiq_ioctl.h b/drivers/platform/raspberrypi/vchiq-interface/vchiq_ioctl.h
new file mode 100644
index 000000000000..d0c759f6d8ea
--- /dev/null
+++ b/drivers/platform/raspberrypi/vchiq-interface/vchiq_ioctl.h
@@ -0,0 +1,112 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright (c) 2010-2012 Broadcom. All rights reserved. */
+
+#ifndef VCHIQ_IOCTLS_H
+#define VCHIQ_IOCTLS_H
+
+#include <linux/ioctl.h>
+#include <linux/raspberrypi/vchiq.h>
+
+#define VCHIQ_IOC_MAGIC 0xc4
+#define VCHIQ_INVALID_HANDLE (~0)
+
+struct vchiq_service_params {
+ int fourcc;
+ int __user (*callback)(enum vchiq_reason reason,
+ struct vchiq_header *header,
+ unsigned int handle,
+ void *bulk_userdata);
+ void __user *userdata;
+ short version; /* Increment for non-trivial changes */
+ short version_min; /* Update for incompatible changes */
+};
+
+struct vchiq_create_service {
+ struct vchiq_service_params params;
+ int is_open;
+ int is_vchi;
+ unsigned int handle; /* OUT */
+};
+
+struct vchiq_queue_message {
+ unsigned int handle;
+ unsigned int count;
+ const struct vchiq_element __user *elements;
+};
+
+struct vchiq_queue_bulk_transfer {
+ unsigned int handle;
+ void __user *data;
+ unsigned int size;
+ void __user *userdata;
+ enum vchiq_bulk_mode mode;
+};
+
+struct vchiq_completion_data {
+ enum vchiq_reason reason;
+ struct vchiq_header __user *header;
+ void __user *service_userdata;
+ void __user *cb_userdata;
+};
+
+struct vchiq_await_completion {
+ unsigned int count;
+ struct vchiq_completion_data __user *buf;
+ unsigned int msgbufsize;
+ unsigned int msgbufcount; /* IN/OUT */
+ void * __user *msgbufs;
+};
+
+struct vchiq_dequeue_message {
+ unsigned int handle;
+ int blocking;
+ unsigned int bufsize;
+ void __user *buf;
+};
+
+struct vchiq_get_config {
+ unsigned int config_size;
+ struct vchiq_config __user *pconfig;
+};
+
+struct vchiq_set_service_option {
+ unsigned int handle;
+ enum vchiq_service_option option;
+ int value;
+};
+
+struct vchiq_dump_mem {
+ void __user *virt_addr;
+ size_t num_bytes;
+};
+
+#define VCHIQ_IOC_CONNECT _IO(VCHIQ_IOC_MAGIC, 0)
+#define VCHIQ_IOC_SHUTDOWN _IO(VCHIQ_IOC_MAGIC, 1)
+#define VCHIQ_IOC_CREATE_SERVICE \
+ _IOWR(VCHIQ_IOC_MAGIC, 2, struct vchiq_create_service)
+#define VCHIQ_IOC_REMOVE_SERVICE _IO(VCHIQ_IOC_MAGIC, 3)
+#define VCHIQ_IOC_QUEUE_MESSAGE \
+ _IOW(VCHIQ_IOC_MAGIC, 4, struct vchiq_queue_message)
+#define VCHIQ_IOC_QUEUE_BULK_TRANSMIT \
+ _IOWR(VCHIQ_IOC_MAGIC, 5, struct vchiq_queue_bulk_transfer)
+#define VCHIQ_IOC_QUEUE_BULK_RECEIVE \
+ _IOWR(VCHIQ_IOC_MAGIC, 6, struct vchiq_queue_bulk_transfer)
+#define VCHIQ_IOC_AWAIT_COMPLETION \
+ _IOWR(VCHIQ_IOC_MAGIC, 7, struct vchiq_await_completion)
+#define VCHIQ_IOC_DEQUEUE_MESSAGE \
+ _IOWR(VCHIQ_IOC_MAGIC, 8, struct vchiq_dequeue_message)
+#define VCHIQ_IOC_GET_CLIENT_ID _IO(VCHIQ_IOC_MAGIC, 9)
+#define VCHIQ_IOC_GET_CONFIG \
+ _IOWR(VCHIQ_IOC_MAGIC, 10, struct vchiq_get_config)
+#define VCHIQ_IOC_CLOSE_SERVICE _IO(VCHIQ_IOC_MAGIC, 11)
+#define VCHIQ_IOC_USE_SERVICE _IO(VCHIQ_IOC_MAGIC, 12)
+#define VCHIQ_IOC_RELEASE_SERVICE _IO(VCHIQ_IOC_MAGIC, 13)
+#define VCHIQ_IOC_SET_SERVICE_OPTION \
+ _IOW(VCHIQ_IOC_MAGIC, 14, struct vchiq_set_service_option)
+#define VCHIQ_IOC_DUMP_PHYS_MEM \
+ _IOW(VCHIQ_IOC_MAGIC, 15, struct vchiq_dump_mem)
+#define VCHIQ_IOC_LIB_VERSION _IO(VCHIQ_IOC_MAGIC, 16)
+#define VCHIQ_IOC_CLOSE_DELIVERED _IO(VCHIQ_IOC_MAGIC, 17)
+#define VCHIQ_IOC_MAX 17
+
+#endif
diff --git a/drivers/platform/raspberrypi/vchiq-mmal/Kconfig b/drivers/platform/raspberrypi/vchiq-mmal/Kconfig
new file mode 100644
index 000000000000..c99525a0bb45
--- /dev/null
+++ b/drivers/platform/raspberrypi/vchiq-mmal/Kconfig
@@ -0,0 +1,7 @@
+config BCM2835_VCHIQ_MMAL
+ tristate "BCM2835 MMAL VCHIQ service"
+ depends on BCM2835_VCHIQ
+ help
+	  Enables the MMAL API over the VCHIQ interface as used for the
+	  majority of the multimedia services on VideoCore.
+	  Defaults to Y when the Broadcom BCM2835 camera host is selected.
diff --git a/drivers/platform/raspberrypi/vchiq-mmal/Makefile b/drivers/platform/raspberrypi/vchiq-mmal/Makefile
new file mode 100644
index 000000000000..6937f6534c26
--- /dev/null
+++ b/drivers/platform/raspberrypi/vchiq-mmal/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
+bcm2835-mmal-vchiq-objs := mmal-vchiq.o
+
+obj-$(CONFIG_BCM2835_VCHIQ_MMAL) += bcm2835-mmal-vchiq.o
diff --git a/drivers/platform/raspberrypi/vchiq-mmal/mmal-common.h b/drivers/platform/raspberrypi/vchiq-mmal/mmal-common.h
new file mode 100644
index 000000000000..b33129403a30
--- /dev/null
+++ b/drivers/platform/raspberrypi/vchiq-mmal/mmal-common.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Broadcom BCM2835 V4L2 driver
+ *
+ * Copyright © 2013 Raspberry Pi (Trading) Ltd.
+ *
+ * Authors: Vincent Sanders @ Collabora
+ * Dave Stevenson @ Broadcom
+ * (now dave.stevenson@raspberrypi.org)
+ * Simon Mellor @ Broadcom
+ * Luke Diamand @ Broadcom
+ *
+ * MMAL structures
+ *
+ */
+#ifndef MMAL_COMMON_H
+#define MMAL_COMMON_H
+
+#define MMAL_FOURCC(a, b, c, d) ((a) | ((b) << 8) | ((c) << 16) | ((d) << 24))
+#define MMAL_MAGIC MMAL_FOURCC('m', 'm', 'a', 'l')
+
+/** Special value signalling that time is not known */
+#define MMAL_TIME_UNKNOWN BIT_ULL(63)
+
+struct mmal_msg_context;
+
+/* mapping between v4l and mmal video modes */
+struct mmal_fmt {
+ u32 fourcc; /* v4l2 format id */
+ int flags; /* v4l2 flags field */
+ u32 mmal;
+ int depth;
+ u32 mmal_component; /* MMAL component index to be used to encode */
+ u32 ybbp; /* depth of first Y plane for planar formats */
+	bool remove_padding; /* Does the GPU have to remove padding,
+			      * or can we hide the padding via bytesperline?
+			      */
+};
+
+/* buffer for one video frame */
+struct mmal_buffer {
+ /* v4l buffer data -- must be first */
+ struct vb2_v4l2_buffer vb;
+
+ /* list of buffers available */
+ struct list_head list;
+
+ void *buffer; /* buffer pointer */
+ unsigned long buffer_size; /* size of allocated buffer */
+
+ struct mmal_msg_context *msg_context;
+
+ unsigned long length;
+ u32 mmal_flags;
+ s64 dts;
+ s64 pts;
+};
+
+/* colour effect parameters */
+struct mmal_colourfx {
+ s32 enable;
+ u32 u;
+ u32 v;
+};
+#endif
diff --git a/drivers/platform/raspberrypi/vchiq-mmal/mmal-encodings.h b/drivers/platform/raspberrypi/vchiq-mmal/mmal-encodings.h
new file mode 100644
index 000000000000..e15ae7b24f73
--- /dev/null
+++ b/drivers/platform/raspberrypi/vchiq-mmal/mmal-encodings.h
@@ -0,0 +1,124 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Broadcom BCM2835 V4L2 driver
+ *
+ * Copyright © 2013 Raspberry Pi (Trading) Ltd.
+ *
+ * Authors: Vincent Sanders @ Collabora
+ * Dave Stevenson @ Broadcom
+ * (now dave.stevenson@raspberrypi.org)
+ * Simon Mellor @ Broadcom
+ * Luke Diamand @ Broadcom
+ */
+#ifndef MMAL_ENCODINGS_H
+#define MMAL_ENCODINGS_H
+
+#define MMAL_ENCODING_H264 MMAL_FOURCC('H', '2', '6', '4')
+#define MMAL_ENCODING_H263 MMAL_FOURCC('H', '2', '6', '3')
+#define MMAL_ENCODING_MP4V MMAL_FOURCC('M', 'P', '4', 'V')
+#define MMAL_ENCODING_MP2V MMAL_FOURCC('M', 'P', '2', 'V')
+#define MMAL_ENCODING_MP1V MMAL_FOURCC('M', 'P', '1', 'V')
+#define MMAL_ENCODING_WMV3 MMAL_FOURCC('W', 'M', 'V', '3')
+#define MMAL_ENCODING_WMV2 MMAL_FOURCC('W', 'M', 'V', '2')
+#define MMAL_ENCODING_WMV1 MMAL_FOURCC('W', 'M', 'V', '1')
+#define MMAL_ENCODING_WVC1 MMAL_FOURCC('W', 'V', 'C', '1')
+#define MMAL_ENCODING_VP8 MMAL_FOURCC('V', 'P', '8', ' ')
+#define MMAL_ENCODING_VP7 MMAL_FOURCC('V', 'P', '7', ' ')
+#define MMAL_ENCODING_VP6 MMAL_FOURCC('V', 'P', '6', ' ')
+#define MMAL_ENCODING_THEORA MMAL_FOURCC('T', 'H', 'E', 'O')
+#define MMAL_ENCODING_SPARK MMAL_FOURCC('S', 'P', 'R', 'K')
+#define MMAL_ENCODING_MJPEG MMAL_FOURCC('M', 'J', 'P', 'G')
+
+#define MMAL_ENCODING_JPEG MMAL_FOURCC('J', 'P', 'E', 'G')
+#define MMAL_ENCODING_GIF MMAL_FOURCC('G', 'I', 'F', ' ')
+#define MMAL_ENCODING_PNG MMAL_FOURCC('P', 'N', 'G', ' ')
+#define MMAL_ENCODING_PPM MMAL_FOURCC('P', 'P', 'M', ' ')
+#define MMAL_ENCODING_TGA MMAL_FOURCC('T', 'G', 'A', ' ')
+#define MMAL_ENCODING_BMP MMAL_FOURCC('B', 'M', 'P', ' ')
+
+#define MMAL_ENCODING_I420 MMAL_FOURCC('I', '4', '2', '0')
+#define MMAL_ENCODING_I420_SLICE MMAL_FOURCC('S', '4', '2', '0')
+#define MMAL_ENCODING_YV12 MMAL_FOURCC('Y', 'V', '1', '2')
+#define MMAL_ENCODING_I422 MMAL_FOURCC('I', '4', '2', '2')
+#define MMAL_ENCODING_I422_SLICE MMAL_FOURCC('S', '4', '2', '2')
+#define MMAL_ENCODING_YUYV MMAL_FOURCC('Y', 'U', 'Y', 'V')
+#define MMAL_ENCODING_YVYU MMAL_FOURCC('Y', 'V', 'Y', 'U')
+#define MMAL_ENCODING_UYVY MMAL_FOURCC('U', 'Y', 'V', 'Y')
+#define MMAL_ENCODING_VYUY MMAL_FOURCC('V', 'Y', 'U', 'Y')
+#define MMAL_ENCODING_NV12 MMAL_FOURCC('N', 'V', '1', '2')
+#define MMAL_ENCODING_NV21 MMAL_FOURCC('N', 'V', '2', '1')
+#define MMAL_ENCODING_ARGB MMAL_FOURCC('A', 'R', 'G', 'B')
+#define MMAL_ENCODING_RGBA MMAL_FOURCC('R', 'G', 'B', 'A')
+#define MMAL_ENCODING_ABGR MMAL_FOURCC('A', 'B', 'G', 'R')
+#define MMAL_ENCODING_BGRA MMAL_FOURCC('B', 'G', 'R', 'A')
+#define MMAL_ENCODING_RGB16 MMAL_FOURCC('R', 'G', 'B', '2')
+#define MMAL_ENCODING_RGB24 MMAL_FOURCC('R', 'G', 'B', '3')
+#define MMAL_ENCODING_RGB32 MMAL_FOURCC('R', 'G', 'B', '4')
+#define MMAL_ENCODING_BGR16 MMAL_FOURCC('B', 'G', 'R', '2')
+#define MMAL_ENCODING_BGR24 MMAL_FOURCC('B', 'G', 'R', '3')
+#define MMAL_ENCODING_BGR32 MMAL_FOURCC('B', 'G', 'R', '4')
+
+/** SAND Video (YUVUV128) format, native format understood by VideoCore.
+ * This format is *not* opaque - if requested you will receive full frames
+ * of YUV_UV video.
+ */
+#define MMAL_ENCODING_YUVUV128 MMAL_FOURCC('S', 'A', 'N', 'D')
+
+/** VideoCore opaque image format, image handles are returned to
+ * the host but not the actual image data.
+ */
+#define MMAL_ENCODING_OPAQUE MMAL_FOURCC('O', 'P', 'Q', 'V')
+
+/** An EGL image handle
+ */
+#define MMAL_ENCODING_EGL_IMAGE MMAL_FOURCC('E', 'G', 'L', 'I')
+
+/* @} */
+
+/** \name Pre-defined audio encodings */
+/* @{ */
+#define MMAL_ENCODING_PCM_UNSIGNED_BE MMAL_FOURCC('P', 'C', 'M', 'U')
+#define MMAL_ENCODING_PCM_UNSIGNED_LE MMAL_FOURCC('p', 'c', 'm', 'u')
+#define MMAL_ENCODING_PCM_SIGNED_BE MMAL_FOURCC('P', 'C', 'M', 'S')
+#define MMAL_ENCODING_PCM_SIGNED_LE MMAL_FOURCC('p', 'c', 'm', 's')
+#define MMAL_ENCODING_PCM_FLOAT_BE MMAL_FOURCC('P', 'C', 'M', 'F')
+#define MMAL_ENCODING_PCM_FLOAT_LE MMAL_FOURCC('p', 'c', 'm', 'f')
+
+/* Pre-defined H264 encoding variants */
+
+/** ISO 14496-10 Annex B byte stream format */
+#define MMAL_ENCODING_VARIANT_H264_DEFAULT 0
+/** ISO 14496-15 AVC stream format */
+#define MMAL_ENCODING_VARIANT_H264_AVC1 MMAL_FOURCC('A', 'V', 'C', '1')
+/** Implicitly delineated NAL units without emulation prevention */
+#define MMAL_ENCODING_VARIANT_H264_RAW MMAL_FOURCC('R', 'A', 'W', ' ')
+
+/** \defgroup MmalColorSpace List of pre-defined video color spaces
+ * This defines a list of common color spaces. This list isn't exhaustive and
+ * is only provided as a convenience to avoid clients having to use FourCC
+ * codes directly. However components are allowed to define and use their own
+ * FourCC codes.
+ */
+/* @{ */
+
+/** Unknown color space */
+#define MMAL_COLOR_SPACE_UNKNOWN 0
+/** ITU-R BT.601-5 [SDTV] */
+#define MMAL_COLOR_SPACE_ITUR_BT601 MMAL_FOURCC('Y', '6', '0', '1')
+/** ITU-R BT.709-3 [HDTV] */
+#define MMAL_COLOR_SPACE_ITUR_BT709 MMAL_FOURCC('Y', '7', '0', '9')
+/** JPEG JFIF */
+#define MMAL_COLOR_SPACE_JPEG_JFIF MMAL_FOURCC('Y', 'J', 'F', 'I')
+/** Title 47 Code of Federal Regulations (2003) 73.682 (a) (20) */
+#define MMAL_COLOR_SPACE_FCC MMAL_FOURCC('Y', 'F', 'C', 'C')
+/** Society of Motion Picture and Television Engineers 240M (1999) */
+#define MMAL_COLOR_SPACE_SMPTE240M MMAL_FOURCC('Y', '2', '4', '0')
+/** ITU-R BT.470-2 System M */
+#define MMAL_COLOR_SPACE_BT470_2_M MMAL_FOURCC('Y', '_', '_', 'M')
+/** ITU-R BT.470-2 System BG */
+#define MMAL_COLOR_SPACE_BT470_2_BG MMAL_FOURCC('Y', '_', 'B', 'G')
+/** JPEG JFIF, but with 16..255 luma */
+#define MMAL_COLOR_SPACE_JFIF_Y16_255 MMAL_FOURCC('Y', 'Y', '1', '6')
+/* @} MmalColorSpace List */
+
+#endif /* MMAL_ENCODINGS_H */
diff --git a/drivers/platform/raspberrypi/vchiq-mmal/mmal-msg-common.h b/drivers/platform/raspberrypi/vchiq-mmal/mmal-msg-common.h
new file mode 100644
index 000000000000..492d4c5dca08
--- /dev/null
+++ b/drivers/platform/raspberrypi/vchiq-mmal/mmal-msg-common.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Broadcom BCM2835 V4L2 driver
+ *
+ * Copyright © 2013 Raspberry Pi (Trading) Ltd.
+ *
+ * Authors: Vincent Sanders @ Collabora
+ * Dave Stevenson @ Broadcom
+ * (now dave.stevenson@raspberrypi.org)
+ * Simon Mellor @ Broadcom
+ * Luke Diamand @ Broadcom
+ */
+
+#ifndef MMAL_MSG_COMMON_H
+#define MMAL_MSG_COMMON_H
+
+#include <linux/types.h>
+
+enum mmal_msg_status {
+ MMAL_MSG_STATUS_SUCCESS = 0, /**< Success */
+ MMAL_MSG_STATUS_ENOMEM, /**< Out of memory */
+ MMAL_MSG_STATUS_ENOSPC, /**< Out of resources other than memory */
+ MMAL_MSG_STATUS_EINVAL, /**< Argument is invalid */
+ MMAL_MSG_STATUS_ENOSYS, /**< Function not implemented */
+ MMAL_MSG_STATUS_ENOENT, /**< No such file or directory */
+ MMAL_MSG_STATUS_ENXIO, /**< No such device or address */
+ MMAL_MSG_STATUS_EIO, /**< I/O error */
+ MMAL_MSG_STATUS_ESPIPE, /**< Illegal seek */
+	MMAL_MSG_STATUS_ECORRUPT,	/**< Data is corrupt */
+ MMAL_MSG_STATUS_ENOTREADY, /**< Component is not ready */
+ MMAL_MSG_STATUS_ECONFIG, /**< Component is not configured */
+ MMAL_MSG_STATUS_EISCONN, /**< Port is already connected */
+ MMAL_MSG_STATUS_ENOTCONN, /**< Port is disconnected */
+ MMAL_MSG_STATUS_EAGAIN, /**< Resource temporarily unavailable. */
+ MMAL_MSG_STATUS_EFAULT, /**< Bad address */
+};
+
+struct mmal_rect {
+ s32 x; /**< x coordinate (from left) */
+ s32 y; /**< y coordinate (from top) */
+ s32 width; /**< width */
+ s32 height; /**< height */
+};
+
+#endif /* MMAL_MSG_COMMON_H */
diff --git a/drivers/platform/raspberrypi/vchiq-mmal/mmal-msg-format.h b/drivers/platform/raspberrypi/vchiq-mmal/mmal-msg-format.h
new file mode 100644
index 000000000000..5569876d8c7d
--- /dev/null
+++ b/drivers/platform/raspberrypi/vchiq-mmal/mmal-msg-format.h
@@ -0,0 +1,108 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Broadcom BCM2835 V4L2 driver
+ *
+ * Copyright © 2013 Raspberry Pi (Trading) Ltd.
+ *
+ * Authors: Vincent Sanders @ Collabora
+ * Dave Stevenson @ Broadcom
+ * (now dave.stevenson@raspberrypi.org)
+ * Simon Mellor @ Broadcom
+ * Luke Diamand @ Broadcom
+ */
+
+#ifndef MMAL_MSG_FORMAT_H
+#define MMAL_MSG_FORMAT_H
+
+#include <linux/math.h>
+
+#include "mmal-msg-common.h"
+
+/* MMAL_ES_FORMAT_T */
+
+struct mmal_audio_format {
+ u32 channels; /* Number of audio channels */
+ u32 sample_rate; /* Sample rate */
+
+ u32 bits_per_sample; /* Bits per sample */
+ u32 block_align; /* Size of a block of data */
+};
+
+struct mmal_video_format {
+ u32 width; /* Width of frame in pixels */
+ u32 height; /* Height of frame in rows of pixels */
+ struct mmal_rect crop; /* Visible region of the frame */
+ struct s32_fract frame_rate; /* Frame rate */
+ struct s32_fract par; /* Pixel aspect ratio */
+
+ /*
+ * FourCC specifying the color space of the video stream. See the
+ * MmalColorSpace "pre-defined color spaces" for some examples.
+ */
+ u32 color_space;
+};
+
+struct mmal_subpicture_format {
+ u32 x_offset;
+ u32 y_offset;
+};
+
+union mmal_es_specific_format {
+ struct mmal_audio_format audio;
+ struct mmal_video_format video;
+ struct mmal_subpicture_format subpicture;
+};
+
+/* Definition of an elementary stream format (MMAL_ES_FORMAT_T) */
+struct mmal_es_format_local {
+ u32 type; /* enum mmal_es_type */
+
+ u32 encoding; /* FourCC specifying encoding of the elementary
+ * stream.
+ */
+ u32 encoding_variant; /* FourCC specifying the specific
+ * encoding variant of the elementary
+ * stream.
+ */
+
+ union mmal_es_specific_format *es; /* Type specific
+ * information for the
+ * elementary stream
+ */
+
+ u32 bitrate; /* Bitrate in bits per second */
+ u32 flags; /* Flags describing properties of the elementary
+ * stream.
+ */
+
+ u32 extradata_size; /* Size of the codec specific data */
+ u8 *extradata; /* Codec specific data */
+};
+
+/* Remote definition of an elementary stream format (MMAL_ES_FORMAT_T) */
+struct mmal_es_format {
+ u32 type; /* enum mmal_es_type */
+
+ u32 encoding; /* FourCC specifying encoding of the elementary
+ * stream.
+ */
+ u32 encoding_variant; /* FourCC specifying the specific
+ * encoding variant of the elementary
+ * stream.
+ */
+
+ u32 es; /* Type specific
+ * information for the
+ * elementary stream
+ */
+
+ u32 bitrate; /* Bitrate in bits per second */
+ u32 flags; /* Flags describing properties of the elementary
+ * stream.
+ */
+
+ u32 extradata_size; /* Size of the codec specific data */
+ u32 extradata; /* Codec specific data */
+};
+
+#endif /* MMAL_MSG_FORMAT_H */
diff --git a/drivers/platform/raspberrypi/vchiq-mmal/mmal-msg-port.h b/drivers/platform/raspberrypi/vchiq-mmal/mmal-msg-port.h
new file mode 100644
index 000000000000..6ee4c1ed7f19
--- /dev/null
+++ b/drivers/platform/raspberrypi/vchiq-mmal/mmal-msg-port.h
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Broadcom BCM2835 V4L2 driver
+ *
+ * Copyright © 2013 Raspberry Pi (Trading) Ltd.
+ *
+ * Authors: Vincent Sanders @ Collabora
+ * Dave Stevenson @ Broadcom
+ * (now dave.stevenson@raspberrypi.org)
+ * Simon Mellor @ Broadcom
+ * Luke Diamand @ Broadcom
+ */
+
+/* MMAL_PORT_TYPE_T */
+enum mmal_port_type {
+ MMAL_PORT_TYPE_UNKNOWN = 0, /* Unknown port type */
+ MMAL_PORT_TYPE_CONTROL, /* Control port */
+ MMAL_PORT_TYPE_INPUT, /* Input port */
+ MMAL_PORT_TYPE_OUTPUT, /* Output port */
+ MMAL_PORT_TYPE_CLOCK, /* Clock port */
+};
+
+/* The port is pass-through and doesn't need buffer headers allocated */
+#define MMAL_PORT_CAPABILITY_PASSTHROUGH 0x01
+/*
+ * The port wants to allocate the buffer payloads.
+ * This signals a preference that payload allocation should be done
+ * on this port for efficiency reasons.
+ */
+#define MMAL_PORT_CAPABILITY_ALLOCATION 0x02
+/*
+ * The port supports format change events.
+ * This applies to input ports and is used to let the client know
+ * whether the port supports being reconfigured via a format
+ * change event (i.e. without having to disable the port).
+ */
+#define MMAL_PORT_CAPABILITY_SUPPORTS_EVENT_FORMAT_CHANGE 0x04
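+
+/*
+ * Illustrative sketch, not part of the original patch: these
+ * capabilities are reported as a bitmask in struct mmal_port below,
+ * so a client tests them with a simple mask, e.g.:
+ *
+ *	if (port->capabilities & MMAL_PORT_CAPABILITY_PASSTHROUGH)
+ *		skip_payload_allocation();
+ *
+ * where skip_payload_allocation() is a hypothetical helper.
+ */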
+
+/*
+ * mmal port structure (MMAL_PORT_T)
+ *
+ * Most elements are informational only; the pointer values for
+ * interrogation messages are generally provided as additional
+ * structures within the message. When used to set values, only the
+ * buffer_num, buffer_size and userdata parameters are writable.
+ */
+struct mmal_port {
+ u32 priv; /* Private member used by the framework */
+ u32 name; /* Port name. Used for debugging purposes (RO) */
+
+ u32 type; /* Type of the port (RO) enum mmal_port_type */
+ u16 index; /* Index of the port in its type list (RO) */
+ u16 index_all; /* Index of the port in the list of all ports (RO) */
+
+ u32 is_enabled; /* Indicates whether the port is enabled or not (RO) */
+ u32 format; /* Format of the elementary stream */
+
+ u32 buffer_num_min; /* Minimum number of buffers the port
+ * requires (RO). This is set by the
+ * component.
+ */
+
+ u32 buffer_size_min; /* Minimum size of buffers the port
+ * requires (RO). This is set by the
+ * component.
+ */
+
+ u32 buffer_alignment_min;/* Minimum alignment requirement for
+ * the buffers (RO). A value of
+ * zero means no special alignment
+ * requirements. This is set by the
+ * component.
+ */
+
+ u32 buffer_num_recommended; /* Number of buffers the port
+ * recommends for optimal
+ * performance (RO). A value of
+ * zero means no special
+ * recommendation. This is set
+ * by the component.
+ */
+
+ u32 buffer_size_recommended; /* Size of buffers the port
+ * recommends for optimal
+ * performance (RO). A value of
+ * zero means no special
+ * recommendation. This is set
+ * by the component.
+ */
+
+ u32 buffer_num; /* Actual number of buffers the port will use.
+ * This is set by the client.
+ */
+
+ u32 buffer_size; /* Actual maximum size of the buffers that
+ * will be sent to the port. This is set by
+ * the client.
+ */
+
+ u32 component; /* Component this port belongs to (RO) */
+
+ u32 userdata; /* Field reserved for use by the client */
+
+ u32 capabilities; /* Flags describing the capabilities of a
+ * port (RO). Bitwise combination of the
+ * MMAL_PORT_CAPABILITY_* values defined
+ * above.
+ */
+};
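+
+/*
+ * Illustrative sketch, not part of the original patch: when setting
+ * port values the usual pattern would be to take the struct returned
+ * by a PORT_INFO_GET request and modify only the writable fields
+ * before sending it back with PORT_INFO_SET, e.g.:
+ *
+ *	port.buffer_num = port.buffer_num_recommended;
+ *	port.buffer_size = port.buffer_size_recommended;
+ */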
diff --git a/drivers/platform/raspberrypi/vchiq-mmal/mmal-msg.h b/drivers/platform/raspberrypi/vchiq-mmal/mmal-msg.h
new file mode 100644
index 000000000000..1889494425eb
--- /dev/null
+++ b/drivers/platform/raspberrypi/vchiq-mmal/mmal-msg.h
@@ -0,0 +1,406 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Broadcom BCM2835 V4L2 driver
+ *
+ * Copyright © 2013 Raspberry Pi (Trading) Ltd.
+ *
+ * Authors: Vincent Sanders @ Collabora
+ * Dave Stevenson @ Broadcom
+ * (now dave.stevenson@raspberrypi.org)
+ * Simon Mellor @ Broadcom
+ * Luke Diamand @ Broadcom
+ */
+
+/*
+ * All the data structures which serialise the MMAL protocol. Note
+ * that these are directly mapped onto the received message data.
+ *
+ * BEWARE: They seem to *assume* pointers are u32 and that there is no
+ * structure padding!
+ *
+ * NOTE: this implementation uses kernel types to ensure sizes. Rather
+ * than assigning values to enums to force their size, the
+ * implementation uses fixed size types and not the enums (though the
+ * comments give the actual enum type).
+ */
+#ifndef MMAL_MSG_H
+#define MMAL_MSG_H
+
+#define VC_MMAL_VER 15
+#define VC_MMAL_MIN_VER 10
+
+/* max total message size is 512 bytes */
+#define MMAL_MSG_MAX_SIZE 512
+/* with six 32-bit header elements the max payload is therefore 488 bytes */
+#define MMAL_MSG_MAX_PAYLOAD 488
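+
+/*
+ * The 488 byte figure follows from the six u32 fields of
+ * struct mmal_msg_header below: 512 - (6 * 4) = 488. A build-time
+ * check of this assumption could look like the following sketch (not
+ * part of the original patch; BUILD_BUG_ON() is the standard kernel
+ * helper for such checks):
+ *
+ *	BUILD_BUG_ON(MMAL_MSG_MAX_PAYLOAD !=
+ *		     MMAL_MSG_MAX_SIZE - sizeof(struct mmal_msg_header));
+ */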
+
+#include "mmal-msg-common.h"
+#include "mmal-msg-format.h"
+#include "mmal-msg-port.h"
+#include "mmal-vchiq.h"
+
+enum mmal_msg_type {
+ MMAL_MSG_TYPE_QUIT = 1,
+ MMAL_MSG_TYPE_SERVICE_CLOSED,
+ MMAL_MSG_TYPE_GET_VERSION,
+ MMAL_MSG_TYPE_COMPONENT_CREATE,
+ MMAL_MSG_TYPE_COMPONENT_DESTROY, /* 5 */
+ MMAL_MSG_TYPE_COMPONENT_ENABLE,
+ MMAL_MSG_TYPE_COMPONENT_DISABLE,
+ MMAL_MSG_TYPE_PORT_INFO_GET,
+ MMAL_MSG_TYPE_PORT_INFO_SET,
+ MMAL_MSG_TYPE_PORT_ACTION, /* 10 */
+ MMAL_MSG_TYPE_BUFFER_FROM_HOST,
+ MMAL_MSG_TYPE_BUFFER_TO_HOST,
+ MMAL_MSG_TYPE_GET_STATS,
+ MMAL_MSG_TYPE_PORT_PARAMETER_SET,
+ MMAL_MSG_TYPE_PORT_PARAMETER_GET, /* 15 */
+ MMAL_MSG_TYPE_EVENT_TO_HOST,
+ MMAL_MSG_TYPE_GET_CORE_STATS_FOR_PORT,
+ MMAL_MSG_TYPE_OPAQUE_ALLOCATOR,
+ MMAL_MSG_TYPE_CONSUME_MEM,
+ MMAL_MSG_TYPE_LMK, /* 20 */
+ MMAL_MSG_TYPE_OPAQUE_ALLOCATOR_DESC,
+ MMAL_MSG_TYPE_DRM_GET_LHS32,
+ MMAL_MSG_TYPE_DRM_GET_TIME,
+ MMAL_MSG_TYPE_BUFFER_FROM_HOST_ZEROLEN,
+ MMAL_MSG_TYPE_PORT_FLUSH, /* 25 */
+ MMAL_MSG_TYPE_HOST_LOG,
+ MMAL_MSG_TYPE_MSG_LAST
+};
+
+/* port action request messages differ depending on the action type */
+enum mmal_msg_port_action_type {
+ MMAL_MSG_PORT_ACTION_TYPE_UNKNOWN = 0, /* Unknown action */
+ MMAL_MSG_PORT_ACTION_TYPE_ENABLE, /* Enable a port */
+ MMAL_MSG_PORT_ACTION_TYPE_DISABLE, /* Disable a port */
+ MMAL_MSG_PORT_ACTION_TYPE_FLUSH, /* Flush a port */
+ MMAL_MSG_PORT_ACTION_TYPE_CONNECT, /* Connect ports */
+ MMAL_MSG_PORT_ACTION_TYPE_DISCONNECT, /* Disconnect ports */
+ MMAL_MSG_PORT_ACTION_TYPE_SET_REQUIREMENTS, /* Set buffer requirements*/
+};
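+
+/*
+ * Illustrative note, not part of the original patch: judging from the
+ * structures below, ENABLE, DISABLE, FLUSH and SET_REQUIREMENTS would
+ * be carried by struct mmal_msg_port_action_port, while CONNECT and
+ * DISCONNECT, which need peer handles, would use
+ * struct mmal_msg_port_action_handle.
+ */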
+
+struct mmal_msg_header {
+ u32 magic;
+ u32 type; /* enum mmal_msg_type */
+
+ /* Opaque handle to the control service */
+ u32 control_service;
+
+ u32 context; /* a u32 per message context */
+ u32 status; /* The status of the vchiq operation */
+ u32 padding;
+};
+
+/* Sent from VC to host to report its version */
+struct mmal_msg_version {
+ u32 flags;
+ u32 major;
+ u32 minor;
+ u32 minimum;
+};
+
+/* request to VC to create component */
+struct mmal_msg_component_create {
+ u32 client_component; /* component context */
+ char name[128];
+ u32 pid; /* For debug */
+};
+
+/* reply from VC to component creation request */
+struct mmal_msg_component_create_reply {
+ u32 status; /* enum mmal_msg_status - how does this differ from
+ * the one in the header?
+ */
+ u32 component_handle; /* VideoCore handle for component */
+ u32 input_num; /* Number of input ports */
+ u32 output_num; /* Number of output ports */
+ u32 clock_num; /* Number of clock ports */
+};
+
+/* request to VC to destroy a component */
+struct mmal_msg_component_destroy {
+ u32 component_handle;
+};
+
+struct mmal_msg_component_destroy_reply {
+ u32 status; /* The component destruction status */
+};
+
+/* request and reply to VC to enable a component */
+struct mmal_msg_component_enable {
+ u32 component_handle;
+};
+
+struct mmal_msg_component_enable_reply {
+ u32 status; /* The component enable status */
+};
+
+/* request and reply to VC to disable a component */
+struct mmal_msg_component_disable {
+ u32 component_handle;
+};
+
+struct mmal_msg_component_disable_reply {
+ u32 status; /* The component disable status */
+};
+
+/* request to VC to get port information */
+struct mmal_msg_port_info_get {
+ u32 component_handle; /* component handle port is associated with */
+ u32 port_type; /* enum mmal_msg_port_type */
+ u32 index; /* port index to query */
+};
+
+/* reply from VC to get port info request */
+struct mmal_msg_port_info_get_reply {
+ u32 status; /* enum mmal_msg_status */
+ u32 component_handle; /* component handle port is associated with */
+ u32 port_type; /* enum mmal_msg_port_type */
+ u32 port_index; /* port indexed in query */
+ s32 found; /* unused */
+ u32 port_handle; /* Handle to use for this port */
+ struct mmal_port port;
+ struct mmal_es_format format; /* elementary stream format */
+ union mmal_es_specific_format es; /* es type specific data */
+ u8 extradata[MMAL_FORMAT_EXTRADATA_MAX_SIZE]; /* es extra data */
+};
+
+/* request to VC to set port information */
+struct mmal_msg_port_info_set {
+ u32 component_handle;
+ u32 port_type; /* enum mmal_msg_port_type */
+ u32 port_index; /* port indexed in query */
+ struct mmal_port port;
+ struct mmal_es_format format;
+ union mmal_es_specific_format es;
+ u8 extradata[MMAL_FORMAT_EXTRADATA_MAX_SIZE];
+};
+
+/* reply from VC to port info set request */
+struct mmal_msg_port_info_set_reply {
+ u32 status;
+ u32 component_handle; /* component handle port is associated with */
+ u32 port_type; /* enum mmal_msg_port_type */
+ u32 index; /* port indexed in query */
+ s32 found; /* unused */
+ u32 port_handle; /* Handle to use for this port */
+ struct mmal_port port;
+ struct mmal_es_format format;
+ union mmal_es_specific_format es;
+ u8 extradata[MMAL_FORMAT_EXTRADATA_MAX_SIZE];
+};
+
+/* port action requests that take a mmal_port as a parameter */
+struct mmal_msg_port_action_port {
+ u32 component_handle;
+ u32 port_handle;
+ u32 action; /* enum mmal_msg_port_action_type */
+ struct mmal_port port;
+};
+
+/* port action requests that take handles as a parameter */
+struct mmal_msg_port_action_handle {
+ u32 component_handle;
+ u32 port_handle;
+ u32 action; /* enum mmal_msg_port_action_type */
+ u32 connect_component_handle;
+ u32 connect_port_handle;
+};
+
+struct mmal_msg_port_action_reply {
+ u32 status; /* The port action operation status */
+};
+
+/* MMAL buffer transfer */
+
+/* Size of space reserved in a buffer message for short messages. */
+#define MMAL_VC_SHORT_DATA 128
+
+/* Signals that the current payload is the end of the stream of data */
+#define MMAL_BUFFER_HEADER_FLAG_EOS BIT(0)
+/* Signals that the start of the current payload starts a frame */
+#define MMAL_BUFFER_HEADER_FLAG_FRAME_START BIT(1)
+/* Signals that the end of the current payload ends a frame */
+#define MMAL_BUFFER_HEADER_FLAG_FRAME_END BIT(2)
+/* Signals that the current payload contains only complete frames (1 or more) */
+#define MMAL_BUFFER_HEADER_FLAG_FRAME \
+ (MMAL_BUFFER_HEADER_FLAG_FRAME_START | \
+ MMAL_BUFFER_HEADER_FLAG_FRAME_END)
+/* Signals that the current payload is a keyframe (i.e. self decodable) */
+#define MMAL_BUFFER_HEADER_FLAG_KEYFRAME BIT(3)
+/*
+ * Signals a discontinuity in the stream of data (e.g. after a seek).
+ * Can be used for instance by a decoder to reset its state
+ */
+#define MMAL_BUFFER_HEADER_FLAG_DISCONTINUITY BIT(4)
+/*
+ * Signals a buffer containing some kind of config data for the component
+ * (e.g. codec config data)
+ */
+#define MMAL_BUFFER_HEADER_FLAG_CONFIG BIT(5)
+/* Signals an encrypted payload */
+#define MMAL_BUFFER_HEADER_FLAG_ENCRYPTED BIT(6)
+/* Signals a buffer containing side information */
+#define MMAL_BUFFER_HEADER_FLAG_CODECSIDEINFO BIT(7)
+/*
+ * Signals a buffer which is the snapshot/postview image from a stills
+ * capture
+ */
+#define MMAL_BUFFER_HEADER_FLAGS_SNAPSHOT BIT(8)
+/* Signals a buffer which contains data known to be corrupted */
+#define MMAL_BUFFER_HEADER_FLAG_CORRUPTED BIT(9)
+/* Signals that a buffer failed to be transmitted */
+#define MMAL_BUFFER_HEADER_FLAG_TRANSMISSION_FAILED BIT(10)
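+
+/*
+ * Illustrative sketch, not part of the original patch: a receiver
+ * normally tests these bits individually on the returned header, e.g.:
+ *
+ *	if (buffer_header.flags & MMAL_BUFFER_HEADER_FLAG_EOS)
+ *		handle_end_of_stream();
+ *
+ * where handle_end_of_stream() is a hypothetical helper.
+ */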
+
+struct mmal_driver_buffer {
+ u32 magic;
+ u32 component_handle;
+ u32 port_handle;
+ u32 client_context;
+};
+
+/* buffer header */
+struct mmal_buffer_header {
+ u32 next; /* next header */
+ u32 priv; /* framework private data */
+ u32 cmd;
+ u32 data;
+ u32 alloc_size;
+ u32 length;
+ u32 offset;
+ u32 flags;
+ s64 pts;
+ s64 dts;
+ u32 type;
+ u32 user_data;
+};
+
+struct mmal_buffer_header_type_specific {
+ union {
+ struct {
+ u32 planes;
+ u32 offset[4];
+ u32 pitch[4];
+ u32 flags;
+ } video;
+ } u;
+};
+
+struct mmal_msg_buffer_from_host {
+ /*
+ * The front 32 bytes of the buffer header are copied
+ * back to us in the reply to allow for context. This
+ * area is used to store two mmal_driver_buffer structures to
+ * allow for multiple concurrent service users.
+ */
+ /* control data */
+ struct mmal_driver_buffer drvbuf;
+
+ /* referenced control data for passthrough buffer management */
+ struct mmal_driver_buffer drvbuf_ref;
+ struct mmal_buffer_header buffer_header; /* buffer header itself */
+ struct mmal_buffer_header_type_specific buffer_header_type_specific;
+ s32 is_zero_copy;
+ s32 has_reference;
+
+ /* allows short data to be xfered in control message */
+ u32 payload_in_message;
+ u8 short_data[MMAL_VC_SHORT_DATA];
+};
+
+/* port parameter setting */
+
+#define MMAL_WORKER_PORT_PARAMETER_SPACE 96
+
+struct mmal_msg_port_parameter_set {
+ u32 component_handle; /* component */
+ u32 port_handle; /* port */
+ u32 id; /* Parameter ID */
+ u32 size; /* Parameter size */
+ u32 value[MMAL_WORKER_PORT_PARAMETER_SPACE];
+};
+
+struct mmal_msg_port_parameter_set_reply {
+ u32 status; /* enum mmal_msg_status todo: how does this
+ * differ from the one in the header?
+ */
+};
+
+/* port parameter getting */
+
+struct mmal_msg_port_parameter_get {
+ u32 component_handle; /* component */
+ u32 port_handle; /* port */
+ u32 id; /* Parameter ID */
+ u32 size; /* Parameter size */
+};
+
+struct mmal_msg_port_parameter_get_reply {
+ u32 status; /* Status of mmal_port_parameter_get call */
+ u32 id; /* Parameter ID */
+ u32 size; /* Parameter size */
+ u32 value[MMAL_WORKER_PORT_PARAMETER_SPACE];
+};
+
+/* event messages */
+#define MMAL_WORKER_EVENT_SPACE 256
+
+struct mmal_msg_event_to_host {
+ u32 client_component; /* component context */
+
+ u32 port_type;
+ u32 port_num;
+
+ u32 cmd;
+ u32 length;
+ u8 data[MMAL_WORKER_EVENT_SPACE];
+ u32 delayed_buffer;
+};
+
+/* all mmal messages are serialised through this structure */
+struct mmal_msg {
+ /* header */
+ struct mmal_msg_header h;
+ /* payload */
+ union {
+ struct mmal_msg_version version;
+
+ struct mmal_msg_component_create component_create;
+ struct mmal_msg_component_create_reply component_create_reply;
+
+ struct mmal_msg_component_destroy component_destroy;
+ struct mmal_msg_component_destroy_reply component_destroy_reply;
+
+ struct mmal_msg_component_enable component_enable;
+ struct mmal_msg_component_enable_reply component_enable_reply;
+
+ struct mmal_msg_component_disable component_disable;
+ struct mmal_msg_component_disable_reply component_disable_reply;
+
+ struct mmal_msg_port_info_get port_info_get;
+ struct mmal_msg_port_info_get_reply port_info_get_reply;
+
+ struct mmal_msg_port_info_set port_info_set;
+ struct mmal_msg_port_info_set_reply port_info_set_reply;
+
+ struct mmal_msg_port_action_port port_action_port;
+ struct mmal_msg_port_action_handle port_action_handle;
+ struct mmal_msg_port_action_reply port_action_reply;
+
+ struct mmal_msg_buffer_from_host buffer_from_host;
+
+ struct mmal_msg_port_parameter_set port_parameter_set;
+ struct mmal_msg_port_parameter_set_reply
+ port_parameter_set_reply;
+ struct mmal_msg_port_parameter_get
+ port_parameter_get;
+ struct mmal_msg_port_parameter_get_reply
+ port_parameter_get_reply;
+
+ struct mmal_msg_event_to_host event_to_host;
+
+ u8 payload[MMAL_MSG_MAX_PAYLOAD];
+ } u;
+};
+#endif
diff --git a/drivers/platform/raspberrypi/vchiq-mmal/mmal-parameters.h b/drivers/platform/raspberrypi/vchiq-mmal/mmal-parameters.h
new file mode 100644
index 000000000000..a0cdd28101f2
--- /dev/null
+++ b/drivers/platform/raspberrypi/vchiq-mmal/mmal-parameters.h
@@ -0,0 +1,752 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Broadcom BCM2835 V4L2 driver
+ *
+ * Copyright © 2013 Raspberry Pi (Trading) Ltd.
+ *
+ * Authors: Vincent Sanders @ Collabora
+ * Dave Stevenson @ Broadcom
+ * (now dave.stevenson@raspberrypi.org)
+ * Simon Mellor @ Broadcom
+ * Luke Diamand @ Broadcom
+ */
+
+/* common parameters */
+
+/** @name Parameter groups
+ * Parameters are divided into groups, and then allocated sequentially within
+ * a group using an enum.
+ * @{
+ */
+
+#ifndef MMAL_PARAMETERS_H
+#define MMAL_PARAMETERS_H
+
+#include <linux/math.h>
+
+/** Common parameter ID group, used with many types of component. */
+#define MMAL_PARAMETER_GROUP_COMMON (0 << 16)
+/** Camera-specific parameter ID group. */
+#define MMAL_PARAMETER_GROUP_CAMERA (1 << 16)
+/** Video-specific parameter ID group. */
+#define MMAL_PARAMETER_GROUP_VIDEO (2 << 16)
+/** Audio-specific parameter ID group. */
+#define MMAL_PARAMETER_GROUP_AUDIO (3 << 16)
+/** Clock-specific parameter ID group. */
+#define MMAL_PARAMETER_GROUP_CLOCK (4 << 16)
+/** Miracast-specific parameter ID group. */
+#define MMAL_PARAMETER_GROUP_MIRACAST (5 << 16)
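+
+/*
+ * Parameter IDs are formed by adding a sequential enum offset to one
+ * of the group bases above; for example (illustrative, not part of
+ * the original patch) MMAL_PARAMETER_ROTATION below evaluates to
+ * MMAL_PARAMETER_GROUP_CAMERA + 2, i.e. 0x00010002.
+ */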
+
+/* Common parameters */
+enum mmal_parameter_common_type {
+ /**< Never a valid parameter ID */
+ MMAL_PARAMETER_UNUSED = MMAL_PARAMETER_GROUP_COMMON,
+
+ /**< MMAL_PARAMETER_ENCODING_T */
+ MMAL_PARAMETER_SUPPORTED_ENCODINGS,
+ /**< MMAL_PARAMETER_URI_T */
+ MMAL_PARAMETER_URI,
+ /** MMAL_PARAMETER_CHANGE_EVENT_REQUEST_T */
+ MMAL_PARAMETER_CHANGE_EVENT_REQUEST,
+ /** MMAL_PARAMETER_BOOLEAN_T */
+ MMAL_PARAMETER_ZERO_COPY,
+ /**< MMAL_PARAMETER_BUFFER_REQUIREMENTS_T */
+ MMAL_PARAMETER_BUFFER_REQUIREMENTS,
+ /**< MMAL_PARAMETER_STATISTICS_T */
+ MMAL_PARAMETER_STATISTICS,
+ /**< MMAL_PARAMETER_CORE_STATISTICS_T */
+ MMAL_PARAMETER_CORE_STATISTICS,
+ /**< MMAL_PARAMETER_MEM_USAGE_T */
+ MMAL_PARAMETER_MEM_USAGE,
+ /**< MMAL_PARAMETER_UINT32_T */
+ MMAL_PARAMETER_BUFFER_FLAG_FILTER,
+ /**< MMAL_PARAMETER_SEEK_T */
+ MMAL_PARAMETER_SEEK,
+ /**< MMAL_PARAMETER_BOOLEAN_T */
+ MMAL_PARAMETER_POWERMON_ENABLE,
+ /**< MMAL_PARAMETER_LOGGING_T */
+ MMAL_PARAMETER_LOGGING,
+ /**< MMAL_PARAMETER_UINT64_T */
+ MMAL_PARAMETER_SYSTEM_TIME,
+ /**< MMAL_PARAMETER_BOOLEAN_T */
+ MMAL_PARAMETER_NO_IMAGE_PADDING,
+};
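+
+/*
+ * Illustrative note, not part of the original patch: parameters are
+ * read and written with the PORT_PARAMETER_GET/SET messages defined
+ * in mmal-msg.h, passing one of these IDs together with the raw
+ * parameter payload (a u32 for boolean parameters such as
+ * MMAL_PARAMETER_ZERO_COPY).
+ */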
+
+/* camera parameters */
+
+enum mmal_parameter_camera_type {
+ /* 0 */
+ /** @ref MMAL_PARAMETER_THUMBNAIL_CONFIG_T */
+ MMAL_PARAMETER_THUMBNAIL_CONFIGURATION =
+ MMAL_PARAMETER_GROUP_CAMERA,
+ /**< Unused? */
+ MMAL_PARAMETER_CAPTURE_QUALITY,
+ /**< @ref MMAL_PARAMETER_INT32_T */
+ MMAL_PARAMETER_ROTATION,
+ /**< @ref MMAL_PARAMETER_BOOLEAN_T */
+ MMAL_PARAMETER_EXIF_DISABLE,
+ /**< @ref MMAL_PARAMETER_EXIF_T */
+ MMAL_PARAMETER_EXIF,
+ /**< @ref MMAL_PARAM_AWBMODE_T */
+ MMAL_PARAMETER_AWB_MODE,
+ /**< @ref MMAL_PARAMETER_IMAGEFX_T */
+ MMAL_PARAMETER_IMAGE_EFFECT,
+ /**< @ref MMAL_PARAMETER_COLOURFX_T */
+ MMAL_PARAMETER_COLOUR_EFFECT,
+ /**< @ref MMAL_PARAMETER_FLICKERAVOID_T */
+ MMAL_PARAMETER_FLICKER_AVOID,
+ /**< @ref MMAL_PARAMETER_FLASH_T */
+ MMAL_PARAMETER_FLASH,
+ /**< @ref MMAL_PARAMETER_REDEYE_T */
+ MMAL_PARAMETER_REDEYE,
+ /**< @ref MMAL_PARAMETER_FOCUS_T */
+ MMAL_PARAMETER_FOCUS,
+ /**< Unused? */
+ MMAL_PARAMETER_FOCAL_LENGTHS,
+ /**< @ref MMAL_PARAMETER_INT32_T */
+ MMAL_PARAMETER_EXPOSURE_COMP,
+ /**< @ref MMAL_PARAMETER_SCALEFACTOR_T */
+ MMAL_PARAMETER_ZOOM,
+ /**< @ref MMAL_PARAMETER_MIRROR_T */
+ MMAL_PARAMETER_MIRROR,
+
+ /* 0x10 */
+ /**< @ref MMAL_PARAMETER_UINT32_T */
+ MMAL_PARAMETER_CAMERA_NUM,
+ /**< @ref MMAL_PARAMETER_BOOLEAN_T */
+ MMAL_PARAMETER_CAPTURE,
+ /**< @ref MMAL_PARAMETER_EXPOSUREMODE_T */
+ MMAL_PARAMETER_EXPOSURE_MODE,
+ /**< @ref MMAL_PARAMETER_EXPOSUREMETERINGMODE_T */
+ MMAL_PARAMETER_EXP_METERING_MODE,
+ /**< @ref MMAL_PARAMETER_FOCUS_STATUS_T */
+ MMAL_PARAMETER_FOCUS_STATUS,
+ /**< @ref MMAL_PARAMETER_CAMERA_CONFIG_T */
+ MMAL_PARAMETER_CAMERA_CONFIG,
+ /**< @ref MMAL_PARAMETER_CAPTURE_STATUS_T */
+ MMAL_PARAMETER_CAPTURE_STATUS,
+ /**< @ref MMAL_PARAMETER_FACE_TRACK_T */
+ MMAL_PARAMETER_FACE_TRACK,
+ /**< @ref MMAL_PARAMETER_BOOLEAN_T */
+ MMAL_PARAMETER_DRAW_BOX_FACES_AND_FOCUS,
+ /**< @ref MMAL_PARAMETER_UINT32_T */
+ MMAL_PARAMETER_JPEG_Q_FACTOR,
+ /**< @ref MMAL_PARAMETER_FRAME_RATE_T */
+ MMAL_PARAMETER_FRAME_RATE,
+ /**< @ref MMAL_PARAMETER_CAMERA_STC_MODE_T */
+ MMAL_PARAMETER_USE_STC,
+ /**< @ref MMAL_PARAMETER_CAMERA_INFO_T */
+ MMAL_PARAMETER_CAMERA_INFO,
+ /**< @ref MMAL_PARAMETER_BOOLEAN_T */
+ MMAL_PARAMETER_VIDEO_STABILISATION,
+ /**< @ref MMAL_PARAMETER_FACE_TRACK_RESULTS_T */
+ MMAL_PARAMETER_FACE_TRACK_RESULTS,
+ /**< @ref MMAL_PARAMETER_BOOLEAN_T */
+ MMAL_PARAMETER_ENABLE_RAW_CAPTURE,
+
+ /* 0x20 */
+ /**< @ref MMAL_PARAMETER_URI_T */
+ MMAL_PARAMETER_DPF_FILE,
+ /**< @ref MMAL_PARAMETER_BOOLEAN_T */
+ MMAL_PARAMETER_ENABLE_DPF_FILE,
+ /**< @ref MMAL_PARAMETER_BOOLEAN_T */
+ MMAL_PARAMETER_DPF_FAIL_IS_FATAL,
+ /**< @ref MMAL_PARAMETER_CAPTUREMODE_T */
+ MMAL_PARAMETER_CAPTURE_MODE,
+ /**< @ref MMAL_PARAMETER_FOCUS_REGIONS_T */
+ MMAL_PARAMETER_FOCUS_REGIONS,
+ /**< @ref MMAL_PARAMETER_INPUT_CROP_T */
+ MMAL_PARAMETER_INPUT_CROP,
+ /**< @ref MMAL_PARAMETER_SENSOR_INFORMATION_T */
+ MMAL_PARAMETER_SENSOR_INFORMATION,
+ /**< @ref MMAL_PARAMETER_FLASH_SELECT_T */
+ MMAL_PARAMETER_FLASH_SELECT,
+ /**< @ref MMAL_PARAMETER_FIELD_OF_VIEW_T */
+ MMAL_PARAMETER_FIELD_OF_VIEW,
+ /**< @ref MMAL_PARAMETER_BOOLEAN_T */
+ MMAL_PARAMETER_HIGH_DYNAMIC_RANGE,
+ /**< @ref MMAL_PARAMETER_DRC_T */
+ MMAL_PARAMETER_DYNAMIC_RANGE_COMPRESSION,
+ /**< @ref MMAL_PARAMETER_ALGORITHM_CONTROL_T */
+ MMAL_PARAMETER_ALGORITHM_CONTROL,
+ /**< @ref MMAL_PARAMETER_RATIONAL_T */
+ MMAL_PARAMETER_SHARPNESS,
+ /**< @ref MMAL_PARAMETER_RATIONAL_T */
+ MMAL_PARAMETER_CONTRAST,
+ /**< @ref MMAL_PARAMETER_RATIONAL_T */
+ MMAL_PARAMETER_BRIGHTNESS,
+ /**< @ref MMAL_PARAMETER_RATIONAL_T */
+ MMAL_PARAMETER_SATURATION,
+
+ /* 0x30 */
+ /**< @ref MMAL_PARAMETER_UINT32_T */
+ MMAL_PARAMETER_ISO,
+ /**< @ref MMAL_PARAMETER_BOOLEAN_T */
+ MMAL_PARAMETER_ANTISHAKE,
+ /** @ref MMAL_PARAMETER_IMAGEFX_PARAMETERS_T */
+ MMAL_PARAMETER_IMAGE_EFFECT_PARAMETERS,
+ /** @ref MMAL_PARAMETER_BOOLEAN_T */
+ MMAL_PARAMETER_CAMERA_BURST_CAPTURE,
+ /** @ref MMAL_PARAMETER_UINT32_T */
+ MMAL_PARAMETER_CAMERA_MIN_ISO,
+ /** @ref MMAL_PARAMETER_CAMERA_USE_CASE_T */
+ MMAL_PARAMETER_CAMERA_USE_CASE,
+ /**< @ref MMAL_PARAMETER_BOOLEAN_T */
+ MMAL_PARAMETER_CAPTURE_STATS_PASS,
+ /** @ref MMAL_PARAMETER_UINT32_T */
+ MMAL_PARAMETER_CAMERA_CUSTOM_SENSOR_CONFIG,
+ /** @ref MMAL_PARAMETER_BOOLEAN_T */
+ MMAL_PARAMETER_ENABLE_REGISTER_FILE,
+ /** @ref MMAL_PARAMETER_BOOLEAN_T */
+ MMAL_PARAMETER_REGISTER_FAIL_IS_FATAL,
+ /** @ref MMAL_PARAMETER_CONFIGFILE_T */
+ MMAL_PARAMETER_CONFIGFILE_REGISTERS,
+ /** @ref MMAL_PARAMETER_CONFIGFILE_CHUNK_T */
+ MMAL_PARAMETER_CONFIGFILE_CHUNK_REGISTERS,
+ /**< @ref MMAL_PARAMETER_BOOLEAN_T */
+ MMAL_PARAMETER_JPEG_ATTACH_LOG,
+ /**< @ref MMAL_PARAMETER_ZEROSHUTTERLAG_T */
+ MMAL_PARAMETER_ZERO_SHUTTER_LAG,
+ /**< @ref MMAL_PARAMETER_FPS_RANGE_T */
+ MMAL_PARAMETER_FPS_RANGE,
+ /**< @ref MMAL_PARAMETER_INT32_T */
+ MMAL_PARAMETER_CAPTURE_EXPOSURE_COMP,
+
+ /* 0x40 */
+ /**< @ref MMAL_PARAMETER_BOOLEAN_T */
+ MMAL_PARAMETER_SW_SHARPEN_DISABLE,
+ /**< @ref MMAL_PARAMETER_BOOLEAN_T */
+ MMAL_PARAMETER_FLASH_REQUIRED,
+ /**< @ref MMAL_PARAMETER_BOOLEAN_T */
+ MMAL_PARAMETER_SW_SATURATION_DISABLE,
+ /**< Takes a @ref MMAL_PARAMETER_UINT32_T */
+ MMAL_PARAMETER_SHUTTER_SPEED,
+ /**< Takes a @ref MMAL_PARAMETER_AWB_GAINS_T */
+ MMAL_PARAMETER_CUSTOM_AWB_GAINS,
+};
+
+enum mmal_parameter_camera_config_timestamp_mode {
+ MMAL_PARAM_TIMESTAMP_MODE_ZERO = 0, /* Always timestamp frames as 0 */
+ MMAL_PARAM_TIMESTAMP_MODE_RAW_STC, /* Use the raw STC value
+ * for the frame timestamp
+ */
+ MMAL_PARAM_TIMESTAMP_MODE_RESET_STC, /* Use the STC timestamp
+ * but subtract the
+ * timestamp of the first
+ * frame sent to give a
+ * zero based timestamp.
+ */
+};
+
+struct mmal_parameter_fps_range {
+ /**< Low end of the permitted framerate range */
+ struct s32_fract fps_low;
+ /**< High end of the permitted framerate range */
+ struct s32_fract fps_high;
+};
+
+/* camera configuration parameter */
+struct mmal_parameter_camera_config {
+ /* Parameters for setting up the image pools */
+ u32 max_stills_w; /* Max size of stills capture */
+ u32 max_stills_h;
+ u32 stills_yuv422; /* Allow YUV422 stills capture */
+ u32 one_shot_stills; /* Continuous or one-shot stills captures. */
+
+ u32 max_preview_video_w; /* Max size of the preview or video
+ * capture frames
+ */
+ u32 max_preview_video_h;
+ u32 num_preview_video_frames;
+
+ /** Sets the height of the circular buffer for stills capture. */
+ u32 stills_capture_circular_buffer_height;
+
+ /** Allows preview/encode to resume as fast as possible after the stills
+ * input frame has been received, and then processes the still frame in
+ * the background whilst preview/encode has resumed.
+ * Actual mode is controlled by MMAL_PARAMETER_CAPTURE_MODE.
+ */
+ u32 fast_preview_resume;
+
+ /** Selects algorithm for timestamping frames if
+ * there is no clock component connected.
+ * enum mmal_parameter_camera_config_timestamp_mode
+ */
+ s32 use_stc_timestamp;
+};
+
+enum mmal_parameter_exposuremode {
+ MMAL_PARAM_EXPOSUREMODE_OFF,
+ MMAL_PARAM_EXPOSUREMODE_AUTO,
+ MMAL_PARAM_EXPOSUREMODE_NIGHT,
+ MMAL_PARAM_EXPOSUREMODE_NIGHTPREVIEW,
+ MMAL_PARAM_EXPOSUREMODE_BACKLIGHT,
+ MMAL_PARAM_EXPOSUREMODE_SPOTLIGHT,
+ MMAL_PARAM_EXPOSUREMODE_SPORTS,
+ MMAL_PARAM_EXPOSUREMODE_SNOW,
+ MMAL_PARAM_EXPOSUREMODE_BEACH,
+ MMAL_PARAM_EXPOSUREMODE_VERYLONG,
+ MMAL_PARAM_EXPOSUREMODE_FIXEDFPS,
+ MMAL_PARAM_EXPOSUREMODE_ANTISHAKE,
+ MMAL_PARAM_EXPOSUREMODE_FIREWORKS,
+};
+
+enum mmal_parameter_exposuremeteringmode {
+ MMAL_PARAM_EXPOSUREMETERINGMODE_AVERAGE,
+ MMAL_PARAM_EXPOSUREMETERINGMODE_SPOT,
+ MMAL_PARAM_EXPOSUREMETERINGMODE_BACKLIT,
+ MMAL_PARAM_EXPOSUREMETERINGMODE_MATRIX,
+};
+
+enum mmal_parameter_awbmode {
+ MMAL_PARAM_AWBMODE_OFF,
+ MMAL_PARAM_AWBMODE_AUTO,
+ MMAL_PARAM_AWBMODE_SUNLIGHT,
+ MMAL_PARAM_AWBMODE_CLOUDY,
+ MMAL_PARAM_AWBMODE_SHADE,
+ MMAL_PARAM_AWBMODE_TUNGSTEN,
+ MMAL_PARAM_AWBMODE_FLUORESCENT,
+ MMAL_PARAM_AWBMODE_INCANDESCENT,
+ MMAL_PARAM_AWBMODE_FLASH,
+ MMAL_PARAM_AWBMODE_HORIZON,
+};
+
+enum mmal_parameter_imagefx {
+ MMAL_PARAM_IMAGEFX_NONE,
+ MMAL_PARAM_IMAGEFX_NEGATIVE,
+ MMAL_PARAM_IMAGEFX_SOLARIZE,
+ MMAL_PARAM_IMAGEFX_POSTERIZE,
+ MMAL_PARAM_IMAGEFX_WHITEBOARD,
+ MMAL_PARAM_IMAGEFX_BLACKBOARD,
+ MMAL_PARAM_IMAGEFX_SKETCH,
+ MMAL_PARAM_IMAGEFX_DENOISE,
+ MMAL_PARAM_IMAGEFX_EMBOSS,
+ MMAL_PARAM_IMAGEFX_OILPAINT,
+ MMAL_PARAM_IMAGEFX_HATCH,
+ MMAL_PARAM_IMAGEFX_GPEN,
+ MMAL_PARAM_IMAGEFX_PASTEL,
+ MMAL_PARAM_IMAGEFX_WATERCOLOUR,
+ MMAL_PARAM_IMAGEFX_FILM,
+ MMAL_PARAM_IMAGEFX_BLUR,
+ MMAL_PARAM_IMAGEFX_SATURATION,
+ MMAL_PARAM_IMAGEFX_COLOURSWAP,
+ MMAL_PARAM_IMAGEFX_WASHEDOUT,
+ MMAL_PARAM_IMAGEFX_POSTERISE,
+ MMAL_PARAM_IMAGEFX_COLOURPOINT,
+ MMAL_PARAM_IMAGEFX_COLOURBALANCE,
+ MMAL_PARAM_IMAGEFX_CARTOON,
+};
+
+enum MMAL_PARAM_FLICKERAVOID {
+ MMAL_PARAM_FLICKERAVOID_OFF,
+ MMAL_PARAM_FLICKERAVOID_AUTO,
+ MMAL_PARAM_FLICKERAVOID_50HZ,
+ MMAL_PARAM_FLICKERAVOID_60HZ,
+ MMAL_PARAM_FLICKERAVOID_MAX = 0x7FFFFFFF
+};
+
+struct mmal_parameter_awbgains {
+ struct s32_fract r_gain; /**< Red gain */
+ struct s32_fract b_gain; /**< Blue gain */
+};
+
+/** Manner of video rate control */
+enum mmal_parameter_rate_control_mode {
+ MMAL_VIDEO_RATECONTROL_DEFAULT,
+ MMAL_VIDEO_RATECONTROL_VARIABLE,
+ MMAL_VIDEO_RATECONTROL_CONSTANT,
+ MMAL_VIDEO_RATECONTROL_VARIABLE_SKIP_FRAMES,
+ MMAL_VIDEO_RATECONTROL_CONSTANT_SKIP_FRAMES
+};
+
+enum mmal_video_profile {
+ MMAL_VIDEO_PROFILE_H263_BASELINE,
+ MMAL_VIDEO_PROFILE_H263_H320CODING,
+ MMAL_VIDEO_PROFILE_H263_BACKWARDCOMPATIBLE,
+ MMAL_VIDEO_PROFILE_H263_ISWV2,
+ MMAL_VIDEO_PROFILE_H263_ISWV3,
+ MMAL_VIDEO_PROFILE_H263_HIGHCOMPRESSION,
+ MMAL_VIDEO_PROFILE_H263_INTERNET,
+ MMAL_VIDEO_PROFILE_H263_INTERLACE,
+ MMAL_VIDEO_PROFILE_H263_HIGHLATENCY,
+ MMAL_VIDEO_PROFILE_MP4V_SIMPLE,
+ MMAL_VIDEO_PROFILE_MP4V_SIMPLESCALABLE,
+ MMAL_VIDEO_PROFILE_MP4V_CORE,
+ MMAL_VIDEO_PROFILE_MP4V_MAIN,
+ MMAL_VIDEO_PROFILE_MP4V_NBIT,
+ MMAL_VIDEO_PROFILE_MP4V_SCALABLETEXTURE,
+ MMAL_VIDEO_PROFILE_MP4V_SIMPLEFACE,
+ MMAL_VIDEO_PROFILE_MP4V_SIMPLEFBA,
+ MMAL_VIDEO_PROFILE_MP4V_BASICANIMATED,
+ MMAL_VIDEO_PROFILE_MP4V_HYBRID,
+ MMAL_VIDEO_PROFILE_MP4V_ADVANCEDREALTIME,
+ MMAL_VIDEO_PROFILE_MP4V_CORESCALABLE,
+ MMAL_VIDEO_PROFILE_MP4V_ADVANCEDCODING,
+ MMAL_VIDEO_PROFILE_MP4V_ADVANCEDCORE,
+ MMAL_VIDEO_PROFILE_MP4V_ADVANCEDSCALABLE,
+ MMAL_VIDEO_PROFILE_MP4V_ADVANCEDSIMPLE,
+ MMAL_VIDEO_PROFILE_H264_BASELINE,
+ MMAL_VIDEO_PROFILE_H264_MAIN,
+ MMAL_VIDEO_PROFILE_H264_EXTENDED,
+ MMAL_VIDEO_PROFILE_H264_HIGH,
+ MMAL_VIDEO_PROFILE_H264_HIGH10,
+ MMAL_VIDEO_PROFILE_H264_HIGH422,
+ MMAL_VIDEO_PROFILE_H264_HIGH444,
+ MMAL_VIDEO_PROFILE_H264_CONSTRAINED_BASELINE,
+ MMAL_VIDEO_PROFILE_DUMMY = 0x7FFFFFFF
+};
+
+enum mmal_video_level {
+ MMAL_VIDEO_LEVEL_H263_10,
+ MMAL_VIDEO_LEVEL_H263_20,
+ MMAL_VIDEO_LEVEL_H263_30,
+ MMAL_VIDEO_LEVEL_H263_40,
+ MMAL_VIDEO_LEVEL_H263_45,
+ MMAL_VIDEO_LEVEL_H263_50,
+ MMAL_VIDEO_LEVEL_H263_60,
+ MMAL_VIDEO_LEVEL_H263_70,
+ MMAL_VIDEO_LEVEL_MP4V_0,
+ MMAL_VIDEO_LEVEL_MP4V_0b,
+ MMAL_VIDEO_LEVEL_MP4V_1,
+ MMAL_VIDEO_LEVEL_MP4V_2,
+ MMAL_VIDEO_LEVEL_MP4V_3,
+ MMAL_VIDEO_LEVEL_MP4V_4,
+ MMAL_VIDEO_LEVEL_MP4V_4a,
+ MMAL_VIDEO_LEVEL_MP4V_5,
+ MMAL_VIDEO_LEVEL_MP4V_6,
+ MMAL_VIDEO_LEVEL_H264_1,
+ MMAL_VIDEO_LEVEL_H264_1b,
+ MMAL_VIDEO_LEVEL_H264_11,
+ MMAL_VIDEO_LEVEL_H264_12,
+ MMAL_VIDEO_LEVEL_H264_13,
+ MMAL_VIDEO_LEVEL_H264_2,
+ MMAL_VIDEO_LEVEL_H264_21,
+ MMAL_VIDEO_LEVEL_H264_22,
+ MMAL_VIDEO_LEVEL_H264_3,
+ MMAL_VIDEO_LEVEL_H264_31,
+ MMAL_VIDEO_LEVEL_H264_32,
+ MMAL_VIDEO_LEVEL_H264_4,
+ MMAL_VIDEO_LEVEL_H264_41,
+ MMAL_VIDEO_LEVEL_H264_42,
+ MMAL_VIDEO_LEVEL_H264_5,
+ MMAL_VIDEO_LEVEL_H264_51,
+ MMAL_VIDEO_LEVEL_DUMMY = 0x7FFFFFFF
+};
+
+struct mmal_parameter_video_profile {
+ enum mmal_video_profile profile;
+ enum mmal_video_level level;
+};
+
+/* video parameters */
+
+enum mmal_parameter_video_type {
+ /** @ref MMAL_DISPLAYREGION_T */
+ MMAL_PARAMETER_DISPLAYREGION = MMAL_PARAMETER_GROUP_VIDEO,
+
+ /** @ref MMAL_PARAMETER_VIDEO_PROFILE_T */
+ MMAL_PARAMETER_SUPPORTED_PROFILES,
+
+ /** @ref MMAL_PARAMETER_VIDEO_PROFILE_T */
+ MMAL_PARAMETER_PROFILE,
+
+ /** @ref MMAL_PARAMETER_UINT32_T */
+ MMAL_PARAMETER_INTRAPERIOD,
+
+ /** @ref MMAL_PARAMETER_VIDEO_RATECONTROL_T */
+ MMAL_PARAMETER_RATECONTROL,
+
+ /** @ref MMAL_PARAMETER_VIDEO_NALUNITFORMAT_T */
+ MMAL_PARAMETER_NALUNITFORMAT,
+
+ /** @ref MMAL_PARAMETER_BOOLEAN_T */
+ MMAL_PARAMETER_MINIMISE_FRAGMENTATION,
+
+ /** @ref MMAL_PARAMETER_UINT32_T.
+ * Setting the value to zero resets to the default (one slice per
+ * frame).
+ */
+ MMAL_PARAMETER_MB_ROWS_PER_SLICE,
+
+ /** @ref MMAL_PARAMETER_VIDEO_LEVEL_EXTENSION_T */
+ MMAL_PARAMETER_VIDEO_LEVEL_EXTENSION,
+
+ /** @ref MMAL_PARAMETER_VIDEO_EEDE_ENABLE_T */
+ MMAL_PARAMETER_VIDEO_EEDE_ENABLE,
+
+ /** @ref MMAL_PARAMETER_VIDEO_EEDE_LOSSRATE_T */
+ MMAL_PARAMETER_VIDEO_EEDE_LOSSRATE,
+
+ /** @ref MMAL_PARAMETER_BOOLEAN_T. Request an I-frame. */
+ MMAL_PARAMETER_VIDEO_REQUEST_I_FRAME,
+ /** @ref MMAL_PARAMETER_VIDEO_INTRA_REFRESH_T */
+ MMAL_PARAMETER_VIDEO_INTRA_REFRESH,
+
+ /** @ref MMAL_PARAMETER_BOOLEAN_T. */
+ MMAL_PARAMETER_VIDEO_IMMUTABLE_INPUT,
+
+ /** @ref MMAL_PARAMETER_UINT32_T. Run-time bit rate control */
+ MMAL_PARAMETER_VIDEO_BIT_RATE,
+
+ /** @ref MMAL_PARAMETER_FRAME_RATE_T */
+ MMAL_PARAMETER_VIDEO_FRAME_RATE,
+
+ /** @ref MMAL_PARAMETER_UINT32_T. */
+ MMAL_PARAMETER_VIDEO_ENCODE_MIN_QUANT,
+
+ /** @ref MMAL_PARAMETER_UINT32_T. */
+ MMAL_PARAMETER_VIDEO_ENCODE_MAX_QUANT,
+
+ /** @ref MMAL_PARAMETER_VIDEO_ENCODE_RC_MODEL_T. */
+ MMAL_PARAMETER_VIDEO_ENCODE_RC_MODEL,
+
+ MMAL_PARAMETER_EXTRA_BUFFERS, /**< @ref MMAL_PARAMETER_UINT32_T. */
+ /** @ref MMAL_PARAMETER_UINT32_T.
+ * Changing this parameter from the default can reduce frame rate
+ * because image buffers need to be re-pitched.
+ */
+ MMAL_PARAMETER_VIDEO_ALIGN_HORIZ,
+
+ /** @ref MMAL_PARAMETER_UINT32_T.
+ * Changing this parameter from the default can reduce frame rate
+ * because image buffers need to be re-pitched.
+ */
+ MMAL_PARAMETER_VIDEO_ALIGN_VERT,
+
+ /** @ref MMAL_PARAMETER_BOOLEAN_T. */
+ MMAL_PARAMETER_VIDEO_DROPPABLE_PFRAMES,
+
+ /** @ref MMAL_PARAMETER_UINT32_T. */
+ MMAL_PARAMETER_VIDEO_ENCODE_INITIAL_QUANT,
+
+ /**< @ref MMAL_PARAMETER_UINT32_T. */
+ MMAL_PARAMETER_VIDEO_ENCODE_QP_P,
+
+ /**< @ref MMAL_PARAMETER_UINT32_T. */
+ MMAL_PARAMETER_VIDEO_ENCODE_RC_SLICE_DQUANT,
+
+ /** @ref MMAL_PARAMETER_UINT32_T */
+ MMAL_PARAMETER_VIDEO_ENCODE_FRAME_LIMIT_BITS,
+
+ /** @ref MMAL_PARAMETER_UINT32_T. */
+ MMAL_PARAMETER_VIDEO_ENCODE_PEAK_RATE,
+
+ /* H264 specific parameters */
+
+ /** @ref MMAL_PARAMETER_BOOLEAN_T. */
+ MMAL_PARAMETER_VIDEO_ENCODE_H264_DISABLE_CABAC,
+
+ /** @ref MMAL_PARAMETER_BOOLEAN_T. */
+ MMAL_PARAMETER_VIDEO_ENCODE_H264_LOW_LATENCY,
+
+ /** @ref MMAL_PARAMETER_BOOLEAN_T. */
+ MMAL_PARAMETER_VIDEO_ENCODE_H264_AU_DELIMITERS,
+
+ /** @ref MMAL_PARAMETER_UINT32_T. */
+ MMAL_PARAMETER_VIDEO_ENCODE_H264_DEBLOCK_IDC,
+
+ /** @ref MMAL_PARAMETER_VIDEO_ENCODER_H264_MB_INTRA_MODES_T. */
+ MMAL_PARAMETER_VIDEO_ENCODE_H264_MB_INTRA_MODE,
+
+ /** @ref MMAL_PARAMETER_BOOLEAN_T */
+ MMAL_PARAMETER_VIDEO_ENCODE_HEADER_ON_OPEN,
+
+ /** @ref MMAL_PARAMETER_BOOLEAN_T */
+ MMAL_PARAMETER_VIDEO_ENCODE_PRECODE_FOR_QP,
+
+ /** @ref MMAL_PARAMETER_VIDEO_DRM_INIT_INFO_T. */
+ MMAL_PARAMETER_VIDEO_DRM_INIT_INFO,
+
+ /** @ref MMAL_PARAMETER_BOOLEAN_T */
+ MMAL_PARAMETER_VIDEO_TIMESTAMP_FIFO,
+
+ /** @ref MMAL_PARAMETER_BOOLEAN_T */
+ MMAL_PARAMETER_VIDEO_DECODE_ERROR_CONCEALMENT,
+
+ /** @ref MMAL_PARAMETER_VIDEO_DRM_PROTECT_BUFFER_T. */
+ MMAL_PARAMETER_VIDEO_DRM_PROTECT_BUFFER,
+
+ /** @ref MMAL_PARAMETER_BYTES_T */
+ MMAL_PARAMETER_VIDEO_DECODE_CONFIG_VD3,
+
+ /**< @ref MMAL_PARAMETER_BOOLEAN_T */
+ MMAL_PARAMETER_VIDEO_ENCODE_H264_VCL_HRD_PARAMETERS,
+
+ /**< @ref MMAL_PARAMETER_BOOLEAN_T */
+ MMAL_PARAMETER_VIDEO_ENCODE_H264_LOW_DELAY_HRD_FLAG,
+
+ /**< @ref MMAL_PARAMETER_BOOLEAN_T */
+ MMAL_PARAMETER_VIDEO_ENCODE_INLINE_HEADER
+};
+
+/** Valid mirror modes */
+enum mmal_parameter_mirror {
+ MMAL_PARAM_MIRROR_NONE,
+ MMAL_PARAM_MIRROR_VERTICAL,
+ MMAL_PARAM_MIRROR_HORIZONTAL,
+ MMAL_PARAM_MIRROR_BOTH,
+};
+
+enum mmal_parameter_displaytransform {
+ MMAL_DISPLAY_ROT0 = 0,
+ MMAL_DISPLAY_MIRROR_ROT0 = 1,
+ MMAL_DISPLAY_MIRROR_ROT180 = 2,
+ MMAL_DISPLAY_ROT180 = 3,
+ MMAL_DISPLAY_MIRROR_ROT90 = 4,
+ MMAL_DISPLAY_ROT270 = 5,
+ MMAL_DISPLAY_ROT90 = 6,
+ MMAL_DISPLAY_MIRROR_ROT270 = 7,
+};
+
+enum mmal_parameter_displaymode {
+ MMAL_DISPLAY_MODE_FILL = 0,
+ MMAL_DISPLAY_MODE_LETTERBOX = 1,
+};
+
+enum mmal_parameter_displayset {
+ MMAL_DISPLAY_SET_NONE = 0,
+ MMAL_DISPLAY_SET_NUM = 1,
+ MMAL_DISPLAY_SET_FULLSCREEN = 2,
+ MMAL_DISPLAY_SET_TRANSFORM = 4,
+ MMAL_DISPLAY_SET_DEST_RECT = 8,
+ MMAL_DISPLAY_SET_SRC_RECT = 0x10,
+ MMAL_DISPLAY_SET_MODE = 0x20,
+ MMAL_DISPLAY_SET_PIXEL = 0x40,
+ MMAL_DISPLAY_SET_NOASPECT = 0x80,
+ MMAL_DISPLAY_SET_LAYER = 0x100,
+ MMAL_DISPLAY_SET_COPYPROTECT = 0x200,
+ MMAL_DISPLAY_SET_ALPHA = 0x400,
+};
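+
+/*
+ * Illustrative sketch, not part of the original patch: a client
+ * updating only some display region fields combines the relevant bits
+ * in the 'set' member of struct mmal_parameter_displayregion below,
+ * e.g.:
+ *
+ *	region.set = MMAL_DISPLAY_SET_DEST_RECT | MMAL_DISPLAY_SET_LAYER;
+ */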
+
+/* rectangle, used in many places so it gets its own struct */
+struct vchiq_mmal_rect {
+ s32 x;
+ s32 y;
+ s32 width;
+ s32 height;
+};
+
+struct mmal_parameter_displayregion {
+ /** Bitfield that indicates which fields are set and should be
+ * used. All other fields will maintain their current value.
+ * enum mmal_parameter_displayset defines the bits that can be
+ * combined.
+ */
+ u32 set;
+
+ /** Describes the display output device, with 0 typically
+ * being a directly connected LCD display. The actual values
+ * will depend on the hardware. Code using hard-wired numbers
+ * (e.g. 2) is certain to fail.
+ */
+ u32 display_num;
+
+ /** Indicates that we are using the full device screen area,
+ * rather than a window of the display. If zero, then
+ * dest_rect is used to specify a region of the display to
+ * use.
+ */
+ s32 fullscreen;
+
+ /** Indicates any rotation or flipping used to map frames onto
+ * the natural display orientation.
+ */
+ u32 transform; /* enum mmal_parameter_displaytransform */
+
+ /** Where to display the frame within the screen, if
+ * fullscreen is zero.
+ */
+ struct vchiq_mmal_rect dest_rect;
+
+ /** Indicates which area of the frame to display. If all
+ * values are zero, the whole frame will be used.
+ */
+ struct vchiq_mmal_rect src_rect;
+
+ /** If set to non-zero, indicates that any display scaling
+ * should disregard the aspect ratio of the frame region being
+ * displayed.
+ */
+ s32 noaspect;
+
+ /** Indicates how the image should be scaled to fit the
+ * display. MMAL_DISPLAY_MODE_FILL indicates that the image
+ * should fill the screen by potentially cropping the frames.
+ * Setting mode to MMAL_DISPLAY_MODE_LETTERBOX indicates that
+ * all of the source region should be displayed and black bars
+ * added if necessary.
+ */
+ u32 mode; /* enum mmal_parameter_displaymode */
+
+ /** If non-zero, defines the width of a source pixel relative
+ * to pixel_y. If zero, then pixels default to being square.
+ */
+ u32 pixel_x;
+
+ /** If non-zero, defines the height of a source pixel relative
+ * to pixel_x. If zero, then pixels default to being square.
+ */
+ u32 pixel_y;
+
+ /** Sets the relative depth of the images, with greater values
+ * being in front of smaller values.
+ */
+ u32 layer;
+
+ /** Set to non-zero to ensure copy protection is used on
+ * output.
+ */
+ s32 copyprotect_required;
+
+ /** Level of opacity of the layer, where zero is fully
+ * transparent and 255 is fully opaque.
+ */
+ u32 alpha;
+};
+
+#define MMAL_MAX_IMAGEFX_PARAMETERS 5
+
+struct mmal_parameter_imagefx_parameters {
+ enum mmal_parameter_imagefx effect;
+ u32 num_effect_params;
+ u32 effect_parameter[MMAL_MAX_IMAGEFX_PARAMETERS];
+};
+
+#define MMAL_PARAMETER_CAMERA_INFO_MAX_CAMERAS 4
+#define MMAL_PARAMETER_CAMERA_INFO_MAX_FLASHES 2
+#define MMAL_PARAMETER_CAMERA_INFO_MAX_STR_LEN 16
+
+struct mmal_parameter_camera_info_camera {
+ u32 port_id;
+ u32 max_width;
+ u32 max_height;
+ u32 lens_present;
+ u8 camera_name[MMAL_PARAMETER_CAMERA_INFO_MAX_STR_LEN];
+};
+
+enum mmal_parameter_camera_info_flash_type {
+ /* Make values explicit to ensure they match values in config ini */
+ MMAL_PARAMETER_CAMERA_INFO_FLASH_TYPE_XENON = 0,
+ MMAL_PARAMETER_CAMERA_INFO_FLASH_TYPE_LED = 1,
+ MMAL_PARAMETER_CAMERA_INFO_FLASH_TYPE_OTHER = 2,
+ MMAL_PARAMETER_CAMERA_INFO_FLASH_TYPE_MAX = 0x7FFFFFFF
+};
+
+struct mmal_parameter_camera_info_flash {
+ enum mmal_parameter_camera_info_flash_type flash_type;
+};
+
+struct mmal_parameter_camera_info {
+ u32 num_cameras;
+ u32 num_flashes;
+ struct mmal_parameter_camera_info_camera
+ cameras[MMAL_PARAMETER_CAMERA_INFO_MAX_CAMERAS];
+ struct mmal_parameter_camera_info_flash
+ flashes[MMAL_PARAMETER_CAMERA_INFO_MAX_FLASHES];
+};
+
+#endif
diff --git a/drivers/platform/raspberrypi/vchiq-mmal/mmal-vchiq.c b/drivers/platform/raspberrypi/vchiq-mmal/mmal-vchiq.c
new file mode 100644
index 000000000000..cd073ed3ea2d
--- /dev/null
+++ b/drivers/platform/raspberrypi/vchiq-mmal/mmal-vchiq.c
@@ -0,0 +1,1949 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Broadcom BCM2835 V4L2 driver
+ *
+ * Copyright © 2013 Raspberry Pi (Trading) Ltd.
+ *
+ * Authors: Vincent Sanders @ Collabora
+ * Dave Stevenson @ Broadcom
+ * (now dave.stevenson@raspberrypi.org)
+ * Simon Mellor @ Broadcom
+ * Luke Diamand @ Broadcom
+ *
+ * V4L2 driver MMAL vchiq interface code
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/completion.h>
+#include <linux/raspberrypi/vchiq.h>
+#include <linux/vmalloc.h>
+#include <media/videobuf2-vmalloc.h>
+
+#include <linux/raspberrypi/vchiq_arm.h>
+
+#include "mmal-common.h"
+#include "mmal-vchiq.h"
+#include "mmal-msg.h"
+
+/*
+ * maximum number of components supported.
+ * This matches the maximum permitted by default on the VPU
+ */
+#define VCHIQ_MMAL_MAX_COMPONENTS 64
+
+/*
+ * Timeout for synchronous msg responses in seconds.
+ * Helpful to increase this if stopping in the VPU debugger.
+ */
+#define SYNC_MSG_TIMEOUT 3
+
+/*#define FULL_MSG_DUMP 1*/
+
+#ifdef DEBUG
+static const char *const msg_type_names[] = {
+ "UNKNOWN",
+ "QUIT",
+ "SERVICE_CLOSED",
+ "GET_VERSION",
+ "COMPONENT_CREATE",
+ "COMPONENT_DESTROY",
+ "COMPONENT_ENABLE",
+ "COMPONENT_DISABLE",
+ "PORT_INFO_GET",
+ "PORT_INFO_SET",
+ "PORT_ACTION",
+ "BUFFER_FROM_HOST",
+ "BUFFER_TO_HOST",
+ "GET_STATS",
+ "PORT_PARAMETER_SET",
+ "PORT_PARAMETER_GET",
+ "EVENT_TO_HOST",
+ "GET_CORE_STATS_FOR_PORT",
+ "OPAQUE_ALLOCATOR",
+ "CONSUME_MEM",
+ "LMK",
+ "OPAQUE_ALLOCATOR_DESC",
+ "DRM_GET_LHS32",
+ "DRM_GET_TIME",
+ "BUFFER_FROM_HOST_ZEROLEN",
+ "PORT_FLUSH",
+ "HOST_LOG",
+};
+#endif
+
+static const char *const port_action_type_names[] = {
+ "UNKNOWN",
+ "ENABLE",
+ "DISABLE",
+ "FLUSH",
+ "CONNECT",
+ "DISCONNECT",
+ "SET_REQUIREMENTS",
+};
+
+#if defined(DEBUG)
+#if defined(FULL_MSG_DUMP)
+#define DBG_DUMP_MSG(MSG, MSG_LEN, TITLE) \
+ do { \
+ pr_debug(TITLE" type:%s(%d) length:%d\n", \
+ msg_type_names[(MSG)->h.type], \
+ (MSG)->h.type, (MSG_LEN)); \
+ print_hex_dump(KERN_DEBUG, "<<h: ", DUMP_PREFIX_OFFSET, \
+ 16, 4, (MSG), \
+ sizeof(struct mmal_msg_header), 1); \
+ print_hex_dump(KERN_DEBUG, "<<p: ", DUMP_PREFIX_OFFSET, \
+ 16, 4, \
+ ((u8 *)(MSG)) + sizeof(struct mmal_msg_header),\
+ (MSG_LEN) - sizeof(struct mmal_msg_header), 1); \
+ } while (0)
+#else
+#define DBG_DUMP_MSG(MSG, MSG_LEN, TITLE) \
+ do { \
+ pr_debug(TITLE" type:%s(%d) length:%d\n", \
+ msg_type_names[(MSG)->h.type], \
+ (MSG)->h.type, (MSG_LEN)); \
+ } while (0)
+#endif
+#else
+#define DBG_DUMP_MSG(MSG, MSG_LEN, TITLE)
+#endif
+
+struct vchiq_mmal_instance;
+
+/* normal message context */
+struct mmal_msg_context {
+ struct vchiq_mmal_instance *instance;
+
+ /* Index in the context_map idr so that we can find the
+ * mmal_msg_context again when servicing the VCHI reply.
+ */
+ int handle;
+
+ union {
+ struct {
+ /* work struct for buffer_cb callback */
+ struct work_struct work;
+ /* work struct for deferred callback */
+ struct work_struct buffer_to_host_work;
+ /* mmal instance */
+ struct vchiq_mmal_instance *instance;
+ /* mmal port */
+ struct vchiq_mmal_port *port;
+ /* actual buffer used to store bulk reply */
+ struct mmal_buffer *buffer;
+ /* amount of buffer used */
+ unsigned long buffer_used;
+ /* MMAL buffer flags */
+ u32 mmal_flags;
+ /* Presentation and Decode timestamps */
+ s64 pts;
+ s64 dts;
+
+ int status; /* context status */
+
+ } bulk; /* bulk data */
+
+ struct {
+ /* message handle to release */
+ struct vchiq_header *msg_handle;
+ /* pointer to received message */
+ struct mmal_msg *msg;
+ /* received message length */
+ u32 msg_len;
+ /* completion upon reply */
+ struct completion cmplt;
+ } sync; /* synchronous response */
+ } u;
+
+};
+
+struct vchiq_mmal_instance {
+ unsigned int service_handle;
+
+ /* ensure serialised access to service */
+ struct mutex vchiq_mutex;
+
+ struct idr context_map;
+ /* protect accesses to context_map */
+ struct mutex context_map_lock;
+
+ struct vchiq_mmal_component component[VCHIQ_MMAL_MAX_COMPONENTS];
+
+ /* ordered workqueue to process all bulk operations */
+ struct workqueue_struct *bulk_wq;
+
+ /* handle for a vchiq instance */
+ struct vchiq_instance *vchiq_instance;
+};
+
+static struct mmal_msg_context *
+get_msg_context(struct vchiq_mmal_instance *instance)
+{
+ struct mmal_msg_context *msg_context;
+ int handle;
+
+ /* todo: should this be allocated from a pool to avoid kzalloc */
+ msg_context = kzalloc(sizeof(*msg_context), GFP_KERNEL);
+
+ if (!msg_context)
+ return ERR_PTR(-ENOMEM);
+
+ /* Create an ID that will be passed along with our message so
+ * that when we service the VCHI reply, we can look up what
+ * message is being replied to.
+ */
+ mutex_lock(&instance->context_map_lock);
+ handle = idr_alloc(&instance->context_map, msg_context,
+ 0, 0, GFP_KERNEL);
+ mutex_unlock(&instance->context_map_lock);
+
+ if (handle < 0) {
+ kfree(msg_context);
+ return ERR_PTR(handle);
+ }
+
+ msg_context->instance = instance;
+ msg_context->handle = handle;
+
+ return msg_context;
+}
+
+static struct mmal_msg_context *
+lookup_msg_context(struct vchiq_mmal_instance *instance, int handle)
+{
+ return idr_find(&instance->context_map, handle);
+}
+
+static void
+release_msg_context(struct mmal_msg_context *msg_context)
+{
+ struct vchiq_mmal_instance *instance = msg_context->instance;
+
+ mutex_lock(&instance->context_map_lock);
+ idr_remove(&instance->context_map, msg_context->handle);
+ mutex_unlock(&instance->context_map_lock);
+ kfree(msg_context);
+}
+
+/* deals with receipt of event to host message */
+static void event_to_host_cb(struct vchiq_mmal_instance *instance,
+ struct mmal_msg *msg, u32 msg_len)
+{
+ pr_debug("unhandled event\n");
+ pr_debug("component:%u port type:%d num:%d cmd:0x%x length:%d\n",
+ msg->u.event_to_host.client_component,
+ msg->u.event_to_host.port_type,
+ msg->u.event_to_host.port_num,
+ msg->u.event_to_host.cmd, msg->u.event_to_host.length);
+}
+
+/* workqueue scheduled callback
+ *
+ * We do this because it is important that we do not make any other
+ * vchiq sync calls from within the message delivery thread.
+ */
+static void buffer_work_cb(struct work_struct *work)
+{
+ struct mmal_msg_context *msg_context =
+ container_of(work, struct mmal_msg_context, u.bulk.work);
+ struct mmal_buffer *buffer = msg_context->u.bulk.buffer;
+
+ if (!buffer) {
+ pr_err("%s: ctx: %p, No mmal buffer to pass details\n",
+ __func__, msg_context);
+ return;
+ }
+
+ buffer->length = msg_context->u.bulk.buffer_used;
+ buffer->mmal_flags = msg_context->u.bulk.mmal_flags;
+ buffer->dts = msg_context->u.bulk.dts;
+ buffer->pts = msg_context->u.bulk.pts;
+
+ atomic_dec(&msg_context->u.bulk.port->buffers_with_vpu);
+
+ msg_context->u.bulk.port->buffer_cb(msg_context->u.bulk.instance,
+ msg_context->u.bulk.port,
+ msg_context->u.bulk.status,
+ msg_context->u.bulk.buffer);
+}
+
+/* workqueue scheduled callback to handle receiving buffers
+ *
+ * VCHI will allow up to 4 bulk receives to be scheduled before blocking.
+ * If we block in the service_callback context then we can't process the
+ * VCHI_CALLBACK_BULK_RECEIVED message that would otherwise allow the blocked
+ * vchiq_bulk_receive() call to complete.
+ */
+static void buffer_to_host_work_cb(struct work_struct *work)
+{
+ struct mmal_msg_context *msg_context =
+ container_of(work, struct mmal_msg_context,
+ u.bulk.buffer_to_host_work);
+ struct vchiq_mmal_instance *instance = msg_context->instance;
+ unsigned long len = msg_context->u.bulk.buffer_used;
+ int ret;
+
+ if (!len)
+ /* Dummy receive to ensure the buffers remain in order */
+ len = 8;
+ /* queue the bulk submission */
+ vchiq_use_service(instance->vchiq_instance, instance->service_handle);
+ ret = vchiq_bulk_receive(instance->vchiq_instance, instance->service_handle,
+ msg_context->u.bulk.buffer->buffer,
+ /* Actual receive needs to be a multiple
+ * of 4 bytes
+ */
+ (len + 3) & ~3,
+ msg_context,
+ VCHIQ_BULK_MODE_CALLBACK);
+
+ vchiq_release_service(instance->vchiq_instance, instance->service_handle);
+
+ if (ret != 0)
+ pr_err("%s: ctx: %p, vchiq_bulk_receive failed %d\n",
+ __func__, msg_context, ret);
+}
+
+/* enqueue a bulk receive for a given message context */
+static int bulk_receive(struct vchiq_mmal_instance *instance,
+ struct mmal_msg *msg,
+ struct mmal_msg_context *msg_context)
+{
+ unsigned long rd_len;
+
+ rd_len = msg->u.buffer_from_host.buffer_header.length;
+
+ if (!msg_context->u.bulk.buffer) {
+ pr_err("bulk.buffer not configured - error in buffer_from_host\n");
+
+ /* todo: this is a serious error, we should never have
+ * committed a buffer_to_host operation to the mmal
+ * port without the buffer to back it up (underflow
+ * handling) and there is no obvious way to deal with
+ * this - how is the mmal service going to react when
+ * we fail to do the xfer and reschedule a buffer when
+ * it arrives? perhaps a starved flag to indicate a
+ * waiting bulk receive?
+ */
+
+ return -EINVAL;
+ }
+
+ /* ensure we do not overrun the available buffer */
+ if (rd_len > msg_context->u.bulk.buffer->buffer_size) {
+ rd_len = msg_context->u.bulk.buffer->buffer_size;
+ pr_warn("short read as not enough receive buffer space\n");
+ /* todo: is this the correct response, what happens to
+ * the rest of the message data?
+ */
+ }
+
+ /* store length */
+ msg_context->u.bulk.buffer_used = rd_len;
+ msg_context->u.bulk.dts = msg->u.buffer_from_host.buffer_header.dts;
+ msg_context->u.bulk.pts = msg->u.buffer_from_host.buffer_header.pts;
+
+ queue_work(msg_context->instance->bulk_wq,
+ &msg_context->u.bulk.buffer_to_host_work);
+
+ return 0;
+}
+
+/* data in message, memcpy from packet into output buffer */
+static int inline_receive(struct vchiq_mmal_instance *instance,
+ struct mmal_msg *msg,
+ struct mmal_msg_context *msg_context)
+{
+ memcpy(msg_context->u.bulk.buffer->buffer,
+ msg->u.buffer_from_host.short_data,
+ msg->u.buffer_from_host.payload_in_message);
+
+ msg_context->u.bulk.buffer_used =
+ msg->u.buffer_from_host.payload_in_message;
+
+ return 0;
+}
+
+/* queue the buffer availability with MMAL_MSG_TYPE_BUFFER_FROM_HOST */
+static int
+buffer_from_host(struct vchiq_mmal_instance *instance,
+ struct vchiq_mmal_port *port, struct mmal_buffer *buf)
+{
+ struct mmal_msg_context *msg_context;
+ struct mmal_msg m;
+ int ret;
+
+ if (!port->enabled)
+ return -EINVAL;
+
+ pr_debug("instance:%u buffer:%p\n", instance->service_handle, buf);
+
+ /* get context */
+ if (!buf->msg_context) {
+ pr_err("%s: msg_context not allocated, buf %p\n", __func__,
+ buf);
+ return -EINVAL;
+ }
+ msg_context = buf->msg_context;
+
+ /* store bulk message context for when data arrives */
+ msg_context->u.bulk.instance = instance;
+ msg_context->u.bulk.port = port;
+ msg_context->u.bulk.buffer = buf;
+ msg_context->u.bulk.buffer_used = 0;
+
+ /* initialise work structure ready to schedule callback */
+ INIT_WORK(&msg_context->u.bulk.work, buffer_work_cb);
+ INIT_WORK(&msg_context->u.bulk.buffer_to_host_work,
+ buffer_to_host_work_cb);
+
+ atomic_inc(&port->buffers_with_vpu);
+
+ /* prep the buffer from host message */
+ memset(&m, 0xbc, sizeof(m)); /* just to make debug clearer */
+
+ m.h.type = MMAL_MSG_TYPE_BUFFER_FROM_HOST;
+ m.h.magic = MMAL_MAGIC;
+ m.h.context = msg_context->handle;
+ m.h.status = 0;
+
+ /* drvbuf is our private data passed back */
+ m.u.buffer_from_host.drvbuf.magic = MMAL_MAGIC;
+ m.u.buffer_from_host.drvbuf.component_handle = port->component->handle;
+ m.u.buffer_from_host.drvbuf.port_handle = port->handle;
+ m.u.buffer_from_host.drvbuf.client_context = msg_context->handle;
+
+ /* buffer header */
+ m.u.buffer_from_host.buffer_header.cmd = 0;
+ m.u.buffer_from_host.buffer_header.data =
+ (u32)(unsigned long)buf->buffer;
+ m.u.buffer_from_host.buffer_header.alloc_size = buf->buffer_size;
+ m.u.buffer_from_host.buffer_header.length = 0; /* nothing used yet */
+ m.u.buffer_from_host.buffer_header.offset = 0; /* no offset */
+ m.u.buffer_from_host.buffer_header.flags = 0; /* no flags */
+ m.u.buffer_from_host.buffer_header.pts = MMAL_TIME_UNKNOWN;
+ m.u.buffer_from_host.buffer_header.dts = MMAL_TIME_UNKNOWN;
+
+ /* clear buffer type specific data */
+ memset(&m.u.buffer_from_host.buffer_header_type_specific, 0,
+ sizeof(m.u.buffer_from_host.buffer_header_type_specific));
+
+ /* no payload in message */
+ m.u.buffer_from_host.payload_in_message = 0;
+
+ vchiq_use_service(instance->vchiq_instance, instance->service_handle);
+
+ ret = vchiq_queue_kernel_message(instance->vchiq_instance, instance->service_handle, &m,
+ sizeof(struct mmal_msg_header) +
+ sizeof(m.u.buffer_from_host));
+ if (ret)
+ atomic_dec(&port->buffers_with_vpu);
+
+ vchiq_release_service(instance->vchiq_instance, instance->service_handle);
+
+ return ret;
+}
+
+/* deals with receipt of buffer to host message */
+static void buffer_to_host_cb(struct vchiq_mmal_instance *instance,
+ struct mmal_msg *msg, u32 msg_len)
+{
+ struct mmal_msg_context *msg_context;
+ u32 handle;
+
+ pr_debug("%s: instance:%p msg:%p msg_len:%d\n",
+ __func__, instance, msg, msg_len);
+
+ if (msg->u.buffer_from_host.drvbuf.magic == MMAL_MAGIC) {
+ handle = msg->u.buffer_from_host.drvbuf.client_context;
+ msg_context = lookup_msg_context(instance, handle);
+
+ if (!msg_context) {
+ pr_err("drvbuf.client_context(%u) is invalid\n",
+ handle);
+ return;
+ }
+ } else {
+ pr_err("MMAL_MSG_TYPE_BUFFER_TO_HOST with bad magic\n");
+ return;
+ }
+
+ msg_context->u.bulk.mmal_flags =
+ msg->u.buffer_from_host.buffer_header.flags;
+
+ if (msg->h.status != MMAL_MSG_STATUS_SUCCESS) {
+ /* message reception had an error */
+ pr_warn("error %d in reply\n", msg->h.status);
+
+ msg_context->u.bulk.status = msg->h.status;
+
+ } else if (msg->u.buffer_from_host.buffer_header.length == 0) {
+ /* empty buffer */
+ if (msg->u.buffer_from_host.buffer_header.flags &
+ MMAL_BUFFER_HEADER_FLAG_EOS) {
+ msg_context->u.bulk.status =
+ bulk_receive(instance, msg, msg_context);
+ if (msg_context->u.bulk.status == 0)
+ return; /* successful bulk submission, bulk
+ * completion will trigger callback
+ */
+ } else {
+ /* do callback with empty buffer - not EOS though */
+ msg_context->u.bulk.status = 0;
+ msg_context->u.bulk.buffer_used = 0;
+ }
+ } else if (msg->u.buffer_from_host.payload_in_message == 0) {
+ /* data is not in message, queue a bulk receive */
+ msg_context->u.bulk.status =
+ bulk_receive(instance, msg, msg_context);
+ if (msg_context->u.bulk.status == 0)
+ return; /* successful bulk submission, bulk
+ * completion will trigger callback
+ */
+
+ /* failed to submit buffer, this will end badly */
+ pr_err("error %d on bulk submission\n",
+ msg_context->u.bulk.status);
+
+ } else if (msg->u.buffer_from_host.payload_in_message <=
+ MMAL_VC_SHORT_DATA) {
+ /* data payload within message */
+ msg_context->u.bulk.status = inline_receive(instance, msg,
+ msg_context);
+ } else {
+ pr_err("message with invalid short payload\n");
+
+ /* signal error */
+ msg_context->u.bulk.status = -EINVAL;
+ msg_context->u.bulk.buffer_used =
+ msg->u.buffer_from_host.payload_in_message;
+ }
+
+ /* schedule the port callback */
+ schedule_work(&msg_context->u.bulk.work);
+}
+
+static void bulk_receive_cb(struct vchiq_mmal_instance *instance,
+ struct mmal_msg_context *msg_context)
+{
+ msg_context->u.bulk.status = 0;
+
+ /* schedule the port callback */
+ schedule_work(&msg_context->u.bulk.work);
+}
+
+static void bulk_abort_cb(struct vchiq_mmal_instance *instance,
+ struct mmal_msg_context *msg_context)
+{
+ pr_err("%s: bulk ABORTED msg_context:%p\n", __func__, msg_context);
+
+ msg_context->u.bulk.status = -EINTR;
+
+ schedule_work(&msg_context->u.bulk.work);
+}
+
+/* incoming event service callback */
+static int mmal_service_callback(struct vchiq_instance *vchiq_instance,
+ enum vchiq_reason reason, struct vchiq_header *header,
+ unsigned int handle, void *cb_data,
+ void __user *cb_userdata)
+{
+ struct vchiq_mmal_instance *instance = vchiq_get_service_userdata(vchiq_instance, handle);
+ u32 msg_len;
+ struct mmal_msg *msg;
+ struct mmal_msg_context *msg_context;
+
+ if (!instance) {
+ pr_err("Message callback passed NULL instance\n");
+ return 0;
+ }
+
+ switch (reason) {
+ case VCHIQ_MESSAGE_AVAILABLE:
+ msg = (void *)header->data;
+ msg_len = header->size;
+
+ DBG_DUMP_MSG(msg, msg_len, "<<< reply message");
+
+ /* handling is different for buffer messages */
+ switch (msg->h.type) {
+ case MMAL_MSG_TYPE_BUFFER_FROM_HOST:
+ vchiq_release_message(vchiq_instance, handle, header);
+ break;
+
+ case MMAL_MSG_TYPE_EVENT_TO_HOST:
+ event_to_host_cb(instance, msg, msg_len);
+ vchiq_release_message(vchiq_instance, handle, header);
+
+ break;
+
+ case MMAL_MSG_TYPE_BUFFER_TO_HOST:
+ buffer_to_host_cb(instance, msg, msg_len);
+ vchiq_release_message(vchiq_instance, handle, header);
+ break;
+
+ default:
+ /* messages dependent on header context to complete */
+ if (!msg->h.context) {
+ pr_err("received message context was null!\n");
+ vchiq_release_message(vchiq_instance, handle, header);
+ break;
+ }
+
+ msg_context = lookup_msg_context(instance,
+ msg->h.context);
+ if (!msg_context) {
+ pr_err("received invalid message context %u!\n",
+ msg->h.context);
+ vchiq_release_message(vchiq_instance, handle, header);
+ break;
+ }
+
+ /* fill in context values */
+ msg_context->u.sync.msg_handle = header;
+ msg_context->u.sync.msg = msg;
+ msg_context->u.sync.msg_len = msg_len;
+
+ /* todo: should this check (completion_done()
+ * == 1) for no one waiting? Or do we need a
+ * flag to tell us the completion has been
+ * interrupted, so we can free the message and
+ * its context? That would also address the
+ * message-arriving-after-interruption todo
+ * below.
+ */
+
+ /* complete message so caller knows it happened */
+ complete(&msg_context->u.sync.cmplt);
+ break;
+ }
+
+ break;
+
+ case VCHIQ_BULK_RECEIVE_DONE:
+ bulk_receive_cb(instance, cb_data);
+ break;
+
+ case VCHIQ_BULK_RECEIVE_ABORTED:
+ bulk_abort_cb(instance, cb_data);
+ break;
+
+ case VCHIQ_SERVICE_CLOSED:
+ /* TODO: consider if this requires action if received when
+ * driver is not explicitly closing the service
+ */
+ break;
+
+ default:
+ pr_err("Received unhandled message reason %d\n", reason);
+ break;
+ }
+
+ return 0;
+}
+
+static int send_synchronous_mmal_msg(struct vchiq_mmal_instance *instance,
+ struct mmal_msg *msg,
+ unsigned int payload_len,
+ struct mmal_msg **msg_out,
+ struct vchiq_header **msg_handle)
+{
+ struct mmal_msg_context *msg_context;
+ int ret;
+ unsigned long time_left;
+
+ /* payload size must not cause message to exceed max size */
+ if (payload_len >
+ (MMAL_MSG_MAX_SIZE - sizeof(struct mmal_msg_header))) {
+ pr_err("payload length %d exceeds max:%d\n", payload_len,
+ (int)(MMAL_MSG_MAX_SIZE -
+ sizeof(struct mmal_msg_header)));
+ return -EINVAL;
+ }
+
+ msg_context = get_msg_context(instance);
+ if (IS_ERR(msg_context))
+ return PTR_ERR(msg_context);
+
+ init_completion(&msg_context->u.sync.cmplt);
+
+ msg->h.magic = MMAL_MAGIC;
+ msg->h.context = msg_context->handle;
+ msg->h.status = 0;
+
+ DBG_DUMP_MSG(msg, (sizeof(struct mmal_msg_header) + payload_len),
+ ">>> sync message");
+
+ vchiq_use_service(instance->vchiq_instance, instance->service_handle);
+
+ ret = vchiq_queue_kernel_message(instance->vchiq_instance, instance->service_handle, msg,
+ sizeof(struct mmal_msg_header) +
+ payload_len);
+
+ vchiq_release_service(instance->vchiq_instance, instance->service_handle);
+
+ if (ret) {
+ pr_err("error %d queuing message\n", ret);
+ release_msg_context(msg_context);
+ return ret;
+ }
+
+ time_left = wait_for_completion_timeout(&msg_context->u.sync.cmplt,
+ SYNC_MSG_TIMEOUT * HZ);
+ if (time_left == 0) {
+ pr_err("timed out waiting for sync completion\n");
+ ret = -ETIME;
+ /* todo: what happens if the message arrives after aborting */
+ release_msg_context(msg_context);
+ return ret;
+ }
+
+ *msg_out = msg_context->u.sync.msg;
+ *msg_handle = msg_context->u.sync.msg_handle;
+ release_msg_context(msg_context);
+
+ return 0;
+}
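Every synchronous helper that follows shares this round trip; a condensed, hedged sketch of the calling pattern (PORT_INFO_GET stands in for any request type — this is an illustration, not an additional function in the patch):

	struct mmal_msg m;
	struct mmal_msg *rmsg;
	struct vchiq_header *rmsg_handle;
	int ret;

	m.h.type = MMAL_MSG_TYPE_PORT_INFO_GET;	/* the request type */
	/* ... fill in m.u.port_info_get ... */

	ret = send_synchronous_mmal_msg(instance, &m, sizeof(m.u.port_info_get),
					&rmsg, &rmsg_handle);
	if (ret)
		return ret;	/* queueing failed or the reply timed out */

	if (rmsg->h.type != m.h.type)	/* a reply must echo the request type */
		ret = -EINVAL;
	else
		ret = -rmsg->u.port_info_get_reply.status; /* VC status to -errno */

	vchiq_release_message(instance->vchiq_instance,
			      instance->service_handle, rmsg_handle);
	return ret;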
+
+static void dump_port_info(struct vchiq_mmal_port *port)
+{
+ pr_debug("port handle:0x%x enabled:%d\n", port->handle, port->enabled);
+
+ pr_debug("buffer minimum num:%d size:%d align:%d\n",
+ port->minimum_buffer.num,
+ port->minimum_buffer.size, port->minimum_buffer.alignment);
+
+ pr_debug("buffer recommended num:%d size:%d align:%d\n",
+ port->recommended_buffer.num,
+ port->recommended_buffer.size,
+ port->recommended_buffer.alignment);
+
+ pr_debug("buffer current values num:%d size:%d align:%d\n",
+ port->current_buffer.num,
+ port->current_buffer.size, port->current_buffer.alignment);
+
+ pr_debug("elementary stream: type:%d encoding:0x%x variant:0x%x\n",
+ port->format.type,
+ port->format.encoding, port->format.encoding_variant);
+
+ pr_debug(" bitrate:%d flags:0x%x\n",
+ port->format.bitrate, port->format.flags);
+
+ if (port->format.type == MMAL_ES_TYPE_VIDEO) {
+ pr_debug
+ ("es video format: width:%d height:%d colourspace:0x%x\n",
+ port->es.video.width, port->es.video.height,
+ port->es.video.color_space);
+
+ pr_debug(" : crop xywh %d,%d,%d,%d\n",
+ port->es.video.crop.x,
+ port->es.video.crop.y,
+ port->es.video.crop.width, port->es.video.crop.height);
+ pr_debug(" : framerate %d/%d aspect %d/%d\n",
+ port->es.video.frame_rate.numerator,
+ port->es.video.frame_rate.denominator,
+ port->es.video.par.numerator, port->es.video.par.denominator);
+ }
+}
+
+static void port_to_mmal_msg(struct vchiq_mmal_port *port, struct mmal_port *p)
+{
+ /* todo: do readonly fields need setting at all? */
+ p->type = port->type;
+ p->index = port->index;
+ p->index_all = 0;
+ p->is_enabled = port->enabled;
+ p->buffer_num_min = port->minimum_buffer.num;
+ p->buffer_size_min = port->minimum_buffer.size;
+ p->buffer_alignment_min = port->minimum_buffer.alignment;
+ p->buffer_num_recommended = port->recommended_buffer.num;
+ p->buffer_size_recommended = port->recommended_buffer.size;
+
+ /* only three writable fields in a port */
+ p->buffer_num = port->current_buffer.num;
+ p->buffer_size = port->current_buffer.size;
+ p->userdata = (u32)(unsigned long)port;
+}
+
+static int port_info_set(struct vchiq_mmal_instance *instance,
+ struct vchiq_mmal_port *port)
+{
+ int ret;
+ struct mmal_msg m;
+ struct mmal_msg *rmsg;
+ struct vchiq_header *rmsg_handle;
+
+ pr_debug("setting port info port %p\n", port);
+ if (!port)
+ return -1;
+ dump_port_info(port);
+
+ m.h.type = MMAL_MSG_TYPE_PORT_INFO_SET;
+
+ m.u.port_info_set.component_handle = port->component->handle;
+ m.u.port_info_set.port_type = port->type;
+ m.u.port_info_set.port_index = port->index;
+
+ port_to_mmal_msg(port, &m.u.port_info_set.port);
+
+ /* elementary stream format setup */
+ m.u.port_info_set.format.type = port->format.type;
+ m.u.port_info_set.format.encoding = port->format.encoding;
+ m.u.port_info_set.format.encoding_variant =
+ port->format.encoding_variant;
+ m.u.port_info_set.format.bitrate = port->format.bitrate;
+ m.u.port_info_set.format.flags = port->format.flags;
+
+ memcpy(&m.u.port_info_set.es, &port->es,
+ sizeof(union mmal_es_specific_format));
+
+ m.u.port_info_set.format.extradata_size = port->format.extradata_size;
+ memcpy(&m.u.port_info_set.extradata, port->format.extradata,
+ port->format.extradata_size);
+
+ ret = send_synchronous_mmal_msg(instance, &m,
+ sizeof(m.u.port_info_set),
+ &rmsg, &rmsg_handle);
+ if (ret)
+ return ret;
+
+ if (rmsg->h.type != MMAL_MSG_TYPE_PORT_INFO_SET) {
+ /* got an unexpected message type in reply */
+ ret = -EINVAL;
+ goto release_msg;
+ }
+
+ /* return operation status */
+ ret = -rmsg->u.port_info_get_reply.status;
+
+ pr_debug("%s:result:%d component:0x%x port:%d\n", __func__, ret,
+ port->component->handle, port->handle);
+
+release_msg:
+ vchiq_release_message(instance->vchiq_instance, instance->service_handle, rmsg_handle);
+
+ return ret;
+}
+
+/* use port info get message to retrieve port information */
+static int port_info_get(struct vchiq_mmal_instance *instance,
+ struct vchiq_mmal_port *port)
+{
+ int ret;
+ struct mmal_msg m;
+ struct mmal_msg *rmsg;
+ struct vchiq_header *rmsg_handle;
+
+ /* port info time */
+ m.h.type = MMAL_MSG_TYPE_PORT_INFO_GET;
+ m.u.port_info_get.component_handle = port->component->handle;
+ m.u.port_info_get.port_type = port->type;
+ m.u.port_info_get.index = port->index;
+
+ ret = send_synchronous_mmal_msg(instance, &m,
+ sizeof(m.u.port_info_get),
+ &rmsg, &rmsg_handle);
+ if (ret)
+ return ret;
+
+ if (rmsg->h.type != MMAL_MSG_TYPE_PORT_INFO_GET) {
+ /* got an unexpected message type in reply */
+ ret = -EINVAL;
+ goto release_msg;
+ }
+
+ /* return operation status */
+ ret = -rmsg->u.port_info_get_reply.status;
+ if (ret != MMAL_MSG_STATUS_SUCCESS)
+ goto release_msg;
+
+ if (rmsg->u.port_info_get_reply.port.is_enabled == 0)
+ port->enabled = false;
+ else
+ port->enabled = true;
+
+ /* copy the values out of the message */
+ port->handle = rmsg->u.port_info_get_reply.port_handle;
+
+ /* port type and index cached to use on port info set because
+ * it does not use a port handle
+ */
+ port->type = rmsg->u.port_info_get_reply.port_type;
+ port->index = rmsg->u.port_info_get_reply.port_index;
+
+ port->minimum_buffer.num =
+ rmsg->u.port_info_get_reply.port.buffer_num_min;
+ port->minimum_buffer.size =
+ rmsg->u.port_info_get_reply.port.buffer_size_min;
+ port->minimum_buffer.alignment =
+ rmsg->u.port_info_get_reply.port.buffer_alignment_min;
+
+ port->recommended_buffer.alignment =
+ rmsg->u.port_info_get_reply.port.buffer_alignment_min;
+ port->recommended_buffer.num =
+ rmsg->u.port_info_get_reply.port.buffer_num_recommended;
+
+ port->current_buffer.num = rmsg->u.port_info_get_reply.port.buffer_num;
+ port->current_buffer.size =
+ rmsg->u.port_info_get_reply.port.buffer_size;
+
+ /* stream format */
+ port->format.type = rmsg->u.port_info_get_reply.format.type;
+ port->format.encoding = rmsg->u.port_info_get_reply.format.encoding;
+ port->format.encoding_variant =
+ rmsg->u.port_info_get_reply.format.encoding_variant;
+ port->format.bitrate = rmsg->u.port_info_get_reply.format.bitrate;
+ port->format.flags = rmsg->u.port_info_get_reply.format.flags;
+
+ /* elementary stream format */
+ memcpy(&port->es,
+ &rmsg->u.port_info_get_reply.es,
+ sizeof(union mmal_es_specific_format));
+ port->format.es = &port->es;
+
+ port->format.extradata_size =
+ rmsg->u.port_info_get_reply.format.extradata_size;
+ memcpy(port->format.extradata,
+ rmsg->u.port_info_get_reply.extradata,
+ port->format.extradata_size);
+
+ pr_debug("received port info\n");
+ dump_port_info(port);
+
+release_msg:
+
+ pr_debug("%s:result:%d component:0x%x port:%d\n",
+ __func__, ret, port->component->handle, port->handle);
+
+ vchiq_release_message(instance->vchiq_instance, instance->service_handle, rmsg_handle);
+
+ return ret;
+}
+
+/* create component on vc */
+static int create_component(struct vchiq_mmal_instance *instance,
+ struct vchiq_mmal_component *component,
+ const char *name)
+{
+ int ret;
+ struct mmal_msg m;
+ struct mmal_msg *rmsg;
+ struct vchiq_header *rmsg_handle;
+
+ /* build component create message */
+ m.h.type = MMAL_MSG_TYPE_COMPONENT_CREATE;
+ m.u.component_create.client_component = component->client_component;
+ strscpy_pad(m.u.component_create.name, name,
+ sizeof(m.u.component_create.name));
+ m.u.component_create.pid = 0;
+
+ ret = send_synchronous_mmal_msg(instance, &m,
+ sizeof(m.u.component_create),
+ &rmsg, &rmsg_handle);
+ if (ret)
+ return ret;
+
+ if (rmsg->h.type != m.h.type) {
+ /* got an unexpected message type in reply */
+ ret = -EINVAL;
+ goto release_msg;
+ }
+
+ ret = -rmsg->u.component_create_reply.status;
+ if (ret != MMAL_MSG_STATUS_SUCCESS)
+ goto release_msg;
+
+ /* a valid component response received */
+ component->handle = rmsg->u.component_create_reply.component_handle;
+ component->inputs = rmsg->u.component_create_reply.input_num;
+ component->outputs = rmsg->u.component_create_reply.output_num;
+ component->clocks = rmsg->u.component_create_reply.clock_num;
+
+ pr_debug("Component handle:0x%x in:%d out:%d clock:%d\n",
+ component->handle,
+ component->inputs, component->outputs, component->clocks);
+
+release_msg:
+ vchiq_release_message(instance->vchiq_instance, instance->service_handle, rmsg_handle);
+
+ return ret;
+}
+
+/* destroys a component on vc */
+static int destroy_component(struct vchiq_mmal_instance *instance,
+ struct vchiq_mmal_component *component)
+{
+ int ret;
+ struct mmal_msg m;
+ struct mmal_msg *rmsg;
+ struct vchiq_header *rmsg_handle;
+
+ m.h.type = MMAL_MSG_TYPE_COMPONENT_DESTROY;
+ m.u.component_destroy.component_handle = component->handle;
+
+ ret = send_synchronous_mmal_msg(instance, &m,
+ sizeof(m.u.component_destroy),
+ &rmsg, &rmsg_handle);
+ if (ret)
+ return ret;
+
+ if (rmsg->h.type != m.h.type) {
+ /* got an unexpected message type in reply */
+ ret = -EINVAL;
+ goto release_msg;
+ }
+
+ ret = -rmsg->u.component_destroy_reply.status;
+
+release_msg:
+
+ vchiq_release_message(instance->vchiq_instance, instance->service_handle, rmsg_handle);
+
+ return ret;
+}
+
+/* enable a component on vc */
+static int enable_component(struct vchiq_mmal_instance *instance,
+ struct vchiq_mmal_component *component)
+{
+ int ret;
+ struct mmal_msg m;
+ struct mmal_msg *rmsg;
+ struct vchiq_header *rmsg_handle;
+
+ m.h.type = MMAL_MSG_TYPE_COMPONENT_ENABLE;
+ m.u.component_enable.component_handle = component->handle;
+
+ ret = send_synchronous_mmal_msg(instance, &m,
+ sizeof(m.u.component_enable),
+ &rmsg, &rmsg_handle);
+ if (ret)
+ return ret;
+
+ if (rmsg->h.type != m.h.type) {
+ /* got an unexpected message type in reply */
+ ret = -EINVAL;
+ goto release_msg;
+ }
+
+ ret = -rmsg->u.component_enable_reply.status;
+
+release_msg:
+ vchiq_release_message(instance->vchiq_instance, instance->service_handle, rmsg_handle);
+
+ return ret;
+}
+
+/* disable a component on vc */
+static int disable_component(struct vchiq_mmal_instance *instance,
+ struct vchiq_mmal_component *component)
+{
+ int ret;
+ struct mmal_msg m;
+ struct mmal_msg *rmsg;
+ struct vchiq_header *rmsg_handle;
+
+ m.h.type = MMAL_MSG_TYPE_COMPONENT_DISABLE;
+ m.u.component_disable.component_handle = component->handle;
+
+ ret = send_synchronous_mmal_msg(instance, &m,
+ sizeof(m.u.component_disable),
+ &rmsg, &rmsg_handle);
+ if (ret)
+ return ret;
+
+ if (rmsg->h.type != m.h.type) {
+ /* got an unexpected message type in reply */
+ ret = -EINVAL;
+ goto release_msg;
+ }
+
+ ret = -rmsg->u.component_disable_reply.status;
+
+release_msg:
+
+ vchiq_release_message(instance->vchiq_instance, instance->service_handle, rmsg_handle);
+
+ return ret;
+}
+
+/* get version of mmal implementation */
+static int get_version(struct vchiq_mmal_instance *instance,
+ u32 *major_out, u32 *minor_out)
+{
+ int ret;
+ struct mmal_msg m;
+ struct mmal_msg *rmsg;
+ struct vchiq_header *rmsg_handle;
+
+ m.h.type = MMAL_MSG_TYPE_GET_VERSION;
+
+ ret = send_synchronous_mmal_msg(instance, &m,
+ sizeof(m.u.version),
+ &rmsg, &rmsg_handle);
+ if (ret)
+ return ret;
+
+ if (rmsg->h.type != m.h.type) {
+ /* got an unexpected message type in reply */
+ ret = -EINVAL;
+ goto release_msg;
+ }
+
+ *major_out = rmsg->u.version.major;
+ *minor_out = rmsg->u.version.minor;
+
+release_msg:
+ vchiq_release_message(instance->vchiq_instance, instance->service_handle, rmsg_handle);
+
+ return ret;
+}
+
+/* do a port action with a port as a parameter */
+static int port_action_port(struct vchiq_mmal_instance *instance,
+ struct vchiq_mmal_port *port,
+ enum mmal_msg_port_action_type action_type)
+{
+ int ret;
+ struct mmal_msg m;
+ struct mmal_msg *rmsg;
+ struct vchiq_header *rmsg_handle;
+
+ m.h.type = MMAL_MSG_TYPE_PORT_ACTION;
+ m.u.port_action_port.component_handle = port->component->handle;
+ m.u.port_action_port.port_handle = port->handle;
+ m.u.port_action_port.action = action_type;
+
+ port_to_mmal_msg(port, &m.u.port_action_port.port);
+
+ ret = send_synchronous_mmal_msg(instance, &m,
+ sizeof(m.u.port_action_port),
+ &rmsg, &rmsg_handle);
+ if (ret)
+ return ret;
+
+ if (rmsg->h.type != MMAL_MSG_TYPE_PORT_ACTION) {
+ /* got an unexpected message type in reply */
+ ret = -EINVAL;
+ goto release_msg;
+ }
+
+ ret = -rmsg->u.port_action_reply.status;
+
+ pr_debug("%s:result:%d component:0x%x port:%d action:%s(%d)\n",
+ __func__,
+ ret, port->component->handle, port->handle,
+ port_action_type_names[action_type], action_type);
+
+release_msg:
+ vchiq_release_message(instance->vchiq_instance, instance->service_handle, rmsg_handle);
+
+ return ret;
+}
+
+/* do a port action with handles as parameters */
+static int port_action_handle(struct vchiq_mmal_instance *instance,
+ struct vchiq_mmal_port *port,
+ enum mmal_msg_port_action_type action_type,
+ u32 connect_component_handle,
+ u32 connect_port_handle)
+{
+ int ret;
+ struct mmal_msg m;
+ struct mmal_msg *rmsg;
+ struct vchiq_header *rmsg_handle;
+
+ m.h.type = MMAL_MSG_TYPE_PORT_ACTION;
+
+ m.u.port_action_handle.component_handle = port->component->handle;
+ m.u.port_action_handle.port_handle = port->handle;
+ m.u.port_action_handle.action = action_type;
+
+ m.u.port_action_handle.connect_component_handle =
+ connect_component_handle;
+ m.u.port_action_handle.connect_port_handle = connect_port_handle;
+
+ ret = send_synchronous_mmal_msg(instance, &m,
+ sizeof(m.u.port_action_handle),
+ &rmsg, &rmsg_handle);
+ if (ret)
+ return ret;
+
+ if (rmsg->h.type != MMAL_MSG_TYPE_PORT_ACTION) {
+ /* got an unexpected message type in reply */
+ ret = -EINVAL;
+ goto release_msg;
+ }
+
+ ret = -rmsg->u.port_action_reply.status;
+
+ pr_debug("%s:result:%d component:0x%x port:%d action:%s(%d) connect component:0x%x connect port:%d\n",
+ __func__,
+ ret, port->component->handle, port->handle,
+ port_action_type_names[action_type],
+ action_type, connect_component_handle, connect_port_handle);
+
+release_msg:
+ vchiq_release_message(instance->vchiq_instance, instance->service_handle, rmsg_handle);
+
+ return ret;
+}
+
+static int port_parameter_set(struct vchiq_mmal_instance *instance,
+ struct vchiq_mmal_port *port,
+ u32 parameter_id, void *value, u32 value_size)
+{
+ int ret;
+ struct mmal_msg m;
+ struct mmal_msg *rmsg;
+ struct vchiq_header *rmsg_handle;
+
+ m.h.type = MMAL_MSG_TYPE_PORT_PARAMETER_SET;
+
+ m.u.port_parameter_set.component_handle = port->component->handle;
+ m.u.port_parameter_set.port_handle = port->handle;
+ m.u.port_parameter_set.id = parameter_id;
+ m.u.port_parameter_set.size = (2 * sizeof(u32)) + value_size;
+ memcpy(&m.u.port_parameter_set.value, value, value_size);
+
+ ret = send_synchronous_mmal_msg(instance, &m,
+ (4 * sizeof(u32)) + value_size,
+ &rmsg, &rmsg_handle);
+ if (ret)
+ return ret;
+
+ if (rmsg->h.type != MMAL_MSG_TYPE_PORT_PARAMETER_SET) {
+ /* got an unexpected message type in reply */
+ ret = -EINVAL;
+ goto release_msg;
+ }
+
+ ret = -rmsg->u.port_parameter_set_reply.status;
+
+ pr_debug("%s:result:%d component:0x%x port:%d parameter:%d\n",
+ __func__,
+ ret, port->component->handle, port->handle, parameter_id);
+
+release_msg:
+ vchiq_release_message(instance->vchiq_instance, instance->service_handle, rmsg_handle);
+
+ return ret;
+}
+
+static int port_parameter_get(struct vchiq_mmal_instance *instance,
+ struct vchiq_mmal_port *port,
+ u32 parameter_id, void *value, u32 *value_size)
+{
+ int ret;
+ struct mmal_msg m;
+ struct mmal_msg *rmsg;
+ struct vchiq_header *rmsg_handle;
+
+ m.h.type = MMAL_MSG_TYPE_PORT_PARAMETER_GET;
+
+ m.u.port_parameter_get.component_handle = port->component->handle;
+ m.u.port_parameter_get.port_handle = port->handle;
+ m.u.port_parameter_get.id = parameter_id;
+ m.u.port_parameter_get.size = (2 * sizeof(u32)) + *value_size;
+
+ ret = send_synchronous_mmal_msg(instance, &m,
+ sizeof(struct
+ mmal_msg_port_parameter_get),
+ &rmsg, &rmsg_handle);
+ if (ret)
+ return ret;
+
+ if (rmsg->h.type != MMAL_MSG_TYPE_PORT_PARAMETER_GET) {
+ /* got an unexpected message type in reply */
+ pr_err("Incorrect reply type %d\n", rmsg->h.type);
+ ret = -EINVAL;
+ goto release_msg;
+ }
+
+ ret = rmsg->u.port_parameter_get_reply.status;
+
+ /* port_parameter_get_reply.size includes the header,
+ * whilst *value_size doesn't.
+ */
+ rmsg->u.port_parameter_get_reply.size -= (2 * sizeof(u32));
+
+ if (ret || rmsg->u.port_parameter_get_reply.size > *value_size) {
+ /* Copy only as much as we have space for
+ * but report true size of parameter
+ */
+ memcpy(value, &rmsg->u.port_parameter_get_reply.value,
+ *value_size);
+ } else {
+ memcpy(value, &rmsg->u.port_parameter_get_reply.value,
+ rmsg->u.port_parameter_get_reply.size);
+ }
+ /* Always report the size of the returned parameter to the caller */
+ *value_size = rmsg->u.port_parameter_get_reply.size;
+
+ pr_debug("%s:result:%d component:0x%x port:%d parameter:%d\n", __func__,
+ ret, port->component->handle, port->handle, parameter_id);
+
+release_msg:
+ vchiq_release_message(instance->vchiq_instance, instance->service_handle, rmsg_handle);
+
+ return ret;
+}
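The size bookkeeping above is subtle: the reply's size field includes the eight-byte id/size header while *value_size never does, and a short read still reports the parameter's true size. A hedged illustration (the parameter choice and sizes are assumptions for the example):

	u32 encodings[2];	/* assume an 8-byte parameter on the VPU side */
	u32 size = sizeof(u32);	/* the caller only offers 4 bytes */
	int ret;

	/* vchiq_mutex held, as in the exported wrapper further down */
	ret = port_parameter_get(instance, port,
				 MMAL_PARAMETER_SUPPORTED_ENCODINGS,
				 encodings, &size);
	/* only 4 bytes were copied into encodings, but size now reads 8,
	 * so the caller can detect the truncation and retry with a
	 * large enough buffer
	 */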
+
+/* disables a port and drains buffers from it */
+static int port_disable(struct vchiq_mmal_instance *instance,
+ struct vchiq_mmal_port *port)
+{
+ int ret;
+ struct list_head *q, *buf_head;
+ unsigned long flags = 0;
+
+ if (!port->enabled)
+ return 0;
+
+ port->enabled = false;
+
+ ret = port_action_port(instance, port,
+ MMAL_MSG_PORT_ACTION_TYPE_DISABLE);
+ if (ret == 0) {
+ /*
+ * Drain all queued buffers on port. This should only
+ * apply to buffers that have been queued before the port
+ * has been enabled. If the port has been enabled and buffers
+ * passed, then the buffers should have been removed from this
+ * list, and we should get the relevant callbacks via VCHIQ
+ * to release the buffers.
+ */
+ spin_lock_irqsave(&port->slock, flags);
+
+ list_for_each_safe(buf_head, q, &port->buffers) {
+ struct mmal_buffer *mmalbuf;
+
+ mmalbuf = list_entry(buf_head, struct mmal_buffer,
+ list);
+ list_del(buf_head);
+ if (port->buffer_cb) {
+ mmalbuf->length = 0;
+ mmalbuf->mmal_flags = 0;
+ mmalbuf->dts = MMAL_TIME_UNKNOWN;
+ mmalbuf->pts = MMAL_TIME_UNKNOWN;
+ port->buffer_cb(instance,
+ port, 0, mmalbuf);
+ }
+ }
+
+ spin_unlock_irqrestore(&port->slock, flags);
+
+ ret = port_info_get(instance, port);
+ }
+
+ return ret;
+}
+
+/* enable a port */
+static int port_enable(struct vchiq_mmal_instance *instance,
+ struct vchiq_mmal_port *port)
+{
+ unsigned int hdr_count;
+ struct list_head *q, *buf_head;
+ int ret;
+
+ if (port->enabled)
+ return 0;
+
+ ret = port_action_port(instance, port,
+ MMAL_MSG_PORT_ACTION_TYPE_ENABLE);
+ if (ret)
+ goto done;
+
+ port->enabled = true;
+
+ if (port->buffer_cb) {
+ /* send buffer headers to videocore */
+ hdr_count = 1;
+ list_for_each_safe(buf_head, q, &port->buffers) {
+ struct mmal_buffer *mmalbuf;
+
+ mmalbuf = list_entry(buf_head, struct mmal_buffer,
+ list);
+ ret = buffer_from_host(instance, port, mmalbuf);
+ if (ret)
+ goto done;
+
+ list_del(buf_head);
+ hdr_count++;
+ if (hdr_count > port->current_buffer.num)
+ break;
+ }
+ }
+
+ ret = port_info_get(instance, port);
+
+done:
+ return ret;
+}
+
+/* ------------------------------------------------------------------
+ * Exported API
+ *------------------------------------------------------------------
+ */
+
+int vchiq_mmal_port_set_format(struct vchiq_mmal_instance *instance,
+ struct vchiq_mmal_port *port)
+{
+ int ret;
+
+ if (mutex_lock_interruptible(&instance->vchiq_mutex))
+ return -EINTR;
+
+ ret = port_info_set(instance, port);
+ if (ret)
+ goto release_unlock;
+
+ /* read what has actually been set */
+ ret = port_info_get(instance, port);
+
+release_unlock:
+ mutex_unlock(&instance->vchiq_mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(vchiq_mmal_port_set_format);
+
+int vchiq_mmal_port_parameter_set(struct vchiq_mmal_instance *instance,
+ struct vchiq_mmal_port *port,
+ u32 parameter, void *value, u32 value_size)
+{
+ int ret;
+
+ if (mutex_lock_interruptible(&instance->vchiq_mutex))
+ return -EINTR;
+
+ ret = port_parameter_set(instance, port, parameter, value, value_size);
+
+ mutex_unlock(&instance->vchiq_mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(vchiq_mmal_port_parameter_set);
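As a usage sketch of the setter just exported (a hedged example; MMAL_PARAMETER_ZERO_COPY is one of the u32-valued parameters from mmal-parameters.h, chosen here purely for illustration):

	u32 enable = 1;
	int ret;

	ret = vchiq_mmal_port_parameter_set(instance, port,
					    MMAL_PARAMETER_ZERO_COPY,
					    &enable, sizeof(enable));
	if (ret)
		pr_warn("failed to enable zero-copy buffers (%d)\n", ret);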
+
+int vchiq_mmal_port_parameter_get(struct vchiq_mmal_instance *instance,
+ struct vchiq_mmal_port *port,
+ u32 parameter, void *value, u32 *value_size)
+{
+ int ret;
+
+ if (mutex_lock_interruptible(&instance->vchiq_mutex))
+ return -EINTR;
+
+ ret = port_parameter_get(instance, port, parameter, value, value_size);
+
+ mutex_unlock(&instance->vchiq_mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(vchiq_mmal_port_parameter_get);
+
+/* enable a port
+ *
+ * enables a port and queues buffers to satisfy callbacks if a
+ * callback handler is provided
+ */
+int vchiq_mmal_port_enable(struct vchiq_mmal_instance *instance,
+ struct vchiq_mmal_port *port,
+ vchiq_mmal_buffer_cb buffer_cb)
+{
+ int ret;
+
+ if (mutex_lock_interruptible(&instance->vchiq_mutex))
+ return -EINTR;
+
+ /* already enabled - noop */
+ if (port->enabled) {
+ ret = 0;
+ goto unlock;
+ }
+
+ port->buffer_cb = buffer_cb;
+
+ ret = port_enable(instance, port);
+
+unlock:
+ mutex_unlock(&instance->vchiq_mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(vchiq_mmal_port_enable);
+
+int vchiq_mmal_port_disable(struct vchiq_mmal_instance *instance,
+ struct vchiq_mmal_port *port)
+{
+ int ret;
+
+ if (mutex_lock_interruptible(&instance->vchiq_mutex))
+ return -EINTR;
+
+ if (!port->enabled) {
+ mutex_unlock(&instance->vchiq_mutex);
+ return 0;
+ }
+
+ ret = port_disable(instance, port);
+
+ mutex_unlock(&instance->vchiq_mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(vchiq_mmal_port_disable);
+
+/* ports will be connected in a tunneled manner so data buffers
+ * are not handled by the client.
+ */
+int vchiq_mmal_port_connect_tunnel(struct vchiq_mmal_instance *instance,
+ struct vchiq_mmal_port *src,
+ struct vchiq_mmal_port *dst)
+{
+ int ret;
+
+ if (mutex_lock_interruptible(&instance->vchiq_mutex))
+ return -EINTR;
+
+ /* disconnect ports if connected */
+ if (src->connected) {
+ ret = port_disable(instance, src);
+ if (ret) {
+ pr_err("failed disabling src port(%d)\n", ret);
+ goto release_unlock;
+ }
+
+ /* the destination port does not need to be disabled, as the
+ * ports are connected and it is done automatically
+ */
+
+ ret = port_action_handle(instance, src,
+ MMAL_MSG_PORT_ACTION_TYPE_DISCONNECT,
+ src->connected->component->handle,
+ src->connected->handle);
+ if (ret < 0) {
+ pr_err("failed disconnecting src port\n");
+ goto release_unlock;
+ }
+ src->connected->enabled = false;
+ src->connected = NULL;
+ }
+
+ if (!dst) {
+ /* do not make new connection */
+ ret = 0;
+ pr_debug("not making new connection\n");
+ goto release_unlock;
+ }
+
+ /* copy src port format to dst */
+ dst->format.encoding = src->format.encoding;
+ dst->es.video.width = src->es.video.width;
+ dst->es.video.height = src->es.video.height;
+ dst->es.video.crop.x = src->es.video.crop.x;
+ dst->es.video.crop.y = src->es.video.crop.y;
+ dst->es.video.crop.width = src->es.video.crop.width;
+ dst->es.video.crop.height = src->es.video.crop.height;
+ dst->es.video.frame_rate.numerator = src->es.video.frame_rate.numerator;
+ dst->es.video.frame_rate.denominator = src->es.video.frame_rate.denominator;
+
+ /* set new format */
+ ret = port_info_set(instance, dst);
+ if (ret) {
+ pr_debug("setting port info failed\n");
+ goto release_unlock;
+ }
+
+ /* read what has actually been set */
+ ret = port_info_get(instance, dst);
+ if (ret) {
+ pr_debug("read back port info failed\n");
+ goto release_unlock;
+ }
+
+ /* connect two ports together */
+ ret = port_action_handle(instance, src,
+ MMAL_MSG_PORT_ACTION_TYPE_CONNECT,
+ dst->component->handle, dst->handle);
+ if (ret < 0) {
+ pr_debug("connecting port %d:%d to %d:%d failed\n",
+ src->component->handle, src->handle,
+ dst->component->handle, dst->handle);
+ goto release_unlock;
+ }
+ src->connected = dst;
+
+release_unlock:
+
+ mutex_unlock(&instance->vchiq_mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(vchiq_mmal_port_connect_tunnel);
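A hedged sketch of how a client would drive the tunnel call above; `camera` and `renderer` are hypothetical components, and the NULL-destination teardown matches the disconnect path in the function body:

	static int example_tunnel(struct vchiq_mmal_instance *instance,
				  struct vchiq_mmal_component *camera,
				  struct vchiq_mmal_component *renderer)
	{
		int ret;

		/* tunnel the camera's first output into the renderer's input */
		ret = vchiq_mmal_port_connect_tunnel(instance,
						     &camera->output[0],
						     &renderer->input[0]);
		if (ret)
			return ret;

		/* later: a NULL destination only disconnects the tunnel */
		return vchiq_mmal_port_connect_tunnel(instance,
						      &camera->output[0], NULL);
	}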
+
+int vchiq_mmal_submit_buffer(struct vchiq_mmal_instance *instance,
+ struct vchiq_mmal_port *port,
+ struct mmal_buffer *buffer)
+{
+ unsigned long flags = 0;
+ int ret;
+
+ ret = buffer_from_host(instance, port, buffer);
+ if (ret == -EINVAL) {
+ /* Port is disabled. Queue for when it is enabled. */
+ spin_lock_irqsave(&port->slock, flags);
+ list_add_tail(&buffer->list, &port->buffers);
+ spin_unlock_irqrestore(&port->slock, flags);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vchiq_mmal_submit_buffer);
+
+int mmal_vchi_buffer_init(struct vchiq_mmal_instance *instance,
+ struct mmal_buffer *buf)
+{
+ struct mmal_msg_context *msg_context = get_msg_context(instance);
+
+ if (IS_ERR(msg_context))
+ return PTR_ERR(msg_context);
+
+ buf->msg_context = msg_context;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mmal_vchi_buffer_init);
+
+int mmal_vchi_buffer_cleanup(struct mmal_buffer *buf)
+{
+ struct mmal_msg_context *msg_context = buf->msg_context;
+
+ if (msg_context)
+ release_msg_context(msg_context);
+ buf->msg_context = NULL;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mmal_vchi_buffer_cleanup);
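Taken together, the three buffer entry points form the lifecycle a client drives; a hedged sketch (error handling trimmed, and the completion side naturally runs from the port's buffer_cb — both example_* functions are hypothetical):

	static int example_queue_buffer(struct vchiq_mmal_instance *instance,
					struct vchiq_mmal_port *port,
					struct mmal_buffer *buf)
	{
		int ret;

		ret = mmal_vchi_buffer_init(instance, buf); /* attach a msg_context */
		if (ret)
			return ret;

		/* if the port is disabled the buffer is parked on port->buffers
		 * and submitted automatically when the port is enabled
		 */
		return vchiq_mmal_submit_buffer(instance, port, buf);
	}

	/* called from the port's buffer_cb once VideoCore returns the buffer */
	static void example_buffer_done(struct mmal_buffer *buf)
	{
		mmal_vchi_buffer_cleanup(buf);	/* drop the msg_context */
	}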
+
+/* Initialise a mmal component and its ports */
+int vchiq_mmal_component_init(struct vchiq_mmal_instance *instance,
+ const char *name,
+ struct vchiq_mmal_component **component_out)
+{
+ int ret;
+ int idx; /* port index */
+ struct vchiq_mmal_component *component = NULL;
+
+ if (mutex_lock_interruptible(&instance->vchiq_mutex))
+ return -EINTR;
+
+ for (idx = 0; idx < VCHIQ_MMAL_MAX_COMPONENTS; idx++) {
+ if (!instance->component[idx].in_use) {
+ component = &instance->component[idx];
+ component->in_use = true;
+ break;
+ }
+ }
+
+ if (!component) {
+ ret = -EINVAL; /* todo: is this the correct error? */
+ goto unlock;
+ }
+
+ /* We need a handle to reference back to our component structure.
+ * Use the array index in instance->component rather than rolling
+ * another IDR.
+ */
+ component->client_component = idx;
+
+ ret = create_component(instance, component, name);
+ if (ret < 0) {
+ pr_err("%s: failed to create component %d (Not enough GPU mem?)\n",
+ __func__, ret);
+ goto unlock;
+ }
+
+ /* ports info needs gathering */
+ component->control.type = MMAL_PORT_TYPE_CONTROL;
+ component->control.index = 0;
+ component->control.component = component;
+ spin_lock_init(&component->control.slock);
+ INIT_LIST_HEAD(&component->control.buffers);
+ ret = port_info_get(instance, &component->control);
+ if (ret < 0)
+ goto release_component;
+
+ for (idx = 0; idx < component->inputs; idx++) {
+ component->input[idx].type = MMAL_PORT_TYPE_INPUT;
+ component->input[idx].index = idx;
+ component->input[idx].component = component;
+ spin_lock_init(&component->input[idx].slock);
+ INIT_LIST_HEAD(&component->input[idx].buffers);
+ ret = port_info_get(instance, &component->input[idx]);
+ if (ret < 0)
+ goto release_component;
+ }
+
+ for (idx = 0; idx < component->outputs; idx++) {
+ component->output[idx].type = MMAL_PORT_TYPE_OUTPUT;
+ component->output[idx].index = idx;
+ component->output[idx].component = component;
+ spin_lock_init(&component->output[idx].slock);
+ INIT_LIST_HEAD(&component->output[idx].buffers);
+ ret = port_info_get(instance, &component->output[idx]);
+ if (ret < 0)
+ goto release_component;
+ }
+
+ for (idx = 0; idx < component->clocks; idx++) {
+ component->clock[idx].type = MMAL_PORT_TYPE_CLOCK;
+ component->clock[idx].index = idx;
+ component->clock[idx].component = component;
+ spin_lock_init(&component->clock[idx].slock);
+ INIT_LIST_HEAD(&component->clock[idx].buffers);
+ ret = port_info_get(instance, &component->clock[idx]);
+ if (ret < 0)
+ goto release_component;
+ }
+
+ *component_out = component;
+
+ mutex_unlock(&instance->vchiq_mutex);
+
+ return 0;
+
+release_component:
+ destroy_component(instance, component);
+unlock:
+ if (component)
+ component->in_use = false;
+ mutex_unlock(&instance->vchiq_mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(vchiq_mmal_component_init);
+
+/*
+ * cause a mmal component to be destroyed
+ */
+int vchiq_mmal_component_finalise(struct vchiq_mmal_instance *instance,
+ struct vchiq_mmal_component *component)
+{
+ int ret;
+
+ if (mutex_lock_interruptible(&instance->vchiq_mutex))
+ return -EINTR;
+
+ if (component->enabled)
+ ret = disable_component(instance, component);
+
+ ret = destroy_component(instance, component);
+
+ component->in_use = false;
+
+ mutex_unlock(&instance->vchiq_mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(vchiq_mmal_component_finalise);
+
+/*
+ * cause a mmal component to be enabled
+ */
+int vchiq_mmal_component_enable(struct vchiq_mmal_instance *instance,
+ struct vchiq_mmal_component *component)
+{
+ int ret;
+
+ if (mutex_lock_interruptible(&instance->vchiq_mutex))
+ return -EINTR;
+
+ if (component->enabled) {
+ mutex_unlock(&instance->vchiq_mutex);
+ return 0;
+ }
+
+ ret = enable_component(instance, component);
+ if (ret == 0)
+ component->enabled = true;
+
+ mutex_unlock(&instance->vchiq_mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(vchiq_mmal_component_enable);
+
+/*
+ * cause a mmal component to be disabled
+ */
+int vchiq_mmal_component_disable(struct vchiq_mmal_instance *instance,
+ struct vchiq_mmal_component *component)
+{
+ int ret;
+
+ if (mutex_lock_interruptible(&instance->vchiq_mutex))
+ return -EINTR;
+
+ if (!component->enabled) {
+ mutex_unlock(&instance->vchiq_mutex);
+ return 0;
+ }
+
+ ret = disable_component(instance, component);
+ if (ret == 0)
+ component->enabled = false;
+
+ mutex_unlock(&instance->vchiq_mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(vchiq_mmal_component_disable);
+
+int vchiq_mmal_version(struct vchiq_mmal_instance *instance,
+ u32 *major_out, u32 *minor_out)
+{
+ int ret;
+
+ if (mutex_lock_interruptible(&instance->vchiq_mutex))
+ return -EINTR;
+
+ ret = get_version(instance, major_out, minor_out);
+
+ mutex_unlock(&instance->vchiq_mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(vchiq_mmal_version);
+
+int vchiq_mmal_finalise(struct vchiq_mmal_instance *instance)
+{
+ int status = 0;
+
+ if (!instance)
+ return -EINVAL;
+
+ if (mutex_lock_interruptible(&instance->vchiq_mutex))
+ return -EINTR;
+
+ vchiq_use_service(instance->vchiq_instance, instance->service_handle);
+
+ status = vchiq_close_service(instance->vchiq_instance, instance->service_handle);
+ if (status != 0)
+ pr_err("mmal-vchiq: VCHIQ close failed\n");
+
+ mutex_unlock(&instance->vchiq_mutex);
+
+ vchiq_shutdown(instance->vchiq_instance);
+ destroy_workqueue(instance->bulk_wq);
+
+ idr_destroy(&instance->context_map);
+
+ kfree(instance);
+
+ return status;
+}
+EXPORT_SYMBOL_GPL(vchiq_mmal_finalise);
+
+int vchiq_mmal_init(struct device *dev, struct vchiq_mmal_instance **out_instance)
+{
+ int status;
+ int err = -ENODEV;
+ struct vchiq_mmal_instance *instance;
+ struct vchiq_instance *vchiq_instance;
+ struct vchiq_service_params_kernel params = {
+ .version = VC_MMAL_VER,
+ .version_min = VC_MMAL_MIN_VER,
+ .fourcc = VCHIQ_MAKE_FOURCC('m', 'm', 'a', 'l'),
+ .callback = mmal_service_callback,
+ .userdata = NULL,
+ };
+ struct vchiq_drv_mgmt *mgmt = dev_get_drvdata(dev->parent);
+
+ /* compile time checks of structure sizes, as the structures are
+ * directly (de)serialised from memory.
+ */
+
+ /* ensure the header structure has packed to the correct size */
+ BUILD_BUG_ON(sizeof(struct mmal_msg_header) != 24);
+
+ /* ensure message structure does not exceed maximum length */
+ BUILD_BUG_ON(sizeof(struct mmal_msg) > MMAL_MSG_MAX_SIZE);
+
+ /* mmal port struct is correct size */
+ BUILD_BUG_ON(sizeof(struct mmal_port) != 64);
+
+ /* create a vchi instance */
+ status = vchiq_initialise(&mgmt->state, &vchiq_instance);
+ if (status) {
+ pr_err("Failed to initialise VCHI instance (status=%d)\n",
+ status);
+ return -EIO;
+ }
+
+ status = vchiq_connect(vchiq_instance);
+ if (status) {
+ pr_err("Failed to connect VCHI instance (status=%d)\n", status);
+ err = -EIO;
+ goto err_shutdown_vchiq;
+ }
+
+ instance = kzalloc(sizeof(*instance), GFP_KERNEL);
+
+ if (!instance) {
+ err = -ENOMEM;
+ goto err_shutdown_vchiq;
+ }
+
+ mutex_init(&instance->vchiq_mutex);
+
+ instance->vchiq_instance = vchiq_instance;
+
+ mutex_init(&instance->context_map_lock);
+ idr_init_base(&instance->context_map, 1);
+
+ params.userdata = instance;
+
+ instance->bulk_wq = alloc_ordered_workqueue("mmal-vchiq",
+ WQ_MEM_RECLAIM);
+ if (!instance->bulk_wq) {
+ err = -ENOMEM;
+ goto err_free;
+ }
+
+ status = vchiq_open_service(vchiq_instance, &params,
+ &instance->service_handle);
+ if (status) {
+ pr_err("Failed to open VCHI service connection (status=%d)\n",
+ status);
+ goto err_close_services;
+ }
+
+ vchiq_release_service(instance->vchiq_instance, instance->service_handle);
+
+ *out_instance = instance;
+
+ return 0;
+
+err_close_services:
+ vchiq_close_service(instance->vchiq_instance, instance->service_handle);
+ destroy_workqueue(instance->bulk_wq);
+err_free:
+ kfree(instance);
+err_shutdown_vchiq:
+ vchiq_shutdown(vchiq_instance);
+ return err;
+}
+EXPORT_SYMBOL_GPL(vchiq_mmal_init);
+
+MODULE_DESCRIPTION("BCM2835 MMAL VCHIQ interface");
+MODULE_AUTHOR("Dave Stevenson, <dave.stevenson@raspberrypi.org>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/raspberrypi/vchiq-mmal/mmal-vchiq.h b/drivers/platform/raspberrypi/vchiq-mmal/mmal-vchiq.h
new file mode 100644
index 000000000000..8c3959f6f97f
--- /dev/null
+++ b/drivers/platform/raspberrypi/vchiq-mmal/mmal-vchiq.h
@@ -0,0 +1,162 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Broadcom BCM2835 V4L2 driver
+ *
+ * Copyright © 2013 Raspberry Pi (Trading) Ltd.
+ *
+ * Authors: Vincent Sanders @ Collabora
+ * Dave Stevenson @ Broadcom
+ * (now dave.stevenson@raspberrypi.org)
+ * Simon Mellor @ Broadcom
+ * Luke Diamand @ Broadcom
+ *
+ * MMAL interface to VCHIQ message passing
+ */
+
+#ifndef MMAL_VCHIQ_H
+#define MMAL_VCHIQ_H
+
+#include "mmal-common.h"
+#include "mmal-msg-format.h"
+
+#define MAX_PORT_COUNT 4
+
+/* Maximum size of the format extradata. */
+#define MMAL_FORMAT_EXTRADATA_MAX_SIZE 128
+
+struct vchiq_mmal_instance;
+struct device;
+
+enum vchiq_mmal_es_type {
+ MMAL_ES_TYPE_UNKNOWN, /**< Unknown elementary stream type */
+ MMAL_ES_TYPE_CONTROL, /**< Elementary stream of control commands */
+ MMAL_ES_TYPE_AUDIO, /**< Audio elementary stream */
+ MMAL_ES_TYPE_VIDEO, /**< Video elementary stream */
+ MMAL_ES_TYPE_SUBPICTURE /**< Sub-picture elementary stream */
+};
+
+struct vchiq_mmal_port_buffer {
+ unsigned int num; /* number of buffers */
+ u32 size; /* size of buffers */
+ u32 alignment; /* alignment of buffers */
+};
+
+struct vchiq_mmal_port;
+
+typedef void (*vchiq_mmal_buffer_cb)(struct vchiq_mmal_instance *instance,
+ struct vchiq_mmal_port *port,
+ int status, struct mmal_buffer *buffer);
+
+struct vchiq_mmal_port {
+ bool enabled;
+ u32 handle;
+ u32 type; /* port type, cached to use on port info set */
+ u32 index; /* port index, cached to use on port info set */
+
+ /* component port belongs to, allows simple deref */
+ struct vchiq_mmal_component *component;
+
+ struct vchiq_mmal_port *connected; /* port connected to */
+
+ /* buffer info */
+ struct vchiq_mmal_port_buffer minimum_buffer;
+ struct vchiq_mmal_port_buffer recommended_buffer;
+ struct vchiq_mmal_port_buffer current_buffer;
+
+ /* stream format */
+ struct mmal_es_format_local format;
+ /* elementary stream format */
+ union mmal_es_specific_format es;
+
+ /* data buffers to fill */
+ struct list_head buffers;
+ /* lock to serialise adding and removing buffers from list */
+ spinlock_t slock;
+
+ /* Count of buffers the VPU has yet to return */
+ atomic_t buffers_with_vpu;
+ /* callback on buffer completion */
+ vchiq_mmal_buffer_cb buffer_cb;
+ /* callback context */
+ void *cb_ctx;
+};
+
+struct vchiq_mmal_component {
+ bool in_use;
+ bool enabled;
+ u32 handle; /* VideoCore handle for component */
+ u32 inputs; /* Number of input ports */
+ u32 outputs; /* Number of output ports */
+ u32 clocks; /* Number of clock ports */
+ struct vchiq_mmal_port control; /* control port */
+ struct vchiq_mmal_port input[MAX_PORT_COUNT]; /* input ports */
+ struct vchiq_mmal_port output[MAX_PORT_COUNT]; /* output ports */
+ struct vchiq_mmal_port clock[MAX_PORT_COUNT]; /* clock ports */
+ u32 client_component; /* Used to ref back to client struct */
+};
+
+int vchiq_mmal_init(struct device *dev, struct vchiq_mmal_instance **out_instance);
+int vchiq_mmal_finalise(struct vchiq_mmal_instance *instance);
+
+/* Initialise a mmal component and its ports */
+int vchiq_mmal_component_init(struct vchiq_mmal_instance *instance,
+ const char *name, struct vchiq_mmal_component **component_out);
+
+int vchiq_mmal_component_finalise(struct vchiq_mmal_instance *instance,
+ struct vchiq_mmal_component *component);
+
+int vchiq_mmal_component_enable(struct vchiq_mmal_instance *instance,
+ struct vchiq_mmal_component *component);
+
+int vchiq_mmal_component_disable(struct vchiq_mmal_instance *instance,
+ struct vchiq_mmal_component *component);
+
+/* enable a mmal port
+ *
+ * enables a port and, if a buffer callback is provided, enqueues buffer
+ * headers as appropriate for the port.
+ */
+int vchiq_mmal_port_enable(struct vchiq_mmal_instance *instance,
+ struct vchiq_mmal_port *port,
+ vchiq_mmal_buffer_cb buffer_cb);
+
+/* disable a port
+ *
+ * disabling a port will dequeue any pending buffers
+ */
+int vchiq_mmal_port_disable(struct vchiq_mmal_instance *instance,
+ struct vchiq_mmal_port *port);
+
+int vchiq_mmal_port_parameter_set(struct vchiq_mmal_instance *instance,
+ struct vchiq_mmal_port *port,
+ u32 parameter,
+ void *value,
+ u32 value_size);
+
+int vchiq_mmal_port_parameter_get(struct vchiq_mmal_instance *instance,
+ struct vchiq_mmal_port *port,
+ u32 parameter,
+ void *value,
+ u32 *value_size);
+
+int vchiq_mmal_port_set_format(struct vchiq_mmal_instance *instance,
+ struct vchiq_mmal_port *port);
+
+int vchiq_mmal_port_connect_tunnel(struct vchiq_mmal_instance *instance,
+ struct vchiq_mmal_port *src,
+ struct vchiq_mmal_port *dst);
+
+int vchiq_mmal_version(struct vchiq_mmal_instance *instance,
+ u32 *major_out,
+ u32 *minor_out);
+
+int vchiq_mmal_submit_buffer(struct vchiq_mmal_instance *instance,
+ struct vchiq_mmal_port *port,
+ struct mmal_buffer *buf);
+
+int mmal_vchi_buffer_init(struct vchiq_mmal_instance *instance,
+ struct mmal_buffer *buf);
+int mmal_vchi_buffer_cleanup(struct mmal_buffer *buf);
+#endif /* MMAL_VCHIQ_H */
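Putting the declarations above together, a hedged bring-up sequence for a client driver ("ril.camera" is the component name the firmware is commonly asked for; treat both it and example_bring_up() as assumptions for illustration):

	static int example_bring_up(struct device *dev)
	{
		struct vchiq_mmal_instance *mmal;
		struct vchiq_mmal_component *cam;
		int ret;

		ret = vchiq_mmal_init(dev, &mmal);
		if (ret)
			return ret;

		ret = vchiq_mmal_component_init(mmal, "ril.camera", &cam);
		if (ret)
			goto err_finalise;

		ret = vchiq_mmal_component_enable(mmal, cam);
		if (ret)
			goto err_component;

		return 0;

	err_component:
		vchiq_mmal_component_finalise(mmal, cam);
	err_finalise:
		vchiq_mmal_finalise(mmal);
		return ret;
	}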
diff --git a/drivers/platform/surface/aggregator/core.c b/drivers/platform/surface/aggregator/core.c
index c58e1fdd1a5f..c7e05f7bc199 100644
--- a/drivers/platform/surface/aggregator/core.c
+++ b/drivers/platform/surface/aggregator/core.c
@@ -676,7 +676,7 @@ static int ssam_serial_hub_probe(struct serdev_device *serdev)
status = ssam_serdev_setup(ssh, serdev);
if (status) {
- status = dev_err_probe(dev, status, "failed to setup serdev\n");
+ dev_err_probe(dev, status, "failed to setup serdev\n");
goto err_devinit;
}
diff --git a/drivers/platform/surface/aggregator/ssh_packet_layer.c b/drivers/platform/surface/aggregator/ssh_packet_layer.c
index 6081b0146d5f..3dd22856570f 100644
--- a/drivers/platform/surface/aggregator/ssh_packet_layer.c
+++ b/drivers/platform/surface/aggregator/ssh_packet_layer.c
@@ -671,7 +671,7 @@ static void ssh_ptl_timeout_reaper_mod(struct ssh_ptl *ptl, ktime_t now,
/* Re-adjust / schedule reaper only if it is above resolution delta. */
if (ktime_before(aexp, ptl->rtx_timeout.expires)) {
ptl->rtx_timeout.expires = expires;
- mod_delayed_work(system_wq, &ptl->rtx_timeout.reaper, delta);
+ mod_delayed_work(system_percpu_wq, &ptl->rtx_timeout.reaper, delta);
}
spin_unlock(&ptl->rtx_timeout.lock);
diff --git a/drivers/platform/surface/aggregator/ssh_request_layer.c b/drivers/platform/surface/aggregator/ssh_request_layer.c
index 879ca9ee7ff6..a356e4956562 100644
--- a/drivers/platform/surface/aggregator/ssh_request_layer.c
+++ b/drivers/platform/surface/aggregator/ssh_request_layer.c
@@ -434,7 +434,7 @@ static void ssh_rtl_timeout_reaper_mod(struct ssh_rtl *rtl, ktime_t now,
/* Re-adjust / schedule reaper only if it is above resolution delta. */
if (ktime_before(aexp, rtl->rtx_timeout.expires)) {
rtl->rtx_timeout.expires = expires;
- mod_delayed_work(system_wq, &rtl->rtx_timeout.reaper, delta);
+ mod_delayed_work(system_percpu_wq, &rtl->rtx_timeout.reaper, delta);
}
spin_unlock(&rtl->rtx_timeout.lock);
diff --git a/drivers/platform/surface/surface_acpi_notify.c b/drivers/platform/surface/surface_acpi_notify.c
index 3b30cfe3466b..a9dcb0bbe90e 100644
--- a/drivers/platform/surface/surface_acpi_notify.c
+++ b/drivers/platform/surface/surface_acpi_notify.c
@@ -862,7 +862,7 @@ static int __init san_init(void)
{
int ret;
- san_wq = alloc_workqueue("san_wq", 0, 0);
+ san_wq = alloc_workqueue("san_wq", WQ_PERCPU, 0);
if (!san_wq)
return -ENOMEM;
ret = platform_driver_register(&surface_acpi_notify);
diff --git a/drivers/platform/surface/surface_aggregator_registry.c b/drivers/platform/surface/surface_aggregator_registry.c
index a594d5fcfcfd..78ac3a8fbb73 100644
--- a/drivers/platform/surface/surface_aggregator_registry.c
+++ b/drivers/platform/surface/surface_aggregator_registry.c
@@ -491,24 +491,13 @@ static const struct of_device_id ssam_platform_hub_of_match[] __maybe_unused = {
static int ssam_platform_hub_probe(struct platform_device *pdev)
{
const struct software_node **nodes;
- const struct of_device_id *match;
- struct device_node *fdt_root;
struct ssam_controller *ctrl;
struct fwnode_handle *root;
int status;
nodes = (const struct software_node **)acpi_device_get_match_data(&pdev->dev);
if (!nodes) {
- fdt_root = of_find_node_by_path("/");
- if (!fdt_root)
- return -ENODEV;
-
- match = of_match_node(ssam_platform_hub_of_match, fdt_root);
- of_node_put(fdt_root);
- if (!match)
- return -ENODEV;
-
- nodes = (const struct software_node **)match->data;
+ nodes = (const struct software_node **)of_machine_get_match_data(ssam_platform_hub_of_match);
if (!nodes)
return -ENODEV;
}
diff --git a/drivers/platform/wmi/Kconfig b/drivers/platform/wmi/Kconfig
new file mode 100644
index 000000000000..77fcbb18746b
--- /dev/null
+++ b/drivers/platform/wmi/Kconfig
@@ -0,0 +1,34 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+# ACPI WMI Core
+#
+
+menuconfig ACPI_WMI
+ tristate "ACPI-WMI support"
+ depends on ACPI && X86
+ help
+ This option enables support for the ACPI-WMI driver core.
+
+ The ACPI-WMI interface is a proprietary extension of ACPI allowing
+ the platform firmware to expose WMI (Windows Management Instrumentation)
+ objects used for managing various aspects of the underlying system.
+ Mapping between ACPI control methods and WMI objects happens through
+ special mapper devices (PNP0C14) defined inside the ACPI tables.
+
+ Enabling this option is necessary for building the vendor-specific
+ ACPI-WMI client drivers for Acer, Dell and HP machines (among others).
+
+ It is safe to enable this option even for machines that do not contain
+ any ACPI-WMI mapper devices at all.
+
+if ACPI_WMI
+
+config ACPI_WMI_LEGACY_DEVICE_NAMES
+ bool "Use legacy WMI device naming scheme"
+ help
+ Say Y here to force the WMI driver core to use the old WMI device naming
+ scheme when creating WMI devices. Doing so might be necessary for some
+ userspace applications but will cause the registration of WMI devices with
+ the same GUID to fail in some corner cases.
+
+endif # ACPI_WMI
diff --git a/drivers/platform/wmi/Makefile b/drivers/platform/wmi/Makefile
new file mode 100644
index 000000000000..98393d7391ec
--- /dev/null
+++ b/drivers/platform/wmi/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+# Makefile for linux/drivers/platform/wmi
+# ACPI WMI core
+#
+
+wmi-y := core.o
+obj-$(CONFIG_ACPI_WMI) += wmi.o
diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/wmi/core.c
index 4e86a422f05f..6878c4fcb0b5 100644
--- a/drivers/platform/x86/wmi.c
+++ b/drivers/platform/wmi/core.c
@@ -142,14 +142,6 @@ static inline void get_acpi_method_name(const struct wmi_block *wblock,
buffer[4] = '\0';
}
-static inline acpi_object_type get_param_acpi_type(const struct wmi_block *wblock)
-{
- if (wblock->gblock.flags & ACPI_WMI_STRING)
- return ACPI_TYPE_STRING;
- else
- return ACPI_TYPE_BUFFER;
-}
-
static int wmidev_match_guid(struct device *dev, const void *data)
{
struct wmi_block *wblock = dev_to_wblock(dev);
@@ -351,9 +343,16 @@ acpi_status wmidev_evaluate_method(struct wmi_device *wdev, u8 instance, u32 met
params[0].integer.value = instance;
params[1].type = ACPI_TYPE_INTEGER;
params[1].integer.value = method_id;
- params[2].type = get_param_acpi_type(wblock);
- params[2].buffer.length = in->length;
- params[2].buffer.pointer = in->pointer;
+
+ if (wblock->gblock.flags & ACPI_WMI_STRING) {
+ params[2].type = ACPI_TYPE_STRING;
+ params[2].string.length = in->length;
+ params[2].string.pointer = in->pointer;
+ } else {
+ params[2].type = ACPI_TYPE_BUFFER;
+ params[2].buffer.length = in->length;
+ params[2].buffer.pointer = in->pointer;
+ }
get_acpi_method_name(wblock, 'M', method);
@@ -519,9 +518,16 @@ acpi_status wmidev_block_set(struct wmi_device *wdev, u8 instance, const struct
input.pointer = params;
params[0].type = ACPI_TYPE_INTEGER;
params[0].integer.value = instance;
- params[1].type = get_param_acpi_type(wblock);
- params[1].buffer.length = in->length;
- params[1].buffer.pointer = in->pointer;
+
+ if (wblock->gblock.flags & ACPI_WMI_STRING) {
+ params[1].type = ACPI_TYPE_STRING;
+ params[1].string.length = in->length;
+ params[1].string.pointer = in->pointer;
+ } else {
+ params[1].type = ACPI_TYPE_BUFFER;
+ params[1].buffer.length = in->length;
+ params[1].buffer.pointer = in->pointer;
+ }
get_acpi_method_name(wblock, 'S', method);
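The motivation for open-coding both branches instead of keeping the removed get_param_acpi_type() helper: ACPI strings and buffers are distinct members of union acpi_object with differently typed pointer fields, so the length/pointer assignments cannot be shared. A hedged distillation (fill_wmi_param() is a hypothetical helper, not part of the patch):

	static void fill_wmi_param(union acpi_object *param,
				   const struct acpi_buffer *in, bool is_string)
	{
		if (is_string) {
			param->type = ACPI_TYPE_STRING;
			param->string.length = in->length;
			param->string.pointer = in->pointer; /* char *: text */
		} else {
			param->type = ACPI_TYPE_BUFFER;
			param->buffer.length = in->length;
			param->buffer.pointer = in->pointer; /* u8 *: raw bytes */
		}
	}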
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index c883a28e0916..4cb7d97a9fcc 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -16,36 +16,6 @@ menuconfig X86_PLATFORM_DEVICES
if X86_PLATFORM_DEVICES
-config ACPI_WMI
- tristate "WMI"
- depends on ACPI
- help
- This driver adds support for the ACPI-WMI (Windows Management
- Instrumentation) mapper device (PNP0C14) found on some systems.
-
- ACPI-WMI is a proprietary extension to ACPI to expose parts of the
- ACPI firmware to userspace - this is done through various vendor
- defined methods and data blocks in a PNP0C14 device, which are then
- made available for userspace to call.
-
- The implementation of this in Linux currently only exposes this to
- other kernel space drivers.
-
- This driver is a required dependency to build the firmware specific
- drivers needed on many machines, including Acer and HP laptops.
-
- It is safe to enable this driver even if your DSDT doesn't define
- any ACPI-WMI devices.
-
-config ACPI_WMI_LEGACY_DEVICE_NAMES
- bool "Use legacy WMI device naming scheme"
- depends on ACPI_WMI
- help
- Say Y here to force the WMI driver core to use the old WMI device naming
- scheme when creating WMI devices. Doing so might be necessary for some
- userspace applications but will cause the registration of WMI devices with
- the same GUID to fail in some corner cases.
-
config WMI_BMOF
tristate "WMI embedded Binary MOF driver"
depends on ACPI_WMI
@@ -74,6 +44,8 @@ config HUAWEI_WMI
To compile this driver as a module, choose M here: the module
will be called huawei-wmi.
+source "drivers/platform/x86/uniwill/Kconfig"
+
config UV_SYSFS
tristate "Sysfs structure for UV systems"
depends on X86_UV
@@ -262,6 +234,18 @@ config ASUS_WIRELESS
If you choose to compile this driver as a module the module will be
called asus-wireless.
+config ASUS_ARMOURY
+ tristate "ASUS Armoury driver"
+ depends on ASUS_WMI
+ select FW_ATTR_CLASS
+ help
+ Say Y here if you have a WMI-aware Asus machine and would like to use the
+ firmware_attributes API to control various settings typically exposed in
+ the ASUS Armoury Crate application available on Windows.
+
+ To compile this driver as a module, choose M here: the module will
+ be called asus-armoury.
+
config ASUS_WMI
tristate "ASUS WMI Driver"
depends on ACPI_WMI
@@ -284,6 +268,17 @@ config ASUS_WMI
To compile this driver as a module, choose M here: the module will
be called asus-wmi.
+config ASUS_WMI_DEPRECATED_ATTRS
+ bool "BIOS option support in WMI platform (DEPRECATED)"
+ depends on ASUS_WMI
+ default y
+ help
+ Say Y to expose the configurable BIOS options through the asus-wmi
+ driver.
+
+ This can be used with or without the asus-armoury driver, which
+ exposes the same attributes plus more options and better features.
+
config ASUS_NB_WMI
tristate "Asus Notebook WMI Driver"
depends on ASUS_WMI
@@ -316,6 +311,19 @@ config ASUS_TF103C_DOCK
If you have an Asus TF103C tablet say Y or M here, for a generic x86
distro config say M here.
+config AYANEO_EC
+ tristate "Ayaneo EC platform control"
+ depends on DMI
+ depends on ACPI_EC
+ depends on ACPI_BATTERY
+ depends on HWMON
+ help
+ Enables support for the platform EC of Ayaneo devices. This
+ includes fan control, fan speed, charge limit, magic
+ module detection, and controller power control.
+
+ If you have an Ayaneo device, say Y or M here.
+
config MERAKI_MX100
tristate "Cisco Meraki MX100 Platform Driver"
depends on GPIOLIB
@@ -1031,9 +1039,7 @@ config OXP_EC
help
Enables support for the platform EC of OneXPlayer and AOKZOE
handheld devices. This includes fan speed, fan controls, and
- disabling the default TDP behavior of the device. Due to legacy
- reasons, this driver also provides hwmon functionality to Ayaneo
- devices and the OrangePi Neo.
+ disabling the default TDP behavior of the device.
source "drivers/platform/x86/tuxedo/Kconfig"
diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile
index c7db2a88c11a..d25762f7114f 100644
--- a/drivers/platform/x86/Makefile
+++ b/drivers/platform/x86/Makefile
@@ -5,7 +5,6 @@
#
# Windows Management Interface
-obj-$(CONFIG_ACPI_WMI) += wmi.o
obj-$(CONFIG_WMI_BMOF) += wmi-bmof.o
# WMI drivers
@@ -33,12 +32,16 @@ obj-$(CONFIG_APPLE_GMUX) += apple-gmux.o
# ASUS
obj-$(CONFIG_ASUS_LAPTOP) += asus-laptop.o
obj-$(CONFIG_ASUS_WIRELESS) += asus-wireless.o
+obj-$(CONFIG_ASUS_ARMOURY) += asus-armoury.o
obj-$(CONFIG_ASUS_WMI) += asus-wmi.o
obj-$(CONFIG_ASUS_NB_WMI) += asus-nb-wmi.o
obj-$(CONFIG_ASUS_TF103C_DOCK) += asus-tf103c-dock.o
obj-$(CONFIG_EEEPC_LAPTOP) += eeepc-laptop.o
obj-$(CONFIG_EEEPC_WMI) += eeepc-wmi.o
+# Ayaneo
+obj-$(CONFIG_AYANEO_EC) += ayaneo-ec.o
+
# Cisco/Meraki
obj-$(CONFIG_MERAKI_MX100) += meraki-mx100.o
@@ -110,6 +113,9 @@ obj-$(CONFIG_TOSHIBA_WMI) += toshiba-wmi.o
# before toshiba_acpi initializes
obj-$(CONFIG_ACPI_TOSHIBA) += toshiba_acpi.o
+# Uniwill
+obj-y += uniwill/
+
# Inspur
obj-$(CONFIG_INSPUR_PLATFORM_PROFILE) += inspur_platform_profile.o
diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
index d848afc91f87..bf97381faf58 100644
--- a/drivers/platform/x86/acer-wmi.c
+++ b/drivers/platform/x86/acer-wmi.c
@@ -12,10 +12,12 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
+#include <linux/minmax.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/dmi.h>
+#include <linux/fixp-arith.h>
#include <linux/backlight.h>
#include <linux/leds.h>
#include <linux/platform_device.h>
@@ -68,10 +70,27 @@ MODULE_LICENSE("GPL");
#define ACER_WMID_SET_GAMING_LED_METHODID 2
#define ACER_WMID_GET_GAMING_LED_METHODID 4
#define ACER_WMID_GET_GAMING_SYS_INFO_METHODID 5
-#define ACER_WMID_SET_GAMING_FAN_BEHAVIOR 14
+#define ACER_WMID_SET_GAMING_FAN_BEHAVIOR_METHODID 14
+#define ACER_WMID_GET_GAMING_FAN_BEHAVIOR_METHODID 15
+#define ACER_WMID_SET_GAMING_FAN_SPEED_METHODID 16
+#define ACER_WMID_GET_GAMING_FAN_SPEED_METHODID 17
#define ACER_WMID_SET_GAMING_MISC_SETTING_METHODID 22
#define ACER_WMID_GET_GAMING_MISC_SETTING_METHODID 23
+#define ACER_GAMING_FAN_BEHAVIOR_CPU BIT(0)
+#define ACER_GAMING_FAN_BEHAVIOR_GPU BIT(3)
+
+#define ACER_GAMING_FAN_BEHAVIOR_STATUS_MASK GENMASK_ULL(7, 0)
+#define ACER_GAMING_FAN_BEHAVIOR_ID_MASK GENMASK_ULL(15, 0)
+#define ACER_GAMING_FAN_BEHAVIOR_SET_CPU_MODE_MASK GENMASK(17, 16)
+#define ACER_GAMING_FAN_BEHAVIOR_SET_GPU_MODE_MASK GENMASK(23, 22)
+#define ACER_GAMING_FAN_BEHAVIOR_GET_CPU_MODE_MASK GENMASK(9, 8)
+#define ACER_GAMING_FAN_BEHAVIOR_GET_GPU_MODE_MASK GENMASK(15, 14)
+
+#define ACER_GAMING_FAN_SPEED_STATUS_MASK GENMASK_ULL(7, 0)
+#define ACER_GAMING_FAN_SPEED_ID_MASK GENMASK_ULL(7, 0)
+#define ACER_GAMING_FAN_SPEED_VALUE_MASK GENMASK_ULL(15, 8)
+
#define ACER_GAMING_MISC_SETTING_STATUS_MASK GENMASK_ULL(7, 0)
#define ACER_GAMING_MISC_SETTING_INDEX_MASK GENMASK_ULL(7, 0)
#define ACER_GAMING_MISC_SETTING_VALUE_MASK GENMASK_ULL(15, 8)
@@ -122,6 +141,17 @@ enum acer_wmi_predator_v4_sensor_id {
ACER_WMID_SENSOR_GPU_TEMPERATURE = 0x0A,
};
+enum acer_wmi_gaming_fan_id {
+ ACER_WMID_CPU_FAN = 0x01,
+ ACER_WMID_GPU_FAN = 0x04,
+};
+
+enum acer_wmi_gaming_fan_mode {
+ ACER_WMID_FAN_MODE_AUTO = 0x01,
+ ACER_WMID_FAN_MODE_TURBO = 0x02,
+ ACER_WMID_FAN_MODE_CUSTOM = 0x03,
+};
+
enum acer_wmi_predator_v4_oc {
ACER_WMID_OC_NORMAL = 0x0000,
ACER_WMID_OC_TURBO = 0x0002,
@@ -279,6 +309,7 @@ struct hotkey_function_type_aa {
#define ACER_CAP_TURBO_FAN BIT(9)
#define ACER_CAP_PLATFORM_PROFILE BIT(10)
#define ACER_CAP_HWMON BIT(11)
+#define ACER_CAP_PWM BIT(12)
/*
* Interface type flags
@@ -373,6 +404,7 @@ struct quirk_entry {
u8 cpu_fans;
u8 gpu_fans;
u8 predator_v4;
+ u8 pwm;
};
static struct quirk_entry *quirks;
@@ -392,6 +424,9 @@ static void __init set_quirks(void)
if (quirks->predator_v4)
interface->capability |= ACER_CAP_PLATFORM_PROFILE |
ACER_CAP_HWMON;
+
+ if (quirks->pwm)
+ interface->capability |= ACER_CAP_PWM;
}
static int __init dmi_matched(const struct dmi_system_id *dmi)
@@ -431,6 +466,7 @@ static struct quirk_entry quirk_acer_predator_ph16_72 = {
.cpu_fans = 1,
.gpu_fans = 1,
.predator_v4 = 1,
+ .pwm = 1,
};
static struct quirk_entry quirk_acer_predator_pt14_51 = {
@@ -438,6 +474,7 @@ static struct quirk_entry quirk_acer_predator_pt14_51 = {
.cpu_fans = 1,
.gpu_fans = 1,
.predator_v4 = 1,
+ .pwm = 1,
};
static struct quirk_entry quirk_acer_predator_v4 = {
@@ -658,6 +695,15 @@ static const struct dmi_system_id acer_quirks[] __initconst = {
},
{
.callback = dmi_matched,
+ .ident = "Acer Predator Helios Neo 16",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Predator PHN16-72"),
+ },
+ .driver_data = &quirk_acer_predator_ph16_72,
+ },
+ {
+ .callback = dmi_matched,
.ident = "Acer Predator PH18-71",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
@@ -1564,9 +1610,6 @@ static acpi_status WMID_gaming_set_u64(u64 value, u32 cap)
case ACER_CAP_TURBO_LED:
method_id = ACER_WMID_SET_GAMING_LED_METHODID;
break;
- case ACER_CAP_TURBO_FAN:
- method_id = ACER_WMID_SET_GAMING_FAN_BEHAVIOR;
- break;
default:
return AE_BAD_PARAMETER;
}
@@ -1617,25 +1660,125 @@ static int WMID_gaming_get_sys_info(u32 command, u64 *out)
return 0;
}
-static void WMID_gaming_set_fan_mode(u8 fan_mode)
+static int WMID_gaming_set_fan_behavior(u16 fan_bitmap, enum acer_wmi_gaming_fan_mode mode)
{
- /* fan_mode = 1 is used for auto, fan_mode = 2 used for turbo*/
- u64 gpu_fan_config1 = 0, gpu_fan_config2 = 0;
- int i;
+ acpi_status status;
+ u64 input = 0;
+ u64 result;
+
+ input |= FIELD_PREP(ACER_GAMING_FAN_BEHAVIOR_ID_MASK, fan_bitmap);
+
+ if (fan_bitmap & ACER_GAMING_FAN_BEHAVIOR_CPU)
+ input |= FIELD_PREP(ACER_GAMING_FAN_BEHAVIOR_SET_CPU_MODE_MASK, mode);
+
+ if (fan_bitmap & ACER_GAMING_FAN_BEHAVIOR_GPU)
+ input |= FIELD_PREP(ACER_GAMING_FAN_BEHAVIOR_SET_GPU_MODE_MASK, mode);
+
+ status = WMI_gaming_execute_u64(ACER_WMID_SET_GAMING_FAN_BEHAVIOR_METHODID, input,
+ &result);
+ if (ACPI_FAILURE(status))
+ return -EIO;
+
+ /* The return status must be zero for the operation to have succeeded */
+ if (FIELD_GET(ACER_GAMING_FAN_BEHAVIOR_STATUS_MASK, result))
+ return -EIO;
+
+ return 0;
+}
+
+static int WMID_gaming_get_fan_behavior(u16 fan_bitmap, enum acer_wmi_gaming_fan_mode *mode)
+{
+ acpi_status status;
+ u32 input = 0;
+ u64 result;
+ int value;
+
+ input |= FIELD_PREP(ACER_GAMING_FAN_BEHAVIOR_ID_MASK, fan_bitmap);
+ status = WMI_gaming_execute_u32_u64(ACER_WMID_GET_GAMING_FAN_BEHAVIOR_METHODID, input,
+ &result);
+ if (ACPI_FAILURE(status))
+ return -EIO;
+
+ /* The return status must be zero for the operation to have succeeded */
+ if (FIELD_GET(ACER_GAMING_FAN_BEHAVIOR_STATUS_MASK, result))
+ return -EIO;
+
+ /* Theoretically multiple fans can be specified, but this is currently unused */
+ if (fan_bitmap & ACER_GAMING_FAN_BEHAVIOR_CPU)
+ value = FIELD_GET(ACER_GAMING_FAN_BEHAVIOR_GET_CPU_MODE_MASK, result);
+ else if (fan_bitmap & ACER_GAMING_FAN_BEHAVIOR_GPU)
+ value = FIELD_GET(ACER_GAMING_FAN_BEHAVIOR_GET_GPU_MODE_MASK, result);
+ else
+ return -EINVAL;
+
+ if (value < ACER_WMID_FAN_MODE_AUTO || value > ACER_WMID_FAN_MODE_CUSTOM)
+ return -ENXIO;
+
+ *mode = value;
+
+ return 0;
+}
+
+static void WMID_gaming_set_fan_mode(enum acer_wmi_gaming_fan_mode mode)
+{
+ u16 fan_bitmap = 0;
if (quirks->cpu_fans > 0)
- gpu_fan_config2 |= 1;
- for (i = 0; i < (quirks->cpu_fans + quirks->gpu_fans); ++i)
- gpu_fan_config2 |= 1 << (i + 1);
- for (i = 0; i < quirks->gpu_fans; ++i)
- gpu_fan_config2 |= 1 << (i + 3);
- if (quirks->cpu_fans > 0)
- gpu_fan_config1 |= fan_mode;
- for (i = 0; i < (quirks->cpu_fans + quirks->gpu_fans); ++i)
- gpu_fan_config1 |= fan_mode << (2 * i + 2);
- for (i = 0; i < quirks->gpu_fans; ++i)
- gpu_fan_config1 |= fan_mode << (2 * i + 6);
- WMID_gaming_set_u64(gpu_fan_config2 | gpu_fan_config1 << 16, ACER_CAP_TURBO_FAN);
+ fan_bitmap |= ACER_GAMING_FAN_BEHAVIOR_CPU;
+
+ if (quirks->gpu_fans > 0)
+ fan_bitmap |= ACER_GAMING_FAN_BEHAVIOR_GPU;
+
+ WMID_gaming_set_fan_behavior(fan_bitmap, mode);
+}
+
+static int WMID_gaming_set_gaming_fan_speed(u8 fan, u8 speed)
+{
+ acpi_status status;
+ u64 input = 0;
+ u64 result;
+
+ if (speed > 100)
+ return -EINVAL;
+
+ input |= FIELD_PREP(ACER_GAMING_FAN_SPEED_ID_MASK, fan);
+ input |= FIELD_PREP(ACER_GAMING_FAN_SPEED_VALUE_MASK, speed);
+
+ status = WMI_gaming_execute_u64(ACER_WMID_SET_GAMING_FAN_SPEED_METHODID, input, &result);
+ if (ACPI_FAILURE(status))
+ return -EIO;
+
+ switch (FIELD_GET(ACER_GAMING_FAN_SPEED_STATUS_MASK, result)) {
+ case 0x00:
+ return 0;
+ case 0x01:
+ return -ENODEV;
+ case 0x02:
+ return -EINVAL;
+ default:
+ return -ENXIO;
+ }
+}
+
+static int WMID_gaming_get_gaming_fan_speed(u8 fan, u8 *speed)
+{
+ acpi_status status;
+ u32 input = 0;
+ u64 result;
+
+ input |= FIELD_PREP(ACER_GAMING_FAN_SPEED_ID_MASK, fan);
+
+ status = WMI_gaming_execute_u32_u64(ACER_WMID_GET_GAMING_FAN_SPEED_METHODID, input,
+ &result);
+ if (ACPI_FAILURE(status))
+ return -EIO;
+
+ if (FIELD_GET(ACER_GAMING_FAN_SPEED_STATUS_MASK, result))
+ return -ENODEV;
+
+ *speed = FIELD_GET(ACER_GAMING_FAN_SPEED_VALUE_MASK, result);
+
+ return 0;
}
static int WMID_gaming_set_misc_setting(enum acer_wmi_gaming_misc_setting setting, u8 value)
@@ -1922,7 +2065,7 @@ static int acer_toggle_turbo(void)
WMID_gaming_set_u64(0x1, ACER_CAP_TURBO_LED);
/* Set FAN mode to auto */
- WMID_gaming_set_fan_mode(0x1);
+ WMID_gaming_set_fan_mode(ACER_WMID_FAN_MODE_AUTO);
/* Set OC to normal */
if (has_cap(ACER_CAP_TURBO_OC)) {
@@ -1936,7 +2079,7 @@ static int acer_toggle_turbo(void)
WMID_gaming_set_u64(0x10001, ACER_CAP_TURBO_LED);
/* Set FAN mode to turbo */
- WMID_gaming_set_fan_mode(0x2);
+ WMID_gaming_set_fan_mode(ACER_WMID_FAN_MODE_TURBO);
/* Set OC to turbo mode */
if (has_cap(ACER_CAP_TURBO_OC)) {
@@ -2767,6 +2910,16 @@ static const enum acer_wmi_predator_v4_sensor_id acer_wmi_fan_channel_to_sensor_
[1] = ACER_WMID_SENSOR_GPU_FAN_SPEED,
};
+static const enum acer_wmi_gaming_fan_id acer_wmi_fan_channel_to_fan_id[] = {
+ [0] = ACER_WMID_CPU_FAN,
+ [1] = ACER_WMID_GPU_FAN,
+};
+
+static const u16 acer_wmi_fan_channel_to_fan_bitmap[] = {
+ [0] = ACER_GAMING_FAN_BEHAVIOR_CPU,
+ [1] = ACER_GAMING_FAN_BEHAVIOR_GPU,
+};
+
static umode_t acer_wmi_hwmon_is_visible(const void *data,
enum hwmon_sensor_types type, u32 attr,
int channel)
@@ -2778,6 +2931,11 @@ static umode_t acer_wmi_hwmon_is_visible(const void *data,
case hwmon_temp:
sensor_id = acer_wmi_temp_channel_to_sensor_id[channel];
break;
+ case hwmon_pwm:
+ if (!has_cap(ACER_CAP_PWM))
+ return 0;
+
+ fallthrough;
case hwmon_fan:
sensor_id = acer_wmi_fan_channel_to_sensor_id[channel];
break;
@@ -2785,8 +2943,12 @@ static umode_t acer_wmi_hwmon_is_visible(const void *data,
return 0;
}
- if (*supported_sensors & BIT(sensor_id - 1))
+ if (*supported_sensors & BIT(sensor_id - 1)) {
+ if (type == hwmon_pwm)
+ return 0644;
+
return 0444;
+ }
return 0;
}
@@ -2795,6 +2957,9 @@ static int acer_wmi_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
u32 attr, int channel, long *val)
{
u64 command = ACER_WMID_CMD_GET_PREDATOR_V4_SENSOR_READING;
+ enum acer_wmi_gaming_fan_mode mode;
+ u16 fan_bitmap;
+ u8 fan, speed;
u64 result;
int ret;
@@ -2820,6 +2985,80 @@ static int acer_wmi_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
*val = FIELD_GET(ACER_PREDATOR_V4_SENSOR_READING_BIT_MASK, result);
return 0;
+ case hwmon_pwm:
+ switch (attr) {
+ case hwmon_pwm_input:
+ fan = acer_wmi_fan_channel_to_fan_id[channel];
+ ret = WMID_gaming_get_gaming_fan_speed(fan, &speed);
+ if (ret < 0)
+ return ret;
+
+ *val = fixp_linear_interpolate(0, 0, 100, U8_MAX, speed);
+ return 0;
+ case hwmon_pwm_enable:
+ fan_bitmap = acer_wmi_fan_channel_to_fan_bitmap[channel];
+ ret = WMID_gaming_get_fan_behavior(fan_bitmap, &mode);
+ if (ret < 0)
+ return ret;
+
+ switch (mode) {
+ case ACER_WMID_FAN_MODE_AUTO:
+ *val = 2;
+ return 0;
+ case ACER_WMID_FAN_MODE_TURBO:
+ *val = 0;
+ return 0;
+ case ACER_WMID_FAN_MODE_CUSTOM:
+ *val = 1;
+ return 0;
+ default:
+ return -ENXIO;
+ }
+ default:
+ return -EOPNOTSUPP;
+ }
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int acer_wmi_hwmon_write(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long val)
+{
+ enum acer_wmi_gaming_fan_mode mode;
+ u16 fan_bitmap;
+ u8 fan, speed;
+
+ switch (type) {
+ case hwmon_pwm:
+ switch (attr) {
+ case hwmon_pwm_input:
+ fan = acer_wmi_fan_channel_to_fan_id[channel];
+ speed = fixp_linear_interpolate(0, 0, U8_MAX, 100,
+ clamp_val(val, 0, U8_MAX));
+
+ return WMID_gaming_set_gaming_fan_speed(fan, speed);
+ case hwmon_pwm_enable:
+ fan_bitmap = acer_wmi_fan_channel_to_fan_bitmap[channel];
+
+ switch (val) {
+ case 0:
+ mode = ACER_WMID_FAN_MODE_TURBO;
+ break;
+ case 1:
+ mode = ACER_WMID_FAN_MODE_CUSTOM;
+ break;
+ case 2:
+ mode = ACER_WMID_FAN_MODE_AUTO;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return WMID_gaming_set_fan_behavior(fan_bitmap, mode);
+ default:
+ return -EOPNOTSUPP;
+ }
default:
return -EOPNOTSUPP;
}
@@ -2835,11 +3074,16 @@ static const struct hwmon_channel_info *const acer_wmi_hwmon_info[] = {
HWMON_F_INPUT,
HWMON_F_INPUT
),
+ HWMON_CHANNEL_INFO(pwm,
+ HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
+ HWMON_PWM_INPUT | HWMON_PWM_ENABLE
+ ),
NULL
};
static const struct hwmon_ops acer_wmi_hwmon_ops = {
.read = acer_wmi_hwmon_read,
+ .write = acer_wmi_hwmon_write,
.is_visible = acer_wmi_hwmon_is_visible,
};
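
The hwmon additions expose the custom fan speed through the standard PWM
sysfs ABI: pwm[1-2] runs 0-255 while the firmware works in 0-100 percent,
and pwm[1-2]_enable maps 0/1/2 to turbo/custom/auto as in the read/write
callbacks above. A minimal sketch of the two scale conversions, with
illustrative helper names that are not part of the patch:

#include <linux/fixp-arith.h>
#include <linux/minmax.h>

/* sysfs pwm value (0..255) -> firmware duty cycle (0..100) */
static u8 pwm_to_percent(long pwm)
{
	return fixp_linear_interpolate(0, 0, U8_MAX, 100,
				       clamp_val(pwm, 0, U8_MAX));
}

/* firmware duty cycle (0..100) -> sysfs pwm value (0..255) */
static long percent_to_pwm(u8 percent)
{
	return fixp_linear_interpolate(0, 0, 100, U8_MAX, percent);
}
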
diff --git a/drivers/platform/x86/amd/hfi/hfi.c b/drivers/platform/x86/amd/hfi/hfi.c
index a465ac6f607e..83863a5e0fbc 100644
--- a/drivers/platform/x86/amd/hfi/hfi.c
+++ b/drivers/platform/x86/amd/hfi/hfi.c
@@ -12,7 +12,6 @@
#include <linux/acpi.h>
#include <linux/cpu.h>
-#include <linux/cpumask.h>
#include <linux/debugfs.h>
#include <linux/gfp.h>
#include <linux/init.h>
@@ -95,7 +94,6 @@ struct amd_hfi_classes {
* struct amd_hfi_cpuinfo - HFI workload class info per CPU
* @cpu: CPU index
* @apic_id: APIC id of the current CPU
- * @cpus: mask of CPUs associated with amd_hfi_cpuinfo
* @class_index: workload class ID index
* @nr_class: max number of workload class supported
* @ipcc_scores: ipcc scores for each class
@@ -106,7 +104,6 @@ struct amd_hfi_classes {
struct amd_hfi_cpuinfo {
int cpu;
u32 apic_id;
- cpumask_var_t cpus;
s16 class_index;
u8 nr_class;
int *ipcc_scores;
@@ -295,11 +292,6 @@ static int amd_hfi_online(unsigned int cpu)
guard(mutex)(&hfi_cpuinfo_lock);
- if (!zalloc_cpumask_var(&hfi_info->cpus, GFP_KERNEL))
- return -ENOMEM;
-
- cpumask_set_cpu(cpu, hfi_info->cpus);
-
ret = amd_hfi_set_state(cpu, true);
if (ret)
pr_err("WCT enable failed for CPU %u\n", cpu);
@@ -329,8 +321,6 @@ static int amd_hfi_offline(unsigned int cpu)
if (ret)
pr_err("WCT disable failed for CPU %u\n", cpu);
- free_cpumask_var(hfi_info->cpus);
-
return ret;
}
@@ -515,7 +505,6 @@ static int amd_hfi_probe(struct platform_device *pdev)
static struct platform_driver amd_hfi_driver = {
.driver = {
.name = AMD_HFI_DRIVER,
- .owner = THIS_MODULE,
.pm = &amd_hfi_pm_ops,
.acpi_match_table = ACPI_PTR(amd_hfi_platform_match),
},
diff --git a/drivers/platform/x86/amd/hsmp/acpi.c b/drivers/platform/x86/amd/hsmp/acpi.c
index d0b74d243ce4..97ed71593bdf 100644
--- a/drivers/platform/x86/amd/hsmp/acpi.c
+++ b/drivers/platform/x86/amd/hsmp/acpi.c
@@ -22,12 +22,11 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/sysfs.h>
+#include <linux/topology.h>
#include <linux/uuid.h>
#include <uapi/asm-generic/errno-base.h>
-#include <asm/amd/node.h>
-
#include "hsmp.h"
#define DRIVER_NAME "hsmp_acpi"
@@ -586,9 +585,9 @@ static int hsmp_acpi_probe(struct platform_device *pdev)
return -ENOMEM;
if (!hsmp_pdev->is_probed) {
- hsmp_pdev->num_sockets = amd_num_nodes();
- if (hsmp_pdev->num_sockets == 0 || hsmp_pdev->num_sockets > MAX_AMD_NUM_NODES) {
- dev_err(&pdev->dev, "Wrong number of sockets\n");
+ hsmp_pdev->num_sockets = topology_max_packages();
+ if (!hsmp_pdev->num_sockets) {
+ dev_err(&pdev->dev, "No CPU sockets detected\n");
return -ENODEV;
}
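
With amd_num_nodes() gone, the socket count now comes from the generic CPU
topology. A minimal sketch of sizing per-socket state that way; the
alloc_socket_state() helper is illustrative, not part of the patch:

#include <linux/slab.h>
#include <linux/topology.h>

static void *alloc_socket_state(size_t per_socket_size)
{
	unsigned int num_sockets = topology_max_packages();

	if (!num_sockets)	/* mirrors the -ENODEV check above */
		return NULL;

	return kcalloc(num_sockets, per_socket_size, GFP_KERNEL);
}
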
diff --git a/drivers/platform/x86/amd/pmf/auto-mode.c b/drivers/platform/x86/amd/pmf/auto-mode.c
index a184922bba8d..faf15a8f74bb 100644
--- a/drivers/platform/x86/amd/pmf/auto-mode.c
+++ b/drivers/platform/x86/amd/pmf/auto-mode.c
@@ -114,14 +114,14 @@ static void amd_pmf_set_automode(struct amd_pmf_dev *dev, int idx,
{
struct power_table_control *pwr_ctrl = &config_store.mode_set[idx].power_control;
- amd_pmf_send_cmd(dev, SET_SPL, false, pwr_ctrl->spl, NULL);
- amd_pmf_send_cmd(dev, SET_FPPT, false, pwr_ctrl->fppt, NULL);
- amd_pmf_send_cmd(dev, SET_SPPT, false, pwr_ctrl->sppt, NULL);
- amd_pmf_send_cmd(dev, SET_SPPT_APU_ONLY, false, pwr_ctrl->sppt_apu_only, NULL);
- amd_pmf_send_cmd(dev, SET_STT_MIN_LIMIT, false, pwr_ctrl->stt_min, NULL);
- amd_pmf_send_cmd(dev, SET_STT_LIMIT_APU, false,
+ amd_pmf_send_cmd(dev, SET_SPL, SET_CMD, pwr_ctrl->spl, NULL);
+ amd_pmf_send_cmd(dev, SET_FPPT, SET_CMD, pwr_ctrl->fppt, NULL);
+ amd_pmf_send_cmd(dev, SET_SPPT, SET_CMD, pwr_ctrl->sppt, NULL);
+ amd_pmf_send_cmd(dev, SET_SPPT_APU_ONLY, SET_CMD, pwr_ctrl->sppt_apu_only, NULL);
+ amd_pmf_send_cmd(dev, SET_STT_MIN_LIMIT, SET_CMD, pwr_ctrl->stt_min, NULL);
+ amd_pmf_send_cmd(dev, SET_STT_LIMIT_APU, SET_CMD,
fixp_q88_fromint(pwr_ctrl->stt_skin_temp[STT_TEMP_APU]), NULL);
- amd_pmf_send_cmd(dev, SET_STT_LIMIT_HS2, false,
+ amd_pmf_send_cmd(dev, SET_STT_LIMIT_HS2, SET_CMD,
fixp_q88_fromint(pwr_ctrl->stt_skin_temp[STT_TEMP_HS2]), NULL);
if (is_apmf_func_supported(dev, APMF_FUNC_SET_FAN_IDX))
diff --git a/drivers/platform/x86/amd/pmf/cnqf.c b/drivers/platform/x86/amd/pmf/cnqf.c
index 207a0b33d8d3..5469fefb6001 100644
--- a/drivers/platform/x86/amd/pmf/cnqf.c
+++ b/drivers/platform/x86/amd/pmf/cnqf.c
@@ -76,14 +76,14 @@ static int amd_pmf_set_cnqf(struct amd_pmf_dev *dev, int src, int idx,
pc = &config_store.mode_set[src][idx].power_control;
- amd_pmf_send_cmd(dev, SET_SPL, false, pc->spl, NULL);
- amd_pmf_send_cmd(dev, SET_FPPT, false, pc->fppt, NULL);
- amd_pmf_send_cmd(dev, SET_SPPT, false, pc->sppt, NULL);
- amd_pmf_send_cmd(dev, SET_SPPT_APU_ONLY, false, pc->sppt_apu_only, NULL);
- amd_pmf_send_cmd(dev, SET_STT_MIN_LIMIT, false, pc->stt_min, NULL);
- amd_pmf_send_cmd(dev, SET_STT_LIMIT_APU, false,
+ amd_pmf_send_cmd(dev, SET_SPL, SET_CMD, pc->spl, NULL);
+ amd_pmf_send_cmd(dev, SET_FPPT, SET_CMD, pc->fppt, NULL);
+ amd_pmf_send_cmd(dev, SET_SPPT, SET_CMD, pc->sppt, NULL);
+ amd_pmf_send_cmd(dev, SET_SPPT_APU_ONLY, SET_CMD, pc->sppt_apu_only, NULL);
+ amd_pmf_send_cmd(dev, SET_STT_MIN_LIMIT, SET_CMD, pc->stt_min, NULL);
+ amd_pmf_send_cmd(dev, SET_STT_LIMIT_APU, SET_CMD,
fixp_q88_fromint(pc->stt_skin_temp[STT_TEMP_APU]), NULL);
- amd_pmf_send_cmd(dev, SET_STT_LIMIT_HS2, false,
+ amd_pmf_send_cmd(dev, SET_STT_LIMIT_HS2, SET_CMD,
fixp_q88_fromint(pc->stt_skin_temp[STT_TEMP_HS2]), NULL);
if (is_apmf_func_supported(dev, APMF_FUNC_SET_FAN_IDX))
diff --git a/drivers/platform/x86/amd/pmf/core.c b/drivers/platform/x86/amd/pmf/core.c
index bc544a4a5266..8fc293c9c538 100644
--- a/drivers/platform/x86/amd/pmf/core.c
+++ b/drivers/platform/x86/amd/pmf/core.c
@@ -131,7 +131,7 @@ static void amd_pmf_get_metrics(struct work_struct *work)
/* Transfer table contents */
memset(dev->buf, 0, sizeof(dev->m_table));
- amd_pmf_send_cmd(dev, SET_TRANSFER_TABLE, 0, 7, NULL);
+ amd_pmf_send_cmd(dev, SET_TRANSFER_TABLE, SET_CMD, METRICS_TABLE_ID, NULL);
memcpy(&dev->m_table, dev->buf, sizeof(dev->m_table));
time_elapsed_ms = ktime_to_ms(ktime_get()) - dev->start_time;
@@ -289,8 +289,8 @@ int amd_pmf_set_dram_addr(struct amd_pmf_dev *dev, bool alloc_buffer)
hi = phys_addr >> 32;
low = phys_addr & GENMASK(31, 0);
- amd_pmf_send_cmd(dev, SET_DRAM_ADDR_HIGH, 0, hi, NULL);
- amd_pmf_send_cmd(dev, SET_DRAM_ADDR_LOW, 0, low, NULL);
+ amd_pmf_send_cmd(dev, SET_DRAM_ADDR_HIGH, SET_CMD, hi, NULL);
+ amd_pmf_send_cmd(dev, SET_DRAM_ADDR_LOW, SET_CMD, low, NULL);
return 0;
}
@@ -465,9 +465,17 @@ static int amd_pmf_probe(struct platform_device *pdev)
if (!dev->regbase)
return -ENOMEM;
- mutex_init(&dev->lock);
- mutex_init(&dev->update_mutex);
- mutex_init(&dev->cb_mutex);
+ err = devm_mutex_init(dev->dev, &dev->lock);
+ if (err)
+ return err;
+
+ err = devm_mutex_init(dev->dev, &dev->update_mutex);
+ if (err)
+ return err;
+
+ err = devm_mutex_init(dev->dev, &dev->cb_mutex);
+ if (err)
+ return err;
apmf_acpi_init(dev);
platform_set_drvdata(pdev, dev);
@@ -491,9 +499,6 @@ static void amd_pmf_remove(struct platform_device *pdev)
amd_pmf_notify_sbios_heartbeat_event_v2(dev, ON_UNLOAD);
apmf_acpi_deinit(dev);
amd_pmf_dbgfs_unregister(dev);
- mutex_destroy(&dev->lock);
- mutex_destroy(&dev->update_mutex);
- mutex_destroy(&dev->cb_mutex);
}
static const struct attribute_group *amd_pmf_driver_groups[] = {
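
The probe path now uses devm_mutex_init(), which ties mutex_destroy() to
the device's managed-resource teardown; that is why the explicit
mutex_destroy() calls can be dropped from amd_pmf_remove() above. A
minimal sketch of the pattern with illustrative names:

#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>

struct my_priv {
	struct mutex lock;
};

static int my_probe(struct platform_device *pdev)
{
	struct my_priv *priv;
	int err;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	/* mutex_destroy() runs automatically when the device is unbound */
	err = devm_mutex_init(&pdev->dev, &priv->lock);
	if (err)
		return err;

	platform_set_drvdata(pdev, priv);
	return 0;
}
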
diff --git a/drivers/platform/x86/amd/pmf/pmf.h b/drivers/platform/x86/amd/pmf/pmf.h
index bd19f2a6bc78..9144c8c3bbaf 100644
--- a/drivers/platform/x86/amd/pmf/pmf.h
+++ b/drivers/platform/x86/amd/pmf/pmf.h
@@ -119,6 +119,13 @@ struct cookie_header {
#define APTS_MAX_STATES 16
#define CUSTOM_BIOS_INPUT_BITS GENMASK(16, 7)
+#define BIOS_INPUTS_MAX 10
+
+/* amd_pmf_send_cmd() set/get */
+#define SET_CMD false
+#define GET_CMD true
+
+#define METRICS_TABLE_ID 7
typedef void (*apmf_event_handler_t)(acpi_handle handle, u32 event, void *data);
@@ -204,7 +211,7 @@ struct apmf_sbios_req_v1 {
u8 skin_temp_apu;
u8 skin_temp_hs2;
u8 enable_cnqf;
- u32 custom_policy[10];
+ u32 custom_policy[BIOS_INPUTS_MAX];
} __packed;
struct apmf_sbios_req_v2 {
@@ -216,7 +223,7 @@ struct apmf_sbios_req_v2 {
u32 stt_min_limit;
u8 skin_temp_apu;
u8 skin_temp_hs2;
- u32 custom_policy[10];
+ u32 custom_policy[BIOS_INPUTS_MAX];
} __packed;
struct apmf_fan_idx {
@@ -243,12 +250,12 @@ struct smu_pmf_metrics_v2 {
u16 vclk_freq; /* MHz */
u16 vcn_activity; /* VCN busy % [0-100] */
u16 vpeclk_freq; /* MHz */
- u16 ipuclk_freq; /* MHz */
- u16 ipu_busy[8]; /* NPU busy % [0-100] */
+ u16 npuclk_freq; /* MHz */
+ u16 npu_busy[8]; /* NPU busy % [0-100] */
u16 dram_reads; /* MB/sec */
u16 dram_writes; /* MB/sec */
u16 core_c0residency[16]; /* C0 residency % [0-100] */
- u16 ipu_power; /* mW */
+ u16 npu_power; /* mW */
u32 apu_power; /* mW */
u32 gfx_power; /* mW */
u32 dgpu_power; /* mW */
@@ -257,9 +264,9 @@ struct smu_pmf_metrics_v2 {
u32 filter_alpha_value; /* time constant [us] */
u32 metrics_counter;
u16 memclk_freq; /* MHz */
- u16 mpipuclk_freq; /* MHz */
- u16 ipu_reads; /* MB/sec */
- u16 ipu_writes; /* MB/sec */
+ u16 mpnpuclk_freq; /* MHz */
+ u16 npu_reads; /* MB/sec */
+ u16 npu_writes; /* MB/sec */
u32 throttle_residency_prochot;
u32 throttle_residency_spl;
u32 throttle_residency_fppt;
@@ -355,7 +362,7 @@ enum power_modes_v2 {
};
struct pmf_bios_inputs_prev {
- u32 custom_bios_inputs[10];
+ u32 custom_bios_inputs[BIOS_INPUTS_MAX];
};
struct amd_pmf_dev {
@@ -451,7 +458,7 @@ struct os_power_slider {
struct amd_pmf_notify_smart_pc_update {
u16 size;
u32 pending_req;
- u32 custom_bios[10];
+ u32 custom_bios[BIOS_INPUTS_MAX];
} __packed;
struct fan_table_control {
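
Replacing the bare false/true argument with SET_CMD/GET_CMD makes the
direction of each mailbox transaction readable at the call site, as the
sps.c hunks below illustrate (val and out stand in for the real arguments):

	amd_pmf_send_cmd(dev, SET_SPL, SET_CMD, val, NULL);	  /* write val */
	amd_pmf_send_cmd(dev, GET_SPL, GET_CMD, ARG_NONE, &out);  /* read into out */
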
diff --git a/drivers/platform/x86/amd/pmf/spc.c b/drivers/platform/x86/amd/pmf/spc.c
index 85192c7536b8..0a37dc6a7950 100644
--- a/drivers/platform/x86/amd/pmf/spc.c
+++ b/drivers/platform/x86/amd/pmf/spc.c
@@ -202,7 +202,7 @@ static void amd_pmf_get_smu_info(struct amd_pmf_dev *dev, struct ta_pmf_enact_ta
{
/* Get the updated metrics table data */
memset(dev->buf, 0, dev->mtable_size);
- amd_pmf_send_cmd(dev, SET_TRANSFER_TABLE, 0, 7, NULL);
+ amd_pmf_send_cmd(dev, SET_TRANSFER_TABLE, SET_CMD, METRICS_TABLE_ID, NULL);
switch (dev->cpu_id) {
case AMD_CPU_ID_PS:
diff --git a/drivers/platform/x86/amd/pmf/sps.c b/drivers/platform/x86/amd/pmf/sps.c
index c28f3c5744c2..0b70a5153f46 100644
--- a/drivers/platform/x86/amd/pmf/sps.c
+++ b/drivers/platform/x86/amd/pmf/sps.c
@@ -192,15 +192,15 @@ static void amd_pmf_load_defaults_sps(struct amd_pmf_dev *dev)
static void amd_pmf_update_slider_v2(struct amd_pmf_dev *dev, int idx)
{
- amd_pmf_send_cmd(dev, SET_PMF_PPT, false, apts_config_store.val[idx].pmf_ppt, NULL);
- amd_pmf_send_cmd(dev, SET_PMF_PPT_APU_ONLY, false,
+ amd_pmf_send_cmd(dev, SET_PMF_PPT, SET_CMD, apts_config_store.val[idx].pmf_ppt, NULL);
+ amd_pmf_send_cmd(dev, SET_PMF_PPT_APU_ONLY, SET_CMD,
apts_config_store.val[idx].ppt_pmf_apu_only, NULL);
- amd_pmf_send_cmd(dev, SET_STT_MIN_LIMIT, false,
+ amd_pmf_send_cmd(dev, SET_STT_MIN_LIMIT, SET_CMD,
apts_config_store.val[idx].stt_min_limit, NULL);
- amd_pmf_send_cmd(dev, SET_STT_LIMIT_APU, false,
+ amd_pmf_send_cmd(dev, SET_STT_LIMIT_APU, SET_CMD,
fixp_q88_fromint(apts_config_store.val[idx].stt_skin_temp_limit_apu),
NULL);
- amd_pmf_send_cmd(dev, SET_STT_LIMIT_HS2, false,
+ amd_pmf_send_cmd(dev, SET_STT_LIMIT_HS2, SET_CMD,
fixp_q88_fromint(apts_config_store.val[idx].stt_skin_temp_limit_hs2),
NULL);
}
@@ -211,30 +211,30 @@ void amd_pmf_update_slider(struct amd_pmf_dev *dev, bool op, int idx,
int src = amd_pmf_get_power_source();
if (op == SLIDER_OP_SET) {
- amd_pmf_send_cmd(dev, SET_SPL, false, config_store.prop[src][idx].spl, NULL);
- amd_pmf_send_cmd(dev, SET_FPPT, false, config_store.prop[src][idx].fppt, NULL);
- amd_pmf_send_cmd(dev, SET_SPPT, false, config_store.prop[src][idx].sppt, NULL);
- amd_pmf_send_cmd(dev, SET_SPPT_APU_ONLY, false,
+ amd_pmf_send_cmd(dev, SET_SPL, SET_CMD, config_store.prop[src][idx].spl, NULL);
+ amd_pmf_send_cmd(dev, SET_FPPT, SET_CMD, config_store.prop[src][idx].fppt, NULL);
+ amd_pmf_send_cmd(dev, SET_SPPT, SET_CMD, config_store.prop[src][idx].sppt, NULL);
+ amd_pmf_send_cmd(dev, SET_SPPT_APU_ONLY, SET_CMD,
config_store.prop[src][idx].sppt_apu_only, NULL);
- amd_pmf_send_cmd(dev, SET_STT_MIN_LIMIT, false,
+ amd_pmf_send_cmd(dev, SET_STT_MIN_LIMIT, SET_CMD,
config_store.prop[src][idx].stt_min, NULL);
- amd_pmf_send_cmd(dev, SET_STT_LIMIT_APU, false,
+ amd_pmf_send_cmd(dev, SET_STT_LIMIT_APU, SET_CMD,
fixp_q88_fromint(config_store.prop[src][idx].stt_skin_temp[STT_TEMP_APU]),
NULL);
- amd_pmf_send_cmd(dev, SET_STT_LIMIT_HS2, false,
+ amd_pmf_send_cmd(dev, SET_STT_LIMIT_HS2, SET_CMD,
fixp_q88_fromint(config_store.prop[src][idx].stt_skin_temp[STT_TEMP_HS2]),
NULL);
} else if (op == SLIDER_OP_GET) {
- amd_pmf_send_cmd(dev, GET_SPL, true, ARG_NONE, &table->prop[src][idx].spl);
- amd_pmf_send_cmd(dev, GET_FPPT, true, ARG_NONE, &table->prop[src][idx].fppt);
- amd_pmf_send_cmd(dev, GET_SPPT, true, ARG_NONE, &table->prop[src][idx].sppt);
- amd_pmf_send_cmd(dev, GET_SPPT_APU_ONLY, true, ARG_NONE,
+ amd_pmf_send_cmd(dev, GET_SPL, GET_CMD, ARG_NONE, &table->prop[src][idx].spl);
+ amd_pmf_send_cmd(dev, GET_FPPT, GET_CMD, ARG_NONE, &table->prop[src][idx].fppt);
+ amd_pmf_send_cmd(dev, GET_SPPT, GET_CMD, ARG_NONE, &table->prop[src][idx].sppt);
+ amd_pmf_send_cmd(dev, GET_SPPT_APU_ONLY, GET_CMD, ARG_NONE,
&table->prop[src][idx].sppt_apu_only);
- amd_pmf_send_cmd(dev, GET_STT_MIN_LIMIT, true, ARG_NONE,
+ amd_pmf_send_cmd(dev, GET_STT_MIN_LIMIT, GET_CMD, ARG_NONE,
&table->prop[src][idx].stt_min);
- amd_pmf_send_cmd(dev, GET_STT_LIMIT_APU, true, ARG_NONE,
+ amd_pmf_send_cmd(dev, GET_STT_LIMIT_APU, GET_CMD, ARG_NONE,
(u32 *)&table->prop[src][idx].stt_skin_temp[STT_TEMP_APU]);
- amd_pmf_send_cmd(dev, GET_STT_LIMIT_HS2, true, ARG_NONE,
+ amd_pmf_send_cmd(dev, GET_STT_LIMIT_HS2, GET_CMD, ARG_NONE,
(u32 *)&table->prop[src][idx].stt_skin_temp[STT_TEMP_HS2]);
}
}
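
The STT skin-temperature limits are handed to the firmware in Q8.8 fixed
point via fixp_q88_fromint(). Assuming that helper is the plain 8-bit left
shift its name suggests (8 integer bits, 8 fractional bits), the encoding
looks like this:

/* Q8.8: the low byte holds the fractional part, so 40 C -> 0x2800 */
static u32 q88_from_int(u32 celsius)
{
	return celsius << 8;
}
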
diff --git a/drivers/platform/x86/amd/pmf/tee-if.c b/drivers/platform/x86/amd/pmf/tee-if.c
index 6e8116bef4f6..0abce76f89ff 100644
--- a/drivers/platform/x86/amd/pmf/tee-if.c
+++ b/drivers/platform/x86/amd/pmf/tee-if.c
@@ -73,17 +73,56 @@ static void amd_pmf_update_uevents(struct amd_pmf_dev *dev, u16 event)
input_sync(dev->pmf_idev);
}
+static int amd_pmf_get_bios_output_idx(u32 action_idx)
+{
+ switch (action_idx) {
+ case PMF_POLICY_BIOS_OUTPUT_1:
+ return 0;
+ case PMF_POLICY_BIOS_OUTPUT_2:
+ return 1;
+ case PMF_POLICY_BIOS_OUTPUT_3:
+ return 2;
+ case PMF_POLICY_BIOS_OUTPUT_4:
+ return 3;
+ case PMF_POLICY_BIOS_OUTPUT_5:
+ return 4;
+ case PMF_POLICY_BIOS_OUTPUT_6:
+ return 5;
+ case PMF_POLICY_BIOS_OUTPUT_7:
+ return 6;
+ case PMF_POLICY_BIOS_OUTPUT_8:
+ return 7;
+ case PMF_POLICY_BIOS_OUTPUT_9:
+ return 8;
+ case PMF_POLICY_BIOS_OUTPUT_10:
+ return 9;
+ default:
+ return -EINVAL;
+ }
+}
+
+static void amd_pmf_update_bios_output(struct amd_pmf_dev *pdev, struct ta_pmf_action *action)
+{
+ u32 bios_idx;
+
+ bios_idx = amd_pmf_get_bios_output_idx(action->action_index);
+
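+ /*
+ * amd_pmf_update_bios_output() is reached only from the grouped
+ * PMF_POLICY_BIOS_OUTPUT_* case labels below, so the helper cannot
+ * return -EINVAL here and bios_idx is always a valid index.
+ */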
+ amd_pmf_smartpc_apply_bios_output(pdev, action->value, BIT(bios_idx), bios_idx);
+}
+
static void amd_pmf_apply_policies(struct amd_pmf_dev *dev, struct ta_pmf_enact_result *out)
{
+ struct ta_pmf_action *action;
u32 val;
int idx;
for (idx = 0; idx < out->actions_count; idx++) {
- val = out->actions_list[idx].value;
- switch (out->actions_list[idx].action_index) {
+ action = &out->actions_list[idx];
+ val = action->value;
+ switch (action->action_index) {
case PMF_POLICY_SPL:
if (dev->prev_data->spl != val) {
- amd_pmf_send_cmd(dev, SET_SPL, false, val, NULL);
+ amd_pmf_send_cmd(dev, SET_SPL, SET_CMD, val, NULL);
dev_dbg(dev->dev, "update SPL: %u\n", val);
dev->prev_data->spl = val;
}
@@ -91,7 +130,7 @@ static void amd_pmf_apply_policies(struct amd_pmf_dev *dev, struct ta_pmf_enact_
case PMF_POLICY_SPPT:
if (dev->prev_data->sppt != val) {
- amd_pmf_send_cmd(dev, SET_SPPT, false, val, NULL);
+ amd_pmf_send_cmd(dev, SET_SPPT, SET_CMD, val, NULL);
dev_dbg(dev->dev, "update SPPT: %u\n", val);
dev->prev_data->sppt = val;
}
@@ -99,7 +138,7 @@ static void amd_pmf_apply_policies(struct amd_pmf_dev *dev, struct ta_pmf_enact_
case PMF_POLICY_FPPT:
if (dev->prev_data->fppt != val) {
- amd_pmf_send_cmd(dev, SET_FPPT, false, val, NULL);
+ amd_pmf_send_cmd(dev, SET_FPPT, SET_CMD, val, NULL);
dev_dbg(dev->dev, "update FPPT: %u\n", val);
dev->prev_data->fppt = val;
}
@@ -107,7 +146,7 @@ static void amd_pmf_apply_policies(struct amd_pmf_dev *dev, struct ta_pmf_enact_
case PMF_POLICY_SPPT_APU_ONLY:
if (dev->prev_data->sppt_apuonly != val) {
- amd_pmf_send_cmd(dev, SET_SPPT_APU_ONLY, false, val, NULL);
+ amd_pmf_send_cmd(dev, SET_SPPT_APU_ONLY, SET_CMD, val, NULL);
dev_dbg(dev->dev, "update SPPT_APU_ONLY: %u\n", val);
dev->prev_data->sppt_apuonly = val;
}
@@ -115,7 +154,7 @@ static void amd_pmf_apply_policies(struct amd_pmf_dev *dev, struct ta_pmf_enact_
case PMF_POLICY_STT_MIN:
if (dev->prev_data->stt_minlimit != val) {
- amd_pmf_send_cmd(dev, SET_STT_MIN_LIMIT, false, val, NULL);
+ amd_pmf_send_cmd(dev, SET_STT_MIN_LIMIT, SET_CMD, val, NULL);
dev_dbg(dev->dev, "update STT_MIN: %u\n", val);
dev->prev_data->stt_minlimit = val;
}
@@ -123,7 +162,7 @@ static void amd_pmf_apply_policies(struct amd_pmf_dev *dev, struct ta_pmf_enact_
case PMF_POLICY_STT_SKINTEMP_APU:
if (dev->prev_data->stt_skintemp_apu != val) {
- amd_pmf_send_cmd(dev, SET_STT_LIMIT_APU, false,
+ amd_pmf_send_cmd(dev, SET_STT_LIMIT_APU, SET_CMD,
fixp_q88_fromint(val), NULL);
dev_dbg(dev->dev, "update STT_SKINTEMP_APU: %u\n", val);
dev->prev_data->stt_skintemp_apu = val;
@@ -132,7 +171,7 @@ static void amd_pmf_apply_policies(struct amd_pmf_dev *dev, struct ta_pmf_enact_
case PMF_POLICY_STT_SKINTEMP_HS2:
if (dev->prev_data->stt_skintemp_hs2 != val) {
- amd_pmf_send_cmd(dev, SET_STT_LIMIT_HS2, false,
+ amd_pmf_send_cmd(dev, SET_STT_LIMIT_HS2, SET_CMD,
fixp_q88_fromint(val), NULL);
dev_dbg(dev->dev, "update STT_SKINTEMP_HS2: %u\n", val);
dev->prev_data->stt_skintemp_hs2 = val;
@@ -141,7 +180,7 @@ static void amd_pmf_apply_policies(struct amd_pmf_dev *dev, struct ta_pmf_enact_
case PMF_POLICY_P3T:
if (dev->prev_data->p3t_limit != val) {
- amd_pmf_send_cmd(dev, SET_P3T, false, val, NULL);
+ amd_pmf_send_cmd(dev, SET_P3T, SET_CMD, val, NULL);
dev_dbg(dev->dev, "update P3T: %u\n", val);
dev->prev_data->p3t_limit = val;
}
@@ -149,7 +188,7 @@ static void amd_pmf_apply_policies(struct amd_pmf_dev *dev, struct ta_pmf_enact_
case PMF_POLICY_PMF_PPT:
if (dev->prev_data->pmf_ppt != val) {
- amd_pmf_send_cmd(dev, SET_PMF_PPT, false, val, NULL);
+ amd_pmf_send_cmd(dev, SET_PMF_PPT, SET_CMD, val, NULL);
dev_dbg(dev->dev, "update PMF PPT: %u\n", val);
dev->prev_data->pmf_ppt = val;
}
@@ -157,7 +196,7 @@ static void amd_pmf_apply_policies(struct amd_pmf_dev *dev, struct ta_pmf_enact_
case PMF_POLICY_PMF_PPT_APU_ONLY:
if (dev->prev_data->pmf_ppt_apu_only != val) {
- amd_pmf_send_cmd(dev, SET_PMF_PPT_APU_ONLY, false, val, NULL);
+ amd_pmf_send_cmd(dev, SET_PMF_PPT_APU_ONLY, SET_CMD, val, NULL);
dev_dbg(dev->dev, "update PMF PPT APU ONLY: %u\n", val);
dev->prev_data->pmf_ppt_apu_only = val;
}
@@ -183,43 +222,16 @@ static void amd_pmf_apply_policies(struct amd_pmf_dev *dev, struct ta_pmf_enact_
break;
case PMF_POLICY_BIOS_OUTPUT_1:
- amd_pmf_smartpc_apply_bios_output(dev, val, BIT(0), 0);
- break;
-
case PMF_POLICY_BIOS_OUTPUT_2:
- amd_pmf_smartpc_apply_bios_output(dev, val, BIT(1), 1);
- break;
-
case PMF_POLICY_BIOS_OUTPUT_3:
- amd_pmf_smartpc_apply_bios_output(dev, val, BIT(2), 2);
- break;
-
case PMF_POLICY_BIOS_OUTPUT_4:
- amd_pmf_smartpc_apply_bios_output(dev, val, BIT(3), 3);
- break;
-
case PMF_POLICY_BIOS_OUTPUT_5:
- amd_pmf_smartpc_apply_bios_output(dev, val, BIT(4), 4);
- break;
-
case PMF_POLICY_BIOS_OUTPUT_6:
- amd_pmf_smartpc_apply_bios_output(dev, val, BIT(5), 5);
- break;
-
case PMF_POLICY_BIOS_OUTPUT_7:
- amd_pmf_smartpc_apply_bios_output(dev, val, BIT(6), 6);
- break;
-
case PMF_POLICY_BIOS_OUTPUT_8:
- amd_pmf_smartpc_apply_bios_output(dev, val, BIT(7), 7);
- break;
-
case PMF_POLICY_BIOS_OUTPUT_9:
- amd_pmf_smartpc_apply_bios_output(dev, val, BIT(8), 8);
- break;
-
case PMF_POLICY_BIOS_OUTPUT_10:
- amd_pmf_smartpc_apply_bios_output(dev, val, BIT(9), 9);
+ amd_pmf_update_bios_output(dev, action);
break;
}
}
diff --git a/drivers/platform/x86/asus-armoury.c b/drivers/platform/x86/asus-armoury.c
new file mode 100644
index 000000000000..9c1a9ad42bc4
--- /dev/null
+++ b/drivers/platform/x86/asus-armoury.c
@@ -0,0 +1,1161 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Asus Armoury (WMI) attributes driver.
+ *
+ * This driver uses the fw_attributes class to expose various WMI functions
+ * that are present in many gaming and some non-gaming ASUS laptops.
+ *
+ * These typically don't fit anywhere else in sysfs, such as under the LED
+ * class or hwmon, and are set in Windows using the ASUS Armoury Crate tool.
+ *
+ * Copyright(C) 2024 Luke Jones <luke@ljones.dev>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/acpi.h>
+#include <linux/array_size.h>
+#include <linux/bitfield.h>
+#include <linux/device.h>
+#include <linux/dmi.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/kmod.h>
+#include <linux/kobject.h>
+#include <linux/kstrtox.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/platform_data/x86/asus-wmi.h>
+#include <linux/printk.h>
+#include <linux/power_supply.h>
+#include <linux/sysfs.h>
+
+#include "asus-armoury.h"
+#include "firmware_attributes_class.h"
+
+#define ASUS_NB_WMI_EVENT_GUID "0B3CBB35-E3C2-45ED-91C2-4C5A6D195D1C"
+
+#define ASUS_MINI_LED_MODE_MASK GENMASK(1, 0)
+/* Standard modes for devices with only on/off */
+#define ASUS_MINI_LED_OFF 0x00
+#define ASUS_MINI_LED_ON 0x01
+/* Like "on" but the effect is more vibrant or brighter */
+#define ASUS_MINI_LED_STRONG_MODE 0x02
+/* New modes for devices with 3 mini-led mode types */
+#define ASUS_MINI_LED_2024_WEAK 0x00
+#define ASUS_MINI_LED_2024_STRONG 0x01
+#define ASUS_MINI_LED_2024_OFF 0x02
+
+/* Power tunable attribute name defines */
+#define ATTR_PPT_PL1_SPL "ppt_pl1_spl"
+#define ATTR_PPT_PL2_SPPT "ppt_pl2_sppt"
+#define ATTR_PPT_PL3_FPPT "ppt_pl3_fppt"
+#define ATTR_PPT_APU_SPPT "ppt_apu_sppt"
+#define ATTR_PPT_PLATFORM_SPPT "ppt_platform_sppt"
+#define ATTR_NV_DYNAMIC_BOOST "nv_dynamic_boost"
+#define ATTR_NV_TEMP_TARGET "nv_temp_target"
+#define ATTR_NV_BASE_TGP "nv_base_tgp"
+#define ATTR_NV_TGP "nv_tgp"
+
+#define ASUS_ROG_TUNABLE_DC 0
+#define ASUS_ROG_TUNABLE_AC 1
+
+struct rog_tunables {
+ const struct power_limits *power_limits;
+ u32 ppt_pl1_spl; // cpu
+ u32 ppt_pl2_sppt; // cpu
+ u32 ppt_pl3_fppt; // cpu
+ u32 ppt_apu_sppt; // plat
+ u32 ppt_platform_sppt; // plat
+
+ u32 nv_dynamic_boost;
+ u32 nv_temp_target;
+ u32 nv_tgp;
+};
+
+struct asus_armoury_priv {
+ struct device *fw_attr_dev;
+ struct kset *fw_attr_kset;
+
+ /*
+ * Mutex protecting eGPU activation/deactivation sequences and the
+ * dGPU connection status: concurrent changes are not allowed, nor
+ * are changes before a reboot once the dGPU has been disabled.
+ */
+ struct mutex egpu_mutex;
+
+ /* Index 0 for DC, 1 for AC */
+ struct rog_tunables *rog_tunables[2];
+
+ u32 mini_led_dev_id;
+ u32 gpu_mux_dev_id;
+};
+
+static struct asus_armoury_priv asus_armoury = {
+ .egpu_mutex = __MUTEX_INITIALIZER(asus_armoury.egpu_mutex),
+};
+
+struct fw_attrs_group {
+ bool pending_reboot;
+};
+
+static struct fw_attrs_group fw_attrs = {
+ .pending_reboot = false,
+};
+
+struct asus_attr_group {
+ const struct attribute_group *attr_group;
+ u32 wmi_devid;
+};
+
+static void asus_set_reboot_and_signal_event(void)
+{
+ fw_attrs.pending_reboot = true;
+ kobject_uevent(&asus_armoury.fw_attr_dev->kobj, KOBJ_CHANGE);
+}
+
+static ssize_t pending_reboot_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "%d\n", fw_attrs.pending_reboot);
+}
+
+static struct kobj_attribute pending_reboot = __ATTR_RO(pending_reboot);
+
+static bool asus_bios_requires_reboot(struct kobj_attribute *attr)
+{
+ return !strcmp(attr->attr.name, "gpu_mux_mode") ||
+ !strcmp(attr->attr.name, "panel_hd_mode");
+}
+
+/**
+ * armoury_has_devstate() - Check for the presence of a WMI function.
+ * @dev_id: The WMI method ID to check for presence.
+ *
+ * Returns: true if and only if the method is supported.
+ */
+static bool armoury_has_devstate(u32 dev_id)
+{
+ u32 retval = 0;
+ int status;
+
+ status = asus_wmi_evaluate_method(ASUS_WMI_METHODID_DSTS, dev_id, 0, &retval);
+ pr_debug("%s called (0x%08x), retval: 0x%08x\n", __func__, dev_id, retval);
+
+ return status == 0 && (retval & ASUS_WMI_DSTS_PRESENCE_BIT);
+}
+
+/**
+ * armoury_get_devstate() - Get the WMI function state.
+ * @attr: NULL or the kobj_attribute associated with the called WMI function.
+ * @retval: Non-NULL pointer to where the value returned from WMI is stored,
+ *          with the function presence bit cleared.
+ * @dev_id: The WMI method ID to call.
+ *
+ * Intended to be called from a sysfs attribute to query the associated WMI
+ * function.
+ *
+ * Returns:
+ * * %-ENODEV - method ID is unsupported.
+ * * %0 - successful and retval is filled.
+ * * %other - error from WMI call.
+ */
+static int armoury_get_devstate(struct kobj_attribute *attr, u32 *retval, u32 dev_id)
+{
+ int err;
+
+ err = asus_wmi_get_devstate_dsts(dev_id, retval);
+ if (err) {
+ if (attr)
+ pr_err("Failed to get %s: %d\n", attr->attr.name, err);
+ else
+ pr_err("Failed to get devstate for 0x%x: %d\n", dev_id, err);
+
+ return err;
+ }
+
+ /*
+ * asus_wmi_get_devstate_dsts will populate retval with WMI return, but
+ * the true value is expressed when ASUS_WMI_DSTS_PRESENCE_BIT is clear.
+ */
+ *retval &= ~ASUS_WMI_DSTS_PRESENCE_BIT;
+
+ return 0;
+}
+
+/**
+ * armoury_set_devstate() - Set the WMI function state.
+ * @attr: The kobj_attribute associated with the called WMI function.
+ * @value: The new value to be set.
+ * @retval: Where to store the value returned from WMI, or NULL.
+ * @dev_id: The WMI method ID to call.
+ *
+ * Intended to be called from a sysfs attribute to set the associated WMI
+ * function. The presence of the function should be checked before calling.
+ *
+ * Every WMI write MUST go through this function to enforce safety checks.
+ *
+ * A result other than 1 is usually considered a failure by ASUS, but some
+ * WMI methods (such as eGPU or CPU cores) use values > 1 as a status code
+ * or similar: in these cases the caller is interested in the actual return
+ * value and should perform the relevant checks.
+ *
+ * Returns:
+ * * %-EINVAL - attempt to set a dangerous or unsupported value.
+ * * %-EIO - WMI function returned an error.
+ * * %0 - successful and retval is filled.
+ * * %other - error from WMI call.
+ */
+static int armoury_set_devstate(struct kobj_attribute *attr,
+ u32 value, u32 *retval, u32 dev_id)
+{
+ u32 result;
+ int err;
+
+ /*
+ * Prevent developers from bricking devices or issuing dangerous
+ * commands that can be difficult or impossible to recover from.
+ */
+ switch (dev_id) {
+ case ASUS_WMI_DEVID_APU_MEM:
+ /*
+ * A hard reset might suffice to save the device,
+ * but there is no value in sending these commands.
+ */
+ if (value == 0x100 || value == 0x101) {
+ pr_err("Refusing to set APU memory to unsafe value: 0x%x\n", value);
+ return -EINVAL;
+ }
+ break;
+ default:
+ /* No problems are known for this dev_id */
+ break;
+ }
+
+ err = asus_wmi_set_devstate(dev_id, value, retval ? retval : &result);
+ if (err) {
+ if (attr)
+ pr_err("Failed to set %s: %d\n", attr->attr.name, err);
+ else
+ pr_err("Failed to set devstate for 0x%x: %d\n", dev_id, err);
+
+ return err;
+ }
+
+ /*
+ * If retval is NULL the caller is not interested in the return value:
+ * perform the most common result check here.
+ */
+ if (retval == NULL && result == 0) {
+ pr_err("Failed to set %s: WMI result is 0x%x\n", attr->attr.name, result);
+ return -EIO;
+ }
+
+ return 0;
+}
+
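+/*
+ * Example (sketch): simple boolean toggles pass a NULL retval and rely on
+ * the common convention checked above, where a zero WMI result means the
+ * write failed:
+ *	err = armoury_set_devstate(attr, 1, NULL, ASUS_WMI_DEVID_BOOT_SOUND);
+ */
+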
+static int armoury_attr_enum_list(char *buf, size_t enum_values)
+{
+ size_t i;
+ int len = 0;
+
+ for (i = 0; i < enum_values; i++) {
+ if (i == 0)
+ len += sysfs_emit_at(buf, len, "%zu", i);
+ else
+ len += sysfs_emit_at(buf, len, ";%zu", i);
+ }
+ len += sysfs_emit_at(buf, len, "\n");
+
+ return len;
+}
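+
+/* For example, armoury_attr_enum_list(buf, 3) emits "0;1;2\n". */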
+
+ssize_t armoury_attr_uint_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t count, u32 min, u32 max,
+ u32 *store_value, u32 wmi_dev)
+{
+ u32 value;
+ int err;
+
+ err = kstrtou32(buf, 10, &value);
+ if (err)
+ return err;
+
+ if (value < min || value > max)
+ return -EINVAL;
+
+ err = armoury_set_devstate(attr, value, NULL, wmi_dev);
+ if (err)
+ return err;
+
+ if (store_value != NULL)
+ *store_value = value;
+ sysfs_notify(kobj, NULL, attr->attr.name);
+
+ if (asus_bios_requires_reboot(attr))
+ asus_set_reboot_and_signal_event();
+
+ return count;
+}
+
+ssize_t armoury_attr_uint_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf, u32 wmi_dev)
+{
+ u32 result;
+ int err;
+
+ err = armoury_get_devstate(attr, &result, wmi_dev);
+ if (err)
+ return err;
+
+ return sysfs_emit(buf, "%u\n", result);
+}
+
+static ssize_t enum_type_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ return sysfs_emit(buf, "enumeration\n");
+}
+
+static ssize_t int_type_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ return sysfs_emit(buf, "integer\n");
+}
+
+/* Mini-LED mode **************************************************************/
+
+/* Values map for mini-led modes on 2023 and earlier models. */
+static u32 mini_led_mode1_map[] = {
+ [0] = ASUS_MINI_LED_OFF,
+ [1] = ASUS_MINI_LED_ON,
+};
+
+/* Values map for mini-led modes on 2024 and later models. */
+static u32 mini_led_mode2_map[] = {
+ [0] = ASUS_MINI_LED_2024_OFF,
+ [1] = ASUS_MINI_LED_2024_WEAK,
+ [2] = ASUS_MINI_LED_2024_STRONG,
+};
+
+static ssize_t mini_led_mode_current_value_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ u32 *mini_led_mode_map;
+ size_t mini_led_mode_map_size;
+ u32 i, mode;
+ int err;
+
+ switch (asus_armoury.mini_led_dev_id) {
+ case ASUS_WMI_DEVID_MINI_LED_MODE:
+ mini_led_mode_map = mini_led_mode1_map;
+ mini_led_mode_map_size = ARRAY_SIZE(mini_led_mode1_map);
+ break;
+
+ case ASUS_WMI_DEVID_MINI_LED_MODE2:
+ mini_led_mode_map = mini_led_mode2_map;
+ mini_led_mode_map_size = ARRAY_SIZE(mini_led_mode2_map);
+ break;
+
+ default:
+ pr_err("Unrecognized mini-LED device: %u\n", asus_armoury.mini_led_dev_id);
+ return -ENODEV;
+ }
+
+ err = armoury_get_devstate(attr, &mode, asus_armoury.mini_led_dev_id);
+ if (err)
+ return err;
+
+ mode = FIELD_GET(ASUS_MINI_LED_MODE_MASK, mode);
+
+ for (i = 0; i < mini_led_mode_map_size; i++)
+ if (mode == mini_led_mode_map[i])
+ return sysfs_emit(buf, "%u\n", i);
+
+ pr_warn("Unrecognized mini-LED mode: %u", mode);
+ return -EINVAL;
+}
+
+static ssize_t mini_led_mode_current_value_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ u32 *mini_led_mode_map;
+ size_t mini_led_mode_map_size;
+ u32 mode;
+ int err;
+
+ err = kstrtou32(buf, 10, &mode);
+ if (err)
+ return err;
+
+ switch (asus_armoury.mini_led_dev_id) {
+ case ASUS_WMI_DEVID_MINI_LED_MODE:
+ mini_led_mode_map = mini_led_mode1_map;
+ mini_led_mode_map_size = ARRAY_SIZE(mini_led_mode1_map);
+ break;
+
+ case ASUS_WMI_DEVID_MINI_LED_MODE2:
+ mini_led_mode_map = mini_led_mode2_map;
+ mini_led_mode_map_size = ARRAY_SIZE(mini_led_mode2_map);
+ break;
+
+ default:
+ pr_err("Unrecognized mini-LED devid: %u\n", asus_armoury.mini_led_dev_id);
+ return -EINVAL;
+ }
+
+ if (mode >= mini_led_mode_map_size) {
+ pr_warn("mini-LED mode unrecognized device: %u\n", mode);
+ return -ENODEV;
+ }
+
+ return armoury_attr_uint_store(kobj, attr, buf, count,
+ 0, mini_led_mode_map[mode],
+ NULL, asus_armoury.mini_led_dev_id);
+}
+
+static ssize_t mini_led_mode_possible_values_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ switch (asus_armoury.mini_led_dev_id) {
+ case ASUS_WMI_DEVID_MINI_LED_MODE:
+ return armoury_attr_enum_list(buf, ARRAY_SIZE(mini_led_mode1_map));
+ case ASUS_WMI_DEVID_MINI_LED_MODE2:
+ return armoury_attr_enum_list(buf, ARRAY_SIZE(mini_led_mode2_map));
+ default:
+ return -ENODEV;
+ }
+}
+ASUS_ATTR_GROUP_ENUM(mini_led_mode, "mini_led_mode", "Set the mini-LED backlight mode");
+
+static ssize_t gpu_mux_mode_current_value_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ u32 result;
+ int err;
+ bool optimus;
+
+ err = kstrtobool(buf, &optimus);
+ if (err)
+ return err;
+
+ if (armoury_has_devstate(ASUS_WMI_DEVID_DGPU)) {
+ err = armoury_get_devstate(NULL, &result, ASUS_WMI_DEVID_DGPU);
+ if (err)
+ return err;
+ if (result && !optimus) {
+ pr_warn("Cannot switch MUX to dGPU mode when dGPU is disabled: %02X\n",
+ result);
+ return -ENODEV;
+ }
+ }
+
+ if (armoury_has_devstate(ASUS_WMI_DEVID_EGPU)) {
+ err = armoury_get_devstate(NULL, &result, ASUS_WMI_DEVID_EGPU);
+ if (err)
+ return err;
+ if (result && !optimus) {
+ pr_warn("Cannot switch MUX to dGPU mode when eGPU is enabled\n");
+ return -EBUSY;
+ }
+ }
+
+ err = armoury_set_devstate(attr, optimus ? 1 : 0, NULL, asus_armoury.gpu_mux_dev_id);
+ if (err)
+ return err;
+
+ sysfs_notify(kobj, NULL, attr->attr.name);
+ asus_set_reboot_and_signal_event();
+
+ return count;
+}
+ASUS_WMI_SHOW_INT(gpu_mux_mode_current_value, asus_armoury.gpu_mux_dev_id);
+ASUS_ATTR_GROUP_BOOL(gpu_mux_mode, "gpu_mux_mode", "Set the GPU display MUX mode");
+
+static ssize_t dgpu_disable_current_value_store(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buf,
+ size_t count)
+{
+ u32 result;
+ int err;
+ bool disable;
+
+ err = kstrtobool(buf, &disable);
+ if (err)
+ return err;
+
+ if (asus_armoury.gpu_mux_dev_id) {
+ err = armoury_get_devstate(NULL, &result, asus_armoury.gpu_mux_dev_id);
+ if (err)
+ return err;
+ if (!result && disable) {
+ pr_warn("Cannot disable dGPU when the MUX is in dGPU mode\n");
+ return -EBUSY;
+ }
+ }
+
+ scoped_guard(mutex, &asus_armoury.egpu_mutex) {
+ err = armoury_set_devstate(attr, disable ? 1 : 0, NULL, ASUS_WMI_DEVID_DGPU);
+ if (err)
+ return err;
+ }
+
+ sysfs_notify(kobj, NULL, attr->attr.name);
+
+ return count;
+}
+ASUS_WMI_SHOW_INT(dgpu_disable_current_value, ASUS_WMI_DEVID_DGPU);
+ASUS_ATTR_GROUP_BOOL(dgpu_disable, "dgpu_disable", "Disable the dGPU");
+
+/* Values map for eGPU activation requests. */
+static u32 egpu_status_map[] = {
+ [0] = 0x00000000U,
+ [1] = 0x00000001U,
+ [2] = 0x00000101U,
+ [3] = 0x00000201U,
+};
+
+/*
+ * armoury_pci_rescan() - Performs a PCI rescan
+ *
+ * Bring up any GPU that has been hotplugged in the system.
+ */
+static void armoury_pci_rescan(void)
+{
+ struct pci_bus *b = NULL;
+
+ pci_lock_rescan_remove();
+ while ((b = pci_find_next_bus(b)) != NULL)
+ pci_rescan_bus(b);
+ pci_unlock_rescan_remove();
+}
+
+/*
+ * The ACPI call to enable the eGPU might also disable the internal dGPU,
+ * but this is not always the case and on certain models enabling the eGPU
+ * when the dGPU is either still active or has been disabled without rebooting
+ * will make both GPUs malfunction and the kernel will detect many
+ * PCI AER unrecoverable errors.
+ */
+static ssize_t egpu_enable_current_value_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ int err;
+ u32 requested, enable, result;
+
+ err = kstrtou32(buf, 10, &requested);
+ if (err)
+ return err;
+
+ if (requested >= ARRAY_SIZE(egpu_status_map))
+ return -EINVAL;
+ enable = egpu_status_map[requested];
+
+ scoped_guard(mutex, &asus_armoury.egpu_mutex) {
+ /* Ensure the eGPU is connected before attempting to activate it. */
+ if (enable) {
+ err = armoury_get_devstate(NULL, &result, ASUS_WMI_DEVID_EGPU_CONNECTED);
+ if (err) {
+ pr_warn("Failed to get eGPU connection status: %d\n", err);
+ return err;
+ }
+ if (!result) {
+ pr_warn("Cannot activate eGPU while undetected\n");
+ return -ENOENT;
+ }
+ }
+
+ if (asus_armoury.gpu_mux_dev_id) {
+ err = armoury_get_devstate(NULL, &result, asus_armoury.gpu_mux_dev_id);
+ if (err)
+ return err;
+
+ if (!result && enable) {
+ pr_warn("Cannot enable eGPU when the MUX is in dGPU mode\n");
+ return -ENODEV;
+ }
+ }
+
+ err = armoury_set_devstate(attr, enable, &result, ASUS_WMI_DEVID_EGPU);
+ if (err) {
+ pr_err("Failed to set %s: %d\n", attr->attr.name, err);
+ return err;
+ }
+
+ /*
+ * ACPI returns value 0x01 on success and 0x02 on a partial activation:
+ * performing a PCI rescan will bring up the device at PCIe 3.0 speed;
+ * after a reboot the device will work at full speed.
+ */
+ switch (result) {
+ case 0x01:
+ /*
+ * When a GPU is in use it does not get disconnected even if
+ * the ACPI call returns a success.
+ */
+ if (!enable) {
+ err = armoury_get_devstate(attr, &result, ASUS_WMI_DEVID_EGPU);
+ if (err) {
+ pr_warn("Failed to ensure eGPU is deactivated: %d\n", err);
+ return err;
+ }
+
+ if (result != 0)
+ return -EBUSY;
+ }
+
+ pr_debug("Success changing the eGPU status\n");
+ break;
+ case 0x02:
+ pr_info("Success changing the eGPU status, a reboot is strongly advised\n");
+ asus_set_reboot_and_signal_event();
+ break;
+ default:
+ pr_err("Failed to change the eGPU status: wmi result is 0x%x\n", result);
+ return -EIO;
+ }
+ }
+
+ /*
+ * Perform a PCI rescan: on every tested model this is necessary
+ * to make the eGPU visible on the bus without rebooting.
+ */
+ armoury_pci_rescan();
+
+ sysfs_notify(kobj, NULL, attr->attr.name);
+
+ return count;
+}
+
+static ssize_t egpu_enable_current_value_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ int i, err;
+ u32 status;
+
+ scoped_guard(mutex, &asus_armoury.egpu_mutex) {
+ err = armoury_get_devstate(attr, &status, ASUS_WMI_DEVID_EGPU);
+ if (err)
+ return err;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(egpu_status_map); i++) {
+ if (egpu_status_map[i] == status)
+ return sysfs_emit(buf, "%u\n", i);
+ }
+
+ return -EIO;
+}
+
+static ssize_t egpu_enable_possible_values_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ return armoury_attr_enum_list(buf, ARRAY_SIZE(egpu_status_map));
+}
+ASUS_ATTR_GROUP_ENUM(egpu_enable, "egpu_enable", "Enable the eGPU (also disables dGPU)");
+
+/* Device memory available to APU */
+
+/*
+ * Values map for APU reserved memory (index + 1 = number of GB).
+ * Some entries look out of order, but they are correct.
+ */
+static u32 apu_mem_map[] = {
+ [0] = 0x000, /* called "AUTO" in the BIOS; the minimum available */
+ [1] = 0x102,
+ [2] = 0x103,
+ [3] = 0x104,
+ [4] = 0x105,
+ [5] = 0x107,
+ [6] = 0x108,
+ [7] = 0x109,
+ [8] = 0x106,
+};
+
+static ssize_t apu_mem_current_value_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ int err;
+ u32 mem;
+
+ err = armoury_get_devstate(attr, &mem, ASUS_WMI_DEVID_APU_MEM);
+ if (err)
+ return err;
+
+ /* After 0x000 is set, a read will return 0x100 */
+ if (mem == 0x100)
+ return sysfs_emit(buf, "0\n");
+
+ for (unsigned int i = 0; i < ARRAY_SIZE(apu_mem_map); i++) {
+ if (apu_mem_map[i] == mem)
+ return sysfs_emit(buf, "%u\n", i);
+ }
+
+ pr_warn("Unrecognised value for APU mem 0x%08x\n", mem);
+ return -EIO;
+}
+
+static ssize_t apu_mem_current_value_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ int result, err;
+ u32 requested, mem;
+
+ result = kstrtou32(buf, 10, &requested);
+ if (result)
+ return result;
+
+ if (requested >= ARRAY_SIZE(apu_mem_map))
+ return -EINVAL;
+ mem = apu_mem_map[requested];
+
+ err = armoury_set_devstate(attr, mem, NULL, ASUS_WMI_DEVID_APU_MEM);
+ if (err) {
+ pr_warn("Failed to set apu_mem 0x%x: %d\n", mem, err);
+ return err;
+ }
+
+ pr_info("APU memory changed to %uGB, reboot required\n", requested + 1);
+ sysfs_notify(kobj, NULL, attr->attr.name);
+
+ asus_set_reboot_and_signal_event();
+
+ return count;
+}
+
+static ssize_t apu_mem_possible_values_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ return armoury_attr_enum_list(buf, ARRAY_SIZE(apu_mem_map));
+}
+ASUS_ATTR_GROUP_ENUM(apu_mem, "apu_mem", "Set available system RAM (in GB) for the APU to use");
+
+/* Define helper to access the current power mode tunable values */
+static inline struct rog_tunables *get_current_tunables(void)
+{
+ if (power_supply_is_system_supplied())
+ return asus_armoury.rog_tunables[ASUS_ROG_TUNABLE_AC];
+
+ return asus_armoury.rog_tunables[ASUS_ROG_TUNABLE_DC];
+}
+
+/* Simple attribute creation */
+ASUS_ATTR_GROUP_ENUM_INT_RO(charge_mode, "charge_mode", ASUS_WMI_DEVID_CHARGE_MODE, "0;1;2\n",
+ "Show the current mode of charging");
+ASUS_ATTR_GROUP_BOOL_RW(boot_sound, "boot_sound", ASUS_WMI_DEVID_BOOT_SOUND,
+ "Set the boot POST sound");
+ASUS_ATTR_GROUP_BOOL_RW(mcu_powersave, "mcu_powersave", ASUS_WMI_DEVID_MCU_POWERSAVE,
+ "Set MCU powersaving mode");
+ASUS_ATTR_GROUP_BOOL_RW(panel_od, "panel_overdrive", ASUS_WMI_DEVID_PANEL_OD,
+ "Set the panel refresh overdrive");
+ASUS_ATTR_GROUP_BOOL_RW(panel_hd_mode, "panel_hd_mode", ASUS_WMI_DEVID_PANEL_HD,
+ "Set the panel HD mode to UHD<0> or FHD<1>");
+ASUS_ATTR_GROUP_BOOL_RW(screen_auto_brightness, "screen_auto_brightness",
+ ASUS_WMI_DEVID_SCREEN_AUTO_BRIGHTNESS,
+ "Set the panel brightness to Off<0> or On<1>");
+ASUS_ATTR_GROUP_BOOL_RO(egpu_connected, "egpu_connected", ASUS_WMI_DEVID_EGPU_CONNECTED,
+ "Show the eGPU connection status");
+ASUS_ATTR_GROUP_ROG_TUNABLE(ppt_pl1_spl, ATTR_PPT_PL1_SPL, ASUS_WMI_DEVID_PPT_PL1_SPL,
+ "Set the CPU slow package limit");
+ASUS_ATTR_GROUP_ROG_TUNABLE(ppt_pl2_sppt, ATTR_PPT_PL2_SPPT, ASUS_WMI_DEVID_PPT_PL2_SPPT,
+ "Set the CPU fast package limit");
+ASUS_ATTR_GROUP_ROG_TUNABLE(ppt_pl3_fppt, ATTR_PPT_PL3_FPPT, ASUS_WMI_DEVID_PPT_PL3_FPPT,
+ "Set the CPU fastest package limit");
+ASUS_ATTR_GROUP_ROG_TUNABLE(ppt_apu_sppt, ATTR_PPT_APU_SPPT, ASUS_WMI_DEVID_PPT_APU_SPPT,
+ "Set the APU package limit");
+ASUS_ATTR_GROUP_ROG_TUNABLE(ppt_platform_sppt, ATTR_PPT_PLATFORM_SPPT, ASUS_WMI_DEVID_PPT_PLAT_SPPT,
+ "Set the platform package limit");
+ASUS_ATTR_GROUP_ROG_TUNABLE(nv_dynamic_boost, ATTR_NV_DYNAMIC_BOOST, ASUS_WMI_DEVID_NV_DYN_BOOST,
+ "Set the Nvidia dynamic boost limit");
+ASUS_ATTR_GROUP_ROG_TUNABLE(nv_temp_target, ATTR_NV_TEMP_TARGET, ASUS_WMI_DEVID_NV_THERM_TARGET,
+ "Set the Nvidia max thermal limit");
+ASUS_ATTR_GROUP_ROG_TUNABLE(nv_tgp, "nv_tgp", ASUS_WMI_DEVID_DGPU_SET_TGP,
+ "Set the additional TGP on top of the base TGP");
+ASUS_ATTR_GROUP_INT_VALUE_ONLY_RO(nv_base_tgp, ATTR_NV_BASE_TGP, ASUS_WMI_DEVID_DGPU_BASE_TGP,
+ "Read the base TGP value");
+
+/* If an attribute does not require any special case handling add it here */
+static const struct asus_attr_group armoury_attr_groups[] = {
+ { &egpu_connected_attr_group, ASUS_WMI_DEVID_EGPU_CONNECTED },
+ { &egpu_enable_attr_group, ASUS_WMI_DEVID_EGPU },
+ { &dgpu_disable_attr_group, ASUS_WMI_DEVID_DGPU },
+ { &apu_mem_attr_group, ASUS_WMI_DEVID_APU_MEM },
+
+ { &ppt_pl1_spl_attr_group, ASUS_WMI_DEVID_PPT_PL1_SPL },
+ { &ppt_pl2_sppt_attr_group, ASUS_WMI_DEVID_PPT_PL2_SPPT },
+ { &ppt_pl3_fppt_attr_group, ASUS_WMI_DEVID_PPT_PL3_FPPT },
+ { &ppt_apu_sppt_attr_group, ASUS_WMI_DEVID_PPT_APU_SPPT },
+ { &ppt_platform_sppt_attr_group, ASUS_WMI_DEVID_PPT_PLAT_SPPT },
+ { &nv_dynamic_boost_attr_group, ASUS_WMI_DEVID_NV_DYN_BOOST },
+ { &nv_temp_target_attr_group, ASUS_WMI_DEVID_NV_THERM_TARGET },
+ { &nv_base_tgp_attr_group, ASUS_WMI_DEVID_DGPU_BASE_TGP },
+ { &nv_tgp_attr_group, ASUS_WMI_DEVID_DGPU_SET_TGP },
+
+ { &charge_mode_attr_group, ASUS_WMI_DEVID_CHARGE_MODE },
+ { &boot_sound_attr_group, ASUS_WMI_DEVID_BOOT_SOUND },
+ { &mcu_powersave_attr_group, ASUS_WMI_DEVID_MCU_POWERSAVE },
+ { &panel_od_attr_group, ASUS_WMI_DEVID_PANEL_OD },
+ { &panel_hd_mode_attr_group, ASUS_WMI_DEVID_PANEL_HD },
+ { &screen_auto_brightness_attr_group, ASUS_WMI_DEVID_SCREEN_AUTO_BRIGHTNESS },
+};
+
+/**
+ * is_power_tunable_attr() - Determine if an attribute is a power-related tunable
+ * @name: The name of the attribute to check
+ *
+ * This function checks if the given attribute name is related to power tuning.
+ *
+ * Return: true if the attribute is a power-related tunable, false otherwise
+ */
+static bool is_power_tunable_attr(const char *name)
+{
+ static const char * const power_tunable_attrs[] = {
+ ATTR_PPT_PL1_SPL, ATTR_PPT_PL2_SPPT,
+ ATTR_PPT_PL3_FPPT, ATTR_PPT_APU_SPPT,
+ ATTR_PPT_PLATFORM_SPPT, ATTR_NV_DYNAMIC_BOOST,
+ ATTR_NV_TEMP_TARGET, ATTR_NV_BASE_TGP,
+ ATTR_NV_TGP
+ };
+
+ for (unsigned int i = 0; i < ARRAY_SIZE(power_tunable_attrs); i++) {
+ if (!strcmp(name, power_tunable_attrs[i]))
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * has_valid_limit() - Check if a power-related attribute has a valid limit value
+ * @name: The name of the attribute to check
+ * @limits: Pointer to the power_limits structure containing limit values
+ *
+ * This function checks if a power-related attribute has a valid limit value.
+ * It returns false if limits is NULL or if the corresponding limit value is zero.
+ *
+ * Return: true if the attribute has a valid limit value, false otherwise
+ */
+static bool has_valid_limit(const char *name, const struct power_limits *limits)
+{
+ u32 limit_value = 0;
+
+ if (!limits)
+ return false;
+
+ if (!strcmp(name, ATTR_PPT_PL1_SPL))
+ limit_value = limits->ppt_pl1_spl_max;
+ else if (!strcmp(name, ATTR_PPT_PL2_SPPT))
+ limit_value = limits->ppt_pl2_sppt_max;
+ else if (!strcmp(name, ATTR_PPT_PL3_FPPT))
+ limit_value = limits->ppt_pl3_fppt_max;
+ else if (!strcmp(name, ATTR_PPT_APU_SPPT))
+ limit_value = limits->ppt_apu_sppt_max;
+ else if (!strcmp(name, ATTR_PPT_PLATFORM_SPPT))
+ limit_value = limits->ppt_platform_sppt_max;
+ else if (!strcmp(name, ATTR_NV_DYNAMIC_BOOST))
+ limit_value = limits->nv_dynamic_boost_max;
+ else if (!strcmp(name, ATTR_NV_TEMP_TARGET))
+ limit_value = limits->nv_temp_target_max;
+ else if (!strcmp(name, ATTR_NV_BASE_TGP) ||
+ !strcmp(name, ATTR_NV_TGP))
+ limit_value = limits->nv_tgp_max;
+
+ return limit_value > 0;
+}
+
+static int asus_fw_attr_add(void)
+{
+ const struct rog_tunables *const ac_rog_tunables =
+ asus_armoury.rog_tunables[ASUS_ROG_TUNABLE_AC];
+ const struct power_limits *limits;
+ bool should_create;
+ const char *name;
+ int err, i;
+
+ asus_armoury.fw_attr_dev = device_create(&firmware_attributes_class, NULL, MKDEV(0, 0),
+ NULL, "%s", DRIVER_NAME);
+ if (IS_ERR(asus_armoury.fw_attr_dev)) {
+ err = PTR_ERR(asus_armoury.fw_attr_dev);
+ goto fail_class_get;
+ }
+
+ asus_armoury.fw_attr_kset = kset_create_and_add("attributes", NULL,
+ &asus_armoury.fw_attr_dev->kobj);
+ if (!asus_armoury.fw_attr_kset) {
+ err = -ENOMEM;
+ goto err_destroy_classdev;
+ }
+
+ err = sysfs_create_file(&asus_armoury.fw_attr_kset->kobj, &pending_reboot.attr);
+ if (err) {
+ pr_err("Failed to create sysfs level attributes\n");
+ goto err_destroy_kset;
+ }
+
+ asus_armoury.mini_led_dev_id = 0;
+ if (armoury_has_devstate(ASUS_WMI_DEVID_MINI_LED_MODE))
+ asus_armoury.mini_led_dev_id = ASUS_WMI_DEVID_MINI_LED_MODE;
+ else if (armoury_has_devstate(ASUS_WMI_DEVID_MINI_LED_MODE2))
+ asus_armoury.mini_led_dev_id = ASUS_WMI_DEVID_MINI_LED_MODE2;
+
+ if (asus_armoury.mini_led_dev_id) {
+ err = sysfs_create_group(&asus_armoury.fw_attr_kset->kobj,
+ &mini_led_mode_attr_group);
+ if (err) {
+ pr_err("Failed to create sysfs-group for mini_led\n");
+ goto err_remove_file;
+ }
+ }
+
+ asus_armoury.gpu_mux_dev_id = 0;
+ if (armoury_has_devstate(ASUS_WMI_DEVID_GPU_MUX))
+ asus_armoury.gpu_mux_dev_id = ASUS_WMI_DEVID_GPU_MUX;
+ else if (armoury_has_devstate(ASUS_WMI_DEVID_GPU_MUX_VIVO))
+ asus_armoury.gpu_mux_dev_id = ASUS_WMI_DEVID_GPU_MUX_VIVO;
+
+ if (asus_armoury.gpu_mux_dev_id) {
+ err = sysfs_create_group(&asus_armoury.fw_attr_kset->kobj,
+ &gpu_mux_mode_attr_group);
+ if (err) {
+ pr_err("Failed to create sysfs-group for gpu_mux\n");
+ goto err_remove_mini_led_group;
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(armoury_attr_groups); i++) {
+ if (!armoury_has_devstate(armoury_attr_groups[i].wmi_devid))
+ continue;
+
+		/* Create by default; skip power tunables with no known limit */
+ should_create = true;
+ name = armoury_attr_groups[i].attr_group->name;
+
+ /* Check if this is a power-related tunable requiring limits */
+ if (ac_rog_tunables && ac_rog_tunables->power_limits &&
+ is_power_tunable_attr(name)) {
+ limits = ac_rog_tunables->power_limits;
+ /* Check only AC: if not present then DC won't be either */
+ should_create = has_valid_limit(name, limits);
+ if (!should_create)
+ pr_debug("Missing max value for tunable %s\n", name);
+ }
+
+ if (should_create) {
+ err = sysfs_create_group(&asus_armoury.fw_attr_kset->kobj,
+ armoury_attr_groups[i].attr_group);
+ if (err) {
+ pr_err("Failed to create sysfs-group for %s\n",
+ armoury_attr_groups[i].attr_group->name);
+ goto err_remove_groups;
+ }
+ }
+ }
+
+ return 0;
+
+err_remove_groups:
+ while (i--) {
+ if (armoury_has_devstate(armoury_attr_groups[i].wmi_devid))
+ sysfs_remove_group(&asus_armoury.fw_attr_kset->kobj,
+ armoury_attr_groups[i].attr_group);
+ }
+ if (asus_armoury.gpu_mux_dev_id)
+ sysfs_remove_group(&asus_armoury.fw_attr_kset->kobj, &gpu_mux_mode_attr_group);
+err_remove_mini_led_group:
+ if (asus_armoury.mini_led_dev_id)
+ sysfs_remove_group(&asus_armoury.fw_attr_kset->kobj, &mini_led_mode_attr_group);
+err_remove_file:
+ sysfs_remove_file(&asus_armoury.fw_attr_kset->kobj, &pending_reboot.attr);
+err_destroy_kset:
+ kset_unregister(asus_armoury.fw_attr_kset);
+err_destroy_classdev:
+fail_class_get:
+ device_destroy(&firmware_attributes_class, MKDEV(0, 0));
+ return err;
+}
+
+/* Init / exit ****************************************************************/
+
+/* Set up the min/max and defaults for ROG tunables */
+static void init_rog_tunables(void)
+{
+ const struct power_limits *ac_limits, *dc_limits;
+ struct rog_tunables *ac_rog_tunables = NULL, *dc_rog_tunables = NULL;
+ const struct power_data *power_data;
+ const struct dmi_system_id *dmi_id;
+
+ /* Match the system against the power_limits table */
+ dmi_id = dmi_first_match(power_limits);
+ if (!dmi_id) {
+ pr_warn("No matching power limits found for this system\n");
+ return;
+ }
+
+ /* Get the power data for this system */
+ power_data = dmi_id->driver_data;
+ if (!power_data) {
+ pr_info("No power data available for this system\n");
+ return;
+ }
+
+ /* Initialize AC power tunables */
+ ac_limits = power_data->ac_data;
+ if (ac_limits) {
+ ac_rog_tunables = kzalloc(sizeof(*asus_armoury.rog_tunables[ASUS_ROG_TUNABLE_AC]),
+ GFP_KERNEL);
+ if (!ac_rog_tunables)
+ goto err_nomem;
+
+ asus_armoury.rog_tunables[ASUS_ROG_TUNABLE_AC] = ac_rog_tunables;
+ ac_rog_tunables->power_limits = ac_limits;
+
+ /* Set initial AC values */
+ ac_rog_tunables->ppt_pl1_spl =
+ ac_limits->ppt_pl1_spl_def ?
+ ac_limits->ppt_pl1_spl_def :
+ ac_limits->ppt_pl1_spl_max;
+
+ ac_rog_tunables->ppt_pl2_sppt =
+ ac_limits->ppt_pl2_sppt_def ?
+ ac_limits->ppt_pl2_sppt_def :
+ ac_limits->ppt_pl2_sppt_max;
+
+ ac_rog_tunables->ppt_pl3_fppt =
+ ac_limits->ppt_pl3_fppt_def ?
+ ac_limits->ppt_pl3_fppt_def :
+ ac_limits->ppt_pl3_fppt_max;
+
+ ac_rog_tunables->ppt_apu_sppt =
+ ac_limits->ppt_apu_sppt_def ?
+ ac_limits->ppt_apu_sppt_def :
+ ac_limits->ppt_apu_sppt_max;
+
+ ac_rog_tunables->ppt_platform_sppt =
+ ac_limits->ppt_platform_sppt_def ?
+ ac_limits->ppt_platform_sppt_def :
+ ac_limits->ppt_platform_sppt_max;
+
+ ac_rog_tunables->nv_dynamic_boost =
+ ac_limits->nv_dynamic_boost_max;
+ ac_rog_tunables->nv_temp_target =
+ ac_limits->nv_temp_target_max;
+ ac_rog_tunables->nv_tgp = ac_limits->nv_tgp_max;
+
+ pr_debug("AC power limits initialized for %s\n", dmi_id->matches[0].substr);
+ } else {
+ pr_debug("No AC PPT limits defined\n");
+ }
+
+ /* Initialize DC power tunables */
+ dc_limits = power_data->dc_data;
+ if (dc_limits) {
+ dc_rog_tunables = kzalloc(sizeof(*asus_armoury.rog_tunables[ASUS_ROG_TUNABLE_DC]),
+ GFP_KERNEL);
+		if (!dc_rog_tunables) {
+			kfree(ac_rog_tunables);
+			asus_armoury.rog_tunables[ASUS_ROG_TUNABLE_AC] = NULL;
+			goto err_nomem;
+		}
+
+ asus_armoury.rog_tunables[ASUS_ROG_TUNABLE_DC] = dc_rog_tunables;
+ dc_rog_tunables->power_limits = dc_limits;
+
+ /* Set initial DC values */
+ dc_rog_tunables->ppt_pl1_spl =
+ dc_limits->ppt_pl1_spl_def ?
+ dc_limits->ppt_pl1_spl_def :
+ dc_limits->ppt_pl1_spl_max;
+
+ dc_rog_tunables->ppt_pl2_sppt =
+ dc_limits->ppt_pl2_sppt_def ?
+ dc_limits->ppt_pl2_sppt_def :
+ dc_limits->ppt_pl2_sppt_max;
+
+ dc_rog_tunables->ppt_pl3_fppt =
+ dc_limits->ppt_pl3_fppt_def ?
+ dc_limits->ppt_pl3_fppt_def :
+ dc_limits->ppt_pl3_fppt_max;
+
+ dc_rog_tunables->ppt_apu_sppt =
+ dc_limits->ppt_apu_sppt_def ?
+ dc_limits->ppt_apu_sppt_def :
+ dc_limits->ppt_apu_sppt_max;
+
+ dc_rog_tunables->ppt_platform_sppt =
+ dc_limits->ppt_platform_sppt_def ?
+ dc_limits->ppt_platform_sppt_def :
+ dc_limits->ppt_platform_sppt_max;
+
+ dc_rog_tunables->nv_dynamic_boost =
+ dc_limits->nv_dynamic_boost_max;
+ dc_rog_tunables->nv_temp_target =
+ dc_limits->nv_temp_target_max;
+ dc_rog_tunables->nv_tgp = dc_limits->nv_tgp_max;
+
+ pr_debug("DC power limits initialized for %s\n", dmi_id->matches[0].substr);
+ } else {
+ pr_debug("No DC PPT limits defined\n");
+ }
+
+ return;
+
+err_nomem:
+ pr_err("Failed to allocate memory for tunables\n");
+}
+
+static int __init asus_fw_init(void)
+{
+ char *wmi_uid;
+
+ wmi_uid = wmi_get_acpi_device_uid(ASUS_WMI_MGMT_GUID);
+ if (!wmi_uid)
+ return -ENODEV;
+
+ /*
+	 * If the UID equals "ASUSWMI" the machine uses the older DCTS method,
+	 * which this driver cannot use; DSTS is required.
+ */
+ if (!strcmp(wmi_uid, ASUS_ACPI_UID_ASUSWMI))
+ return -ENODEV;
+
+ init_rog_tunables();
+
+ /* Must always be last step to ensure data is available */
+ return asus_fw_attr_add();
+}
+
+static void __exit asus_fw_exit(void)
+{
+ int i;
+
+ for (i = ARRAY_SIZE(armoury_attr_groups) - 1; i >= 0; i--) {
+ if (armoury_has_devstate(armoury_attr_groups[i].wmi_devid))
+ sysfs_remove_group(&asus_armoury.fw_attr_kset->kobj,
+ armoury_attr_groups[i].attr_group);
+ }
+
+ if (asus_armoury.gpu_mux_dev_id)
+ sysfs_remove_group(&asus_armoury.fw_attr_kset->kobj, &gpu_mux_mode_attr_group);
+
+ if (asus_armoury.mini_led_dev_id)
+ sysfs_remove_group(&asus_armoury.fw_attr_kset->kobj, &mini_led_mode_attr_group);
+
+ sysfs_remove_file(&asus_armoury.fw_attr_kset->kobj, &pending_reboot.attr);
+ kset_unregister(asus_armoury.fw_attr_kset);
+ device_destroy(&firmware_attributes_class, MKDEV(0, 0));
+
+ kfree(asus_armoury.rog_tunables[ASUS_ROG_TUNABLE_AC]);
+ kfree(asus_armoury.rog_tunables[ASUS_ROG_TUNABLE_DC]);
+}
+
+module_init(asus_fw_init);
+module_exit(asus_fw_exit);
+
+MODULE_IMPORT_NS("ASUS_WMI");
+MODULE_AUTHOR("Luke Jones <luke@ljones.dev>");
+MODULE_DESCRIPTION("ASUS BIOS Configuration Driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("wmi:" ASUS_NB_WMI_EVENT_GUID);
diff --git a/drivers/platform/x86/asus-armoury.h b/drivers/platform/x86/asus-armoury.h
new file mode 100644
index 000000000000..a1bb2005c3f3
--- /dev/null
+++ b/drivers/platform/x86/asus-armoury.h
@@ -0,0 +1,1541 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Definitions for kernel modules using the asus-armoury driver
+ *
+ * Copyright (c) 2024 Luke Jones <luke@ljones.dev>
+ */
+
+#ifndef _ASUS_ARMOURY_H_
+#define _ASUS_ARMOURY_H_
+
+#include <linux/dmi.h>
+#include <linux/platform_device.h>
+#include <linux/sysfs.h>
+#include <linux/types.h>
+
+#define DRIVER_NAME "asus-armoury"
+
+/**
+ * armoury_attr_uint_store() - Send a uint to a WMI method if within min/max.
+ * @kobj: Pointer to the driver object.
+ * @attr: Pointer to the attribute calling this function.
+ * @buf: The buffer to read from; this is parsed as a uint.
+ * @count: Required by sysfs attribute macros, passed in from the calling attr.
+ * @min: Minimum accepted value. Below this returns -EINVAL.
+ * @max: Maximum accepted value. Above this returns -EINVAL.
+ * @store_value: Pointer to where the parsed value should be stored.
+ * @wmi_dev: The WMI function ID to use.
+ *
+ * This function is intended to be generic so it can be called from any "_store"
+ * attribute which works only with integers.
+ *
+ * The integer to be sent to the WMI method is range checked (inclusive) and an
+ * error is returned if it is out of range.
+ *
+ * If the value is valid and the WMI call succeeds, the sysfs attribute is
+ * notified, and if asus_bios_requires_reboot() is true then the pending_reboot
+ * attribute is also notified.
+ *
+ * Returns: Either count, or an error.
+ */
+ssize_t armoury_attr_uint_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t count, u32 min, u32 max,
+ u32 *store_value, u32 wmi_dev);
+
+/**
+ * armoury_attr_uint_show() - Receive a uint from a WMI method.
+ * @kobj: Pointer to the driver object.
+ * @attr: Pointer to the attribute calling this function.
+ * @buf: The buffer to write to, formatted as a uint.
+ * @wmi_dev: The WMI function ID to use.
+ *
+ * This function is intended to be generic so it can be called from any "_show"
+ * attribute which works only with integers.
+ *
+ * Returns: Either the number of bytes written to @buf, or an error.
+ */
+ssize_t armoury_attr_uint_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf, u32 wmi_dev);
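+
+/*
+ * Illustrative sketch only (not part of this driver): a pair of handlers
+ * built on the helpers above, assuming a hypothetical ASUS_WMI_DEVID_FOO
+ * function that accepts values 0..3:
+ *
+ *	static ssize_t foo_show(struct kobject *kobj,
+ *				struct kobj_attribute *attr, char *buf)
+ *	{
+ *		return armoury_attr_uint_show(kobj, attr, buf,
+ *					      ASUS_WMI_DEVID_FOO);
+ *	}
+ *
+ *	static ssize_t foo_store(struct kobject *kobj,
+ *				 struct kobj_attribute *attr,
+ *				 const char *buf, size_t count)
+ *	{
+ *		return armoury_attr_uint_store(kobj, attr, buf, count,
+ *					       0, 3, NULL, ASUS_WMI_DEVID_FOO);
+ *	}
+ */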
+
+#define __ASUS_ATTR_RO(_func, _name) \
+ { \
+ .attr = { .name = __stringify(_name), .mode = 0444 }, \
+ .show = _func##_##_name##_show, \
+ }
+
+#define __ASUS_ATTR_RO_AS(_name, _show) \
+ { \
+ .attr = { .name = __stringify(_name), .mode = 0444 }, \
+ .show = _show, \
+ }
+
+#define __ASUS_ATTR_RW(_func, _name) \
+ __ATTR(_name, 0644, _func##_##_name##_show, _func##_##_name##_store)
+
+#define __WMI_STORE_INT(_attr, _min, _max, _wmi) \
+ static ssize_t _attr##_store(struct kobject *kobj, \
+ struct kobj_attribute *attr, \
+ const char *buf, size_t count) \
+ { \
+ return armoury_attr_uint_store(kobj, attr, buf, count, _min, \
+ _max, NULL, _wmi); \
+ }
+
+#define ASUS_WMI_SHOW_INT(_attr, _wmi) \
+ static ssize_t _attr##_show(struct kobject *kobj, \
+ struct kobj_attribute *attr, char *buf) \
+ { \
+ return armoury_attr_uint_show(kobj, attr, buf, _wmi); \
+ }
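+
+/*
+ * For example (illustrative, with a hypothetical device ID), expanding the
+ * pair below generates foo_current_value_store() bounded to 0..255 together
+ * with the matching foo_current_value_show():
+ *
+ *	__WMI_STORE_INT(foo_current_value, 0, 255, ASUS_WMI_DEVID_FOO);
+ *	ASUS_WMI_SHOW_INT(foo_current_value, ASUS_WMI_DEVID_FOO);
+ */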
+
+/* Create functions and attributes for use in other macros or on their own */
+
+/* Shows a formatted static variable */
+#define __ATTR_SHOW_FMT(_prop, _attrname, _fmt, _val) \
+ static ssize_t _attrname##_##_prop##_show( \
+ struct kobject *kobj, struct kobj_attribute *attr, char *buf) \
+ { \
+ return sysfs_emit(buf, _fmt, _val); \
+ } \
+ static struct kobj_attribute attr_##_attrname##_##_prop = \
+ __ASUS_ATTR_RO(_attrname, _prop)
+
+#define __ATTR_RO_INT_GROUP_ENUM(_attrname, _wmi, _fsname, _possible, _dispname)\
+ ASUS_WMI_SHOW_INT(_attrname##_current_value, _wmi); \
+ static struct kobj_attribute attr_##_attrname##_current_value = \
+ __ASUS_ATTR_RO(_attrname, current_value); \
+ __ATTR_SHOW_FMT(display_name, _attrname, "%s\n", _dispname); \
+ __ATTR_SHOW_FMT(possible_values, _attrname, "%s\n", _possible); \
+ static struct kobj_attribute attr_##_attrname##_type = \
+ __ASUS_ATTR_RO_AS(type, enum_type_show); \
+ static struct attribute *_attrname##_attrs[] = { \
+ &attr_##_attrname##_current_value.attr, \
+ &attr_##_attrname##_display_name.attr, \
+ &attr_##_attrname##_possible_values.attr, \
+ &attr_##_attrname##_type.attr, \
+ NULL \
+ }; \
+ static const struct attribute_group _attrname##_attr_group = { \
+ .name = _fsname, .attrs = _attrname##_attrs \
+ }
+
+#define __ATTR_RW_INT_GROUP_ENUM(_attrname, _minv, _maxv, _wmi, _fsname,\
+ _possible, _dispname) \
+ __WMI_STORE_INT(_attrname##_current_value, _minv, _maxv, _wmi); \
+ ASUS_WMI_SHOW_INT(_attrname##_current_value, _wmi); \
+ static struct kobj_attribute attr_##_attrname##_current_value = \
+ __ASUS_ATTR_RW(_attrname, current_value); \
+ __ATTR_SHOW_FMT(display_name, _attrname, "%s\n", _dispname); \
+ __ATTR_SHOW_FMT(possible_values, _attrname, "%s\n", _possible); \
+ static struct kobj_attribute attr_##_attrname##_type = \
+ __ASUS_ATTR_RO_AS(type, enum_type_show); \
+ static struct attribute *_attrname##_attrs[] = { \
+ &attr_##_attrname##_current_value.attr, \
+ &attr_##_attrname##_display_name.attr, \
+ &attr_##_attrname##_possible_values.attr, \
+ &attr_##_attrname##_type.attr, \
+ NULL \
+ }; \
+ static const struct attribute_group _attrname##_attr_group = { \
+ .name = _fsname, .attrs = _attrname##_attrs \
+ }
+
+/* Boolean style enumeration, base macro. Requires adding show/store */
+#define __ATTR_GROUP_ENUM(_attrname, _fsname, _possible, _dispname) \
+ __ATTR_SHOW_FMT(display_name, _attrname, "%s\n", _dispname); \
+ __ATTR_SHOW_FMT(possible_values, _attrname, "%s\n", _possible); \
+ static struct kobj_attribute attr_##_attrname##_type = \
+ __ASUS_ATTR_RO_AS(type, enum_type_show); \
+ static struct attribute *_attrname##_attrs[] = { \
+ &attr_##_attrname##_current_value.attr, \
+ &attr_##_attrname##_display_name.attr, \
+ &attr_##_attrname##_possible_values.attr, \
+ &attr_##_attrname##_type.attr, \
+ NULL \
+ }; \
+ static const struct attribute_group _attrname##_attr_group = { \
+ .name = _fsname, .attrs = _attrname##_attrs \
+ }
+
+#define ASUS_ATTR_GROUP_BOOL_RO(_attrname, _fsname, _wmi, _dispname) \
+ __ATTR_RO_INT_GROUP_ENUM(_attrname, _wmi, _fsname, "0;1", _dispname)
+
+#define ASUS_ATTR_GROUP_BOOL_RW(_attrname, _fsname, _wmi, _dispname) \
+ __ATTR_RW_INT_GROUP_ENUM(_attrname, 0, 1, _wmi, _fsname, "0;1", _dispname)
+
+#define ASUS_ATTR_GROUP_ENUM_INT_RO(_attrname, _fsname, _wmi, _possible, _dispname) \
+ __ATTR_RO_INT_GROUP_ENUM(_attrname, _wmi, _fsname, _possible, _dispname)
+
+/*
+ * Requires <name>_current_value_show() and <name>_current_value_store()
+ */
+#define ASUS_ATTR_GROUP_BOOL(_attrname, _fsname, _dispname) \
+ static struct kobj_attribute attr_##_attrname##_current_value = \
+ __ASUS_ATTR_RW(_attrname, current_value); \
+ __ATTR_GROUP_ENUM(_attrname, _fsname, "0;1", _dispname)
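+
+/*
+ * Sketch of custom use (the handlers and feature are hypothetical): a boolean
+ * attribute with bespoke logic declares its own current_value handlers before
+ * the group macro takes their addresses:
+ *
+ *	static ssize_t foo_current_value_show(struct kobject *kobj,
+ *					      struct kobj_attribute *attr,
+ *					      char *buf);
+ *	static ssize_t foo_current_value_store(struct kobject *kobj,
+ *					       struct kobj_attribute *attr,
+ *					       const char *buf, size_t count);
+ *
+ *	ASUS_ATTR_GROUP_BOOL(foo, "foo", "Enable the foo feature");
+ */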
+
+/*
+ * Requires <name>_current_value_show(), <name>_current_value_store()
+ * and <name>_possible_values_show()
+ */
+#define ASUS_ATTR_GROUP_ENUM(_attrname, _fsname, _dispname) \
+ __ATTR_SHOW_FMT(display_name, _attrname, "%s\n", _dispname); \
+ static struct kobj_attribute attr_##_attrname##_current_value = \
+ __ASUS_ATTR_RW(_attrname, current_value); \
+ static struct kobj_attribute attr_##_attrname##_possible_values = \
+ __ASUS_ATTR_RO(_attrname, possible_values); \
+ static struct kobj_attribute attr_##_attrname##_type = \
+ __ASUS_ATTR_RO_AS(type, enum_type_show); \
+ static struct attribute *_attrname##_attrs[] = { \
+ &attr_##_attrname##_current_value.attr, \
+ &attr_##_attrname##_display_name.attr, \
+ &attr_##_attrname##_possible_values.attr, \
+ &attr_##_attrname##_type.attr, \
+ NULL \
+ }; \
+ static const struct attribute_group _attrname##_attr_group = { \
+ .name = _fsname, .attrs = _attrname##_attrs \
+ }
+
+#define ASUS_ATTR_GROUP_INT_VALUE_ONLY_RO(_attrname, _fsname, _wmi, _dispname) \
+ ASUS_WMI_SHOW_INT(_attrname##_current_value, _wmi); \
+ static struct kobj_attribute attr_##_attrname##_current_value = \
+ __ASUS_ATTR_RO(_attrname, current_value); \
+ __ATTR_SHOW_FMT(display_name, _attrname, "%s\n", _dispname); \
+ static struct kobj_attribute attr_##_attrname##_type = \
+ __ASUS_ATTR_RO_AS(type, int_type_show); \
+ static struct attribute *_attrname##_attrs[] = { \
+ &attr_##_attrname##_current_value.attr, \
+ &attr_##_attrname##_display_name.attr, \
+ &attr_##_attrname##_type.attr, NULL \
+ }; \
+ static const struct attribute_group _attrname##_attr_group = { \
+ .name = _fsname, .attrs = _attrname##_attrs \
+ }
+
+/*
+ * ROG PPT attributes need slightly different setup as they
+ * require rog_tunables members.
+ */
+
+#define __ROG_TUNABLE_SHOW(_prop, _attrname, _val) \
+ static ssize_t _attrname##_##_prop##_show( \
+ struct kobject *kobj, struct kobj_attribute *attr, char *buf) \
+ { \
+ struct rog_tunables *tunables = get_current_tunables(); \
+ \
+ if (!tunables || !tunables->power_limits) \
+ return -ENODEV; \
+ \
+ return sysfs_emit(buf, "%d\n", tunables->power_limits->_val); \
+ } \
+ static struct kobj_attribute attr_##_attrname##_##_prop = \
+ __ASUS_ATTR_RO(_attrname, _prop)
+
+#define __ROG_TUNABLE_SHOW_DEFAULT(_attrname) \
+ static ssize_t _attrname##_default_value_show( \
+ struct kobject *kobj, struct kobj_attribute *attr, char *buf) \
+ { \
+ struct rog_tunables *tunables = get_current_tunables(); \
+ \
+ if (!tunables || !tunables->power_limits) \
+ return -ENODEV; \
+ \
+ return sysfs_emit( \
+ buf, "%d\n", \
+ tunables->power_limits->_attrname##_def ? \
+ tunables->power_limits->_attrname##_def : \
+ tunables->power_limits->_attrname##_max); \
+ } \
+ static struct kobj_attribute attr_##_attrname##_default_value = \
+ __ASUS_ATTR_RO(_attrname, default_value)
+
+#define __ROG_TUNABLE_RW(_attr, _wmi) \
+ static ssize_t _attr##_current_value_store( \
+ struct kobject *kobj, struct kobj_attribute *attr, \
+ const char *buf, size_t count) \
+ { \
+ struct rog_tunables *tunables = get_current_tunables(); \
+ \
+ if (!tunables || !tunables->power_limits) \
+ return -ENODEV; \
+ \
+ if (tunables->power_limits->_attr##_min == \
+ tunables->power_limits->_attr##_max) \
+ return -EINVAL; \
+ \
+ return armoury_attr_uint_store(kobj, attr, buf, count, \
+ tunables->power_limits->_attr##_min, \
+ tunables->power_limits->_attr##_max, \
+ &tunables->_attr, _wmi); \
+ } \
+ static ssize_t _attr##_current_value_show( \
+ struct kobject *kobj, struct kobj_attribute *attr, char *buf) \
+ { \
+ struct rog_tunables *tunables = get_current_tunables(); \
+ \
+ if (!tunables) \
+ return -ENODEV; \
+ \
+ return sysfs_emit(buf, "%u\n", tunables->_attr); \
+ } \
+ static struct kobj_attribute attr_##_attr##_current_value = \
+ __ASUS_ATTR_RW(_attr, current_value)
+
+#define ASUS_ATTR_GROUP_ROG_TUNABLE(_attrname, _fsname, _wmi, _dispname) \
+ __ROG_TUNABLE_RW(_attrname, _wmi); \
+ __ROG_TUNABLE_SHOW_DEFAULT(_attrname); \
+ __ROG_TUNABLE_SHOW(min_value, _attrname, _attrname##_min); \
+ __ROG_TUNABLE_SHOW(max_value, _attrname, _attrname##_max); \
+ __ATTR_SHOW_FMT(scalar_increment, _attrname, "%d\n", 1); \
+ __ATTR_SHOW_FMT(display_name, _attrname, "%s\n", _dispname); \
+ static struct kobj_attribute attr_##_attrname##_type = \
+ __ASUS_ATTR_RO_AS(type, int_type_show); \
+ static struct attribute *_attrname##_attrs[] = { \
+ &attr_##_attrname##_current_value.attr, \
+ &attr_##_attrname##_default_value.attr, \
+ &attr_##_attrname##_min_value.attr, \
+ &attr_##_attrname##_max_value.attr, \
+ &attr_##_attrname##_scalar_increment.attr, \
+ &attr_##_attrname##_display_name.attr, \
+ &attr_##_attrname##_type.attr, \
+ NULL \
+ }; \
+ static const struct attribute_group _attrname##_attr_group = { \
+ .name = _fsname, .attrs = _attrname##_attrs \
+ }
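+
+/*
+ * As an illustrative sketch (the display name here is invented), a tunable
+ * declared with
+ *
+ *	ASUS_ATTR_GROUP_ROG_TUNABLE(ppt_pl1_spl, "ppt_pl1_spl",
+ *				    ASUS_WMI_DEVID_PPT_PL1_SPL,
+ *				    "Set the CPU sustained power limit");
+ *
+ * exposes current_value, default_value, min_value, max_value,
+ * scalar_increment, display_name and type under
+ * /sys/class/firmware_attributes/asus-armoury/attributes/ppt_pl1_spl/.
+ */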
+
+/* Default is always the maximum value unless *_def is specified */
+struct power_limits {
+ u8 ppt_pl1_spl_min;
+ u8 ppt_pl1_spl_def;
+ u8 ppt_pl1_spl_max;
+ u8 ppt_pl2_sppt_min;
+ u8 ppt_pl2_sppt_def;
+ u8 ppt_pl2_sppt_max;
+ u8 ppt_pl3_fppt_min;
+ u8 ppt_pl3_fppt_def;
+ u8 ppt_pl3_fppt_max;
+ u8 ppt_apu_sppt_min;
+ u8 ppt_apu_sppt_def;
+ u8 ppt_apu_sppt_max;
+ u8 ppt_platform_sppt_min;
+ u8 ppt_platform_sppt_def;
+ u8 ppt_platform_sppt_max;
+ /* Nvidia GPU specific, default is always max */
+	u8 nv_dynamic_boost_def; /* unused, exists for macro */
+	u8 nv_dynamic_boost_min;
+	u8 nv_dynamic_boost_max;
+	u8 nv_temp_target_def; /* unused, exists for macro */
+	u8 nv_temp_target_min;
+	u8 nv_temp_target_max;
+	u8 nv_tgp_def; /* unused, exists for macro */
+ u8 nv_tgp_min;
+ u8 nv_tgp_max;
+};
+
+struct power_data {
+ const struct power_limits *ac_data;
+ const struct power_limits *dc_data;
+ bool requires_fan_curve;
+};
+
+/*
+ * For each available attribute there must be a min and a max.
+ * If *_def is missing, the default is assumed to be equal to the max.
+ */
+static const struct dmi_system_id power_limits[] = {
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "FA401W"),
+ },
+ .driver_data = &(struct power_data) {
+ .ac_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 15,
+ .ppt_pl1_spl_max = 80,
+ .ppt_pl2_sppt_min = 35,
+ .ppt_pl2_sppt_max = 80,
+ .ppt_pl3_fppt_min = 35,
+ .ppt_pl3_fppt_max = 80,
+ .nv_dynamic_boost_min = 5,
+ .nv_dynamic_boost_max = 25,
+ .nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
+ .nv_tgp_min = 55,
+ .nv_tgp_max = 75,
+ },
+ .dc_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 25,
+ .ppt_pl1_spl_max = 30,
+ .ppt_pl2_sppt_min = 31,
+ .ppt_pl2_sppt_max = 44,
+ .ppt_pl3_fppt_min = 45,
+ .ppt_pl3_fppt_max = 65,
+ .nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
+ },
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "FA507N"),
+ },
+ .driver_data = &(struct power_data) {
+ .ac_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 15,
+ .ppt_pl1_spl_max = 80,
+ .ppt_pl2_sppt_min = 35,
+ .ppt_pl2_sppt_max = 80,
+ .ppt_pl3_fppt_min = 35,
+ .ppt_pl3_fppt_max = 80,
+ .nv_dynamic_boost_min = 5,
+ .nv_dynamic_boost_max = 25,
+ .nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
+ },
+ .dc_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 15,
+ .ppt_pl1_spl_def = 45,
+ .ppt_pl1_spl_max = 65,
+ .ppt_pl2_sppt_min = 35,
+ .ppt_pl2_sppt_def = 54,
+ .ppt_pl2_sppt_max = 65,
+ .ppt_pl3_fppt_min = 35,
+ .ppt_pl3_fppt_max = 65,
+ .nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
+ },
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "FA507UV"),
+ },
+ .driver_data = &(struct power_data) {
+ .ac_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 15,
+ .ppt_pl1_spl_max = 80,
+ .ppt_pl2_sppt_min = 35,
+ .ppt_pl2_sppt_max = 80,
+ .ppt_pl3_fppt_min = 35,
+ .ppt_pl3_fppt_max = 80,
+ .nv_dynamic_boost_min = 5,
+ .nv_dynamic_boost_max = 25,
+ .nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
+ .nv_tgp_min = 55,
+ .nv_tgp_max = 115,
+ },
+ .dc_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 15,
+ .ppt_pl1_spl_def = 45,
+ .ppt_pl1_spl_max = 65,
+ .ppt_pl2_sppt_min = 35,
+ .ppt_pl2_sppt_def = 54,
+ .ppt_pl2_sppt_max = 65,
+ .ppt_pl3_fppt_min = 35,
+ .ppt_pl3_fppt_max = 65,
+ .nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
+ },
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "FA507R"),
+ },
+ .driver_data = &(struct power_data) {
+ .ac_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 15,
+ .ppt_pl1_spl_max = 80,
+ .ppt_pl2_sppt_min = 25,
+ .ppt_pl2_sppt_max = 80,
+ .ppt_pl3_fppt_min = 35,
+ .ppt_pl3_fppt_max = 80
+ },
+ .dc_data = NULL,
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "FA507X"),
+ },
+ .driver_data = &(struct power_data) {
+ .ac_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 15,
+ .ppt_pl1_spl_max = 80,
+ .ppt_pl2_sppt_min = 35,
+ .ppt_pl2_sppt_max = 80,
+ .ppt_pl3_fppt_min = 35,
+ .ppt_pl3_fppt_max = 80,
+ .nv_dynamic_boost_min = 5,
+ .nv_dynamic_boost_max = 20,
+ .nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
+ .nv_tgp_min = 55,
+ .nv_tgp_max = 85,
+ },
+ .dc_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 15,
+ .ppt_pl1_spl_def = 45,
+ .ppt_pl1_spl_max = 65,
+ .ppt_pl2_sppt_min = 35,
+ .ppt_pl2_sppt_def = 54,
+ .ppt_pl2_sppt_max = 65,
+ .ppt_pl3_fppt_min = 35,
+ .ppt_pl3_fppt_max = 65,
+ .nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
+ },
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "FA507Z"),
+ },
+ .driver_data = &(struct power_data) {
+ .ac_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 28,
+ .ppt_pl1_spl_max = 65,
+ .ppt_pl2_sppt_min = 28,
+ .ppt_pl2_sppt_max = 105,
+ .nv_dynamic_boost_min = 5,
+ .nv_dynamic_boost_max = 15,
+ .nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
+ .nv_tgp_min = 55,
+ .nv_tgp_max = 85,
+ },
+ .dc_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 25,
+ .ppt_pl1_spl_max = 45,
+ .ppt_pl2_sppt_min = 35,
+ .ppt_pl2_sppt_max = 60,
+ .nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
+ },
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "FA607P"),
+ },
+ .driver_data = &(struct power_data) {
+ .ac_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 30,
+ .ppt_pl1_spl_def = 100,
+ .ppt_pl1_spl_max = 135,
+ .ppt_pl2_sppt_min = 30,
+ .ppt_pl2_sppt_def = 115,
+ .ppt_pl2_sppt_max = 135,
+ .ppt_pl3_fppt_min = 30,
+ .ppt_pl3_fppt_max = 135,
+ .nv_dynamic_boost_min = 5,
+ .nv_dynamic_boost_max = 25,
+ .nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
+ .nv_tgp_min = 55,
+ .nv_tgp_max = 115,
+ },
+ .dc_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 25,
+ .ppt_pl1_spl_def = 45,
+ .ppt_pl1_spl_max = 80,
+ .ppt_pl2_sppt_min = 25,
+ .ppt_pl2_sppt_def = 60,
+ .ppt_pl2_sppt_max = 80,
+ .ppt_pl3_fppt_min = 25,
+ .ppt_pl3_fppt_max = 80,
+ .nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
+ },
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "FA608WI"),
+ },
+ .driver_data = &(struct power_data) {
+ .ac_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 15,
+ .ppt_pl1_spl_def = 90,
+ .ppt_pl1_spl_max = 90,
+ .ppt_pl2_sppt_min = 35,
+ .ppt_pl2_sppt_def = 90,
+ .ppt_pl2_sppt_max = 90,
+ .ppt_pl3_fppt_min = 35,
+ .ppt_pl3_fppt_def = 90,
+ .ppt_pl3_fppt_max = 90,
+ .nv_dynamic_boost_min = 5,
+ .nv_dynamic_boost_max = 25,
+ .nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
+ .nv_tgp_min = 55,
+ .nv_tgp_max = 115,
+ },
+ .dc_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 15,
+ .ppt_pl1_spl_def = 45,
+ .ppt_pl1_spl_max = 65,
+ .ppt_pl2_sppt_min = 35,
+ .ppt_pl2_sppt_def = 54,
+ .ppt_pl2_sppt_max = 65,
+ .ppt_pl3_fppt_min = 35,
+ .ppt_pl3_fppt_def = 65,
+ .ppt_pl3_fppt_max = 65,
+ .nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
+ },
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "FA617NS"),
+ },
+ .driver_data = &(struct power_data) {
+ .ac_data = &(struct power_limits) {
+ .ppt_apu_sppt_min = 15,
+ .ppt_apu_sppt_max = 80,
+ .ppt_platform_sppt_min = 30,
+ .ppt_platform_sppt_max = 120,
+ },
+ .dc_data = &(struct power_limits) {
+ .ppt_apu_sppt_min = 25,
+ .ppt_apu_sppt_max = 35,
+ .ppt_platform_sppt_min = 45,
+ .ppt_platform_sppt_max = 100,
+ },
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "FA617NT"),
+ },
+ .driver_data = &(struct power_data) {
+ .ac_data = &(struct power_limits) {
+ .ppt_apu_sppt_min = 15,
+ .ppt_apu_sppt_max = 80,
+ .ppt_platform_sppt_min = 30,
+ .ppt_platform_sppt_max = 115,
+ },
+ .dc_data = &(struct power_limits) {
+ .ppt_apu_sppt_min = 15,
+ .ppt_apu_sppt_max = 45,
+ .ppt_platform_sppt_min = 30,
+ .ppt_platform_sppt_max = 50,
+ },
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "FA617XS"),
+ },
+ .driver_data = &(struct power_data) {
+ .ac_data = &(struct power_limits) {
+ .ppt_apu_sppt_min = 15,
+ .ppt_apu_sppt_max = 80,
+ .ppt_platform_sppt_min = 30,
+ .ppt_platform_sppt_max = 120,
+ .nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
+ },
+ .dc_data = &(struct power_limits) {
+ .ppt_apu_sppt_min = 25,
+ .ppt_apu_sppt_max = 35,
+ .ppt_platform_sppt_min = 45,
+ .ppt_platform_sppt_max = 100,
+ .nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
+ },
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "FX507VI"),
+ },
+ .driver_data = &(struct power_data) {
+ .ac_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 28,
+ .ppt_pl1_spl_max = 135,
+ .ppt_pl2_sppt_min = 28,
+ .ppt_pl2_sppt_max = 135,
+ .nv_dynamic_boost_min = 5,
+ .nv_dynamic_boost_max = 25,
+ .nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
+ },
+ .dc_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 25,
+ .ppt_pl1_spl_max = 45,
+ .ppt_pl2_sppt_min = 35,
+ .ppt_pl2_sppt_max = 60,
+ .nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
+ },
+ .requires_fan_curve = true,
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "FX507VV"),
+ },
+ .driver_data = &(struct power_data) {
+ .ac_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 28,
+ .ppt_pl1_spl_def = 115,
+ .ppt_pl1_spl_max = 135,
+ .ppt_pl2_sppt_min = 28,
+ .ppt_pl2_sppt_max = 135,
+ .nv_dynamic_boost_min = 5,
+ .nv_dynamic_boost_max = 25,
+ .nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
+ },
+ .dc_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 25,
+ .ppt_pl1_spl_max = 45,
+ .ppt_pl2_sppt_min = 35,
+ .ppt_pl2_sppt_max = 60,
+ .nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
+ },
+ .requires_fan_curve = true,
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "FX507Z"),
+ },
+ .driver_data = &(struct power_data) {
+ .ac_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 28,
+ .ppt_pl1_spl_max = 90,
+ .ppt_pl2_sppt_min = 28,
+ .ppt_pl2_sppt_max = 135,
+ .nv_dynamic_boost_min = 5,
+ .nv_dynamic_boost_max = 15,
+ },
+ .dc_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 25,
+ .ppt_pl1_spl_max = 45,
+ .ppt_pl2_sppt_min = 35,
+ .ppt_pl2_sppt_max = 60,
+ },
+ .requires_fan_curve = true,
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "GA401Q"),
+ },
+ .driver_data = &(struct power_data) {
+ .ac_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 15,
+ .ppt_pl1_spl_max = 80,
+ .ppt_pl2_sppt_min = 15,
+ .ppt_pl2_sppt_max = 80,
+ },
+ .dc_data = NULL,
+ },
+ },
+ {
+ .matches = {
+			/* This model is all-AMD; no Nvidia dGPU */
+ DMI_MATCH(DMI_BOARD_NAME, "GA402R"),
+ },
+ .driver_data = &(struct power_data) {
+ .ac_data = &(struct power_limits) {
+ .ppt_apu_sppt_min = 15,
+ .ppt_apu_sppt_max = 80,
+ .ppt_platform_sppt_min = 30,
+ .ppt_platform_sppt_max = 115,
+ },
+ .dc_data = &(struct power_limits) {
+ .ppt_apu_sppt_min = 25,
+ .ppt_apu_sppt_def = 30,
+ .ppt_apu_sppt_max = 45,
+ .ppt_platform_sppt_min = 40,
+ .ppt_platform_sppt_max = 60,
+ },
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "GA402X"),
+ },
+ .driver_data = &(struct power_data) {
+ .ac_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 15,
+ .ppt_pl1_spl_def = 35,
+ .ppt_pl1_spl_max = 80,
+ .ppt_pl2_sppt_min = 25,
+ .ppt_pl2_sppt_def = 65,
+ .ppt_pl2_sppt_max = 80,
+ .ppt_pl3_fppt_min = 35,
+ .ppt_pl3_fppt_max = 80,
+ .nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
+ },
+ .dc_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 15,
+ .ppt_pl1_spl_max = 35,
+ .ppt_pl2_sppt_min = 25,
+ .ppt_pl2_sppt_max = 35,
+ .ppt_pl3_fppt_min = 35,
+ .ppt_pl3_fppt_max = 65,
+ .nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
+ },
+ .requires_fan_curve = true,
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "GA403U"),
+ },
+ .driver_data = &(struct power_data) {
+ .ac_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 15,
+ .ppt_pl1_spl_max = 80,
+ .ppt_pl2_sppt_min = 25,
+ .ppt_pl2_sppt_max = 80,
+ .ppt_pl3_fppt_min = 35,
+ .ppt_pl3_fppt_max = 80,
+ .nv_dynamic_boost_min = 5,
+ .nv_dynamic_boost_max = 25,
+ .nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
+ .nv_tgp_min = 55,
+ .nv_tgp_max = 65,
+ },
+ .dc_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 15,
+ .ppt_pl1_spl_max = 35,
+ .ppt_pl2_sppt_min = 25,
+ .ppt_pl2_sppt_max = 35,
+ .ppt_pl3_fppt_min = 35,
+ .ppt_pl3_fppt_max = 65,
+ .nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
+ },
+ .requires_fan_curve = true,
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "GA503QR"),
+ },
+ .driver_data = &(struct power_data) {
+ .ac_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 15,
+ .ppt_pl1_spl_def = 35,
+ .ppt_pl1_spl_max = 80,
+ .ppt_pl2_sppt_min = 65,
+ .ppt_pl2_sppt_max = 80,
+ },
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "GA503R"),
+ },
+ .driver_data = &(struct power_data) {
+ .ac_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 15,
+ .ppt_pl1_spl_def = 35,
+ .ppt_pl1_spl_max = 80,
+ .ppt_pl2_sppt_min = 35,
+ .ppt_pl2_sppt_def = 65,
+ .ppt_pl2_sppt_max = 80,
+ .ppt_pl3_fppt_min = 35,
+ .ppt_pl3_fppt_max = 80,
+ .nv_dynamic_boost_min = 5,
+ .nv_dynamic_boost_max = 20,
+ .nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
+ },
+ .dc_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 15,
+ .ppt_pl1_spl_def = 25,
+ .ppt_pl1_spl_max = 65,
+ .ppt_pl2_sppt_min = 35,
+ .ppt_pl2_sppt_def = 54,
+ .ppt_pl2_sppt_max = 60,
+ .ppt_pl3_fppt_min = 35,
+ .ppt_pl3_fppt_max = 65,
+ },
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "GA605W"),
+ },
+ .driver_data = &(struct power_data) {
+ .ac_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 15,
+ .ppt_pl1_spl_max = 80,
+ .ppt_pl2_sppt_min = 35,
+ .ppt_pl2_sppt_max = 80,
+ .ppt_pl3_fppt_min = 35,
+ .ppt_pl3_fppt_max = 80,
+ .nv_dynamic_boost_min = 5,
+ .nv_dynamic_boost_max = 20,
+ .nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
+ .nv_tgp_min = 55,
+ .nv_tgp_max = 85,
+ },
+ .dc_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 25,
+ .ppt_pl1_spl_max = 35,
+ .ppt_pl2_sppt_min = 31,
+ .ppt_pl2_sppt_max = 44,
+ .ppt_pl3_fppt_min = 45,
+ .ppt_pl3_fppt_max = 65,
+ .nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
+ },
+ .requires_fan_curve = true,
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "GU603Z"),
+ },
+ .driver_data = &(struct power_data) {
+ .ac_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 25,
+ .ppt_pl1_spl_max = 60,
+ .ppt_pl2_sppt_min = 25,
+ .ppt_pl2_sppt_max = 135,
+ .nv_dynamic_boost_min = 5,
+ .nv_dynamic_boost_max = 20,
+ .nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
+ },
+ .dc_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 25,
+ .ppt_pl1_spl_max = 40,
+ .ppt_pl2_sppt_min = 25,
+ .ppt_pl2_sppt_max = 40,
+ .nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
+ }
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "GU604V"),
+ },
+ .driver_data = &(struct power_data) {
+ .ac_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 65,
+ .ppt_pl1_spl_max = 120,
+ .ppt_pl2_sppt_min = 65,
+ .ppt_pl2_sppt_max = 150,
+ .nv_dynamic_boost_min = 5,
+ .nv_dynamic_boost_max = 25,
+ .nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
+ },
+ .dc_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 25,
+ .ppt_pl1_spl_max = 40,
+ .ppt_pl2_sppt_min = 35,
+ .ppt_pl2_sppt_def = 40,
+ .ppt_pl2_sppt_max = 60,
+ .nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
+ },
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "GU605CW"),
+ },
+ .driver_data = &(struct power_data) {
+ .ac_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 45,
+ .ppt_pl1_spl_max = 85,
+ .ppt_pl2_sppt_min = 56,
+ .ppt_pl2_sppt_max = 110,
+ .nv_dynamic_boost_min = 5,
+ .nv_dynamic_boost_max = 20,
+ .nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
+ .nv_tgp_min = 80,
+ .nv_tgp_def = 90,
+ .nv_tgp_max = 110,
+ },
+ .dc_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 25,
+ .ppt_pl1_spl_max = 85,
+ .ppt_pl2_sppt_min = 32,
+ .ppt_pl2_sppt_max = 110,
+ .nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
+ },
+ .requires_fan_curve = true,
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "GU605CX"),
+ },
+ .driver_data = &(struct power_data) {
+ .ac_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 45,
+ .ppt_pl1_spl_max = 85,
+ .ppt_pl2_sppt_min = 56,
+ .ppt_pl2_sppt_max = 110,
+ .nv_dynamic_boost_min = 5,
+ .nv_dynamic_boost_max = 20,
+				.nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
+ .nv_tgp_min = 95,
+ .nv_tgp_def = 100,
+ .nv_tgp_max = 110,
+ },
+ .dc_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 25,
+ .ppt_pl1_spl_max = 85,
+ .ppt_pl2_sppt_min = 32,
+ .ppt_pl2_sppt_max = 110,
+ .nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
+ },
+ .requires_fan_curve = true,
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "GU605M"),
+ },
+ .driver_data = &(struct power_data) {
+ .ac_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 28,
+ .ppt_pl1_spl_max = 90,
+ .ppt_pl2_sppt_min = 28,
+ .ppt_pl2_sppt_max = 135,
+ .nv_dynamic_boost_min = 5,
+ .nv_dynamic_boost_max = 20,
+ .nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
+ },
+ .dc_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 25,
+ .ppt_pl1_spl_max = 35,
+ .ppt_pl2_sppt_min = 38,
+ .ppt_pl2_sppt_max = 53,
+ .nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
+ },
+ .requires_fan_curve = true,
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "GV301Q"),
+ },
+ .driver_data = &(struct power_data) {
+ .ac_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 15,
+ .ppt_pl1_spl_max = 45,
+ .ppt_pl2_sppt_min = 65,
+ .ppt_pl2_sppt_max = 80,
+ },
+ .dc_data = NULL,
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "GV301R"),
+ },
+ .driver_data = &(struct power_data) {
+ .ac_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 15,
+ .ppt_pl1_spl_max = 45,
+ .ppt_pl2_sppt_min = 25,
+ .ppt_pl2_sppt_max = 54,
+ .ppt_pl3_fppt_min = 35,
+ .ppt_pl3_fppt_max = 65,
+ .nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
+ },
+ .dc_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 15,
+ .ppt_pl1_spl_max = 35,
+ .ppt_pl2_sppt_min = 25,
+ .ppt_pl2_sppt_max = 35,
+ .ppt_pl3_fppt_min = 35,
+ .ppt_pl3_fppt_max = 65,
+ .nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
+ },
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "GV601R"),
+ },
+ .driver_data = &(struct power_data) {
+ .ac_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 15,
+ .ppt_pl1_spl_def = 35,
+ .ppt_pl1_spl_max = 90,
+ .ppt_pl2_sppt_min = 35,
+ .ppt_pl2_sppt_def = 54,
+ .ppt_pl2_sppt_max = 100,
+ .ppt_pl3_fppt_min = 35,
+ .ppt_pl3_fppt_def = 80,
+ .ppt_pl3_fppt_max = 125,
+ .nv_dynamic_boost_min = 5,
+ .nv_dynamic_boost_max = 25,
+ .nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
+ },
+ .dc_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 15,
+ .ppt_pl1_spl_def = 28,
+ .ppt_pl1_spl_max = 65,
+ .ppt_pl2_sppt_min = 35,
+ .ppt_pl2_sppt_def = 54,
+ .ppt_pl2_sppt_max = 60,
+ .ppt_pl3_fppt_min = 35,
+ .ppt_pl3_fppt_def = 80,
+ .ppt_pl3_fppt_max = 65,
+ .nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
+ },
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "GV601V"),
+ },
+ .driver_data = &(struct power_data) {
+ .ac_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 28,
+ .ppt_pl1_spl_def = 100,
+ .ppt_pl1_spl_max = 110,
+ .ppt_pl2_sppt_min = 28,
+ .ppt_pl2_sppt_max = 135,
+ .nv_dynamic_boost_min = 5,
+ .nv_dynamic_boost_max = 20,
+ .nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
+ },
+ .dc_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 25,
+ .ppt_pl1_spl_max = 40,
+ .ppt_pl2_sppt_min = 35,
+ .ppt_pl2_sppt_def = 40,
+ .ppt_pl2_sppt_max = 60,
+ .nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
+ },
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "GX650P"),
+ },
+ .driver_data = &(struct power_data) {
+ .ac_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 15,
+ .ppt_pl1_spl_def = 110,
+ .ppt_pl1_spl_max = 130,
+ .ppt_pl2_sppt_min = 35,
+ .ppt_pl2_sppt_def = 125,
+ .ppt_pl2_sppt_max = 130,
+ .ppt_pl3_fppt_min = 35,
+ .ppt_pl3_fppt_def = 125,
+ .ppt_pl3_fppt_max = 135,
+ .nv_dynamic_boost_min = 5,
+ .nv_dynamic_boost_max = 25,
+ .nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
+ },
+ .dc_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 15,
+ .ppt_pl1_spl_def = 25,
+ .ppt_pl1_spl_max = 65,
+ .ppt_pl2_sppt_min = 35,
+ .ppt_pl2_sppt_def = 35,
+ .ppt_pl2_sppt_max = 65,
+ .ppt_pl3_fppt_min = 35,
+ .ppt_pl3_fppt_def = 42,
+ .ppt_pl3_fppt_max = 65,
+ .nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
+ },
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "G513I"),
+ },
+ .driver_data = &(struct power_data) {
+ .ac_data = &(struct power_limits) {
+ /* Yes this laptop is very limited */
+ .ppt_pl1_spl_min = 15,
+ .ppt_pl1_spl_max = 80,
+ .ppt_pl2_sppt_min = 15,
+ .ppt_pl2_sppt_max = 80,
+ },
+ .dc_data = NULL,
+ .requires_fan_curve = true,
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "G513QM"),
+ },
+ .driver_data = &(struct power_data) {
+ .ac_data = &(struct power_limits) {
+ /* Yes this laptop is very limited */
+ .ppt_pl1_spl_min = 15,
+ .ppt_pl1_spl_max = 100,
+ .ppt_pl2_sppt_min = 15,
+ .ppt_pl2_sppt_max = 190,
+ },
+ .dc_data = NULL,
+ .requires_fan_curve = true,
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "G513R"),
+ },
+ .driver_data = &(struct power_data) {
+ .ac_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 35,
+ .ppt_pl1_spl_max = 90,
+ .ppt_pl2_sppt_min = 54,
+ .ppt_pl2_sppt_max = 100,
+ .ppt_pl3_fppt_min = 54,
+ .ppt_pl3_fppt_max = 125,
+ .nv_dynamic_boost_min = 5,
+ .nv_dynamic_boost_max = 25,
+ .nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
+ },
+ .dc_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 28,
+ .ppt_pl1_spl_max = 50,
+ .ppt_pl2_sppt_min = 28,
+ .ppt_pl2_sppt_max = 50,
+ .ppt_pl3_fppt_min = 28,
+ .ppt_pl3_fppt_max = 65,
+ .nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
+ },
+ .requires_fan_curve = true,
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "G614J"),
+ },
+ .driver_data = &(struct power_data) {
+ .ac_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 28,
+ .ppt_pl1_spl_max = 140,
+ .ppt_pl2_sppt_min = 28,
+ .ppt_pl2_sppt_max = 175,
+ .nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
+ .nv_dynamic_boost_min = 5,
+ .nv_dynamic_boost_max = 25,
+ },
+ .dc_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 25,
+ .ppt_pl1_spl_max = 55,
+ .ppt_pl2_sppt_min = 25,
+ .ppt_pl2_sppt_max = 70,
+ .nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
+ },
+ .requires_fan_curve = true,
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "G634J"),
+ },
+ .driver_data = &(struct power_data) {
+ .ac_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 28,
+ .ppt_pl1_spl_max = 140,
+ .ppt_pl2_sppt_min = 28,
+ .ppt_pl2_sppt_max = 175,
+ .nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
+ .nv_dynamic_boost_min = 5,
+ .nv_dynamic_boost_max = 25,
+ },
+ .dc_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 25,
+ .ppt_pl1_spl_max = 55,
+ .ppt_pl2_sppt_min = 25,
+ .ppt_pl2_sppt_max = 70,
+ .nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
+ },
+ .requires_fan_curve = true,
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "G713PV"),
+ },
+ .driver_data = &(struct power_data) {
+ .ac_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 30,
+ .ppt_pl1_spl_def = 120,
+ .ppt_pl1_spl_max = 130,
+ .ppt_pl2_sppt_min = 65,
+ .ppt_pl2_sppt_def = 125,
+ .ppt_pl2_sppt_max = 130,
+ .ppt_pl3_fppt_min = 65,
+ .ppt_pl3_fppt_def = 125,
+ .ppt_pl3_fppt_max = 130,
+ .nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
+ .nv_dynamic_boost_min = 5,
+ .nv_dynamic_boost_max = 25,
+ },
+ .dc_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 25,
+ .ppt_pl1_spl_max = 65,
+ .ppt_pl2_sppt_min = 25,
+ .ppt_pl2_sppt_max = 65,
+ .ppt_pl3_fppt_min = 35,
+ .ppt_pl3_fppt_max = 75,
+ .nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
+ },
+ .requires_fan_curve = true,
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "G733C"),
+ },
+ .driver_data = &(struct power_data) {
+ .ac_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 28,
+ .ppt_pl1_spl_max = 170,
+ .ppt_pl2_sppt_min = 28,
+ .ppt_pl2_sppt_max = 175,
+ .nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
+ .nv_dynamic_boost_min = 5,
+ .nv_dynamic_boost_max = 25,
+ },
+ .dc_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 28,
+ .ppt_pl1_spl_max = 35,
+ .ppt_pl2_sppt_min = 28,
+ .ppt_pl2_sppt_max = 35,
+ .nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
+ },
+ .requires_fan_curve = true,
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "G733P"),
+ },
+ .driver_data = &(struct power_data) {
+ .ac_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 30,
+ .ppt_pl1_spl_def = 100,
+ .ppt_pl1_spl_max = 130,
+ .ppt_pl2_sppt_min = 65,
+ .ppt_pl2_sppt_def = 125,
+ .ppt_pl2_sppt_max = 130,
+ .ppt_pl3_fppt_min = 65,
+ .ppt_pl3_fppt_def = 125,
+ .ppt_pl3_fppt_max = 130,
+ .nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
+ .nv_dynamic_boost_min = 5,
+ .nv_dynamic_boost_max = 25,
+ },
+ .dc_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 25,
+ .ppt_pl1_spl_max = 65,
+ .ppt_pl2_sppt_min = 25,
+ .ppt_pl2_sppt_max = 65,
+ .ppt_pl3_fppt_min = 35,
+ .ppt_pl3_fppt_max = 75,
+ .nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
+ },
+ .requires_fan_curve = true,
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "G814J"),
+ },
+ .driver_data = &(struct power_data) {
+ .ac_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 28,
+ .ppt_pl1_spl_max = 140,
+ .ppt_pl2_sppt_min = 28,
+ .ppt_pl2_sppt_max = 140,
+ .nv_dynamic_boost_min = 5,
+ .nv_dynamic_boost_max = 25,
+ },
+ .dc_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 25,
+ .ppt_pl1_spl_max = 55,
+ .ppt_pl2_sppt_min = 25,
+ .ppt_pl2_sppt_max = 70,
+ },
+ .requires_fan_curve = true,
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "G834J"),
+ },
+ .driver_data = &(struct power_data) {
+ .ac_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 28,
+ .ppt_pl1_spl_max = 140,
+ .ppt_pl2_sppt_min = 28,
+ .ppt_pl2_sppt_max = 175,
+ .nv_dynamic_boost_min = 5,
+ .nv_dynamic_boost_max = 25,
+ .nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
+ },
+ .dc_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 25,
+ .ppt_pl1_spl_max = 55,
+ .ppt_pl2_sppt_min = 25,
+ .ppt_pl2_sppt_max = 70,
+ .nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
+ },
+ .requires_fan_curve = true,
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "H7606W"),
+ },
+ .driver_data = &(struct power_data) {
+ .ac_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 15,
+ .ppt_pl1_spl_max = 80,
+ .ppt_pl2_sppt_min = 35,
+ .ppt_pl2_sppt_max = 80,
+ .ppt_pl3_fppt_min = 35,
+ .ppt_pl3_fppt_max = 80,
+ .nv_dynamic_boost_min = 5,
+ .nv_dynamic_boost_max = 20,
+ .nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
+ .nv_tgp_min = 55,
+ .nv_tgp_max = 85,
+ },
+ .dc_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 25,
+ .ppt_pl1_spl_max = 35,
+ .ppt_pl2_sppt_min = 31,
+ .ppt_pl2_sppt_max = 44,
+ .ppt_pl3_fppt_min = 45,
+ .ppt_pl3_fppt_max = 65,
+ .nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
+ },
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "RC71"),
+ },
+ .driver_data = &(struct power_data) {
+ .ac_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 7,
+ .ppt_pl1_spl_max = 30,
+ .ppt_pl2_sppt_min = 15,
+ .ppt_pl2_sppt_max = 43,
+ .ppt_pl3_fppt_min = 15,
+ .ppt_pl3_fppt_max = 53,
+ },
+ .dc_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 7,
+ .ppt_pl1_spl_def = 15,
+ .ppt_pl1_spl_max = 25,
+ .ppt_pl2_sppt_min = 15,
+ .ppt_pl2_sppt_def = 20,
+ .ppt_pl2_sppt_max = 30,
+ .ppt_pl3_fppt_min = 15,
+ .ppt_pl3_fppt_def = 25,
+ .ppt_pl3_fppt_max = 35,
+ },
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "RC72"),
+ },
+ .driver_data = &(struct power_data) {
+ .ac_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 7,
+ .ppt_pl1_spl_max = 30,
+ .ppt_pl2_sppt_min = 15,
+ .ppt_pl2_sppt_max = 43,
+ .ppt_pl3_fppt_min = 15,
+ .ppt_pl3_fppt_max = 53,
+ },
+ .dc_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 7,
+ .ppt_pl1_spl_def = 17,
+ .ppt_pl1_spl_max = 25,
+ .ppt_pl2_sppt_min = 15,
+ .ppt_pl2_sppt_def = 24,
+ .ppt_pl2_sppt_max = 30,
+ .ppt_pl3_fppt_min = 15,
+ .ppt_pl3_fppt_def = 30,
+ .ppt_pl3_fppt_max = 35,
+ },
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "RC73XA"),
+ },
+ .driver_data = &(struct power_data) {
+ .ac_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 7,
+ .ppt_pl1_spl_max = 35,
+ .ppt_pl2_sppt_min = 14,
+ .ppt_pl2_sppt_max = 45,
+ .ppt_pl3_fppt_min = 19,
+ .ppt_pl3_fppt_max = 55,
+ },
+ .dc_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 7,
+ .ppt_pl1_spl_def = 17,
+ .ppt_pl1_spl_max = 35,
+ .ppt_pl2_sppt_min = 13,
+ .ppt_pl2_sppt_def = 21,
+ .ppt_pl2_sppt_max = 45,
+ .ppt_pl3_fppt_min = 19,
+ .ppt_pl3_fppt_def = 26,
+ .ppt_pl3_fppt_max = 55,
+ },
+ },
+ },
+ {}
+};
+
+#endif /* _ASUS_ARMOURY_H_ */
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
index e72a2b5d158e..4aec7ec69250 100644
--- a/drivers/platform/x86/asus-wmi.c
+++ b/drivers/platform/x86/asus-wmi.c
@@ -15,6 +15,7 @@
#include <linux/acpi.h>
#include <linux/backlight.h>
+#include <linux/bits.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/dmi.h>
@@ -30,6 +31,7 @@
#include <linux/pci.h>
#include <linux/pci_hotplug.h>
#include <linux/platform_data/x86/asus-wmi.h>
+#include <linux/platform_data/x86/asus-wmi-leds-ids.h>
#include <linux/platform_device.h>
#include <linux/platform_profile.h>
#include <linux/power_supply.h>
@@ -55,8 +57,6 @@ module_param(fnlock_default, bool, 0444);
#define to_asus_wmi_driver(pdrv) \
(container_of((pdrv), struct asus_wmi_driver, platform_driver))
-#define ASUS_WMI_MGMT_GUID "97845ED0-4E6D-11DE-8A39-0800200C9A66"
-
#define NOTIFY_BRNUP_MIN 0x11
#define NOTIFY_BRNUP_MAX 0x1f
#define NOTIFY_BRNDOWN_MIN 0x20
@@ -105,8 +105,6 @@ module_param(fnlock_default, bool, 0444);
#define USB_INTEL_XUSB2PR 0xD0
#define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI 0x9c31
-#define ASUS_ACPI_UID_ASUSWMI "ASUSWMI"
-
#define WMI_EVENT_MASK 0xFFFF
#define FAN_CURVE_POINTS 8
@@ -340,6 +338,13 @@ struct asus_wmi {
/* Global to allow setting externally without requiring driver data */
static enum asus_ally_mcu_hack use_ally_mcu_hack = ASUS_WMI_ALLY_MCU_HACK_INIT;
+#if IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS)
+static void asus_wmi_show_deprecated(void)
+{
+ pr_notice_once("Accessing attributes through /sys/bus/platform/asus_wmi is deprecated and will be removed in a future release. Please switch over to /sys/class/firmware_attributes.\n");
+}
+#endif /* IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS) */
+
/* WMI ************************************************************************/
static int asus_wmi_evaluate_method3(u32 method_id,
@@ -390,7 +395,7 @@ int asus_wmi_evaluate_method(u32 method_id, u32 arg0, u32 arg1, u32 *retval)
{
return asus_wmi_evaluate_method3(method_id, arg0, arg1, 0, retval);
}
-EXPORT_SYMBOL_GPL(asus_wmi_evaluate_method);
+EXPORT_SYMBOL_NS_GPL(asus_wmi_evaluate_method, "ASUS_WMI");
static int asus_wmi_evaluate_method5(u32 method_id,
u32 arg0, u32 arg1, u32 arg2, u32 arg3, u32 arg4, u32 *retval)
@@ -554,12 +559,52 @@ static int asus_wmi_get_devstate(struct asus_wmi *asus, u32 dev_id, u32 *retval)
return 0;
}
-int asus_wmi_set_devstate(u32 dev_id, u32 ctrl_param,
- u32 *retval)
+/**
+ * asus_wmi_get_devstate_dsts() - Get the WMI function state.
+ * @dev_id: The WMI function ID to query.
+ * @retval: A pointer to where to store the value returned from WMI.
+ *
+ * Returns:
+ * * %-ENODEV - the WMI function is unsupported.
+ * * %0 - successful and retval is filled.
+ * * %other - error from WMI call.
+ */
+int asus_wmi_get_devstate_dsts(u32 dev_id, u32 *retval)
+{
+ int err;
+
+ err = asus_wmi_evaluate_method(ASUS_WMI_METHODID_DSTS, dev_id, 0, retval);
+ if (err)
+ return err;
+
+ if ((*retval & ASUS_WMI_DSTS_PRESENCE_BIT) == 0x00)
+ return -ENODEV;
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(asus_wmi_get_devstate_dsts, "ASUS_WMI");
+
+/**
+ * asus_wmi_set_devstate() - Set the WMI function state.
+ * @dev_id: The WMI function to call.
+ * @ctrl_param: The argument to be used for this WMI function.
+ * @retval: A pointer to where to store the value returned from WMI.
+ *
+ * Note: an asus_wmi_set_devstate() call must be paired with an
+ * asus_wmi_get_devstate_dsts() call to check that the WMI function is
+ * supported.
+ *
+ * Returns:
+ * * %-ENODEV - the WMI function is unsupported.
+ * * %0 - successful and retval is filled.
+ * * %other - error from WMI call.
+ */
+int asus_wmi_set_devstate(u32 dev_id, u32 ctrl_param, u32 *retval)
{
return asus_wmi_evaluate_method(ASUS_WMI_METHODID_DEVS, dev_id,
ctrl_param, retval);
}
+EXPORT_SYMBOL_NS_GPL(asus_wmi_set_devstate, "ASUS_WMI");
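+
+/*
+ * Illustrative pairing only (ASUS_WMI_DEVID_FOO is hypothetical): check that
+ * the function is present via DSTS before writing it via DEVS:
+ *
+ *	u32 retval;
+ *
+ *	if (!asus_wmi_get_devstate_dsts(ASUS_WMI_DEVID_FOO, &retval))
+ *		asus_wmi_set_devstate(ASUS_WMI_DEVID_FOO, 1, &retval);
+ */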
/* Helper for special devices with magic return codes */
static int asus_wmi_get_devstate_bits(struct asus_wmi *asus,
@@ -692,6 +737,7 @@ static void asus_wmi_tablet_mode_get_state(struct asus_wmi *asus)
}
/* Charging mode, 1=Barrel, 2=USB ******************************************/
+#if IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS)
static ssize_t charge_mode_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -702,12 +748,16 @@ static ssize_t charge_mode_show(struct device *dev,
if (result < 0)
return result;
+ asus_wmi_show_deprecated();
+
return sysfs_emit(buf, "%d\n", value & 0xff);
}
static DEVICE_ATTR_RO(charge_mode);
+#endif /* IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS) */
/* dGPU ********************************************************************/
+#if IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS)
static ssize_t dgpu_disable_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -718,6 +768,8 @@ static ssize_t dgpu_disable_show(struct device *dev,
if (result < 0)
return result;
+ asus_wmi_show_deprecated();
+
return sysfs_emit(buf, "%d\n", result);
}
@@ -771,8 +823,10 @@ static ssize_t dgpu_disable_store(struct device *dev,
return count;
}
static DEVICE_ATTR_RW(dgpu_disable);
+#endif /* IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS) */
/* eGPU ********************************************************************/
+#if IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS)
static ssize_t egpu_enable_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -783,6 +837,8 @@ static ssize_t egpu_enable_show(struct device *dev,
if (result < 0)
return result;
+ asus_wmi_show_deprecated();
+
return sysfs_emit(buf, "%d\n", result);
}
@@ -839,8 +895,10 @@ static ssize_t egpu_enable_store(struct device *dev,
return count;
}
static DEVICE_ATTR_RW(egpu_enable);
+#endif /* IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS) */
/* Is eGPU connected? *********************************************************/
+#if IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS)
static ssize_t egpu_connected_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -851,12 +909,16 @@ static ssize_t egpu_connected_show(struct device *dev,
if (result < 0)
return result;
+ asus_wmi_show_deprecated();
+
return sysfs_emit(buf, "%d\n", result);
}
static DEVICE_ATTR_RO(egpu_connected);
+#endif /* IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS) */
/* gpu mux switch *************************************************************/
+#if IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS)
static ssize_t gpu_mux_mode_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -867,6 +929,8 @@ static ssize_t gpu_mux_mode_show(struct device *dev,
if (result < 0)
return result;
+ asus_wmi_show_deprecated();
+
return sysfs_emit(buf, "%d\n", result);
}
@@ -925,6 +989,7 @@ static ssize_t gpu_mux_mode_store(struct device *dev,
return count;
}
static DEVICE_ATTR_RW(gpu_mux_mode);
+#endif /* IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS) */
/* TUF Laptop Keyboard RGB Modes **********************************************/
static ssize_t kbd_rgb_mode_store(struct device *dev,
@@ -1048,6 +1113,7 @@ static const struct attribute_group *kbd_rgb_mode_groups[] = {
};
/* Tunable: PPT: Intel=PL1, AMD=SPPT *****************************************/
+#if IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS)
static ssize_t ppt_pl2_sppt_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
@@ -1086,6 +1152,8 @@ static ssize_t ppt_pl2_sppt_show(struct device *dev,
{
struct asus_wmi *asus = dev_get_drvdata(dev);
+ asus_wmi_show_deprecated();
+
return sysfs_emit(buf, "%u\n", asus->ppt_pl2_sppt);
}
static DEVICE_ATTR_RW(ppt_pl2_sppt);
@@ -1128,6 +1196,8 @@ static ssize_t ppt_pl1_spl_show(struct device *dev,
{
struct asus_wmi *asus = dev_get_drvdata(dev);
+ asus_wmi_show_deprecated();
+
return sysfs_emit(buf, "%u\n", asus->ppt_pl1_spl);
}
static DEVICE_ATTR_RW(ppt_pl1_spl);
@@ -1148,7 +1218,7 @@ static ssize_t ppt_fppt_store(struct device *dev,
if (value < PPT_TOTAL_MIN || value > PPT_TOTAL_MAX)
return -EINVAL;
- err = asus_wmi_set_devstate(ASUS_WMI_DEVID_PPT_FPPT, value, &result);
+ err = asus_wmi_set_devstate(ASUS_WMI_DEVID_PPT_PL3_FPPT, value, &result);
if (err) {
pr_warn("Failed to set ppt_fppt: %d\n", err);
return err;
@@ -1171,6 +1241,8 @@ static ssize_t ppt_fppt_show(struct device *dev,
{
struct asus_wmi *asus = dev_get_drvdata(dev);
+ asus_wmi_show_deprecated();
+
return sysfs_emit(buf, "%u\n", asus->ppt_fppt);
}
static DEVICE_ATTR_RW(ppt_fppt);
@@ -1214,6 +1286,8 @@ static ssize_t ppt_apu_sppt_show(struct device *dev,
{
struct asus_wmi *asus = dev_get_drvdata(dev);
+ asus_wmi_show_deprecated();
+
return sysfs_emit(buf, "%u\n", asus->ppt_apu_sppt);
}
static DEVICE_ATTR_RW(ppt_apu_sppt);
@@ -1257,6 +1331,8 @@ static ssize_t ppt_platform_sppt_show(struct device *dev,
{
struct asus_wmi *asus = dev_get_drvdata(dev);
+ asus_wmi_show_deprecated();
+
return sysfs_emit(buf, "%u\n", asus->ppt_platform_sppt);
}
static DEVICE_ATTR_RW(ppt_platform_sppt);
@@ -1300,6 +1376,8 @@ static ssize_t nv_dynamic_boost_show(struct device *dev,
{
struct asus_wmi *asus = dev_get_drvdata(dev);
+ asus_wmi_show_deprecated();
+
return sysfs_emit(buf, "%u\n", asus->nv_dynamic_boost);
}
static DEVICE_ATTR_RW(nv_dynamic_boost);
@@ -1343,9 +1421,12 @@ static ssize_t nv_temp_target_show(struct device *dev,
{
struct asus_wmi *asus = dev_get_drvdata(dev);
+ asus_wmi_show_deprecated();
+
return sysfs_emit(buf, "%u\n", asus->nv_temp_target);
}
static DEVICE_ATTR_RW(nv_temp_target);
+#endif /* IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS) */
/* Ally MCU Powersave ********************************************************/
@@ -1386,6 +1467,7 @@ void set_ally_mcu_powersave(bool enabled)
}
EXPORT_SYMBOL_NS_GPL(set_ally_mcu_powersave, "ASUS_WMI");
+#if IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS)
static ssize_t mcu_powersave_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -1396,6 +1478,8 @@ static ssize_t mcu_powersave_show(struct device *dev,
if (result < 0)
return result;
+ asus_wmi_show_deprecated();
+
return sysfs_emit(buf, "%d\n", result);
}
@@ -1431,6 +1515,7 @@ static ssize_t mcu_powersave_store(struct device *dev,
return count;
}
static DEVICE_ATTR_RW(mcu_powersave);
+#endif /* IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS) */
/* Battery ********************************************************************/
@@ -1619,14 +1704,14 @@ static void do_kbd_led_set(struct led_classdev *led_cdev, int value)
kbd_led_update(asus);
}
-static void kbd_led_set(struct led_classdev *led_cdev,
- enum led_brightness value)
+static int kbd_led_set(struct led_classdev *led_cdev, enum led_brightness value)
{
/* Prevent disabling keyboard backlight on module unregister */
if (led_cdev->flags & LED_UNREGISTERING)
- return;
+ return 0;
do_kbd_led_set(led_cdev, value);
+ return 0;
}
static void kbd_led_set_by_kbd(struct asus_wmi *asus, enum led_brightness value)
@@ -1802,7 +1887,7 @@ static int asus_wmi_led_init(struct asus_wmi *asus)
asus->kbd_led_wk = led_val;
asus->kbd_led.name = "asus::kbd_backlight";
asus->kbd_led.flags = LED_BRIGHT_HW_CHANGED;
- asus->kbd_led.brightness_set = kbd_led_set;
+ asus->kbd_led.brightness_set_blocking = kbd_led_set;
asus->kbd_led.brightness_get = kbd_led_get;
asus->kbd_led.max_brightness = 3;
@@ -2304,6 +2389,7 @@ exit:
}
/* Panel Overdrive ************************************************************/
+#if IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS)
static ssize_t panel_od_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -2314,6 +2400,8 @@ static ssize_t panel_od_show(struct device *dev,
if (result < 0)
return result;
+ asus_wmi_show_deprecated();
+
return sysfs_emit(buf, "%d\n", result);
}
@@ -2350,9 +2438,10 @@ static ssize_t panel_od_store(struct device *dev,
return count;
}
static DEVICE_ATTR_RW(panel_od);
+#endif /* IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS) */
/* Bootup sound ***************************************************************/
-
+#if IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS)
static ssize_t boot_sound_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -2363,6 +2452,8 @@ static ssize_t boot_sound_show(struct device *dev,
if (result < 0)
return result;
+ asus_wmi_show_deprecated();
+
return sysfs_emit(buf, "%d\n", result);
}
@@ -2398,8 +2489,10 @@ static ssize_t boot_sound_store(struct device *dev,
return count;
}
static DEVICE_ATTR_RW(boot_sound);
+#endif /* IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS) */
/* Mini-LED mode **************************************************************/
+#if IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS)
static ssize_t mini_led_mode_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -2430,6 +2523,8 @@ static ssize_t mini_led_mode_show(struct device *dev,
}
}
+ asus_wmi_show_deprecated();
+
return sysfs_emit(buf, "%d\n", value);
}
@@ -2500,10 +2595,13 @@ static ssize_t available_mini_led_mode_show(struct device *dev,
return sysfs_emit(buf, "0 1 2\n");
}
+ asus_wmi_show_deprecated();
+
return sysfs_emit(buf, "0\n");
}
static DEVICE_ATTR_RO(available_mini_led_mode);
+#endif /* IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS) */
/* Quirks *********************************************************************/
@@ -3791,6 +3889,7 @@ static int throttle_thermal_policy_set_default(struct asus_wmi *asus)
return throttle_thermal_policy_write(asus);
}
+#if IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS)
static ssize_t throttle_thermal_policy_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -3834,6 +3933,7 @@ static ssize_t throttle_thermal_policy_store(struct device *dev,
* Throttle thermal policy: 0 - default, 1 - overboost, 2 - silent
*/
static DEVICE_ATTR_RW(throttle_thermal_policy);
+#endif /* IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS) */
/* Platform profile ***********************************************************/
static int asus_wmi_platform_profile_get(struct device *dev,
@@ -4435,27 +4535,29 @@ static struct attribute *platform_attributes[] = {
&dev_attr_camera.attr,
&dev_attr_cardr.attr,
&dev_attr_touchpad.attr,
- &dev_attr_charge_mode.attr,
- &dev_attr_egpu_enable.attr,
- &dev_attr_egpu_connected.attr,
- &dev_attr_dgpu_disable.attr,
- &dev_attr_gpu_mux_mode.attr,
&dev_attr_lid_resume.attr,
&dev_attr_als_enable.attr,
&dev_attr_fan_boost_mode.attr,
- &dev_attr_throttle_thermal_policy.attr,
- &dev_attr_ppt_pl2_sppt.attr,
- &dev_attr_ppt_pl1_spl.attr,
- &dev_attr_ppt_fppt.attr,
- &dev_attr_ppt_apu_sppt.attr,
- &dev_attr_ppt_platform_sppt.attr,
- &dev_attr_nv_dynamic_boost.attr,
- &dev_attr_nv_temp_target.attr,
- &dev_attr_mcu_powersave.attr,
- &dev_attr_boot_sound.attr,
- &dev_attr_panel_od.attr,
- &dev_attr_mini_led_mode.attr,
- &dev_attr_available_mini_led_mode.attr,
+#if IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS)
+ &dev_attr_charge_mode.attr,
+ &dev_attr_egpu_enable.attr,
+ &dev_attr_egpu_connected.attr,
+ &dev_attr_dgpu_disable.attr,
+ &dev_attr_gpu_mux_mode.attr,
+ &dev_attr_ppt_pl2_sppt.attr,
+ &dev_attr_ppt_pl1_spl.attr,
+ &dev_attr_ppt_fppt.attr,
+ &dev_attr_ppt_apu_sppt.attr,
+ &dev_attr_ppt_platform_sppt.attr,
+ &dev_attr_nv_dynamic_boost.attr,
+ &dev_attr_nv_temp_target.attr,
+ &dev_attr_mcu_powersave.attr,
+ &dev_attr_boot_sound.attr,
+ &dev_attr_panel_od.attr,
+ &dev_attr_mini_led_mode.attr,
+ &dev_attr_available_mini_led_mode.attr,
+ &dev_attr_throttle_thermal_policy.attr,
+#endif /* IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS) */
NULL
};
@@ -4477,7 +4579,11 @@ static umode_t asus_sysfs_is_visible(struct kobject *kobj,
devid = ASUS_WMI_DEVID_LID_RESUME;
else if (attr == &dev_attr_als_enable.attr)
devid = ASUS_WMI_DEVID_ALS_ENABLE;
- else if (attr == &dev_attr_charge_mode.attr)
+ else if (attr == &dev_attr_fan_boost_mode.attr)
+ ok = asus->fan_boost_mode_available;
+
+#if IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS)
+ if (attr == &dev_attr_charge_mode.attr)
devid = ASUS_WMI_DEVID_CHARGE_MODE;
else if (attr == &dev_attr_egpu_enable.attr)
ok = asus->egpu_enable_available;
@@ -4496,7 +4602,7 @@ static umode_t asus_sysfs_is_visible(struct kobject *kobj,
else if (attr == &dev_attr_ppt_pl1_spl.attr)
devid = ASUS_WMI_DEVID_PPT_PL1_SPL;
else if (attr == &dev_attr_ppt_fppt.attr)
- devid = ASUS_WMI_DEVID_PPT_FPPT;
+ devid = ASUS_WMI_DEVID_PPT_PL3_FPPT;
else if (attr == &dev_attr_ppt_apu_sppt.attr)
devid = ASUS_WMI_DEVID_PPT_APU_SPPT;
else if (attr == &dev_attr_ppt_platform_sppt.attr)
@@ -4515,6 +4621,7 @@ static umode_t asus_sysfs_is_visible(struct kobject *kobj,
ok = asus->mini_led_dev_id != 0;
else if (attr == &dev_attr_available_mini_led_mode.attr)
ok = asus->mini_led_dev_id != 0;
+#endif /* IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS) */
if (devid != -1) {
ok = !(asus_wmi_get_devstate_simple(asus, devid) < 0);
@@ -4770,6 +4877,7 @@ static int asus_wmi_add(struct platform_device *pdev)
}
/* ensure defaults for tunables */
+#if IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS)
asus->ppt_pl2_sppt = 5;
asus->ppt_pl1_spl = 5;
asus->ppt_apu_sppt = 5;
@@ -4792,17 +4900,18 @@ static int asus_wmi_add(struct platform_device *pdev)
asus->gpu_mux_dev = ASUS_WMI_DEVID_GPU_MUX;
else if (asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_GPU_MUX_VIVO))
asus->gpu_mux_dev = ASUS_WMI_DEVID_GPU_MUX_VIVO;
-
- if (asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_TUF_RGB_MODE))
- asus->kbd_rgb_dev = ASUS_WMI_DEVID_TUF_RGB_MODE;
- else if (asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_TUF_RGB_MODE2))
- asus->kbd_rgb_dev = ASUS_WMI_DEVID_TUF_RGB_MODE2;
+#endif /* IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS) */
if (asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_THROTTLE_THERMAL_POLICY))
asus->throttle_thermal_policy_dev = ASUS_WMI_DEVID_THROTTLE_THERMAL_POLICY;
else if (asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_THROTTLE_THERMAL_POLICY_VIVO))
asus->throttle_thermal_policy_dev = ASUS_WMI_DEVID_THROTTLE_THERMAL_POLICY_VIVO;
+ if (asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_TUF_RGB_MODE))
+ asus->kbd_rgb_dev = ASUS_WMI_DEVID_TUF_RGB_MODE;
+ else if (asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_TUF_RGB_MODE2))
+ asus->kbd_rgb_dev = ASUS_WMI_DEVID_TUF_RGB_MODE2;
+
err = fan_boost_mode_check_present(asus);
if (err)
goto fail_fan_boost_mode;
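The asus-wmi hunks above all follow one shape: each legacy sysfs attribute is compiled out unless CONFIG_ASUS_WMI_DEPRECATED_ATTRS is enabled, and every show handler that remains logs a deprecation notice before emitting its value. A minimal sketch of that pattern — with a hypothetical pr_notice_once() standing in for asus_wmi_show_deprecated(), whose body is not part of this diff:

#if IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS)
/* Hypothetical stand-in; the real helper is defined elsewhere in asus-wmi. */
static void example_show_deprecated(void)
{
	/* Warn once per boot so polling tools do not flood the log. */
	pr_notice_once("example: attribute is deprecated, use the firmware-attributes class instead\n");
}

static ssize_t example_attr_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	example_show_deprecated();

	return sysfs_emit(buf, "%d\n", 0);
}
static DEVICE_ATTR_RO(example_attr);
#endif /* IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS) */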
diff --git a/drivers/platform/x86/ayaneo-ec.c b/drivers/platform/x86/ayaneo-ec.c
new file mode 100644
index 000000000000..41a24e091248
--- /dev/null
+++ b/drivers/platform/x86/ayaneo-ec.c
@@ -0,0 +1,593 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Platform driver for the Embedded Controller (EC) of Ayaneo devices. Handles
+ * hwmon (fan speed, fan control), battery charge limits, and magic module
+ * control (connected modules, controller disconnection).
+ *
+ * Copyright (C) 2025 Antheas Kapenekakis <lkml@antheas.dev>
+ */
+
+#include <linux/acpi.h>
+#include <linux/bits.h>
+#include <linux/dmi.h>
+#include <linux/err.h>
+#include <linux/hwmon.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/power_supply.h>
+#include <linux/sysfs.h>
+#include <acpi/battery.h>
+
+#define AYANEO_PWM_ENABLE_REG 0x4A
+#define AYANEO_PWM_REG 0x4B
+#define AYANEO_PWM_MODE_AUTO 0x00
+#define AYANEO_PWM_MODE_MANUAL 0x01
+
+#define AYANEO_FAN_REG 0x76
+
+#define EC_CHARGE_CONTROL_BEHAVIOURS \
+ (BIT(POWER_SUPPLY_CHARGE_BEHAVIOUR_AUTO) | \
+ BIT(POWER_SUPPLY_CHARGE_BEHAVIOUR_INHIBIT_CHARGE))
+#define AYANEO_CHARGE_REG 0x1e
+#define AYANEO_CHARGE_VAL_AUTO 0xaa
+#define AYANEO_CHARGE_VAL_INHIBIT 0x55
+
+#define AYANEO_POWER_REG 0x2d
+#define AYANEO_POWER_OFF 0xfe
+#define AYANEO_POWER_ON 0xff
+#define AYANEO_MODULE_REG 0x2f
+#define AYANEO_MODULE_LEFT BIT(0)
+#define AYANEO_MODULE_RIGHT BIT(1)
+#define AYANEO_MODULE_MASK (AYANEO_MODULE_LEFT | AYANEO_MODULE_RIGHT)
+
+struct ayaneo_ec_quirk {
+ bool has_fan_control;
+ bool has_charge_control;
+ bool has_magic_modules;
+};
+
+struct ayaneo_ec_platform_data {
+ struct platform_device *pdev;
+ struct ayaneo_ec_quirk *quirks;
+ struct acpi_battery_hook battery_hook;
+
+ // Protects access to restore_pwm
+ struct mutex hwmon_lock;
+ bool restore_charge_limit;
+ bool restore_pwm;
+};
+
+static const struct ayaneo_ec_quirk quirk_fan = {
+ .has_fan_control = true,
+};
+
+static const struct ayaneo_ec_quirk quirk_charge_limit = {
+ .has_fan_control = true,
+ .has_charge_control = true,
+};
+
+static const struct ayaneo_ec_quirk quirk_ayaneo3 = {
+ .has_fan_control = true,
+ .has_charge_control = true,
+ .has_magic_modules = true,
+};
+
+static const struct dmi_system_id dmi_table[] = {
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "AYANEO"),
+ DMI_MATCH(DMI_BOARD_NAME, "AYANEO 2"),
+ },
+ .driver_data = (void *)&quirk_fan,
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "AYANEO"),
+ DMI_MATCH(DMI_BOARD_NAME, "FLIP"),
+ },
+ .driver_data = (void *)&quirk_fan,
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "AYANEO"),
+ DMI_MATCH(DMI_BOARD_NAME, "GEEK"),
+ },
+ .driver_data = (void *)&quirk_fan,
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "AYANEO"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "AIR"),
+ },
+ .driver_data = (void *)&quirk_charge_limit,
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "AYANEO"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "AIR 1S"),
+ },
+ .driver_data = (void *)&quirk_charge_limit,
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "AYANEO"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "AB05-Mendocino"),
+ },
+ .driver_data = (void *)&quirk_charge_limit,
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "AYANEO"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "AIR Pro"),
+ },
+ .driver_data = (void *)&quirk_charge_limit,
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "AYANEO"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "KUN"),
+ },
+ .driver_data = (void *)&quirk_charge_limit,
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "AYANEO"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "AYANEO 3"),
+ },
+ .driver_data = (void *)&quirk_ayaneo3,
+ },
+ {},
+};
+
+/* Callbacks for hwmon interface */
+static umode_t ayaneo_ec_hwmon_is_visible(const void *drvdata,
+ enum hwmon_sensor_types type, u32 attr,
+ int channel)
+{
+ switch (type) {
+ case hwmon_fan:
+ return 0444;
+ case hwmon_pwm:
+ return 0644;
+ default:
+ return 0;
+ }
+}
+
+static int ayaneo_ec_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ u8 tmp;
+ int ret;
+
+ switch (type) {
+ case hwmon_fan:
+ switch (attr) {
+ case hwmon_fan_input:
+ ret = ec_read(AYANEO_FAN_REG, &tmp);
+ if (ret)
+ return ret;
+ *val = tmp << 8;
+ ret = ec_read(AYANEO_FAN_REG + 1, &tmp);
+ if (ret)
+ return ret;
+ *val |= tmp;
+ return 0;
+ default:
+ break;
+ }
+ break;
+ case hwmon_pwm:
+ switch (attr) {
+ case hwmon_pwm_input:
+ ret = ec_read(AYANEO_PWM_REG, &tmp);
+ if (ret)
+ return ret;
+ if (tmp > 100)
+ return -EIO;
+ *val = (255 * tmp) / 100;
+ return 0;
+ case hwmon_pwm_enable:
+ ret = ec_read(AYANEO_PWM_ENABLE_REG, &tmp);
+ if (ret)
+ return ret;
+ if (tmp == AYANEO_PWM_MODE_MANUAL)
+ *val = 1;
+ else if (tmp == AYANEO_PWM_MODE_AUTO)
+ *val = 2;
+ else
+ return -EIO;
+ return 0;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ return -EOPNOTSUPP;
+}
+
+static int ayaneo_ec_write(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long val)
+{
+ struct ayaneo_ec_platform_data *data = dev_get_drvdata(dev);
+ int ret;
+
+ guard(mutex)(&data->hwmon_lock);
+
+ switch (type) {
+ case hwmon_pwm:
+ switch (attr) {
+ case hwmon_pwm_enable:
+ data->restore_pwm = false;
+ switch (val) {
+ case 1:
+ return ec_write(AYANEO_PWM_ENABLE_REG,
+ AYANEO_PWM_MODE_MANUAL);
+ case 2:
+ return ec_write(AYANEO_PWM_ENABLE_REG,
+ AYANEO_PWM_MODE_AUTO);
+ default:
+ return -EINVAL;
+ }
+ case hwmon_pwm_input:
+ if (val < 0 || val > 255)
+ return -EINVAL;
+ if (data->restore_pwm) {
+ /*
+				 * Manual fan control was released before
+				 * hibernation; re-enable it on the first
+				 * PWM write after a successful resume.
+ */
+ ret = ec_write(AYANEO_PWM_ENABLE_REG,
+ AYANEO_PWM_MODE_MANUAL);
+ if (ret)
+ return ret;
+ data->restore_pwm = false;
+ }
+ return ec_write(AYANEO_PWM_REG, (val * 100) / 255);
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ return -EOPNOTSUPP;
+}
+
+static const struct hwmon_ops ayaneo_ec_hwmon_ops = {
+ .is_visible = ayaneo_ec_hwmon_is_visible,
+ .read = ayaneo_ec_read,
+ .write = ayaneo_ec_write,
+};
+
+static const struct hwmon_channel_info *const ayaneo_ec_sensors[] = {
+ HWMON_CHANNEL_INFO(fan, HWMON_F_INPUT),
+ HWMON_CHANNEL_INFO(pwm, HWMON_PWM_INPUT | HWMON_PWM_ENABLE),
+ NULL,
+};
+
+static const struct hwmon_chip_info ayaneo_ec_chip_info = {
+ .ops = &ayaneo_ec_hwmon_ops,
+ .info = ayaneo_ec_sensors,
+};
+
+static int ayaneo_psy_ext_get_prop(struct power_supply *psy,
+ const struct power_supply_ext *ext,
+ void *data,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ int ret;
+ u8 tmp;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_CHARGE_BEHAVIOUR:
+ ret = ec_read(AYANEO_CHARGE_REG, &tmp);
+ if (ret)
+ return ret;
+
+ if (tmp == AYANEO_CHARGE_VAL_INHIBIT)
+ val->intval = POWER_SUPPLY_CHARGE_BEHAVIOUR_INHIBIT_CHARGE;
+ else
+ val->intval = POWER_SUPPLY_CHARGE_BEHAVIOUR_AUTO;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ayaneo_psy_ext_set_prop(struct power_supply *psy,
+ const struct power_supply_ext *ext,
+ void *data,
+ enum power_supply_property psp,
+ const union power_supply_propval *val)
+{
+ u8 raw_val;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_CHARGE_BEHAVIOUR:
+ switch (val->intval) {
+ case POWER_SUPPLY_CHARGE_BEHAVIOUR_AUTO:
+ raw_val = AYANEO_CHARGE_VAL_AUTO;
+ break;
+ case POWER_SUPPLY_CHARGE_BEHAVIOUR_INHIBIT_CHARGE:
+ raw_val = AYANEO_CHARGE_VAL_INHIBIT;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return ec_write(AYANEO_CHARGE_REG, raw_val);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ayaneo_psy_prop_is_writeable(struct power_supply *psy,
+ const struct power_supply_ext *ext,
+ void *data,
+ enum power_supply_property psp)
+{
+ return true;
+}
+
+static const enum power_supply_property ayaneo_psy_ext_props[] = {
+ POWER_SUPPLY_PROP_CHARGE_BEHAVIOUR,
+};
+
+static const struct power_supply_ext ayaneo_psy_ext = {
+ .name = "ayaneo-charge-control",
+ .properties = ayaneo_psy_ext_props,
+ .num_properties = ARRAY_SIZE(ayaneo_psy_ext_props),
+ .charge_behaviours = EC_CHARGE_CONTROL_BEHAVIOURS,
+ .get_property = ayaneo_psy_ext_get_prop,
+ .set_property = ayaneo_psy_ext_set_prop,
+ .property_is_writeable = ayaneo_psy_prop_is_writeable,
+};
+
+static int ayaneo_add_battery(struct power_supply *battery,
+ struct acpi_battery_hook *hook)
+{
+ struct ayaneo_ec_platform_data *data =
+ container_of(hook, struct ayaneo_ec_platform_data, battery_hook);
+
+ return power_supply_register_extension(battery, &ayaneo_psy_ext,
+ &data->pdev->dev, NULL);
+}
+
+static int ayaneo_remove_battery(struct power_supply *battery,
+ struct acpi_battery_hook *hook)
+{
+ power_supply_unregister_extension(battery, &ayaneo_psy_ext);
+ return 0;
+}
+
+static ssize_t controller_power_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ bool value;
+ int ret;
+
+ ret = kstrtobool(buf, &value);
+ if (ret)
+ return ret;
+
+ ret = ec_write(AYANEO_POWER_REG, value ? AYANEO_POWER_ON : AYANEO_POWER_OFF);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static ssize_t controller_power_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int ret;
+ u8 val;
+
+ ret = ec_read(AYANEO_POWER_REG, &val);
+ if (ret)
+ return ret;
+
+ return sysfs_emit(buf, "%d\n", val == AYANEO_POWER_ON);
+}
+
+static DEVICE_ATTR_RW(controller_power);
+
+static ssize_t controller_modules_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ u8 unconnected_modules;
+ char *out;
+ int ret;
+
+ ret = ec_read(AYANEO_MODULE_REG, &unconnected_modules);
+ if (ret)
+ return ret;
+
+ switch (~unconnected_modules & AYANEO_MODULE_MASK) {
+ case AYANEO_MODULE_LEFT | AYANEO_MODULE_RIGHT:
+ out = "both";
+ break;
+ case AYANEO_MODULE_LEFT:
+ out = "left";
+ break;
+ case AYANEO_MODULE_RIGHT:
+ out = "right";
+ break;
+ default:
+ out = "none";
+ break;
+ }
+
+ return sysfs_emit(buf, "%s\n", out);
+}
+
+static DEVICE_ATTR_RO(controller_modules);
+
+static struct attribute *aya_mm_attrs[] = {
+ &dev_attr_controller_power.attr,
+ &dev_attr_controller_modules.attr,
+ NULL
+};
+
+static umode_t aya_mm_is_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct platform_device *pdev = to_platform_device(dev);
+ struct ayaneo_ec_platform_data *data = platform_get_drvdata(pdev);
+
+ if (data->quirks->has_magic_modules)
+ return attr->mode;
+ return 0;
+}
+
+static const struct attribute_group aya_mm_attribute_group = {
+ .is_visible = aya_mm_is_visible,
+ .attrs = aya_mm_attrs,
+};
+
+static const struct attribute_group *ayaneo_ec_groups[] = {
+ &aya_mm_attribute_group,
+ NULL
+};
+
+static int ayaneo_ec_probe(struct platform_device *pdev)
+{
+ const struct dmi_system_id *dmi_entry;
+ struct ayaneo_ec_platform_data *data;
+ struct device *hwdev;
+ int ret;
+
+ dmi_entry = dmi_first_match(dmi_table);
+ if (!dmi_entry)
+ return -ENODEV;
+
+ data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->pdev = pdev;
+ data->quirks = dmi_entry->driver_data;
+ ret = devm_mutex_init(&pdev->dev, &data->hwmon_lock);
+ if (ret)
+ return ret;
+ platform_set_drvdata(pdev, data);
+
+ if (data->quirks->has_fan_control) {
+ hwdev = devm_hwmon_device_register_with_info(&pdev->dev,
+ "ayaneo_ec", data, &ayaneo_ec_chip_info, NULL);
+ if (IS_ERR(hwdev))
+ return PTR_ERR(hwdev);
+ }
+
+ if (data->quirks->has_charge_control) {
+ data->battery_hook.add_battery = ayaneo_add_battery;
+ data->battery_hook.remove_battery = ayaneo_remove_battery;
+ data->battery_hook.name = "Ayaneo Battery";
+ ret = devm_battery_hook_register(&pdev->dev, &data->battery_hook);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ayaneo_freeze(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct ayaneo_ec_platform_data *data = platform_get_drvdata(pdev);
+ int ret;
+ u8 tmp;
+
+ if (data->quirks->has_charge_control) {
+ ret = ec_read(AYANEO_CHARGE_REG, &tmp);
+ if (ret)
+ return ret;
+
+ data->restore_charge_limit = tmp == AYANEO_CHARGE_VAL_INHIBIT;
+ }
+
+ if (data->quirks->has_fan_control) {
+ ret = ec_read(AYANEO_PWM_ENABLE_REG, &tmp);
+ if (ret)
+ return ret;
+
+ data->restore_pwm = tmp == AYANEO_PWM_MODE_MANUAL;
+
+ /*
+ * Release the fan when entering hibernation to avoid
+ * overheating if hibernation fails and hangs.
+ */
+ if (data->restore_pwm) {
+ ret = ec_write(AYANEO_PWM_ENABLE_REG, AYANEO_PWM_MODE_AUTO);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int ayaneo_restore(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct ayaneo_ec_platform_data *data = platform_get_drvdata(pdev);
+ int ret;
+
+ if (data->quirks->has_charge_control && data->restore_charge_limit) {
+ ret = ec_write(AYANEO_CHARGE_REG, AYANEO_CHARGE_VAL_INHIBIT);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct dev_pm_ops ayaneo_pm_ops = {
+ .freeze = ayaneo_freeze,
+ .restore = ayaneo_restore,
+};
+
+static struct platform_driver ayaneo_platform_driver = {
+ .driver = {
+ .name = "ayaneo-ec",
+ .dev_groups = ayaneo_ec_groups,
+ .pm = pm_sleep_ptr(&ayaneo_pm_ops),
+ },
+ .probe = ayaneo_ec_probe,
+};
+
+static struct platform_device *ayaneo_platform_device;
+
+static int __init ayaneo_ec_init(void)
+{
+ ayaneo_platform_device =
+ platform_create_bundle(&ayaneo_platform_driver,
+ ayaneo_ec_probe, NULL, 0, NULL, 0);
+
+ return PTR_ERR_OR_ZERO(ayaneo_platform_device);
+}
+
+static void __exit ayaneo_ec_exit(void)
+{
+ platform_device_unregister(ayaneo_platform_device);
+ platform_driver_unregister(&ayaneo_platform_driver);
+}
+
+MODULE_DEVICE_TABLE(dmi, dmi_table);
+
+module_init(ayaneo_ec_init);
+module_exit(ayaneo_ec_exit);
+
+MODULE_AUTHOR("Antheas Kapenekakis <lkml@antheas.dev>");
+MODULE_DESCRIPTION("Ayaneo Embedded Controller (EC) platform features");
+MODULE_LICENSE("GPL");
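For reference, the arithmetic the new driver performs on EC values, pulled out of the hwmon callbacks above as a standalone sketch: the tachometer is a big-endian 16-bit count split across registers 0x76/0x77, and fan duty is stored as a percentage while hwmon expects a 0-255 scale.

/* Sketch only; mirrors the conversions in ayaneo_ec_read()/ayaneo_ec_write(). */
static long ayaneo_fan_raw_to_rpm(u8 hi, u8 lo)
{
	return ((long)hi << 8) | lo;		/* e.g. hi=0x0B, lo=0xB8 -> 3000 RPM */
}

static long ayaneo_duty_to_pwm(u8 duty_pct)
{
	return (255L * duty_pct) / 100;		/* EC 0-100 -> hwmon 0-255; 50% -> 127 */
}

static u8 ayaneo_pwm_to_duty(long pwm)
{
	return (pwm * 100) / 255;		/* hwmon 0-255 -> EC 0-100 */
}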
diff --git a/drivers/platform/x86/dell/alienware-wmi-wmax.c b/drivers/platform/x86/dell/alienware-wmi-wmax.c
index fadf7aac6779..1418bd326edf 100644
--- a/drivers/platform/x86/dell/alienware-wmi-wmax.c
+++ b/drivers/platform/x86/dell/alienware-wmi-wmax.c
@@ -235,11 +235,6 @@ enum AWCC_THERMAL_TABLES {
AWCC_THERMAL_TABLE_USTT = 0xA,
};
-enum AWCC_SPECIAL_THERMAL_CODES {
- AWCC_SPECIAL_PROFILE_CUSTOM = 0x00,
- AWCC_SPECIAL_PROFILE_GMODE = 0xAB,
-};
-
enum AWCC_TEMP_SENSOR_TYPES {
AWCC_TEMP_SENSOR_CPU = 0x01,
AWCC_TEMP_SENSOR_FRONT = 0x03,
@@ -266,17 +261,18 @@ enum AWCC_FAN_TYPES {
};
enum awcc_thermal_profile {
- AWCC_PROFILE_USTT_BALANCED,
- AWCC_PROFILE_USTT_BALANCED_PERFORMANCE,
- AWCC_PROFILE_USTT_COOL,
- AWCC_PROFILE_USTT_QUIET,
- AWCC_PROFILE_USTT_PERFORMANCE,
- AWCC_PROFILE_USTT_LOW_POWER,
- AWCC_PROFILE_LEGACY_QUIET,
- AWCC_PROFILE_LEGACY_BALANCED,
- AWCC_PROFILE_LEGACY_BALANCED_PERFORMANCE,
- AWCC_PROFILE_LEGACY_PERFORMANCE,
- AWCC_PROFILE_LAST,
+ AWCC_PROFILE_SPECIAL_CUSTOM = 0x00,
+ AWCC_PROFILE_LEGACY_QUIET = 0x96,
+ AWCC_PROFILE_LEGACY_BALANCED = 0x97,
+ AWCC_PROFILE_LEGACY_BALANCED_PERFORMANCE = 0x98,
+ AWCC_PROFILE_LEGACY_PERFORMANCE = 0x99,
+ AWCC_PROFILE_USTT_BALANCED = 0xA0,
+ AWCC_PROFILE_USTT_BALANCED_PERFORMANCE = 0xA1,
+ AWCC_PROFILE_USTT_COOL = 0xA2,
+ AWCC_PROFILE_USTT_QUIET = 0xA3,
+ AWCC_PROFILE_USTT_PERFORMANCE = 0xA4,
+ AWCC_PROFILE_USTT_LOW_POWER = 0xA5,
+ AWCC_PROFILE_SPECIAL_GMODE = 0xAB,
};
struct wmax_led_args {
@@ -332,19 +328,6 @@ struct awcc_priv {
u32 gpio_count;
};
-static const enum platform_profile_option awcc_mode_to_platform_profile[AWCC_PROFILE_LAST] = {
- [AWCC_PROFILE_USTT_BALANCED] = PLATFORM_PROFILE_BALANCED,
- [AWCC_PROFILE_USTT_BALANCED_PERFORMANCE] = PLATFORM_PROFILE_BALANCED_PERFORMANCE,
- [AWCC_PROFILE_USTT_COOL] = PLATFORM_PROFILE_COOL,
- [AWCC_PROFILE_USTT_QUIET] = PLATFORM_PROFILE_QUIET,
- [AWCC_PROFILE_USTT_PERFORMANCE] = PLATFORM_PROFILE_PERFORMANCE,
- [AWCC_PROFILE_USTT_LOW_POWER] = PLATFORM_PROFILE_LOW_POWER,
- [AWCC_PROFILE_LEGACY_QUIET] = PLATFORM_PROFILE_QUIET,
- [AWCC_PROFILE_LEGACY_BALANCED] = PLATFORM_PROFILE_BALANCED,
- [AWCC_PROFILE_LEGACY_BALANCED_PERFORMANCE] = PLATFORM_PROFILE_BALANCED_PERFORMANCE,
- [AWCC_PROFILE_LEGACY_PERFORMANCE] = PLATFORM_PROFILE_PERFORMANCE,
-};
-
static struct awcc_quirks *awcc;
/*
@@ -562,21 +545,41 @@ const struct attribute_group wmax_deepsleep_attribute_group = {
/*
* AWCC Helpers
*/
-static bool is_awcc_thermal_profile_id(u8 code)
+static int awcc_profile_to_pprof(enum awcc_thermal_profile profile,
+ enum platform_profile_option *pprof)
{
- u8 table = FIELD_GET(AWCC_THERMAL_TABLE_MASK, code);
- u8 mode = FIELD_GET(AWCC_THERMAL_MODE_MASK, code);
-
- if (mode >= AWCC_PROFILE_LAST)
- return false;
-
- if (table == AWCC_THERMAL_TABLE_LEGACY && mode >= AWCC_PROFILE_LEGACY_QUIET)
- return true;
-
- if (table == AWCC_THERMAL_TABLE_USTT && mode <= AWCC_PROFILE_USTT_LOW_POWER)
- return true;
+ switch (profile) {
+ case AWCC_PROFILE_SPECIAL_CUSTOM:
+ *pprof = PLATFORM_PROFILE_CUSTOM;
+ break;
+ case AWCC_PROFILE_LEGACY_QUIET:
+ case AWCC_PROFILE_USTT_QUIET:
+ *pprof = PLATFORM_PROFILE_QUIET;
+ break;
+ case AWCC_PROFILE_LEGACY_BALANCED:
+ case AWCC_PROFILE_USTT_BALANCED:
+ *pprof = PLATFORM_PROFILE_BALANCED;
+ break;
+ case AWCC_PROFILE_LEGACY_BALANCED_PERFORMANCE:
+ case AWCC_PROFILE_USTT_BALANCED_PERFORMANCE:
+ *pprof = PLATFORM_PROFILE_BALANCED_PERFORMANCE;
+ break;
+ case AWCC_PROFILE_LEGACY_PERFORMANCE:
+ case AWCC_PROFILE_USTT_PERFORMANCE:
+ case AWCC_PROFILE_SPECIAL_GMODE:
+ *pprof = PLATFORM_PROFILE_PERFORMANCE;
+ break;
+ case AWCC_PROFILE_USTT_COOL:
+ *pprof = PLATFORM_PROFILE_COOL;
+ break;
+ case AWCC_PROFILE_USTT_LOW_POWER:
+ *pprof = PLATFORM_PROFILE_LOW_POWER;
+ break;
+ default:
+ return -EINVAL;
+ }
- return false;
+ return 0;
}
static int awcc_wmi_command(struct wmi_device *wdev, u32 method_id,
@@ -1225,24 +1228,7 @@ static int awcc_platform_profile_get(struct device *dev,
if (ret)
return ret;
- switch (out_data) {
- case AWCC_SPECIAL_PROFILE_CUSTOM:
- *profile = PLATFORM_PROFILE_CUSTOM;
- return 0;
- case AWCC_SPECIAL_PROFILE_GMODE:
- *profile = PLATFORM_PROFILE_PERFORMANCE;
- return 0;
- default:
- break;
- }
-
- if (!is_awcc_thermal_profile_id(out_data))
- return -ENODATA;
-
- out_data = FIELD_GET(AWCC_THERMAL_MODE_MASK, out_data);
- *profile = awcc_mode_to_platform_profile[out_data];
-
- return 0;
+ return awcc_profile_to_pprof(out_data, profile);
}
static int awcc_platform_profile_set(struct device *dev,
@@ -1279,7 +1265,6 @@ static int awcc_platform_profile_probe(void *drvdata, unsigned long *choices)
{
enum platform_profile_option profile;
struct awcc_priv *priv = drvdata;
- enum awcc_thermal_profile mode;
u8 id, offset = 0;
int ret;
@@ -1301,15 +1286,20 @@ static int awcc_platform_profile_probe(void *drvdata, unsigned long *choices)
if (ret)
return ret;
- if (!is_awcc_thermal_profile_id(id)) {
+ /*
+	 * G-Mode profile ID is not listed consistently across models
+	 * that support it, so it is handled through quirks.
+ */
+ if (id == AWCC_PROFILE_SPECIAL_GMODE)
+ continue;
+
+ ret = awcc_profile_to_pprof(id, &profile);
+ if (ret) {
dev_dbg(&priv->wdev->dev, "Unmapped thermal profile ID 0x%02x\n", id);
continue;
}
- mode = FIELD_GET(AWCC_THERMAL_MODE_MASK, id);
- profile = awcc_mode_to_platform_profile[mode];
priv->supported_profiles[profile] = id;
-
__set_bit(profile, choices);
}
@@ -1318,14 +1308,14 @@ static int awcc_platform_profile_probe(void *drvdata, unsigned long *choices)
if (awcc->gmode) {
priv->supported_profiles[PLATFORM_PROFILE_PERFORMANCE] =
- AWCC_SPECIAL_PROFILE_GMODE;
+ AWCC_PROFILE_SPECIAL_GMODE;
__set_bit(PLATFORM_PROFILE_PERFORMANCE, choices);
}
/* Every model supports the "custom" profile */
priv->supported_profiles[PLATFORM_PROFILE_CUSTOM] =
- AWCC_SPECIAL_PROFILE_CUSTOM;
+ AWCC_PROFILE_SPECIAL_CUSTOM;
__set_bit(PLATFORM_PROFILE_CUSTOM, choices);
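A short usage sketch of the reworked helper: because the enum members now carry the raw WMI profile IDs, a byte read from firmware translates directly, and unknown IDs surface as -EINVAL with no table-bounds bookkeeping. The raw value below is hypothetical, chosen for illustration.

/* Sketch: map a raw firmware byte straight to a platform profile. */
static int example_map_profile(u8 raw)
{
	enum platform_profile_option profile;
	int ret;

	ret = awcc_profile_to_pprof(raw, &profile);
	if (ret)
		return ret;	/* an unlisted ID yields -EINVAL */

	/* raw == 0x97 (AWCC_PROFILE_LEGACY_BALANCED) gives PLATFORM_PROFILE_BALANCED */
	return profile;
}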
diff --git a/drivers/platform/x86/gpd-pocket-fan.c b/drivers/platform/x86/gpd-pocket-fan.c
index 7a20f68ae206..c9236738f896 100644
--- a/drivers/platform/x86/gpd-pocket-fan.c
+++ b/drivers/platform/x86/gpd-pocket-fan.c
@@ -112,14 +112,14 @@ set_speed:
gpd_pocket_fan_set_speed(fan, speed);
/* When mostly idle (low temp/speed), slow down the poll interval. */
- queue_delayed_work(system_wq, &fan->work,
+ queue_delayed_work(system_percpu_wq, &fan->work,
msecs_to_jiffies(4000 / (speed + 1)));
}
static void gpd_pocket_fan_force_update(struct gpd_pocket_fan_data *fan)
{
fan->last_speed = -1;
- mod_delayed_work(system_wq, &fan->work, 0);
+ mod_delayed_work(system_percpu_wq, &fan->work, 0);
}
static int gpd_pocket_fan_probe(struct platform_device *pdev)
diff --git a/drivers/platform/x86/hp/hp-wmi.c b/drivers/platform/x86/hp/hp-wmi.c
index ad9d9f97960f..f4ea1ea05997 100644
--- a/drivers/platform/x86/hp/hp-wmi.c
+++ b/drivers/platform/x86/hp/hp-wmi.c
@@ -63,12 +63,16 @@ MODULE_ALIAS("wmi:5FB7F034-2C63-45E9-BE91-3D44E2C707E4");
* contains "PerformanceControl".
*/
static const char * const omen_thermal_profile_boards[] = {
- "84DA", "84DB", "84DC", "8574", "8575", "860A", "87B5", "8572", "8573",
- "8600", "8601", "8602", "8605", "8606", "8607", "8746", "8747", "8749",
- "874A", "8603", "8604", "8748", "886B", "886C", "878A", "878B", "878C",
- "88C8", "88CB", "8786", "8787", "8788", "88D1", "88D2", "88F4", "88FD",
- "88F5", "88F6", "88F7", "88FE", "88FF", "8900", "8901", "8902", "8912",
- "8917", "8918", "8949", "894A", "89EB", "8BAD", "8A42", "8A15"
+ "84DA", "84DB", "84DC",
+ "8572", "8573", "8574", "8575",
+ "8600", "8601", "8602", "8603", "8604", "8605", "8606", "8607", "860A",
+ "8746", "8747", "8748", "8749", "874A", "8786", "8787", "8788", "878A",
+ "878B", "878C", "87B5",
+ "886B", "886C", "88C8", "88CB", "88D1", "88D2", "88F4", "88F5", "88F6",
+ "88F7", "88FD", "88FE", "88FF",
+ "8900", "8901", "8902", "8912", "8917", "8918", "8949", "894A", "89EB",
+ "8A15", "8A42",
+ "8BAD",
};
/* DMI Board names of Omen laptops that are specifically set to be thermal
@@ -76,7 +80,8 @@ static const char * const omen_thermal_profile_boards[] = {
* the get system design information WMI call returns
*/
static const char * const omen_thermal_profile_force_v0_boards[] = {
- "8607", "8746", "8747", "8749", "874A", "8748"
+ "8607",
+ "8746", "8747", "8748", "8749", "874A",
};
/* DMI board names of Omen laptops that have a thermal profile timer which will
@@ -84,12 +89,13 @@ static const char * const omen_thermal_profile_force_v0_boards[] = {
* "balanced" when reaching zero.
*/
static const char * const omen_timed_thermal_profile_boards[] = {
- "8BAD", "8A42", "8A15"
+ "8A15", "8A42",
+ "8BAD",
};
/* DMI Board names of Victus 16-d1xxx laptops */
static const char * const victus_thermal_profile_boards[] = {
- "8A25"
+ "8A25",
};
/* DMI Board names of Victus 16-r and Victus 16-s laptops */
diff --git a/drivers/platform/x86/intel/Kconfig b/drivers/platform/x86/intel/Kconfig
index 19a2246f2770..2900407d6095 100644
--- a/drivers/platform/x86/intel/Kconfig
+++ b/drivers/platform/x86/intel/Kconfig
@@ -41,6 +41,19 @@ config INTEL_VBTN
To compile this driver as a module, choose M here: the module will
be called intel_vbtn.
+config INTEL_EHL_PSE_IO
+ tristate "Intel Elkhart Lake PSE I/O driver"
+ depends on PCI
+ select AUXILIARY_BUS
+ help
+ Select this option to enable Intel Elkhart Lake PSE GPIO and Timed
+ I/O support. This driver enumerates the PCI parent device and
+	  creates auxiliary child devices for these capabilities. The actual
+	  functionality is provided by the respective auxiliary drivers.
+
+ To compile this driver as a module, choose M here: the module will
+ be called intel_ehl_pse_io.
+
config INTEL_INT0002_VGPIO
tristate "Intel ACPI INT0002 Virtual GPIO driver"
depends on GPIOLIB && ACPI && PM_SLEEP
diff --git a/drivers/platform/x86/intel/Makefile b/drivers/platform/x86/intel/Makefile
index 78acb414e154..138b13756158 100644
--- a/drivers/platform/x86/intel/Makefile
+++ b/drivers/platform/x86/intel/Makefile
@@ -21,6 +21,7 @@ intel-target-$(CONFIG_INTEL_HID_EVENT) += hid.o
intel-target-$(CONFIG_INTEL_VBTN) += vbtn.o
# Intel miscellaneous drivers
+intel-target-$(CONFIG_INTEL_EHL_PSE_IO) += ehl_pse_io.o
intel-target-$(CONFIG_INTEL_INT0002_VGPIO) += int0002_vgpio.o
intel-target-$(CONFIG_INTEL_ISHTP_ECLITE) += ishtp_eclite.o
intel-target-$(CONFIG_INTEL_OAKTRAIL) += oaktrail.o
diff --git a/drivers/platform/x86/intel/chtwc_int33fe.c b/drivers/platform/x86/intel/chtwc_int33fe.c
index 29e8b5432f4c..d183aa53c318 100644
--- a/drivers/platform/x86/intel/chtwc_int33fe.c
+++ b/drivers/platform/x86/intel/chtwc_int33fe.c
@@ -77,7 +77,7 @@ static const struct software_node max17047_node = {
* software node.
*/
static struct software_node_ref_args fusb302_mux_refs[] = {
- { .node = NULL },
+ SOFTWARE_NODE_REFERENCE(NULL),
};
static const struct property_entry fusb302_properties[] = {
@@ -190,11 +190,6 @@ static void cht_int33fe_remove_nodes(struct cht_int33fe_data *data)
{
software_node_unregister_node_group(node_group);
- if (fusb302_mux_refs[0].node) {
- fwnode_handle_put(software_node_fwnode(fusb302_mux_refs[0].node));
- fusb302_mux_refs[0].node = NULL;
- }
-
if (data->dp) {
data->dp->secondary = NULL;
fwnode_handle_put(data->dp);
@@ -202,7 +197,15 @@ static void cht_int33fe_remove_nodes(struct cht_int33fe_data *data)
}
}
-static int cht_int33fe_add_nodes(struct cht_int33fe_data *data)
+static void cht_int33fe_put_swnode(void *data)
+{
+ struct fwnode_handle *fwnode = data;
+
+ fwnode_handle_put(fwnode);
+ fusb302_mux_refs[0] = SOFTWARE_NODE_REFERENCE(NULL);
+}
+
+static int cht_int33fe_add_nodes(struct device *dev, struct cht_int33fe_data *data)
{
const struct software_node *mux_ref_node;
int ret;
@@ -212,17 +215,25 @@ static int cht_int33fe_add_nodes(struct cht_int33fe_data *data)
* until the mux driver has created software node for the mux device.
* It means we depend on the mux driver. This function will return
* -EPROBE_DEFER until the mux device is registered.
+ *
+ * FIXME: the relevant software node exists in intel-xhci-usb-role-switch
+ * and - if exported - could be used to set up a static reference.
*/
mux_ref_node = software_node_find_by_name(NULL, "intel-xhci-usb-sw");
if (!mux_ref_node)
return -EPROBE_DEFER;
+ ret = devm_add_action_or_reset(dev, cht_int33fe_put_swnode,
+ software_node_fwnode(mux_ref_node));
+ if (ret)
+ return ret;
+
/*
* Update node used in "usb-role-switch" property. Note that we
* rely on software_node_register_node_group() to use the original
* instance of properties instead of copying them.
*/
- fusb302_mux_refs[0].node = mux_ref_node;
+ fusb302_mux_refs[0] = SOFTWARE_NODE_REFERENCE(mux_ref_node);
ret = software_node_register_node_group(node_group);
if (ret)
@@ -345,7 +356,7 @@ static int cht_int33fe_typec_probe(struct platform_device *pdev)
return fusb302_irq;
}
- ret = cht_int33fe_add_nodes(data);
+ ret = cht_int33fe_add_nodes(dev, data);
if (ret)
return ret;
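The shape of the chtwc_int33fe change, as a generic sketch: instead of dropping the software-node reference by hand in the remove path, a devres action is registered right after the reference is obtained, so the put runs automatically on probe failure or unbind.

/* Sketch of the devm cleanup-action pattern used above. */
static void example_put_fwnode(void *data)
{
	fwnode_handle_put(data);
}

static int example_take_fwnode(struct device *dev, struct fwnode_handle *fwnode)
{
	/* If registration fails, the action runs immediately and drops the ref. */
	return devm_add_action_or_reset(dev, example_put_fwnode, fwnode);
}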
diff --git a/drivers/platform/x86/intel/ehl_pse_io.c b/drivers/platform/x86/intel/ehl_pse_io.c
new file mode 100644
index 000000000000..861e14808b35
--- /dev/null
+++ b/drivers/platform/x86/intel/ehl_pse_io.c
@@ -0,0 +1,86 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Intel Elkhart Lake Programmable Service Engine (PSE) I/O
+ *
+ * Copyright (c) 2025 Intel Corporation.
+ *
+ * Author: Raag Jadav <raag.jadav@intel.com>
+ */
+
+#include <linux/auxiliary_bus.h>
+#include <linux/device/devres.h>
+#include <linux/errno.h>
+#include <linux/gfp_types.h>
+#include <linux/ioport.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/sizes.h>
+#include <linux/types.h>
+
+#include <linux/ehl_pse_io_aux.h>
+
+#define EHL_PSE_IO_DEV_SIZE SZ_4K
+
+static int ehl_pse_io_dev_create(struct pci_dev *pci, const char *name, int idx)
+{
+ struct device *dev = &pci->dev;
+ struct auxiliary_device *adev;
+ struct ehl_pse_io_data *data;
+ resource_size_t start, offset;
+ u32 id;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ id = (pci_domain_nr(pci->bus) << 16) | pci_dev_id(pci);
+ start = pci_resource_start(pci, 0);
+ offset = EHL_PSE_IO_DEV_SIZE * idx;
+
+ data->mem = DEFINE_RES_MEM(start + offset, EHL_PSE_IO_DEV_SIZE);
+ data->irq = pci_irq_vector(pci, idx);
+
+ adev = __devm_auxiliary_device_create(dev, EHL_PSE_IO_NAME, name, data, id);
+
+ return adev ? 0 : -ENODEV;
+}
+
+static int ehl_pse_io_probe(struct pci_dev *pci, const struct pci_device_id *id)
+{
+ int ret;
+
+ ret = pcim_enable_device(pci);
+ if (ret)
+ return ret;
+
+ pci_set_master(pci);
+
+ ret = pci_alloc_irq_vectors(pci, 2, 2, PCI_IRQ_MSI);
+ if (ret < 0)
+ return ret;
+
+ ret = ehl_pse_io_dev_create(pci, EHL_PSE_GPIO_NAME, 0);
+ if (ret)
+ return ret;
+
+ return ehl_pse_io_dev_create(pci, EHL_PSE_TIO_NAME, 1);
+}
+
+static const struct pci_device_id ehl_pse_io_ids[] = {
+ { PCI_VDEVICE(INTEL, 0x4b88) },
+ { PCI_VDEVICE(INTEL, 0x4b89) },
+ { }
+};
+MODULE_DEVICE_TABLE(pci, ehl_pse_io_ids);
+
+static struct pci_driver ehl_pse_io_driver = {
+ .name = EHL_PSE_IO_NAME,
+ .id_table = ehl_pse_io_ids,
+ .probe = ehl_pse_io_probe,
+};
+module_pci_driver(ehl_pse_io_driver);
+
+MODULE_AUTHOR("Raag Jadav <raag.jadav@intel.com>");
+MODULE_DESCRIPTION("Intel Elkhart Lake PSE I/O driver");
+MODULE_LICENSE("GPL");
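A hedged sketch of a consumer for the cells created above: an auxiliary driver binding to the GPIO child and reading back the resources the parent packed into ehl_pse_io_data. This assumes EHL_PSE_IO_NAME and EHL_PSE_GPIO_NAME expand to string literals, that the usual "<modname>.<devname>" auxiliary-bus match convention applies, and that the creation helper exposes the data pointer as platform data; the real child drivers live elsewhere in the tree.

#include <linux/auxiliary_bus.h>
#include <linux/module.h>

#include <linux/ehl_pse_io_aux.h>

static int example_gpio_probe(struct auxiliary_device *adev,
			      const struct auxiliary_device_id *id)
{
	struct ehl_pse_io_data *data = dev_get_platdata(&adev->dev);

	/* data->mem is this cell's 4K slice of BAR 0; data->irq its MSI vector. */
	dev_info(&adev->dev, "mem %pR irq %d\n", &data->mem, data->irq);

	return 0;
}

static const struct auxiliary_device_id example_gpio_ids[] = {
	{ .name = EHL_PSE_IO_NAME "." EHL_PSE_GPIO_NAME },	/* assumed match string */
	{ }
};
MODULE_DEVICE_TABLE(auxiliary, example_gpio_ids);

static struct auxiliary_driver example_gpio_driver = {
	.probe = example_gpio_probe,
	.id_table = example_gpio_ids,
};
module_auxiliary_driver(example_gpio_driver);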
diff --git a/drivers/platform/x86/intel/hid.c b/drivers/platform/x86/intel/hid.c
index 9c07a7faf18f..560cc063198e 100644
--- a/drivers/platform/x86/intel/hid.c
+++ b/drivers/platform/x86/intel/hid.c
@@ -177,6 +177,18 @@ static const struct dmi_system_id dmi_vgbs_allow_list[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "HP Elite Dragonfly G2 Notebook PC"),
},
},
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Dell Pro Rugged 10 Tablet RA00260"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Dell Pro Rugged 12 Tablet RA02260"),
+ },
+ },
{ }
};
diff --git a/drivers/platform/x86/intel/pmc/arl.c b/drivers/platform/x86/intel/pmc/arl.c
index 17ad87b392ab..eb23bc68340a 100644
--- a/drivers/platform/x86/intel/pmc/arl.c
+++ b/drivers/platform/x86/intel/pmc/arl.c
@@ -281,6 +281,7 @@ static const struct pmc_reg_map arl_socs_reg_map = {
.etr3_offset = ETR3_OFFSET,
.pson_residency_offset = TGL_PSON_RESIDENCY_OFFSET,
.pson_residency_counter_step = TGL_PSON_RES_COUNTER_STEP,
+ .lpm_req_guid = SOCS_LPM_REQ_GUID,
};
static const struct pmc_bit_map arl_pchs_ltr_show_map[] = {
@@ -648,26 +649,23 @@ static const struct pmc_reg_map arl_pchs_reg_map = {
.lpm_num_maps = ADL_LPM_NUM_MAPS,
.lpm_reg_index = ARL_LPM_REG_INDEX,
.etr3_offset = ETR3_OFFSET,
+ .lpm_req_guid = PCHS_LPM_REQ_GUID,
};
static struct pmc_info arl_pmc_info_list[] = {
{
- .guid = IOEP_LPM_REQ_GUID,
.devid = PMC_DEVID_ARL_IOEP,
.map = &mtl_ioep_reg_map,
},
{
- .guid = SOCS_LPM_REQ_GUID,
.devid = PMC_DEVID_ARL_SOCS,
.map = &arl_socs_reg_map,
},
{
- .guid = PCHS_LPM_REQ_GUID,
.devid = PMC_DEVID_ARL_PCHS,
.map = &arl_pchs_reg_map,
},
{
- .guid = SOCM_LPM_REQ_GUID,
.devid = PMC_DEVID_ARL_SOCM,
.map = &mtl_socm_reg_map,
},
@@ -720,9 +718,10 @@ static int arl_h_core_init(struct pmc_dev *pmcdev, struct pmc_dev_info *pmc_dev_
return generic_core_init(pmcdev, pmc_dev_info);
}
+static u32 ARL_PMT_DMU_GUIDS[] = {ARL_PMT_DMU_GUID, 0x0};
struct pmc_dev_info arl_pmc_dev = {
.pci_func = 0,
- .dmu_guid = ARL_PMT_DMU_GUID,
+ .dmu_guids = ARL_PMT_DMU_GUIDS,
.regmap_list = arl_pmc_info_list,
.map = &arl_socs_reg_map,
.sub_req_show = &pmc_core_substate_req_regs_fops,
@@ -732,9 +731,10 @@ struct pmc_dev_info arl_pmc_dev = {
.sub_req = pmc_core_pmt_get_lpm_req,
};
+static u32 ARL_H_PMT_DMU_GUIDS[] = {ARL_PMT_DMU_GUID, ARL_H_PMT_DMU_GUID, 0x0};
struct pmc_dev_info arl_h_pmc_dev = {
.pci_func = 2,
- .dmu_guid = ARL_PMT_DMU_GUID,
+ .dmu_guids = ARL_H_PMT_DMU_GUIDS,
.regmap_list = arl_pmc_info_list,
.map = &mtl_socm_reg_map,
.sub_req_show = &pmc_core_substate_req_regs_fops,
diff --git a/drivers/platform/x86/intel/pmc/core.c b/drivers/platform/x86/intel/pmc/core.c
index ac3d19ae8c56..7d7ae8a40b0e 100644
--- a/drivers/platform/x86/intel/pmc/core.c
+++ b/drivers/platform/x86/intel/pmc/core.c
@@ -20,6 +20,7 @@ enum header_type {
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/dmi.h>
+#include <linux/err.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/pci.h>
@@ -311,20 +312,20 @@ static inline u8 pmc_core_reg_read_byte(struct pmc *pmc, int offset)
}
static void pmc_core_display_map(struct seq_file *s, int index, int idx, int ip,
- int pmc_index, u8 pf_reg, const struct pmc_bit_map **pf_map)
+ int pmc_idx, u8 pf_reg, const struct pmc_bit_map **pf_map)
{
seq_printf(s, "PMC%d:PCH IP: %-2d - %-32s\tState: %s\n",
- pmc_index, ip, pf_map[idx][index].name,
+ pmc_idx, ip, pf_map[idx][index].name,
pf_map[idx][index].bit_mask & pf_reg ? "Off" : "On");
}
static int pmc_core_ppfear_show(struct seq_file *s, void *unused)
{
struct pmc_dev *pmcdev = s->private;
- unsigned int i;
+ unsigned int pmc_idx;
- for (i = 0; i < ARRAY_SIZE(pmcdev->pmcs); ++i) {
- struct pmc *pmc = pmcdev->pmcs[i];
+ for (pmc_idx = 0; pmc_idx < ARRAY_SIZE(pmcdev->pmcs); ++pmc_idx) {
+ struct pmc *pmc = pmcdev->pmcs[pmc_idx];
const struct pmc_bit_map **maps;
u8 pf_regs[PPFEAR_MAX_NUM_ENTRIES];
unsigned int index, iter, idx, ip = 0;
@@ -342,7 +343,7 @@ static int pmc_core_ppfear_show(struct seq_file *s, void *unused)
for (idx = 0; maps[idx]; idx++) {
for (index = 0; maps[idx][index].name &&
index < pmc->map->ppfear_buckets * 8; ip++, index++)
- pmc_core_display_map(s, index, idx, ip, i,
+ pmc_core_display_map(s, index, idx, ip, pmc_idx,
pf_regs[index / 8], maps);
}
}
@@ -471,7 +472,7 @@ int pmc_core_send_ltr_ignore(struct pmc_dev *pmcdev, u32 value, int ignore)
struct pmc *pmc;
const struct pmc_reg_map *map;
u32 reg;
- unsigned int pmc_index;
+ unsigned int pmc_idx;
int ltr_index;
ltr_index = value;
@@ -479,8 +480,8 @@ int pmc_core_send_ltr_ignore(struct pmc_dev *pmcdev, u32 value, int ignore)
* is based on the contiguous indexes from ltr_show output.
* pmc index and ltr index needs to be calculated from it.
*/
- for (pmc_index = 0; pmc_index < ARRAY_SIZE(pmcdev->pmcs) && ltr_index >= 0; pmc_index++) {
- pmc = pmcdev->pmcs[pmc_index];
+ for (pmc_idx = 0; pmc_idx < ARRAY_SIZE(pmcdev->pmcs) && ltr_index >= 0; pmc_idx++) {
+ pmc = pmcdev->pmcs[pmc_idx];
if (!pmc)
continue;
@@ -497,10 +498,10 @@ int pmc_core_send_ltr_ignore(struct pmc_dev *pmcdev, u32 value, int ignore)
ltr_index = ltr_index - (map->ltr_ignore_max + 2) - 1;
}
- if (pmc_index >= ARRAY_SIZE(pmcdev->pmcs) || ltr_index < 0)
+ if (pmc_idx >= ARRAY_SIZE(pmcdev->pmcs) || ltr_index < 0)
return -EINVAL;
- pr_debug("ltr_ignore for pmc%d: ltr_index:%d\n", pmc_index, ltr_index);
+ pr_debug("ltr_ignore for pmc%d: ltr_index:%d\n", pmc_idx, ltr_index);
guard(mutex)(&pmcdev->lock);
@@ -635,14 +636,14 @@ static int pmc_core_ltr_show(struct seq_file *s, void *unused)
u64 decoded_snoop_ltr, decoded_non_snoop_ltr, val;
u32 ltr_raw_data, scale;
u16 snoop_ltr, nonsnoop_ltr;
- unsigned int i, index, ltr_index = 0;
+ unsigned int pmc_idx, index, ltr_index = 0;
- for (i = 0; i < ARRAY_SIZE(pmcdev->pmcs); ++i) {
+ for (pmc_idx = 0; pmc_idx < ARRAY_SIZE(pmcdev->pmcs); ++pmc_idx) {
struct pmc *pmc;
const struct pmc_bit_map *map;
u32 ltr_ign_reg;
- pmc = pmcdev->pmcs[i];
+ pmc = pmcdev->pmcs[pmc_idx];
if (!pmc)
continue;
@@ -676,7 +677,7 @@ static int pmc_core_ltr_show(struct seq_file *s, void *unused)
}
seq_printf(s, "%d\tPMC%d:%-32s\tLTR: RAW: 0x%-16x\tNon-Snoop(ns): %-16llu\tSnoop(ns): %-16llu\tLTR_IGNORE: %d\n",
- ltr_index, i, map[index].name, ltr_raw_data,
+ ltr_index, pmc_idx, map[index].name, ltr_raw_data,
decoded_non_snoop_ltr,
decoded_snoop_ltr, ltr_ign_data);
ltr_index++;
@@ -689,15 +690,15 @@ DEFINE_SHOW_ATTRIBUTE(pmc_core_ltr);
static int pmc_core_s0ix_blocker_show(struct seq_file *s, void *unused)
{
struct pmc_dev *pmcdev = s->private;
- unsigned int pmcidx;
+ unsigned int pmc_idx;
- for (pmcidx = 0; pmcidx < ARRAY_SIZE(pmcdev->pmcs); pmcidx++) {
+ for (pmc_idx = 0; pmc_idx < ARRAY_SIZE(pmcdev->pmcs); pmc_idx++) {
const struct pmc_bit_map **maps;
unsigned int arr_size, r_idx;
u32 offset, counter;
struct pmc *pmc;
- pmc = pmcdev->pmcs[pmcidx];
+ pmc = pmcdev->pmcs[pmc_idx];
if (!pmc)
continue;
maps = pmc->map->s0ix_blocker_maps;
@@ -711,7 +712,7 @@ static int pmc_core_s0ix_blocker_show(struct seq_file *s, void *unused)
if (!map->blk)
continue;
counter = pmc_core_reg_read(pmc, offset);
- seq_printf(s, "PMC%d:%-30s %-30d\n", pmcidx,
+ seq_printf(s, "PMC%d:%-30s %-30d\n", pmc_idx,
map->name, counter);
offset += map->blk * S0IX_BLK_SIZE;
}
@@ -723,13 +724,13 @@ DEFINE_SHOW_ATTRIBUTE(pmc_core_s0ix_blocker);
static void pmc_core_ltr_ignore_all(struct pmc_dev *pmcdev)
{
- unsigned int i;
+ unsigned int pmc_idx;
- for (i = 0; i < ARRAY_SIZE(pmcdev->pmcs); i++) {
+ for (pmc_idx = 0; pmc_idx < ARRAY_SIZE(pmcdev->pmcs); pmc_idx++) {
struct pmc *pmc;
u32 ltr_ign;
- pmc = pmcdev->pmcs[i];
+ pmc = pmcdev->pmcs[pmc_idx];
if (!pmc)
continue;
@@ -750,12 +751,12 @@ static void pmc_core_ltr_ignore_all(struct pmc_dev *pmcdev)
static void pmc_core_ltr_restore_all(struct pmc_dev *pmcdev)
{
- unsigned int i;
+ unsigned int pmc_idx;
- for (i = 0; i < ARRAY_SIZE(pmcdev->pmcs); i++) {
+ for (pmc_idx = 0; pmc_idx < ARRAY_SIZE(pmcdev->pmcs); pmc_idx++) {
struct pmc *pmc;
- pmc = pmcdev->pmcs[i];
+ pmc = pmcdev->pmcs[pmc_idx];
if (!pmc)
continue;
@@ -794,10 +795,10 @@ DEFINE_SHOW_ATTRIBUTE(pmc_core_substate_res);
static int pmc_core_substate_sts_regs_show(struct seq_file *s, void *unused)
{
struct pmc_dev *pmcdev = s->private;
- unsigned int i;
+ unsigned int pmc_idx;
- for (i = 0; i < ARRAY_SIZE(pmcdev->pmcs); ++i) {
- struct pmc *pmc = pmcdev->pmcs[i];
+ for (pmc_idx = 0; pmc_idx < ARRAY_SIZE(pmcdev->pmcs); ++pmc_idx) {
+ struct pmc *pmc = pmcdev->pmcs[pmc_idx];
const struct pmc_bit_map **maps;
u32 offset;
@@ -805,7 +806,7 @@ static int pmc_core_substate_sts_regs_show(struct seq_file *s, void *unused)
continue;
maps = pmc->map->lpm_sts;
offset = pmc->map->lpm_status_offset;
- pmc_core_lpm_display(pmc, NULL, s, offset, i, "STATUS", maps);
+ pmc_core_lpm_display(pmc, NULL, s, offset, pmc_idx, "STATUS", maps);
}
return 0;
@@ -815,10 +816,10 @@ DEFINE_SHOW_ATTRIBUTE(pmc_core_substate_sts_regs);
static int pmc_core_substate_l_sts_regs_show(struct seq_file *s, void *unused)
{
struct pmc_dev *pmcdev = s->private;
- unsigned int i;
+ unsigned int pmc_idx;
- for (i = 0; i < ARRAY_SIZE(pmcdev->pmcs); ++i) {
- struct pmc *pmc = pmcdev->pmcs[i];
+ for (pmc_idx = 0; pmc_idx < ARRAY_SIZE(pmcdev->pmcs); ++pmc_idx) {
+ struct pmc *pmc = pmcdev->pmcs[pmc_idx];
const struct pmc_bit_map **maps;
u32 offset;
@@ -826,7 +827,7 @@ static int pmc_core_substate_l_sts_regs_show(struct seq_file *s, void *unused)
continue;
maps = pmc->map->lpm_sts;
offset = pmc->map->lpm_live_status_offset;
- pmc_core_lpm_display(pmc, NULL, s, offset, i, "LIVE_STATUS", maps);
+ pmc_core_lpm_display(pmc, NULL, s, offset, pmc_idx, "LIVE_STATUS", maps);
}
return 0;
@@ -919,11 +920,11 @@ static int pmc_core_substate_req_regs_show(struct seq_file *s, void *unused)
u32 sts_offset;
u32 sts_offset_live;
u32 *lpm_req_regs;
- unsigned int mp, pmc_index;
+ unsigned int mp, pmc_idx;
int num_maps;
- for (pmc_index = 0; pmc_index < ARRAY_SIZE(pmcdev->pmcs); ++pmc_index) {
- struct pmc *pmc = pmcdev->pmcs[pmc_index];
+ for (pmc_idx = 0; pmc_idx < ARRAY_SIZE(pmcdev->pmcs); ++pmc_idx) {
+ struct pmc *pmc = pmcdev->pmcs[pmc_idx];
const struct pmc_bit_map **maps;
if (!pmc)
@@ -944,7 +945,7 @@ static int pmc_core_substate_req_regs_show(struct seq_file *s, void *unused)
continue;
/* Display the header */
- pmc_core_substate_req_header_show(s, pmc_index, HEADER_STATUS);
+ pmc_core_substate_req_header_show(s, pmc_idx, HEADER_STATUS);
/* Loop over maps */
for (mp = 0; mp < num_maps; mp++) {
@@ -982,7 +983,7 @@ static int pmc_core_substate_req_regs_show(struct seq_file *s, void *unused)
}
/* Display the element name in the first column */
- seq_printf(s, "pmc%d: %34s |", pmc_index, map[i].name);
+ seq_printf(s, "pmc%d: %34s |", pmc_idx, map[i].name);
/* Loop over the enabled states and display if required */
pmc_for_each_mode(mode, pmcdev) {
@@ -1281,7 +1282,20 @@ int get_primary_reg_base(struct pmc *pmc)
return 0;
}
-void pmc_core_punit_pmt_init(struct pmc_dev *pmcdev, u32 guid)
+static struct telem_endpoint *pmc_core_register_endpoint(struct pci_dev *pcidev, u32 *guids)
+{
+ struct telem_endpoint *ep;
+ unsigned int i;
+
+ for (i = 0; guids[i]; i++) {
+ ep = pmt_telem_find_and_register_endpoint(pcidev, guids[i], 0);
+ if (!IS_ERR(ep))
+ return ep;
+ }
+ return ERR_PTR(-ENODEV);
+}
+
+void pmc_core_punit_pmt_init(struct pmc_dev *pmcdev, u32 *guids)
{
struct telem_endpoint *ep;
struct pci_dev *pcidev;
@@ -1292,7 +1306,7 @@ void pmc_core_punit_pmt_init(struct pmc_dev *pmcdev, u32 guid)
return;
}
- ep = pmt_telem_find_and_register_endpoint(pcidev, guid, 0);
+ ep = pmc_core_register_endpoint(pcidev, guids);
pci_dev_put(pcidev);
if (IS_ERR(ep)) {
dev_err(&pmcdev->pdev->dev,
@@ -1302,8 +1316,6 @@ void pmc_core_punit_pmt_init(struct pmc_dev *pmcdev, u32 guid)
}
pmcdev->punit_ep = ep;
-
- pmcdev->has_die_c6 = true;
pmcdev->die_c6_offset = MTL_PMT_DMU_DIE_C6_OFFSET;
}
@@ -1423,22 +1435,13 @@ static void pmc_core_dbgfs_register(struct pmc_dev *pmcdev, struct pmc_dev_info
pmcdev->dbgfs_dir, primary_pmc, &pmc_core_pson_residency);
}
- if (pmcdev->has_die_c6) {
+ if (pmcdev->punit_ep) {
debugfs_create_file("die_c6_us_show", 0444,
pmcdev->dbgfs_dir, pmcdev,
&pmc_core_die_c6_us_fops);
}
}
-static u32 pmc_core_find_guid(struct pmc_info *list, const struct pmc_reg_map *map)
-{
- for (; list->map; ++list)
- if (list->map == map)
- return list->guid;
-
- return 0;
-}
-
/*
* This function retrieves low power mode requirement data from PMC Low
* Power Mode (LPM) table.
@@ -1553,26 +1556,24 @@ static int pmc_core_get_telem_info(struct pmc_dev *pmcdev, struct pmc_dev_info *
{
struct pci_dev *pcidev __free(pci_dev_put) = NULL;
struct telem_endpoint *ep;
- unsigned int i;
- u32 guid;
+ unsigned int pmc_idx;
int ret;
pcidev = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(20, pmc_dev_info->pci_func));
if (!pcidev)
return -ENODEV;
- for (i = 0; i < ARRAY_SIZE(pmcdev->pmcs); ++i) {
+ for (pmc_idx = 0; pmc_idx < ARRAY_SIZE(pmcdev->pmcs); ++pmc_idx) {
struct pmc *pmc;
- pmc = pmcdev->pmcs[i];
+ pmc = pmcdev->pmcs[pmc_idx];
if (!pmc)
continue;
- guid = pmc_core_find_guid(pmcdev->regmap_list, pmc->map);
- if (!guid)
+ if (!pmc->map->lpm_req_guid)
return -ENXIO;
- ep = pmt_telem_find_and_register_endpoint(pcidev, guid, 0);
+ ep = pmt_telem_find_and_register_endpoint(pcidev, pmc->map->lpm_req_guid, 0);
if (IS_ERR(ep)) {
dev_dbg(&pmcdev->pdev->dev, "couldn't get telem endpoint %pe", ep);
return -EPROBE_DEFER;
@@ -1596,7 +1597,7 @@ static const struct pmc_reg_map *pmc_core_find_regmap(struct pmc_info *list, u16
return NULL;
}
-static int pmc_core_pmc_add(struct pmc_dev *pmcdev, unsigned int pmc_index)
+static int pmc_core_pmc_add(struct pmc_dev *pmcdev, unsigned int pmc_idx)
{
struct pmc_ssram_telemetry pmc_ssram_telemetry;
@@ -1604,7 +1605,7 @@ static int pmc_core_pmc_add(struct pmc_dev *pmcdev, unsigned int pmc_index)
struct pmc *pmc;
int ret;
- ret = pmc_ssram_telemetry_get_pmc_info(pmc_index, &pmc_ssram_telemetry);
+ ret = pmc_ssram_telemetry_get_pmc_info(pmc_idx, &pmc_ssram_telemetry);
if (ret)
return ret;
@@ -1612,7 +1613,7 @@ static int pmc_core_pmc_add(struct pmc_dev *pmcdev, unsigned int pmc_index)
if (!map)
return -ENODEV;
- pmc = pmcdev->pmcs[pmc_index];
+ pmc = pmcdev->pmcs[pmc_idx];
/* Memory for primary PMC has been allocated */
if (!pmc) {
pmc = devm_kzalloc(&pmcdev->pdev->dev, sizeof(*pmc), GFP_KERNEL);
@@ -1629,7 +1630,7 @@ static int pmc_core_pmc_add(struct pmc_dev *pmcdev, unsigned int pmc_index)
return -ENOMEM;
}
- pmcdev->pmcs[pmc_index] = pmc;
+ pmcdev->pmcs[pmc_idx] = pmc;
return 0;
}
@@ -1689,8 +1690,8 @@ int generic_core_init(struct pmc_dev *pmcdev, struct pmc_dev_info *pmc_dev_info)
}
pmc_core_get_low_power_modes(pmcdev);
- if (pmc_dev_info->dmu_guid)
- pmc_core_punit_pmt_init(pmcdev, pmc_dev_info->dmu_guid);
+ if (pmc_dev_info->dmu_guids)
+ pmc_core_punit_pmt_init(pmcdev, pmc_dev_info->dmu_guids);
if (ssram) {
ret = pmc_core_get_telem_info(pmcdev, pmc_dev_info);
@@ -1701,8 +1702,8 @@ int generic_core_init(struct pmc_dev *pmcdev, struct pmc_dev_info *pmc_dev_info)
return 0;
unmap_regbase:
- for (unsigned int i = 0; i < ARRAY_SIZE(pmcdev->pmcs); ++i) {
- struct pmc *pmc = pmcdev->pmcs[i];
+ for (unsigned int pmc_idx = 0; pmc_idx < ARRAY_SIZE(pmcdev->pmcs); ++pmc_idx) {
+ struct pmc *pmc = pmcdev->pmcs[pmc_idx];
if (pmc && pmc->regbase)
iounmap(pmc->regbase);
@@ -1795,10 +1796,10 @@ static void pmc_core_do_dmi_quirks(struct pmc *pmc)
static void pmc_core_clean_structure(struct platform_device *pdev)
{
struct pmc_dev *pmcdev = platform_get_drvdata(pdev);
- unsigned int i;
+ unsigned int pmc_idx;
- for (i = 0; i < ARRAY_SIZE(pmcdev->pmcs); ++i) {
- struct pmc *pmc = pmcdev->pmcs[i];
+ for (pmc_idx = 0; pmc_idx < ARRAY_SIZE(pmcdev->pmcs); ++pmc_idx) {
+ struct pmc *pmc = pmcdev->pmcs[pmc_idx];
if (pmc && pmc->regbase)
iounmap(pmc->regbase);
@@ -1958,7 +1959,7 @@ int pmc_core_resume_common(struct pmc_dev *pmcdev)
struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
const struct pmc_bit_map **maps = pmc->map->lpm_sts;
int offset = pmc->map->lpm_status_offset;
- unsigned int i;
+ unsigned int pmc_idx, i;
/* Check if the suspend used S0ix */
if (pm_suspend_via_firmware())
@@ -1996,13 +1997,13 @@ int pmc_core_resume_common(struct pmc_dev *pmcdev)
if (pmc->map->slps0_dbg_maps)
pmc_core_slps0_display(pmc, dev, NULL);
- for (i = 0; i < ARRAY_SIZE(pmcdev->pmcs); ++i) {
- struct pmc *pmc = pmcdev->pmcs[i];
+ for (pmc_idx = 0; pmc_idx < ARRAY_SIZE(pmcdev->pmcs); ++pmc_idx) {
+ struct pmc *pmc = pmcdev->pmcs[pmc_idx];
if (!pmc)
continue;
if (pmc->map->lpm_sts)
- pmc_core_lpm_display(pmc, dev, NULL, offset, i, "STATUS", maps);
+ pmc_core_lpm_display(pmc, dev, NULL, offset, pmc_idx, "STATUS", maps);
}
return 0;
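The GUID rework above replaces a single dmu_guid with a zero-terminated list that pmc_core_register_endpoint() walks until one GUID yields a telemetry endpoint. A hypothetical platform carrying a primary and a fallback GUID would follow the same convention as the ARL entries:

/* Sketch: zero-terminated DMU GUID list, tried in order at init. */
static u32 EXAMPLE_PMT_DMU_GUIDS[] = { 0x1A06A102, 0x1A06A101, 0x0 };

struct pmc_dev_info example_pmc_dev = {
	.pci_func	= 2,
	.dmu_guids	= EXAMPLE_PMT_DMU_GUIDS,
	/* remaining fields as for the real platforms above */
};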
diff --git a/drivers/platform/x86/intel/pmc/core.h b/drivers/platform/x86/intel/pmc/core.h
index f4dadb696a31..272fb4f57f34 100644
--- a/drivers/platform/x86/intel/pmc/core.h
+++ b/drivers/platform/x86/intel/pmc/core.h
@@ -282,7 +282,8 @@ enum ppfear_regs {
/* Die C6 from PUNIT telemetry */
#define MTL_PMT_DMU_DIE_C6_OFFSET 15
#define MTL_PMT_DMU_GUID 0x1A067102
-#define ARL_PMT_DMU_GUID 0x1A06A000
+#define ARL_PMT_DMU_GUID 0x1A06A102
+#define ARL_H_PMT_DMU_GUID 0x1A06A101
#define LNL_PMC_MMIO_REG_LEN 0x2708
#define LNL_PMC_LTR_OSSE 0x1B88
@@ -303,6 +304,8 @@ enum ppfear_regs {
/* Wildcat Lake */
#define WCL_PMC_LTR_RESERVED 0x1B64
#define WCL_PCD_PMC_MMIO_REG_LEN 0x3178
+#define WCL_NUM_S0IX_BLOCKER 94
+#define WCL_BLK_REQ_OFFSET 50
/* SSRAM PMC Device ID */
/* LNL */
@@ -355,6 +358,7 @@ struct pmc_bit_map {
* @s0ix_blocker_offset PWRMBASE offset to S0ix blocker counter
* @num_s0ix_blocker: Number of S0ix blockers
* @blocker_req_offset: Telemetry offset to S0ix blocker low power mode substate requirement table
+ * @lpm_req_guid: Telemetry GUID to read low power mode substate requirement table
*
* Each PCH has unique set of register offsets and bit indexes. This structure
* captures them to have a common implementation.
@@ -396,6 +400,8 @@ struct pmc_reg_map {
const u8 *lpm_reg_index;
const u32 pson_residency_offset;
const u32 pson_residency_counter_step;
+ /* GUID for telemetry regions */
+ const u32 lpm_req_guid;
};
/**
@@ -405,7 +411,6 @@ struct pmc_reg_map {
* specific attributes
*/
struct pmc_info {
- u32 guid;
u16 devid;
const struct pmc_reg_map *map;
};
@@ -465,7 +470,6 @@ struct pmc_dev {
u64 *pkgc_res_cnt;
u8 num_of_pkgc;
- bool has_die_c6;
u32 die_c6_offset;
struct telem_endpoint *punit_ep;
struct pmc_info *regmap_list;
@@ -481,7 +485,7 @@ enum pmc_index {
/**
* struct pmc_dev_info - Structure to keep PMC device info
* @pci_func: Function number of the primary PMC
- * @dmu_guid: Die Management Unit GUID
+ * @dmu_guids: List of Die Management Unit GUIDs, terminated with 0
* @regmap_list: Pointer to a list of pmc_info structure that could be
* available for the platform. When set, this field implies
* SSRAM support.
@@ -495,7 +499,7 @@ enum pmc_index {
*/
struct pmc_dev_info {
u8 pci_func;
- u32 dmu_guid;
+ u32 *dmu_guids;
struct pmc_info *regmap_list;
const struct pmc_reg_map *map;
const struct file_operations *sub_req_show;
@@ -532,7 +536,7 @@ int pmc_core_send_ltr_ignore(struct pmc_dev *pmcdev, u32 value, int ignore);
int pmc_core_resume_common(struct pmc_dev *pmcdev);
int get_primary_reg_base(struct pmc *pmc);
void pmc_core_get_low_power_modes(struct pmc_dev *pmcdev);
-void pmc_core_punit_pmt_init(struct pmc_dev *pmcdev, u32 guid);
+void pmc_core_punit_pmt_init(struct pmc_dev *pmcdev, u32 *guids);
void pmc_core_set_device_d3(unsigned int device);
int generic_core_init(struct pmc_dev *pmcdev, struct pmc_dev_info *pmc_dev_info);
diff --git a/drivers/platform/x86/intel/pmc/lnl.c b/drivers/platform/x86/intel/pmc/lnl.c
index 6fa027e7071f..1cd81ee54dcf 100644
--- a/drivers/platform/x86/intel/pmc/lnl.c
+++ b/drivers/platform/x86/intel/pmc/lnl.c
@@ -533,11 +533,11 @@ static const struct pmc_reg_map lnl_socm_reg_map = {
.s0ix_blocker_maps = lnl_blk_maps,
.s0ix_blocker_offset = LNL_S0IX_BLOCKER_OFFSET,
.lpm_reg_index = LNL_LPM_REG_INDEX,
+ .lpm_req_guid = SOCM_LPM_REQ_GUID,
};
static struct pmc_info lnl_pmc_info_list[] = {
{
- .guid = SOCM_LPM_REQ_GUID,
.devid = PMC_DEVID_LNL_SOCM,
.map = &lnl_socm_reg_map,
},
diff --git a/drivers/platform/x86/intel/pmc/mtl.c b/drivers/platform/x86/intel/pmc/mtl.c
index 0b87e10f864e..57508cbf9cd4 100644
--- a/drivers/platform/x86/intel/pmc/mtl.c
+++ b/drivers/platform/x86/intel/pmc/mtl.c
@@ -473,6 +473,7 @@ const struct pmc_reg_map mtl_socm_reg_map = {
.lpm_status_offset = MTL_LPM_STATUS_OFFSET,
.lpm_live_status_offset = MTL_LPM_LIVE_STATUS_OFFSET,
.lpm_reg_index = MTL_LPM_REG_INDEX,
+ .lpm_req_guid = SOCP_LPM_REQ_GUID,
};
static const struct pmc_bit_map mtl_ioep_pfear_map[] = {
@@ -797,6 +798,7 @@ const struct pmc_reg_map mtl_ioep_reg_map = {
.lpm_en_offset = MTL_LPM_EN_OFFSET,
.lpm_sts_latch_en_offset = MTL_LPM_STATUS_LATCH_EN_OFFSET,
.lpm_reg_index = MTL_LPM_REG_INDEX,
+ .lpm_req_guid = IOEP_LPM_REQ_GUID,
};
static const struct pmc_bit_map mtl_ioem_pfear_map[] = {
@@ -944,21 +946,19 @@ static const struct pmc_reg_map mtl_ioem_reg_map = {
.lpm_res_counter_step_x2 = TGL_PMC_LPM_RES_COUNTER_STEP_X2,
.lpm_residency_offset = MTL_LPM_RESIDENCY_OFFSET,
.lpm_reg_index = MTL_LPM_REG_INDEX,
+ .lpm_req_guid = IOEM_LPM_REQ_GUID,
};
static struct pmc_info mtl_pmc_info_list[] = {
{
- .guid = SOCP_LPM_REQ_GUID,
.devid = PMC_DEVID_MTL_SOCM,
.map = &mtl_socm_reg_map,
},
{
- .guid = IOEP_LPM_REQ_GUID,
.devid = PMC_DEVID_MTL_IOEP,
.map = &mtl_ioep_reg_map,
},
{
- .guid = IOEM_LPM_REQ_GUID,
.devid = PMC_DEVID_MTL_IOEM,
.map = &mtl_ioem_reg_map
},
@@ -992,9 +992,10 @@ static int mtl_core_init(struct pmc_dev *pmcdev, struct pmc_dev_info *pmc_dev_in
return generic_core_init(pmcdev, pmc_dev_info);
}
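+/* DMU telemetry GUID list, terminated with a 0 entry */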
+static u32 mtl_pmt_dmu_guids[] = { MTL_PMT_DMU_GUID, 0x0 };
struct pmc_dev_info mtl_pmc_dev = {
.pci_func = 2,
- .dmu_guid = MTL_PMT_DMU_GUID,
+ .dmu_guids = mtl_pmt_dmu_guids,
.regmap_list = mtl_pmc_info_list,
.map = &mtl_socm_reg_map,
.sub_req_show = &pmc_core_substate_req_regs_fops,
diff --git a/drivers/platform/x86/intel/pmc/ptl.c b/drivers/platform/x86/intel/pmc/ptl.c
index 1b35b84e06fa..1f48e2bbc699 100644
--- a/drivers/platform/x86/intel/pmc/ptl.c
+++ b/drivers/platform/x86/intel/pmc/ptl.c
@@ -528,16 +528,15 @@ static const struct pmc_reg_map ptl_pcdp_reg_map = {
.s0ix_blocker_offset = LNL_S0IX_BLOCKER_OFFSET,
.num_s0ix_blocker = PTL_NUM_S0IX_BLOCKER,
.blocker_req_offset = PTL_BLK_REQ_OFFSET,
+ .lpm_req_guid = PCDP_LPM_REQ_GUID,
};
static struct pmc_info ptl_pmc_info_list[] = {
{
- .guid = PCDP_LPM_REQ_GUID,
.devid = PMC_DEVID_PTL_PCDH,
.map = &ptl_pcdp_reg_map,
},
{
- .guid = PCDP_LPM_REQ_GUID,
.devid = PMC_DEVID_PTL_PCDP,
.map = &ptl_pcdp_reg_map,
},
diff --git a/drivers/platform/x86/intel/pmc/wcl.c b/drivers/platform/x86/intel/pmc/wcl.c
index 85e90a639e65..a45707e6364f 100644
--- a/drivers/platform/x86/intel/pmc/wcl.c
+++ b/drivers/platform/x86/intel/pmc/wcl.c
@@ -11,6 +11,9 @@
#include "core.h"
+/* PMC SSRAM PMT Telemetry GUIDS */
+#define PCDN_LPM_REQ_GUID 0x33747648
+
static const struct pmc_bit_map wcl_pcdn_pfear_map[] = {
{"PMC_0", BIT(0)},
{"FUSE_OSSE", BIT(1)},
@@ -453,6 +456,17 @@ static const struct pmc_reg_map wcl_pcdn_reg_map = {
.lpm_live_status_offset = MTL_LPM_LIVE_STATUS_OFFSET,
.s0ix_blocker_maps = wcl_pcdn_blk_maps,
.s0ix_blocker_offset = LNL_S0IX_BLOCKER_OFFSET,
+ .num_s0ix_blocker = WCL_NUM_S0IX_BLOCKER,
+ .blocker_req_offset = WCL_BLK_REQ_OFFSET,
+ .lpm_req_guid = PCDN_LPM_REQ_GUID,
+};
+
+static struct pmc_info wcl_pmc_info_list[] = {
+ {
+ .devid = PMC_DEVID_WCL_PCDN,
+ .map = &wcl_pcdn_reg_map,
+ },
+ {}
};
#define WCL_NPU_PCI_DEV 0xfd3e
@@ -479,8 +493,12 @@ static int wcl_core_init(struct pmc_dev *pmcdev, struct pmc_dev_info *pmc_dev_in
}
struct pmc_dev_info wcl_pmc_dev = {
+ .pci_func = 2,
+ .regmap_list = wcl_pmc_info_list,
.map = &wcl_pcdn_reg_map,
+ .sub_req_show = &pmc_core_substate_blk_req_fops,
.suspend = cnl_suspend,
.resume = wcl_resume,
.init = wcl_core_init,
+ .sub_req = pmc_core_pmt_get_blk_sub_req,
};
diff --git a/drivers/platform/x86/intel/vsec.c b/drivers/platform/x86/intel/vsec.c
index f66f0ce8559b..ecfc7703f201 100644
--- a/drivers/platform/x86/intel/vsec.c
+++ b/drivers/platform/x86/intel/vsec.c
@@ -765,6 +765,7 @@ static const struct intel_vsec_platform_info lnl_info = {
#define PCI_DEVICE_ID_INTEL_VSEC_TGL 0x9a0d
#define PCI_DEVICE_ID_INTEL_VSEC_LNL_M 0x647d
#define PCI_DEVICE_ID_INTEL_VSEC_PTL 0xb07d
+#define PCI_DEVICE_ID_INTEL_VSEC_WCL 0xfd7d
static const struct pci_device_id intel_vsec_pci_ids[] = {
{ PCI_DEVICE_DATA(INTEL, VSEC_ADL, &tgl_info) },
{ PCI_DEVICE_DATA(INTEL, VSEC_DG1, &dg1_info) },
@@ -776,6 +777,7 @@ static const struct pci_device_id intel_vsec_pci_ids[] = {
{ PCI_DEVICE_DATA(INTEL, VSEC_TGL, &tgl_info) },
{ PCI_DEVICE_DATA(INTEL, VSEC_LNL_M, &lnl_info) },
{ PCI_DEVICE_DATA(INTEL, VSEC_PTL, &mtl_info) },
+ { PCI_DEVICE_DATA(INTEL, VSEC_WCL, &mtl_info) },
{ }
};
MODULE_DEVICE_TABLE(pci, intel_vsec_pci_ids);
diff --git a/drivers/platform/x86/lenovo/ideapad-laptop.c b/drivers/platform/x86/lenovo/ideapad-laptop.c
index fcebfbaf0460..5171a077f62c 100644
--- a/drivers/platform/x86/lenovo/ideapad-laptop.c
+++ b/drivers/platform/x86/lenovo/ideapad-laptop.c
@@ -31,6 +31,7 @@
#include <linux/power_supply.h>
#include <linux/rfkill.h>
#include <linux/seq_file.h>
+#include <linux/string_choices.h>
#include <linux/sysfs.h>
#include <linux/types.h>
#include <linux/wmi.h>
@@ -62,13 +63,27 @@ enum {
CFG_OSD_CAM_BIT = 31,
};
+/*
+ * There are two charge modes supported by the GBMD/SBMC interface:
+ * - "Rapid Charge": increase power to speed up charging
+ * - "Conservation Mode": stop charging at 60-80% (depends on model)
+ *
+ * The interface doesn't prohibit enabling both modes at the same time.
+ * However, doing so is essentially meaningless, and the manufacturer utilities
+ * on Windows always make them mutually exclusive.
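+ * Before enabling one of the modes, this driver therefore disables the other.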
+ */
+
enum {
+ GBMD_RAPID_CHARGE_STATE_BIT = 2,
GBMD_CONSERVATION_STATE_BIT = 5,
+ GBMD_RAPID_CHARGE_SUPPORTED_BIT = 17,
};
enum {
SBMC_CONSERVATION_ON = 3,
SBMC_CONSERVATION_OFF = 5,
+ SBMC_RAPID_CHARGE_ON = 7,
+ SBMC_RAPID_CHARGE_OFF = 8,
};
enum {
@@ -158,6 +173,7 @@ struct ideapad_rfk_priv {
struct ideapad_private {
struct acpi_device *adev;
struct mutex vpc_mutex; /* protects the VPC calls */
+ struct mutex gbmd_sbmc_mutex; /* protects GBMD/SBMC calls */
struct rfkill *rfk[IDEAPAD_RFKILL_DEV_NUM];
struct ideapad_rfk_priv rfk_priv[IDEAPAD_RFKILL_DEV_NUM];
struct platform_device *platform_device;
@@ -166,9 +182,11 @@ struct ideapad_private {
struct ideapad_dytc_priv *dytc;
struct dentry *debug;
struct acpi_battery_hook battery_hook;
+ const struct power_supply_ext *battery_ext;
unsigned long cfg;
unsigned long r_touchpad_val;
struct {
+ bool rapid_charge : 1;
bool conservation_mode : 1;
bool dytc : 1;
bool fan_mode : 1;
@@ -455,37 +473,40 @@ static int debugfs_status_show(struct seq_file *s, void *data)
struct ideapad_private *priv = s->private;
unsigned long value;
- guard(mutex)(&priv->vpc_mutex);
-
- if (!read_ec_data(priv->adev->handle, VPCCMD_R_BL_MAX, &value))
- seq_printf(s, "Backlight max: %lu\n", value);
- if (!read_ec_data(priv->adev->handle, VPCCMD_R_BL, &value))
- seq_printf(s, "Backlight now: %lu\n", value);
- if (!read_ec_data(priv->adev->handle, VPCCMD_R_BL_POWER, &value))
- seq_printf(s, "BL power value: %s (%lu)\n", value ? "on" : "off", value);
-
- seq_puts(s, "=====================\n");
-
- if (!read_ec_data(priv->adev->handle, VPCCMD_R_RF, &value))
- seq_printf(s, "Radio status: %s (%lu)\n", value ? "on" : "off", value);
- if (!read_ec_data(priv->adev->handle, VPCCMD_R_WIFI, &value))
- seq_printf(s, "Wifi status: %s (%lu)\n", value ? "on" : "off", value);
- if (!read_ec_data(priv->adev->handle, VPCCMD_R_BT, &value))
- seq_printf(s, "BT status: %s (%lu)\n", value ? "on" : "off", value);
- if (!read_ec_data(priv->adev->handle, VPCCMD_R_3G, &value))
- seq_printf(s, "3G status: %s (%lu)\n", value ? "on" : "off", value);
+ scoped_guard(mutex, &priv->vpc_mutex) {
+ if (!read_ec_data(priv->adev->handle, VPCCMD_R_BL_MAX, &value))
+ seq_printf(s, "Backlight max: %lu\n", value);
+ if (!read_ec_data(priv->adev->handle, VPCCMD_R_BL, &value))
+ seq_printf(s, "Backlight now: %lu\n", value);
+ if (!read_ec_data(priv->adev->handle, VPCCMD_R_BL_POWER, &value))
+ seq_printf(s, "BL power value: %s (%lu)\n", str_on_off(value), value);
+
+ seq_puts(s, "=====================\n");
+
+ if (!read_ec_data(priv->adev->handle, VPCCMD_R_RF, &value))
+ seq_printf(s, "Radio status: %s (%lu)\n", str_on_off(value), value);
+ if (!read_ec_data(priv->adev->handle, VPCCMD_R_WIFI, &value))
+ seq_printf(s, "Wifi status: %s (%lu)\n", str_on_off(value), value);
+ if (!read_ec_data(priv->adev->handle, VPCCMD_R_BT, &value))
+ seq_printf(s, "BT status: %s (%lu)\n", str_on_off(value), value);
+ if (!read_ec_data(priv->adev->handle, VPCCMD_R_3G, &value))
+ seq_printf(s, "3G status: %s (%lu)\n", str_on_off(value), value);
+
+ seq_puts(s, "=====================\n");
+
+ if (!read_ec_data(priv->adev->handle, VPCCMD_R_TOUCHPAD, &value))
+ seq_printf(s, "Touchpad status: %s (%lu)\n", str_on_off(value), value);
+ if (!read_ec_data(priv->adev->handle, VPCCMD_R_CAMERA, &value))
+ seq_printf(s, "Camera status: %s (%lu)\n", str_on_off(value), value);
+ }
seq_puts(s, "=====================\n");
- if (!read_ec_data(priv->adev->handle, VPCCMD_R_TOUCHPAD, &value))
- seq_printf(s, "Touchpad status: %s (%lu)\n", value ? "on" : "off", value);
- if (!read_ec_data(priv->adev->handle, VPCCMD_R_CAMERA, &value))
- seq_printf(s, "Camera status: %s (%lu)\n", value ? "on" : "off", value);
-
- seq_puts(s, "=====================\n");
+ scoped_guard(mutex, &priv->gbmd_sbmc_mutex) {
+ if (!eval_gbmd(priv->adev->handle, &value))
+ seq_printf(s, "GBMD: %#010lx\n", value);
+ }
- if (!eval_gbmd(priv->adev->handle, &value))
- seq_printf(s, "GBMD: %#010lx\n", value);
if (!eval_hals(priv->adev->handle, &value))
seq_printf(s, "HALS: %#010lx\n", value);
@@ -622,10 +643,16 @@ static ssize_t conservation_mode_show(struct device *dev,
show_conservation_mode_deprecation_warning(dev);
- err = eval_gbmd(priv->adev->handle, &result);
- if (err)
- return err;
+ scoped_guard(mutex, &priv->gbmd_sbmc_mutex) {
+ err = eval_gbmd(priv->adev->handle, &result);
+ if (err)
+ return err;
+ }
+ /*
+ * For backward compatibility, ignore Rapid Charge while reporting the
+ * state of Conservation Mode.
+ */
return sysfs_emit(buf, "%d\n", !!test_bit(GBMD_CONSERVATION_STATE_BIT, &result));
}
@@ -643,6 +670,18 @@ static ssize_t conservation_mode_store(struct device *dev,
if (err)
return err;
+ guard(mutex)(&priv->gbmd_sbmc_mutex);
+
+ /*
+ * Prevent mutually exclusive modes from being set at the same time,
+ * but do not disable Rapid Charge while disabling Conservation Mode.
+ */
+ if (priv->features.rapid_charge && state) {
+ err = exec_sbmc(priv->adev->handle, SBMC_RAPID_CHARGE_OFF);
+ if (err)
+ return err;
+ }
+
err = exec_sbmc(priv->adev->handle, state ? SBMC_CONSERVATION_ON : SBMC_CONSERVATION_OFF);
if (err)
return err;
@@ -2007,15 +2046,39 @@ static int ideapad_psy_ext_set_prop(struct power_supply *psy,
const union power_supply_propval *val)
{
struct ideapad_private *priv = ext_data;
+ unsigned long op1, op2;
+ int err;
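+ /* op1 always disables the conflicting mode before op2 applies the requested state */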
switch (val->intval) {
+ case POWER_SUPPLY_CHARGE_TYPE_FAST:
+ if (WARN_ON(!priv->features.rapid_charge))
+ return -EINVAL;
+
+ op1 = SBMC_CONSERVATION_OFF;
+ op2 = SBMC_RAPID_CHARGE_ON;
+ break;
case POWER_SUPPLY_CHARGE_TYPE_LONGLIFE:
- return exec_sbmc(priv->adev->handle, SBMC_CONSERVATION_ON);
+ op1 = SBMC_RAPID_CHARGE_OFF;
+ op2 = SBMC_CONSERVATION_ON;
+ break;
case POWER_SUPPLY_CHARGE_TYPE_STANDARD:
- return exec_sbmc(priv->adev->handle, SBMC_CONSERVATION_OFF);
+ op1 = SBMC_RAPID_CHARGE_OFF;
+ op2 = SBMC_CONSERVATION_OFF;
+ break;
default:
return -EINVAL;
}
+
+ guard(mutex)(&priv->gbmd_sbmc_mutex);
+
+ /* Without rapid charge support, op1 is always SBMC_RAPID_CHARGE_OFF, so skip it. */
+ if (priv->features.rapid_charge) {
+ err = exec_sbmc(priv->adev->handle, op1);
+ if (err)
+ return err;
+ }
+
+ return exec_sbmc(priv->adev->handle, op2);
}
static int ideapad_psy_ext_get_prop(struct power_supply *psy,
@@ -2025,14 +2088,29 @@ static int ideapad_psy_ext_get_prop(struct power_supply *psy,
union power_supply_propval *val)
{
struct ideapad_private *priv = ext_data;
+ bool is_rapid_charge, is_conservation;
unsigned long result;
int err;
- err = eval_gbmd(priv->adev->handle, &result);
- if (err)
- return err;
+ scoped_guard(mutex, &priv->gbmd_sbmc_mutex) {
+ err = eval_gbmd(priv->adev->handle, &result);
+ if (err)
+ return err;
+ }
+
+ is_rapid_charge = (priv->features.rapid_charge &&
+ test_bit(GBMD_RAPID_CHARGE_STATE_BIT, &result));
+ is_conservation = test_bit(GBMD_CONSERVATION_STATE_BIT, &result);
+
+ if (unlikely(is_rapid_charge && is_conservation)) {
+ dev_err(&priv->platform_device->dev,
+ "unexpected charge_types: both [Fast] and [Long_Life] are enabled\n");
+ return -EINVAL;
+ }
- if (test_bit(GBMD_CONSERVATION_STATE_BIT, &result))
+ if (is_rapid_charge)
+ val->intval = POWER_SUPPLY_CHARGE_TYPE_FAST;
+ else if (is_conservation)
val->intval = POWER_SUPPLY_CHARGE_TYPE_LONGLIFE;
else
val->intval = POWER_SUPPLY_CHARGE_TYPE_STANDARD;
@@ -2052,29 +2130,42 @@ static const enum power_supply_property ideapad_power_supply_props[] = {
POWER_SUPPLY_PROP_CHARGE_TYPES,
};
-static const struct power_supply_ext ideapad_battery_ext = {
- .name = "ideapad_laptop",
- .properties = ideapad_power_supply_props,
- .num_properties = ARRAY_SIZE(ideapad_power_supply_props),
- .charge_types = (BIT(POWER_SUPPLY_CHARGE_TYPE_STANDARD) |
- BIT(POWER_SUPPLY_CHARGE_TYPE_LONGLIFE)),
- .get_property = ideapad_psy_ext_get_prop,
- .set_property = ideapad_psy_ext_set_prop,
- .property_is_writeable = ideapad_psy_prop_is_writeable,
-};
+#define DEFINE_IDEAPAD_POWER_SUPPLY_EXTENSION(_name, _charge_types) \
+ static const struct power_supply_ext _name = { \
+ .name = "ideapad_laptop", \
+ .properties = ideapad_power_supply_props, \
+ .num_properties = ARRAY_SIZE(ideapad_power_supply_props), \
+ .charge_types = _charge_types, \
+ .get_property = ideapad_psy_ext_get_prop, \
+ .set_property = ideapad_psy_ext_set_prop, \
+ .property_is_writeable = ideapad_psy_prop_is_writeable, \
+ }
+
+DEFINE_IDEAPAD_POWER_SUPPLY_EXTENSION(ideapad_battery_ext_v1,
+ (BIT(POWER_SUPPLY_CHARGE_TYPE_STANDARD) |
+ BIT(POWER_SUPPLY_CHARGE_TYPE_LONGLIFE))
+);
+
+DEFINE_IDEAPAD_POWER_SUPPLY_EXTENSION(ideapad_battery_ext_v2,
+ (BIT(POWER_SUPPLY_CHARGE_TYPE_STANDARD) |
+ BIT(POWER_SUPPLY_CHARGE_TYPE_FAST) |
+ BIT(POWER_SUPPLY_CHARGE_TYPE_LONGLIFE))
+);
static int ideapad_battery_add(struct power_supply *battery, struct acpi_battery_hook *hook)
{
struct ideapad_private *priv = container_of(hook, struct ideapad_private, battery_hook);
- return power_supply_register_extension(battery, &ideapad_battery_ext,
+ return power_supply_register_extension(battery, priv->battery_ext,
&priv->platform_device->dev, priv);
}
static int ideapad_battery_remove(struct power_supply *battery,
struct acpi_battery_hook *hook)
{
- power_supply_unregister_extension(battery, &ideapad_battery_ext);
+ struct ideapad_private *priv = container_of(hook, struct ideapad_private, battery_hook);
+
+ power_supply_unregister_extension(battery, priv->battery_ext);
return 0;
}
@@ -2099,14 +2190,25 @@ static int ideapad_check_features(struct ideapad_private *priv)
priv->features.fan_mode = true;
if (acpi_has_method(handle, "GBMD") && acpi_has_method(handle, "SBMC")) {
- priv->features.conservation_mode = true;
- priv->battery_hook.add_battery = ideapad_battery_add;
- priv->battery_hook.remove_battery = ideapad_battery_remove;
- priv->battery_hook.name = "Ideapad Battery Extension";
-
- err = devm_battery_hook_register(&priv->platform_device->dev, &priv->battery_hook);
- if (err)
- return err;
+ /* No need to take gbmd_sbmc_mutex here, races are impossible during init */
+ if (!eval_gbmd(handle, &val)) {
+ priv->features.conservation_mode = true;
+ priv->features.rapid_charge = test_bit(GBMD_RAPID_CHARGE_SUPPORTED_BIT,
+ &val);
+
+ priv->battery_ext = priv->features.rapid_charge
+ ? &ideapad_battery_ext_v2
+ : &ideapad_battery_ext_v1;
+
+ priv->battery_hook.add_battery = ideapad_battery_add;
+ priv->battery_hook.remove_battery = ideapad_battery_remove;
+ priv->battery_hook.name = "Ideapad Battery Extension";
+
+ err = devm_battery_hook_register(&priv->platform_device->dev,
+ &priv->battery_hook);
+ if (err)
+ return err;
+ }
}
if (acpi_has_method(handle, "DYTC"))
@@ -2292,6 +2394,10 @@ static int ideapad_acpi_add(struct platform_device *pdev)
if (err)
return err;
+ err = devm_mutex_init(&pdev->dev, &priv->gbmd_sbmc_mutex);
+ if (err)
+ return err;
+
err = ideapad_check_features(priv);
if (err)
return err;
diff --git a/drivers/platform/x86/lenovo/wmi-gamezone.c b/drivers/platform/x86/lenovo/wmi-gamezone.c
index 0eb7fe8222f4..381836d29a96 100644
--- a/drivers/platform/x86/lenovo/wmi-gamezone.c
+++ b/drivers/platform/x86/lenovo/wmi-gamezone.c
@@ -171,14 +171,10 @@ static int lwmi_gz_profile_get(struct device *dev,
*profile = PLATFORM_PROFILE_BALANCED;
break;
case LWMI_GZ_THERMAL_MODE_PERFORMANCE:
- if (priv->extreme_supported) {
- *profile = PLATFORM_PROFILE_BALANCED_PERFORMANCE;
- break;
- }
*profile = PLATFORM_PROFILE_PERFORMANCE;
break;
case LWMI_GZ_THERMAL_MODE_EXTREME:
- *profile = PLATFORM_PROFILE_PERFORMANCE;
+ *profile = PLATFORM_PROFILE_MAX_POWER;
break;
case LWMI_GZ_THERMAL_MODE_CUSTOM:
*profile = PLATFORM_PROFILE_CUSTOM;
@@ -218,16 +214,12 @@ static int lwmi_gz_profile_set(struct device *dev,
case PLATFORM_PROFILE_BALANCED:
mode = LWMI_GZ_THERMAL_MODE_BALANCED;
break;
- case PLATFORM_PROFILE_BALANCED_PERFORMANCE:
- mode = LWMI_GZ_THERMAL_MODE_PERFORMANCE;
- break;
case PLATFORM_PROFILE_PERFORMANCE:
- if (priv->extreme_supported) {
- mode = LWMI_GZ_THERMAL_MODE_EXTREME;
- break;
- }
mode = LWMI_GZ_THERMAL_MODE_PERFORMANCE;
break;
+ case PLATFORM_PROFILE_MAX_POWER:
+ mode = LWMI_GZ_THERMAL_MODE_EXTREME;
+ break;
case PLATFORM_PROFILE_CUSTOM:
mode = LWMI_GZ_THERMAL_MODE_CUSTOM;
break;
@@ -274,8 +266,23 @@ static const struct dmi_system_id fwbug_list[] = {
},
.driver_data = &quirk_no_extreme_bug,
},
+ {
+ .ident = "Legion Go 8ASP2",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "Legion Go 8ASP2"),
+ },
+ .driver_data = &quirk_no_extreme_bug,
+ },
+ {
+ .ident = "Legion Go 8AHP2",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "Legion Go 8AHP2"),
+ },
+ .driver_data = &quirk_no_extreme_bug,
+ },
{},
-
};
/**
@@ -338,7 +345,7 @@ static int lwmi_gz_platform_profile_probe(void *drvdata, unsigned long *choices)
priv->extreme_supported = lwmi_gz_extreme_supported(profile_support_ver);
if (priv->extreme_supported)
- set_bit(PLATFORM_PROFILE_BALANCED_PERFORMANCE, choices);
+ set_bit(PLATFORM_PROFILE_MAX_POWER, choices);
return 0;
}
diff --git a/drivers/platform/x86/lg-laptop.c b/drivers/platform/x86/lg-laptop.c
index 6af6cf477c5b..f92e89c75db9 100644
--- a/drivers/platform/x86/lg-laptop.c
+++ b/drivers/platform/x86/lg-laptop.c
@@ -19,6 +19,7 @@
#include <linux/leds.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/string_choices.h>
#include <linux/types.h>
#include <acpi/battery.h>
@@ -42,6 +43,7 @@ MODULE_PARM_DESC(fw_debug, "Enable printing of firmware debug messages");
#define LG_ADDRESS_SPACE_ID 0x8F
#define LG_ADDRESS_SPACE_DEBUG_FLAG_ADR 0x00
+#define LG_ADDRESS_SPACE_HD_AUDIO_POWER_ADDR 0x01
#define LG_ADDRESS_SPACE_FAN_MODE_ADR 0x03
#define LG_ADDRESS_SPACE_DTTM_FLAG_ADR 0x20
@@ -668,6 +670,15 @@ static acpi_status lg_laptop_address_space_write(struct device *dev, acpi_physic
byte = value & 0xFF;
switch (address) {
+ case LG_ADDRESS_SPACE_HD_AUDIO_POWER_ADDR:
+ /*
+ * The HD audio power field is not affected by the DTTM flag,
+ * so we have to manually check fw_debug.
+ */
+ if (fw_debug)
+ dev_dbg(dev, "HD audio power %s\n", str_enabled_disabled(byte));
+
+ return AE_OK;
case LG_ADDRESS_SPACE_FAN_MODE_ADR:
/*
* The fan mode field is not affected by the DTTM flag, so we
diff --git a/drivers/platform/x86/oxpec.c b/drivers/platform/x86/oxpec.c
index 54377b282ff8..144a454103b9 100644
--- a/drivers/platform/x86/oxpec.c
+++ b/drivers/platform/x86/oxpec.c
@@ -1,8 +1,6 @@
// SPDX-License-Identifier: GPL-2.0+
/*
- * Platform driver for OneXPlayer and AOKZOE devices. For the time being,
- * it also exposes fan controls for AYANEO, and OrangePi Handhelds via
- * hwmon sysfs.
+ * Platform driver for OneXPlayer and AOKZOE devices.
*
* Fan control is provided via pwm interface in the range [0-255].
* Old AMD boards use [0-100] as range in the EC, the written value is
@@ -43,14 +41,6 @@ static bool unlock_global_acpi_lock(void)
enum oxp_board {
aok_zoe_a1 = 1,
- aya_neo_2,
- aya_neo_air,
- aya_neo_air_1s,
- aya_neo_air_plus_mendo,
- aya_neo_air_pro,
- aya_neo_flip,
- aya_neo_geek,
- aya_neo_kun,
orange_pi_neo,
oxp_2,
oxp_fly,
@@ -133,62 +123,6 @@ static const struct dmi_system_id dmi_table[] = {
},
{
.matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "AYANEO"),
- DMI_MATCH(DMI_BOARD_NAME, "AYANEO 2"),
- },
- .driver_data = (void *)aya_neo_2,
- },
- {
- .matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "AYANEO"),
- DMI_EXACT_MATCH(DMI_BOARD_NAME, "AIR"),
- },
- .driver_data = (void *)aya_neo_air,
- },
- {
- .matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "AYANEO"),
- DMI_EXACT_MATCH(DMI_BOARD_NAME, "AIR 1S"),
- },
- .driver_data = (void *)aya_neo_air_1s,
- },
- {
- .matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "AYANEO"),
- DMI_EXACT_MATCH(DMI_BOARD_NAME, "AB05-Mendocino"),
- },
- .driver_data = (void *)aya_neo_air_plus_mendo,
- },
- {
- .matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "AYANEO"),
- DMI_EXACT_MATCH(DMI_BOARD_NAME, "AIR Pro"),
- },
- .driver_data = (void *)aya_neo_air_pro,
- },
- {
- .matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "AYANEO"),
- DMI_MATCH(DMI_BOARD_NAME, "FLIP"),
- },
- .driver_data = (void *)aya_neo_flip,
- },
- {
- .matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "AYANEO"),
- DMI_MATCH(DMI_BOARD_NAME, "GEEK"),
- },
- .driver_data = (void *)aya_neo_geek,
- },
- {
- .matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "AYANEO"),
- DMI_EXACT_MATCH(DMI_BOARD_NAME, "KUN"),
- },
- .driver_data = (void *)aya_neo_kun,
- },
- {
- .matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "OrangePi"),
DMI_EXACT_MATCH(DMI_BOARD_NAME, "NEO-01"),
},
@@ -672,13 +606,6 @@ static int oxp_pwm_enable(void)
case orange_pi_neo:
return write_to_ec(ORANGEPI_SENSOR_PWM_ENABLE_REG, PWM_MODE_MANUAL);
case aok_zoe_a1:
- case aya_neo_2:
- case aya_neo_air:
- case aya_neo_air_plus_mendo:
- case aya_neo_air_pro:
- case aya_neo_flip:
- case aya_neo_geek:
- case aya_neo_kun:
case oxp_2:
case oxp_fly:
case oxp_mini_amd:
@@ -699,14 +626,6 @@ static int oxp_pwm_disable(void)
case orange_pi_neo:
return write_to_ec(ORANGEPI_SENSOR_PWM_ENABLE_REG, PWM_MODE_AUTO);
case aok_zoe_a1:
- case aya_neo_2:
- case aya_neo_air:
- case aya_neo_air_1s:
- case aya_neo_air_plus_mendo:
- case aya_neo_air_pro:
- case aya_neo_flip:
- case aya_neo_geek:
- case aya_neo_kun:
case oxp_2:
case oxp_fly:
case oxp_mini_amd:
@@ -727,14 +646,6 @@ static int oxp_pwm_read(long *val)
case orange_pi_neo:
return read_from_ec(ORANGEPI_SENSOR_PWM_ENABLE_REG, 1, val);
case aok_zoe_a1:
- case aya_neo_2:
- case aya_neo_air:
- case aya_neo_air_1s:
- case aya_neo_air_plus_mendo:
- case aya_neo_air_pro:
- case aya_neo_flip:
- case aya_neo_geek:
- case aya_neo_kun:
case oxp_2:
case oxp_fly:
case oxp_mini_amd:
@@ -774,14 +685,6 @@ static int oxp_pwm_fan_speed(long *val)
case oxp_g1_i:
return read_from_ec(OXP_2_SENSOR_FAN_REG, 2, val);
case aok_zoe_a1:
- case aya_neo_2:
- case aya_neo_air:
- case aya_neo_air_1s:
- case aya_neo_air_plus_mendo:
- case aya_neo_air_pro:
- case aya_neo_flip:
- case aya_neo_geek:
- case aya_neo_kun:
case oxp_fly:
case oxp_mini_amd:
case oxp_mini_amd_a07:
@@ -810,14 +713,6 @@ static int oxp_pwm_input_write(long val)
/* scale to range [0-184] */
val = (val * 184) / 255;
return write_to_ec(OXP_SENSOR_PWM_REG, val);
- case aya_neo_2:
- case aya_neo_air:
- case aya_neo_air_1s:
- case aya_neo_air_plus_mendo:
- case aya_neo_air_pro:
- case aya_neo_flip:
- case aya_neo_geek:
- case aya_neo_kun:
case oxp_mini_amd:
case oxp_mini_amd_a07:
/* scale to range [0-100] */
@@ -854,14 +749,6 @@ static int oxp_pwm_input_read(long *val)
/* scale from range [0-184] */
*val = (*val * 255) / 184;
break;
- case aya_neo_2:
- case aya_neo_air:
- case aya_neo_air_1s:
- case aya_neo_air_plus_mendo:
- case aya_neo_air_pro:
- case aya_neo_flip:
- case aya_neo_geek:
- case aya_neo_kun:
case oxp_mini_amd:
case oxp_mini_amd_a07:
ret = read_from_ec(OXP_SENSOR_PWM_REG, 1, val);
diff --git a/drivers/platform/x86/serial-multi-instantiate.c b/drivers/platform/x86/serial-multi-instantiate.c
index db030b0f176a..1a369334f9cb 100644
--- a/drivers/platform/x86/serial-multi-instantiate.c
+++ b/drivers/platform/x86/serial-multi-instantiate.c
@@ -22,6 +22,7 @@
#define IRQ_RESOURCE_GPIO 1
#define IRQ_RESOURCE_APIC 2
#define IRQ_RESOURCE_AUTO 3
+#define IRQ_RESOURCE_OPT BIT(2) /* flag: a missing IRQ is not a fatal error */
enum smi_bus_type {
SMI_I2C,
@@ -64,6 +65,10 @@ static int smi_get_irq(struct platform_device *pdev, struct acpi_device *adev,
dev_dbg(&pdev->dev, "Using platform irq\n");
break;
}
+ if (inst->flags & IRQ_RESOURCE_OPT) {
+ dev_dbg(&pdev->dev, "No irq\n");
+ return 0;
+ }
break;
case IRQ_RESOURCE_GPIO:
ret = acpi_dev_gpio_irq_get(adev, inst->irq_idx);
@@ -386,10 +391,10 @@ static const struct smi_node cs35l57_hda = {
static const struct smi_node tas2781_hda = {
.instances = {
- { "tas2781-hda", IRQ_RESOURCE_AUTO, 0 },
- { "tas2781-hda", IRQ_RESOURCE_AUTO, 0 },
- { "tas2781-hda", IRQ_RESOURCE_AUTO, 0 },
- { "tas2781-hda", IRQ_RESOURCE_AUTO, 0 },
+ { "tas2781-hda", IRQ_RESOURCE_AUTO | IRQ_RESOURCE_OPT, 0 },
+ { "tas2781-hda", IRQ_RESOURCE_AUTO | IRQ_RESOURCE_OPT, 0 },
+ { "tas2781-hda", IRQ_RESOURCE_AUTO | IRQ_RESOURCE_OPT, 0 },
+ { "tas2781-hda", IRQ_RESOURCE_AUTO | IRQ_RESOURCE_OPT, 0 },
{}
},
.bus_type = SMI_AUTO_DETECT,
diff --git a/drivers/platform/x86/uniwill/Kconfig b/drivers/platform/x86/uniwill/Kconfig
new file mode 100644
index 000000000000..d07cc8440188
--- /dev/null
+++ b/drivers/platform/x86/uniwill/Kconfig
@@ -0,0 +1,38 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+# Uniwill X86 Platform Specific Drivers
+#
+
+menuconfig X86_PLATFORM_DRIVERS_UNIWILL
+ bool "Uniwill X86 Platform Specific Device Drivers"
+ depends on X86_PLATFORM_DEVICES
+ help
+ Say Y here to see options for device drivers for various
+ Uniwill x86 platforms, including many OEM laptops originally
+ manufactured by Uniwill.
+ This option alone does not add any kernel code.
+
+ If you say N, all options in this submenu will be skipped and disabled.
+
+if X86_PLATFORM_DRIVERS_UNIWILL
+
+config UNIWILL_LAPTOP
+ tristate "Uniwill Laptop Extras"
+ default m
+ depends on ACPI
+ depends on ACPI_WMI
+ depends on ACPI_BATTERY
+ depends on HWMON
+ depends on INPUT
+ depends on LEDS_CLASS_MULTICOLOR
+ depends on DMI
+ select REGMAP
+ select INPUT_SPARSEKMAP
+ help
+ This driver adds support for various extra features found on Uniwill laptops,
+ like the lightbar, hwmon sensors and hotkeys. It also supports many OEM laptops
+ originally manufactured by Uniwill.
+
+ If you have such a laptop, say Y or M here.
+
+endif
diff --git a/drivers/platform/x86/uniwill/Makefile b/drivers/platform/x86/uniwill/Makefile
new file mode 100644
index 000000000000..05cd1747a240
--- /dev/null
+++ b/drivers/platform/x86/uniwill/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+# Makefile for linux/drivers/platform/x86/uniwill
+# Uniwill X86 Platform Specific Drivers
+#
+
+obj-$(CONFIG_UNIWILL_LAPTOP) += uniwill-laptop.o
+uniwill-laptop-y := uniwill-acpi.o uniwill-wmi.o
diff --git a/drivers/platform/x86/uniwill/uniwill-acpi.c b/drivers/platform/x86/uniwill/uniwill-acpi.c
new file mode 100644
index 000000000000..bd7e63dd5181
--- /dev/null
+++ b/drivers/platform/x86/uniwill/uniwill-acpi.c
@@ -0,0 +1,1912 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Linux driver for Uniwill notebooks.
+ *
+ * Special thanks go to Pőcze Barnabás, Christoffer Sandberg and Werner Sembach
+ * for supporting the development of this driver either through prior work or
+ * by answering questions regarding the underlying ACPI and WMI interfaces.
+ *
+ * Copyright (C) 2025 Armin Wolf <W_Armin@gmx.de>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/acpi.h>
+#include <linux/array_size.h>
+#include <linux/bits.h>
+#include <linux/bitfield.h>
+#include <linux/cleanup.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/device/driver.h>
+#include <linux/dmi.h>
+#include <linux/errno.h>
+#include <linux/fixp-arith.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/init.h>
+#include <linux/input.h>
+#include <linux/input/sparse-keymap.h>
+#include <linux/kernel.h>
+#include <linux/kstrtox.h>
+#include <linux/leds.h>
+#include <linux/led-class-multicolor.h>
+#include <linux/limits.h>
+#include <linux/list.h>
+#include <linux/minmax.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/notifier.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/printk.h>
+#include <linux/regmap.h>
+#include <linux/string.h>
+#include <linux/sysfs.h>
+#include <linux/types.h>
+#include <linux/units.h>
+
+#include <acpi/battery.h>
+
+#include "uniwill-wmi.h"
+
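+/* Multi-byte EC values are split across consecutive byte registers ("_1"/"_2") */
+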
+#define EC_ADDR_BAT_POWER_UNIT_1 0x0400
+
+#define EC_ADDR_BAT_POWER_UNIT_2 0x0401
+
+#define EC_ADDR_BAT_DESIGN_CAPACITY_1 0x0402
+
+#define EC_ADDR_BAT_DESIGN_CAPACITY_2 0x0403
+
+#define EC_ADDR_BAT_FULL_CAPACITY_1 0x0404
+
+#define EC_ADDR_BAT_FULL_CAPACITY_2 0x0405
+
+#define EC_ADDR_BAT_DESIGN_VOLTAGE_1 0x0408
+
+#define EC_ADDR_BAT_DESIGN_VOLTAGE_2 0x0409
+
+#define EC_ADDR_BAT_STATUS_1 0x0432
+#define BAT_DISCHARGING BIT(0)
+
+#define EC_ADDR_BAT_STATUS_2 0x0433
+
+#define EC_ADDR_BAT_CURRENT_1 0x0434
+
+#define EC_ADDR_BAT_CURRENT_2 0x0435
+
+#define EC_ADDR_BAT_REMAIN_CAPACITY_1 0x0436
+
+#define EC_ADDR_BAT_REMAIN_CAPACITY_2 0x0437
+
+#define EC_ADDR_BAT_VOLTAGE_1 0x0438
+
+#define EC_ADDR_BAT_VOLTAGE_2 0x0439
+
+#define EC_ADDR_CPU_TEMP 0x043E
+
+#define EC_ADDR_GPU_TEMP 0x044F
+
+#define EC_ADDR_MAIN_FAN_RPM_1 0x0464
+
+#define EC_ADDR_MAIN_FAN_RPM_2 0x0465
+
+#define EC_ADDR_SECOND_FAN_RPM_1 0x046C
+
+#define EC_ADDR_SECOND_FAN_RPM_2 0x046D
+
+#define EC_ADDR_DEVICE_STATUS 0x047B
+#define WIFI_STATUS_ON BIT(7)
+/* BIT(5) is also unset depending on the rfkill state (bluetooth?) */
+
+#define EC_ADDR_BAT_ALERT 0x0494
+
+#define EC_ADDR_BAT_CYCLE_COUNT_1 0x04A6
+
+#define EC_ADDR_BAT_CYCLE_COUNT_2 0x04A7
+
+#define EC_ADDR_PROJECT_ID 0x0740
+
+#define EC_ADDR_AP_OEM 0x0741
+#define ENABLE_MANUAL_CTRL BIT(0)
+#define ITE_KBD_EFFECT_REACTIVE BIT(3)
+#define FAN_ABNORMAL BIT(5)
+
+#define EC_ADDR_SUPPORT_5 0x0742
+#define FAN_TURBO_SUPPORTED BIT(4)
+#define FAN_SUPPORT BIT(5)
+
+#define EC_ADDR_CTGP_DB_CTRL 0x0743
+#define CTGP_DB_GENERAL_ENABLE BIT(0)
+#define CTGP_DB_DB_ENABLE BIT(1)
+#define CTGP_DB_CTGP_ENABLE BIT(2)
+
+#define EC_ADDR_CTGP_OFFSET 0x0744
+
+#define EC_ADDR_TPP_OFFSET 0x0745
+
+#define EC_ADDR_MAX_TGP 0x0746
+
+#define EC_ADDR_LIGHTBAR_AC_CTRL 0x0748
+#define LIGHTBAR_APP_EXISTS BIT(0)
+#define LIGHTBAR_POWER_SAVE BIT(1)
+#define LIGHTBAR_S0_OFF BIT(2)
+#define LIGHTBAR_S3_OFF BIT(3) /* Breathing animation when suspended */
+#define LIGHTBAR_WELCOME BIT(7) /* Rainbow animation */
+
+#define EC_ADDR_LIGHTBAR_AC_RED 0x0749
+
+#define EC_ADDR_LIGHTBAR_AC_GREEN 0x074A
+
+#define EC_ADDR_LIGHTBAR_AC_BLUE 0x074B
+
+#define EC_ADDR_BIOS_OEM 0x074E
+#define FN_LOCK_STATUS BIT(4)
+
+#define EC_ADDR_MANUAL_FAN_CTRL 0x0751
+#define FAN_LEVEL_MASK GENMASK(2, 0)
+#define FAN_MODE_TURBO BIT(4)
+#define FAN_MODE_HIGH BIT(5)
+#define FAN_MODE_BOOST BIT(6)
+#define FAN_MODE_USER BIT(7)
+
+#define EC_ADDR_PWM_1 0x075B
+
+#define EC_ADDR_PWM_2 0x075C
+
+/* Unreliable */
+#define EC_ADDR_SUPPORT_1 0x0765
+#define AIRPLANE_MODE BIT(0)
+#define GPS_SWITCH BIT(1)
+#define OVERCLOCK BIT(2)
+#define MACRO_KEY BIT(3)
+#define SHORTCUT_KEY BIT(4)
+#define SUPER_KEY_LOCK BIT(5)
+#define LIGHTBAR BIT(6)
+#define FAN_BOOST BIT(7)
+
+#define EC_ADDR_SUPPORT_2 0x0766
+#define SILENT_MODE BIT(0)
+#define USB_CHARGING BIT(1)
+#define RGB_KEYBOARD BIT(2)
+#define CHINA_MODE BIT(5)
+#define MY_BATTERY BIT(6)
+
+#define EC_ADDR_TRIGGER 0x0767
+#define TRIGGER_SUPER_KEY_LOCK BIT(0)
+#define TRIGGER_LIGHTBAR BIT(1)
+#define TRIGGER_FAN_BOOST BIT(2)
+#define TRIGGER_SILENT_MODE BIT(3)
+#define TRIGGER_USB_CHARGING BIT(4)
+#define RGB_APPLY_COLOR BIT(5)
+#define RGB_LOGO_EFFECT BIT(6)
+#define RGB_RAINBOW_EFFECT BIT(7)
+
+#define EC_ADDR_SWITCH_STATUS 0x0768
+#define SUPER_KEY_LOCK_STATUS BIT(0)
+#define LIGHTBAR_STATUS BIT(1)
+#define FAN_BOOST_STATUS BIT(2)
+#define MACRO_KEY_STATUS BIT(3)
+#define MY_BAT_POWER_BAT_STATUS BIT(4)
+
+#define EC_ADDR_RGB_RED 0x0769
+
+#define EC_ADDR_RGB_GREEN 0x076A
+
+#define EC_ADDR_RGB_BLUE 0x076B
+
+#define EC_ADDR_ROMID_START 0x0770
+#define ROMID_LENGTH 14
+
+#define EC_ADDR_ROMID_EXTRA_1 0x077E
+
+#define EC_ADDR_ROMID_EXTRA_2 0x077F
+
+#define EC_ADDR_BIOS_OEM_2 0x0782
+#define FAN_V2_NEW BIT(0)
+#define FAN_QKEY BIT(1)
+#define FAN_TABLE_OFFICE_MODE BIT(2)
+#define FAN_V3 BIT(3)
+#define DEFAULT_MODE BIT(4)
+
+#define EC_ADDR_PL1_SETTING 0x0783
+
+#define EC_ADDR_PL2_SETTING 0x0784
+
+#define EC_ADDR_PL4_SETTING 0x0785
+
+#define EC_ADDR_FAN_DEFAULT 0x0786
+#define FAN_CURVE_LENGTH 5
+
+#define EC_ADDR_KBD_STATUS 0x078C
+#define KBD_WHITE_ONLY BIT(0) /* ~single color */
+#define KBD_SINGLE_COLOR_OFF BIT(1)
+#define KBD_TURBO_LEVEL_MASK GENMASK(3, 2)
+#define KBD_APPLY BIT(4)
+#define KBD_BRIGHTNESS GENMASK(7, 5)
+
+#define EC_ADDR_FAN_CTRL 0x078E
+#define FAN3P5 BIT(1)
+#define CHARGING_PROFILE BIT(3)
+#define UNIVERSAL_FAN_CTRL BIT(6)
+
+#define EC_ADDR_BIOS_OEM_3 0x07A3
+#define FAN_REDUCED_DURY_CYCLE BIT(5)
+#define FAN_ALWAYS_ON BIT(6)
+
+#define EC_ADDR_BIOS_BYTE 0x07A4
+#define FN_LOCK_SWITCH BIT(3)
+
+#define EC_ADDR_OEM_3 0x07A5
+#define POWER_LED_MASK GENMASK(1, 0)
+#define POWER_LED_LEFT 0x00
+#define POWER_LED_BOTH 0x01
+#define POWER_LED_NONE 0x02
+#define FAN_QUIET BIT(2)
+#define OVERBOOST BIT(4)
+#define HIGH_POWER BIT(7)
+
+#define EC_ADDR_OEM_4 0x07A6
+#define OVERBOOST_DYN_TEMP_OFF BIT(1)
+#define TOUCHPAD_TOGGLE_OFF BIT(6)
+
+#define EC_ADDR_CHARGE_CTRL 0x07B9
+#define CHARGE_CTRL_MASK GENMASK(6, 0)
+#define CHARGE_CTRL_REACHED BIT(7)
+
+#define EC_ADDR_UNIVERSAL_FAN_CTRL 0x07C5
+#define SPLIT_TABLES BIT(7)
+
+#define EC_ADDR_AP_OEM_6 0x07C6
+#define ENABLE_UNIVERSAL_FAN_CTRL BIT(2)
+#define BATTERY_CHARGE_FULL_OVER_24H BIT(3)
+#define BATTERY_ERM_STATUS_REACHED BIT(4)
+
+#define EC_ADDR_CHARGE_PRIO 0x07CC
+#define CHARGING_PERFORMANCE BIT(7)
+
+/* Same bits as EC_ADDR_LIGHTBAR_AC_CTRL except LIGHTBAR_S3_OFF */
+#define EC_ADDR_LIGHTBAR_BAT_CTRL 0x07E2
+
+#define EC_ADDR_LIGHTBAR_BAT_RED 0x07E3
+
+#define EC_ADDR_LIGHTBAR_BAT_GREEN 0x07E4
+
+#define EC_ADDR_LIGHTBAR_BAT_BLUE 0x07E5
+
+#define EC_ADDR_CPU_TEMP_END_TABLE 0x0F00
+
+#define EC_ADDR_CPU_TEMP_START_TABLE 0x0F10
+
+#define EC_ADDR_CPU_FAN_SPEED_TABLE 0x0F20
+
+#define EC_ADDR_GPU_TEMP_END_TABLE 0x0F30
+
+#define EC_ADDR_GPU_TEMP_START_TABLE 0x0F40
+
+#define EC_ADDR_GPU_FAN_SPEED_TABLE 0x0F50
+
+/*
+ * Those two registers technically allow for manual fan control,
+ * but are unstable on some models and are likely not meant to
+ * be used by applications as they are only accessible when using
+ * the WMI interface.
+ */
+#define EC_ADDR_PWM_1_WRITEABLE 0x1804
+
+#define EC_ADDR_PWM_2_WRITEABLE 0x1809
+
+#define DRIVER_NAME "uniwill"
+
+/*
+ * The OEM software always sleeps up to 6 ms after reading/writing EC
+ * registers, so we emulate this behaviour for maximum compatibility.
+ */
+#define UNIWILL_EC_DELAY_US 6000
+
+#define PWM_MAX 200
+#define FAN_TABLE_LENGTH 16
+
+#define LED_CHANNELS 3
+#define LED_MAX_BRIGHTNESS 200
+
+#define UNIWILL_FEATURE_FN_LOCK_TOGGLE BIT(0)
+#define UNIWILL_FEATURE_SUPER_KEY_TOGGLE BIT(1)
+#define UNIWILL_FEATURE_TOUCHPAD_TOGGLE BIT(2)
+#define UNIWILL_FEATURE_LIGHTBAR BIT(3)
+#define UNIWILL_FEATURE_BATTERY BIT(4)
+#define UNIWILL_FEATURE_HWMON BIT(5)
+
+struct uniwill_data {
+ struct device *dev;
+ acpi_handle handle;
+ struct regmap *regmap;
+ struct acpi_battery_hook hook;
+ unsigned int last_charge_ctrl;
+ struct mutex battery_lock; /* Protects the list of currently registered batteries */
+ unsigned int last_switch_status;
+ struct mutex super_key_lock; /* Protects the toggling of the super key lock state */
+ struct list_head batteries;
+ struct mutex led_lock; /* Protects writes to the lightbar registers */
+ struct led_classdev_mc led_mc_cdev;
+ struct mc_subled led_mc_subled_info[LED_CHANNELS];
+ struct mutex input_lock; /* Protects input sequence during notify */
+ struct input_dev *input_device;
+ struct notifier_block nb;
+};
+
+struct uniwill_battery_entry {
+ struct list_head head;
+ struct power_supply *battery;
+};
+
+static bool force;
+module_param_unsafe(force, bool, 0);
+MODULE_PARM_DESC(force, "Force loading without checking for supported devices");
+
+/* Feature bitmask since the associated registers are not reliable */
+static unsigned int supported_features;
+
+static const char * const uniwill_temp_labels[] = {
+ "CPU",
+ "GPU",
+};
+
+static const char * const uniwill_fan_labels[] = {
+ "Main",
+ "Secondary",
+};
+
+static const struct key_entry uniwill_keymap[] = {
+ /* Reported via keyboard controller */
+ { KE_IGNORE, UNIWILL_OSD_CAPSLOCK, { KEY_CAPSLOCK }},
+ { KE_IGNORE, UNIWILL_OSD_NUMLOCK, { KEY_NUMLOCK }},
+
+ /* Reported when the user locks/unlocks the super key */
+ { KE_IGNORE, UNIWILL_OSD_SUPER_KEY_LOCK_ENABLE, { KEY_UNKNOWN }},
+ { KE_IGNORE, UNIWILL_OSD_SUPER_KEY_LOCK_DISABLE, { KEY_UNKNOWN }},
+ /* Optional, might not be reported by all devices */
+ { KE_IGNORE, UNIWILL_OSD_SUPER_KEY_LOCK_CHANGED, { KEY_UNKNOWN }},
+
+ /* Reported in manual mode when toggling the airplane mode status */
+ { KE_KEY, UNIWILL_OSD_RFKILL, { KEY_RFKILL }},
+ { KE_IGNORE, UNIWILL_OSD_RADIOON, { KEY_UNKNOWN }},
+ { KE_IGNORE, UNIWILL_OSD_RADIOOFF, { KEY_UNKNOWN }},
+
+ /* Reported when user wants to cycle the platform profile */
+ { KE_KEY, UNIWILL_OSD_PERFORMANCE_MODE_TOGGLE, { KEY_F14 }},
+
+ /* Reported when the user wants to adjust the brightness of the keyboard */
+ { KE_KEY, UNIWILL_OSD_KBDILLUMDOWN, { KEY_KBDILLUMDOWN }},
+ { KE_KEY, UNIWILL_OSD_KBDILLUMUP, { KEY_KBDILLUMUP }},
+
+ /* Reported when the user wants to toggle the microphone mute status */
+ { KE_KEY, UNIWILL_OSD_MIC_MUTE, { KEY_MICMUTE }},
+
+ /* Reported when the user wants to toggle the mute status */
+ { KE_IGNORE, UNIWILL_OSD_MUTE, { KEY_MUTE }},
+
+ /* Reported when the user locks/unlocks the Fn key */
+ { KE_IGNORE, UNIWILL_OSD_FN_LOCK, { KEY_FN_ESC }},
+
+ /* Reported when the user wants to toggle the brightness of the keyboard */
+ { KE_KEY, UNIWILL_OSD_KBDILLUMTOGGLE, { KEY_KBDILLUMTOGGLE }},
+ { KE_KEY, UNIWILL_OSD_KB_LED_LEVEL0, { KEY_KBDILLUMTOGGLE }},
+ { KE_KEY, UNIWILL_OSD_KB_LED_LEVEL1, { KEY_KBDILLUMTOGGLE }},
+ { KE_KEY, UNIWILL_OSD_KB_LED_LEVEL2, { KEY_KBDILLUMTOGGLE }},
+ { KE_KEY, UNIWILL_OSD_KB_LED_LEVEL3, { KEY_KBDILLUMTOGGLE }},
+ { KE_KEY, UNIWILL_OSD_KB_LED_LEVEL4, { KEY_KBDILLUMTOGGLE }},
+
+ /* FIXME: find out the exact meaning of those events */
+ { KE_IGNORE, UNIWILL_OSD_BAT_CHARGE_FULL_24_H, { KEY_UNKNOWN }},
+ { KE_IGNORE, UNIWILL_OSD_BAT_ERM_UPDATE, { KEY_UNKNOWN }},
+
+ /* Reported when the user wants to toggle the benchmark mode status */
+ { KE_IGNORE, UNIWILL_OSD_BENCHMARK_MODE_TOGGLE, { KEY_UNKNOWN }},
+
+ /* Reported when the user wants to toggle the webcam */
+ { KE_IGNORE, UNIWILL_OSD_WEBCAM_TOGGLE, { KEY_UNKNOWN }},
+
+ { KE_END }
+};
+
+static int uniwill_ec_reg_write(void *context, unsigned int reg, unsigned int val)
+{
+ union acpi_object params[2] = {
+ {
+ .integer = {
+ .type = ACPI_TYPE_INTEGER,
+ .value = reg,
+ },
+ },
+ {
+ .integer = {
+ .type = ACPI_TYPE_INTEGER,
+ .value = val,
+ },
+ },
+ };
+ struct uniwill_data *data = context;
+ struct acpi_object_list input = {
+ .count = ARRAY_SIZE(params),
+ .pointer = params,
+ };
+ acpi_status status;
+
+ status = acpi_evaluate_object(data->handle, "ECRW", &input, NULL);
+ if (ACPI_FAILURE(status))
+ return -EIO;
+
+ usleep_range(UNIWILL_EC_DELAY_US, UNIWILL_EC_DELAY_US * 2);
+
+ return 0;
+}
+
+static int uniwill_ec_reg_read(void *context, unsigned int reg, unsigned int *val)
+{
+ union acpi_object params[1] = {
+ {
+ .integer = {
+ .type = ACPI_TYPE_INTEGER,
+ .value = reg,
+ },
+ },
+ };
+ struct uniwill_data *data = context;
+ struct acpi_object_list input = {
+ .count = ARRAY_SIZE(params),
+ .pointer = params,
+ };
+ unsigned long long output;
+ acpi_status status;
+
+ status = acpi_evaluate_integer(data->handle, "ECRR", &input, &output);
+ if (ACPI_FAILURE(status))
+ return -EIO;
+
+ if (output > U8_MAX)
+ return -ENXIO;
+
+ usleep_range(UNIWILL_EC_DELAY_US, UNIWILL_EC_DELAY_US * 2);
+
+ *val = output;
+
+ return 0;
+}
+
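+/*
+ * Regmap bus backed by the ACPI ECRR/ECRW methods above; every register
+ * access therefore goes through AML rather than raw EC port I/O.
+ */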
+static const struct regmap_bus uniwill_ec_bus = {
+ .reg_write = uniwill_ec_reg_write,
+ .reg_read = uniwill_ec_reg_read,
+ .reg_format_endian_default = REGMAP_ENDIAN_LITTLE,
+ .val_format_endian_default = REGMAP_ENDIAN_LITTLE,
+};
+
+static bool uniwill_writeable_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case EC_ADDR_AP_OEM:
+ case EC_ADDR_LIGHTBAR_AC_CTRL:
+ case EC_ADDR_LIGHTBAR_AC_RED:
+ case EC_ADDR_LIGHTBAR_AC_GREEN:
+ case EC_ADDR_LIGHTBAR_AC_BLUE:
+ case EC_ADDR_BIOS_OEM:
+ case EC_ADDR_TRIGGER:
+ case EC_ADDR_OEM_4:
+ case EC_ADDR_CHARGE_CTRL:
+ case EC_ADDR_LIGHTBAR_BAT_CTRL:
+ case EC_ADDR_LIGHTBAR_BAT_RED:
+ case EC_ADDR_LIGHTBAR_BAT_GREEN:
+ case EC_ADDR_LIGHTBAR_BAT_BLUE:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool uniwill_readable_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case EC_ADDR_CPU_TEMP:
+ case EC_ADDR_GPU_TEMP:
+ case EC_ADDR_MAIN_FAN_RPM_1:
+ case EC_ADDR_MAIN_FAN_RPM_2:
+ case EC_ADDR_SECOND_FAN_RPM_1:
+ case EC_ADDR_SECOND_FAN_RPM_2:
+ case EC_ADDR_BAT_ALERT:
+ case EC_ADDR_PROJECT_ID:
+ case EC_ADDR_AP_OEM:
+ case EC_ADDR_LIGHTBAR_AC_CTRL:
+ case EC_ADDR_LIGHTBAR_AC_RED:
+ case EC_ADDR_LIGHTBAR_AC_GREEN:
+ case EC_ADDR_LIGHTBAR_AC_BLUE:
+ case EC_ADDR_BIOS_OEM:
+ case EC_ADDR_PWM_1:
+ case EC_ADDR_PWM_2:
+ case EC_ADDR_TRIGGER:
+ case EC_ADDR_SWITCH_STATUS:
+ case EC_ADDR_OEM_4:
+ case EC_ADDR_CHARGE_CTRL:
+ case EC_ADDR_LIGHTBAR_BAT_CTRL:
+ case EC_ADDR_LIGHTBAR_BAT_RED:
+ case EC_ADDR_LIGHTBAR_BAT_GREEN:
+ case EC_ADDR_LIGHTBAR_BAT_BLUE:
+ return true;
+ default:
+ return false;
+ }
+}
+
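+/* Registers the EC changes on its own must always be read from the hardware */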
+static bool uniwill_volatile_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case EC_ADDR_CPU_TEMP:
+ case EC_ADDR_GPU_TEMP:
+ case EC_ADDR_MAIN_FAN_RPM_1:
+ case EC_ADDR_MAIN_FAN_RPM_2:
+ case EC_ADDR_SECOND_FAN_RPM_1:
+ case EC_ADDR_SECOND_FAN_RPM_2:
+ case EC_ADDR_BAT_ALERT:
+ case EC_ADDR_PWM_1:
+ case EC_ADDR_PWM_2:
+ case EC_ADDR_TRIGGER:
+ case EC_ADDR_SWITCH_STATUS:
+ case EC_ADDR_CHARGE_CTRL:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static const struct regmap_config uniwill_ec_config = {
+ .reg_bits = 16,
+ .val_bits = 8,
+ .writeable_reg = uniwill_writeable_reg,
+ .readable_reg = uniwill_readable_reg,
+ .volatile_reg = uniwill_volatile_reg,
+ .can_sleep = true,
+ .max_register = 0xFFF,
+ .cache_type = REGCACHE_MAPLE,
+ .use_single_read = true,
+ .use_single_write = true,
+};
+
+static ssize_t fn_lock_toggle_enable_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct uniwill_data *data = dev_get_drvdata(dev);
+ unsigned int value;
+ bool enable;
+ int ret;
+
+ ret = kstrtobool(buf, &enable);
+ if (ret < 0)
+ return ret;
+
+ if (enable)
+ value = FN_LOCK_STATUS;
+ else
+ value = 0;
+
+ ret = regmap_update_bits(data->regmap, EC_ADDR_BIOS_OEM, FN_LOCK_STATUS, value);
+ if (ret < 0)
+ return ret;
+
+ return count;
+}
+
+static ssize_t fn_lock_toggle_enable_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct uniwill_data *data = dev_get_drvdata(dev);
+ unsigned int value;
+ int ret;
+
+ ret = regmap_read(data->regmap, EC_ADDR_BIOS_OEM, &value);
+ if (ret < 0)
+ return ret;
+
+ return sysfs_emit(buf, "%d\n", !!(value & FN_LOCK_STATUS));
+}
+
+static DEVICE_ATTR_RW(fn_lock_toggle_enable);
+
+static ssize_t super_key_toggle_enable_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct uniwill_data *data = dev_get_drvdata(dev);
+ unsigned int value;
+ bool enable;
+ int ret;
+
+ ret = kstrtobool(buf, &enable);
+ if (ret < 0)
+ return ret;
+
+ guard(mutex)(&data->super_key_lock);
+
+ ret = regmap_read(data->regmap, EC_ADDR_SWITCH_STATUS, &value);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * We can only toggle the super key lock, so we return early if the setting
+ * is already in the correct state.
+ */
+ if (enable == !(value & SUPER_KEY_LOCK_STATUS))
+ return count;
+
+ ret = regmap_write_bits(data->regmap, EC_ADDR_TRIGGER, TRIGGER_SUPER_KEY_LOCK,
+ TRIGGER_SUPER_KEY_LOCK);
+ if (ret < 0)
+ return ret;
+
+ return count;
+}
+
+static ssize_t super_key_toggle_enable_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct uniwill_data *data = dev_get_drvdata(dev);
+ unsigned int value;
+ int ret;
+
+ ret = regmap_read(data->regmap, EC_ADDR_SWITCH_STATUS, &value);
+ if (ret < 0)
+ return ret;
+
+ return sysfs_emit(buf, "%d\n", !(value & SUPER_KEY_LOCK_STATUS));
+}
+
+static DEVICE_ATTR_RW(super_key_toggle_enable);
+
+static ssize_t touchpad_toggle_enable_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct uniwill_data *data = dev_get_drvdata(dev);
+ unsigned int value;
+ bool enable;
+ int ret;
+
+ ret = kstrtobool(buf, &enable);
+ if (ret < 0)
+ return ret;
+
+ if (enable)
+ value = 0;
+ else
+ value = TOUCHPAD_TOGGLE_OFF;
+
+ ret = regmap_update_bits(data->regmap, EC_ADDR_OEM_4, TOUCHPAD_TOGGLE_OFF, value);
+ if (ret < 0)
+ return ret;
+
+ return count;
+}
+
+static ssize_t touchpad_toggle_enable_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct uniwill_data *data = dev_get_drvdata(dev);
+ unsigned int value;
+ int ret;
+
+ ret = regmap_read(data->regmap, EC_ADDR_OEM_4, &value);
+ if (ret < 0)
+ return ret;
+
+ return sysfs_emit(buf, "%d\n", !(value & TOUCHPAD_TOGGLE_OFF));
+}
+
+static DEVICE_ATTR_RW(touchpad_toggle_enable);
+
+static ssize_t rainbow_animation_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct uniwill_data *data = dev_get_drvdata(dev);
+ unsigned int value;
+ bool enable;
+ int ret;
+
+ ret = kstrtobool(buf, &enable);
+ if (ret < 0)
+ return ret;
+
+ if (enable)
+ value = LIGHTBAR_WELCOME;
+ else
+ value = 0;
+
+ guard(mutex)(&data->led_lock);
+
+ ret = regmap_update_bits(data->regmap, EC_ADDR_LIGHTBAR_AC_CTRL, LIGHTBAR_WELCOME, value);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_update_bits(data->regmap, EC_ADDR_LIGHTBAR_BAT_CTRL, LIGHTBAR_WELCOME, value);
+ if (ret < 0)
+ return ret;
+
+ return count;
+}
+
+static ssize_t rainbow_animation_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct uniwill_data *data = dev_get_drvdata(dev);
+ unsigned int value;
+ int ret;
+
+ ret = regmap_read(data->regmap, EC_ADDR_LIGHTBAR_AC_CTRL, &value);
+ if (ret < 0)
+ return ret;
+
+ return sysfs_emit(buf, "%d\n", !!(value & LIGHTBAR_WELCOME));
+}
+
+static DEVICE_ATTR_RW(rainbow_animation);
+
+static ssize_t breathing_in_suspend_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct uniwill_data *data = dev_get_drvdata(dev);
+ unsigned int value;
+ bool enable;
+ int ret;
+
+ ret = kstrtobool(buf, &enable);
+ if (ret < 0)
+ return ret;
+
+ if (enable)
+ value = 0;
+ else
+ value = LIGHTBAR_S3_OFF;
+
+ /* We only access a single register here, so we do not need to use data->led_lock */
+ ret = regmap_update_bits(data->regmap, EC_ADDR_LIGHTBAR_AC_CTRL, LIGHTBAR_S3_OFF, value);
+ if (ret < 0)
+ return ret;
+
+ return count;
+}
+
+static ssize_t breathing_in_suspend_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct uniwill_data *data = dev_get_drvdata(dev);
+ unsigned int value;
+ int ret;
+
+ ret = regmap_read(data->regmap, EC_ADDR_LIGHTBAR_AC_CTRL, &value);
+ if (ret < 0)
+ return ret;
+
+ return sysfs_emit(buf, "%d\n", !(value & LIGHTBAR_S3_OFF));
+}
+
+static DEVICE_ATTR_RW(breathing_in_suspend);
+
+static struct attribute *uniwill_attrs[] = {
+ /* Keyboard-related */
+ &dev_attr_fn_lock_toggle_enable.attr,
+ &dev_attr_super_key_toggle_enable.attr,
+ &dev_attr_touchpad_toggle_enable.attr,
+ /* Lightbar-related */
+ &dev_attr_rainbow_animation.attr,
+ &dev_attr_breathing_in_suspend.attr,
+ NULL
+};
+
+static umode_t uniwill_attr_is_visible(struct kobject *kobj, struct attribute *attr, int n)
+{
+ if (attr == &dev_attr_fn_lock_toggle_enable.attr) {
+ if (supported_features & UNIWILL_FEATURE_FN_LOCK_TOGGLE)
+ return attr->mode;
+ }
+
+ if (attr == &dev_attr_super_key_toggle_enable.attr) {
+ if (supported_features & UNIWILL_FEATURE_SUPER_KEY_TOGGLE)
+ return attr->mode;
+ }
+
+ if (attr == &dev_attr_touchpad_toggle_enable.attr) {
+ if (supported_features & UNIWILL_FEATURE_TOUCHPAD_TOGGLE)
+ return attr->mode;
+ }
+
+ if (attr == &dev_attr_rainbow_animation.attr ||
+ attr == &dev_attr_breathing_in_suspend.attr) {
+ if (supported_features & UNIWILL_FEATURE_LIGHTBAR)
+ return attr->mode;
+ }
+
+ return 0;
+}
+
+static const struct attribute_group uniwill_group = {
+ .is_visible = uniwill_attr_is_visible,
+ .attrs = uniwill_attrs,
+};
+
+static const struct attribute_group *uniwill_groups[] = {
+ &uniwill_group,
+ NULL
+};
+
+static int uniwill_read(struct device *dev, enum hwmon_sensor_types type, u32 attr, int channel,
+ long *val)
+{
+ struct uniwill_data *data = dev_get_drvdata(dev);
+ unsigned int value;
+ __be16 rpm;
+ int ret;
+
+ switch (type) {
+ case hwmon_temp:
+ switch (channel) {
+ case 0:
+ ret = regmap_read(data->regmap, EC_ADDR_CPU_TEMP, &value);
+ break;
+ case 1:
+ ret = regmap_read(data->regmap, EC_ADDR_GPU_TEMP, &value);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ if (ret < 0)
+ return ret;
+
+ *val = value * MILLIDEGREE_PER_DEGREE;
+ return 0;
+ case hwmon_fan:
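+ /* Fan speed is a big-endian 16-bit value spanning two consecutive registers */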
+ switch (channel) {
+ case 0:
+ ret = regmap_bulk_read(data->regmap, EC_ADDR_MAIN_FAN_RPM_1, &rpm,
+ sizeof(rpm));
+ break;
+ case 1:
+ ret = regmap_bulk_read(data->regmap, EC_ADDR_SECOND_FAN_RPM_1, &rpm,
+ sizeof(rpm));
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ if (ret < 0)
+ return ret;
+
+ *val = be16_to_cpu(rpm);
+ return 0;
+ case hwmon_pwm:
+ switch (channel) {
+ case 0:
+ ret = regmap_read(data->regmap, EC_ADDR_PWM_1, &value);
+ break;
+ case 1:
+ ret = regmap_read(data->regmap, EC_ADDR_PWM_2, &value);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ if (ret < 0)
+ return ret;
+
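+ /* Scale the EC duty cycle from [0, PWM_MAX] to the hwmon range [0, 255] */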
+ *val = fixp_linear_interpolate(0, 0, PWM_MAX, U8_MAX, value);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int uniwill_read_string(struct device *dev, enum hwmon_sensor_types type, u32 attr,
+ int channel, const char **str)
+{
+ switch (type) {
+ case hwmon_temp:
+ *str = uniwill_temp_labels[channel];
+ return 0;
+ case hwmon_fan:
+ *str = uniwill_fan_labels[channel];
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static const struct hwmon_ops uniwill_ops = {
+ .visible = 0444,
+ .read = uniwill_read,
+ .read_string = uniwill_read_string,
+};
+
+static const struct hwmon_channel_info * const uniwill_info[] = {
+ HWMON_CHANNEL_INFO(chip, HWMON_C_REGISTER_TZ),
+ HWMON_CHANNEL_INFO(temp,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL),
+ HWMON_CHANNEL_INFO(fan,
+ HWMON_F_INPUT | HWMON_F_LABEL,
+ HWMON_F_INPUT | HWMON_F_LABEL),
+ HWMON_CHANNEL_INFO(pwm,
+ HWMON_PWM_INPUT,
+ HWMON_PWM_INPUT),
+ NULL
+};
+
+static const struct hwmon_chip_info uniwill_chip_info = {
+ .ops = &uniwill_ops,
+ .info = uniwill_info,
+};
+
+static int uniwill_hwmon_init(struct uniwill_data *data)
+{
+ struct device *hdev;
+
+ if (!(supported_features & UNIWILL_FEATURE_HWMON))
+ return 0;
+
+ hdev = devm_hwmon_device_register_with_info(data->dev, "uniwill", data,
+ &uniwill_chip_info, NULL);
+
+ return PTR_ERR_OR_ZERO(hdev);
+}
+
+static const unsigned int uniwill_led_channel_to_bat_reg[LED_CHANNELS] = {
+ EC_ADDR_LIGHTBAR_BAT_RED,
+ EC_ADDR_LIGHTBAR_BAT_GREEN,
+ EC_ADDR_LIGHTBAR_BAT_BLUE,
+};
+
+static const unsigned int uniwill_led_channel_to_ac_reg[LED_CHANNELS] = {
+ EC_ADDR_LIGHTBAR_AC_RED,
+ EC_ADDR_LIGHTBAR_AC_GREEN,
+ EC_ADDR_LIGHTBAR_AC_BLUE,
+};
+
+static int uniwill_led_brightness_set(struct led_classdev *led_cdev, enum led_brightness brightness)
+{
+ struct led_classdev_mc *led_mc_cdev = lcdev_to_mccdev(led_cdev);
+ struct uniwill_data *data = container_of(led_mc_cdev, struct uniwill_data, led_mc_cdev);
+ unsigned int value;
+ int ret;
+
+ ret = led_mc_calc_color_components(led_mc_cdev, brightness);
+ if (ret < 0)
+ return ret;
+
+ guard(mutex)(&data->led_lock);
+
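+ /* Keep the separate AC and battery color settings in sync, see uniwill_led_init() */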
+ for (int i = 0; i < LED_CHANNELS; i++) {
+ /* Prevent the brightness values from overflowing */
+ value = min(LED_MAX_BRIGHTNESS, data->led_mc_subled_info[i].brightness);
+ ret = regmap_write(data->regmap, uniwill_led_channel_to_ac_reg[i], value);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_write(data->regmap, uniwill_led_channel_to_bat_reg[i], value);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (brightness)
+ value = 0;
+ else
+ value = LIGHTBAR_S0_OFF;
+
+ ret = regmap_update_bits(data->regmap, EC_ADDR_LIGHTBAR_AC_CTRL, LIGHTBAR_S0_OFF, value);
+ if (ret < 0)
+ return ret;
+
+ return regmap_update_bits(data->regmap, EC_ADDR_LIGHTBAR_BAT_CTRL, LIGHTBAR_S0_OFF, value);
+}
+
+#define LIGHTBAR_MASK (LIGHTBAR_APP_EXISTS | LIGHTBAR_S0_OFF | LIGHTBAR_S3_OFF | LIGHTBAR_WELCOME)
+
+static int uniwill_led_init(struct uniwill_data *data)
+{
+ struct led_init_data init_data = {
+ .devicename = DRIVER_NAME,
+ .default_label = "multicolor:" LED_FUNCTION_STATUS,
+ .devname_mandatory = true,
+ };
+ unsigned int color_indices[LED_CHANNELS] = {
+ LED_COLOR_ID_RED,
+ LED_COLOR_ID_GREEN,
+ LED_COLOR_ID_BLUE,
+ };
+ unsigned int value;
+ int ret;
+
+ if (!(supported_features & UNIWILL_FEATURE_LIGHTBAR))
+ return 0;
+
+ ret = devm_mutex_init(data->dev, &data->led_lock);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * The EC has separate lightbar settings for AC and battery mode,
+ * so we have to ensure that both settings are the same.
+ */
+ ret = regmap_read(data->regmap, EC_ADDR_LIGHTBAR_AC_CTRL, &value);
+ if (ret < 0)
+ return ret;
+
+ value |= LIGHTBAR_APP_EXISTS;
+ ret = regmap_write(data->regmap, EC_ADDR_LIGHTBAR_AC_CTRL, value);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * The breathing animation during suspend is not supported when
+ * running on battery power.
+ */
+ value |= LIGHTBAR_S3_OFF;
+ ret = regmap_update_bits(data->regmap, EC_ADDR_LIGHTBAR_BAT_CTRL, LIGHTBAR_MASK, value);
+ if (ret < 0)
+ return ret;
+
+ data->led_mc_cdev.led_cdev.color = LED_COLOR_ID_MULTI;
+ data->led_mc_cdev.led_cdev.max_brightness = LED_MAX_BRIGHTNESS;
+ data->led_mc_cdev.led_cdev.flags = LED_REJECT_NAME_CONFLICT;
+ data->led_mc_cdev.led_cdev.brightness_set_blocking = uniwill_led_brightness_set;
+
+ if (value & LIGHTBAR_S0_OFF)
+ data->led_mc_cdev.led_cdev.brightness = 0;
+ else
+ data->led_mc_cdev.led_cdev.brightness = LED_MAX_BRIGHTNESS;
+
+ for (int i = 0; i < LED_CHANNELS; i++) {
+ data->led_mc_subled_info[i].color_index = color_indices[i];
+
+ ret = regmap_read(data->regmap, uniwill_led_channel_to_ac_reg[i], &value);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Make sure that the initial intensity value is not greater than
+ * the maximum brightness.
+ */
+ value = min(LED_MAX_BRIGHTNESS, value);
+ ret = regmap_write(data->regmap, uniwill_led_channel_to_ac_reg[i], value);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_write(data->regmap, uniwill_led_channel_to_bat_reg[i], value);
+ if (ret < 0)
+ return ret;
+
+ data->led_mc_subled_info[i].intensity = value;
+ data->led_mc_subled_info[i].channel = i;
+ }
+
+ data->led_mc_cdev.subled_info = data->led_mc_subled_info;
+ data->led_mc_cdev.num_colors = LED_CHANNELS;
+
+ return devm_led_classdev_multicolor_register_ext(data->dev, &data->led_mc_cdev,
+ &init_data);
+}
+
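+/*
+ * Battery health is synthesized from the presence/status properties of
+ * the underlying battery driver plus the EC battery alert register.
+ */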
+static int uniwill_get_property(struct power_supply *psy, const struct power_supply_ext *ext,
+ void *drvdata, enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct uniwill_data *data = drvdata;
+ union power_supply_propval prop;
+ unsigned int regval;
+ int ret;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_HEALTH:
+ ret = power_supply_get_property_direct(psy, POWER_SUPPLY_PROP_PRESENT, &prop);
+ if (ret < 0)
+ return ret;
+
+ if (!prop.intval) {
+ val->intval = POWER_SUPPLY_HEALTH_NO_BATTERY;
+ return 0;
+ }
+
+ ret = power_supply_get_property_direct(psy, POWER_SUPPLY_PROP_STATUS, &prop);
+ if (ret < 0)
+ return ret;
+
+ if (prop.intval == POWER_SUPPLY_STATUS_UNKNOWN) {
+ val->intval = POWER_SUPPLY_HEALTH_UNKNOWN;
+ return 0;
+ }
+
+ ret = regmap_read(data->regmap, EC_ADDR_BAT_ALERT, &regval);
+ if (ret < 0)
+ return ret;
+
+ if (regval) {
+ /* Charging issue */
+ val->intval = POWER_SUPPLY_HEALTH_UNSPEC_FAILURE;
+ return 0;
+ }
+
+ val->intval = POWER_SUPPLY_HEALTH_GOOD;
+ return 0;
+ case POWER_SUPPLY_PROP_CHARGE_CONTROL_END_THRESHOLD:
+ ret = regmap_read(data->regmap, EC_ADDR_CHARGE_CTRL, &regval);
+ if (ret < 0)
+ return ret;
+
+ val->intval = clamp_val(FIELD_GET(CHARGE_CTRL_MASK, regval), 0, 100);
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int uniwill_set_property(struct power_supply *psy, const struct power_supply_ext *ext,
+ void *drvdata, enum power_supply_property psp,
+ const union power_supply_propval *val)
+{
+ struct uniwill_data *data = drvdata;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_CHARGE_CONTROL_END_THRESHOLD:
+ if (val->intval < 1 || val->intval > 100)
+ return -EINVAL;
+
+ return regmap_update_bits(data->regmap, EC_ADDR_CHARGE_CTRL, CHARGE_CTRL_MASK,
+ val->intval);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int uniwill_property_is_writeable(struct power_supply *psy,
+ const struct power_supply_ext *ext, void *drvdata,
+ enum power_supply_property psp)
+{
+ if (psp == POWER_SUPPLY_PROP_CHARGE_CONTROL_END_THRESHOLD)
+ return true;
+
+ return false;
+}
+
+static const enum power_supply_property uniwill_properties[] = {
+ POWER_SUPPLY_PROP_HEALTH,
+ POWER_SUPPLY_PROP_CHARGE_CONTROL_END_THRESHOLD,
+};
+
+static const struct power_supply_ext uniwill_extension = {
+ .name = DRIVER_NAME,
+ .properties = uniwill_properties,
+ .num_properties = ARRAY_SIZE(uniwill_properties),
+ .get_property = uniwill_get_property,
+ .set_property = uniwill_set_property,
+ .property_is_writeable = uniwill_property_is_writeable,
+};
+
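+/*
+ * Keep track of all batteries extended by this driver so that the WMI
+ * notifier can forward battery alerts via power_supply_changed().
+ */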
+static int uniwill_add_battery(struct power_supply *battery, struct acpi_battery_hook *hook)
+{
+ struct uniwill_data *data = container_of(hook, struct uniwill_data, hook);
+ struct uniwill_battery_entry *entry;
+ int ret;
+
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return -ENOMEM;
+
+ ret = power_supply_register_extension(battery, &uniwill_extension, data->dev, data);
+ if (ret < 0) {
+ kfree(entry);
+ return ret;
+ }
+
+ guard(mutex)(&data->battery_lock);
+
+ entry->battery = battery;
+ list_add(&entry->head, &data->batteries);
+
+ return 0;
+}
+
+static int uniwill_remove_battery(struct power_supply *battery, struct acpi_battery_hook *hook)
+{
+ struct uniwill_data *data = container_of(hook, struct uniwill_data, hook);
+ struct uniwill_battery_entry *entry, *tmp;
+
+ scoped_guard(mutex, &data->battery_lock) {
+ list_for_each_entry_safe(entry, tmp, &data->batteries, head) {
+ if (entry->battery == battery) {
+ list_del(&entry->head);
+ kfree(entry);
+ break;
+ }
+ }
+ }
+
+ power_supply_unregister_extension(battery, &uniwill_extension);
+
+ return 0;
+}
+
+static int uniwill_battery_init(struct uniwill_data *data)
+{
+ int ret;
+
+ if (!(supported_features & UNIWILL_FEATURE_BATTERY))
+ return 0;
+
+ ret = devm_mutex_init(data->dev, &data->battery_lock);
+ if (ret < 0)
+ return ret;
+
+ INIT_LIST_HEAD(&data->batteries);
+ data->hook.name = "Uniwill Battery Extension";
+ data->hook.add_battery = uniwill_add_battery;
+ data->hook.remove_battery = uniwill_remove_battery;
+
+ return devm_battery_hook_register(data->dev, &data->hook);
+}
+
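+/*
+ * WMI event dispatcher: battery alerts are propagated to all tracked
+ * power supplies, adapter changes are ignored for now and all remaining
+ * events are reported through the sparse keymap.
+ */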
+static int uniwill_notifier_call(struct notifier_block *nb, unsigned long action, void *dummy)
+{
+ struct uniwill_data *data = container_of(nb, struct uniwill_data, nb);
+ struct uniwill_battery_entry *entry;
+
+ switch (action) {
+ case UNIWILL_OSD_BATTERY_ALERT:
+ mutex_lock(&data->battery_lock);
+ list_for_each_entry(entry, &data->batteries, head) {
+ power_supply_changed(entry->battery);
+ }
+ mutex_unlock(&data->battery_lock);
+
+ return NOTIFY_OK;
+ case UNIWILL_OSD_DC_ADAPTER_CHANGED:
+		/*
+		 * noop for the time being, will change once charging priority
+		 * gets implemented.
+		 */
+
+ return NOTIFY_OK;
+ default:
+ mutex_lock(&data->input_lock);
+ sparse_keymap_report_event(data->input_device, action, 1, true);
+ mutex_unlock(&data->input_lock);
+
+ return NOTIFY_OK;
+ }
+}
+
+static int uniwill_input_init(struct uniwill_data *data)
+{
+ int ret;
+
+ ret = devm_mutex_init(data->dev, &data->input_lock);
+ if (ret < 0)
+ return ret;
+
+ data->input_device = devm_input_allocate_device(data->dev);
+ if (!data->input_device)
+ return -ENOMEM;
+
+ ret = sparse_keymap_setup(data->input_device, uniwill_keymap, NULL);
+ if (ret < 0)
+ return ret;
+
+ data->input_device->name = "Uniwill WMI hotkeys";
+ data->input_device->phys = "wmi/input0";
+ data->input_device->id.bustype = BUS_HOST;
+ ret = input_register_device(data->input_device);
+ if (ret < 0)
+ return ret;
+
+ data->nb.notifier_call = uniwill_notifier_call;
+
+ return devm_uniwill_wmi_register_notifier(data->dev, &data->nb);
+}
+
+static void uniwill_disable_manual_control(void *context)
+{
+ struct uniwill_data *data = context;
+
+ regmap_clear_bits(data->regmap, EC_ADDR_AP_OEM, ENABLE_MANUAL_CTRL);
+}
+
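+/*
+ * Enable manual EC control and make sure it is disabled again when the
+ * driver is unbound, mirroring the behaviour of uniwill_shutdown().
+ */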
+static int uniwill_ec_init(struct uniwill_data *data)
+{
+ unsigned int value;
+ int ret;
+
+ ret = regmap_read(data->regmap, EC_ADDR_PROJECT_ID, &value);
+ if (ret < 0)
+ return ret;
+
+ dev_dbg(data->dev, "Project ID: %u\n", value);
+
+ ret = regmap_set_bits(data->regmap, EC_ADDR_AP_OEM, ENABLE_MANUAL_CTRL);
+ if (ret < 0)
+ return ret;
+
+ return devm_add_action_or_reset(data->dev, uniwill_disable_manual_control, data);
+}
+
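+/*
+ * Probe order matters: the EC handshake comes first, the optional
+ * feature blocks follow and input setup comes last so that no events
+ * are delivered before the rest of the driver is ready.
+ */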
+static int uniwill_probe(struct platform_device *pdev)
+{
+ struct uniwill_data *data;
+ struct regmap *regmap;
+ acpi_handle handle;
+ int ret;
+
+ handle = ACPI_HANDLE(&pdev->dev);
+ if (!handle)
+ return -ENODEV;
+
+ data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->dev = &pdev->dev;
+ data->handle = handle;
+ platform_set_drvdata(pdev, data);
+
+ regmap = devm_regmap_init(&pdev->dev, &uniwill_ec_bus, data, &uniwill_ec_config);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ data->regmap = regmap;
+ ret = devm_mutex_init(&pdev->dev, &data->super_key_lock);
+ if (ret < 0)
+ return ret;
+
+ ret = uniwill_ec_init(data);
+ if (ret < 0)
+ return ret;
+
+ ret = uniwill_battery_init(data);
+ if (ret < 0)
+ return ret;
+
+ ret = uniwill_led_init(data);
+ if (ret < 0)
+ return ret;
+
+ ret = uniwill_hwmon_init(data);
+ if (ret < 0)
+ return ret;
+
+ return uniwill_input_init(data);
+}
+
+static void uniwill_shutdown(struct platform_device *pdev)
+{
+ struct uniwill_data *data = platform_get_drvdata(pdev);
+
+ regmap_clear_bits(data->regmap, EC_ADDR_AP_OEM, ENABLE_MANUAL_CTRL);
+}
+
+static int uniwill_suspend_keyboard(struct uniwill_data *data)
+{
+ if (!(supported_features & UNIWILL_FEATURE_SUPER_KEY_TOGGLE))
+ return 0;
+
+ /*
+	 * The EC_ADDR_SWITCH_STATUS register is marked as volatile, so the
+	 * regcache will not restore it and we have to do so ourselves.
+ */
+ return regmap_read(data->regmap, EC_ADDR_SWITCH_STATUS, &data->last_switch_status);
+}
+
+static int uniwill_suspend_battery(struct uniwill_data *data)
+{
+ if (!(supported_features & UNIWILL_FEATURE_BATTERY))
+ return 0;
+
+ /*
+ * Save the current charge limit in order to restore it during resume.
+	 * We cannot rely on the regcache for that since this register needs to
+ * be declared as volatile due to CHARGE_CTRL_REACHED.
+ */
+ return regmap_read(data->regmap, EC_ADDR_CHARGE_CTRL, &data->last_charge_ctrl);
+}
+
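+/*
+ * Snapshot the volatile EC state and switch the regcache into
+ * cache-only mode; uniwill_resume() syncs the cache back and restores
+ * the snapshotted registers afterwards.
+ */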
+static int uniwill_suspend(struct device *dev)
+{
+ struct uniwill_data *data = dev_get_drvdata(dev);
+ int ret;
+
+ ret = uniwill_suspend_keyboard(data);
+ if (ret < 0)
+ return ret;
+
+ ret = uniwill_suspend_battery(data);
+ if (ret < 0)
+ return ret;
+
+ regcache_cache_only(data->regmap, true);
+ regcache_mark_dirty(data->regmap);
+
+ return 0;
+}
+
+static int uniwill_resume_keyboard(struct uniwill_data *data)
+{
+ unsigned int value;
+ int ret;
+
+ if (!(supported_features & UNIWILL_FEATURE_SUPER_KEY_TOGGLE))
+ return 0;
+
+ ret = regmap_read(data->regmap, EC_ADDR_SWITCH_STATUS, &value);
+ if (ret < 0)
+ return ret;
+
+ if ((data->last_switch_status & SUPER_KEY_LOCK_STATUS) == (value & SUPER_KEY_LOCK_STATUS))
+ return 0;
+
+ return regmap_write_bits(data->regmap, EC_ADDR_TRIGGER, TRIGGER_SUPER_KEY_LOCK,
+ TRIGGER_SUPER_KEY_LOCK);
+}
+
+static int uniwill_resume_battery(struct uniwill_data *data)
+{
+ if (!(supported_features & UNIWILL_FEATURE_BATTERY))
+ return 0;
+
+ return regmap_update_bits(data->regmap, EC_ADDR_CHARGE_CTRL, CHARGE_CTRL_MASK,
+ data->last_charge_ctrl);
+}
+
+static int uniwill_resume(struct device *dev)
+{
+ struct uniwill_data *data = dev_get_drvdata(dev);
+ int ret;
+
+ regcache_cache_only(data->regmap, false);
+
+ ret = regcache_sync(data->regmap);
+ if (ret < 0)
+ return ret;
+
+ ret = uniwill_resume_keyboard(data);
+ if (ret < 0)
+ return ret;
+
+ return uniwill_resume_battery(data);
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(uniwill_pm_ops, uniwill_suspend, uniwill_resume);
+
+/*
+ * We only use the DMI table for autoloading because the ACPI device itself
+ * does not guarantee that the underlying EC implementation is supported.
+ */
+static const struct acpi_device_id uniwill_id_table[] = {
+ { "INOU0000" },
+ { },
+};
+
+static struct platform_driver uniwill_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .dev_groups = uniwill_groups,
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ .acpi_match_table = uniwill_id_table,
+ .pm = pm_sleep_ptr(&uniwill_pm_ops),
+ },
+ .probe = uniwill_probe,
+ .shutdown = uniwill_shutdown,
+};
+
+static const struct dmi_system_id uniwill_dmi_table[] __initconst = {
+ {
+ .ident = "XMG FUSION 15",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "SchenkerTechnologiesGmbH"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "LAPQC71A"),
+ },
+ },
+ {
+ .ident = "XMG FUSION 15",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "SchenkerTechnologiesGmbH"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "LAPQC71B"),
+ },
+ },
+ {
+ .ident = "Intel NUC x15",
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Intel(R) Client Systems"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "LAPAC71H"),
+ },
+ .driver_data = (void *)(UNIWILL_FEATURE_FN_LOCK_TOGGLE |
+ UNIWILL_FEATURE_SUPER_KEY_TOGGLE |
+ UNIWILL_FEATURE_TOUCHPAD_TOGGLE |
+ UNIWILL_FEATURE_BATTERY |
+ UNIWILL_FEATURE_HWMON),
+ },
+ {
+ .ident = "Intel NUC x15",
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Intel(R) Client Systems"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "LAPKC71F"),
+ },
+ .driver_data = (void *)(UNIWILL_FEATURE_FN_LOCK_TOGGLE |
+ UNIWILL_FEATURE_SUPER_KEY_TOGGLE |
+ UNIWILL_FEATURE_TOUCHPAD_TOGGLE |
+ UNIWILL_FEATURE_LIGHTBAR |
+ UNIWILL_FEATURE_BATTERY |
+ UNIWILL_FEATURE_HWMON),
+ },
+ {
+ .ident = "TUXEDO InfinityBook Pro 14 Gen6 Intel",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "PHxTxX1"),
+ },
+ },
+ {
+ .ident = "TUXEDO InfinityBook Pro 14 Gen6 Intel",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "PHxTQx1"),
+ },
+ },
+ {
+ .ident = "TUXEDO InfinityBook Pro 14/16 Gen7 Intel",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "PHxARX1_PHxAQF1"),
+ },
+ },
+ {
+ .ident = "TUXEDO InfinityBook Pro 16 Gen7 Intel/Commodore Omnia-Book Pro Gen 7",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "PH6AG01_PH6AQ71_PH6AQI1"),
+ },
+ },
+ {
+ .ident = "TUXEDO InfinityBook Pro 14/16 Gen8 Intel/Commodore Omnia-Book Pro Gen 8",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "PH4PRX1_PH6PRX1"),
+ },
+ },
+ {
+ .ident = "TUXEDO InfinityBook Pro 14 Gen8 Intel/Commodore Omnia-Book Pro Gen 8",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "PH4PG31"),
+ },
+ },
+ {
+ .ident = "TUXEDO InfinityBook Pro 16 Gen8 Intel",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "PH6PG01_PH6PG71"),
+ },
+ },
+ {
+ .ident = "TUXEDO InfinityBook Pro 14/15 Gen9 AMD",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "GXxHRXx"),
+ },
+ },
+ {
+ .ident = "TUXEDO InfinityBook Pro 14/15 Gen9 Intel/Commodore Omnia-Book 15 Gen9",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "GXxMRXx"),
+ },
+ },
+ {
+ .ident = "TUXEDO InfinityBook Pro 14/15 Gen10 AMD",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "XxHP4NAx"),
+ },
+ },
+ {
+ .ident = "TUXEDO InfinityBook Pro 14/15 Gen10 AMD",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "XxKK4NAx_XxSP4NAx"),
+ },
+ },
+ {
+ .ident = "TUXEDO InfinityBook Pro 15 Gen10 Intel",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "XxAR4NAx"),
+ },
+ },
+ {
+ .ident = "TUXEDO InfinityBook Max 15 Gen10 AMD",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "X5KK45xS_X5SP45xS"),
+ },
+ },
+ {
+ .ident = "TUXEDO InfinityBook Max 16 Gen10 AMD",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "X6HP45xU"),
+ },
+ },
+ {
+ .ident = "TUXEDO InfinityBook Max 16 Gen10 AMD",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "X6KK45xU_X6SP45xU"),
+ },
+ },
+ {
+ .ident = "TUXEDO InfinityBook Max 15 Gen10 Intel",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "X5AR45xS"),
+ },
+ },
+ {
+ .ident = "TUXEDO InfinityBook Max 16 Gen10 Intel",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "X6AR55xU"),
+ },
+ },
+ {
+ .ident = "TUXEDO Polaris 15 Gen1 AMD",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "POLARIS1501A1650TI"),
+ },
+ },
+ {
+ .ident = "TUXEDO Polaris 15 Gen1 AMD",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "POLARIS1501A2060"),
+ },
+ },
+ {
+ .ident = "TUXEDO Polaris 17 Gen1 AMD",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "POLARIS1701A1650TI"),
+ },
+ },
+ {
+ .ident = "TUXEDO Polaris 17 Gen1 AMD",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "POLARIS1701A2060"),
+ },
+ },
+ {
+ .ident = "TUXEDO Polaris 15 Gen1 Intel",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "POLARIS1501I1650TI"),
+ },
+ },
+ {
+ .ident = "TUXEDO Polaris 15 Gen1 Intel",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "POLARIS1501I2060"),
+ },
+ },
+ {
+ .ident = "TUXEDO Polaris 17 Gen1 Intel",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "POLARIS1701I1650TI"),
+ },
+ },
+ {
+ .ident = "TUXEDO Polaris 17 Gen1 Intel",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "POLARIS1701I2060"),
+ },
+ },
+ {
+ .ident = "TUXEDO Trinity 15 Intel Gen1",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "TRINITY1501I"),
+ },
+ },
+ {
+ .ident = "TUXEDO Trinity 17 Intel Gen1",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "TRINITY1701I"),
+ },
+ },
+ {
+ .ident = "TUXEDO Polaris 15/17 Gen2 AMD",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "GMxMGxx"),
+ },
+ },
+ {
+ .ident = "TUXEDO Polaris 15/17 Gen2 Intel",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "GMxNGxx"),
+ },
+ },
+ {
+ .ident = "TUXEDO Stellaris/Polaris 15/17 Gen3 AMD",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "GMxZGxx"),
+ },
+ },
+ {
+ .ident = "TUXEDO Stellaris/Polaris 15/17 Gen3 Intel",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "GMxTGxx"),
+ },
+ },
+ {
+ .ident = "TUXEDO Stellaris/Polaris 15/17 Gen4 AMD",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "GMxRGxx"),
+ },
+ },
+ {
+ .ident = "TUXEDO Stellaris 15 Gen4 Intel",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "GMxAGxx"),
+ },
+ },
+ {
+ .ident = "TUXEDO Polaris 15/17 Gen5 AMD",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "GMxXGxx"),
+ },
+ },
+ {
+ .ident = "TUXEDO Stellaris 16 Gen5 AMD",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "GM6XGxX"),
+ },
+ },
+ {
+ .ident = "TUXEDO Stellaris 16/17 Gen5 Intel/Commodore ORION Gen 5",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "GMxPXxx"),
+ },
+ },
+ {
+ .ident = "TUXEDO Stellaris Slim 15 Gen6 AMD",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "GMxHGxx"),
+ },
+ },
+ {
+ .ident = "TUXEDO Stellaris Slim 15 Gen6 Intel/Commodore ORION Slim 15 Gen6",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "GM5IXxA"),
+ },
+ },
+ {
+ .ident = "TUXEDO Stellaris 16 Gen6 Intel/Commodore ORION 16 Gen6",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "GM6IXxB_MB1"),
+ },
+ },
+ {
+ .ident = "TUXEDO Stellaris 16 Gen6 Intel/Commodore ORION 16 Gen6",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "GM6IXxB_MB2"),
+ },
+ },
+ {
+ .ident = "TUXEDO Stellaris 17 Gen6 Intel/Commodore ORION 17 Gen6",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "GM7IXxN"),
+ },
+ },
+ {
+ .ident = "TUXEDO Stellaris 16 Gen7 AMD",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "X6FR5xxY"),
+ },
+ },
+ {
+ .ident = "TUXEDO Stellaris 16 Gen7 Intel",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "X6AR5xxY"),
+ },
+ },
+ {
+ .ident = "TUXEDO Stellaris 16 Gen7 Intel",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "X6AR5xxY_mLED"),
+ },
+ },
+ {
+ .ident = "TUXEDO Pulse 14 Gen1 AMD",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "PULSE1401"),
+ },
+ },
+ {
+ .ident = "TUXEDO Pulse 15 Gen1 AMD",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "PULSE1501"),
+ },
+ },
+ {
+ .ident = "TUXEDO Pulse 15 Gen2 AMD",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "PF5LUXG"),
+ },
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(dmi, uniwill_dmi_table);
+
+static int __init uniwill_init(void)
+{
+ const struct dmi_system_id *id;
+ int ret;
+
+ id = dmi_first_match(uniwill_dmi_table);
+ if (!id) {
+ if (!force)
+ return -ENODEV;
+
+ /* Assume that the device supports all features */
+ supported_features = UINT_MAX;
+ pr_warn("Loading on a potentially unsupported device\n");
+ } else {
+ supported_features = (uintptr_t)id->driver_data;
+ }
+
+ ret = platform_driver_register(&uniwill_driver);
+ if (ret < 0)
+ return ret;
+
+ ret = uniwill_wmi_register_driver();
+ if (ret < 0) {
+ platform_driver_unregister(&uniwill_driver);
+ return ret;
+ }
+
+ return 0;
+}
+module_init(uniwill_init);
+
+static void __exit uniwill_exit(void)
+{
+ uniwill_wmi_unregister_driver();
+ platform_driver_unregister(&uniwill_driver);
+}
+module_exit(uniwill_exit);
+
+MODULE_AUTHOR("Armin Wolf <W_Armin@gmx.de>");
+MODULE_DESCRIPTION("Uniwill notebook driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/uniwill/uniwill-wmi.c b/drivers/platform/x86/uniwill/uniwill-wmi.c
new file mode 100644
index 000000000000..31d9c39f14ab
--- /dev/null
+++ b/drivers/platform/x86/uniwill/uniwill-wmi.c
@@ -0,0 +1,92 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Linux hotkey driver for Uniwill notebooks.
+ *
+ * Special thanks go to Pőcze Barnabás, Christoffer Sandberg and Werner Sembach
+ * for supporting the development of this driver either through prior work or
+ * by answering questions regarding the underlying WMI interface.
+ *
+ * Copyright (C) 2025 Armin Wolf <W_Armin@gmx.de>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/acpi.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/mod_devicetable.h>
+#include <linux/notifier.h>
+#include <linux/printk.h>
+#include <linux/types.h>
+#include <linux/wmi.h>
+
+#include "uniwill-wmi.h"
+
+#define DRIVER_NAME "uniwill-wmi"
+#define UNIWILL_EVENT_GUID "ABBC0F72-8EA1-11D1-00A0-C90629100000"
+
+static BLOCKING_NOTIFIER_HEAD(uniwill_wmi_chain_head);
+
+static void devm_uniwill_wmi_unregister_notifier(void *data)
+{
+ struct notifier_block *nb = data;
+
+ blocking_notifier_chain_unregister(&uniwill_wmi_chain_head, nb);
+}
+
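+/*
+ * Consumers subscribe to WMI events through this blocking notifier
+ * chain; the devm action removes the notifier when the consumer device
+ * goes away.
+ */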
+int devm_uniwill_wmi_register_notifier(struct device *dev, struct notifier_block *nb)
+{
+ int ret;
+
+ ret = blocking_notifier_chain_register(&uniwill_wmi_chain_head, nb);
+ if (ret < 0)
+ return ret;
+
+ return devm_add_action_or_reset(dev, devm_uniwill_wmi_unregister_notifier, nb);
+}
+
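+/*
+ * Each WMI event carries a single integer payload which is broadcast
+ * unmodified on the notifier chain.
+ */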
+static void uniwill_wmi_notify(struct wmi_device *wdev, union acpi_object *obj)
+{
+ u32 value;
+
+ if (obj->type != ACPI_TYPE_INTEGER)
+ return;
+
+ value = obj->integer.value;
+
+ dev_dbg(&wdev->dev, "Received WMI event %u\n", value);
+
+ blocking_notifier_call_chain(&uniwill_wmi_chain_head, value, NULL);
+}
+
+/*
+ * We cannot fully trust this GUID since Uniwill just copied the WMI GUID
+ * from the Windows driver example, and others probably did the same.
+ *
+ * Because of this we cannot use this WMI GUID for autoloading. Instead, the
+ * associated driver will be registered manually after matching a DMI table.
+ */
+static const struct wmi_device_id uniwill_wmi_id_table[] = {
+ { UNIWILL_EVENT_GUID, NULL },
+ { }
+};
+
+static struct wmi_driver uniwill_wmi_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ },
+ .id_table = uniwill_wmi_id_table,
+ .notify = uniwill_wmi_notify,
+ .no_singleton = true,
+};
+
+int __init uniwill_wmi_register_driver(void)
+{
+ return wmi_driver_register(&uniwill_wmi_driver);
+}
+
+void __exit uniwill_wmi_unregister_driver(void)
+{
+ wmi_driver_unregister(&uniwill_wmi_driver);
+}
diff --git a/drivers/platform/x86/uniwill/uniwill-wmi.h b/drivers/platform/x86/uniwill/uniwill-wmi.h
new file mode 100644
index 000000000000..48783b2e9ffb
--- /dev/null
+++ b/drivers/platform/x86/uniwill/uniwill-wmi.h
@@ -0,0 +1,129 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Linux hotkey driver for Uniwill notebooks.
+ *
+ * Copyright (C) 2025 Armin Wolf <W_Armin@gmx.de>
+ */
+
+#ifndef UNIWILL_WMI_H
+#define UNIWILL_WMI_H
+
+#include <linux/init.h>
+
+#define UNIWILL_OSD_CAPSLOCK 0x01
+#define UNIWILL_OSD_NUMLOCK 0x02
+#define UNIWILL_OSD_SCROLLLOCK 0x03
+
+#define UNIWILL_OSD_TOUCHPAD_ON 0x04
+#define UNIWILL_OSD_TOUCHPAD_OFF 0x05
+
+#define UNIWILL_OSD_SILENT_MODE_ON 0x06
+#define UNIWILL_OSD_SILENT_MODE_OFF 0x07
+
+#define UNIWILL_OSD_WLAN_ON 0x08
+#define UNIWILL_OSD_WLAN_OFF 0x09
+
+#define UNIWILL_OSD_WIMAX_ON 0x0A
+#define UNIWILL_OSD_WIMAX_OFF 0x0B
+
+#define UNIWILL_OSD_BLUETOOTH_ON 0x0C
+#define UNIWILL_OSD_BLUETOOTH_OFF 0x0D
+
+#define UNIWILL_OSD_RF_ON 0x0E
+#define UNIWILL_OSD_RF_OFF 0x0F
+
+#define UNIWILL_OSD_3G_ON 0x10
+#define UNIWILL_OSD_3G_OFF 0x11
+
+#define UNIWILL_OSD_WEBCAM_ON 0x12
+#define UNIWILL_OSD_WEBCAM_OFF 0x13
+
+#define UNIWILL_OSD_BRIGHTNESSUP 0x14
+#define UNIWILL_OSD_BRIGHTNESSDOWN 0x15
+
+#define UNIWILL_OSD_RADIOON 0x1A
+#define UNIWILL_OSD_RADIOOFF 0x1B
+
+#define UNIWILL_OSD_POWERSAVE_ON 0x31
+#define UNIWILL_OSD_POWERSAVE_OFF 0x32
+
+#define UNIWILL_OSD_MENU 0x34
+
+#define UNIWILL_OSD_MUTE 0x35
+#define UNIWILL_OSD_VOLUMEDOWN 0x36
+#define UNIWILL_OSD_VOLUMEUP 0x37
+
+#define UNIWILL_OSD_MENU_2 0x38
+
+#define UNIWILL_OSD_LIGHTBAR_ON 0x39
+#define UNIWILL_OSD_LIGHTBAR_OFF 0x3A
+
+#define UNIWILL_OSD_KB_LED_LEVEL0 0x3B
+#define UNIWILL_OSD_KB_LED_LEVEL1 0x3C
+#define UNIWILL_OSD_KB_LED_LEVEL2 0x3D
+#define UNIWILL_OSD_KB_LED_LEVEL3 0x3E
+#define UNIWILL_OSD_KB_LED_LEVEL4 0x3F
+
+#define UNIWILL_OSD_SUPER_KEY_LOCK_ENABLE 0x40
+#define UNIWILL_OSD_SUPER_KEY_LOCK_DISABLE 0x41
+
+#define UNIWILL_OSD_MENU_JP 0x42
+
+#define UNIWILL_OSD_CAMERA_ON 0x90
+#define UNIWILL_OSD_CAMERA_OFF 0x91
+
+#define UNIWILL_OSD_RFKILL 0xA4
+
+#define UNIWILL_OSD_SUPER_KEY_LOCK_CHANGED 0xA5
+
+#define UNIWILL_OSD_LIGHTBAR_STATE_CHANGED 0xA6
+
+#define UNIWILL_OSD_FAN_BOOST_STATE_CHANGED 0xA7
+
+#define UNIWILL_OSD_LCD_SW 0xA9
+
+#define UNIWILL_OSD_FAN_OVERTEMP 0xAA
+
+#define UNIWILL_OSD_DC_ADAPTER_CHANGED 0xAB
+
+#define UNIWILL_OSD_BAT_HP_OFF 0xAC
+
+#define UNIWILL_OSD_FAN_DOWN_TEMP 0xAD
+
+#define UNIWILL_OSD_BATTERY_ALERT 0xAE
+
+#define UNIWILL_OSD_TIMAP_HAIERLB_SW 0xAF
+
+#define UNIWILL_OSD_PERFORMANCE_MODE_TOGGLE 0xB0
+
+#define UNIWILL_OSD_KBDILLUMDOWN 0xB1
+#define UNIWILL_OSD_KBDILLUMUP 0xB2
+
+#define UNIWILL_OSD_BACKLIGHT_LEVEL_CHANGE 0xB3
+#define UNIWILL_OSD_BACKLIGHT_POWER_CHANGE 0xB4
+
+#define UNIWILL_OSD_MIC_MUTE 0xB7
+
+#define UNIWILL_OSD_FN_LOCK 0xB8
+#define UNIWILL_OSD_KBDILLUMTOGGLE 0xB9
+
+#define UNIWILL_OSD_BAT_CHARGE_FULL_24_H 0xBE
+
+#define UNIWILL_OSD_BAT_ERM_UPDATE 0xBF
+
+#define UNIWILL_OSD_BENCHMARK_MODE_TOGGLE 0xC0
+
+#define UNIWILL_OSD_WEBCAM_TOGGLE 0xCF
+
+#define UNIWILL_OSD_KBD_BACKLIGHT_CHANGED 0xF0
+
+struct device;
+struct notifier_block;
+
+int devm_uniwill_wmi_register_notifier(struct device *dev, struct notifier_block *nb);
+
+int __init uniwill_wmi_register_driver(void);
+
+void __exit uniwill_wmi_unregister_driver(void);
+
+#endif /* UNIWILL_WMI_H */
diff --git a/drivers/platform/x86/x86-android-tablets/lenovo.c b/drivers/platform/x86/x86-android-tablets/lenovo.c
index e3d3a8290949..8d825e0b4661 100644
--- a/drivers/platform/x86/x86-android-tablets/lenovo.c
+++ b/drivers/platform/x86/x86-android-tablets/lenovo.c
@@ -543,7 +543,7 @@ static int __init lenovo_yoga_tab2_830_1050_init_codec(void)
ret = device_add_software_node(codec_dev, &lenovo_yoga_tab2_830_1050_wm5102);
if (ret) {
- ret = dev_err_probe(codec_dev, ret, "adding software node\n");
+ dev_err_probe(codec_dev, ret, "adding software node\n");
goto err_put_pinctrl;
}
diff --git a/drivers/platform/x86/x86-android-tablets/vexia_atla10_ec.c b/drivers/platform/x86/x86-android-tablets/vexia_atla10_ec.c
index 2f8cd8d9e0ab..ebbedfe5f4e8 100644
--- a/drivers/platform/x86/x86-android-tablets/vexia_atla10_ec.c
+++ b/drivers/platform/x86/x86-android-tablets/vexia_atla10_ec.c
@@ -183,7 +183,7 @@ static void atla10_ec_external_power_changed(struct power_supply *psy)
struct atla10_ec_data *data = power_supply_get_drvdata(psy);
/* After charger plug in/out wait 0.5s for things to stabilize */
- mod_delayed_work(system_wq, &data->work, HZ / 2);
+ mod_delayed_work(system_percpu_wq, &data->work, HZ / 2);
}
static const enum power_supply_property atla10_ec_psy_props[] = {