Diffstat (limited to 'drivers/platform/x86/amd/pmf')
-rw-r--r--  drivers/platform/x86/amd/pmf/Kconfig      |  16
-rw-r--r--  drivers/platform/x86/amd/pmf/Makefile     |   7
-rw-r--r--  drivers/platform/x86/amd/pmf/acpi.c       | 342
-rw-r--r--  drivers/platform/x86/amd/pmf/auto-mode.c  | 170
-rw-r--r--  drivers/platform/x86/amd/pmf/cnqf.c       | 112
-rw-r--r--  drivers/platform/x86/amd/pmf/core.c       | 227
-rw-r--r--  drivers/platform/x86/amd/pmf/pmf.h        | 485
-rw-r--r--  drivers/platform/x86/amd/pmf/spc.c        | 343
-rw-r--r--  drivers/platform/x86/amd/pmf/sps.c        | 382
-rw-r--r--  drivers/platform/x86/amd/pmf/tee-if.c     | 629
10 files changed, 2551 insertions, 162 deletions
diff --git a/drivers/platform/x86/amd/pmf/Kconfig b/drivers/platform/x86/amd/pmf/Kconfig
index c375498c4071..25b8f7ae3abd 100644
--- a/drivers/platform/x86/amd/pmf/Kconfig
+++ b/drivers/platform/x86/amd/pmf/Kconfig
@@ -6,7 +6,12 @@
config AMD_PMF
tristate "AMD Platform Management Framework"
depends on ACPI && PCI
+ depends on POWER_SUPPLY
+ depends on AMD_NODE
select ACPI_PLATFORM_PROFILE
+ depends on TEE && AMDTEE
+ depends on AMD_SFH_HID
+ depends on HAS_IOMEM
help
This driver provides support for the AMD Platform Management Framework.
The goal is to enhance end user experience by making AMD PCs smarter,
@@ -14,3 +19,14 @@ config AMD_PMF
To compile this driver as a module, choose M here: the module will
be called amd_pmf.
+
+config AMD_PMF_DEBUG
+ bool "PMF debug information"
+ depends on AMD_PMF
+ help
+ Enabling this option provides more debug information on the OEM-fed
+ power setting values for each of the PMF features. The PMF driver gets
+ this information after evaluating an ACPI method, and the information
+ is stored in the PMF config store.
+
+ Say Y here to enable more debug logs and say N if you are not sure.
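
The help text above is consumed further down in this patch: auto-mode.c and cnqf.c add dump helpers wrapped in #ifdef CONFIG_AMD_PMF_DEBUG so that, with the option disabled, they compile down to empty stubs. A minimal stand-alone sketch of that compile-out pattern follows; the macro, struct, and function names here are illustrative only and are not taken from the driver.

#include <stdio.h>

/* Stand-in for CONFIG_AMD_PMF_DEBUG; flip to 0 to compile the dump helper out. */
#define PMF_DEBUG_DEMO 1

struct demo_power_set {
        unsigned int spl;       /* mW */
        unsigned int sppt;      /* mW */
};

#if PMF_DEBUG_DEMO
static void dump_defaults(const struct demo_power_set *ps)
{
        printf("spl: %u mW\n", ps->spl);
        printf("sppt: %u mW\n", ps->sppt);
}
#else
static void dump_defaults(const struct demo_power_set *ps) {}
#endif

int main(void)
{
        struct demo_power_set ps = { .spl = 15000, .sppt = 25000 };

        dump_defaults(&ps);
        return 0;
}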
diff --git a/drivers/platform/x86/amd/pmf/Makefile b/drivers/platform/x86/amd/pmf/Makefile
index fdededf54392..5978464e0eb7 100644
--- a/drivers/platform/x86/amd/pmf/Makefile
+++ b/drivers/platform/x86/amd/pmf/Makefile
@@ -4,6 +4,7 @@
# AMD Platform Management Framework
#
-obj-$(CONFIG_AMD_PMF) += amd-pmf.o
-amd-pmf-objs := core.o acpi.o sps.o \
- auto-mode.o cnqf.o
+obj-$(CONFIG_AMD_PMF) += amd-pmf.o
+amd-pmf-y := core.o acpi.o sps.o \
+ auto-mode.o cnqf.o \
+ tee-if.o spc.o
diff --git a/drivers/platform/x86/amd/pmf/acpi.c b/drivers/platform/x86/amd/pmf/acpi.c
index 081e84e116e7..13c4fec2c7ef 100644
--- a/drivers/platform/x86/amd/pmf/acpi.c
+++ b/drivers/platform/x86/amd/pmf/acpi.c
@@ -90,12 +90,101 @@ out:
return err;
}
+static union acpi_object *apts_if_call(struct amd_pmf_dev *pdev, u32 state_index)
+{
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ acpi_handle ahandle = ACPI_HANDLE(pdev->dev);
+ struct acpi_object_list apts_if_arg_list;
+ union acpi_object apts_if_args[3];
+ acpi_status status;
+
+ apts_if_arg_list.count = 3;
+ apts_if_arg_list.pointer = &apts_if_args[0];
+
+ apts_if_args[0].type = ACPI_TYPE_INTEGER;
+ apts_if_args[0].integer.value = 1;
+ apts_if_args[1].type = ACPI_TYPE_INTEGER;
+ apts_if_args[1].integer.value = state_index;
+ apts_if_args[2].type = ACPI_TYPE_INTEGER;
+ apts_if_args[2].integer.value = 0;
+
+ status = acpi_evaluate_object(ahandle, "APTS", &apts_if_arg_list, &buffer);
+ if (ACPI_FAILURE(status)) {
+ dev_err(pdev->dev, "APTS state_idx:%u call failed\n", state_index);
+ kfree(buffer.pointer);
+ return NULL;
+ }
+
+ return buffer.pointer;
+}
+
+static int apts_if_call_store_buffer(struct amd_pmf_dev *pdev,
+ u32 index, void *data, size_t out_sz)
+{
+ union acpi_object *info;
+ size_t size;
+ int err = 0;
+
+ info = apts_if_call(pdev, index);
+ if (!info)
+ return -EIO;
+
+ if (info->type != ACPI_TYPE_BUFFER) {
+ dev_err(pdev->dev, "object is not a buffer\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ size = *(u16 *)info->buffer.pointer;
+ if (info->buffer.length < size) {
+ dev_err(pdev->dev, "buffer smaller than header size %u < %zu\n",
+ info->buffer.length, size);
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (size < out_sz) {
+ dev_err(pdev->dev, "buffer too small %zu\n", size);
+ err = -EINVAL;
+ goto out;
+ }
+
+ memcpy(data, info->buffer.pointer, out_sz);
+out:
+ kfree(info);
+ return err;
+}
+
int is_apmf_func_supported(struct amd_pmf_dev *pdev, unsigned long index)
{
/* If bit-n is set, that indicates function n+1 is supported */
return !!(pdev->supported_func & BIT(index - 1));
}
+int is_apmf_bios_input_notifications_supported(struct amd_pmf_dev *pdev)
+{
+ return !!(pdev->notifications & CUSTOM_BIOS_INPUT_BITS);
+}
+
+int apts_get_static_slider_granular_v2(struct amd_pmf_dev *pdev,
+ struct amd_pmf_apts_granular_output *data, u32 apts_idx)
+{
+ if (!is_apmf_func_supported(pdev, APMF_FUNC_STATIC_SLIDER_GRANULAR))
+ return -EINVAL;
+
+ return apts_if_call_store_buffer(pdev, apts_idx, data, sizeof(*data));
+}
+
+int apmf_get_static_slider_granular_v2(struct amd_pmf_dev *pdev,
+ struct apmf_static_slider_granular_output_v2 *data)
+{
+ if (!is_apmf_func_supported(pdev, APMF_FUNC_STATIC_SLIDER_GRANULAR))
+ return -EINVAL;
+
+ return apmf_if_call_store_buffer(pdev, APMF_FUNC_STATIC_SLIDER_GRANULAR,
+ data, sizeof(*data));
+}
+
int apmf_get_static_slider_granular(struct amd_pmf_dev *pdev,
struct apmf_static_slider_granular_output *data)
{
@@ -106,6 +195,26 @@ int apmf_get_static_slider_granular(struct amd_pmf_dev *pdev,
data, sizeof(*data));
}
+int apmf_os_power_slider_update(struct amd_pmf_dev *pdev, u8 event)
+{
+ struct os_power_slider args;
+ struct acpi_buffer params;
+ union acpi_object *info;
+
+ args.size = sizeof(args);
+ args.slider_event = event;
+
+ params.length = sizeof(args);
+ params.pointer = (void *)&args;
+
+ info = apmf_if_call(pdev, APMF_FUNC_OS_POWER_SLIDER_UPDATE, &params);
+ if (!info)
+ return -EIO;
+
+ kfree(info);
+ return 0;
+}
+
static void apmf_sbios_heartbeat_notify(struct work_struct *work)
{
struct amd_pmf_dev *dev = container_of(work, struct amd_pmf_dev, heart_beat.work);
@@ -114,12 +223,47 @@ static void apmf_sbios_heartbeat_notify(struct work_struct *work)
dev_dbg(dev->dev, "Sending heartbeat to SBIOS\n");
info = apmf_if_call(dev, APMF_FUNC_SBIOS_HEARTBEAT, NULL);
if (!info)
- goto out;
+ return;
- schedule_delayed_work(&dev->heart_beat, msecs_to_jiffies(dev->hb_interval * 1000));
+ schedule_delayed_work(&dev->heart_beat, secs_to_jiffies(dev->hb_interval));
+ kfree(info);
+}
+
+int amd_pmf_notify_sbios_heartbeat_event_v2(struct amd_pmf_dev *dev, u8 flag)
+{
+ struct sbios_hb_event_v2 args = { };
+ struct acpi_buffer params;
+ union acpi_object *info;
+
+ args.size = sizeof(args);
+
+ switch (flag) {
+ case ON_LOAD:
+ args.load = 1;
+ break;
+ case ON_UNLOAD:
+ args.unload = 1;
+ break;
+ case ON_SUSPEND:
+ args.suspend = 1;
+ break;
+ case ON_RESUME:
+ args.resume = 1;
+ break;
+ default:
+ dev_dbg(dev->dev, "Failed to send v2 heartbeat event, flag:0x%x\n", flag);
+ return -EINVAL;
+ }
+
+ params.length = sizeof(args);
+ params.pointer = &args;
+
+ info = apmf_if_call(dev, APMF_FUNC_SBIOS_HEARTBEAT_V2, &params);
+ if (!info)
+ return -EIO;
-out:
kfree(info);
+ return 0;
}
int apmf_update_fan_idx(struct amd_pmf_dev *pdev, bool manual, u32 idx)
@@ -127,7 +271,6 @@ int apmf_update_fan_idx(struct amd_pmf_dev *pdev, bool manual, u32 idx)
union acpi_object *info;
struct apmf_fan_idx args;
struct acpi_buffer params;
- int err = 0;
args.size = sizeof(args);
args.fan_ctl_mode = manual;
@@ -137,14 +280,34 @@ int apmf_update_fan_idx(struct amd_pmf_dev *pdev, bool manual, u32 idx)
params.pointer = (void *)&args;
info = apmf_if_call(pdev, APMF_FUNC_SET_FAN_IDX, &params);
- if (!info) {
- err = -EIO;
- goto out;
- }
+ if (!info)
+ return -EIO;
-out:
kfree(info);
- return err;
+ return 0;
+}
+
+static int apmf_notify_smart_pc_update(struct amd_pmf_dev *pdev, u32 val, u32 preq, u32 index)
+{
+ struct amd_pmf_notify_smart_pc_update args;
+ struct acpi_buffer params;
+ union acpi_object *info;
+
+ args.size = sizeof(args);
+ args.pending_req = preq;
+ args.custom_bios[index] = val;
+
+ params.length = sizeof(args);
+ params.pointer = &args;
+
+ info = apmf_if_call(pdev, APMF_FUNC_NOTIFY_SMART_PC_UPDATES, &params);
+ if (!info)
+ return -EIO;
+
+ kfree(info);
+ dev_dbg(pdev->dev, "Notify smart pc update, val: %u\n", val);
+
+ return 0;
}
int apmf_get_auto_mode_def(struct amd_pmf_dev *pdev, struct apmf_auto_mode *data)
@@ -152,23 +315,78 @@ int apmf_get_auto_mode_def(struct amd_pmf_dev *pdev, struct apmf_auto_mode *data
return apmf_if_call_store_buffer(pdev, APMF_FUNC_AUTO_MODE, data, sizeof(*data));
}
+int apmf_get_sbios_requests_v2(struct amd_pmf_dev *pdev, struct apmf_sbios_req_v2 *req)
+{
+ return apmf_if_call_store_buffer(pdev, APMF_FUNC_SBIOS_REQUESTS, req, sizeof(*req));
+}
+
+int apmf_get_sbios_requests_v1(struct amd_pmf_dev *pdev, struct apmf_sbios_req_v1 *req)
+{
+ return apmf_if_call_store_buffer(pdev, APMF_FUNC_SBIOS_REQUESTS, req, sizeof(*req));
+}
+
int apmf_get_sbios_requests(struct amd_pmf_dev *pdev, struct apmf_sbios_req *req)
{
return apmf_if_call_store_buffer(pdev, APMF_FUNC_SBIOS_REQUESTS,
req, sizeof(*req));
}
+static void amd_pmf_handle_early_preq(struct amd_pmf_dev *pdev)
+{
+ if (!pdev->cb_flag)
+ return;
+
+ amd_pmf_invoke_cmd_enact(pdev);
+ pdev->cb_flag = false;
+}
+
+static void apmf_event_handler_v2(acpi_handle handle, u32 event, void *data)
+{
+ struct amd_pmf_dev *pmf_dev = data;
+ int ret;
+
+ guard(mutex)(&pmf_dev->cb_mutex);
+
+ ret = apmf_get_sbios_requests_v2(pmf_dev, &pmf_dev->req);
+ if (ret) {
+ dev_err(pmf_dev->dev, "Failed to get v2 SBIOS requests: %d\n", ret);
+ return;
+ }
+
+ dev_dbg(pmf_dev->dev, "Pending request (preq): 0x%x\n", pmf_dev->req.pending_req);
+
+ amd_pmf_handle_early_preq(pmf_dev);
+}
+
+static void apmf_event_handler_v1(acpi_handle handle, u32 event, void *data)
+{
+ struct amd_pmf_dev *pmf_dev = data;
+ int ret;
+
+ guard(mutex)(&pmf_dev->cb_mutex);
+
+ ret = apmf_get_sbios_requests_v1(pmf_dev, &pmf_dev->req1);
+ if (ret) {
+ dev_err(pmf_dev->dev, "Failed to get v1 SBIOS requests: %d\n", ret);
+ return;
+ }
+
+ dev_dbg(pmf_dev->dev, "Pending request (preq1): 0x%x\n", pmf_dev->req1.pending_req);
+
+ amd_pmf_handle_early_preq(pmf_dev);
+}
+
static void apmf_event_handler(acpi_handle handle, u32 event, void *data)
{
struct amd_pmf_dev *pmf_dev = data;
struct apmf_sbios_req req;
int ret;
- mutex_lock(&pmf_dev->update_mutex);
+ guard(mutex)(&pmf_dev->update_mutex);
ret = apmf_get_sbios_requests(pmf_dev, &req);
if (ret) {
dev_err(pmf_dev->dev, "Failed to get SBIOS requests:%d\n", ret);
- goto out;
+ return;
}
if (req.pending_req & BIT(APMF_AMT_NOTIFICATION)) {
@@ -190,8 +408,6 @@ static void apmf_event_handler(acpi_handle handle, u32 event, void *data)
if (pmf_dev->amt_enabled)
amd_pmf_update_2_cql(pmf_dev, req.cql_event);
}
-out:
- mutex_unlock(&pmf_dev->update_mutex);
}
static int apmf_if_verify_interface(struct amd_pmf_dev *pdev)
@@ -203,10 +419,16 @@ static int apmf_if_verify_interface(struct amd_pmf_dev *pdev)
if (err)
return err;
- pdev->supported_func = output.supported_functions;
- dev_dbg(pdev->dev, "supported functions:0x%x notifications:0x%x\n",
- output.supported_functions, output.notification_mask);
+ /* only set if not already set by a quirk */
+ if (!pdev->supported_func)
+ pdev->supported_func = output.supported_functions;
+
+ dev_dbg(pdev->dev, "supported functions:0x%x notifications:0x%x version:%u\n",
+ output.supported_functions, output.notification_mask, output.version);
+ pdev->pmf_if_version = output.version;
+
+ pdev->notifications = output.notification_mask;
return 0;
}
@@ -243,6 +465,11 @@ int apmf_get_dyn_slider_def_dc(struct amd_pmf_dev *pdev, struct apmf_dyn_slider_
return apmf_if_call_store_buffer(pdev, APMF_FUNC_DYN_SLIDER_DC, data, sizeof(*data));
}
+static apmf_event_handler_t apmf_event_handlers[] = {
+ [PMF_IF_V1] = apmf_event_handler_v1,
+ [PMF_IF_V2] = apmf_event_handler_v2,
+};
+
int apmf_install_handler(struct amd_pmf_dev *pmf_dev)
{
acpi_handle ahandle = ACPI_HANDLE(pmf_dev->dev);
@@ -262,19 +489,94 @@ int apmf_install_handler(struct amd_pmf_dev *pmf_dev)
apmf_event_handler(ahandle, 0, pmf_dev);
}
+ if (!pmf_dev->smart_pc_enabled)
+ return -EINVAL;
+
+ switch (pmf_dev->pmf_if_version) {
+ case PMF_IF_V1:
+ if (!is_apmf_bios_input_notifications_supported(pmf_dev))
+ break;
+ fallthrough;
+ case PMF_IF_V2:
+ status = acpi_install_notify_handler(ahandle, ACPI_ALL_NOTIFY,
+ apmf_event_handlers[pmf_dev->pmf_if_version], pmf_dev);
+ if (ACPI_FAILURE(status)) {
+ dev_err(pmf_dev->dev,
+ "failed to install notify handler v%d for custom BIOS inputs\n",
+ pmf_dev->pmf_if_version);
+ return -ENODEV;
+ }
+ break;
+ default:
+ break;
+ }
+
return 0;
}
+int apmf_check_smart_pc(struct amd_pmf_dev *pmf_dev)
+{
+ struct platform_device *pdev = to_platform_device(pmf_dev->dev);
+
+ pmf_dev->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!pmf_dev->res) {
+ dev_dbg(pmf_dev->dev, "Failed to get I/O memory resource\n");
+ return -EINVAL;
+ }
+
+ pmf_dev->policy_addr = pmf_dev->res->start;
+ /*
+ * We cannot use resource_size() here because it adds an extra byte to round off the size.
+ * In the case of PMF ResourceTemplate(), this rounding is already handled within the _CRS.
+ * Using resource_size() would increase the resource size by 1, causing a mismatch with the
+ * length field and leading to issues. Therefore, simply use end-start of the ACPI resource
+ * to obtain the actual length.
+ */
+ pmf_dev->policy_sz = pmf_dev->res->end - pmf_dev->res->start;
+
+ if (!pmf_dev->policy_addr || pmf_dev->policy_sz > POLICY_BUF_MAX_SZ ||
+ pmf_dev->policy_sz == 0) {
+ dev_err(pmf_dev->dev, "Incorrect policy params, possibly a SBIOS bug\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int amd_pmf_smartpc_apply_bios_output(struct amd_pmf_dev *dev, u32 val, u32 preq, u32 idx)
+{
+ if (!is_apmf_func_supported(dev, APMF_FUNC_NOTIFY_SMART_PC_UPDATES))
+ return -EINVAL;
+
+ return apmf_notify_smart_pc_update(dev, val, preq, idx);
+}
+
void apmf_acpi_deinit(struct amd_pmf_dev *pmf_dev)
{
acpi_handle ahandle = ACPI_HANDLE(pmf_dev->dev);
- if (pmf_dev->hb_interval)
+ if (pmf_dev->hb_interval && pmf_dev->pmf_if_version == PMF_IF_V1)
cancel_delayed_work_sync(&pmf_dev->heart_beat);
if (is_apmf_func_supported(pmf_dev, APMF_FUNC_AUTO_MODE) &&
is_apmf_func_supported(pmf_dev, APMF_FUNC_SBIOS_REQUESTS))
acpi_remove_notify_handler(ahandle, ACPI_ALL_NOTIFY, apmf_event_handler);
+
+ if (!pmf_dev->smart_pc_enabled)
+ return;
+
+ switch (pmf_dev->pmf_if_version) {
+ case PMF_IF_V1:
+ if (!is_apmf_bios_input_notifications_supported(pmf_dev))
+ break;
+ fallthrough;
+ case PMF_IF_V2:
+ acpi_remove_notify_handler(ahandle, ACPI_ALL_NOTIFY,
+ apmf_event_handlers[pmf_dev->pmf_if_version]);
+ break;
+ default:
+ break;
+ }
}
int apmf_acpi_init(struct amd_pmf_dev *pmf_dev)
@@ -289,11 +591,11 @@ int apmf_acpi_init(struct amd_pmf_dev *pmf_dev)
ret = apmf_get_system_params(pmf_dev);
if (ret) {
- dev_err(pmf_dev->dev, "APMF apmf_get_system_params failed :%d\n", ret);
+ dev_dbg(pmf_dev->dev, "APMF apmf_get_system_params failed :%d\n", ret);
goto out;
}
- if (pmf_dev->hb_interval) {
+ if (pmf_dev->hb_interval && pmf_dev->pmf_if_version == PMF_IF_V1) {
/* send heartbeats only if the interval is not zero */
INIT_DELAYED_WORK(&pmf_dev->heart_beat, apmf_sbios_heartbeat_notify);
schedule_delayed_work(&pmf_dev->heart_beat, 0);
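
A note on the convention used throughout acpi.c above: as the comment in is_apmf_func_supported() says, bit n of the supported-functions mask advertises APMF function n+1, so the check masks against BIT(index - 1). A stand-alone sketch of that bit math with a hypothetical mask value:

#include <stdbool.h>
#include <stdio.h>

/* Mirrors is_apmf_func_supported(): bit (index - 1) set => function 'index' supported. */
static bool func_supported(unsigned int supported_func, unsigned int index)
{
        return supported_func & (1u << (index - 1));
}

int main(void)
{
        /* Hypothetical mask with bits 7 and 8 set. */
        unsigned int supported = 0x180;

        /* Bit 7 -> function 8 (APMF_FUNC_OS_POWER_SLIDER_UPDATE),
         * bit 8 -> function 9 (APMF_FUNC_STATIC_SLIDER_GRANULAR),
         * matching the APMF_FUNC_* numbering in pmf.h below.
         */
        printf("func 8 supported: %d\n", func_supported(supported, 8));
        printf("func 9 supported: %d\n", func_supported(supported, 9));
        printf("func 5 supported: %d\n", func_supported(supported, 5));
        return 0;
}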
diff --git a/drivers/platform/x86/amd/pmf/auto-mode.c b/drivers/platform/x86/amd/pmf/auto-mode.c
index 644af42e07cf..faf15a8f74bb 100644
--- a/drivers/platform/x86/amd/pmf/auto-mode.c
+++ b/drivers/platform/x86/amd/pmf/auto-mode.c
@@ -15,20 +15,114 @@
static struct auto_mode_mode_config config_store;
static const char *state_as_str(unsigned int state);
+#ifdef CONFIG_AMD_PMF_DEBUG
+static void amd_pmf_dump_auto_mode_defaults(struct auto_mode_mode_config *data)
+{
+ struct auto_mode_mode_settings *its_mode;
+
+ pr_debug("Auto Mode Data - BEGIN\n");
+
+ /* time constant */
+ pr_debug("balanced_to_perf: %u ms\n",
+ data->transition[AUTO_TRANSITION_TO_PERFORMANCE].time_constant);
+ pr_debug("perf_to_balanced: %u ms\n",
+ data->transition[AUTO_TRANSITION_FROM_PERFORMANCE_TO_BALANCE].time_constant);
+ pr_debug("quiet_to_balanced: %u ms\n",
+ data->transition[AUTO_TRANSITION_FROM_QUIET_TO_BALANCE].time_constant);
+ pr_debug("balanced_to_quiet: %u ms\n",
+ data->transition[AUTO_TRANSITION_TO_QUIET].time_constant);
+
+ /* power floor */
+ pr_debug("pfloor_perf: %u mW\n", data->mode_set[AUTO_PERFORMANCE].power_floor);
+ pr_debug("pfloor_balanced: %u mW\n", data->mode_set[AUTO_BALANCE].power_floor);
+ pr_debug("pfloor_quiet: %u mW\n", data->mode_set[AUTO_QUIET].power_floor);
+
+ /* Power delta for mode change */
+ pr_debug("pd_balanced_to_perf: %u mW\n",
+ data->transition[AUTO_TRANSITION_TO_PERFORMANCE].power_delta);
+ pr_debug("pd_perf_to_balanced: %u mW\n",
+ data->transition[AUTO_TRANSITION_FROM_PERFORMANCE_TO_BALANCE].power_delta);
+ pr_debug("pd_quiet_to_balanced: %u mW\n",
+ data->transition[AUTO_TRANSITION_FROM_QUIET_TO_BALANCE].power_delta);
+ pr_debug("pd_balanced_to_quiet: %u mW\n",
+ data->transition[AUTO_TRANSITION_TO_QUIET].power_delta);
+
+ /* skin temperature limits */
+ its_mode = &data->mode_set[AUTO_PERFORMANCE_ON_LAP];
+ pr_debug("stt_apu_perf_on_lap: %u C\n",
+ its_mode->power_control.stt_skin_temp[STT_TEMP_APU]);
+ pr_debug("stt_hs2_perf_on_lap: %u C\n",
+ its_mode->power_control.stt_skin_temp[STT_TEMP_HS2]);
+ pr_debug("stt_min_limit_perf_on_lap: %u mW\n", its_mode->power_control.stt_min);
+
+ its_mode = &data->mode_set[AUTO_PERFORMANCE];
+ pr_debug("stt_apu_perf: %u C\n", its_mode->power_control.stt_skin_temp[STT_TEMP_APU]);
+ pr_debug("stt_hs2_perf: %u C\n", its_mode->power_control.stt_skin_temp[STT_TEMP_HS2]);
+ pr_debug("stt_min_limit_perf: %u mW\n", its_mode->power_control.stt_min);
+
+ its_mode = &data->mode_set[AUTO_BALANCE];
+ pr_debug("stt_apu_balanced: %u C\n", its_mode->power_control.stt_skin_temp[STT_TEMP_APU]);
+ pr_debug("stt_hs2_balanced: %u C\n", its_mode->power_control.stt_skin_temp[STT_TEMP_HS2]);
+ pr_debug("stt_min_limit_balanced: %u mW\n", its_mode->power_control.stt_min);
+
+ its_mode = &data->mode_set[AUTO_QUIET];
+ pr_debug("stt_apu_quiet: %u C\n", its_mode->power_control.stt_skin_temp[STT_TEMP_APU]);
+ pr_debug("stt_hs2_quiet: %u C\n", its_mode->power_control.stt_skin_temp[STT_TEMP_HS2]);
+ pr_debug("stt_min_limit_quiet: %u mW\n", its_mode->power_control.stt_min);
+
+ /* SPL based power limits */
+ its_mode = &data->mode_set[AUTO_PERFORMANCE_ON_LAP];
+ pr_debug("fppt_perf_on_lap: %u mW\n", its_mode->power_control.fppt);
+ pr_debug("sppt_perf_on_lap: %u mW\n", its_mode->power_control.sppt);
+ pr_debug("spl_perf_on_lap: %u mW\n", its_mode->power_control.spl);
+ pr_debug("sppt_apu_only_perf_on_lap: %u mW\n", its_mode->power_control.sppt_apu_only);
+
+ its_mode = &data->mode_set[AUTO_PERFORMANCE];
+ pr_debug("fppt_perf: %u mW\n", its_mode->power_control.fppt);
+ pr_debug("sppt_perf: %u mW\n", its_mode->power_control.sppt);
+ pr_debug("spl_perf: %u mW\n", its_mode->power_control.spl);
+ pr_debug("sppt_apu_only_perf: %u mW\n", its_mode->power_control.sppt_apu_only);
+
+ its_mode = &data->mode_set[AUTO_BALANCE];
+ pr_debug("fppt_balanced: %u mW\n", its_mode->power_control.fppt);
+ pr_debug("sppt_balanced: %u mW\n", its_mode->power_control.sppt);
+ pr_debug("spl_balanced: %u mW\n", its_mode->power_control.spl);
+ pr_debug("sppt_apu_only_balanced: %u mW\n", its_mode->power_control.sppt_apu_only);
+
+ its_mode = &data->mode_set[AUTO_QUIET];
+ pr_debug("fppt_quiet: %u mW\n", its_mode->power_control.fppt);
+ pr_debug("sppt_quiet: %u mW\n", its_mode->power_control.sppt);
+ pr_debug("spl_quiet: %u mW\n", its_mode->power_control.spl);
+ pr_debug("sppt_apu_only_quiet: %u mW\n", its_mode->power_control.sppt_apu_only);
+
+ /* Fan ID */
+ pr_debug("fan_id_perf: %lu\n",
+ data->mode_set[AUTO_PERFORMANCE].fan_control.fan_id);
+ pr_debug("fan_id_balanced: %lu\n",
+ data->mode_set[AUTO_BALANCE].fan_control.fan_id);
+ pr_debug("fan_id_quiet: %lu\n",
+ data->mode_set[AUTO_QUIET].fan_control.fan_id);
+
+ pr_debug("Auto Mode Data - END\n");
+}
+#else
+static void amd_pmf_dump_auto_mode_defaults(struct auto_mode_mode_config *data) {}
+#endif
+
static void amd_pmf_set_automode(struct amd_pmf_dev *dev, int idx,
struct auto_mode_mode_config *table)
{
struct power_table_control *pwr_ctrl = &config_store.mode_set[idx].power_control;
- amd_pmf_send_cmd(dev, SET_SPL, false, pwr_ctrl->spl, NULL);
- amd_pmf_send_cmd(dev, SET_FPPT, false, pwr_ctrl->fppt, NULL);
- amd_pmf_send_cmd(dev, SET_SPPT, false, pwr_ctrl->sppt, NULL);
- amd_pmf_send_cmd(dev, SET_SPPT_APU_ONLY, false, pwr_ctrl->sppt_apu_only, NULL);
- amd_pmf_send_cmd(dev, SET_STT_MIN_LIMIT, false, pwr_ctrl->stt_min, NULL);
- amd_pmf_send_cmd(dev, SET_STT_LIMIT_APU, false,
- pwr_ctrl->stt_skin_temp[STT_TEMP_APU], NULL);
- amd_pmf_send_cmd(dev, SET_STT_LIMIT_HS2, false,
- pwr_ctrl->stt_skin_temp[STT_TEMP_HS2], NULL);
+ amd_pmf_send_cmd(dev, SET_SPL, SET_CMD, pwr_ctrl->spl, NULL);
+ amd_pmf_send_cmd(dev, SET_FPPT, SET_CMD, pwr_ctrl->fppt, NULL);
+ amd_pmf_send_cmd(dev, SET_SPPT, SET_CMD, pwr_ctrl->sppt, NULL);
+ amd_pmf_send_cmd(dev, SET_SPPT_APU_ONLY, SET_CMD, pwr_ctrl->sppt_apu_only, NULL);
+ amd_pmf_send_cmd(dev, SET_STT_MIN_LIMIT, SET_CMD, pwr_ctrl->stt_min, NULL);
+ amd_pmf_send_cmd(dev, SET_STT_LIMIT_APU, SET_CMD,
+ fixp_q88_fromint(pwr_ctrl->stt_skin_temp[STT_TEMP_APU]), NULL);
+ amd_pmf_send_cmd(dev, SET_STT_LIMIT_HS2, SET_CMD,
+ fixp_q88_fromint(pwr_ctrl->stt_skin_temp[STT_TEMP_HS2]), NULL);
if (is_apmf_func_supported(dev, APMF_FUNC_SET_FAN_IDX))
apmf_update_fan_idx(dev, config_store.mode_set[idx].fan_control.manual,
@@ -85,11 +179,34 @@ void amd_pmf_trans_automode(struct amd_pmf_dev *dev, int socket_power, ktime_t t
config_store.transition[i].applied = false;
update = true;
}
+
+#ifdef CONFIG_AMD_PMF_DEBUG
+ dev_dbg(dev->dev, "[AUTO MODE] average_power : %d mW mode: %s\n", avg_power,
+ state_as_str(config_store.current_mode));
+
+ dev_dbg(dev->dev, "[AUTO MODE] time: %lld ms timer: %u ms tc: %u ms\n",
+ time_elapsed_ms, config_store.transition[i].timer,
+ config_store.transition[i].time_constant);
+
+ dev_dbg(dev->dev, "[AUTO MODE] shiftup: %u pt: %u mW pf: %u mW pd: %u mW\n",
+ config_store.transition[i].shifting_up,
+ config_store.transition[i].power_threshold,
+ config_store.mode_set[i].power_floor,
+ config_store.transition[i].power_delta);
+#endif
}
dev_dbg(dev->dev, "[AUTO_MODE] avg power: %u mW mode: %s\n", avg_power,
state_as_str(config_store.current_mode));
+#ifdef CONFIG_AMD_PMF_DEBUG
+ dev_dbg(dev->dev, "[AUTO MODE] priority1: %u priority2: %u priority3: %u priority4: %u\n",
+ config_store.transition[0].applied,
+ config_store.transition[1].applied,
+ config_store.transition[2].applied,
+ config_store.transition[3].applied);
+#endif
+
if (update) {
for (j = 0; j < AUTO_TRANSITION_MAX; j++) {
/* Apply the mode with the highest priority identified */
@@ -140,6 +257,30 @@ static void amd_pmf_get_power_threshold(void)
config_store.transition[AUTO_TRANSITION_FROM_PERFORMANCE_TO_BALANCE].power_threshold =
config_store.mode_set[AUTO_PERFORMANCE].power_floor -
config_store.transition[AUTO_TRANSITION_FROM_PERFORMANCE_TO_BALANCE].power_delta;
+
+#ifdef CONFIG_AMD_PMF_DEBUG
+ pr_debug("[AUTO MODE TO_QUIET] pt: %u mW pf: %u mW pd: %u mW\n",
+ config_store.transition[AUTO_TRANSITION_TO_QUIET].power_threshold,
+ config_store.mode_set[AUTO_BALANCE].power_floor,
+ config_store.transition[AUTO_TRANSITION_TO_QUIET].power_delta);
+
+ pr_debug("[AUTO MODE TO_PERFORMANCE] pt: %u mW pf: %u mW pd: %u mW\n",
+ config_store.transition[AUTO_TRANSITION_TO_PERFORMANCE].power_threshold,
+ config_store.mode_set[AUTO_BALANCE].power_floor,
+ config_store.transition[AUTO_TRANSITION_TO_PERFORMANCE].power_delta);
+
+ pr_debug("[AUTO MODE QUIET_TO_BALANCE] pt: %u mW pf: %u mW pd: %u mW\n",
+ config_store.transition[AUTO_TRANSITION_FROM_QUIET_TO_BALANCE]
+ .power_threshold,
+ config_store.mode_set[AUTO_QUIET].power_floor,
+ config_store.transition[AUTO_TRANSITION_FROM_QUIET_TO_BALANCE].power_delta);
+
+ pr_debug("[AUTO MODE PERFORMANCE_TO_BALANCE] pt: %u mW pf: %u mW pd: %u mW\n",
+ config_store.transition[AUTO_TRANSITION_FROM_PERFORMANCE_TO_BALANCE]
+ .power_threshold,
+ config_store.mode_set[AUTO_PERFORMANCE].power_floor,
+ config_store.transition[AUTO_TRANSITION_FROM_PERFORMANCE_TO_BALANCE].power_delta);
+#endif
}
static const char *state_as_str(unsigned int state)
@@ -262,6 +403,8 @@ static void amd_pmf_load_defaults_auto_mode(struct amd_pmf_dev *dev)
/* set to initial default values */
config_store.current_mode = AUTO_BALANCE;
dev->socket_power_history_idx = -1;
+
+ amd_pmf_dump_auto_mode_defaults(&config_store);
}
int amd_pmf_reset_amt(struct amd_pmf_dev *dev)
@@ -275,13 +418,8 @@ int amd_pmf_reset_amt(struct amd_pmf_dev *dev)
*/
if (is_apmf_func_supported(dev, APMF_FUNC_STATIC_SLIDER_GRANULAR)) {
- int mode = amd_pmf_get_pprof_modes(dev);
-
- if (mode < 0)
- return mode;
-
dev_dbg(dev->dev, "resetting AMT thermals\n");
- amd_pmf_update_slider(dev, SLIDER_OP_SET, mode, NULL);
+ amd_pmf_set_sps_power_limits(dev);
}
return 0;
}
@@ -299,7 +437,5 @@ void amd_pmf_deinit_auto_mode(struct amd_pmf_dev *dev)
void amd_pmf_init_auto_mode(struct amd_pmf_dev *dev)
{
amd_pmf_load_defaults_auto_mode(dev);
- /* update the thermal limits for Automode */
- amd_pmf_set_automode(dev, config_store.current_mode, NULL);
amd_pmf_init_metrics_table(dev);
}
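
The debug prints added to amd_pmf_get_power_threshold() above echo the relation the existing code computes: for the performance-to-balance transition, the power threshold is the AUTO_PERFORMANCE power floor minus that transition's power delta. A stand-alone sketch of the arithmetic with made-up values (real ones come from the APMF auto-mode table):

#include <stdio.h>

int main(void)
{
        /* Illustrative values in mW, not taken from any platform. */
        unsigned int perf_power_floor = 30000;
        unsigned int perf_to_balance_power_delta = 2500;

        /* Same relation as the perf-to-balance hunk in amd_pmf_get_power_threshold(). */
        unsigned int perf_to_balance_threshold =
                perf_power_floor - perf_to_balance_power_delta;

        printf("PERFORMANCE_TO_BALANCE pt: %u mW (pf %u mW, pd %u mW)\n",
               perf_to_balance_threshold, perf_power_floor,
               perf_to_balance_power_delta);
        return 0;
}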
diff --git a/drivers/platform/x86/amd/pmf/cnqf.c b/drivers/platform/x86/amd/pmf/cnqf.c
index 3f9731a2ac28..5469fefb6001 100644
--- a/drivers/platform/x86/amd/pmf/cnqf.c
+++ b/drivers/platform/x86/amd/pmf/cnqf.c
@@ -8,11 +8,67 @@
* Author: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
*/
+#include <linux/string_choices.h>
#include <linux/workqueue.h>
#include "pmf.h"
static struct cnqf_config config_store;
+#ifdef CONFIG_AMD_PMF_DEBUG
+static const char *state_as_str_cnqf(unsigned int state)
+{
+ switch (state) {
+ case APMF_CNQF_TURBO:
+ return "turbo";
+ case APMF_CNQF_PERFORMANCE:
+ return "performance";
+ case APMF_CNQF_BALANCE:
+ return "balance";
+ case APMF_CNQF_QUIET:
+ return "quiet";
+ default:
+ return "Unknown CnQF State";
+ }
+}
+
+static void amd_pmf_cnqf_dump_defaults(struct apmf_dyn_slider_output *data, int idx)
+{
+ int i;
+
+ pr_debug("Dynamic Slider %s Defaults - BEGIN\n", idx ? "DC" : "AC");
+ pr_debug("size: %u\n", data->size);
+ pr_debug("flags: 0x%x\n", data->flags);
+
+ /* Time constants */
+ pr_debug("t_perf_to_turbo: %u ms\n", data->t_perf_to_turbo);
+ pr_debug("t_balanced_to_perf: %u ms\n", data->t_balanced_to_perf);
+ pr_debug("t_quiet_to_balanced: %u ms\n", data->t_quiet_to_balanced);
+ pr_debug("t_balanced_to_quiet: %u ms\n", data->t_balanced_to_quiet);
+ pr_debug("t_perf_to_balanced: %u ms\n", data->t_perf_to_balanced);
+ pr_debug("t_turbo_to_perf: %u ms\n", data->t_turbo_to_perf);
+
+ for (i = 0 ; i < CNQF_MODE_MAX ; i++) {
+ pr_debug("pfloor_%s: %u mW\n", state_as_str_cnqf(i), data->ps[i].pfloor);
+ pr_debug("fppt_%s: %u mW\n", state_as_str_cnqf(i), data->ps[i].fppt);
+ pr_debug("sppt_%s: %u mW\n", state_as_str_cnqf(i), data->ps[i].sppt);
+ pr_debug("sppt_apuonly_%s: %u mW\n",
+ state_as_str_cnqf(i), data->ps[i].sppt_apu_only);
+ pr_debug("spl_%s: %u mW\n", state_as_str_cnqf(i), data->ps[i].spl);
+ pr_debug("stt_minlimit_%s: %u mW\n",
+ state_as_str_cnqf(i), data->ps[i].stt_min_limit);
+ pr_debug("stt_skintemp_apu_%s: %u C\n", state_as_str_cnqf(i),
+ data->ps[i].stt_skintemp[STT_TEMP_APU]);
+ pr_debug("stt_skintemp_hs2_%s: %u C\n", state_as_str_cnqf(i),
+ data->ps[i].stt_skintemp[STT_TEMP_HS2]);
+ pr_debug("fan_id_%s: %u\n", state_as_str_cnqf(i), data->ps[i].fan_id);
+ }
+
+ pr_debug("Dynamic Slider %s Defaults - END\n", idx ? "DC" : "AC");
+}
+#else
+static void amd_pmf_cnqf_dump_defaults(struct apmf_dyn_slider_output *data, int idx) {}
+#endif
+
static int amd_pmf_set_cnqf(struct amd_pmf_dev *dev, int src, int idx,
struct cnqf_config *table)
{
@@ -20,15 +76,15 @@ static int amd_pmf_set_cnqf(struct amd_pmf_dev *dev, int src, int idx,
pc = &config_store.mode_set[src][idx].power_control;
- amd_pmf_send_cmd(dev, SET_SPL, false, pc->spl, NULL);
- amd_pmf_send_cmd(dev, SET_FPPT, false, pc->fppt, NULL);
- amd_pmf_send_cmd(dev, SET_SPPT, false, pc->sppt, NULL);
- amd_pmf_send_cmd(dev, SET_SPPT_APU_ONLY, false, pc->sppt_apu_only, NULL);
- amd_pmf_send_cmd(dev, SET_STT_MIN_LIMIT, false, pc->stt_min, NULL);
- amd_pmf_send_cmd(dev, SET_STT_LIMIT_APU, false, pc->stt_skin_temp[STT_TEMP_APU],
- NULL);
- amd_pmf_send_cmd(dev, SET_STT_LIMIT_HS2, false, pc->stt_skin_temp[STT_TEMP_HS2],
- NULL);
+ amd_pmf_send_cmd(dev, SET_SPL, SET_CMD, pc->spl, NULL);
+ amd_pmf_send_cmd(dev, SET_FPPT, SET_CMD, pc->fppt, NULL);
+ amd_pmf_send_cmd(dev, SET_SPPT, SET_CMD, pc->sppt, NULL);
+ amd_pmf_send_cmd(dev, SET_SPPT_APU_ONLY, SET_CMD, pc->sppt_apu_only, NULL);
+ amd_pmf_send_cmd(dev, SET_STT_MIN_LIMIT, SET_CMD, pc->stt_min, NULL);
+ amd_pmf_send_cmd(dev, SET_STT_LIMIT_APU, SET_CMD,
+ fixp_q88_fromint(pc->stt_skin_temp[STT_TEMP_APU]), NULL);
+ amd_pmf_send_cmd(dev, SET_STT_LIMIT_HS2, SET_CMD,
+ fixp_q88_fromint(pc->stt_skin_temp[STT_TEMP_HS2]), NULL);
if (is_apmf_func_supported(dev, APMF_FUNC_SET_FAN_IDX))
apmf_update_fan_idx(dev,
@@ -103,7 +159,7 @@ int amd_pmf_trans_cnqf(struct amd_pmf_dev *dev, int socket_power, ktime_t time_l
src = amd_pmf_cnqf_get_power_source(dev);
- if (dev->current_profile == PLATFORM_PROFILE_BALANCED) {
+ if (is_pprof_balanced(dev)) {
amd_pmf_set_cnqf(dev, src, config_store.current_mode, NULL);
} else {
/*
@@ -120,6 +176,13 @@ int amd_pmf_trans_cnqf(struct amd_pmf_dev *dev, int socket_power, ktime_t time_l
config_store.trans_param[src][i].count++;
tp = &config_store.trans_param[src][i];
+
+#ifdef CONFIG_AMD_PMF_DEBUG
+ dev_dbg(dev->dev, "avg_power: %u mW total_power: %u mW count: %u timer: %u ms\n",
+ avg_power, config_store.trans_param[src][i].total_power,
+ config_store.trans_param[src][i].count,
+ config_store.trans_param[src][i].timer);
+#endif
if (tp->timer >= tp->time_constant && tp->count) {
avg_power = tp->total_power / tp->count;
@@ -140,6 +203,18 @@ int amd_pmf_trans_cnqf(struct amd_pmf_dev *dev, int socket_power, ktime_t time_l
dev_dbg(dev->dev, "[CNQF] Avg power: %u mW socket power: %u mW mode:%s\n",
avg_power, socket_power, state_as_str(config_store.current_mode));
+#ifdef CONFIG_AMD_PMF_DEBUG
+ dev_dbg(dev->dev, "[CNQF] priority1: %u priority2: %u priority3: %u\n",
+ config_store.trans_param[src][0].priority,
+ config_store.trans_param[src][1].priority,
+ config_store.trans_param[src][2].priority);
+
+ dev_dbg(dev->dev, "[CNQF] priority4: %u priority5: %u priority6: %u\n",
+ config_store.trans_param[src][3].priority,
+ config_store.trans_param[src][4].priority,
+ config_store.trans_param[src][5].priority);
+#endif
+
for (j = 0; j < CNQF_TRANSITION_MAX; j++) {
/* apply the highest priority */
if (config_store.trans_param[src][j].priority) {
@@ -284,6 +359,7 @@ static int amd_pmf_load_defaults_cnqf(struct amd_pmf_dev *dev)
return ret;
}
+ amd_pmf_cnqf_dump_defaults(&out, i);
amd_pmf_update_mode_set(i, &out);
amd_pmf_update_trans_data(i, &out);
amd_pmf_update_power_threshold(i);
@@ -307,13 +383,9 @@ static ssize_t cnqf_enable_store(struct device *dev,
const char *buf, size_t count)
{
struct amd_pmf_dev *pdev = dev_get_drvdata(dev);
- int mode, result, src;
+ int result, src;
bool input;
- mode = amd_pmf_get_pprof_modes(pdev);
- if (mode < 0)
- return mode;
-
result = kstrtobool(buf, &input);
if (result)
return result;
@@ -321,14 +393,14 @@ static ssize_t cnqf_enable_store(struct device *dev,
src = amd_pmf_cnqf_get_power_source(pdev);
pdev->cnqf_enabled = input;
- if (pdev->cnqf_enabled && pdev->current_profile == PLATFORM_PROFILE_BALANCED) {
+ if (pdev->cnqf_enabled && is_pprof_balanced(pdev)) {
amd_pmf_set_cnqf(pdev, src, config_store.current_mode, NULL);
} else {
if (is_apmf_func_supported(pdev, APMF_FUNC_STATIC_SLIDER_GRANULAR))
- amd_pmf_update_slider(pdev, SLIDER_OP_SET, mode, NULL);
+ amd_pmf_set_sps_power_limits(pdev);
}
- dev_dbg(pdev->dev, "Received CnQF %s\n", input ? "on" : "off");
+ dev_dbg(pdev->dev, "Received CnQF %s\n", str_on_off(input));
return count;
}
@@ -338,7 +410,7 @@ static ssize_t cnqf_enable_show(struct device *dev,
{
struct amd_pmf_dev *pdev = dev_get_drvdata(dev);
- return sysfs_emit(buf, "%s\n", pdev->cnqf_enabled ? "on" : "off");
+ return sysfs_emit(buf, "%s\n", str_on_off(pdev->cnqf_enabled));
}
static DEVICE_ATTR_RW(cnqf_enable);
@@ -386,7 +458,7 @@ int amd_pmf_init_cnqf(struct amd_pmf_dev *dev)
dev->cnqf_enabled = amd_pmf_check_flags(dev);
/* update the thermal for CnQF */
- if (dev->cnqf_enabled && dev->current_profile == PLATFORM_PROFILE_BALANCED) {
+ if (dev->cnqf_enabled && is_pprof_balanced(dev)) {
src = amd_pmf_cnqf_get_power_source(dev);
amd_pmf_set_cnqf(dev, src, config_store.current_mode, NULL);
}
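
The CnQF transition hunk above keeps accumulating socket-power samples and a millisecond timer per transition; once the timer reaches the transition's time constant and at least one sample has been taken, the average power (total_power / count) is what drives the mode decision. A stand-alone sketch of that windowed average with illustrative numbers:

#include <stdio.h>

/* Minimal stand-in for the fields of the driver's transition parameters
 * that the averaging step in amd_pmf_trans_cnqf() uses.
 */
struct demo_trans_param {
        unsigned int total_power;   /* mW, accumulated over the window */
        unsigned int count;         /* samples accumulated */
        unsigned int timer;         /* ms, accumulated */
        unsigned int time_constant; /* ms */
};

int main(void)
{
        struct demo_trans_param tp = {
                .total_power = 36000, .count = 3,
                .timer = 3000, .time_constant = 3000,
        };

        if (tp.timer >= tp.time_constant && tp.count)
                printf("avg power over window: %u mW\n", tp.total_power / tp.count);
        return 0;
}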
diff --git a/drivers/platform/x86/amd/pmf/core.c b/drivers/platform/x86/amd/pmf/core.c
index a5f5a4bcff6d..8fc293c9c538 100644
--- a/drivers/platform/x86/amd/pmf/core.c
+++ b/drivers/platform/x86/amd/pmf/core.c
@@ -14,6 +14,7 @@
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/power_supply.h>
+#include <asm/amd/node.h>
#include "pmf.h"
/* PMF-SMU communication registers */
@@ -22,8 +23,6 @@
#define AMD_PMF_REGISTER_ARGUMENT 0xA58
/* Base address of SMU for mapping physical address to virtual address */
-#define AMD_PMF_SMU_INDEX_ADDRESS 0xB8
-#define AMD_PMF_SMU_INDEX_DATA 0xBC
#define AMD_PMF_MAPPING_SIZE 0x01000
#define AMD_PMF_BASE_ADDR_OFFSET 0x10000
#define AMD_PMF_BASE_ADDR_LO 0x13B102E8
@@ -38,10 +37,6 @@
#define AMD_PMF_RESULT_CMD_UNKNOWN 0xFE
#define AMD_PMF_RESULT_FAILED 0xFF
-/* List of supported CPU ids */
-#define AMD_CPU_ID_RMB 0x14b5
-#define AMD_CPU_ID_PS 0x14e8
-
#define PMF_MSG_DELAY_MIN_US 50
#define RESPONSE_REGISTER_LOOP_MAX 20000
@@ -58,6 +53,29 @@ static bool force_load;
module_param(force_load, bool, 0444);
MODULE_PARM_DESC(force_load, "Force load this driver on supported older platforms (experimental)");
+static int amd_pmf_pwr_src_notify_call(struct notifier_block *nb, unsigned long event, void *data)
+{
+ struct amd_pmf_dev *pmf = container_of(nb, struct amd_pmf_dev, pwr_src_notifier);
+
+ if (event != PSY_EVENT_PROP_CHANGED)
+ return NOTIFY_OK;
+
+ if (is_apmf_func_supported(pmf, APMF_FUNC_AUTO_MODE) ||
+ is_apmf_func_supported(pmf, APMF_FUNC_DYN_SLIDER_DC) ||
+ is_apmf_func_supported(pmf, APMF_FUNC_DYN_SLIDER_AC)) {
+ if ((pmf->amt_enabled || pmf->cnqf_enabled) && is_pprof_balanced(pmf))
+ return NOTIFY_DONE;
+ }
+
+ if (is_apmf_func_supported(pmf, APMF_FUNC_STATIC_SLIDER_GRANULAR))
+ amd_pmf_set_sps_power_limits(pmf);
+
+ if (is_apmf_func_supported(pmf, APMF_FUNC_OS_POWER_SLIDER_UPDATE))
+ amd_pmf_power_slider_update_event(pmf);
+
+ return NOTIFY_OK;
+}
+
static int current_power_limits_show(struct seq_file *seq, void *unused)
{
struct amd_pmf_dev *dev = seq->private;
@@ -90,8 +108,9 @@ static void amd_pmf_dbgfs_unregister(struct amd_pmf_dev *dev)
static void amd_pmf_dbgfs_register(struct amd_pmf_dev *dev)
{
dev->dbgfs_dir = debugfs_create_dir("amd_pmf", NULL);
- debugfs_create_file("current_power_limits", 0644, dev->dbgfs_dir, dev,
- &current_power_limits_fops);
+ if (dev->pmf_if_version == PMF_IF_V1)
+ debugfs_create_file("current_power_limits", 0644, dev->dbgfs_dir, dev,
+ &current_power_limits_fops);
}
int amd_pmf_get_power_source(void)
@@ -108,10 +127,11 @@ static void amd_pmf_get_metrics(struct work_struct *work)
ktime_t time_elapsed_ms;
int socket_power;
- mutex_lock(&dev->update_mutex);
+ guard(mutex)(&dev->update_mutex);
+
/* Transfer table contents */
memset(dev->buf, 0, sizeof(dev->m_table));
- amd_pmf_send_cmd(dev, SET_TRANSFER_TABLE, 0, 7, NULL);
+ amd_pmf_send_cmd(dev, SET_TRANSFER_TABLE, SET_CMD, METRICS_TABLE_ID, NULL);
memcpy(&dev->m_table, dev->buf, sizeof(dev->m_table));
time_elapsed_ms = ktime_to_ms(ktime_get()) - dev->start_time;
@@ -130,7 +150,6 @@ static void amd_pmf_get_metrics(struct work_struct *work)
dev->start_time = ktime_to_ms(ktime_get());
schedule_delayed_work(&dev->work_buffer, msecs_to_jiffies(metrics_table_loop_ms));
- mutex_unlock(&dev->update_mutex);
}
static inline u32 amd_pmf_reg_read(struct amd_pmf_dev *dev, int reg_offset)
@@ -157,12 +176,26 @@ static void __maybe_unused amd_pmf_dump_registers(struct amd_pmf_dev *dev)
dev_dbg(dev->dev, "AMD_PMF_REGISTER_MESSAGE:%x\n", value);
}
+/**
+ * fixp_q88_fromint: Convert integer to Q8.8
+ * @val: input value
+ *
+ * Converts an integer into binary fixed point format where 8 bits
+ * are used for integer and 8 bits are used for the decimal.
+ *
+ * Return: unsigned integer converted to Q8.8 format
+ */
+u32 fixp_q88_fromint(u32 val)
+{
+ return val << 8;
+}
+
int amd_pmf_send_cmd(struct amd_pmf_dev *dev, u8 message, bool get, u32 arg, u32 *data)
{
int rc;
u32 val;
- mutex_lock(&dev->lock);
+ guard(mutex)(&dev->lock);
/* Wait until we get a valid response */
rc = readx_poll_timeout(ioread32, dev->regbase + AMD_PMF_REGISTER_RESPONSE,
@@ -170,7 +203,7 @@ int amd_pmf_send_cmd(struct amd_pmf_dev *dev, u8 message, bool get, u32 arg, u32
PMF_MSG_DELAY_MIN_US * RESPONSE_REGISTER_LOOP_MAX);
if (rc) {
dev_err(dev->dev, "failed to talk to SMU\n");
- goto out_unlock;
+ return rc;
}
/* Write zero to response register */
@@ -188,7 +221,7 @@ int amd_pmf_send_cmd(struct amd_pmf_dev *dev, u8 message, bool get, u32 arg, u32
PMF_MSG_DELAY_MIN_US * RESPONSE_REGISTER_LOOP_MAX);
if (rc) {
dev_err(dev->dev, "SMU response timed out\n");
- goto out_unlock;
+ return rc;
}
switch (val) {
@@ -202,21 +235,19 @@ int amd_pmf_send_cmd(struct amd_pmf_dev *dev, u8 message, bool get, u32 arg, u32
case AMD_PMF_RESULT_CMD_REJECT_BUSY:
dev_err(dev->dev, "SMU not ready. err: 0x%x\n", val);
rc = -EBUSY;
- goto out_unlock;
+ break;
case AMD_PMF_RESULT_CMD_UNKNOWN:
dev_err(dev->dev, "SMU cmd unknown. err: 0x%x\n", val);
rc = -EINVAL;
- goto out_unlock;
+ break;
case AMD_PMF_RESULT_CMD_REJECT_PREREQ:
case AMD_PMF_RESULT_FAILED:
default:
dev_err(dev->dev, "SMU cmd failed. err: 0x%x\n", val);
rc = -EIO;
- goto out_unlock;
+ break;
}
-out_unlock:
- mutex_unlock(&dev->lock);
amd_pmf_dump_registers(dev);
return rc;
}
@@ -224,27 +255,55 @@ out_unlock:
static const struct pci_device_id pmf_pci_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_RMB) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_PS) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M20H_ROOT) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M60H_ROOT) },
{ }
};
-int amd_pmf_init_metrics_table(struct amd_pmf_dev *dev)
+int amd_pmf_set_dram_addr(struct amd_pmf_dev *dev, bool alloc_buffer)
{
u64 phys_addr;
u32 hi, low;
- INIT_DELAYED_WORK(&dev->work_buffer, amd_pmf_get_metrics);
-
/* Get Metrics Table Address */
- dev->buf = kzalloc(sizeof(dev->m_table), GFP_KERNEL);
- if (!dev->buf)
- return -ENOMEM;
+ if (alloc_buffer) {
+ switch (dev->cpu_id) {
+ case AMD_CPU_ID_PS:
+ case AMD_CPU_ID_RMB:
+ dev->mtable_size = sizeof(dev->m_table);
+ break;
+ case PCI_DEVICE_ID_AMD_1AH_M20H_ROOT:
+ case PCI_DEVICE_ID_AMD_1AH_M60H_ROOT:
+ dev->mtable_size = sizeof(dev->m_table_v2);
+ break;
+ default:
+ dev_err(dev->dev, "Invalid CPU id: 0x%x", dev->cpu_id);
+ }
+
+ dev->buf = devm_kzalloc(dev->dev, dev->mtable_size, GFP_KERNEL);
+ if (!dev->buf)
+ return -ENOMEM;
+ }
phys_addr = virt_to_phys(dev->buf);
hi = phys_addr >> 32;
low = phys_addr & GENMASK(31, 0);
- amd_pmf_send_cmd(dev, SET_DRAM_ADDR_HIGH, 0, hi, NULL);
- amd_pmf_send_cmd(dev, SET_DRAM_ADDR_LOW, 0, low, NULL);
+ amd_pmf_send_cmd(dev, SET_DRAM_ADDR_HIGH, SET_CMD, hi, NULL);
+ amd_pmf_send_cmd(dev, SET_DRAM_ADDR_LOW, SET_CMD, low, NULL);
+
+ return 0;
+}
+
+int amd_pmf_init_metrics_table(struct amd_pmf_dev *dev)
+{
+ int ret;
+
+ INIT_DELAYED_WORK(&dev->work_buffer, amd_pmf_get_metrics);
+
+ ret = amd_pmf_set_dram_addr(dev, true);
+ if (ret)
+ return ret;
/*
* Start collecting the metrics data after a small delay
@@ -255,23 +314,66 @@ int amd_pmf_init_metrics_table(struct amd_pmf_dev *dev)
return 0;
}
+static int amd_pmf_suspend_handler(struct device *dev)
+{
+ struct amd_pmf_dev *pdev = dev_get_drvdata(dev);
+
+ if (pdev->smart_pc_enabled)
+ cancel_delayed_work_sync(&pdev->pb_work);
+
+ if (is_apmf_func_supported(pdev, APMF_FUNC_SBIOS_HEARTBEAT_V2))
+ amd_pmf_notify_sbios_heartbeat_event_v2(pdev, ON_SUSPEND);
+
+ return 0;
+}
+
+static int amd_pmf_resume_handler(struct device *dev)
+{
+ struct amd_pmf_dev *pdev = dev_get_drvdata(dev);
+ int ret;
+
+ if (pdev->buf) {
+ ret = amd_pmf_set_dram_addr(pdev, false);
+ if (ret)
+ return ret;
+ }
+
+ if (is_apmf_func_supported(pdev, APMF_FUNC_SBIOS_HEARTBEAT_V2))
+ amd_pmf_notify_sbios_heartbeat_event_v2(pdev, ON_RESUME);
+
+ if (pdev->smart_pc_enabled)
+ schedule_delayed_work(&pdev->pb_work, msecs_to_jiffies(2000));
+
+ return 0;
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(amd_pmf_pm, amd_pmf_suspend_handler, amd_pmf_resume_handler);
+
static void amd_pmf_init_features(struct amd_pmf_dev *dev)
{
int ret;
/* Enable Static Slider */
- if (is_apmf_func_supported(dev, APMF_FUNC_STATIC_SLIDER_GRANULAR)) {
+ if (is_apmf_func_supported(dev, APMF_FUNC_STATIC_SLIDER_GRANULAR) ||
+ is_apmf_func_supported(dev, APMF_FUNC_OS_POWER_SLIDER_UPDATE)) {
amd_pmf_init_sps(dev);
+ dev->pwr_src_notifier.notifier_call = amd_pmf_pwr_src_notify_call;
+ power_supply_reg_notifier(&dev->pwr_src_notifier);
dev_dbg(dev->dev, "SPS enabled and Platform Profiles registered\n");
}
- /* Enable Auto Mode */
+ amd_pmf_init_smart_pc(dev);
+ if (dev->smart_pc_enabled) {
+ dev_dbg(dev->dev, "Smart PC Solution Enabled\n");
+ /* If Smart PC is enabled, no need to check for other features */
+ return;
+ }
+
if (is_apmf_func_supported(dev, APMF_FUNC_AUTO_MODE)) {
amd_pmf_init_auto_mode(dev);
dev_dbg(dev->dev, "Auto Mode Init done\n");
} else if (is_apmf_func_supported(dev, APMF_FUNC_DYN_SLIDER_AC) ||
is_apmf_func_supported(dev, APMF_FUNC_DYN_SLIDER_DC)) {
- /* Enable Cool n Quiet Framework (CnQF) */
ret = amd_pmf_init_cnqf(dev);
if (ret)
dev_warn(dev->dev, "CnQF Init failed\n");
@@ -280,10 +382,14 @@ static void amd_pmf_init_features(struct amd_pmf_dev *dev)
static void amd_pmf_deinit_features(struct amd_pmf_dev *dev)
{
- if (is_apmf_func_supported(dev, APMF_FUNC_STATIC_SLIDER_GRANULAR))
- amd_pmf_deinit_sps(dev);
+ if (is_apmf_func_supported(dev, APMF_FUNC_STATIC_SLIDER_GRANULAR) ||
+ is_apmf_func_supported(dev, APMF_FUNC_OS_POWER_SLIDER_UPDATE)) {
+ power_supply_unreg_notifier(&dev->pwr_src_notifier);
+ }
- if (is_apmf_func_supported(dev, APMF_FUNC_AUTO_MODE)) {
+ if (dev->smart_pc_enabled) {
+ amd_pmf_deinit_smart_pc(dev);
+ } else if (is_apmf_func_supported(dev, APMF_FUNC_AUTO_MODE)) {
amd_pmf_deinit_auto_mode(dev);
} else if (is_apmf_func_supported(dev, APMF_FUNC_DYN_SLIDER_AC) ||
is_apmf_func_supported(dev, APMF_FUNC_DYN_SLIDER_DC)) {
@@ -294,6 +400,10 @@ static void amd_pmf_deinit_features(struct amd_pmf_dev *dev)
static const struct acpi_device_id amd_pmf_acpi_ids[] = {
{"AMDI0100", 0x100},
{"AMDI0102", 0},
+ {"AMDI0103", 0},
+ {"AMDI0105", 0},
+ {"AMDI0107", 0},
+ {"AMDI0108", 0},
{ }
};
MODULE_DEVICE_TABLE(acpi, amd_pmf_acpi_ids);
@@ -329,32 +439,21 @@ static int amd_pmf_probe(struct platform_device *pdev)
}
dev->cpu_id = rdev->device;
- err = pci_write_config_dword(rdev, AMD_PMF_SMU_INDEX_ADDRESS, AMD_PMF_BASE_ADDR_LO);
- if (err) {
- dev_err(dev->dev, "error writing to 0x%x\n", AMD_PMF_SMU_INDEX_ADDRESS);
- pci_dev_put(rdev);
- return pcibios_err_to_errno(err);
- }
- err = pci_read_config_dword(rdev, AMD_PMF_SMU_INDEX_DATA, &val);
+ err = amd_smn_read(0, AMD_PMF_BASE_ADDR_LO, &val);
if (err) {
pci_dev_put(rdev);
- return pcibios_err_to_errno(err);
+ return dev_err_probe(dev->dev, pcibios_err_to_errno(err),
+ "error in reading from 0x%x\n", AMD_PMF_BASE_ADDR_LO);
}
base_addr_lo = val & AMD_PMF_BASE_ADDR_HI_MASK;
- err = pci_write_config_dword(rdev, AMD_PMF_SMU_INDEX_ADDRESS, AMD_PMF_BASE_ADDR_HI);
- if (err) {
- dev_err(dev->dev, "error writing to 0x%x\n", AMD_PMF_SMU_INDEX_ADDRESS);
- pci_dev_put(rdev);
- return pcibios_err_to_errno(err);
- }
-
- err = pci_read_config_dword(rdev, AMD_PMF_SMU_INDEX_DATA, &val);
+ err = amd_smn_read(0, AMD_PMF_BASE_ADDR_HI, &val);
if (err) {
pci_dev_put(rdev);
- return pcibios_err_to_errno(err);
+ return dev_err_probe(dev->dev, pcibios_err_to_errno(err),
+ "error in reading from 0x%x\n", AMD_PMF_BASE_ADDR_HI);
}
base_addr_hi = val & AMD_PMF_BASE_ADDR_LO_MASK;
@@ -366,30 +465,40 @@ static int amd_pmf_probe(struct platform_device *pdev)
if (!dev->regbase)
return -ENOMEM;
+ err = devm_mutex_init(dev->dev, &dev->lock);
+ if (err)
+ return err;
+
+ err = devm_mutex_init(dev->dev, &dev->update_mutex);
+ if (err)
+ return err;
+
+ err = devm_mutex_init(dev->dev, &dev->cb_mutex);
+ if (err)
+ return err;
+
apmf_acpi_init(dev);
platform_set_drvdata(pdev, dev);
+ amd_pmf_dbgfs_register(dev);
amd_pmf_init_features(dev);
apmf_install_handler(dev);
- amd_pmf_dbgfs_register(dev);
+ if (is_apmf_func_supported(dev, APMF_FUNC_SBIOS_HEARTBEAT_V2))
+ amd_pmf_notify_sbios_heartbeat_event_v2(dev, ON_LOAD);
- mutex_init(&dev->lock);
- mutex_init(&dev->update_mutex);
dev_info(dev->dev, "registered PMF device successfully\n");
return 0;
}
-static int amd_pmf_remove(struct platform_device *pdev)
+static void amd_pmf_remove(struct platform_device *pdev)
{
struct amd_pmf_dev *dev = platform_get_drvdata(pdev);
- mutex_destroy(&dev->lock);
- mutex_destroy(&dev->update_mutex);
amd_pmf_deinit_features(dev);
+ if (is_apmf_func_supported(dev, APMF_FUNC_SBIOS_HEARTBEAT_V2))
+ amd_pmf_notify_sbios_heartbeat_event_v2(dev, ON_UNLOAD);
apmf_acpi_deinit(dev);
amd_pmf_dbgfs_unregister(dev);
- kfree(dev->buf);
- return 0;
}
static const struct attribute_group *amd_pmf_driver_groups[] = {
@@ -402,6 +511,7 @@ static struct platform_driver amd_pmf_driver = {
.name = "amd-pmf",
.acpi_match_table = amd_pmf_acpi_ids,
.dev_groups = amd_pmf_driver_groups,
+ .pm = pm_sleep_ptr(&amd_pmf_pm),
},
.probe = amd_pmf_probe,
.remove = amd_pmf_remove,
@@ -410,3 +520,4 @@ module_platform_driver(amd_pmf_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("AMD Platform Management Framework Driver");
+MODULE_SOFTDEP("pre: amdtee");
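
The new fixp_q88_fromint() helper above converts an integer into the Q8.8 fixed-point format (8 integer bits, 8 fractional bits) that the SET_STT_LIMIT_* commands now take; auto-mode.c and cnqf.c call it on the skin-temperature limits before handing them to amd_pmf_send_cmd(). A quick stand-alone check of the conversion:

#include <stdio.h>

/* Same shift as the driver's fixp_q88_fromint(): the integer part lands in
 * the upper byte and the fractional byte stays zero.
 */
static unsigned int q88_fromint(unsigned int val)
{
        return val << 8;
}

int main(void)
{
        /* e.g. a 45 degree skin-temperature limit becomes 45 * 256 = 0x2d00 */
        printf("45 -> 0x%04x\n", q88_fromint(45));
        printf("100 -> 0x%04x\n", q88_fromint(100));
        return 0;
}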
diff --git a/drivers/platform/x86/amd/pmf/pmf.h b/drivers/platform/x86/amd/pmf/pmf.h
index 84bbe2c6ea61..9144c8c3bbaf 100644
--- a/drivers/platform/x86/amd/pmf/pmf.h
+++ b/drivers/platform/x86/amd/pmf/pmf.h
@@ -12,8 +12,25 @@
#define PMF_H
#include <linux/acpi.h>
+#include <linux/input.h>
+#include <linux/platform_device.h>
#include <linux/platform_profile.h>
+#define POLICY_BUF_MAX_SZ 0x4b000
+#define POLICY_SIGN_COOKIE 0x31535024
+#define POLICY_COOKIE_OFFSET 0x10
+
+/* List of supported CPU ids */
+#define AMD_CPU_ID_RMB 0x14b5
+#define AMD_CPU_ID_PS 0x14e8
+#define PCI_DEVICE_ID_AMD_1AH_M20H_ROOT 0x1507
+#define PCI_DEVICE_ID_AMD_1AH_M60H_ROOT 0x1122
+
+struct cookie_header {
+ u32 sign;
+ u32 length;
+} __packed;
+
/* APMF Functions */
#define APMF_FUNC_VERIFY_INTERFACE 0
#define APMF_FUNC_GET_SYS_PARAMS 1
@@ -21,9 +38,12 @@
#define APMF_FUNC_SBIOS_HEARTBEAT 4
#define APMF_FUNC_AUTO_MODE 5
#define APMF_FUNC_SET_FAN_IDX 7
+#define APMF_FUNC_OS_POWER_SLIDER_UPDATE 8
#define APMF_FUNC_STATIC_SLIDER_GRANULAR 9
#define APMF_FUNC_DYN_SLIDER_AC 11
#define APMF_FUNC_DYN_SLIDER_DC 12
+#define APMF_FUNC_NOTIFY_SMART_PC_UPDATES 14
+#define APMF_FUNC_SBIOS_HEARTBEAT_V2 16
/* Message Definitions */
#define SET_SPL 0x03 /* SPL: Sustained Power Limit */
@@ -43,6 +63,17 @@
#define GET_STT_MIN_LIMIT 0x1F
#define GET_STT_LIMIT_APU 0x20
#define GET_STT_LIMIT_HS2 0x21
+#define SET_P3T 0x23 /* P3T: Peak Package Power Limit */
+#define SET_PMF_PPT 0x25
+#define SET_PMF_PPT_APU_ONLY 0x26
+
+/* OS slider update notification */
+#define DC_BEST_PERF 0
+#define DC_BETTER_PERF 1
+#define DC_BATTERY_SAVER 3
+#define AC_BEST_PERF 4
+#define AC_BETTER_PERF 5
+#define AC_BETTER_BATTERY 6
/* Fan Index for Auto Mode */
#define FAN_INDEX_AUTO 0xFFFFFFFF
@@ -50,6 +81,90 @@
#define ARG_NONE 0
#define AVG_SAMPLE_SIZE 3
+/* Policy Actions */
+#define PMF_POLICY_SPL 2
+#define PMF_POLICY_SPPT 3
+#define PMF_POLICY_FPPT 4
+#define PMF_POLICY_SPPT_APU_ONLY 5
+#define PMF_POLICY_STT_MIN 6
+#define PMF_POLICY_STT_SKINTEMP_APU 7
+#define PMF_POLICY_STT_SKINTEMP_HS2 8
+#define PMF_POLICY_SYSTEM_STATE 9
+#define PMF_POLICY_BIOS_OUTPUT_1 10
+#define PMF_POLICY_BIOS_OUTPUT_2 11
+#define PMF_POLICY_P3T 38
+#define PMF_POLICY_PMF_PPT 54
+#define PMF_POLICY_PMF_PPT_APU_ONLY 55
+#define PMF_POLICY_BIOS_OUTPUT_3 57
+#define PMF_POLICY_BIOS_OUTPUT_4 58
+#define PMF_POLICY_BIOS_OUTPUT_5 59
+#define PMF_POLICY_BIOS_OUTPUT_6 60
+#define PMF_POLICY_BIOS_OUTPUT_7 61
+#define PMF_POLICY_BIOS_OUTPUT_8 62
+#define PMF_POLICY_BIOS_OUTPUT_9 63
+#define PMF_POLICY_BIOS_OUTPUT_10 64
+
+/* TA macros */
+#define PMF_TA_IF_VERSION_MAJOR 1
+#define TA_PMF_ACTION_MAX 32
+#define TA_PMF_UNDO_MAX 8
+#define TA_OUTPUT_RESERVED_MEM 922
+#define MAX_OPERATION_PARAMS 4
+
+#define TA_ERROR_CRYPTO_INVALID_PARAM 0x20002
+#define TA_ERROR_CRYPTO_BIN_TOO_LARGE 0x2000d
+
+#define PMF_IF_V1 1
+#define PMF_IF_V2 2
+
+#define APTS_MAX_STATES 16
+#define CUSTOM_BIOS_INPUT_BITS GENMASK(16, 7)
+#define BIOS_INPUTS_MAX 10
+
+/* amd_pmf_send_cmd() set/get */
+#define SET_CMD false
+#define GET_CMD true
+
+#define METRICS_TABLE_ID 7
+
+typedef void (*apmf_event_handler_t)(acpi_handle handle, u32 event, void *data);
+
+/* APTS PMF BIOS Interface */
+struct amd_pmf_apts_output {
+ u16 table_version;
+ u32 fan_table_idx;
+ u32 pmf_ppt;
+ u32 ppt_pmf_apu_only;
+ u32 stt_min_limit;
+ u8 stt_skin_temp_limit_apu;
+ u8 stt_skin_temp_limit_hs2;
+} __packed;
+
+struct amd_pmf_apts_granular_output {
+ u16 size;
+ struct amd_pmf_apts_output val;
+} __packed;
+
+struct amd_pmf_apts_granular {
+ u16 size;
+ struct amd_pmf_apts_output val[APTS_MAX_STATES];
+};
+
+struct sbios_hb_event_v2 {
+ u16 size;
+ u8 load;
+ u8 unload;
+ u8 suspend;
+ u8 resume;
+} __packed;
+
+enum sbios_hb_v2 {
+ ON_LOAD,
+ ON_UNLOAD,
+ ON_SUSPEND,
+ ON_RESUME,
+};
+
/* AMD PMF BIOS interfaces */
struct apmf_verify_interface {
u16 size;
@@ -81,12 +196,89 @@ struct apmf_sbios_req {
u8 skin_temp_hs2;
} __packed;
+/* As per APMF spec 1.3 */
+struct apmf_sbios_req_v1 {
+ u16 size;
+ u32 pending_req;
+ u8 rsvd;
+ u8 cql_event;
+ u8 amt_event;
+ u32 fppt;
+ u32 sppt;
+ u32 sppt_apu_only;
+ u32 spl;
+ u32 stt_min_limit;
+ u8 skin_temp_apu;
+ u8 skin_temp_hs2;
+ u8 enable_cnqf;
+ u32 custom_policy[BIOS_INPUTS_MAX];
+} __packed;
+
+struct apmf_sbios_req_v2 {
+ u16 size;
+ u32 pending_req;
+ u8 rsd;
+ u32 ppt_pmf;
+ u32 ppt_pmf_apu_only;
+ u32 stt_min_limit;
+ u8 skin_temp_apu;
+ u8 skin_temp_hs2;
+ u32 custom_policy[BIOS_INPUTS_MAX];
+} __packed;
+
struct apmf_fan_idx {
u16 size;
u8 fan_ctl_mode;
u32 fan_ctl_idx;
} __packed;
+struct smu_pmf_metrics_v2 {
+ u16 core_frequency[16]; /* MHz */
+ u16 core_power[16]; /* mW */
+ u16 core_temp[16]; /* centi-C */
+ u16 gfx_temp; /* centi-C */
+ u16 soc_temp; /* centi-C */
+ u16 stapm_opn_limit; /* mW */
+ u16 stapm_cur_limit; /* mW */
+ u16 infra_cpu_maxfreq; /* MHz */
+ u16 infra_gfx_maxfreq; /* MHz */
+ u16 skin_temp; /* centi-C */
+ u16 gfxclk_freq; /* MHz */
+ u16 fclk_freq; /* MHz */
+ u16 gfx_activity; /* GFX busy % [0-100] */
+ u16 socclk_freq; /* MHz */
+ u16 vclk_freq; /* MHz */
+ u16 vcn_activity; /* VCN busy % [0-100] */
+ u16 vpeclk_freq; /* MHz */
+ u16 npuclk_freq; /* MHz */
+ u16 npu_busy[8]; /* NPU busy % [0-100] */
+ u16 dram_reads; /* MB/sec */
+ u16 dram_writes; /* MB/sec */
+ u16 core_c0residency[16]; /* C0 residency % [0-100] */
+ u16 npu_power; /* mW */
+ u32 apu_power; /* mW */
+ u32 gfx_power; /* mW */
+ u32 dgpu_power; /* mW */
+ u32 socket_power; /* mW */
+ u32 all_core_power; /* mW */
+ u32 filter_alpha_value; /* time constant [us] */
+ u32 metrics_counter;
+ u16 memclk_freq; /* MHz */
+ u16 mpnpuclk_freq; /* MHz */
+ u16 npu_reads; /* MB/sec */
+ u16 npu_writes; /* MB/sec */
+ u32 throttle_residency_prochot;
+ u32 throttle_residency_spl;
+ u32 throttle_residency_fppt;
+ u32 throttle_residency_sppt;
+ u32 throttle_residency_thm_core;
+ u32 throttle_residency_thm_gfx;
+ u32 throttle_residency_thm_soc;
+ u16 psys;
+ u16 spare1;
+ u32 spare[6];
+} __packed;
+
struct smu_pmf_metrics {
u16 gfxclk_freq; /* in MHz */
u16 socclk_freq; /* in MHz */
@@ -120,6 +312,21 @@ struct smu_pmf_metrics {
u16 infra_gfx_maxfreq; /* in MHz */
u16 skin_temp; /* in centi-Celsius */
u16 device_state;
+ u16 curtemp; /* in centi-Celsius */
+ u16 filter_alpha_value;
+ u16 avg_gfx_clkfrequency;
+ u16 avg_fclk_frequency;
+ u16 avg_gfx_activity;
+ u16 avg_socclk_frequency;
+ u16 avg_vclk_frequency;
+ u16 avg_vcn_activity;
+ u16 avg_dram_reads;
+ u16 avg_dram_writes;
+ u16 avg_socket_power;
+ u16 avg_core_power[2];
+ u16 avg_core_c0residency[16];
+ u16 spare1;
+ u32 metrics_counter;
} __packed;
enum amd_stt_skin_temp {
@@ -146,6 +353,18 @@ enum power_modes {
POWER_MODE_MAX,
};
+enum power_modes_v2 {
+ POWER_MODE_BEST_PERFORMANCE,
+ POWER_MODE_BALANCED,
+ POWER_MODE_BEST_POWER_EFFICIENCY,
+ POWER_MODE_ENERGY_SAVE,
+ POWER_MODE_V2_MAX,
+};
+
+struct pmf_bios_inputs_prev {
+ u32 custom_bios_inputs[BIOS_INPUTS_MAX];
+};
+
struct amd_pmf_dev {
void __iomem *regbase;
void __iomem *smu_virt_addr;
@@ -156,11 +375,12 @@ struct amd_pmf_dev {
struct mutex lock; /* protects the PMF interface */
u32 supported_func;
enum platform_profile_option current_profile;
- struct platform_profile_handler pprof;
+ struct device *ppdev; /* platform profile class device */
struct dentry *dbgfs_dir;
int hb_interval; /* SBIOS heartbeat interval */
struct delayed_work heart_beat;
struct smu_pmf_metrics m_table;
+ struct smu_pmf_metrics_v2 m_table_v2;
struct delayed_work work_buffer;
ktime_t start_time;
int socket_power_history[AVG_SAMPLE_SIZE];
@@ -169,8 +389,36 @@ struct amd_pmf_dev {
struct mutex update_mutex; /* protects race between ACPI handler and metrics thread */
bool cnqf_enabled;
bool cnqf_supported;
+ struct notifier_block pwr_src_notifier;
+ /* Smart PC solution builder */
+ struct dentry *esbin;
+ unsigned char *policy_buf;
+ resource_size_t policy_sz;
+ struct tee_context *tee_ctx;
+ struct tee_shm *fw_shm_pool;
+ u32 session_id;
+ void *shbuf;
+ struct delayed_work pb_work;
+ struct pmf_action_table *prev_data;
+ resource_size_t policy_addr;
+ void __iomem *policy_base;
+ bool smart_pc_enabled;
+ u16 pmf_if_version;
+ struct input_dev *pmf_idev;
+ size_t mtable_size;
+ struct resource *res;
+ struct apmf_sbios_req_v2 req; /* To get custom bios pending request */
+ struct mutex cb_mutex;
+ u32 notifications;
+ struct apmf_sbios_req_v1 req1;
+ struct pmf_bios_inputs_prev cb_prev; /* To preserve custom BIOS inputs */
+ bool cb_flag; /* To handle first custom BIOS input */
};
+struct apmf_sps_prop_granular_v2 {
+ u8 power_states[POWER_SOURCE_MAX][POWER_MODE_V2_MAX];
+} __packed;
+
struct apmf_sps_prop_granular {
u32 fppt;
u32 sppt;
@@ -192,6 +440,27 @@ struct amd_pmf_static_slider_granular {
struct apmf_sps_prop_granular prop[POWER_SOURCE_MAX][POWER_MODE_MAX];
};
+struct apmf_static_slider_granular_output_v2 {
+ u16 size;
+ struct apmf_sps_prop_granular_v2 sps_idx;
+} __packed;
+
+struct amd_pmf_static_slider_granular_v2 {
+ u16 size;
+ struct apmf_sps_prop_granular_v2 sps_idx;
+};
+
+struct os_power_slider {
+ u16 size;
+ u8 slider_event;
+} __packed;
+
+struct amd_pmf_notify_smart_pc_update {
+ u16 size;
+ u32 pending_req;
+ u32 custom_bios[BIOS_INPUTS_MAX];
+} __packed;
+
struct fan_table_control {
bool manual;
unsigned long fan_id;
@@ -374,6 +643,192 @@ struct apmf_dyn_slider_output {
struct apmf_cnqf_power_set ps[APMF_CNQF_MAX];
} __packed;
+/* Smart PC - TA internals */
+enum system_state {
+ SYSTEM_STATE_S0i3,
+ SYSTEM_STATE_S4,
+ SYSTEM_STATE_SCREEN_LOCK,
+ SYSTEM_STATE_MAX,
+};
+
+enum ta_slider {
+ TA_BEST_BATTERY,
+ TA_BETTER_BATTERY,
+ TA_BETTER_PERFORMANCE,
+ TA_BEST_PERFORMANCE,
+ TA_MAX,
+};
+
+struct amd_pmf_pb_bitmap {
+ const char *name;
+ u32 bit_mask;
+};
+
+static const struct amd_pmf_pb_bitmap custom_bios_inputs[] __used = {
+ {"NOTIFY_CUSTOM_BIOS_INPUT1", BIT(5)},
+ {"NOTIFY_CUSTOM_BIOS_INPUT2", BIT(6)},
+ {"NOTIFY_CUSTOM_BIOS_INPUT3", BIT(7)},
+ {"NOTIFY_CUSTOM_BIOS_INPUT4", BIT(8)},
+ {"NOTIFY_CUSTOM_BIOS_INPUT5", BIT(9)},
+ {"NOTIFY_CUSTOM_BIOS_INPUT6", BIT(10)},
+ {"NOTIFY_CUSTOM_BIOS_INPUT7", BIT(11)},
+ {"NOTIFY_CUSTOM_BIOS_INPUT8", BIT(12)},
+ {"NOTIFY_CUSTOM_BIOS_INPUT9", BIT(13)},
+ {"NOTIFY_CUSTOM_BIOS_INPUT10", BIT(14)},
+};
+
+static const struct amd_pmf_pb_bitmap custom_bios_inputs_v1[] __used = {
+ {"NOTIFY_CUSTOM_BIOS_INPUT1", BIT(7)},
+ {"NOTIFY_CUSTOM_BIOS_INPUT2", BIT(8)},
+ {"NOTIFY_CUSTOM_BIOS_INPUT3", BIT(9)},
+ {"NOTIFY_CUSTOM_BIOS_INPUT4", BIT(10)},
+ {"NOTIFY_CUSTOM_BIOS_INPUT5", BIT(11)},
+ {"NOTIFY_CUSTOM_BIOS_INPUT6", BIT(12)},
+ {"NOTIFY_CUSTOM_BIOS_INPUT7", BIT(13)},
+ {"NOTIFY_CUSTOM_BIOS_INPUT8", BIT(14)},
+ {"NOTIFY_CUSTOM_BIOS_INPUT9", BIT(15)},
+ {"NOTIFY_CUSTOM_BIOS_INPUT10", BIT(16)},
+};
+
+enum platform_type {
+ PTYPE_UNKNOWN = 0,
+ LID_CLOSE,
+ CLAMSHELL,
+ FLAT,
+ TENT,
+ STAND,
+ TABLET,
+ BOOK,
+ PRESENTATION,
+ PULL_FWD,
+ PTYPE_INVALID = 0xf,
+};
+
+/* Command ids for TA communication */
+enum ta_pmf_command {
+ TA_PMF_COMMAND_POLICY_BUILDER_INITIALIZE,
+ TA_PMF_COMMAND_POLICY_BUILDER_ENACT_POLICIES,
+};
+
+enum ta_pmf_error_type {
+ TA_PMF_TYPE_SUCCESS,
+ TA_PMF_ERROR_TYPE_GENERIC,
+ TA_PMF_ERROR_TYPE_CRYPTO,
+ TA_PMF_ERROR_TYPE_CRYPTO_VALIDATE,
+ TA_PMF_ERROR_TYPE_CRYPTO_VERIFY_OEM,
+ TA_PMF_ERROR_TYPE_POLICY_BUILDER,
+ TA_PMF_ERROR_TYPE_PB_CONVERT,
+ TA_PMF_ERROR_TYPE_PB_SETUP,
+ TA_PMF_ERROR_TYPE_PB_ENACT,
+ TA_PMF_ERROR_TYPE_ASD_GET_DEVICE_INFO,
+ TA_PMF_ERROR_TYPE_ASD_GET_DEVICE_PCIE_INFO,
+ TA_PMF_ERROR_TYPE_SYS_DRV_FW_VALIDATION,
+ TA_PMF_ERROR_TYPE_MAX,
+};
+
+struct pmf_action_table {
+ enum system_state system_state;
+ u32 spl; /* in mW */
+ u32 sppt; /* in mW */
+ u32 sppt_apuonly; /* in mW */
+ u32 fppt; /* in mW */
+ u32 stt_minlimit; /* in mW */
+ u32 stt_skintemp_apu; /* in C */
+ u32 stt_skintemp_hs2; /* in C */
+ u32 p3t_limit; /* in mW */
+ u32 pmf_ppt; /* in mW */
+ u32 pmf_ppt_apu_only; /* in mW */
+};
+
+/* Input conditions */
+struct ta_pmf_condition_info {
+ u32 power_source;
+ u32 bat_percentage;
+ u32 power_slider;
+ u32 lid_state;
+ bool user_present;
+ u32 bios_input_1[2];
+ u32 monitor_count;
+ u32 rsvd2[2];
+ u32 bat_design;
+ u32 full_charge_capacity;
+ int drain_rate;
+ bool user_engaged;
+ u32 device_state;
+ u32 socket_power;
+ u32 skin_temperature;
+ u32 rsvd3[2];
+ u32 platform_type;
+ u32 rsvd3_1[2];
+ u32 ambient_light;
+ u32 length;
+ u32 avg_c0residency;
+ u32 max_c0residency;
+ u32 s0i3_entry;
+ u32 gfx_busy;
+ u32 rsvd4[7];
+ bool camera_state;
+ u32 workload_type;
+ u32 display_type;
+ u32 display_state;
+ u32 rsvd5_1[17];
+ u32 bios_input_2[8];
+ u32 rsvd5[125];
+};
+
+struct ta_pmf_load_policy_table {
+ u32 table_size;
+ u8 table[POLICY_BUF_MAX_SZ];
+};
+
+/* TA initialization params */
+struct ta_pmf_init_table {
+ u32 frequency; /* SMU sampling frequency */
+ bool validate;
+ bool sku_check;
+ bool metadata_macrocheck;
+ struct ta_pmf_load_policy_table policies_table;
+};
+
+/* Everything the TA needs to Enact Policies */
+struct ta_pmf_enact_table {
+ struct ta_pmf_condition_info ev_info;
+ u32 name;
+};
+
+struct ta_pmf_action {
+ u32 action_index;
+ u32 value;
+ u32 spl_arg;
+};
+
+/* Output actions from TA */
+struct ta_pmf_enact_result {
+ u32 actions_count;
+ struct ta_pmf_action actions_list[TA_PMF_ACTION_MAX];
+ u32 undo_count;
+ struct ta_pmf_action undo_list[TA_PMF_UNDO_MAX];
+};
+
+union ta_pmf_input {
+ struct ta_pmf_enact_table enact_table;
+ struct ta_pmf_init_table init_table;
+};
+
+union ta_pmf_output {
+ struct ta_pmf_enact_result policy_apply_table;
+ u32 rsvd[TA_OUTPUT_RESERVED_MEM];
+};
+
+struct ta_pmf_shared_memory {
+ int command_id;
+ int resp_id;
+ u32 pmf_result;
+ u32 if_version;
+ union ta_pmf_output pmf_output;
+ union ta_pmf_input pmf_input;
+};
+
/* Core Layer */
int apmf_acpi_init(struct amd_pmf_dev *pmf_dev);
void apmf_acpi_deinit(struct amd_pmf_dev *pmf_dev);
@@ -382,18 +837,31 @@ int amd_pmf_send_cmd(struct amd_pmf_dev *dev, u8 message, bool get, u32 arg, u32
int amd_pmf_init_metrics_table(struct amd_pmf_dev *dev);
int amd_pmf_get_power_source(void);
int apmf_install_handler(struct amd_pmf_dev *pmf_dev);
+int apmf_os_power_slider_update(struct amd_pmf_dev *dev, u8 flag);
+int amd_pmf_set_dram_addr(struct amd_pmf_dev *dev, bool alloc_buffer);
+int amd_pmf_notify_sbios_heartbeat_event_v2(struct amd_pmf_dev *dev, u8 flag);
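+/* Convert an integer value to Q8.8 fixed-point format (used for the STT skin temperature limits) */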
+u32 fixp_q88_fromint(u32 val);
+int is_apmf_bios_input_notifications_supported(struct amd_pmf_dev *pdev);
/* SPS Layer */
int amd_pmf_get_pprof_modes(struct amd_pmf_dev *pmf);
void amd_pmf_update_slider(struct amd_pmf_dev *dev, bool op, int idx,
struct amd_pmf_static_slider_granular *table);
int amd_pmf_init_sps(struct amd_pmf_dev *dev);
-void amd_pmf_deinit_sps(struct amd_pmf_dev *dev);
int apmf_get_static_slider_granular(struct amd_pmf_dev *pdev,
struct apmf_static_slider_granular_output *output);
+bool is_pprof_balanced(struct amd_pmf_dev *pmf);
+int amd_pmf_power_slider_update_event(struct amd_pmf_dev *dev);
+const char *amd_pmf_source_as_str(unsigned int state);
int apmf_update_fan_idx(struct amd_pmf_dev *pdev, bool manual, u32 idx);
+int amd_pmf_set_sps_power_limits(struct amd_pmf_dev *pmf);
+int apmf_get_static_slider_granular_v2(struct amd_pmf_dev *dev,
+ struct apmf_static_slider_granular_output_v2 *data);
+int apts_get_static_slider_granular_v2(struct amd_pmf_dev *pdev,
+ struct amd_pmf_apts_granular_output *data, u32 apts_idx);
/* Auto Mode Layer */
int apmf_get_auto_mode_def(struct amd_pmf_dev *pdev, struct apmf_auto_mode *data);
@@ -401,6 +869,8 @@ void amd_pmf_init_auto_mode(struct amd_pmf_dev *dev);
void amd_pmf_deinit_auto_mode(struct amd_pmf_dev *dev);
void amd_pmf_trans_automode(struct amd_pmf_dev *dev, int socket_power, ktime_t time_elapsed_ms);
int apmf_get_sbios_requests(struct amd_pmf_dev *pdev, struct apmf_sbios_req *req);
+int apmf_get_sbios_requests_v1(struct amd_pmf_dev *pdev, struct apmf_sbios_req_v1 *req);
+int apmf_get_sbios_requests_v2(struct amd_pmf_dev *pdev, struct apmf_sbios_req_v2 *req);
void amd_pmf_update_2_cql(struct amd_pmf_dev *dev, bool is_cql_event);
int amd_pmf_reset_amt(struct amd_pmf_dev *dev);
@@ -414,4 +884,15 @@ void amd_pmf_deinit_cnqf(struct amd_pmf_dev *dev);
int amd_pmf_trans_cnqf(struct amd_pmf_dev *dev, int socket_power, ktime_t time_lapsed_ms);
extern const struct attribute_group cnqf_feature_attribute_group;
+/* Smart PC builder Layer */
+int amd_pmf_init_smart_pc(struct amd_pmf_dev *dev);
+void amd_pmf_deinit_smart_pc(struct amd_pmf_dev *dev);
+int apmf_check_smart_pc(struct amd_pmf_dev *pmf_dev);
+int amd_pmf_smartpc_apply_bios_output(struct amd_pmf_dev *dev, u32 val, u32 preq, u32 idx);
+
+/* Smart PC - TA interfaces */
+void amd_pmf_populate_ta_inputs(struct amd_pmf_dev *dev, struct ta_pmf_enact_table *in);
+void amd_pmf_dump_ta_inputs(struct amd_pmf_dev *dev, struct ta_pmf_enact_table *in);
+int amd_pmf_invoke_cmd_enact(struct amd_pmf_dev *dev);
+
#endif /* PMF_H */
diff --git a/drivers/platform/x86/amd/pmf/spc.c b/drivers/platform/x86/amd/pmf/spc.c
new file mode 100644
index 000000000000..0a37dc6a7950
--- /dev/null
+++ b/drivers/platform/x86/amd/pmf/spc.c
@@ -0,0 +1,343 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * AMD Platform Management Framework Driver - Smart PC Capabilities
+ *
+ * Copyright (c) 2023, Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Authors: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
+ * Patil Rajesh Reddy <Patil.Reddy@amd.com>
+ */
+
+#include <acpi/button.h>
+#include <linux/amd-pmf-io.h>
+#include <linux/power_supply.h>
+#include <linux/units.h>
+#include "pmf.h"
+
+#ifdef CONFIG_AMD_PMF_DEBUG
+static const char *platform_type_as_str(u16 platform_type)
+{
+ switch (platform_type) {
+ case CLAMSHELL:
+ return "CLAMSHELL";
+ case FLAT:
+ return "FLAT";
+ case TENT:
+ return "TENT";
+ case STAND:
+ return "STAND";
+ case TABLET:
+ return "TABLET";
+ case BOOK:
+ return "BOOK";
+ case PRESENTATION:
+ return "PRESENTATION";
+ case PULL_FWD:
+ return "PULL_FWD";
+ default:
+ return "UNKNOWN";
+ }
+}
+
+static const char *laptop_placement_as_str(u16 device_state)
+{
+ switch (device_state) {
+ case ON_TABLE:
+ return "ON_TABLE";
+ case ON_LAP_MOTION:
+ return "ON_LAP_MOTION";
+ case IN_BAG:
+ return "IN_BAG";
+ case OUT_OF_BAG:
+ return "OUT_OF_BAG";
+ default:
+ return "UNKNOWN";
+ }
+}
+
+static const char *ta_slider_as_str(unsigned int state)
+{
+ switch (state) {
+ case TA_BEST_PERFORMANCE:
+ return "PERFORMANCE";
+ case TA_BETTER_PERFORMANCE:
+ return "BALANCED";
+ case TA_BEST_BATTERY:
+ return "POWER_SAVER";
+ default:
+ return "Unknown TA Slider State";
+ }
+}
+
+static u32 amd_pmf_get_ta_custom_bios_inputs(struct ta_pmf_enact_table *in, int index)
+{
+ switch (index) {
+ case 0 ... 1:
+ return in->ev_info.bios_input_1[index];
+ case 2 ... 9:
+ return in->ev_info.bios_input_2[index - 2];
+ default:
+ return 0;
+ }
+}
+
+void amd_pmf_dump_ta_inputs(struct amd_pmf_dev *dev, struct ta_pmf_enact_table *in)
+{
+ int i;
+
+ dev_dbg(dev->dev, "==== TA inputs START ====\n");
+ dev_dbg(dev->dev, "Slider State: %s\n", ta_slider_as_str(in->ev_info.power_slider));
+ dev_dbg(dev->dev, "Power Source: %s\n", amd_pmf_source_as_str(in->ev_info.power_source));
+ dev_dbg(dev->dev, "Battery Percentage: %u\n", in->ev_info.bat_percentage);
+ dev_dbg(dev->dev, "Designed Battery Capacity: %u\n", in->ev_info.bat_design);
+ dev_dbg(dev->dev, "Fully Charged Capacity: %u\n", in->ev_info.full_charge_capacity);
+ dev_dbg(dev->dev, "Drain Rate: %d\n", in->ev_info.drain_rate);
+ dev_dbg(dev->dev, "Socket Power: %u\n", in->ev_info.socket_power);
+ dev_dbg(dev->dev, "Skin Temperature: %u\n", in->ev_info.skin_temperature);
+ dev_dbg(dev->dev, "Avg C0 Residency: %u\n", in->ev_info.avg_c0residency);
+ dev_dbg(dev->dev, "Max C0 Residency: %u\n", in->ev_info.max_c0residency);
+ dev_dbg(dev->dev, "GFX Busy: %u\n", in->ev_info.gfx_busy);
+ dev_dbg(dev->dev, "LID State: %s\n", in->ev_info.lid_state ? "close" : "open");
+ dev_dbg(dev->dev, "User Presence: %s\n", in->ev_info.user_present ? "Present" : "Away");
+ dev_dbg(dev->dev, "Ambient Light: %d\n", in->ev_info.ambient_light);
+ dev_dbg(dev->dev, "Platform type: %s\n", platform_type_as_str(in->ev_info.platform_type));
+ dev_dbg(dev->dev, "Laptop placement: %s\n",
+ laptop_placement_as_str(in->ev_info.device_state));
+ for (i = 0; i < ARRAY_SIZE(custom_bios_inputs); i++)
+ dev_dbg(dev->dev, "Custom BIOS input%d: %u\n", i + 1,
+ amd_pmf_get_ta_custom_bios_inputs(in, i));
+ dev_dbg(dev->dev, "==== TA inputs END ====\n");
+}
+#else
+void amd_pmf_dump_ta_inputs(struct amd_pmf_dev *dev, struct ta_pmf_enact_table *in) {}
+#endif
+
+/*
+ * Set the BIOS input value at the given index in the TA enact table. The
+ * switch is needed because the custom BIOS inputs are not contiguous in the
+ * existing TA structure layout: indices 0-1 land in bios_input_1[] and
+ * indices 2-9 in bios_input_2[].
+ */
+static void amd_pmf_set_ta_custom_bios_input(struct ta_pmf_enact_table *in, int index, u32 value)
+{
+ switch (index) {
+ case 0 ... 1:
+ in->ev_info.bios_input_1[index] = value;
+ break;
+ case 2 ... 9:
+ in->ev_info.bios_input_2[index - 2] = value;
+ break;
+ default:
+ return;
+ }
+}
+
+static void amd_pmf_update_bios_inputs(struct amd_pmf_dev *pdev, u32 pending_req,
+ const struct amd_pmf_pb_bitmap *inputs,
+ const u32 *custom_policy, struct ta_pmf_enact_table *in)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(custom_bios_inputs); i++) {
+ if (!(pending_req & inputs[i].bit_mask))
+ continue;
+ amd_pmf_set_ta_custom_bios_input(in, i, custom_policy[i]);
+ pdev->cb_prev.custom_bios_inputs[i] = custom_policy[i];
+ dev_dbg(pdev->dev, "Custom BIOS Input[%d]: %u\n", i, custom_policy[i]);
+ }
+}
+
+static void amd_pmf_get_custom_bios_inputs(struct amd_pmf_dev *pdev,
+ struct ta_pmf_enact_table *in)
+{
+ unsigned int i;
+
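+	/* Seed the TA inputs with the cached BIOS values so they persist across enact cycles */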
+ for (i = 0; i < ARRAY_SIZE(custom_bios_inputs); i++)
+ amd_pmf_set_ta_custom_bios_input(in, i, pdev->cb_prev.custom_bios_inputs[i]);
+
+ if (!(pdev->req.pending_req || pdev->req1.pending_req))
+ return;
+
+ if (!pdev->smart_pc_enabled)
+ return;
+
+ switch (pdev->pmf_if_version) {
+ case PMF_IF_V1:
+ if (!is_apmf_bios_input_notifications_supported(pdev))
+ return;
+ amd_pmf_update_bios_inputs(pdev, pdev->req1.pending_req, custom_bios_inputs_v1,
+ pdev->req1.custom_policy, in);
+ break;
+ case PMF_IF_V2:
+ amd_pmf_update_bios_inputs(pdev, pdev->req.pending_req, custom_bios_inputs,
+ pdev->req.custom_policy, in);
+ break;
+ default:
+ break;
+ }
+
+ /* Clear pending requests after handling */
+ memset(&pdev->req, 0, sizeof(pdev->req));
+ memset(&pdev->req1, 0, sizeof(pdev->req1));
+}
+
+static void amd_pmf_get_c0_residency(u16 *core_res, size_t size, struct ta_pmf_enact_table *in)
+{
+ u16 max, avg = 0;
+ int i;
+
+ /* Get the avg and max C0 residency of all the cores */
+ max = *core_res;
+ for (i = 0; i < size; i++) {
+ avg += core_res[i];
+ if (core_res[i] > max)
+ max = core_res[i];
+ }
+ avg = DIV_ROUND_CLOSEST(avg, size);
+ in->ev_info.avg_c0residency = avg;
+ in->ev_info.max_c0residency = max;
+}
+
+static void amd_pmf_get_smu_info(struct amd_pmf_dev *dev, struct ta_pmf_enact_table *in)
+{
+ /* Get the updated metrics table data */
+ memset(dev->buf, 0, dev->mtable_size);
+ amd_pmf_send_cmd(dev, SET_TRANSFER_TABLE, SET_CMD, METRICS_TABLE_ID, NULL);
+
+ switch (dev->cpu_id) {
+ case AMD_CPU_ID_PS:
+ memcpy(&dev->m_table, dev->buf, dev->mtable_size);
+ in->ev_info.socket_power = dev->m_table.apu_power + dev->m_table.dgpu_power;
+ in->ev_info.skin_temperature = dev->m_table.skin_temp;
+ in->ev_info.gfx_busy = dev->m_table.avg_gfx_activity;
+ amd_pmf_get_c0_residency(dev->m_table.avg_core_c0residency,
+ ARRAY_SIZE(dev->m_table.avg_core_c0residency), in);
+ break;
+ case PCI_DEVICE_ID_AMD_1AH_M20H_ROOT:
+ case PCI_DEVICE_ID_AMD_1AH_M60H_ROOT:
+ memcpy(&dev->m_table_v2, dev->buf, dev->mtable_size);
+ in->ev_info.socket_power = dev->m_table_v2.apu_power + dev->m_table_v2.dgpu_power;
+ in->ev_info.skin_temperature = dev->m_table_v2.skin_temp;
+ in->ev_info.gfx_busy = dev->m_table_v2.gfx_activity;
+ amd_pmf_get_c0_residency(dev->m_table_v2.core_c0residency,
+ ARRAY_SIZE(dev->m_table_v2.core_c0residency), in);
+ break;
+ default:
+		dev_err(dev->dev, "Unsupported CPU id: 0x%x\n", dev->cpu_id);
+ }
+}
+
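+/* Battery power_supply names probed, in order, by amd_pmf_get_battery_prop() */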
+static const char * const pmf_battery_supply_name[] = {
+ "BATT",
+ "BAT0",
+};
+
+static int amd_pmf_get_battery_prop(enum power_supply_property prop)
+{
+ union power_supply_propval value;
+ struct power_supply *psy;
+ int i, ret;
+
+	for (i = 0; i < ARRAY_SIZE(pmf_battery_supply_name); i++) {
+		psy = power_supply_get_by_name(pmf_battery_supply_name[i]);
+		if (!psy)
+			continue;
+
+		ret = power_supply_get_property(psy, prop, &value);
+		power_supply_put(psy);
+		if (ret)
+			return ret;
+
+		return value.intval;
+	}
+
+	/* No known battery supply was found */
+	return -ENODEV;
+}
+
+static int amd_pmf_get_battery_info(struct amd_pmf_dev *dev, struct ta_pmf_enact_table *in)
+{
+ int val;
+
+ val = amd_pmf_get_battery_prop(POWER_SUPPLY_PROP_PRESENT);
+ if (val < 0)
+ return val;
+ if (val != 1)
+ return -ENODEV;
+
+ in->ev_info.bat_percentage = amd_pmf_get_battery_prop(POWER_SUPPLY_PROP_CAPACITY);
+	/* Convert the uWh/uW readings from the power supply class to mWh/mW */
+ in->ev_info.bat_design = amd_pmf_get_battery_prop(POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN) /
+ MILLIWATT_PER_WATT;
+ in->ev_info.full_charge_capacity = amd_pmf_get_battery_prop(POWER_SUPPLY_PROP_ENERGY_FULL) /
+ MILLIWATT_PER_WATT;
+ in->ev_info.drain_rate = amd_pmf_get_battery_prop(POWER_SUPPLY_PROP_POWER_NOW) /
+ MILLIWATT_PER_WATT;
+
+ return 0;
+}
+
+static int amd_pmf_get_slider_info(struct amd_pmf_dev *dev, struct ta_pmf_enact_table *in)
+{
+ int val;
+
+ switch (dev->current_profile) {
+ case PLATFORM_PROFILE_PERFORMANCE:
+ case PLATFORM_PROFILE_BALANCED_PERFORMANCE:
+ val = TA_BEST_PERFORMANCE;
+ break;
+ case PLATFORM_PROFILE_BALANCED:
+ val = TA_BETTER_PERFORMANCE;
+ break;
+ case PLATFORM_PROFILE_LOW_POWER:
+ case PLATFORM_PROFILE_QUIET:
+ val = TA_BEST_BATTERY;
+ break;
+ default:
+ dev_err(dev->dev, "Unknown Platform Profile.\n");
+ return -EOPNOTSUPP;
+ }
+ in->ev_info.power_slider = val;
+
+ return 0;
+}
+
+static void amd_pmf_get_sensor_info(struct amd_pmf_dev *dev, struct ta_pmf_enact_table *in)
+{
+ struct amd_sfh_info sfh_info;
+
+ /* Get the latest information from SFH */
+ in->ev_info.user_present = false;
+
+ /* Get ALS data */
+ if (!amd_get_sfh_info(&sfh_info, MT_ALS))
+ in->ev_info.ambient_light = sfh_info.ambient_light;
+ else
+ dev_dbg(dev->dev, "ALS is not enabled/detected\n");
+
+	/* Get HPD (human presence detection) data */
+ if (!amd_get_sfh_info(&sfh_info, MT_HPD)) {
+ if (sfh_info.user_present == SFH_USER_PRESENT)
+ in->ev_info.user_present = true;
+ } else {
+ dev_dbg(dev->dev, "HPD is not enabled/detected\n");
+ }
+
+ /* Get SRA (Secondary Accelerometer) data */
+ if (!amd_get_sfh_info(&sfh_info, MT_SRA)) {
+ in->ev_info.platform_type = sfh_info.platform_type;
+ in->ev_info.device_state = sfh_info.laptop_placement;
+ } else {
+ dev_dbg(dev->dev, "SRA is not enabled/detected\n");
+ }
+}
+
+void amd_pmf_populate_ta_inputs(struct amd_pmf_dev *dev, struct ta_pmf_enact_table *in)
+{
+ /* TA side lid open is 1 and close is 0, hence the ! here */
+ in->ev_info.lid_state = !acpi_lid_open();
+ in->ev_info.power_source = amd_pmf_get_power_source();
+ amd_pmf_get_smu_info(dev, in);
+ amd_pmf_get_battery_info(dev, in);
+ amd_pmf_get_slider_info(dev, in);
+ amd_pmf_get_sensor_info(dev, in);
+ amd_pmf_get_custom_bios_inputs(dev, in);
+}
diff --git a/drivers/platform/x86/amd/pmf/sps.c b/drivers/platform/x86/amd/pmf/sps.c
index dba7e36962dc..0b70a5153f46 100644
--- a/drivers/platform/x86/amd/pmf/sps.c
+++ b/drivers/platform/x86/amd/pmf/sps.c
@@ -10,7 +10,158 @@
#include "pmf.h"
+static struct amd_pmf_static_slider_granular_v2 config_store_v2;
static struct amd_pmf_static_slider_granular config_store;
+static struct amd_pmf_apts_granular apts_config_store;
+
+#ifdef CONFIG_AMD_PMF_DEBUG
+static const char *slider_v2_as_str(unsigned int state)
+{
+ switch (state) {
+ case POWER_MODE_BEST_PERFORMANCE:
+ return "Best Performance";
+ case POWER_MODE_BALANCED:
+ return "Balanced";
+ case POWER_MODE_BEST_POWER_EFFICIENCY:
+ return "Best Power Efficiency";
+ case POWER_MODE_ENERGY_SAVE:
+ return "Energy Save";
+ default:
+ return "Unknown Power Mode";
+ }
+}
+
+static const char *slider_as_str(unsigned int state)
+{
+ switch (state) {
+ case POWER_MODE_PERFORMANCE:
+ return "PERFORMANCE";
+ case POWER_MODE_BALANCED_POWER:
+ return "BALANCED_POWER";
+ case POWER_MODE_POWER_SAVER:
+ return "POWER_SAVER";
+ default:
+ return "Unknown Slider State";
+ }
+}
+
+const char *amd_pmf_source_as_str(unsigned int state)
+{
+ switch (state) {
+ case POWER_SOURCE_AC:
+ return "AC";
+ case POWER_SOURCE_DC:
+ return "DC";
+ default:
+ return "Unknown Power State";
+ }
+}
+
+static void amd_pmf_dump_sps_defaults(struct amd_pmf_static_slider_granular *data)
+{
+ int i, j;
+
+ pr_debug("Static Slider Data - BEGIN\n");
+
+ for (i = 0; i < POWER_SOURCE_MAX; i++) {
+ for (j = 0; j < POWER_MODE_MAX; j++) {
+ pr_debug("--- Source:%s Mode:%s ---\n", amd_pmf_source_as_str(i),
+ slider_as_str(j));
+ pr_debug("SPL: %u mW\n", data->prop[i][j].spl);
+ pr_debug("SPPT: %u mW\n", data->prop[i][j].sppt);
+ pr_debug("SPPT_ApuOnly: %u mW\n", data->prop[i][j].sppt_apu_only);
+ pr_debug("FPPT: %u mW\n", data->prop[i][j].fppt);
+ pr_debug("STTMinLimit: %u mW\n", data->prop[i][j].stt_min);
+ pr_debug("STT_SkinTempLimit_APU: %u C\n",
+ data->prop[i][j].stt_skin_temp[STT_TEMP_APU]);
+ pr_debug("STT_SkinTempLimit_HS2: %u C\n",
+ data->prop[i][j].stt_skin_temp[STT_TEMP_HS2]);
+ }
+ }
+
+ pr_debug("Static Slider Data - END\n");
+}
+
+static void amd_pmf_dump_sps_defaults_v2(struct amd_pmf_static_slider_granular_v2 *data)
+{
+ unsigned int i, j;
+
+	pr_debug("Static Slider APTS state index data - BEGIN\n");
+ pr_debug("size: %u\n", data->size);
+
+ for (i = 0; i < POWER_SOURCE_MAX; i++)
+ for (j = 0; j < POWER_MODE_V2_MAX; j++)
+ pr_debug("%s %s: %u\n", amd_pmf_source_as_str(i), slider_v2_as_str(j),
+ data->sps_idx.power_states[i][j]);
+
+ pr_debug("Static Slider APTS state index data - END\n");
+}
+
+static void amd_pmf_dump_apts_sps_defaults(struct amd_pmf_apts_granular *info)
+{
+ int i;
+
+	pr_debug("Static Slider APTS index default values data - BEGIN\n");
+
+ for (i = 0; i < APTS_MAX_STATES; i++) {
+ pr_debug("Table Version[%d] = %u\n", i, info->val[i].table_version);
+ pr_debug("Fan Index[%d] = %u\n", i, info->val[i].fan_table_idx);
+ pr_debug("PPT[%d] = %u\n", i, info->val[i].pmf_ppt);
+ pr_debug("PPT APU[%d] = %u\n", i, info->val[i].ppt_pmf_apu_only);
+ pr_debug("STT Min[%d] = %u\n", i, info->val[i].stt_min_limit);
+ pr_debug("STT APU[%d] = %u\n", i, info->val[i].stt_skin_temp_limit_apu);
+ pr_debug("STT HS2[%d] = %u\n", i, info->val[i].stt_skin_temp_limit_hs2);
+ }
+
+	pr_debug("Static Slider APTS index default values data - END\n");
+}
+#else
+static void amd_pmf_dump_sps_defaults(struct amd_pmf_static_slider_granular *data) {}
+static void amd_pmf_dump_sps_defaults_v2(struct amd_pmf_static_slider_granular_v2 *data) {}
+static void amd_pmf_dump_apts_sps_defaults(struct amd_pmf_apts_granular *info) {}
+#endif
+
+static void amd_pmf_load_apts_defaults_sps_v2(struct amd_pmf_dev *pdev)
+{
+ struct amd_pmf_apts_granular_output output;
+ struct amd_pmf_apts_output *ps;
+ int i;
+
+ memset(&apts_config_store, 0, sizeof(apts_config_store));
+
+ ps = apts_config_store.val;
+
+ for (i = 0; i < APTS_MAX_STATES; i++) {
+ apts_get_static_slider_granular_v2(pdev, &output, i);
+ ps[i].table_version = output.val.table_version;
+ ps[i].fan_table_idx = output.val.fan_table_idx;
+ ps[i].pmf_ppt = output.val.pmf_ppt;
+ ps[i].ppt_pmf_apu_only = output.val.ppt_pmf_apu_only;
+ ps[i].stt_min_limit = output.val.stt_min_limit;
+ ps[i].stt_skin_temp_limit_apu = output.val.stt_skin_temp_limit_apu;
+ ps[i].stt_skin_temp_limit_hs2 = output.val.stt_skin_temp_limit_hs2;
+ }
+
+ amd_pmf_dump_apts_sps_defaults(&apts_config_store);
+}
+
+static void amd_pmf_load_defaults_sps_v2(struct amd_pmf_dev *dev)
+{
+ struct apmf_static_slider_granular_output_v2 output;
+ unsigned int i, j;
+
+ memset(&config_store_v2, 0, sizeof(config_store_v2));
+ apmf_get_static_slider_granular_v2(dev, &output);
+
+ config_store_v2.size = output.size;
+
+ for (i = 0; i < POWER_SOURCE_MAX; i++)
+ for (j = 0; j < POWER_MODE_V2_MAX; j++)
+ config_store_v2.sps_idx.power_states[i][j] =
+ output.sps_idx.power_states[i][j];
+
+ amd_pmf_dump_sps_defaults_v2(&config_store_v2);
+}
static void amd_pmf_load_defaults_sps(struct amd_pmf_dev *dev)
{
@@ -36,6 +187,22 @@ static void amd_pmf_load_defaults_sps(struct amd_pmf_dev *dev)
idx++;
}
}
+ amd_pmf_dump_sps_defaults(&config_store);
+}
+
+static void amd_pmf_update_slider_v2(struct amd_pmf_dev *dev, int idx)
+{
+ amd_pmf_send_cmd(dev, SET_PMF_PPT, SET_CMD, apts_config_store.val[idx].pmf_ppt, NULL);
+ amd_pmf_send_cmd(dev, SET_PMF_PPT_APU_ONLY, SET_CMD,
+ apts_config_store.val[idx].ppt_pmf_apu_only, NULL);
+ amd_pmf_send_cmd(dev, SET_STT_MIN_LIMIT, SET_CMD,
+ apts_config_store.val[idx].stt_min_limit, NULL);
+ amd_pmf_send_cmd(dev, SET_STT_LIMIT_APU, SET_CMD,
+ fixp_q88_fromint(apts_config_store.val[idx].stt_skin_temp_limit_apu),
+ NULL);
+ amd_pmf_send_cmd(dev, SET_STT_LIMIT_HS2, SET_CMD,
+ fixp_q88_fromint(apts_config_store.val[idx].stt_skin_temp_limit_hs2),
+ NULL);
}
void amd_pmf_update_slider(struct amd_pmf_dev *dev, bool op, int idx,
@@ -44,36 +211,85 @@ void amd_pmf_update_slider(struct amd_pmf_dev *dev, bool op, int idx,
int src = amd_pmf_get_power_source();
if (op == SLIDER_OP_SET) {
- amd_pmf_send_cmd(dev, SET_SPL, false, config_store.prop[src][idx].spl, NULL);
- amd_pmf_send_cmd(dev, SET_FPPT, false, config_store.prop[src][idx].fppt, NULL);
- amd_pmf_send_cmd(dev, SET_SPPT, false, config_store.prop[src][idx].sppt, NULL);
- amd_pmf_send_cmd(dev, SET_SPPT_APU_ONLY, false,
+ amd_pmf_send_cmd(dev, SET_SPL, SET_CMD, config_store.prop[src][idx].spl, NULL);
+ amd_pmf_send_cmd(dev, SET_FPPT, SET_CMD, config_store.prop[src][idx].fppt, NULL);
+ amd_pmf_send_cmd(dev, SET_SPPT, SET_CMD, config_store.prop[src][idx].sppt, NULL);
+ amd_pmf_send_cmd(dev, SET_SPPT_APU_ONLY, SET_CMD,
config_store.prop[src][idx].sppt_apu_only, NULL);
- amd_pmf_send_cmd(dev, SET_STT_MIN_LIMIT, false,
+ amd_pmf_send_cmd(dev, SET_STT_MIN_LIMIT, SET_CMD,
config_store.prop[src][idx].stt_min, NULL);
- amd_pmf_send_cmd(dev, SET_STT_LIMIT_APU, false,
- config_store.prop[src][idx].stt_skin_temp[STT_TEMP_APU], NULL);
- amd_pmf_send_cmd(dev, SET_STT_LIMIT_HS2, false,
- config_store.prop[src][idx].stt_skin_temp[STT_TEMP_HS2], NULL);
+ amd_pmf_send_cmd(dev, SET_STT_LIMIT_APU, SET_CMD,
+ fixp_q88_fromint(config_store.prop[src][idx].stt_skin_temp[STT_TEMP_APU]),
+ NULL);
+ amd_pmf_send_cmd(dev, SET_STT_LIMIT_HS2, SET_CMD,
+ fixp_q88_fromint(config_store.prop[src][idx].stt_skin_temp[STT_TEMP_HS2]),
+ NULL);
} else if (op == SLIDER_OP_GET) {
- amd_pmf_send_cmd(dev, GET_SPL, true, ARG_NONE, &table->prop[src][idx].spl);
- amd_pmf_send_cmd(dev, GET_FPPT, true, ARG_NONE, &table->prop[src][idx].fppt);
- amd_pmf_send_cmd(dev, GET_SPPT, true, ARG_NONE, &table->prop[src][idx].sppt);
- amd_pmf_send_cmd(dev, GET_SPPT_APU_ONLY, true, ARG_NONE,
+ amd_pmf_send_cmd(dev, GET_SPL, GET_CMD, ARG_NONE, &table->prop[src][idx].spl);
+ amd_pmf_send_cmd(dev, GET_FPPT, GET_CMD, ARG_NONE, &table->prop[src][idx].fppt);
+ amd_pmf_send_cmd(dev, GET_SPPT, GET_CMD, ARG_NONE, &table->prop[src][idx].sppt);
+ amd_pmf_send_cmd(dev, GET_SPPT_APU_ONLY, GET_CMD, ARG_NONE,
&table->prop[src][idx].sppt_apu_only);
- amd_pmf_send_cmd(dev, GET_STT_MIN_LIMIT, true, ARG_NONE,
+ amd_pmf_send_cmd(dev, GET_STT_MIN_LIMIT, GET_CMD, ARG_NONE,
&table->prop[src][idx].stt_min);
- amd_pmf_send_cmd(dev, GET_STT_LIMIT_APU, true, ARG_NONE,
+ amd_pmf_send_cmd(dev, GET_STT_LIMIT_APU, GET_CMD, ARG_NONE,
(u32 *)&table->prop[src][idx].stt_skin_temp[STT_TEMP_APU]);
- amd_pmf_send_cmd(dev, GET_STT_LIMIT_HS2, true, ARG_NONE,
+ amd_pmf_send_cmd(dev, GET_STT_LIMIT_HS2, GET_CMD, ARG_NONE,
(u32 *)&table->prop[src][idx].stt_skin_temp[STT_TEMP_HS2]);
}
}
-static int amd_pmf_profile_get(struct platform_profile_handler *pprof,
+static int amd_pmf_update_sps_power_limits_v2(struct amd_pmf_dev *pdev, int pwr_mode)
+{
+ int src, index;
+
+ src = amd_pmf_get_power_source();
+
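+	/* Map the OS power mode to an APTS state index for the current power source and program that state */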
+ switch (pwr_mode) {
+ case POWER_MODE_PERFORMANCE:
+ index = config_store_v2.sps_idx.power_states[src][POWER_MODE_BEST_PERFORMANCE];
+ amd_pmf_update_slider_v2(pdev, index);
+ break;
+ case POWER_MODE_BALANCED_POWER:
+ index = config_store_v2.sps_idx.power_states[src][POWER_MODE_BALANCED];
+ amd_pmf_update_slider_v2(pdev, index);
+ break;
+ case POWER_MODE_POWER_SAVER:
+ index = config_store_v2.sps_idx.power_states[src][POWER_MODE_BEST_POWER_EFFICIENCY];
+ amd_pmf_update_slider_v2(pdev, index);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int amd_pmf_set_sps_power_limits(struct amd_pmf_dev *pmf)
+{
+ int mode;
+
+ mode = amd_pmf_get_pprof_modes(pmf);
+ if (mode < 0)
+ return mode;
+
+ if (pmf->pmf_if_version == PMF_IF_V2)
+ return amd_pmf_update_sps_power_limits_v2(pmf, mode);
+
+ amd_pmf_update_slider(pmf, SLIDER_OP_SET, mode, NULL);
+
+ return 0;
+}
+
+bool is_pprof_balanced(struct amd_pmf_dev *pmf)
+{
+ return pmf->current_profile == PLATFORM_PROFILE_BALANCED;
+}
+
+static int amd_pmf_profile_get(struct device *dev,
enum platform_profile_option *profile)
{
- struct amd_pmf_dev *pmf = container_of(pprof, struct amd_pmf_dev, pprof);
+ struct amd_pmf_dev *pmf = dev_get_drvdata(dev);
*profile = pmf->current_profile;
return 0;
@@ -85,12 +301,14 @@ int amd_pmf_get_pprof_modes(struct amd_pmf_dev *pmf)
switch (pmf->current_profile) {
case PLATFORM_PROFILE_PERFORMANCE:
+ case PLATFORM_PROFILE_BALANCED_PERFORMANCE:
mode = POWER_MODE_PERFORMANCE;
break;
case PLATFORM_PROFILE_BALANCED:
mode = POWER_MODE_BALANCED_POWER;
break;
case PLATFORM_PROFILE_LOW_POWER:
+ case PLATFORM_PROFILE_QUIET:
mode = POWER_MODE_POWER_SAVER;
break;
default:
@@ -101,46 +319,126 @@ int amd_pmf_get_pprof_modes(struct amd_pmf_dev *pmf)
return mode;
}
-static int amd_pmf_profile_set(struct platform_profile_handler *pprof,
- enum platform_profile_option profile)
+int amd_pmf_power_slider_update_event(struct amd_pmf_dev *dev)
{
- struct amd_pmf_dev *pmf = container_of(pprof, struct amd_pmf_dev, pprof);
+ u8 flag = 0;
int mode;
+ int src;
- pmf->current_profile = profile;
- mode = amd_pmf_get_pprof_modes(pmf);
+ mode = amd_pmf_get_pprof_modes(dev);
if (mode < 0)
return mode;
- amd_pmf_update_slider(pmf, SLIDER_OP_SET, mode, NULL);
+ src = amd_pmf_get_power_source();
+
+ if (src == POWER_SOURCE_AC) {
+ switch (mode) {
+ case POWER_MODE_PERFORMANCE:
+ flag |= BIT(AC_BEST_PERF);
+ break;
+ case POWER_MODE_BALANCED_POWER:
+ flag |= BIT(AC_BETTER_PERF);
+ break;
+ case POWER_MODE_POWER_SAVER:
+ flag |= BIT(AC_BETTER_BATTERY);
+ break;
+ default:
+ dev_err(dev->dev, "unsupported platform profile\n");
+ return -EOPNOTSUPP;
+ }
+
+ } else if (src == POWER_SOURCE_DC) {
+ switch (mode) {
+ case POWER_MODE_PERFORMANCE:
+ flag |= BIT(DC_BEST_PERF);
+ break;
+ case POWER_MODE_BALANCED_POWER:
+ flag |= BIT(DC_BETTER_PERF);
+ break;
+ case POWER_MODE_POWER_SAVER:
+ flag |= BIT(DC_BATTERY_SAVER);
+ break;
+ default:
+ dev_err(dev->dev, "unsupported platform profile\n");
+ return -EOPNOTSUPP;
+ }
+ }
+
+ apmf_os_power_slider_update(dev, flag);
+
return 0;
}
-int amd_pmf_init_sps(struct amd_pmf_dev *dev)
+static int amd_pmf_profile_set(struct device *dev,
+ enum platform_profile_option profile)
{
- int err;
+ struct amd_pmf_dev *pmf = dev_get_drvdata(dev);
+ int ret = 0;
- dev->current_profile = PLATFORM_PROFILE_BALANCED;
- amd_pmf_load_defaults_sps(dev);
+ pmf->current_profile = profile;
- dev->pprof.profile_get = amd_pmf_profile_get;
- dev->pprof.profile_set = amd_pmf_profile_set;
+ /* Notify EC about the slider position change */
+ if (is_apmf_func_supported(pmf, APMF_FUNC_OS_POWER_SLIDER_UPDATE)) {
+ ret = amd_pmf_power_slider_update_event(pmf);
+ if (ret)
+ return ret;
+ }
- /* Setup supported modes */
- set_bit(PLATFORM_PROFILE_LOW_POWER, dev->pprof.choices);
- set_bit(PLATFORM_PROFILE_BALANCED, dev->pprof.choices);
- set_bit(PLATFORM_PROFILE_PERFORMANCE, dev->pprof.choices);
+ if (is_apmf_func_supported(pmf, APMF_FUNC_STATIC_SLIDER_GRANULAR)) {
+ ret = amd_pmf_set_sps_power_limits(pmf);
+ if (ret)
+ return ret;
+ }
- /* Create platform_profile structure and register */
- err = platform_profile_register(&dev->pprof);
- if (err)
- dev_err(dev->dev, "Failed to register SPS support, this is most likely an SBIOS bug: %d\n",
- err);
+ return 0;
+}
+
+static int amd_pmf_hidden_choices(void *drvdata, unsigned long *choices)
+{
+ set_bit(PLATFORM_PROFILE_QUIET, choices);
+ set_bit(PLATFORM_PROFILE_BALANCED_PERFORMANCE, choices);
+
+ return 0;
+}
+
+static int amd_pmf_profile_probe(void *drvdata, unsigned long *choices)
+{
+ set_bit(PLATFORM_PROFILE_LOW_POWER, choices);
+ set_bit(PLATFORM_PROFILE_BALANCED, choices);
+ set_bit(PLATFORM_PROFILE_PERFORMANCE, choices);
- return err;
+ return 0;
}
-void amd_pmf_deinit_sps(struct amd_pmf_dev *dev)
+static const struct platform_profile_ops amd_pmf_profile_ops = {
+ .probe = amd_pmf_profile_probe,
+ .hidden_choices = amd_pmf_hidden_choices,
+ .profile_get = amd_pmf_profile_get,
+ .profile_set = amd_pmf_profile_set,
+};
+
+int amd_pmf_init_sps(struct amd_pmf_dev *dev)
{
- platform_profile_remove();
+ dev->current_profile = PLATFORM_PROFILE_BALANCED;
+
+ if (is_apmf_func_supported(dev, APMF_FUNC_STATIC_SLIDER_GRANULAR)) {
+ if (dev->pmf_if_version == PMF_IF_V2) {
+ amd_pmf_load_defaults_sps_v2(dev);
+ amd_pmf_load_apts_defaults_sps_v2(dev);
+ } else {
+ amd_pmf_load_defaults_sps(dev);
+ }
+
+ /* update SPS balanced power mode thermals */
+ amd_pmf_set_sps_power_limits(dev);
+ }
+
+ /* Create platform_profile structure and register */
+ dev->ppdev = devm_platform_profile_register(dev->dev, "amd-pmf", dev,
+ &amd_pmf_profile_ops);
+ if (IS_ERR(dev->ppdev))
+ dev_err(dev->dev, "Failed to register SPS support, this is most likely an SBIOS bug: %ld\n",
+ PTR_ERR(dev->ppdev));
+
+ return PTR_ERR_OR_ZERO(dev->ppdev);
}
diff --git a/drivers/platform/x86/amd/pmf/tee-if.c b/drivers/platform/x86/amd/pmf/tee-if.c
new file mode 100644
index 000000000000..0abce76f89ff
--- /dev/null
+++ b/drivers/platform/x86/amd/pmf/tee-if.c
@@ -0,0 +1,629 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * AMD Platform Management Framework Driver - TEE Interface
+ *
+ * Copyright (c) 2023, Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Author: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
+ */
+
+#include <linux/debugfs.h>
+#include <linux/tee_drv.h>
+#include <linux/uuid.h>
+#include "pmf.h"
+
+#define MAX_TEE_PARAM 4
+
+/* Policy binary actions sampling frequency (in ms) */
+static int pb_actions_ms = MSEC_PER_SEC;
+/* Sideload policy binaries to debug policy failures */
+static bool pb_side_load;
+
+#ifdef CONFIG_AMD_PMF_DEBUG
+module_param(pb_actions_ms, int, 0644);
+MODULE_PARM_DESC(pb_actions_ms, "Policy binary actions sampling frequency (default = 1000ms)");
+module_param(pb_side_load, bool, 0444);
+MODULE_PARM_DESC(pb_side_load, "Sideload policy binaries to debug policy failures");
+#endif
+
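+/* Supported PMF TA UUIDs; amd_pmf_init_smart_pc() tries each in turn until a session works */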
+static const uuid_t amd_pmf_ta_uuid[] = { UUID_INIT(0xd9b39bf2, 0x66bd, 0x4154, 0xaf, 0xb8, 0x8a,
+ 0xcc, 0x2b, 0x2b, 0x60, 0xd6),
+ UUID_INIT(0x6fd93b77, 0x3fb8, 0x524d, 0xb1, 0x2d, 0xc5,
+ 0x29, 0xb1, 0x3d, 0x85, 0x43),
+ };
+
+static const char *amd_pmf_uevent_as_str(unsigned int state)
+{
+ switch (state) {
+ case SYSTEM_STATE_S0i3:
+ return "S0i3";
+ case SYSTEM_STATE_S4:
+ return "S4";
+ case SYSTEM_STATE_SCREEN_LOCK:
+ return "SCREEN_LOCK";
+ default:
+ return "Unknown Smart PC event";
+ }
+}
+
+static void amd_pmf_prepare_args(struct amd_pmf_dev *dev, int cmd,
+ struct tee_ioctl_invoke_arg *arg,
+ struct tee_param *param)
+{
+ memset(arg, 0, sizeof(*arg));
+ memset(param, 0, MAX_TEE_PARAM * sizeof(*param));
+
+ arg->func = cmd;
+ arg->session = dev->session_id;
+ arg->num_params = MAX_TEE_PARAM;
+
+ /* Fill invoke cmd params */
+ param[0].u.memref.size = sizeof(struct ta_pmf_shared_memory);
+ param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT;
+ param[0].u.memref.shm = dev->fw_shm_pool;
+ param[0].u.memref.shm_offs = 0;
+}
+
+static void amd_pmf_update_uevents(struct amd_pmf_dev *dev, u16 event)
+{
+ input_report_key(dev->pmf_idev, event, 1); /* key press */
+ input_sync(dev->pmf_idev);
+ input_report_key(dev->pmf_idev, event, 0); /* key release */
+ input_sync(dev->pmf_idev);
+}
+
+static int amd_pmf_get_bios_output_idx(u32 action_idx)
+{
+ switch (action_idx) {
+ case PMF_POLICY_BIOS_OUTPUT_1:
+ return 0;
+ case PMF_POLICY_BIOS_OUTPUT_2:
+ return 1;
+ case PMF_POLICY_BIOS_OUTPUT_3:
+ return 2;
+ case PMF_POLICY_BIOS_OUTPUT_4:
+ return 3;
+ case PMF_POLICY_BIOS_OUTPUT_5:
+ return 4;
+ case PMF_POLICY_BIOS_OUTPUT_6:
+ return 5;
+ case PMF_POLICY_BIOS_OUTPUT_7:
+ return 6;
+ case PMF_POLICY_BIOS_OUTPUT_8:
+ return 7;
+ case PMF_POLICY_BIOS_OUTPUT_9:
+ return 8;
+ case PMF_POLICY_BIOS_OUTPUT_10:
+ return 9;
+ default:
+ return -EINVAL;
+ }
+}
+
+static void amd_pmf_update_bios_output(struct amd_pmf_dev *pdev, struct ta_pmf_action *action)
+{
+ u32 bios_idx;
+
+ bios_idx = amd_pmf_get_bios_output_idx(action->action_index);
+
+ amd_pmf_smartpc_apply_bios_output(pdev, action->value, BIT(bios_idx), bios_idx);
+}
+
+static void amd_pmf_apply_policies(struct amd_pmf_dev *dev, struct ta_pmf_enact_result *out)
+{
+ struct ta_pmf_action *action;
+ u32 val;
+ int idx;
+
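+	/* Only send a command when the value differs from the last applied one cached in prev_data */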
+ for (idx = 0; idx < out->actions_count; idx++) {
+ action = &out->actions_list[idx];
+ val = action->value;
+ switch (action->action_index) {
+ case PMF_POLICY_SPL:
+ if (dev->prev_data->spl != val) {
+ amd_pmf_send_cmd(dev, SET_SPL, SET_CMD, val, NULL);
+ dev_dbg(dev->dev, "update SPL: %u\n", val);
+ dev->prev_data->spl = val;
+ }
+ break;
+
+ case PMF_POLICY_SPPT:
+ if (dev->prev_data->sppt != val) {
+ amd_pmf_send_cmd(dev, SET_SPPT, SET_CMD, val, NULL);
+ dev_dbg(dev->dev, "update SPPT: %u\n", val);
+ dev->prev_data->sppt = val;
+ }
+ break;
+
+ case PMF_POLICY_FPPT:
+ if (dev->prev_data->fppt != val) {
+ amd_pmf_send_cmd(dev, SET_FPPT, SET_CMD, val, NULL);
+ dev_dbg(dev->dev, "update FPPT: %u\n", val);
+ dev->prev_data->fppt = val;
+ }
+ break;
+
+ case PMF_POLICY_SPPT_APU_ONLY:
+ if (dev->prev_data->sppt_apuonly != val) {
+ amd_pmf_send_cmd(dev, SET_SPPT_APU_ONLY, SET_CMD, val, NULL);
+ dev_dbg(dev->dev, "update SPPT_APU_ONLY: %u\n", val);
+ dev->prev_data->sppt_apuonly = val;
+ }
+ break;
+
+ case PMF_POLICY_STT_MIN:
+ if (dev->prev_data->stt_minlimit != val) {
+ amd_pmf_send_cmd(dev, SET_STT_MIN_LIMIT, SET_CMD, val, NULL);
+ dev_dbg(dev->dev, "update STT_MIN: %u\n", val);
+ dev->prev_data->stt_minlimit = val;
+ }
+ break;
+
+ case PMF_POLICY_STT_SKINTEMP_APU:
+ if (dev->prev_data->stt_skintemp_apu != val) {
+ amd_pmf_send_cmd(dev, SET_STT_LIMIT_APU, SET_CMD,
+ fixp_q88_fromint(val), NULL);
+ dev_dbg(dev->dev, "update STT_SKINTEMP_APU: %u\n", val);
+ dev->prev_data->stt_skintemp_apu = val;
+ }
+ break;
+
+ case PMF_POLICY_STT_SKINTEMP_HS2:
+ if (dev->prev_data->stt_skintemp_hs2 != val) {
+ amd_pmf_send_cmd(dev, SET_STT_LIMIT_HS2, SET_CMD,
+ fixp_q88_fromint(val), NULL);
+ dev_dbg(dev->dev, "update STT_SKINTEMP_HS2: %u\n", val);
+ dev->prev_data->stt_skintemp_hs2 = val;
+ }
+ break;
+
+ case PMF_POLICY_P3T:
+ if (dev->prev_data->p3t_limit != val) {
+ amd_pmf_send_cmd(dev, SET_P3T, SET_CMD, val, NULL);
+ dev_dbg(dev->dev, "update P3T: %u\n", val);
+ dev->prev_data->p3t_limit = val;
+ }
+ break;
+
+ case PMF_POLICY_PMF_PPT:
+ if (dev->prev_data->pmf_ppt != val) {
+ amd_pmf_send_cmd(dev, SET_PMF_PPT, SET_CMD, val, NULL);
+ dev_dbg(dev->dev, "update PMF PPT: %u\n", val);
+ dev->prev_data->pmf_ppt = val;
+ }
+ break;
+
+ case PMF_POLICY_PMF_PPT_APU_ONLY:
+ if (dev->prev_data->pmf_ppt_apu_only != val) {
+ amd_pmf_send_cmd(dev, SET_PMF_PPT_APU_ONLY, SET_CMD, val, NULL);
+ dev_dbg(dev->dev, "update PMF PPT APU ONLY: %u\n", val);
+ dev->prev_data->pmf_ppt_apu_only = val;
+ }
+ break;
+
+ case PMF_POLICY_SYSTEM_STATE:
+ switch (val) {
+ case 0:
+ amd_pmf_update_uevents(dev, KEY_SLEEP);
+ break;
+ case 1:
+ amd_pmf_update_uevents(dev, KEY_SUSPEND);
+ break;
+ case 2:
+ amd_pmf_update_uevents(dev, KEY_SCREENLOCK);
+ break;
+ default:
+ dev_err(dev->dev, "Invalid PMF policy system state: %d\n", val);
+ }
+
+ dev_dbg(dev->dev, "update SYSTEM_STATE: %s\n",
+ amd_pmf_uevent_as_str(val));
+ break;
+
+ case PMF_POLICY_BIOS_OUTPUT_1:
+ case PMF_POLICY_BIOS_OUTPUT_2:
+ case PMF_POLICY_BIOS_OUTPUT_3:
+ case PMF_POLICY_BIOS_OUTPUT_4:
+ case PMF_POLICY_BIOS_OUTPUT_5:
+ case PMF_POLICY_BIOS_OUTPUT_6:
+ case PMF_POLICY_BIOS_OUTPUT_7:
+ case PMF_POLICY_BIOS_OUTPUT_8:
+ case PMF_POLICY_BIOS_OUTPUT_9:
+ case PMF_POLICY_BIOS_OUTPUT_10:
+ amd_pmf_update_bios_output(dev, action);
+ break;
+ }
+ }
+}
+
+int amd_pmf_invoke_cmd_enact(struct amd_pmf_dev *dev)
+{
+ struct ta_pmf_shared_memory *ta_sm = NULL;
+ struct ta_pmf_enact_result *out = NULL;
+ struct ta_pmf_enact_table *in = NULL;
+ struct tee_param param[MAX_TEE_PARAM];
+ struct tee_ioctl_invoke_arg arg;
+ int ret = 0;
+
+ if (!dev->tee_ctx)
+ return -ENODEV;
+
+ memset(dev->shbuf, 0, dev->policy_sz);
+ ta_sm = dev->shbuf;
+ out = &ta_sm->pmf_output.policy_apply_table;
+ in = &ta_sm->pmf_input.enact_table;
+
+ memset(ta_sm, 0, sizeof(*ta_sm));
+ ta_sm->command_id = TA_PMF_COMMAND_POLICY_BUILDER_ENACT_POLICIES;
+ ta_sm->if_version = PMF_TA_IF_VERSION_MAJOR;
+
+ amd_pmf_populate_ta_inputs(dev, in);
+ amd_pmf_prepare_args(dev, TA_PMF_COMMAND_POLICY_BUILDER_ENACT_POLICIES, &arg, param);
+
+ ret = tee_client_invoke_func(dev->tee_ctx, &arg, param);
+ if (ret < 0 || arg.ret != 0) {
+ dev_err(dev->dev, "TEE enact cmd failed. err: %x, ret:%d\n", arg.ret, ret);
+		return ret ?: -EINVAL;
+ }
+
+ if (ta_sm->pmf_result == TA_PMF_TYPE_SUCCESS && out->actions_count) {
+ amd_pmf_dump_ta_inputs(dev, in);
+ dev_dbg(dev->dev, "action count:%u result:%x\n", out->actions_count,
+ ta_sm->pmf_result);
+ amd_pmf_apply_policies(dev, out);
+ }
+
+ return 0;
+}
+
+static int amd_pmf_invoke_cmd_init(struct amd_pmf_dev *dev)
+{
+ struct ta_pmf_shared_memory *ta_sm = NULL;
+ struct tee_param param[MAX_TEE_PARAM];
+ struct ta_pmf_init_table *in = NULL;
+ struct tee_ioctl_invoke_arg arg;
+ int ret = 0;
+
+ if (!dev->tee_ctx) {
+ dev_err(dev->dev, "Failed to get TEE context\n");
+ return -ENODEV;
+ }
+
+ dev_dbg(dev->dev, "Policy Binary size: %llu bytes\n", (unsigned long long)dev->policy_sz);
+ memset(dev->shbuf, 0, dev->policy_sz);
+ ta_sm = dev->shbuf;
+ in = &ta_sm->pmf_input.init_table;
+
+ ta_sm->command_id = TA_PMF_COMMAND_POLICY_BUILDER_INITIALIZE;
+ ta_sm->if_version = PMF_TA_IF_VERSION_MAJOR;
+
+ in->metadata_macrocheck = false;
+ in->sku_check = false;
+ in->validate = true;
+ in->frequency = pb_actions_ms;
+ in->policies_table.table_size = dev->policy_sz;
+
+ memcpy(in->policies_table.table, dev->policy_buf, dev->policy_sz);
+ amd_pmf_prepare_args(dev, TA_PMF_COMMAND_POLICY_BUILDER_INITIALIZE, &arg, param);
+
+ ret = tee_client_invoke_func(dev->tee_ctx, &arg, param);
+ if (ret < 0 || arg.ret != 0) {
+ dev_err(dev->dev, "Failed to invoke TEE init cmd. err: %x, ret:%d\n", arg.ret, ret);
+		return ret ?: -EINVAL;
+ }
+
+ return ta_sm->pmf_result;
+}
+
+static void amd_pmf_invoke_cmd(struct work_struct *work)
+{
+ struct amd_pmf_dev *dev = container_of(work, struct amd_pmf_dev, pb_work.work);
+
+ amd_pmf_invoke_cmd_enact(dev);
+ schedule_delayed_work(&dev->pb_work, msecs_to_jiffies(pb_actions_ms));
+}
+
+static int amd_pmf_start_policy_engine(struct amd_pmf_dev *dev)
+{
+ struct cookie_header *header;
+ int res;
+
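+	/* Validate the policy binary header (cookie signature and length) before handing it to the TA */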
+ if (dev->policy_sz < POLICY_COOKIE_OFFSET + sizeof(*header))
+ return -EINVAL;
+
+ header = (struct cookie_header *)(dev->policy_buf + POLICY_COOKIE_OFFSET);
+
+ if (header->sign != POLICY_SIGN_COOKIE || !header->length) {
+ dev_dbg(dev->dev, "cookie doesn't match\n");
+ return -EINVAL;
+ }
+
+ if (dev->policy_sz < header->length + 512)
+ return -EINVAL;
+
+ /* Update the actual length */
+ dev->policy_sz = header->length + 512;
+ res = amd_pmf_invoke_cmd_init(dev);
+ if (res == TA_PMF_TYPE_SUCCESS) {
+		/* Now it's safe to announce that Smart PC is enabled */
+		dev->smart_pc_enabled = true;
+		/*
+		 * Start collecting data from the TA firmware after a short
+		 * delay, or else we might end up reading stale values.
+		 */
+ schedule_delayed_work(&dev->pb_work, msecs_to_jiffies(pb_actions_ms * 3));
+ } else {
+ dev_dbg(dev->dev, "ta invoke cmd init failed err: %x\n", res);
+ dev->smart_pc_enabled = false;
+ return res;
+ }
+
+ return 0;
+}
+
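+/* The policy buffer is considered valid only if it is not entirely 0xFF */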
+static inline bool amd_pmf_pb_valid(struct amd_pmf_dev *dev)
+{
+ return memchr_inv(dev->policy_buf, 0xff, dev->policy_sz);
+}
+
+#ifdef CONFIG_AMD_PMF_DEBUG
+static void amd_pmf_hex_dump_pb(struct amd_pmf_dev *dev)
+{
+ print_hex_dump_debug("(pb): ", DUMP_PREFIX_OFFSET, 16, 1, dev->policy_buf,
+ dev->policy_sz, false);
+}
+
+static ssize_t amd_pmf_get_pb_data(struct file *filp, const char __user *buf,
+ size_t length, loff_t *pos)
+{
+ struct amd_pmf_dev *dev = filp->private_data;
+ unsigned char *new_policy_buf;
+ int ret;
+
+ /* Policy binary size cannot exceed POLICY_BUF_MAX_SZ */
+ if (length > POLICY_BUF_MAX_SZ || length == 0)
+ return -EINVAL;
+
+	/* Reallocate the buffer to match the new policy binary length */
+ new_policy_buf = devm_kzalloc(dev->dev, length, GFP_KERNEL);
+ if (!new_policy_buf)
+ return -ENOMEM;
+
+ if (copy_from_user(new_policy_buf, buf, length)) {
+ devm_kfree(dev->dev, new_policy_buf);
+ return -EFAULT;
+ }
+
+ devm_kfree(dev->dev, dev->policy_buf);
+ dev->policy_buf = new_policy_buf;
+ dev->policy_sz = length;
+
+ if (!amd_pmf_pb_valid(dev))
+ return -EINVAL;
+
+ amd_pmf_hex_dump_pb(dev);
+ ret = amd_pmf_start_policy_engine(dev);
+ if (ret < 0)
+ return ret;
+
+ return length;
+}
+
+static const struct file_operations pb_fops = {
+ .write = amd_pmf_get_pb_data,
+ .open = simple_open,
+};
+
+static void amd_pmf_open_pb(struct amd_pmf_dev *dev, struct dentry *debugfs_root)
+{
+ dev->esbin = debugfs_create_dir("pb", debugfs_root);
+ debugfs_create_file("update_policy", 0644, dev->esbin, dev, &pb_fops);
+}
+
+static void amd_pmf_remove_pb(struct amd_pmf_dev *dev)
+{
+ debugfs_remove_recursive(dev->esbin);
+}
+#else
+static void amd_pmf_open_pb(struct amd_pmf_dev *dev, struct dentry *debugfs_root) {}
+static void amd_pmf_remove_pb(struct amd_pmf_dev *dev) {}
+static void amd_pmf_hex_dump_pb(struct amd_pmf_dev *dev) {}
+#endif
+
+static int amd_pmf_amdtee_ta_match(struct tee_ioctl_version_data *ver, const void *data)
+{
+ return ver->impl_id == TEE_IMPL_ID_AMDTEE;
+}
+
+static int amd_pmf_ta_open_session(struct tee_context *ctx, u32 *id, const uuid_t *uuid)
+{
+ struct tee_ioctl_open_session_arg sess_arg = {};
+ int rc;
+
+ export_uuid(sess_arg.uuid, uuid);
+ sess_arg.clnt_login = TEE_IOCTL_LOGIN_PUBLIC;
+ sess_arg.num_params = 0;
+
+ rc = tee_client_open_session(ctx, &sess_arg, NULL);
+ if (rc < 0 || sess_arg.ret != 0) {
+ pr_err("Failed to open TEE session err:%#x, rc:%d\n", sess_arg.ret, rc);
+ return rc ?: -EINVAL;
+ }
+
+ *id = sess_arg.session;
+
+ return 0;
+}
+
+static int amd_pmf_register_input_device(struct amd_pmf_dev *dev)
+{
+ int err;
+
+ dev->pmf_idev = devm_input_allocate_device(dev->dev);
+ if (!dev->pmf_idev)
+ return -ENOMEM;
+
+ dev->pmf_idev->name = "PMF-TA output events";
+ dev->pmf_idev->phys = "amd-pmf/input0";
+
+ input_set_capability(dev->pmf_idev, EV_KEY, KEY_SLEEP);
+ input_set_capability(dev->pmf_idev, EV_KEY, KEY_SCREENLOCK);
+ input_set_capability(dev->pmf_idev, EV_KEY, KEY_SUSPEND);
+
+ err = input_register_device(dev->pmf_idev);
+ if (err) {
+ dev_err(dev->dev, "Failed to register input device: %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+static int amd_pmf_tee_init(struct amd_pmf_dev *dev, const uuid_t *uuid)
+{
+ u32 size;
+ int ret;
+
+ dev->tee_ctx = tee_client_open_context(NULL, amd_pmf_amdtee_ta_match, NULL, NULL);
+ if (IS_ERR(dev->tee_ctx)) {
+ dev_err(dev->dev, "Failed to open TEE context\n");
+ ret = PTR_ERR(dev->tee_ctx);
+ dev->tee_ctx = NULL;
+ return ret;
+ }
+
+ ret = amd_pmf_ta_open_session(dev->tee_ctx, &dev->session_id, uuid);
+ if (ret) {
+ dev_err(dev->dev, "Failed to open TA session (%d)\n", ret);
+ ret = -EINVAL;
+ goto out_ctx;
+ }
+
+ size = sizeof(struct ta_pmf_shared_memory) + dev->policy_sz;
+ dev->fw_shm_pool = tee_shm_alloc_kernel_buf(dev->tee_ctx, size);
+ if (IS_ERR(dev->fw_shm_pool)) {
+ dev_err(dev->dev, "Failed to alloc TEE shared memory\n");
+ ret = PTR_ERR(dev->fw_shm_pool);
+ goto out_sess;
+ }
+
+ dev->shbuf = tee_shm_get_va(dev->fw_shm_pool, 0);
+ if (IS_ERR(dev->shbuf)) {
+ dev_err(dev->dev, "Failed to get TEE virtual address\n");
+ ret = PTR_ERR(dev->shbuf);
+ goto out_shm;
+ }
+ dev_dbg(dev->dev, "TEE init done\n");
+
+ return 0;
+
+out_shm:
+ tee_shm_free(dev->fw_shm_pool);
+out_sess:
+ tee_client_close_session(dev->tee_ctx, dev->session_id);
+out_ctx:
+ tee_client_close_context(dev->tee_ctx);
+
+ return ret;
+}
+
+static void amd_pmf_tee_deinit(struct amd_pmf_dev *dev)
+{
+ if (!dev->tee_ctx)
+ return;
+ tee_shm_free(dev->fw_shm_pool);
+ tee_client_close_session(dev->tee_ctx, dev->session_id);
+ tee_client_close_context(dev->tee_ctx);
+ dev->tee_ctx = NULL;
+}
+
+int amd_pmf_init_smart_pc(struct amd_pmf_dev *dev)
+{
+ bool status;
+ int ret, i;
+
+ ret = apmf_check_smart_pc(dev);
+ if (ret) {
+		/*
+		 * The Smart PC bit is not advertised in the BIOS, so bail out
+		 * of Smart PC init only. The static slider (if enabled) still
+		 * provides some amount of power savings to the user.
+		 */
+		dev_info(dev->dev, "PMF Smart PC not advertised in BIOS: %d\n", ret);
+ return -ENODEV;
+ }
+
+ INIT_DELAYED_WORK(&dev->pb_work, amd_pmf_invoke_cmd);
+
+ ret = amd_pmf_set_dram_addr(dev, true);
+ if (ret)
+ return ret;
+
+ dev->policy_base = devm_ioremap_resource(dev->dev, dev->res);
+ if (IS_ERR(dev->policy_base))
+ return PTR_ERR(dev->policy_base);
+
+ dev->policy_buf = devm_kzalloc(dev->dev, dev->policy_sz, GFP_KERNEL);
+ if (!dev->policy_buf)
+ return -ENOMEM;
+
+ memcpy_fromio(dev->policy_buf, dev->policy_base, dev->policy_sz);
+
+ if (!amd_pmf_pb_valid(dev)) {
+ dev_info(dev->dev, "No Smart PC policy present\n");
+ return -EINVAL;
+ }
+
+ amd_pmf_hex_dump_pb(dev);
+
+ dev->prev_data = devm_kzalloc(dev->dev, sizeof(*dev->prev_data), GFP_KERNEL);
+ if (!dev->prev_data)
+ return -ENOMEM;
+
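+	/* Try each known TA UUID until the policy engine starts successfully */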
+ for (i = 0; i < ARRAY_SIZE(amd_pmf_ta_uuid); i++) {
+ ret = amd_pmf_tee_init(dev, &amd_pmf_ta_uuid[i]);
+ if (ret)
+ return ret;
+
+ ret = amd_pmf_start_policy_engine(dev);
+ dev_dbg(dev->dev, "start policy engine ret: %d\n", ret);
+ status = ret == TA_PMF_TYPE_SUCCESS;
+ if (status) {
+ dev->cb_flag = true;
+ break;
+ }
+ amd_pmf_tee_deinit(dev);
+ }
+
+ if (!status && !pb_side_load) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ if (pb_side_load)
+ amd_pmf_open_pb(dev, dev->dbgfs_dir);
+
+ ret = amd_pmf_register_input_device(dev);
+ if (ret)
+ goto err;
+
+ return 0;
+
+err:
+ amd_pmf_deinit_smart_pc(dev);
+
+ return ret;
+}
+
+void amd_pmf_deinit_smart_pc(struct amd_pmf_dev *dev)
+{
+ if (dev->pmf_idev)
+ input_unregister_device(dev->pmf_idev);
+
+ if (pb_side_load && dev->esbin)
+ amd_pmf_remove_pb(dev);
+
+ cancel_delayed_work_sync(&dev->pb_work);
+ amd_pmf_tee_deinit(dev);
+}