path: root/drivers/gpu/drm/amd/powerplay
author    Linus Torvalds <torvalds@linux-foundation.org>  2019-09-19 16:24:24 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2019-09-19 16:24:24 -0700
commit    574cc4539762561d96b456dbc0544d8898bd4c6e (patch)
tree      07d84db8cf9fd30cbde6f539ce3a3f6116593e41 /drivers/gpu/drm/amd/powerplay
parent    3c2edc36a77420d8be05d656019dbc8c31535992 (diff)
parent    945b584c94f8c665b2df3834a8a6a8faf256cd5f (diff)
Merge tag 'drm-next-2019-09-18' of git://anongit.freedesktop.org/drm/drm
Pull drm updates from Dave Airlie:
 "This is the main pull request for the 5.4-rc1 merge window. I don't
  think there is anything outstanding, so next week should just be
  fixes, but we'll see if I missed anything. I landed some fixes
  earlier in the week but got delayed writing the summary and sending
  it out, due to a mix of a sick kid and jetlag!

  There are some fixes pending, but I'd rather get the main merge out
  of the way instead of delaying it longer. It's also pretty large in
  commit count and new amd header file size. The largest thing is four
  new amdgpu products (navi12/14, arcturus and renoir APU support).

  Otherwise it's pretty much lots of work across the board: i915 has
  started landing tigerlake support, lots of icelake fixes and lots of
  locking reworking for future gpu support, lots of header file rework
  (drmP.h is nearly gone), and some old legacy hacks (DRM_WAIT_ON) have
  been put into the places they are needed.

  uapi:
   - content protection type property for HDCP

  core:
   - rework include dependencies
   - lots of drmP.h removals
   - link rate calculation robustness fix
   - make fb helper map only when required
   - add connector->DDC adapter link
   - DRM_WAIT_ON removed
   - drop DRM_AUTH usage from drivers

  dma-buf:
   - reservation object fence helper

  dma-fence:
   - shrink dma_fence struct
   - merge signal functions
   - store timestamps in dma_fence
   - selftests

  ttm:
   - embed drm_gem_object struct into ttm_buffer_object
   - release_notify callback

  bridges:
   - sii902x - audio graph card support
   - tc358767 - aux data handling rework
   - ti-snd64dsi86 - debugfs support, DSI mode flags support

  panels:
   - Support for GiantPlus GPM940B0, Sharp LQ070Y3DG3B, Ortustech
     COM37H3M, Novatek NT39016, Sharp LS020B1DD01D, Raydium RM67191,
     Boe Himax8279d, Sharp LD-D5116Z01B
   - TI nspire, NEC NL8048HL11, LG Philips LB035Q02, Sharp LS037V7DW01,
     Sony ACX565AKM, Toppoly TD028TTEC1, Toppoly TD043MTEA1

  i915:
   - Initial tigerlake platform support
   - Locking simplification work, general all over refactoring
   - Selftests
   - HDCP debug info improvements
   - DSI properties
   - Icelake display PLL fixes, colorspace fixes, bandwidth fixes, DSI
     suspend/resume
   - GuC fixes
   - Perf fixes
   - ElkhartLake enablement
   - DP MST fixes
   - GVT - command parser enhancements

  amdgpu:
   - add wipe memory on release flag for buffer creation
   - Navi12/14 support (may be marked experimental)
   - Arcturus support
   - Renoir APU support
   - mclk DPM for Navi
   - DC display fixes
   - Raven scatter/gather support
   - RAS support for GFX
   - Navi12 + Arcturus power features
   - GPU reset for Picasso
   - smu11 i2c controller support

  amdkfd:
   - navi12/14 support
   - Arcturus support

  radeon:
   - kexec fix

  nouveau:
   - improved display color management
   - detect lack of GPU power cables

  vmwgfx:
   - eviction priority support
   - remove unused security feature

  msm:
   - msm8998 display support
   - better async commit support for cursor updates

  etnaviv:
   - per-process address space support
   - performance counter fixes
   - softpin support

  mcde:
   - DCS transfers fix

  exynos:
   - drmP.h cleanup

  lima:
   - reduce logging

  kirin:
   - misc cleanups

  komeda:
   - dual-link support
   - DT memory regions

  hisilicon:
   - misc fixes

  imx:
   - IPUv3 image converter fixes
   - 32-bit RGB V4L2 pixel format support

  ingenic:
   - more support for panel related cases

  mgag200:
   - cursor support fix

  panfrost:
   - export GPU features register to userspace
   - gpu heap allocations
   - per-fd address space support

  pl111:
   - CLD pads wiring support removed from DT

  rockchip:
   - rework to use DRM PSR helpers
   - fix bug in VOP_WIN_GET macro
   - DSI DT binding rework

  sun4i:
   - improve support for color encoding and range
   - DDC enabled GPIO

  tinydrm:
   - rework SPI support
   - improve MIPI-DBI support
   - moved to drm/tiny

  vkms:
   - rework CRC tracking

  dw-hdmi:
   - get_eld and i2s improvements

  gm12u320:
   - misc fixes

  meson:
   - global code cleanup
   - vpu feature detect

  omap:
   - alpha/pixel blend mode properties

  rcar-du:
   - misc fixes"

* tag 'drm-next-2019-09-18' of git://anongit.freedesktop.org/drm/drm: (2112 commits)
  drm/nouveau/bar/gm20b: Avoid BAR1 teardown during init
  drm/nouveau: Fix ordering between TTM and GEM release
  drm/nouveau/prime: Extend DMA reservation object lock
  drm/nouveau: Fix fallout from reservation object rework
  drm/nouveau/kms/nv50-: Don't create MSTMs for eDP connectors
  drm/i915: Use NOEVICT for first pass on attemping to pin a GGTT mmap
  drm/i915: to make vgpu ppgtt notificaiton as atomic operation
  drm/i915: Flush the existing fence before GGTT read/write
  drm/i915: Hold irq-off for the entire fake lock period
  drm/i915/gvt: update RING_START reg of vGPU when the context is submitted to i915
  drm/i915/gvt: update vgpu workload head pointer correctly
  drm/mcde: Fix DSI transfers
  drm/msm: Use the correct dma_sync calls harder
  drm/msm: remove unlikely() from WARN_ON() conditions
  drm/msm/dsi: Fix return value check for clk_get_parent
  drm/msm: add atomic traces
  drm/msm/dpu: async commit support
  drm/msm: async commit support
  drm/msm: split power control from prepare/complete_commit
  drm/msm: add kms->flush_commit()
  ...
Diffstat (limited to 'drivers/gpu/drm/amd/powerplay')
-rw-r--r--  drivers/gpu/drm/amd/powerplay/Makefile                            |    2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/amd_powerplay.c                     |   51
-rw-r--r--  drivers/gpu/drm/amd/powerplay/amdgpu_smu.c                        |  375
-rw-r--r--  drivers/gpu/drm/amd/powerplay/arcturus_ppt.c                      | 1938
-rw-r--r--  drivers/gpu/drm/amd/powerplay/arcturus_ppt.h                      |   72
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c                 |    7
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c                  |   12
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c                |   28
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c                |   26
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c                |   48
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h                    |  240
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/arcturus_ppsmc.h                |  120
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/hwmgr.h                         |   11
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/rv_ppsmc.h                      |    2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h               |    4
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_arcturus.h      |  891
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_navi10.h        |   29
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/smu12_driver_if.h               |  217
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/smu_types.h                     |  263
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h                     |   27
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h                     |   42
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/smu_v12_0_ppsmc.h               |  106
-rw-r--r--  drivers/gpu/drm/amd/powerplay/navi10_ppt.c                        |  346
-rw-r--r--  drivers/gpu/drm/amd/powerplay/renoir_ppt.c                        |  195
-rw-r--r--  drivers/gpu/drm/amd/powerplay/renoir_ppt.h                        |   28
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smu_v11_0.c                         |  323
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smu_v12_0.c                         |  412
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c               |    4
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c                |    5
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c              |    4
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c              |    4
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c              |   10
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.h              |    2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/vega20_ppt.c                        |  259
34 files changed, 5262 insertions, 841 deletions
diff --git a/drivers/gpu/drm/amd/powerplay/Makefile b/drivers/gpu/drm/amd/powerplay/Makefile
index 727c5cff231c..390345f2d601 100644
--- a/drivers/gpu/drm/amd/powerplay/Makefile
+++ b/drivers/gpu/drm/amd/powerplay/Makefile
@@ -35,7 +35,7 @@ AMD_POWERPLAY = $(addsuffix /Makefile,$(addprefix $(FULL_AMD_PATH)/powerplay/,$(
include $(AMD_POWERPLAY)
-POWER_MGR = amd_powerplay.o amdgpu_smu.o smu_v11_0.o vega20_ppt.o navi10_ppt.o
+POWER_MGR = amd_powerplay.o amdgpu_smu.o smu_v11_0.o smu_v12_0.o vega20_ppt.o arcturus_ppt.o navi10_ppt.o renoir_ppt.o
AMD_PP_POWER = $(addprefix $(AMD_PP_PATH)/,$(POWER_MGR))
diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
index bea1587d352d..fa636cb462c1 100644
--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
@@ -924,6 +924,19 @@ static int pp_odn_edit_dpm_table(void *handle, uint32_t type, long *input, uint3
return hwmgr->hwmgr_func->odn_edit_dpm_table(hwmgr, type, input, size);
}
+static int pp_dpm_set_mp1_state(void *handle, enum pp_mp1_state mp1_state)
+{
+ struct pp_hwmgr *hwmgr = handle;
+
+ if (!hwmgr || !hwmgr->pm_en)
+ return -EINVAL;
+
+ if (hwmgr->hwmgr_func->set_mp1_state)
+ return hwmgr->hwmgr_func->set_mp1_state(hwmgr, mp1_state);
+
+ return 0;
+}
+
static int pp_dpm_switch_power_profile(void *handle,
enum PP_SMC_POWER_PROFILE type, bool en)
{
@@ -1495,6 +1508,41 @@ static int pp_set_ppfeature_status(void *handle, uint64_t ppfeature_masks)
return ret;
}
+static int pp_asic_reset_mode_2(void *handle)
+{
+ struct pp_hwmgr *hwmgr = handle;
+ int ret = 0;
+
+ if (!hwmgr || !hwmgr->pm_en)
+ return -EINVAL;
+
+ if (hwmgr->hwmgr_func->asic_reset == NULL) {
+ pr_info_ratelimited("%s was not implemented.\n", __func__);
+ return -EINVAL;
+ }
+
+ mutex_lock(&hwmgr->smu_lock);
+ ret = hwmgr->hwmgr_func->asic_reset(hwmgr, SMU_ASIC_RESET_MODE_2);
+ mutex_unlock(&hwmgr->smu_lock);
+
+ return ret;
+}
+
+static int pp_smu_i2c_bus_access(void *handle, bool acquire)
+{
+ struct pp_hwmgr *hwmgr = handle;
+
+ if (!hwmgr || !hwmgr->pm_en)
+ return -EINVAL;
+
+ if (hwmgr->hwmgr_func->smu_i2c_bus_access == NULL) {
+ pr_info_ratelimited("%s was not implemented.\n", __func__);
+ return -EINVAL;
+ }
+
+ return hwmgr->hwmgr_func->smu_i2c_bus_access(hwmgr, acquire);
+}
+
static const struct amd_pm_funcs pp_dpm_funcs = {
.load_firmware = pp_dpm_load_fw,
.wait_for_fw_loading_complete = pp_dpm_fw_loading_complete,
@@ -1525,6 +1573,7 @@ static const struct amd_pm_funcs pp_dpm_funcs = {
.get_power_profile_mode = pp_get_power_profile_mode,
.set_power_profile_mode = pp_set_power_profile_mode,
.odn_edit_dpm_table = pp_odn_edit_dpm_table,
+ .set_mp1_state = pp_dpm_set_mp1_state,
.set_power_limit = pp_set_power_limit,
.get_power_limit = pp_get_power_limit,
/* export to DC */
@@ -1550,4 +1599,6 @@ static const struct amd_pm_funcs pp_dpm_funcs = {
.set_asic_baco_state = pp_set_asic_baco_state,
.get_ppfeature_status = pp_get_ppfeature_status,
.set_ppfeature_status = pp_set_ppfeature_status,
+ .asic_reset_mode_2 = pp_asic_reset_mode_2,
+ .smu_i2c_bus_access = pp_smu_i2c_bus_access,
};
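
[Illustration, not part of the patch] The three callbacks added above (set_mp1_state, asic_reset_mode_2, smu_i2c_bus_access) are reached from the amdgpu core through the amd_pm_funcs table. Below is a minimal sketch of such a caller, assuming the usual adev->powerplay.pp_funcs / pp_handle plumbing; the wrapper name example_set_mp1_state is made up for illustration only.

    /* illustrative caller, not in the diff */
    static int example_set_mp1_state(struct amdgpu_device *adev,
                                     enum pp_mp1_state mp1_state)
    {
            const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

            /* the callback is optional; not every power backend provides it */
            if (!pp_funcs || !pp_funcs->set_mp1_state)
                    return -EOPNOTSUPP;

            /* the handle is the pp_hwmgr context allocated by amd_powerplay */
            return pp_funcs->set_mp1_state(adev->powerplay.pp_handle, mp1_state);
    }
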
diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
index 8a3eadeebdcb..22f3c60d380f 100644
--- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
@@ -27,9 +27,105 @@
#include "amdgpu_smu.h"
#include "soc15_common.h"
#include "smu_v11_0.h"
+#include "smu_v12_0.h"
#include "atom.h"
#include "amd_pcie.h"
+#undef __SMU_DUMMY_MAP
+#define __SMU_DUMMY_MAP(type) #type
+static const char* __smu_message_names[] = {
+ SMU_MESSAGE_TYPES
+};
+
+const char *smu_get_message_name(struct smu_context *smu, enum smu_message_type type)
+{
+ if (type < 0 || type >= SMU_MSG_MAX_COUNT)
+ return "unknown smu message";
+ return __smu_message_names[type];
+}
+
+#undef __SMU_DUMMY_MAP
+#define __SMU_DUMMY_MAP(fea) #fea
+static const char* __smu_feature_names[] = {
+ SMU_FEATURE_MASKS
+};
+
+const char *smu_get_feature_name(struct smu_context *smu, enum smu_feature_mask feature)
+{
+ if (feature < 0 || feature >= SMU_FEATURE_COUNT)
+ return "unknown smu feature";
+ return __smu_feature_names[feature];
+}
+
+size_t smu_sys_get_pp_feature_mask(struct smu_context *smu, char *buf)
+{
+ size_t size = 0;
+ int ret = 0, i = 0;
+ uint32_t feature_mask[2] = { 0 };
+ int32_t feature_index = 0;
+ uint32_t count = 0;
+ uint32_t sort_feature[SMU_FEATURE_COUNT];
+ uint64_t hw_feature_count = 0;
+
+ ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
+ if (ret)
+ goto failed;
+
+ size = sprintf(buf + size, "features high: 0x%08x low: 0x%08x\n",
+ feature_mask[1], feature_mask[0]);
+
+ for (i = 0; i < SMU_FEATURE_COUNT; i++) {
+ feature_index = smu_feature_get_index(smu, i);
+ if (feature_index < 0)
+ continue;
+ sort_feature[feature_index] = i;
+ hw_feature_count++;
+ }
+
+ for (i = 0; i < hw_feature_count; i++) {
+ size += sprintf(buf + size, "%02d. %-20s (%2d) : %s\n",
+ count++,
+ smu_get_feature_name(smu, sort_feature[i]),
+ i,
+ !!smu_feature_is_enabled(smu, sort_feature[i]) ?
+ "enabled" : "disabled");
+ }
+
+failed:
+ return size;
+}
+
+int smu_sys_set_pp_feature_mask(struct smu_context *smu, uint64_t new_mask)
+{
+ int ret = 0;
+ uint32_t feature_mask[2] = { 0 };
+ uint64_t feature_2_enabled = 0;
+ uint64_t feature_2_disabled = 0;
+ uint64_t feature_enables = 0;
+
+ ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
+ if (ret)
+ return ret;
+
+ feature_enables = ((uint64_t)feature_mask[1] << 32 | (uint64_t)feature_mask[0]);
+
+ feature_2_enabled = ~feature_enables & new_mask;
+ feature_2_disabled = feature_enables & ~new_mask;
+
+ if (feature_2_enabled) {
+ ret = smu_feature_update_enable_state(smu, feature_2_enabled, true);
+ if (ret)
+ return ret;
+ }
+ if (feature_2_disabled) {
+ ret = smu_feature_update_enable_state(smu, feature_2_disabled, false);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
int smu_get_smc_version(struct smu_context *smu, uint32_t *if_version, uint32_t *smu_version)
{
int ret = 0;
@@ -135,9 +231,8 @@ int smu_set_hard_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
uint32_t *min, uint32_t *max)
{
- int ret = 0, clk_id = 0;
- uint32_t param = 0;
uint32_t clock_limit;
+ int ret = 0;
if (!min && !max)
return -EINVAL;
@@ -168,36 +263,11 @@ int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
return 0;
}
-
- mutex_lock(&smu->mutex);
- clk_id = smu_clk_get_index(smu, clk_type);
- if (clk_id < 0) {
- ret = -EINVAL;
- goto failed;
- }
-
- param = (clk_id & 0xffff) << 16;
-
- if (max) {
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq, param);
- if (ret)
- goto failed;
- ret = smu_read_smc_arg(smu, max);
- if (ret)
- goto failed;
- }
-
- if (min) {
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMinDpmFreq, param);
- if (ret)
- goto failed;
- ret = smu_read_smc_arg(smu, min);
- if (ret)
- goto failed;
- }
-
-failed:
- mutex_unlock(&smu->mutex);
+ /*
+ * Todo: Use each asic(ASIC_ppt funcs) control the callbacks exposed to the
+ * core driver and then have helpers for stuff that is common(SMU_v11_x | SMU_v12_x funcs).
+ */
+ ret = smu_get_dpm_ultimate_freq(smu, clk_type, min, max);
return ret;
}
@@ -262,7 +332,6 @@ bool smu_clk_dpm_is_enabled(struct smu_context *smu, enum smu_clk_type clk_type)
}
if(!smu_feature_is_enabled(smu, feature_id)) {
- pr_warn("smu %d clk dpm feature %d is not enabled\n", clk_type, feature_id);
return false;
}
@@ -319,6 +388,9 @@ int smu_common_read_sensor(struct smu_context *smu, enum amd_pp_sensors sensor,
struct smu_power_gate *power_gate = &smu_power->power_gate;
int ret = 0;
+ if(!data || !size)
+ return -EINVAL;
+
switch (sensor) {
case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
*((uint32_t *)data) = smu->pstate_sclk;
@@ -359,11 +431,12 @@ int smu_update_table(struct smu_context *smu, enum smu_table_id table_index, int
void *table_data, bool drv2smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
+ struct amdgpu_device *adev = smu->adev;
struct smu_table *table = NULL;
int ret = 0;
int table_id = smu_table_get_index(smu, table_index);
- if (!table_data || table_id >= smu_table->table_count)
+ if (!table_data || table_id >= smu_table->table_count || table_id < 0)
return -EINVAL;
table = &smu_table->tables[table_index];
@@ -386,6 +459,9 @@ int smu_update_table(struct smu_context *smu, enum smu_table_id table_index, int
if (ret)
return ret;
+ /* flush hdp cache */
+ adev->nbio_funcs->hdp_flush(adev, NULL);
+
if (!drv2smu)
memcpy(table_data, table->cpu_addr, table->size);
@@ -396,12 +472,23 @@ bool is_support_sw_smu(struct amdgpu_device *adev)
{
if (adev->asic_type == CHIP_VEGA20)
return (amdgpu_dpm == 2) ? true : false;
- else if (adev->asic_type >= CHIP_NAVI10)
+ else if (adev->asic_type >= CHIP_ARCTURUS)
return true;
else
return false;
}
+bool is_support_sw_smu_xgmi(struct amdgpu_device *adev)
+{
+ if (amdgpu_dpm != 1)
+ return false;
+
+ if (adev->asic_type == CHIP_VEGA20)
+ return true;
+
+ return false;
+}
+
int smu_sys_get_pp_table(struct smu_context *smu, void **table)
{
struct smu_table_context *smu_table = &smu->smu_table;
@@ -479,14 +566,55 @@ int smu_feature_init_dpm(struct smu_context *smu)
return ret;
}
+int smu_feature_update_enable_state(struct smu_context *smu, uint64_t feature_mask, bool enabled)
+{
+ uint32_t feature_low = 0, feature_high = 0;
+ int ret = 0;
+
+ if (!smu->pm_enabled)
+ return ret;
+
+ feature_low = (feature_mask >> 0 ) & 0xffffffff;
+ feature_high = (feature_mask >> 32) & 0xffffffff;
+
+ if (enabled) {
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesLow,
+ feature_low);
+ if (ret)
+ return ret;
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesHigh,
+ feature_high);
+ if (ret)
+ return ret;
+
+ } else {
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesLow,
+ feature_low);
+ if (ret)
+ return ret;
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesHigh,
+ feature_high);
+ if (ret)
+ return ret;
+
+ }
+
+ return ret;
+}
int smu_feature_is_enabled(struct smu_context *smu, enum smu_feature_mask mask)
{
+ struct amdgpu_device *adev = smu->adev;
struct smu_feature *feature = &smu->smu_feature;
- uint32_t feature_id;
+ int feature_id;
int ret = 0;
+ if (adev->flags & AMD_IS_APU)
+ return 1;
+
feature_id = smu_feature_get_index(smu, mask);
+ if (feature_id < 0)
+ return 0;
WARN_ON(feature_id > feature->feature_num);
@@ -501,15 +629,20 @@ int smu_feature_set_enabled(struct smu_context *smu, enum smu_feature_mask mask,
bool enable)
{
struct smu_feature *feature = &smu->smu_feature;
- uint32_t feature_id;
+ int feature_id;
+ uint64_t feature_mask = 0;
int ret = 0;
feature_id = smu_feature_get_index(smu, mask);
+ if (feature_id < 0)
+ return -EINVAL;
WARN_ON(feature_id > feature->feature_num);
+ feature_mask = 1ULL << feature_id;
+
mutex_lock(&feature->mutex);
- ret = smu_feature_update_enable_state(smu, feature_id, enable);
+ ret = smu_feature_update_enable_state(smu, feature_mask, enable);
if (ret)
goto failed;
@@ -527,10 +660,12 @@ failed:
int smu_feature_is_supported(struct smu_context *smu, enum smu_feature_mask mask)
{
struct smu_feature *feature = &smu->smu_feature;
- uint32_t feature_id;
+ int feature_id;
int ret = 0;
feature_id = smu_feature_get_index(smu, mask);
+ if (feature_id < 0)
+ return 0;
WARN_ON(feature_id > feature->feature_num);
@@ -546,10 +681,12 @@ int smu_feature_set_supported(struct smu_context *smu,
bool enable)
{
struct smu_feature *feature = &smu->smu_feature;
- uint32_t feature_id;
+ int feature_id;
int ret = 0;
feature_id = smu_feature_get_index(smu, mask);
+ if (feature_id < 0)
+ return -EINVAL;
WARN_ON(feature_id > feature->feature_num);
@@ -570,10 +707,18 @@ static int smu_set_funcs(struct amdgpu_device *adev)
switch (adev->asic_type) {
case CHIP_VEGA20:
case CHIP_NAVI10:
+ case CHIP_NAVI14:
+ case CHIP_NAVI12:
+ case CHIP_ARCTURUS:
if (adev->pm.pp_feature & PP_OVERDRIVE_MASK)
smu->od_enabled = true;
smu_v11_0_set_smu_funcs(smu);
break;
+ case CHIP_RENOIR:
+ if (adev->pm.pp_feature & PP_OVERDRIVE_MASK)
+ smu->od_enabled = true;
+ smu_v12_0_set_smu_funcs(smu);
+ break;
default:
return -EINVAL;
}
@@ -600,6 +745,7 @@ static int smu_late_init(void *handle)
if (!smu->pm_enabled)
return 0;
+
mutex_lock(&smu->mutex);
smu_handle_task(&adev->smu,
smu->smu_dpm.dpm_level,
@@ -829,6 +975,9 @@ static int smu_override_pcie_parameters(struct smu_context *smu)
uint32_t pcie_gen = 0, pcie_width = 0, smu_pcie_arg;
int ret;
+ if (adev->flags & AMD_IS_APU)
+ return 0;
+
if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)
pcie_gen = 3;
else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
@@ -875,9 +1024,11 @@ static int smu_smc_table_hw_init(struct smu_context *smu,
return 0;
}
- ret = smu_init_display_count(smu, 0);
- if (ret)
- return ret;
+ if (adev->asic_type != CHIP_ARCTURUS) {
+ ret = smu_init_display_count(smu, 0);
+ if (ret)
+ return ret;
+ }
if (initialize) {
/* get boot_values from vbios to set revision, gfxclk, and etc. */
@@ -926,6 +1077,8 @@ static int smu_smc_table_hw_init(struct smu_context *smu,
return ret;
}
+ /* smu_dump_pptable(smu); */
+
/*
* Copy pptable bo in the vram to smc with SMU MSGs such as
* SetDriverDramAddr and TransferTableDram2Smu.
@@ -947,21 +1100,23 @@ static int smu_smc_table_hw_init(struct smu_context *smu,
if (ret)
return ret;
- ret = smu_override_pcie_parameters(smu);
- if (ret)
- return ret;
+ if (adev->asic_type != CHIP_ARCTURUS) {
+ ret = smu_override_pcie_parameters(smu);
+ if (ret)
+ return ret;
- ret = smu_notify_display_change(smu);
- if (ret)
- return ret;
+ ret = smu_notify_display_change(smu);
+ if (ret)
+ return ret;
- /*
- * Set min deep sleep dce fclk with bootup value from vbios via
- * SetMinDeepSleepDcefclk MSG.
- */
- ret = smu_set_min_dcef_deep_sleep(smu);
- if (ret)
- return ret;
+ /*
+ * Set min deep sleep dce fclk with bootup value from vbios via
+ * SetMinDeepSleepDcefclk MSG.
+ */
+ ret = smu_set_min_dcef_deep_sleep(smu);
+ if (ret)
+ return ret;
+ }
/*
* Set initialized values (get from vbios) to dpm tables context such as
@@ -969,7 +1124,7 @@ static int smu_smc_table_hw_init(struct smu_context *smu,
* type of clks.
*/
if (initialize) {
- ret = smu_populate_smc_pptable(smu);
+ ret = smu_populate_smc_tables(smu);
if (ret)
return ret;
@@ -987,7 +1142,7 @@ static int smu_smc_table_hw_init(struct smu_context *smu,
if (ret)
return ret;
- ret = smu_get_power_limit(smu, &smu->default_power_limit, false);
+ ret = smu_get_power_limit(smu, &smu->default_power_limit, true);
if (ret)
return ret;
}
@@ -1072,14 +1227,28 @@ static int smu_hw_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct smu_context *smu = &adev->smu;
- if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
- ret = smu_check_fw_status(smu);
- if (ret) {
- pr_err("SMC firmware status is not correct\n");
- return ret;
+ if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
+ if (adev->asic_type < CHIP_NAVI10) {
+ ret = smu_load_microcode(smu);
+ if (ret)
+ return ret;
}
}
+ ret = smu_check_fw_status(smu);
+ if (ret) {
+ pr_err("SMC firmware status is not correct\n");
+ return ret;
+ }
+
+ if (adev->flags & AMD_IS_APU) {
+ smu_powergate_sdma(&adev->smu, false);
+ smu_powergate_vcn(&adev->smu, false);
+ }
+
+ if (!smu->pm_enabled)
+ return 0;
+
ret = smu_feature_init_dpm(smu);
if (ret)
goto failed;
@@ -1124,6 +1293,11 @@ static int smu_hw_fini(void *handle)
struct smu_table_context *table_context = &smu->smu_table;
int ret = 0;
+ if (adev->flags & AMD_IS_APU) {
+ smu_powergate_sdma(&adev->smu, true);
+ smu_powergate_vcn(&adev->smu, true);
+ }
+
kfree(table_context->driver_pptable);
table_context->driver_pptable = NULL;
@@ -1431,6 +1605,7 @@ int smu_adjust_power_state_dynamic(struct smu_context *smu,
if (!smu->pm_enabled)
return -EINVAL;
+
if (!skip_display_settings) {
ret = smu_display_config_changed(smu);
if (ret) {
@@ -1439,8 +1614,6 @@ int smu_adjust_power_state_dynamic(struct smu_context *smu,
}
}
- if (!smu->pm_enabled)
- return -EINVAL;
ret = smu_apply_clocks_adjust_rules(smu);
if (ret) {
pr_err("Failed to apply clocks adjust rules!");
@@ -1459,9 +1632,14 @@ int smu_adjust_power_state_dynamic(struct smu_context *smu,
ret = smu_asic_set_performance_level(smu, level);
if (ret) {
ret = smu_default_set_performance_level(smu, level);
+ if (ret) {
+ pr_err("Failed to set performance level!");
+ return ret;
+ }
}
- if (!ret)
- smu_dpm_ctx->dpm_level = level;
+
+ /* update the saved copy */
+ smu_dpm_ctx->dpm_level = level;
}
if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
@@ -1503,6 +1681,42 @@ int smu_handle_task(struct smu_context *smu,
return ret;
}
+int smu_switch_power_profile(struct smu_context *smu,
+ enum PP_SMC_POWER_PROFILE type,
+ bool en)
+{
+ struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
+ long workload;
+ uint32_t index;
+
+ if (!smu->pm_enabled)
+ return -EINVAL;
+
+ if (!(type < PP_SMC_POWER_PROFILE_CUSTOM))
+ return -EINVAL;
+
+ mutex_lock(&smu->mutex);
+
+ if (!en) {
+ smu->workload_mask &= ~(1 << smu->workload_prority[type]);
+ index = fls(smu->workload_mask);
+ index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
+ workload = smu->workload_setting[index];
+ } else {
+ smu->workload_mask |= (1 << smu->workload_prority[type]);
+ index = fls(smu->workload_mask);
+ index = index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
+ workload = smu->workload_setting[index];
+ }
+
+ if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
+ smu_set_power_profile_mode(smu, &workload, 0);
+
+ mutex_unlock(&smu->mutex);
+
+ return 0;
+}
+
enum amd_dpm_forced_level smu_get_performance_level(struct smu_context *smu)
{
struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
@@ -1520,28 +1734,18 @@ enum amd_dpm_forced_level smu_get_performance_level(struct smu_context *smu)
int smu_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level)
{
- int ret = 0;
- int i;
struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
+ int ret = 0;
if (!smu_dpm_ctx->dpm_context)
return -EINVAL;
- for (i = 0; i < smu->adev->num_ip_blocks; i++) {
- if (smu->adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC)
- break;
- }
-
-
- smu->adev->ip_blocks[i].version->funcs->enable_umd_pstate(smu, &level);
- ret = smu_handle_task(smu, level,
- AMD_PP_TASK_READJUST_POWER_STATE);
+ ret = smu_enable_umd_pstate(smu, &level);
if (ret)
return ret;
- mutex_lock(&smu->mutex);
- smu_dpm_ctx->dpm_level = level;
- mutex_unlock(&smu->mutex);
+ ret = smu_handle_task(smu, level,
+ AMD_PP_TASK_READJUST_POWER_STATE);
return ret;
}
@@ -1584,3 +1788,12 @@ const struct amdgpu_ip_block_version smu_v11_0_ip_block =
.rev = 0,
.funcs = &smu_ip_funcs,
};
+
+const struct amdgpu_ip_block_version smu_v12_0_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_SMC,
+ .major = 12,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &smu_ip_funcs,
+};
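
[Illustration, not part of the patch] The feature-mask helpers added to amdgpu_smu.c above share one pattern: the requested 64-bit mask is diffed against the currently enabled features so only changed bits are sent, and each half of a mask becomes the argument of the Low/High variant of the Enable/DisableSmuFeatures message. A small sketch of that bit handling follows, assuming nothing beyond standard C; the example_* names are hypothetical.

    /* which bits must be turned on/off to reach the requested mask */
    static void example_feature_mask_delta(uint64_t enabled, uint64_t requested,
                                           uint64_t *to_enable, uint64_t *to_disable)
    {
            *to_enable  = ~enabled &  requested;  /* newly requested bits       */
            *to_disable =  enabled & ~requested;  /* bits that must be cleared  */
    }

    /* split a 64-bit mask into the Low/High message arguments */
    static void example_split_mask(uint64_t mask, uint32_t *low, uint32_t *high)
    {
            *low  = mask & 0xffffffff;   /* SMU_MSG_EnableSmuFeaturesLow  argument */
            *high = mask >> 32;          /* SMU_MSG_EnableSmuFeaturesHigh argument */
    }
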
diff --git a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
new file mode 100644
index 000000000000..f1f072012fac
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
@@ -0,0 +1,1938 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "pp_debug.h"
+#include <linux/firmware.h>
+#include "amdgpu.h"
+#include "amdgpu_smu.h"
+#include "atomfirmware.h"
+#include "amdgpu_atomfirmware.h"
+#include "smu_v11_0.h"
+#include "smu11_driver_if_arcturus.h"
+#include "soc15_common.h"
+#include "atom.h"
+#include "power_state.h"
+#include "arcturus_ppt.h"
+#include "smu_v11_0_pptable.h"
+#include "arcturus_ppsmc.h"
+#include "nbio/nbio_7_4_sh_mask.h"
+
+#define CTF_OFFSET_EDGE 5
+#define CTF_OFFSET_HOTSPOT 5
+#define CTF_OFFSET_HBM 5
+
+#define MSG_MAP(msg, index) \
+ [SMU_MSG_##msg] = {1, (index)}
+#define ARCTURUS_FEA_MAP(smu_feature, arcturus_feature) \
+ [smu_feature] = {1, (arcturus_feature)}
+
+#define SMU_FEATURES_LOW_MASK 0x00000000FFFFFFFF
+#define SMU_FEATURES_LOW_SHIFT 0
+#define SMU_FEATURES_HIGH_MASK 0xFFFFFFFF00000000
+#define SMU_FEATURES_HIGH_SHIFT 32
+
+#define SMC_DPM_FEATURE ( \
+ FEATURE_DPM_PREFETCHER_MASK | \
+ FEATURE_DPM_GFXCLK_MASK | \
+ FEATURE_DPM_UCLK_MASK | \
+ FEATURE_DPM_SOCCLK_MASK | \
+ FEATURE_DPM_MP0CLK_MASK | \
+ FEATURE_DPM_FCLK_MASK | \
+ FEATURE_DPM_XGMI_MASK)
+
+/* possible frequency drift (1Mhz) */
+#define EPSILON 1
+
+static struct smu_11_0_cmn2aisc_mapping arcturus_message_map[SMU_MSG_MAX_COUNT] = {
+ MSG_MAP(TestMessage, PPSMC_MSG_TestMessage),
+ MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion),
+ MSG_MAP(GetDriverIfVersion, PPSMC_MSG_GetDriverIfVersion),
+ MSG_MAP(SetAllowedFeaturesMaskLow, PPSMC_MSG_SetAllowedFeaturesMaskLow),
+ MSG_MAP(SetAllowedFeaturesMaskHigh, PPSMC_MSG_SetAllowedFeaturesMaskHigh),
+ MSG_MAP(EnableAllSmuFeatures, PPSMC_MSG_EnableAllSmuFeatures),
+ MSG_MAP(DisableAllSmuFeatures, PPSMC_MSG_DisableAllSmuFeatures),
+ MSG_MAP(EnableSmuFeaturesLow, PPSMC_MSG_EnableSmuFeaturesLow),
+ MSG_MAP(EnableSmuFeaturesHigh, PPSMC_MSG_EnableSmuFeaturesHigh),
+ MSG_MAP(DisableSmuFeaturesLow, PPSMC_MSG_DisableSmuFeaturesLow),
+ MSG_MAP(DisableSmuFeaturesHigh, PPSMC_MSG_DisableSmuFeaturesHigh),
+ MSG_MAP(GetEnabledSmuFeaturesLow, PPSMC_MSG_GetEnabledSmuFeaturesLow),
+ MSG_MAP(GetEnabledSmuFeaturesHigh, PPSMC_MSG_GetEnabledSmuFeaturesHigh),
+ MSG_MAP(SetDriverDramAddrHigh, PPSMC_MSG_SetDriverDramAddrHigh),
+ MSG_MAP(SetDriverDramAddrLow, PPSMC_MSG_SetDriverDramAddrLow),
+ MSG_MAP(SetToolsDramAddrHigh, PPSMC_MSG_SetToolsDramAddrHigh),
+ MSG_MAP(SetToolsDramAddrLow, PPSMC_MSG_SetToolsDramAddrLow),
+ MSG_MAP(TransferTableSmu2Dram, PPSMC_MSG_TransferTableSmu2Dram),
+ MSG_MAP(TransferTableDram2Smu, PPSMC_MSG_TransferTableDram2Smu),
+ MSG_MAP(UseDefaultPPTable, PPSMC_MSG_UseDefaultPPTable),
+ MSG_MAP(UseBackupPPTable, PPSMC_MSG_UseBackupPPTable),
+ MSG_MAP(SetSystemVirtualDramAddrHigh, PPSMC_MSG_SetSystemVirtualDramAddrHigh),
+ MSG_MAP(SetSystemVirtualDramAddrLow, PPSMC_MSG_SetSystemVirtualDramAddrLow),
+ MSG_MAP(EnterBaco, PPSMC_MSG_EnterBaco),
+ MSG_MAP(ExitBaco, PPSMC_MSG_ExitBaco),
+ MSG_MAP(ArmD3, PPSMC_MSG_ArmD3),
+ MSG_MAP(SetSoftMinByFreq, PPSMC_MSG_SetSoftMinByFreq),
+ MSG_MAP(SetSoftMaxByFreq, PPSMC_MSG_SetSoftMaxByFreq),
+ MSG_MAP(SetHardMinByFreq, PPSMC_MSG_SetHardMinByFreq),
+ MSG_MAP(SetHardMaxByFreq, PPSMC_MSG_SetHardMaxByFreq),
+ MSG_MAP(GetMinDpmFreq, PPSMC_MSG_GetMinDpmFreq),
+ MSG_MAP(GetMaxDpmFreq, PPSMC_MSG_GetMaxDpmFreq),
+ MSG_MAP(GetDpmFreqByIndex, PPSMC_MSG_GetDpmFreqByIndex),
+ MSG_MAP(SetWorkloadMask, PPSMC_MSG_SetWorkloadMask),
+ MSG_MAP(SetDfSwitchType, PPSMC_MSG_SetDfSwitchType),
+ MSG_MAP(GetVoltageByDpm, PPSMC_MSG_GetVoltageByDpm),
+ MSG_MAP(GetVoltageByDpmOverdrive, PPSMC_MSG_GetVoltageByDpmOverdrive),
+ MSG_MAP(SetPptLimit, PPSMC_MSG_SetPptLimit),
+ MSG_MAP(GetPptLimit, PPSMC_MSG_GetPptLimit),
+ MSG_MAP(PowerUpVcn0, PPSMC_MSG_PowerUpVcn0),
+ MSG_MAP(PowerDownVcn0, PPSMC_MSG_PowerDownVcn0),
+ MSG_MAP(PowerUpVcn1, PPSMC_MSG_PowerUpVcn1),
+ MSG_MAP(PowerDownVcn1, PPSMC_MSG_PowerDownVcn1),
+ MSG_MAP(PrepareMp1ForUnload, PPSMC_MSG_PrepareMp1ForUnload),
+ MSG_MAP(PrepareMp1ForReset, PPSMC_MSG_PrepareMp1ForReset),
+ MSG_MAP(PrepareMp1ForShutdown, PPSMC_MSG_PrepareMp1ForShutdown),
+ MSG_MAP(SoftReset, PPSMC_MSG_SoftReset),
+ MSG_MAP(RunAfllBtc, PPSMC_MSG_RunAfllBtc),
+ MSG_MAP(RunGfxDcBtc, PPSMC_MSG_RunGfxDcBtc),
+ MSG_MAP(RunSocDcBtc, PPSMC_MSG_RunSocDcBtc),
+ MSG_MAP(DramLogSetDramAddrHigh, PPSMC_MSG_DramLogSetDramAddrHigh),
+ MSG_MAP(DramLogSetDramAddrLow, PPSMC_MSG_DramLogSetDramAddrLow),
+ MSG_MAP(DramLogSetDramSize, PPSMC_MSG_DramLogSetDramSize),
+ MSG_MAP(GetDebugData, PPSMC_MSG_GetDebugData),
+ MSG_MAP(WaflTest, PPSMC_MSG_WaflTest),
+ MSG_MAP(SetXgmiMode, PPSMC_MSG_SetXgmiMode),
+ MSG_MAP(SetMemoryChannelEnable, PPSMC_MSG_SetMemoryChannelEnable),
+};
+
+static struct smu_11_0_cmn2aisc_mapping arcturus_clk_map[SMU_CLK_COUNT] = {
+ CLK_MAP(GFXCLK, PPCLK_GFXCLK),
+ CLK_MAP(SCLK, PPCLK_GFXCLK),
+ CLK_MAP(SOCCLK, PPCLK_SOCCLK),
+ CLK_MAP(FCLK, PPCLK_FCLK),
+ CLK_MAP(UCLK, PPCLK_UCLK),
+ CLK_MAP(MCLK, PPCLK_UCLK),
+ CLK_MAP(DCLK, PPCLK_DCLK),
+ CLK_MAP(VCLK, PPCLK_VCLK),
+};
+
+static struct smu_11_0_cmn2aisc_mapping arcturus_feature_mask_map[SMU_FEATURE_COUNT] = {
+ FEA_MAP(DPM_PREFETCHER),
+ FEA_MAP(DPM_GFXCLK),
+ FEA_MAP(DPM_UCLK),
+ FEA_MAP(DPM_SOCCLK),
+ FEA_MAP(DPM_FCLK),
+ FEA_MAP(DPM_MP0CLK),
+ ARCTURUS_FEA_MAP(SMU_FEATURE_XGMI_BIT, FEATURE_DPM_XGMI_BIT),
+ FEA_MAP(DS_GFXCLK),
+ FEA_MAP(DS_SOCCLK),
+ FEA_MAP(DS_LCLK),
+ FEA_MAP(DS_FCLK),
+ FEA_MAP(DS_UCLK),
+ FEA_MAP(GFX_ULV),
+ ARCTURUS_FEA_MAP(SMU_FEATURE_VCN_PG_BIT, FEATURE_DPM_VCN_BIT),
+ FEA_MAP(RSMU_SMN_CG),
+ FEA_MAP(WAFL_CG),
+ FEA_MAP(PPT),
+ FEA_MAP(TDC),
+ FEA_MAP(APCC_PLUS),
+ FEA_MAP(VR0HOT),
+ FEA_MAP(VR1HOT),
+ FEA_MAP(FW_CTF),
+ FEA_MAP(FAN_CONTROL),
+ FEA_MAP(THERMAL),
+ FEA_MAP(OUT_OF_BAND_MONITOR),
+ FEA_MAP(TEMP_DEPENDENT_VMIN),
+};
+
+static struct smu_11_0_cmn2aisc_mapping arcturus_table_map[SMU_TABLE_COUNT] = {
+ TAB_MAP(PPTABLE),
+ TAB_MAP(AVFS),
+ TAB_MAP(AVFS_PSM_DEBUG),
+ TAB_MAP(AVFS_FUSE_OVERRIDE),
+ TAB_MAP(PMSTATUSLOG),
+ TAB_MAP(SMU_METRICS),
+ TAB_MAP(DRIVER_SMU_CONFIG),
+ TAB_MAP(OVERDRIVE),
+};
+
+static struct smu_11_0_cmn2aisc_mapping arcturus_pwr_src_map[SMU_POWER_SOURCE_COUNT] = {
+ PWR_MAP(AC),
+ PWR_MAP(DC),
+};
+
+static struct smu_11_0_cmn2aisc_mapping arcturus_workload_map[PP_SMC_POWER_PROFILE_COUNT] = {
+ WORKLOAD_MAP(PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT, WORKLOAD_PPLIB_DEFAULT_BIT),
+ WORKLOAD_MAP(PP_SMC_POWER_PROFILE_POWERSAVING, WORKLOAD_PPLIB_POWER_SAVING_BIT),
+ WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VIDEO, WORKLOAD_PPLIB_VIDEO_BIT),
+ WORKLOAD_MAP(PP_SMC_POWER_PROFILE_COMPUTE, WORKLOAD_PPLIB_COMPUTE_BIT),
+ WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM, WORKLOAD_PPLIB_CUSTOM_BIT),
+};
+
+static int arcturus_get_smu_msg_index(struct smu_context *smc, uint32_t index)
+{
+ struct smu_11_0_cmn2aisc_mapping mapping;
+
+ if (index >= SMU_MSG_MAX_COUNT)
+ return -EINVAL;
+
+ mapping = arcturus_message_map[index];
+ if (!(mapping.valid_mapping))
+ return -EINVAL;
+
+ return mapping.map_to;
+}
+
+static int arcturus_get_smu_clk_index(struct smu_context *smc, uint32_t index)
+{
+ struct smu_11_0_cmn2aisc_mapping mapping;
+
+ if (index >= SMU_CLK_COUNT)
+ return -EINVAL;
+
+ mapping = arcturus_clk_map[index];
+ if (!(mapping.valid_mapping)) {
+ pr_warn("Unsupported SMU clk: %d\n", index);
+ return -EINVAL;
+ }
+
+ return mapping.map_to;
+}
+
+static int arcturus_get_smu_feature_index(struct smu_context *smc, uint32_t index)
+{
+ struct smu_11_0_cmn2aisc_mapping mapping;
+
+ if (index >= SMU_FEATURE_COUNT)
+ return -EINVAL;
+
+ mapping = arcturus_feature_mask_map[index];
+ if (!(mapping.valid_mapping)) {
+ return -EINVAL;
+ }
+
+ return mapping.map_to;
+}
+
+static int arcturus_get_smu_table_index(struct smu_context *smc, uint32_t index)
+{
+ struct smu_11_0_cmn2aisc_mapping mapping;
+
+ if (index >= SMU_TABLE_COUNT)
+ return -EINVAL;
+
+ mapping = arcturus_table_map[index];
+ if (!(mapping.valid_mapping)) {
+ pr_warn("Unsupported SMU table: %d\n", index);
+ return -EINVAL;
+ }
+
+ return mapping.map_to;
+}
+
+static int arcturus_get_pwr_src_index(struct smu_context *smc, uint32_t index)
+{
+ struct smu_11_0_cmn2aisc_mapping mapping;
+
+ if (index >= SMU_POWER_SOURCE_COUNT)
+ return -EINVAL;
+
+ mapping = arcturus_pwr_src_map[index];
+ if (!(mapping.valid_mapping)) {
+ pr_warn("Unsupported SMU power source: %d\n", index);
+ return -EINVAL;
+ }
+
+ return mapping.map_to;
+}
+
+
+static int arcturus_get_workload_type(struct smu_context *smu, enum PP_SMC_POWER_PROFILE profile)
+{
+ struct smu_11_0_cmn2aisc_mapping mapping;
+
+ if (profile > PP_SMC_POWER_PROFILE_CUSTOM)
+ return -EINVAL;
+
+ mapping = arcturus_workload_map[profile];
+ if (!(mapping.valid_mapping)) {
+ pr_warn("Unsupported SMU power source: %d\n", profile);
+ return -EINVAL;
+ }
+
+ return mapping.map_to;
+}
+
+static int arcturus_tables_init(struct smu_context *smu, struct smu_table *tables)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+
+ SMU_TABLE_INIT(tables, SMU_TABLE_PPTABLE, sizeof(PPTable_t),
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
+
+ SMU_TABLE_INIT(tables, SMU_TABLE_PMSTATUSLOG, SMU11_TOOL_SIZE,
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
+
+ SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_t),
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
+
+ smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL);
+ if (!smu_table->metrics_table)
+ return -ENOMEM;
+ smu_table->metrics_time = 0;
+
+ return 0;
+}
+
+static int arcturus_allocate_dpm_context(struct smu_context *smu)
+{
+ struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
+
+ if (smu_dpm->dpm_context)
+ return -EINVAL;
+
+ smu_dpm->dpm_context = kzalloc(sizeof(struct arcturus_dpm_table),
+ GFP_KERNEL);
+ if (!smu_dpm->dpm_context)
+ return -ENOMEM;
+
+ if (smu_dpm->golden_dpm_context)
+ return -EINVAL;
+
+ smu_dpm->golden_dpm_context = kzalloc(sizeof(struct arcturus_dpm_table),
+ GFP_KERNEL);
+ if (!smu_dpm->golden_dpm_context)
+ return -ENOMEM;
+
+ smu_dpm->dpm_context_size = sizeof(struct arcturus_dpm_table);
+
+ smu_dpm->dpm_current_power_state = kzalloc(sizeof(struct smu_power_state),
+ GFP_KERNEL);
+ if (!smu_dpm->dpm_current_power_state)
+ return -ENOMEM;
+
+ smu_dpm->dpm_request_power_state = kzalloc(sizeof(struct smu_power_state),
+ GFP_KERNEL);
+ if (!smu_dpm->dpm_request_power_state)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int
+arcturus_get_allowed_feature_mask(struct smu_context *smu,
+ uint32_t *feature_mask, uint32_t num)
+{
+ if (num > 2)
+ return -EINVAL;
+
+ /* pptable will handle the features to enable */
+ memset(feature_mask, 0xFF, sizeof(uint32_t) * num);
+
+ return 0;
+}
+
+static int
+arcturus_set_single_dpm_table(struct smu_context *smu,
+ struct arcturus_single_dpm_table *single_dpm_table,
+ PPCLK_e clk_id)
+{
+ int ret = 0;
+ uint32_t i, num_of_levels = 0, clk;
+
+ ret = smu_send_smc_msg_with_param(smu,
+ SMU_MSG_GetDpmFreqByIndex,
+ (clk_id << 16 | 0xFF));
+ if (ret) {
+ pr_err("[%s] failed to get dpm levels!\n", __func__);
+ return ret;
+ }
+
+ smu_read_smc_arg(smu, &num_of_levels);
+ if (!num_of_levels) {
+ pr_err("[%s] number of clk levels is invalid!\n", __func__);
+ return -EINVAL;
+ }
+
+ single_dpm_table->count = num_of_levels;
+ for (i = 0; i < num_of_levels; i++) {
+ ret = smu_send_smc_msg_with_param(smu,
+ SMU_MSG_GetDpmFreqByIndex,
+ (clk_id << 16 | i));
+ if (ret) {
+ pr_err("[%s] failed to get dpm freq by index!\n", __func__);
+ return ret;
+ }
+ smu_read_smc_arg(smu, &clk);
+ if (!clk) {
+ pr_err("[%s] clk value is invalid!\n", __func__);
+ return -EINVAL;
+ }
+ single_dpm_table->dpm_levels[i].value = clk;
+ single_dpm_table->dpm_levels[i].enabled = true;
+ }
+ return 0;
+}
+
+static void arcturus_init_single_dpm_state(struct arcturus_dpm_state *dpm_state)
+{
+ dpm_state->soft_min_level = 0x0;
+ dpm_state->soft_max_level = 0xffff;
+ dpm_state->hard_min_level = 0x0;
+ dpm_state->hard_max_level = 0xffff;
+}
+
+static int arcturus_set_default_dpm_table(struct smu_context *smu)
+{
+ int ret;
+
+ struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
+ struct arcturus_dpm_table *dpm_table = NULL;
+ struct arcturus_single_dpm_table *single_dpm_table;
+
+ dpm_table = smu_dpm->dpm_context;
+
+ /* socclk */
+ single_dpm_table = &(dpm_table->soc_table);
+ if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
+ ret = arcturus_set_single_dpm_table(smu, single_dpm_table,
+ PPCLK_SOCCLK);
+ if (ret) {
+ pr_err("[%s] failed to get socclk dpm levels!\n", __func__);
+ return ret;
+ }
+ } else {
+ single_dpm_table->count = 1;
+ single_dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.socclk / 100;
+ }
+ arcturus_init_single_dpm_state(&(single_dpm_table->dpm_state));
+
+ /* gfxclk */
+ single_dpm_table = &(dpm_table->gfx_table);
+ if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT)) {
+ ret = arcturus_set_single_dpm_table(smu, single_dpm_table,
+ PPCLK_GFXCLK);
+ if (ret) {
+ pr_err("[SetupDefaultDpmTable] failed to get gfxclk dpm levels!");
+ return ret;
+ }
+ } else {
+ single_dpm_table->count = 1;
+ single_dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.gfxclk / 100;
+ }
+ arcturus_init_single_dpm_state(&(single_dpm_table->dpm_state));
+
+ /* memclk */
+ single_dpm_table = &(dpm_table->mem_table);
+ if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
+ ret = arcturus_set_single_dpm_table(smu, single_dpm_table,
+ PPCLK_UCLK);
+ if (ret) {
+ pr_err("[SetupDefaultDpmTable] failed to get memclk dpm levels!");
+ return ret;
+ }
+ } else {
+ single_dpm_table->count = 1;
+ single_dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.uclk / 100;
+ }
+ arcturus_init_single_dpm_state(&(single_dpm_table->dpm_state));
+
+ /* fclk */
+ single_dpm_table = &(dpm_table->fclk_table);
+ if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_FCLK_BIT)) {
+ ret = arcturus_set_single_dpm_table(smu, single_dpm_table,
+ PPCLK_FCLK);
+ if (ret) {
+ pr_err("[SetupDefaultDpmTable] failed to get fclk dpm levels!");
+ return ret;
+ }
+ } else {
+ single_dpm_table->count = 1;
+ single_dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.fclk / 100;
+ }
+ arcturus_init_single_dpm_state(&(single_dpm_table->dpm_state));
+
+ memcpy(smu_dpm->golden_dpm_context, dpm_table,
+ sizeof(struct arcturus_dpm_table));
+
+ return 0;
+}
+
+static int arcturus_check_powerplay_table(struct smu_context *smu)
+{
+ return 0;
+}
+
+static int arcturus_store_powerplay_table(struct smu_context *smu)
+{
+ struct smu_11_0_powerplay_table *powerplay_table = NULL;
+ struct smu_table_context *table_context = &smu->smu_table;
+ int ret = 0;
+
+ if (!table_context->power_play_table)
+ return -EINVAL;
+
+ powerplay_table = table_context->power_play_table;
+
+ memcpy(table_context->driver_pptable, &powerplay_table->smc_pptable,
+ sizeof(PPTable_t));
+
+ table_context->thermal_controller_type = powerplay_table->thermal_controller_type;
+
+ return ret;
+}
+
+static int arcturus_append_powerplay_table(struct smu_context *smu)
+{
+ struct smu_table_context *table_context = &smu->smu_table;
+ PPTable_t *smc_pptable = table_context->driver_pptable;
+ struct atom_smc_dpm_info_v4_6 *smc_dpm_table;
+ int index, ret;
+
+ index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
+ smc_dpm_info);
+
+ ret = smu_get_atom_data_table(smu, index, NULL, NULL, NULL,
+ (uint8_t **)&smc_dpm_table);
+ if (ret)
+ return ret;
+
+ pr_info("smc_dpm_info table revision(format.content): %d.%d\n",
+ smc_dpm_table->table_header.format_revision,
+ smc_dpm_table->table_header.content_revision);
+
+ if ((smc_dpm_table->table_header.format_revision == 4) &&
+ (smc_dpm_table->table_header.content_revision == 6))
+ memcpy(&smc_pptable->MaxVoltageStepGfx,
+ &smc_dpm_table->maxvoltagestepgfx,
+ sizeof(*smc_dpm_table) - offsetof(struct atom_smc_dpm_info_v4_6, maxvoltagestepgfx));
+
+ return 0;
+}
+
+static int arcturus_run_btc_afll(struct smu_context *smu)
+{
+ return smu_send_smc_msg(smu, SMU_MSG_RunAfllBtc);
+}
+
+static int arcturus_populate_umd_state_clk(struct smu_context *smu)
+{
+ struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
+ struct arcturus_dpm_table *dpm_table = NULL;
+ struct arcturus_single_dpm_table *gfx_table = NULL;
+ struct arcturus_single_dpm_table *mem_table = NULL;
+
+ dpm_table = smu_dpm->dpm_context;
+ gfx_table = &(dpm_table->gfx_table);
+ mem_table = &(dpm_table->mem_table);
+
+ smu->pstate_sclk = gfx_table->dpm_levels[0].value;
+ smu->pstate_mclk = mem_table->dpm_levels[0].value;
+
+ if (gfx_table->count > ARCTURUS_UMD_PSTATE_GFXCLK_LEVEL &&
+ mem_table->count > ARCTURUS_UMD_PSTATE_MCLK_LEVEL) {
+ smu->pstate_sclk = gfx_table->dpm_levels[ARCTURUS_UMD_PSTATE_GFXCLK_LEVEL].value;
+ smu->pstate_mclk = mem_table->dpm_levels[ARCTURUS_UMD_PSTATE_MCLK_LEVEL].value;
+ }
+
+ smu->pstate_sclk = smu->pstate_sclk * 100;
+ smu->pstate_mclk = smu->pstate_mclk * 100;
+
+ return 0;
+}
+
+static int arcturus_get_clk_table(struct smu_context *smu,
+ struct pp_clock_levels_with_latency *clocks,
+ struct arcturus_single_dpm_table *dpm_table)
+{
+ int i, count;
+
+ count = (dpm_table->count > MAX_NUM_CLOCKS) ? MAX_NUM_CLOCKS : dpm_table->count;
+ clocks->num_levels = count;
+
+ for (i = 0; i < count; i++) {
+ clocks->data[i].clocks_in_khz =
+ dpm_table->dpm_levels[i].value * 1000;
+ clocks->data[i].latency_in_us = 0;
+ }
+
+ return 0;
+}
+
+static int arcturus_freqs_in_same_level(int32_t frequency1,
+ int32_t frequency2)
+{
+ return (abs(frequency1 - frequency2) <= EPSILON);
+}
+
+static int arcturus_print_clk_levels(struct smu_context *smu,
+ enum smu_clk_type type, char *buf)
+{
+ int i, now, size = 0;
+ int ret = 0;
+ struct pp_clock_levels_with_latency clocks;
+ struct arcturus_single_dpm_table *single_dpm_table;
+ struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
+ struct arcturus_dpm_table *dpm_table = NULL;
+
+ dpm_table = smu_dpm->dpm_context;
+
+ switch (type) {
+ case SMU_SCLK:
+ ret = smu_get_current_clk_freq(smu, SMU_GFXCLK, &now);
+ if (ret) {
+ pr_err("Attempt to get current gfx clk Failed!");
+ return ret;
+ }
+
+ single_dpm_table = &(dpm_table->gfx_table);
+ ret = arcturus_get_clk_table(smu, &clocks, single_dpm_table);
+ if (ret) {
+ pr_err("Attempt to get gfx clk levels Failed!");
+ return ret;
+ }
+
+ for (i = 0; i < clocks.num_levels; i++)
+ size += sprintf(buf + size, "%d: %uMhz %s\n", i,
+ clocks.data[i].clocks_in_khz / 1000,
+ arcturus_freqs_in_same_level(
+ clocks.data[i].clocks_in_khz / 1000,
+ now / 100) ? "*" : "");
+ break;
+
+ case SMU_MCLK:
+ ret = smu_get_current_clk_freq(smu, SMU_UCLK, &now);
+ if (ret) {
+ pr_err("Attempt to get current mclk Failed!");
+ return ret;
+ }
+
+ single_dpm_table = &(dpm_table->mem_table);
+ ret = arcturus_get_clk_table(smu, &clocks, single_dpm_table);
+ if (ret) {
+ pr_err("Attempt to get memory clk levels Failed!");
+ return ret;
+ }
+
+ for (i = 0; i < clocks.num_levels; i++)
+ size += sprintf(buf + size, "%d: %uMhz %s\n",
+ i, clocks.data[i].clocks_in_khz / 1000,
+ arcturus_freqs_in_same_level(
+ clocks.data[i].clocks_in_khz / 1000,
+ now / 100) ? "*" : "");
+ break;
+
+ case SMU_SOCCLK:
+ ret = smu_get_current_clk_freq(smu, SMU_SOCCLK, &now);
+ if (ret) {
+ pr_err("Attempt to get current socclk Failed!");
+ return ret;
+ }
+
+ single_dpm_table = &(dpm_table->soc_table);
+ ret = arcturus_get_clk_table(smu, &clocks, single_dpm_table);
+ if (ret) {
+ pr_err("Attempt to get socclk levels Failed!");
+ return ret;
+ }
+
+ for (i = 0; i < clocks.num_levels; i++)
+ size += sprintf(buf + size, "%d: %uMhz %s\n",
+ i, clocks.data[i].clocks_in_khz / 1000,
+ arcturus_freqs_in_same_level(
+ clocks.data[i].clocks_in_khz / 1000,
+ now / 100) ? "*" : "");
+ break;
+
+ case SMU_FCLK:
+ ret = smu_get_current_clk_freq(smu, SMU_FCLK, &now);
+ if (ret) {
+ pr_err("Attempt to get current fclk Failed!");
+ return ret;
+ }
+
+ single_dpm_table = &(dpm_table->fclk_table);
+ ret = arcturus_get_clk_table(smu, &clocks, single_dpm_table);
+ if (ret) {
+ pr_err("Attempt to get fclk levels Failed!");
+ return ret;
+ }
+
+ for (i = 0; i < single_dpm_table->count; i++)
+ size += sprintf(buf + size, "%d: %uMhz %s\n",
+ i, single_dpm_table->dpm_levels[i].value,
+ arcturus_freqs_in_same_level(
+ clocks.data[i].clocks_in_khz / 1000,
+ now / 100) ? "*" : "");
+ break;
+
+ default:
+ break;
+ }
+
+ return size;
+}
+
+static int arcturus_upload_dpm_level(struct smu_context *smu, bool max,
+ uint32_t feature_mask)
+{
+ struct arcturus_single_dpm_table *single_dpm_table;
+ struct arcturus_dpm_table *dpm_table =
+ smu->smu_dpm.dpm_context;
+ uint32_t freq;
+ int ret = 0;
+
+ if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT) &&
+ (feature_mask & FEATURE_DPM_GFXCLK_MASK)) {
+ single_dpm_table = &(dpm_table->gfx_table);
+ freq = max ? single_dpm_table->dpm_state.soft_max_level :
+ single_dpm_table->dpm_state.soft_min_level;
+ ret = smu_send_smc_msg_with_param(smu,
+ (max ? SMU_MSG_SetSoftMaxByFreq : SMU_MSG_SetSoftMinByFreq),
+ (PPCLK_GFXCLK << 16) | (freq & 0xffff));
+ if (ret) {
+ pr_err("Failed to set soft %s gfxclk !\n",
+ max ? "max" : "min");
+ return ret;
+ }
+ }
+
+ if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT) &&
+ (feature_mask & FEATURE_DPM_UCLK_MASK)) {
+ single_dpm_table = &(dpm_table->mem_table);
+ freq = max ? single_dpm_table->dpm_state.soft_max_level :
+ single_dpm_table->dpm_state.soft_min_level;
+ ret = smu_send_smc_msg_with_param(smu,
+ (max ? SMU_MSG_SetSoftMaxByFreq : SMU_MSG_SetSoftMinByFreq),
+ (PPCLK_UCLK << 16) | (freq & 0xffff));
+ if (ret) {
+ pr_err("Failed to set soft %s memclk !\n",
+ max ? "max" : "min");
+ return ret;
+ }
+ }
+
+ if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT) &&
+ (feature_mask & FEATURE_DPM_SOCCLK_MASK)) {
+ single_dpm_table = &(dpm_table->soc_table);
+ freq = max ? single_dpm_table->dpm_state.soft_max_level :
+ single_dpm_table->dpm_state.soft_min_level;
+ ret = smu_send_smc_msg_with_param(smu,
+ (max ? SMU_MSG_SetSoftMaxByFreq : SMU_MSG_SetSoftMinByFreq),
+ (PPCLK_SOCCLK << 16) | (freq & 0xffff));
+ if (ret) {
+ pr_err("Failed to set soft %s socclk !\n",
+ max ? "max" : "min");
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+static int arcturus_force_clk_levels(struct smu_context *smu,
+ enum smu_clk_type type, uint32_t mask)
+{
+ struct arcturus_dpm_table *dpm_table;
+ struct arcturus_single_dpm_table *single_dpm_table;
+ uint32_t soft_min_level, soft_max_level;
+ int ret = 0;
+
+ mutex_lock(&(smu->mutex));
+
+ soft_min_level = mask ? (ffs(mask) - 1) : 0;
+ soft_max_level = mask ? (fls(mask) - 1) : 0;
+
+ dpm_table = smu->smu_dpm.dpm_context;
+
+ switch (type) {
+ case SMU_SCLK:
+ single_dpm_table = &(dpm_table->gfx_table);
+
+ if (soft_max_level >= single_dpm_table->count) {
+ pr_err("Clock level specified %d is over max allowed %d\n",
+ soft_max_level, single_dpm_table->count - 1);
+ ret = -EINVAL;
+ break;
+ }
+
+ single_dpm_table->dpm_state.soft_min_level =
+ single_dpm_table->dpm_levels[soft_min_level].value;
+ single_dpm_table->dpm_state.soft_max_level =
+ single_dpm_table->dpm_levels[soft_max_level].value;
+
+ ret = arcturus_upload_dpm_level(smu, false, FEATURE_DPM_GFXCLK_MASK);
+ if (ret) {
+ pr_err("Failed to upload boot level to lowest!\n");
+ break;
+ }
+
+ ret = arcturus_upload_dpm_level(smu, true, FEATURE_DPM_GFXCLK_MASK);
+ if (ret)
+ pr_err("Failed to upload dpm max level to highest!\n");
+
+ break;
+
+ case SMU_MCLK:
+ single_dpm_table = &(dpm_table->mem_table);
+
+ if (soft_max_level >= single_dpm_table->count) {
+ pr_err("Clock level specified %d is over max allowed %d\n",
+ soft_max_level, single_dpm_table->count - 1);
+ ret = -EINVAL;
+ break;
+ }
+
+ single_dpm_table->dpm_state.soft_min_level =
+ single_dpm_table->dpm_levels[soft_min_level].value;
+ single_dpm_table->dpm_state.soft_max_level =
+ single_dpm_table->dpm_levels[soft_max_level].value;
+
+ ret = arcturus_upload_dpm_level(smu, false, FEATURE_DPM_UCLK_MASK);
+ if (ret) {
+ pr_err("Failed to upload boot level to lowest!\n");
+ break;
+ }
+
+ ret = arcturus_upload_dpm_level(smu, true, FEATURE_DPM_UCLK_MASK);
+ if (ret)
+ pr_err("Failed to upload dpm max level to highest!\n");
+
+ break;
+
+ case SMU_SOCCLK:
+ single_dpm_table = &(dpm_table->soc_table);
+
+ if (soft_max_level >= single_dpm_table->count) {
+ pr_err("Clock level specified %d is over max allowed %d\n",
+ soft_max_level, single_dpm_table->count - 1);
+ ret = -EINVAL;
+ break;
+ }
+
+ single_dpm_table->dpm_state.soft_min_level =
+ single_dpm_table->dpm_levels[soft_min_level].value;
+ single_dpm_table->dpm_state.soft_max_level =
+ single_dpm_table->dpm_levels[soft_max_level].value;
+
+ ret = arcturus_upload_dpm_level(smu, false, FEATURE_DPM_SOCCLK_MASK);
+ if (ret) {
+ pr_err("Failed to upload boot level to lowest!\n");
+ break;
+ }
+
+ ret = arcturus_upload_dpm_level(smu, true, FEATURE_DPM_SOCCLK_MASK);
+ if (ret)
+ pr_err("Failed to upload dpm max level to highest!\n");
+
+ break;
+
+ case SMU_FCLK:
+ single_dpm_table = &(dpm_table->fclk_table);
+
+ if (soft_max_level >= single_dpm_table->count) {
+ pr_err("Clock level specified %d is over max allowed %d\n",
+ soft_max_level, single_dpm_table->count - 1);
+ ret = -EINVAL;
+ break;
+ }
+
+ single_dpm_table->dpm_state.soft_min_level =
+ single_dpm_table->dpm_levels[soft_min_level].value;
+ single_dpm_table->dpm_state.soft_max_level =
+ single_dpm_table->dpm_levels[soft_max_level].value;
+
+ ret = arcturus_upload_dpm_level(smu, false, FEATURE_DPM_FCLK_MASK);
+ if (ret) {
+ pr_err("Failed to upload boot level to lowest!\n");
+ break;
+ }
+
+ ret = arcturus_upload_dpm_level(smu, true, FEATURE_DPM_FCLK_MASK);
+ if (ret)
+ pr_err("Failed to upload dpm max level to highest!\n");
+
+ break;
+
+ default:
+ break;
+ }
+
+ mutex_unlock(&(smu->mutex));
+ return ret;
+}
+
+static int arcturus_get_thermal_temperature_range(struct smu_context *smu,
+ struct smu_temperature_range *range)
+{
+ PPTable_t *pptable = smu->smu_table.driver_pptable;
+
+ if (!range)
+ return -EINVAL;
+
+ range->max = pptable->TedgeLimit *
+ SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ range->edge_emergency_max = (pptable->TedgeLimit + CTF_OFFSET_EDGE) *
+ SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ range->hotspot_crit_max = pptable->ThotspotLimit *
+ SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ range->hotspot_emergency_max = (pptable->ThotspotLimit + CTF_OFFSET_HOTSPOT) *
+ SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ range->mem_crit_max = pptable->TmemLimit *
+ SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ range->mem_emergency_max = (pptable->TmemLimit + CTF_OFFSET_HBM)*
+ SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+
+ return 0;
+}
+
+static int arcturus_get_metrics_table(struct smu_context *smu,
+ SmuMetrics_t *metrics_table)
+{
+ struct smu_table_context *smu_table= &smu->smu_table;
+ int ret = 0;
+
+ if (!smu_table->metrics_time ||
+ time_after(jiffies, smu_table->metrics_time + HZ / 1000)) {
+ ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0,
+ (void *)smu_table->metrics_table, false);
+ if (ret) {
+ pr_info("Failed to export SMU metrics table!\n");
+ return ret;
+ }
+ smu_table->metrics_time = jiffies;
+ }
+
+ memcpy(metrics_table, smu_table->metrics_table, sizeof(SmuMetrics_t));
+
+ return ret;
+}
+
+static int arcturus_get_current_activity_percent(struct smu_context *smu,
+ enum amd_pp_sensors sensor,
+ uint32_t *value)
+{
+ SmuMetrics_t metrics;
+ int ret = 0;
+
+ if (!value)
+ return -EINVAL;
+
+ ret = arcturus_get_metrics_table(smu, &metrics);
+ if (ret)
+ return ret;
+
+ switch (sensor) {
+ case AMDGPU_PP_SENSOR_GPU_LOAD:
+ *value = metrics.AverageGfxActivity;
+ break;
+ case AMDGPU_PP_SENSOR_MEM_LOAD:
+ *value = metrics.AverageUclkActivity;
+ break;
+ default:
+ pr_err("Invalid sensor for retrieving clock activity\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int arcturus_get_gpu_power(struct smu_context *smu, uint32_t *value)
+{
+ SmuMetrics_t metrics;
+ int ret = 0;
+
+ if (!value)
+ return -EINVAL;
+
+ ret = arcturus_get_metrics_table(smu, &metrics);
+ if (ret)
+ return ret;
+
+ *value = metrics.AverageSocketPower << 8;
+
+ return 0;
+}
+
+static int arcturus_thermal_get_temperature(struct smu_context *smu,
+ enum amd_pp_sensors sensor,
+ uint32_t *value)
+{
+ SmuMetrics_t metrics;
+ int ret = 0;
+
+ if (!value)
+ return -EINVAL;
+
+ ret = arcturus_get_metrics_table(smu, &metrics);
+ if (ret)
+ return ret;
+
+ switch (sensor) {
+ case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
+ *value = metrics.TemperatureHotspot *
+ SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ break;
+ case AMDGPU_PP_SENSOR_EDGE_TEMP:
+ *value = metrics.TemperatureEdge *
+ SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ break;
+ case AMDGPU_PP_SENSOR_MEM_TEMP:
+ *value = metrics.TemperatureHBM *
+ SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ break;
+ default:
+ pr_err("Invalid sensor for retrieving temp\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int arcturus_read_sensor(struct smu_context *smu,
+ enum amd_pp_sensors sensor,
+ void *data, uint32_t *size)
+{
+ struct smu_table_context *table_context = &smu->smu_table;
+ PPTable_t *pptable = table_context->driver_pptable;
+ int ret = 0;
+
+ if (!data || !size)
+ return -EINVAL;
+
+ switch (sensor) {
+ case AMDGPU_PP_SENSOR_MAX_FAN_RPM:
+ *(uint32_t *)data = pptable->FanMaximumRpm;
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_MEM_LOAD:
+ case AMDGPU_PP_SENSOR_GPU_LOAD:
+ ret = arcturus_get_current_activity_percent(smu,
+ sensor,
+ (uint32_t *)data);
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_GPU_POWER:
+ ret = arcturus_get_gpu_power(smu, (uint32_t *)data);
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
+ case AMDGPU_PP_SENSOR_EDGE_TEMP:
+ case AMDGPU_PP_SENSOR_MEM_TEMP:
+ ret = arcturus_thermal_get_temperature(smu, sensor,
+ (uint32_t *)data);
+ *size = 4;
+ break;
+ default:
+ ret = smu_smc_read_sensor(smu, sensor, data, size);
+ }
+
+ return ret;
+}
+
+static int arcturus_get_fan_speed_rpm(struct smu_context *smu,
+ uint32_t *speed)
+{
+ SmuMetrics_t metrics;
+ int ret = 0;
+
+ if (!speed)
+ return -EINVAL;
+
+ ret = arcturus_get_metrics_table(smu, &metrics);
+ if (ret)
+ return ret;
+
+ *speed = metrics.CurrFanSpeed;
+
+ return ret;
+}
+
+static int arcturus_get_fan_speed_percent(struct smu_context *smu,
+ uint32_t *speed)
+{
+ PPTable_t *pptable = smu->smu_table.driver_pptable;
+ uint32_t percent, current_rpm;
+ int ret = 0;
+
+ if (!speed)
+ return -EINVAL;
+
+ ret = arcturus_get_fan_speed_rpm(smu, &current_rpm);
+ if (ret)
+ return ret;
+
+ percent = current_rpm * 100 / pptable->FanMaximumRpm;
+ *speed = percent > 100 ? 100 : percent;
+
+ return ret;
+}
+
+static int arcturus_get_current_clk_freq_by_table(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ uint32_t *value)
+{
+ static SmuMetrics_t metrics;
+ int ret = 0, clk_id = 0;
+
+ if (!value)
+ return -EINVAL;
+
+ clk_id = smu_clk_get_index(smu, clk_type);
+ if (clk_id < 0)
+ return -EINVAL;
+
+ ret = arcturus_get_metrics_table(smu, &metrics);
+ if (ret)
+ return ret;
+
+ switch (clk_id) {
+ case PPCLK_GFXCLK:
+ /*
+ * CurrClock[clk_id] can provide accurate
+ * output only when the dpm feature is enabled.
+ * We can use Average_* for dpm disabled case.
+ * But this is available for gfxclk/uclk/socclk.
+ */
+ if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT))
+ *value = metrics.CurrClock[PPCLK_GFXCLK];
+ else
+ *value = metrics.AverageGfxclkFrequency;
+ break;
+ case PPCLK_UCLK:
+ if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT))
+ *value = metrics.CurrClock[PPCLK_UCLK];
+ else
+ *value = metrics.AverageUclkFrequency;
+ break;
+ case PPCLK_SOCCLK:
+ if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT))
+ *value = metrics.CurrClock[PPCLK_SOCCLK];
+ else
+ *value = metrics.AverageSocclkFrequency;
+ break;
+ default:
+ *value = metrics.CurrClock[clk_id];
+ break;
+ }
+
+ return ret;
+}
+
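+/* Return the index of the lowest enabled DPM level, enabling level 0 as a fallback */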
+static uint32_t arcturus_find_lowest_dpm_level(struct arcturus_single_dpm_table *table)
+{
+ uint32_t i;
+
+ for (i = 0; i < table->count; i++) {
+ if (table->dpm_levels[i].enabled)
+ break;
+ }
+ if (i >= table->count) {
+ i = 0;
+ table->dpm_levels[i].enabled = true;
+ }
+
+ return i;
+}
+
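+/* Return the index of the highest enabled DPM level, falling back to level 0 */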
+static uint32_t arcturus_find_highest_dpm_level(struct arcturus_single_dpm_table *table)
+{
+ int i = 0;
+
+ if (table->count <= 0) {
+ pr_err("[%s] DPM Table has no entry!", __func__);
+ return 0;
+ }
+ if (table->count > MAX_DPM_NUMBER) {
+ pr_err("[%s] DPM Table has too many entries!", __func__);
+ return MAX_DPM_NUMBER - 1;
+ }
+
+ for (i = table->count - 1; i >= 0; i--) {
+ if (table->dpm_levels[i].enabled)
+ break;
+ }
+ if (i < 0) {
+ i = 0;
+ table->dpm_levels[i].enabled = true;
+ }
+
+ return i;
+}
+
+static int arcturus_force_dpm_limit_value(struct smu_context *smu, bool highest)
+{
+ struct arcturus_dpm_table *dpm_table =
+ (struct arcturus_dpm_table *)smu->smu_dpm.dpm_context;
+ uint32_t soft_level;
+ int ret = 0;
+
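+ /* Pin gfxclk, uclk and socclk by forcing soft min and soft max to the same level */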
+ /* gfxclk */
+ if (highest)
+ soft_level = arcturus_find_highest_dpm_level(&(dpm_table->gfx_table));
+ else
+ soft_level = arcturus_find_lowest_dpm_level(&(dpm_table->gfx_table));
+
+ dpm_table->gfx_table.dpm_state.soft_min_level =
+ dpm_table->gfx_table.dpm_state.soft_max_level =
+ dpm_table->gfx_table.dpm_levels[soft_level].value;
+
+ /* uclk */
+ if (highest)
+ soft_level = arcturus_find_highest_dpm_level(&(dpm_table->mem_table));
+ else
+ soft_level = arcturus_find_lowest_dpm_level(&(dpm_table->mem_table));
+
+ dpm_table->mem_table.dpm_state.soft_min_level =
+ dpm_table->mem_table.dpm_state.soft_max_level =
+ dpm_table->mem_table.dpm_levels[soft_level].value;
+
+ /* socclk */
+ if (highest)
+ soft_level = arcturus_find_highest_dpm_level(&(dpm_table->soc_table));
+ else
+ soft_level = arcturus_find_lowest_dpm_level(&(dpm_table->soc_table));
+
+ dpm_table->soc_table.dpm_state.soft_min_level =
+ dpm_table->soc_table.dpm_state.soft_max_level =
+ dpm_table->soc_table.dpm_levels[soft_level].value;
+
+ ret = arcturus_upload_dpm_level(smu, false, 0xFFFFFFFF);
+ if (ret) {
+ pr_err("Failed to upload boot level to %s!\n",
+ highest ? "highest" : "lowest");
+ return ret;
+ }
+
+ ret = arcturus_upload_dpm_level(smu, true, 0xFFFFFFFF);
+ if (ret) {
+ pr_err("Failed to upload dpm max level to %s!\n!",
+ highest ? "highest" : "lowest");
+ return ret;
+ }
+
+ return ret;
+}
+
+static int arcturus_unforce_dpm_levels(struct smu_context *smu)
+{
+ struct arcturus_dpm_table *dpm_table =
+ (struct arcturus_dpm_table *)smu->smu_dpm.dpm_context;
+ uint32_t soft_min_level, soft_max_level;
+ int ret = 0;
+
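+ /* Restore the full soft min/max range for gfxclk, uclk and socclk */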
+ /* gfxclk */
+ soft_min_level = arcturus_find_lowest_dpm_level(&(dpm_table->gfx_table));
+ soft_max_level = arcturus_find_highest_dpm_level(&(dpm_table->gfx_table));
+ dpm_table->gfx_table.dpm_state.soft_min_level =
+ dpm_table->gfx_table.dpm_levels[soft_min_level].value;
+ dpm_table->gfx_table.dpm_state.soft_max_level =
+ dpm_table->gfx_table.dpm_levels[soft_max_level].value;
+
+ /* uclk */
+ soft_min_level = arcturus_find_lowest_dpm_level(&(dpm_table->mem_table));
+ soft_max_level = arcturus_find_highest_dpm_level(&(dpm_table->mem_table));
+ dpm_table->mem_table.dpm_state.soft_min_level =
+ dpm_table->mem_table.dpm_levels[soft_min_level].value;
+ dpm_table->mem_table.dpm_state.soft_max_level =
+ dpm_table->mem_table.dpm_levels[soft_max_level].value;
+
+ /* socclk */
+ soft_min_level = arcturus_find_lowest_dpm_level(&(dpm_table->soc_table));
+ soft_max_level = arcturus_find_highest_dpm_level(&(dpm_table->soc_table));
+ dpm_table->soc_table.dpm_state.soft_min_level =
+ dpm_table->soc_table.dpm_levels[soft_min_level].value;
+ dpm_table->soc_table.dpm_state.soft_max_level =
+ dpm_table->soc_table.dpm_levels[soft_max_level].value;
+
+ ret = arcturus_upload_dpm_level(smu, false, 0xFFFFFFFF);
+ if (ret) {
+ pr_err("Failed to upload DPM Bootup Levels!");
+ return ret;
+ }
+
+ ret = arcturus_upload_dpm_level(smu, true, 0xFFFFFFFF);
+ if (ret) {
+ pr_err("Failed to upload DPM Max Levels!");
+ return ret;
+ }
+
+ return ret;
+}
+
+static int
+arcturus_get_profiling_clk_mask(struct smu_context *smu,
+ enum amd_dpm_forced_level level,
+ uint32_t *sclk_mask,
+ uint32_t *mclk_mask,
+ uint32_t *soc_mask)
+{
+ struct arcturus_dpm_table *dpm_table =
+ (struct arcturus_dpm_table *)smu->smu_dpm.dpm_context;
+ struct arcturus_single_dpm_table *gfx_dpm_table;
+ struct arcturus_single_dpm_table *mem_dpm_table;
+ struct arcturus_single_dpm_table *soc_dpm_table;
+
+ if (!smu->smu_dpm.dpm_context)
+ return -EINVAL;
+
+ gfx_dpm_table = &dpm_table->gfx_table;
+ mem_dpm_table = &dpm_table->mem_table;
+ soc_dpm_table = &dpm_table->soc_table;
+
+ *sclk_mask = 0;
+ *mclk_mask = 0;
+ *soc_mask = 0;
+
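+ /* Default to the UMD pstate levels when the DPM tables expose enough entries */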
+ if (gfx_dpm_table->count > ARCTURUS_UMD_PSTATE_GFXCLK_LEVEL &&
+ mem_dpm_table->count > ARCTURUS_UMD_PSTATE_MCLK_LEVEL &&
+ soc_dpm_table->count > ARCTURUS_UMD_PSTATE_SOCCLK_LEVEL) {
+ *sclk_mask = ARCTURUS_UMD_PSTATE_GFXCLK_LEVEL;
+ *mclk_mask = ARCTURUS_UMD_PSTATE_MCLK_LEVEL;
+ *soc_mask = ARCTURUS_UMD_PSTATE_SOCCLK_LEVEL;
+ }
+
+ if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
+ *sclk_mask = 0;
+ } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) {
+ *mclk_mask = 0;
+ } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
+ *sclk_mask = gfx_dpm_table->count - 1;
+ *mclk_mask = mem_dpm_table->count - 1;
+ *soc_mask = soc_dpm_table->count - 1;
+ }
+
+ return 0;
+}
+
+static int arcturus_get_power_limit(struct smu_context *smu,
+ uint32_t *limit,
+ bool asic_default)
+{
+ PPTable_t *pptable = smu->smu_table.driver_pptable;
+ uint32_t asic_default_power_limit = 0;
+ int ret = 0;
+ int power_src;
+
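+ /* Populate the cached default/current power limits on first use */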
+ if (!smu->default_power_limit ||
+ !smu->power_limit) {
+ if (smu_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) {
+ power_src = smu_power_get_index(smu, SMU_POWER_SOURCE_AC);
+ if (power_src < 0)
+ return -EINVAL;
+
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetPptLimit,
+ power_src << 16);
+ if (ret) {
+ pr_err("[%s] get PPT limit failed!", __func__);
+ return ret;
+ }
+ smu_read_smc_arg(smu, &asic_default_power_limit);
+ } else {
+ /* the last hope to figure out the ppt limit */
+ if (!pptable) {
+ pr_err("Cannot get PPT limit due to pptable missing!");
+ return -EINVAL;
+ }
+ asic_default_power_limit =
+ pptable->SocketPowerLimitAc[PPT_THROTTLER_PPT0];
+ }
+
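+ /* Scale the limit by the overdrive TDP percentage when OD is enabled */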
+ if (smu->od_enabled) {
+ asic_default_power_limit *= (100 + smu->smu_table.TDPODLimit);
+ asic_default_power_limit /= 100;
+ }
+
+ smu->default_power_limit = asic_default_power_limit;
+ smu->power_limit = asic_default_power_limit;
+ }
+
+ if (asic_default)
+ *limit = smu->default_power_limit;
+ else
+ *limit = smu->power_limit;
+
+ return 0;
+}
+
+static int arcturus_get_power_profile_mode(struct smu_context *smu,
+ char *buf)
+{
+ static const char *profile_name[] = {
+ "BOOTUP_DEFAULT",
+ "3D_FULL_SCREEN",
+ "POWER_SAVING",
+ "VIDEO",
+ "VR",
+ "COMPUTE",
+ "CUSTOM"};
+ uint32_t i, size = 0;
+ int16_t workload_type = 0;
+
+ if (!smu->pm_enabled || !buf)
+ return -EINVAL;
+
+ for (i = 0; i <= PP_SMC_POWER_PROFILE_CUSTOM; i++) {
+ /*
+ * Conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT
+ * Not all profile modes are supported on arcturus.
+ */
+ workload_type = smu_workload_get_type(smu, i);
+ if (workload_type < 0)
+ continue;
+
+ size += sprintf(buf + size, "%2d %14s%s\n",
+ i, profile_name[i], (i == smu->power_profile_mode) ? "*" : " ");
+ }
+
+ return size;
+}
+
+static int arcturus_set_power_profile_mode(struct smu_context *smu,
+ long *input,
+ uint32_t size)
+{
+ int workload_type = 0;
+ uint32_t profile_mode = input[size];
+ int ret = 0;
+
+ if (!smu->pm_enabled)
+ return -EINVAL;
+
+ if (profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) {
+ pr_err("Invalid power profile mode %d\n", profile_mode);
+ return -EINVAL;
+ }
+
+ /*
+ * Conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT
+ * Not all profile modes are supported on arcturus.
+ */
+ workload_type = smu_workload_get_type(smu, profile_mode);
+ if (workload_type < 0) {
+ pr_err("Unsupported power profile mode %d on arcturus\n", profile_mode);
+ return -EINVAL;
+ }
+
+ ret = smu_send_smc_msg_with_param(smu,
+ SMU_MSG_SetWorkloadMask,
+ 1 << workload_type);
+ if (ret) {
+ pr_err("Fail to set workload type %d\n", workload_type);
+ return ret;
+ }
+
+ smu->power_profile_mode = profile_mode;
+
+ return 0;
+}
+
+static void arcturus_dump_pptable(struct smu_context *smu)
+{
+ struct smu_table_context *table_context = &smu->smu_table;
+ PPTable_t *pptable = table_context->driver_pptable;
+ int i;
+
+ pr_info("Dumped PPTable:\n");
+
+ pr_info("Version = 0x%08x\n", pptable->Version);
+
+ pr_info("FeaturesToRun[0] = 0x%08x\n", pptable->FeaturesToRun[0]);
+ pr_info("FeaturesToRun[1] = 0x%08x\n", pptable->FeaturesToRun[1]);
+
+ for (i = 0; i < PPT_THROTTLER_COUNT; i++) {
+ pr_info("SocketPowerLimitAc[%d] = %d\n", i, pptable->SocketPowerLimitAc[i]);
+ pr_info("SocketPowerLimitAcTau[%d] = %d\n", i, pptable->SocketPowerLimitAcTau[i]);
+ }
+
+ pr_info("TdcLimitSoc = %d\n", pptable->TdcLimitSoc);
+ pr_info("TdcLimitSocTau = %d\n", pptable->TdcLimitSocTau);
+ pr_info("TdcLimitGfx = %d\n", pptable->TdcLimitGfx);
+ pr_info("TdcLimitGfxTau = %d\n", pptable->TdcLimitGfxTau);
+
+ pr_info("TedgeLimit = %d\n", pptable->TedgeLimit);
+ pr_info("ThotspotLimit = %d\n", pptable->ThotspotLimit);
+ pr_info("TmemLimit = %d\n", pptable->TmemLimit);
+ pr_info("Tvr_gfxLimit = %d\n", pptable->Tvr_gfxLimit);
+ pr_info("Tvr_memLimit = %d\n", pptable->Tvr_memLimit);
+ pr_info("Tvr_socLimit = %d\n", pptable->Tvr_socLimit);
+ pr_info("FitLimit = %d\n", pptable->FitLimit);
+
+ pr_info("PpmPowerLimit = %d\n", pptable->PpmPowerLimit);
+ pr_info("PpmTemperatureThreshold = %d\n", pptable->PpmTemperatureThreshold);
+
+ pr_info("ThrottlerControlMask = %d\n", pptable->ThrottlerControlMask);
+
+ pr_info("UlvVoltageOffsetGfx = %d\n", pptable->UlvVoltageOffsetGfx);
+ pr_info("UlvPadding = 0x%08x\n", pptable->UlvPadding);
+
+ pr_info("UlvGfxclkBypass = %d\n", pptable->UlvGfxclkBypass);
+ pr_info("Padding234[0] = 0x%02x\n", pptable->Padding234[0]);
+ pr_info("Padding234[1] = 0x%02x\n", pptable->Padding234[1]);
+ pr_info("Padding234[2] = 0x%02x\n", pptable->Padding234[2]);
+
+ pr_info("MinVoltageGfx = %d\n", pptable->MinVoltageGfx);
+ pr_info("MinVoltageSoc = %d\n", pptable->MinVoltageSoc);
+ pr_info("MaxVoltageGfx = %d\n", pptable->MaxVoltageGfx);
+ pr_info("MaxVoltageSoc = %d\n", pptable->MaxVoltageSoc);
+
+ pr_info("LoadLineResistanceGfx = %d\n", pptable->LoadLineResistanceGfx);
+ pr_info("LoadLineResistanceSoc = %d\n", pptable->LoadLineResistanceSoc);
+
+ pr_info("[PPCLK_GFXCLK]\n"
+ " .VoltageMode = 0x%02x\n"
+ " .SnapToDiscrete = 0x%02x\n"
+ " .NumDiscreteLevels = 0x%02x\n"
+ " .padding = 0x%02x\n"
+ " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n"
+ " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n"
+ " .SsFmin = 0x%04x\n"
+ " .Padding_16 = 0x%04x\n",
+ pptable->DpmDescriptor[PPCLK_GFXCLK].VoltageMode,
+ pptable->DpmDescriptor[PPCLK_GFXCLK].SnapToDiscrete,
+ pptable->DpmDescriptor[PPCLK_GFXCLK].NumDiscreteLevels,
+ pptable->DpmDescriptor[PPCLK_GFXCLK].padding,
+ pptable->DpmDescriptor[PPCLK_GFXCLK].ConversionToAvfsClk.m,
+ pptable->DpmDescriptor[PPCLK_GFXCLK].ConversionToAvfsClk.b,
+ pptable->DpmDescriptor[PPCLK_GFXCLK].SsCurve.a,
+ pptable->DpmDescriptor[PPCLK_GFXCLK].SsCurve.b,
+ pptable->DpmDescriptor[PPCLK_GFXCLK].SsCurve.c,
+ pptable->DpmDescriptor[PPCLK_GFXCLK].SsFmin,
+ pptable->DpmDescriptor[PPCLK_GFXCLK].Padding16);
+
+ pr_info("[PPCLK_VCLK]\n"
+ " .VoltageMode = 0x%02x\n"
+ " .SnapToDiscrete = 0x%02x\n"
+ " .NumDiscreteLevels = 0x%02x\n"
+ " .padding = 0x%02x\n"
+ " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n"
+ " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n"
+ " .SsFmin = 0x%04x\n"
+ " .Padding_16 = 0x%04x\n",
+ pptable->DpmDescriptor[PPCLK_VCLK].VoltageMode,
+ pptable->DpmDescriptor[PPCLK_VCLK].SnapToDiscrete,
+ pptable->DpmDescriptor[PPCLK_VCLK].NumDiscreteLevels,
+ pptable->DpmDescriptor[PPCLK_VCLK].padding,
+ pptable->DpmDescriptor[PPCLK_VCLK].ConversionToAvfsClk.m,
+ pptable->DpmDescriptor[PPCLK_VCLK].ConversionToAvfsClk.b,
+ pptable->DpmDescriptor[PPCLK_VCLK].SsCurve.a,
+ pptable->DpmDescriptor[PPCLK_VCLK].SsCurve.b,
+ pptable->DpmDescriptor[PPCLK_VCLK].SsCurve.c,
+ pptable->DpmDescriptor[PPCLK_VCLK].SsFmin,
+ pptable->DpmDescriptor[PPCLK_VCLK].Padding16);
+
+ pr_info("[PPCLK_DCLK]\n"
+ " .VoltageMode = 0x%02x\n"
+ " .SnapToDiscrete = 0x%02x\n"
+ " .NumDiscreteLevels = 0x%02x\n"
+ " .padding = 0x%02x\n"
+ " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n"
+ " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n"
+ " .SsFmin = 0x%04x\n"
+ " .Padding_16 = 0x%04x\n",
+ pptable->DpmDescriptor[PPCLK_DCLK].VoltageMode,
+ pptable->DpmDescriptor[PPCLK_DCLK].SnapToDiscrete,
+ pptable->DpmDescriptor[PPCLK_DCLK].NumDiscreteLevels,
+ pptable->DpmDescriptor[PPCLK_DCLK].padding,
+ pptable->DpmDescriptor[PPCLK_DCLK].ConversionToAvfsClk.m,
+ pptable->DpmDescriptor[PPCLK_DCLK].ConversionToAvfsClk.b,
+ pptable->DpmDescriptor[PPCLK_DCLK].SsCurve.a,
+ pptable->DpmDescriptor[PPCLK_DCLK].SsCurve.b,
+ pptable->DpmDescriptor[PPCLK_DCLK].SsCurve.c,
+ pptable->DpmDescriptor[PPCLK_DCLK].SsFmin,
+ pptable->DpmDescriptor[PPCLK_DCLK].Padding16);
+
+ pr_info("[PPCLK_SOCCLK]\n"
+ " .VoltageMode = 0x%02x\n"
+ " .SnapToDiscrete = 0x%02x\n"
+ " .NumDiscreteLevels = 0x%02x\n"
+ " .padding = 0x%02x\n"
+ " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n"
+ " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n"
+ " .SsFmin = 0x%04x\n"
+ " .Padding_16 = 0x%04x\n",
+ pptable->DpmDescriptor[PPCLK_SOCCLK].VoltageMode,
+ pptable->DpmDescriptor[PPCLK_SOCCLK].SnapToDiscrete,
+ pptable->DpmDescriptor[PPCLK_SOCCLK].NumDiscreteLevels,
+ pptable->DpmDescriptor[PPCLK_SOCCLK].padding,
+ pptable->DpmDescriptor[PPCLK_SOCCLK].ConversionToAvfsClk.m,
+ pptable->DpmDescriptor[PPCLK_SOCCLK].ConversionToAvfsClk.b,
+ pptable->DpmDescriptor[PPCLK_SOCCLK].SsCurve.a,
+ pptable->DpmDescriptor[PPCLK_SOCCLK].SsCurve.b,
+ pptable->DpmDescriptor[PPCLK_SOCCLK].SsCurve.c,
+ pptable->DpmDescriptor[PPCLK_SOCCLK].SsFmin,
+ pptable->DpmDescriptor[PPCLK_SOCCLK].Padding16);
+
+ pr_info("[PPCLK_UCLK]\n"
+ " .VoltageMode = 0x%02x\n"
+ " .SnapToDiscrete = 0x%02x\n"
+ " .NumDiscreteLevels = 0x%02x\n"
+ " .padding = 0x%02x\n"
+ " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n"
+ " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n"
+ " .SsFmin = 0x%04x\n"
+ " .Padding_16 = 0x%04x\n",
+ pptable->DpmDescriptor[PPCLK_UCLK].VoltageMode,
+ pptable->DpmDescriptor[PPCLK_UCLK].SnapToDiscrete,
+ pptable->DpmDescriptor[PPCLK_UCLK].NumDiscreteLevels,
+ pptable->DpmDescriptor[PPCLK_UCLK].padding,
+ pptable->DpmDescriptor[PPCLK_UCLK].ConversionToAvfsClk.m,
+ pptable->DpmDescriptor[PPCLK_UCLK].ConversionToAvfsClk.b,
+ pptable->DpmDescriptor[PPCLK_UCLK].SsCurve.a,
+ pptable->DpmDescriptor[PPCLK_UCLK].SsCurve.b,
+ pptable->DpmDescriptor[PPCLK_UCLK].SsCurve.c,
+ pptable->DpmDescriptor[PPCLK_UCLK].SsFmin,
+ pptable->DpmDescriptor[PPCLK_UCLK].Padding16);
+
+ pr_info("[PPCLK_FCLK]\n"
+ " .VoltageMode = 0x%02x\n"
+ " .SnapToDiscrete = 0x%02x\n"
+ " .NumDiscreteLevels = 0x%02x\n"
+ " .padding = 0x%02x\n"
+ " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n"
+ " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n"
+ " .SsFmin = 0x%04x\n"
+ " .Padding_16 = 0x%04x\n",
+ pptable->DpmDescriptor[PPCLK_FCLK].VoltageMode,
+ pptable->DpmDescriptor[PPCLK_FCLK].SnapToDiscrete,
+ pptable->DpmDescriptor[PPCLK_FCLK].NumDiscreteLevels,
+ pptable->DpmDescriptor[PPCLK_FCLK].padding,
+ pptable->DpmDescriptor[PPCLK_FCLK].ConversionToAvfsClk.m,
+ pptable->DpmDescriptor[PPCLK_FCLK].ConversionToAvfsClk.b,
+ pptable->DpmDescriptor[PPCLK_FCLK].SsCurve.a,
+ pptable->DpmDescriptor[PPCLK_FCLK].SsCurve.b,
+ pptable->DpmDescriptor[PPCLK_FCLK].SsCurve.c,
+ pptable->DpmDescriptor[PPCLK_FCLK].SsFmin,
+ pptable->DpmDescriptor[PPCLK_FCLK].Padding16);
+
+
+ pr_info("FreqTableGfx\n");
+ for (i = 0; i < NUM_GFXCLK_DPM_LEVELS; i++)
+ pr_info(" .[%02d] = %d\n", i, pptable->FreqTableGfx[i]);
+
+ pr_info("FreqTableVclk\n");
+ for (i = 0; i < NUM_VCLK_DPM_LEVELS; i++)
+ pr_info(" .[%02d] = %d\n", i, pptable->FreqTableVclk[i]);
+
+ pr_info("FreqTableDclk\n");
+ for (i = 0; i < NUM_DCLK_DPM_LEVELS; i++)
+ pr_info(" .[%02d] = %d\n", i, pptable->FreqTableDclk[i]);
+
+ pr_info("FreqTableSocclk\n");
+ for (i = 0; i < NUM_SOCCLK_DPM_LEVELS; i++)
+ pr_info(" .[%02d] = %d\n", i, pptable->FreqTableSocclk[i]);
+
+ pr_info("FreqTableUclk\n");
+ for (i = 0; i < NUM_UCLK_DPM_LEVELS; i++)
+ pr_info(" .[%02d] = %d\n", i, pptable->FreqTableUclk[i]);
+
+ pr_info("FreqTableFclk\n");
+ for (i = 0; i < NUM_FCLK_DPM_LEVELS; i++)
+ pr_info(" .[%02d] = %d\n", i, pptable->FreqTableFclk[i]);
+
+ pr_info("Mp0clkFreq\n");
+ for (i = 0; i < NUM_MP0CLK_DPM_LEVELS; i++)
+ pr_info(" .[%d] = %d\n", i, pptable->Mp0clkFreq[i]);
+
+ pr_info("Mp0DpmVoltage\n");
+ for (i = 0; i < NUM_MP0CLK_DPM_LEVELS; i++)
+ pr_info(" .[%d] = %d\n", i, pptable->Mp0DpmVoltage[i]);
+
+ pr_info("GfxclkFidle = 0x%x\n", pptable->GfxclkFidle);
+ pr_info("GfxclkSlewRate = 0x%x\n", pptable->GfxclkSlewRate);
+ pr_info("Padding567[0] = 0x%x\n", pptable->Padding567[0]);
+ pr_info("Padding567[1] = 0x%x\n", pptable->Padding567[1]);
+ pr_info("Padding567[2] = 0x%x\n", pptable->Padding567[2]);
+ pr_info("Padding567[3] = 0x%x\n", pptable->Padding567[3]);
+ pr_info("GfxclkDsMaxFreq = %d\n", pptable->GfxclkDsMaxFreq);
+ pr_info("GfxclkSource = 0x%x\n", pptable->GfxclkSource);
+ pr_info("Padding456 = 0x%x\n", pptable->Padding456);
+
+ pr_info("EnableTdpm = %d\n", pptable->EnableTdpm);
+ pr_info("TdpmHighHystTemperature = %d\n", pptable->TdpmHighHystTemperature);
+ pr_info("TdpmLowHystTemperature = %d\n", pptable->TdpmLowHystTemperature);
+ pr_info("GfxclkFreqHighTempLimit = %d\n", pptable->GfxclkFreqHighTempLimit);
+
+ pr_info("FanStopTemp = %d\n", pptable->FanStopTemp);
+ pr_info("FanStartTemp = %d\n", pptable->FanStartTemp);
+
+ pr_info("FanGainEdge = %d\n", pptable->FanGainEdge);
+ pr_info("FanGainHotspot = %d\n", pptable->FanGainHotspot);
+ pr_info("FanGainVrGfx = %d\n", pptable->FanGainVrGfx);
+ pr_info("FanGainVrSoc = %d\n", pptable->FanGainVrSoc);
+ pr_info("FanGainVrMem = %d\n", pptable->FanGainVrMem);
+ pr_info("FanGainHbm = %d\n", pptable->FanGainHbm);
+
+ pr_info("FanPwmMin = %d\n", pptable->FanPwmMin);
+ pr_info("FanAcousticLimitRpm = %d\n", pptable->FanAcousticLimitRpm);
+ pr_info("FanThrottlingRpm = %d\n", pptable->FanThrottlingRpm);
+ pr_info("FanMaximumRpm = %d\n", pptable->FanMaximumRpm);
+ pr_info("FanTargetTemperature = %d\n", pptable->FanTargetTemperature);
+ pr_info("FanTargetGfxclk = %d\n", pptable->FanTargetGfxclk);
+ pr_info("FanZeroRpmEnable = %d\n", pptable->FanZeroRpmEnable);
+ pr_info("FanTachEdgePerRev = %d\n", pptable->FanTachEdgePerRev);
+ pr_info("FanTempInputSelect = %d\n", pptable->FanTempInputSelect);
+
+ pr_info("FuzzyFan_ErrorSetDelta = %d\n", pptable->FuzzyFan_ErrorSetDelta);
+ pr_info("FuzzyFan_ErrorRateSetDelta = %d\n", pptable->FuzzyFan_ErrorRateSetDelta);
+ pr_info("FuzzyFan_PwmSetDelta = %d\n", pptable->FuzzyFan_PwmSetDelta);
+ pr_info("FuzzyFan_Reserved = %d\n", pptable->FuzzyFan_Reserved);
+
+ pr_info("OverrideAvfsGb[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->OverrideAvfsGb[AVFS_VOLTAGE_GFX]);
+ pr_info("OverrideAvfsGb[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->OverrideAvfsGb[AVFS_VOLTAGE_SOC]);
+ pr_info("Padding8_Avfs[0] = %d\n", pptable->Padding8_Avfs[0]);
+ pr_info("Padding8_Avfs[1] = %d\n", pptable->Padding8_Avfs[1]);
+
+ pr_info("dBtcGbGfxPll{a = 0x%x b = 0x%x c = 0x%x}\n",
+ pptable->dBtcGbGfxPll.a,
+ pptable->dBtcGbGfxPll.b,
+ pptable->dBtcGbGfxPll.c);
+ pr_info("dBtcGbGfxAfll{a = 0x%x b = 0x%x c = 0x%x}\n",
+ pptable->dBtcGbGfxAfll.a,
+ pptable->dBtcGbGfxAfll.b,
+ pptable->dBtcGbGfxAfll.c);
+ pr_info("dBtcGbSoc{a = 0x%x b = 0x%x c = 0x%x}\n",
+ pptable->dBtcGbSoc.a,
+ pptable->dBtcGbSoc.b,
+ pptable->dBtcGbSoc.c);
+
+ pr_info("qAgingGb[AVFS_VOLTAGE_GFX]{m = 0x%x b = 0x%x}\n",
+ pptable->qAgingGb[AVFS_VOLTAGE_GFX].m,
+ pptable->qAgingGb[AVFS_VOLTAGE_GFX].b);
+ pr_info("qAgingGb[AVFS_VOLTAGE_SOC]{m = 0x%x b = 0x%x}\n",
+ pptable->qAgingGb[AVFS_VOLTAGE_SOC].m,
+ pptable->qAgingGb[AVFS_VOLTAGE_SOC].b);
+
+ pr_info("qStaticVoltageOffset[AVFS_VOLTAGE_GFX]{a = 0x%x b = 0x%x c = 0x%x}\n",
+ pptable->qStaticVoltageOffset[AVFS_VOLTAGE_GFX].a,
+ pptable->qStaticVoltageOffset[AVFS_VOLTAGE_GFX].b,
+ pptable->qStaticVoltageOffset[AVFS_VOLTAGE_GFX].c);
+ pr_info("qStaticVoltageOffset[AVFS_VOLTAGE_SOC]{a = 0x%x b = 0x%x c = 0x%x}\n",
+ pptable->qStaticVoltageOffset[AVFS_VOLTAGE_SOC].a,
+ pptable->qStaticVoltageOffset[AVFS_VOLTAGE_SOC].b,
+ pptable->qStaticVoltageOffset[AVFS_VOLTAGE_SOC].c);
+
+ pr_info("DcTol[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcTol[AVFS_VOLTAGE_GFX]);
+ pr_info("DcTol[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcTol[AVFS_VOLTAGE_SOC]);
+
+ pr_info("DcBtcEnabled[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcBtcEnabled[AVFS_VOLTAGE_GFX]);
+ pr_info("DcBtcEnabled[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcBtcEnabled[AVFS_VOLTAGE_SOC]);
+ pr_info("Padding8_GfxBtc[0] = 0x%x\n", pptable->Padding8_GfxBtc[0]);
+ pr_info("Padding8_GfxBtc[1] = 0x%x\n", pptable->Padding8_GfxBtc[1]);
+
+ pr_info("DcBtcMin[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcBtcMin[AVFS_VOLTAGE_GFX]);
+ pr_info("DcBtcMin[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcBtcMin[AVFS_VOLTAGE_SOC]);
+ pr_info("DcBtcMax[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcBtcMax[AVFS_VOLTAGE_GFX]);
+ pr_info("DcBtcMax[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcBtcMax[AVFS_VOLTAGE_SOC]);
+
+ pr_info("DcBtcGb[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcBtcGb[AVFS_VOLTAGE_GFX]);
+ pr_info("DcBtcGb[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcBtcGb[AVFS_VOLTAGE_SOC]);
+
+ pr_info("XgmiDpmPstates\n");
+ for (i = 0; i < NUM_XGMI_LEVELS; i++)
+ pr_info(" .[%d] = %d\n", i, pptable->XgmiDpmPstates[i]);
+ pr_info("XgmiDpmSpare[0] = 0x%02x\n", pptable->XgmiDpmSpare[0]);
+ pr_info("XgmiDpmSpare[1] = 0x%02x\n", pptable->XgmiDpmSpare[1]);
+
+ pr_info("VDDGFX_TVmin = %d\n", pptable->VDDGFX_TVmin);
+ pr_info("VDDSOC_TVmin = %d\n", pptable->VDDSOC_TVmin);
+ pr_info("VDDGFX_Vmin_HiTemp = %d\n", pptable->VDDGFX_Vmin_HiTemp);
+ pr_info("VDDGFX_Vmin_LoTemp = %d\n", pptable->VDDGFX_Vmin_LoTemp);
+ pr_info("VDDSOC_Vmin_HiTemp = %d\n", pptable->VDDSOC_Vmin_HiTemp);
+ pr_info("VDDSOC_Vmin_LoTemp = %d\n", pptable->VDDSOC_Vmin_LoTemp);
+ pr_info("VDDGFX_TVminHystersis = %d\n", pptable->VDDGFX_TVminHystersis);
+ pr_info("VDDSOC_TVminHystersis = %d\n", pptable->VDDSOC_TVminHystersis);
+
+ pr_info("DebugOverrides = 0x%x\n", pptable->DebugOverrides);
+ pr_info("ReservedEquation0{a = 0x%x b = 0x%x c = 0x%x}\n",
+ pptable->ReservedEquation0.a,
+ pptable->ReservedEquation0.b,
+ pptable->ReservedEquation0.c);
+ pr_info("ReservedEquation1{a = 0x%x b = 0x%x c = 0x%x}\n",
+ pptable->ReservedEquation1.a,
+ pptable->ReservedEquation1.b,
+ pptable->ReservedEquation1.c);
+ pr_info("ReservedEquation2{a = 0x%x b = 0x%x c = 0x%x}\n",
+ pptable->ReservedEquation2.a,
+ pptable->ReservedEquation2.b,
+ pptable->ReservedEquation2.c);
+ pr_info("ReservedEquation3{a = 0x%x b = 0x%x c = 0x%x}\n",
+ pptable->ReservedEquation3.a,
+ pptable->ReservedEquation3.b,
+ pptable->ReservedEquation3.c);
+
+ pr_info("MinVoltageUlvGfx = %d\n", pptable->MinVoltageUlvGfx);
+ pr_info("PaddingUlv = %d\n", pptable->PaddingUlv);
+
+ pr_info("TotalPowerConfig = %d\n", pptable->TotalPowerConfig);
+ pr_info("TotalPowerSpare1 = %d\n", pptable->TotalPowerSpare1);
+ pr_info("TotalPowerSpare2 = %d\n", pptable->TotalPowerSpare2);
+
+ pr_info("PccThresholdLow = %d\n", pptable->PccThresholdLow);
+ pr_info("PccThresholdHigh = %d\n", pptable->PccThresholdHigh);
+
+ pr_info("Board Parameters:\n");
+ pr_info("MaxVoltageStepGfx = 0x%x\n", pptable->MaxVoltageStepGfx);
+ pr_info("MaxVoltageStepSoc = 0x%x\n", pptable->MaxVoltageStepSoc);
+
+ pr_info("VddGfxVrMapping = 0x%x\n", pptable->VddGfxVrMapping);
+ pr_info("VddSocVrMapping = 0x%x\n", pptable->VddSocVrMapping);
+ pr_info("VddMemVrMapping = 0x%x\n", pptable->VddMemVrMapping);
+ pr_info("BoardVrMapping = 0x%x\n", pptable->BoardVrMapping);
+
+ pr_info("GfxUlvPhaseSheddingMask = 0x%x\n", pptable->GfxUlvPhaseSheddingMask);
+ pr_info("ExternalSensorPresent = 0x%x\n", pptable->ExternalSensorPresent);
+
+ pr_info("GfxMaxCurrent = 0x%x\n", pptable->GfxMaxCurrent);
+ pr_info("GfxOffset = 0x%x\n", pptable->GfxOffset);
+ pr_info("Padding_TelemetryGfx = 0x%x\n", pptable->Padding_TelemetryGfx);
+
+ pr_info("SocMaxCurrent = 0x%x\n", pptable->SocMaxCurrent);
+ pr_info("SocOffset = 0x%x\n", pptable->SocOffset);
+ pr_info("Padding_TelemetrySoc = 0x%x\n", pptable->Padding_TelemetrySoc);
+
+ pr_info("MemMaxCurrent = 0x%x\n", pptable->MemMaxCurrent);
+ pr_info("MemOffset = 0x%x\n", pptable->MemOffset);
+ pr_info("Padding_TelemetryMem = 0x%x\n", pptable->Padding_TelemetryMem);
+
+ pr_info("BoardMaxCurrent = 0x%x\n", pptable->BoardMaxCurrent);
+ pr_info("BoardOffset = 0x%x\n", pptable->BoardOffset);
+ pr_info("Padding_TelemetryBoardInput = 0x%x\n", pptable->Padding_TelemetryBoardInput);
+
+ pr_info("VR0HotGpio = %d\n", pptable->VR0HotGpio);
+ pr_info("VR0HotPolarity = %d\n", pptable->VR0HotPolarity);
+ pr_info("VR1HotGpio = %d\n", pptable->VR1HotGpio);
+ pr_info("VR1HotPolarity = %d\n", pptable->VR1HotPolarity);
+
+ pr_info("PllGfxclkSpreadEnabled = %d\n", pptable->PllGfxclkSpreadEnabled);
+ pr_info("PllGfxclkSpreadPercent = %d\n", pptable->PllGfxclkSpreadPercent);
+ pr_info("PllGfxclkSpreadFreq = %d\n", pptable->PllGfxclkSpreadFreq);
+
+ pr_info("UclkSpreadEnabled = %d\n", pptable->UclkSpreadEnabled);
+ pr_info("UclkSpreadPercent = %d\n", pptable->UclkSpreadPercent);
+ pr_info("UclkSpreadFreq = %d\n", pptable->UclkSpreadFreq);
+
+ pr_info("FclkSpreadEnabled = %d\n", pptable->FclkSpreadEnabled);
+ pr_info("FclkSpreadPercent = %d\n", pptable->FclkSpreadPercent);
+ pr_info("FclkSpreadFreq = %d\n", pptable->FclkSpreadFreq);
+
+ pr_info("FllGfxclkSpreadEnabled = %d\n", pptable->FllGfxclkSpreadEnabled);
+ pr_info("FllGfxclkSpreadPercent = %d\n", pptable->FllGfxclkSpreadPercent);
+ pr_info("FllGfxclkSpreadFreq = %d\n", pptable->FllGfxclkSpreadFreq);
+
+ for (i = 0; i < NUM_I2C_CONTROLLERS; i++) {
+ pr_info("I2cControllers[%d]:\n", i);
+ pr_info(" .Enabled = %d\n",
+ pptable->I2cControllers[i].Enabled);
+ pr_info(" .SlaveAddress = 0x%x\n",
+ pptable->I2cControllers[i].SlaveAddress);
+ pr_info(" .ControllerPort = %d\n",
+ pptable->I2cControllers[i].ControllerPort);
+ pr_info(" .ControllerName = %d\n",
+ pptable->I2cControllers[i].ControllerName);
+ pr_info(" .ThermalThrottler = %d\n",
+ pptable->I2cControllers[i].ThermalThrotter);
+ pr_info(" .I2cProtocol = %d\n",
+ pptable->I2cControllers[i].I2cProtocol);
+ pr_info(" .Speed = %d\n",
+ pptable->I2cControllers[i].Speed);
+ }
+
+ pr_info("MemoryChannelEnabled = %d\n", pptable->MemoryChannelEnabled);
+ pr_info("DramBitWidth = %d\n", pptable->DramBitWidth);
+
+ pr_info("TotalBoardPower = %d\n", pptable->TotalBoardPower);
+
+ pr_info("XgmiLinkSpeed\n");
+ for (i = 0; i < NUM_XGMI_PSTATE_LEVELS; i++)
+ pr_info(" .[%d] = %d\n", i, pptable->XgmiLinkSpeed[i]);
+ pr_info("XgmiLinkWidth\n");
+ for (i = 0; i < NUM_XGMI_PSTATE_LEVELS; i++)
+ pr_info(" .[%d] = %d\n", i, pptable->XgmiLinkWidth[i]);
+ pr_info("XgmiFclkFreq\n");
+ for (i = 0; i < NUM_XGMI_PSTATE_LEVELS; i++)
+ pr_info(" .[%d] = %d\n", i, pptable->XgmiFclkFreq[i]);
+ pr_info("XgmiSocVoltage\n");
+ for (i = 0; i < NUM_XGMI_PSTATE_LEVELS; i++)
+ pr_info(" .[%d] = %d\n", i, pptable->XgmiSocVoltage[i]);
+
+}
+
+static bool arcturus_is_dpm_running(struct smu_context *smu)
+{
+ int ret = 0;
+ uint32_t feature_mask[2];
+ unsigned long feature_enabled;
+
+ ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
+ feature_enabled = (unsigned long)((uint64_t)feature_mask[0] |
+ ((uint64_t)feature_mask[1] << 32));
+ return !!(feature_enabled & SMC_DPM_FEATURE);
+}
+
+static const struct pptable_funcs arcturus_ppt_funcs = {
+ /* translate smu index into arcturus specific index */
+ .get_smu_msg_index = arcturus_get_smu_msg_index,
+ .get_smu_clk_index = arcturus_get_smu_clk_index,
+ .get_smu_feature_index = arcturus_get_smu_feature_index,
+ .get_smu_table_index = arcturus_get_smu_table_index,
+ .get_smu_power_index = arcturus_get_pwr_src_index,
+ .get_workload_type = arcturus_get_workload_type,
+ /* internal structure allocations */
+ .tables_init = arcturus_tables_init,
+ .alloc_dpm_context = arcturus_allocate_dpm_context,
+ /* pptable related */
+ .check_powerplay_table = arcturus_check_powerplay_table,
+ .store_powerplay_table = arcturus_store_powerplay_table,
+ .append_powerplay_table = arcturus_append_powerplay_table,
+ /* init dpm */
+ .get_allowed_feature_mask = arcturus_get_allowed_feature_mask,
+ /* btc */
+ .run_afll_btc = arcturus_run_btc_afll,
+ /* dpm/clk tables */
+ .set_default_dpm_table = arcturus_set_default_dpm_table,
+ .populate_umd_state_clk = arcturus_populate_umd_state_clk,
+ .get_thermal_temperature_range = arcturus_get_thermal_temperature_range,
+ .get_current_clk_freq_by_table = arcturus_get_current_clk_freq_by_table,
+ .print_clk_levels = arcturus_print_clk_levels,
+ .force_clk_levels = arcturus_force_clk_levels,
+ .read_sensor = arcturus_read_sensor,
+ .get_fan_speed_percent = arcturus_get_fan_speed_percent,
+ .get_fan_speed_rpm = arcturus_get_fan_speed_rpm,
+ .force_dpm_limit_value = arcturus_force_dpm_limit_value,
+ .unforce_dpm_levels = arcturus_unforce_dpm_levels,
+ .get_profiling_clk_mask = arcturus_get_profiling_clk_mask,
+ .get_power_profile_mode = arcturus_get_power_profile_mode,
+ .set_power_profile_mode = arcturus_set_power_profile_mode,
+ /* debug (internal used) */
+ .dump_pptable = arcturus_dump_pptable,
+ .get_power_limit = arcturus_get_power_limit,
+ .is_dpm_running = arcturus_is_dpm_running,
+};
+
+void arcturus_set_ppt_funcs(struct smu_context *smu)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+
+ smu->ppt_funcs = &arcturus_ppt_funcs;
+ smu_table->table_count = TABLE_COUNT;
+}
diff --git a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.h b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.h
new file mode 100644
index 000000000000..d756b16924b8
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __ARCTURUS_PPT_H__
+#define __ARCTURUS_PPT_H__
+
+#define ARCTURUS_UMD_PSTATE_GFXCLK_LEVEL 0x3
+#define ARCTURUS_UMD_PSTATE_SOCCLK_LEVEL 0x3
+#define ARCTURUS_UMD_PSTATE_MCLK_LEVEL 0x2
+
+#define MAX_DPM_NUMBER 16
+#define MAX_PCIE_CONF 2
+
+struct arcturus_dpm_level {
+ bool enabled;
+ uint32_t value;
+ uint32_t param1;
+};
+
+struct arcturus_dpm_state {
+ uint32_t soft_min_level;
+ uint32_t soft_max_level;
+ uint32_t hard_min_level;
+ uint32_t hard_max_level;
+};
+
+struct arcturus_single_dpm_table {
+ uint32_t count;
+ struct arcturus_dpm_state dpm_state;
+ struct arcturus_dpm_level dpm_levels[MAX_DPM_NUMBER];
+};
+
+struct arcturus_pcie_table {
+ uint16_t count;
+ uint8_t pcie_gen[MAX_PCIE_CONF];
+ uint8_t pcie_lane[MAX_PCIE_CONF];
+ uint32_t lclk[MAX_PCIE_CONF];
+};
+
+struct arcturus_dpm_table {
+ struct arcturus_single_dpm_table soc_table;
+ struct arcturus_single_dpm_table gfx_table;
+ struct arcturus_single_dpm_table mem_table;
+ struct arcturus_single_dpm_table eclk_table;
+ struct arcturus_single_dpm_table vclk_table;
+ struct arcturus_single_dpm_table dclk_table;
+ struct arcturus_single_dpm_table fclk_table;
+ struct arcturus_pcie_table pcie_table;
+};
+
+extern void arcturus_set_ppt_funcs(struct smu_context *smu);
+
+#endif
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
index 18e780f566fa..1115761982a7 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
@@ -1311,6 +1311,12 @@ static int smu10_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, uin
return 0;
}
+static int smu10_asic_reset(struct pp_hwmgr *hwmgr, enum SMU_ASIC_RESET_MODE mode)
+{
+ return smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_DeviceDriverReset,
+ mode);
+}
static const struct pp_hwmgr_func smu10_hwmgr_funcs = {
.backend_init = smu10_hwmgr_backend_init,
@@ -1355,6 +1361,7 @@ static const struct pp_hwmgr_func smu10_hwmgr_funcs = {
.set_hard_min_fclk_by_freq = smu10_set_hard_min_fclk_by_freq,
.get_power_profile_mode = smu10_get_power_profile_mode,
.set_power_profile_mode = smu10_set_power_profile_mode,
+ .asic_reset = smu10_asic_reset,
};
int smu10_init_function_pointers(struct pp_hwmgr *hwmgr)
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index 487aeee1cf8a..34f95e0e3ea4 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -2956,9 +2956,10 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
if (hwmgr->display_config->num_display == 0)
disable_mclk_switching = false;
else
- disable_mclk_switching = ((1 < hwmgr->display_config->num_display) ||
- disable_mclk_switching_for_frame_lock ||
- smu7_vblank_too_short(hwmgr, hwmgr->display_config->min_vblank_time));
+ disable_mclk_switching = ((1 < hwmgr->display_config->num_display) &&
+ !hwmgr->display_config->multi_monitor_in_sync) ||
+ disable_mclk_switching_for_frame_lock ||
+ smu7_vblank_too_short(hwmgr, hwmgr->display_config->min_vblank_time);
sclk = smu7_ps->performance_levels[0].engine_clock;
mclk = smu7_ps->performance_levels[0].memory_clock;
@@ -4068,6 +4069,11 @@ static int smu7_program_display_gap(struct pp_hwmgr *hwmgr)
data->frame_time_x2 = frame_time_in_us * 2 / 100;
+ if (data->frame_time_x2 < 280) {
+ pr_debug("%s: enforce minimal VBITimeout: %d -> 280\n", __func__, data->frame_time_x2);
+ data->frame_time_x2 = 280;
+ }
+
display_gap2 = pre_vbi_time_in_us * (ref_clock / 100);
cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL2, display_gap2);
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
index 3be8eb21fd6e..d08493b67b67 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
@@ -3220,7 +3220,8 @@ static int vega10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
if (hwmgr->display_config->num_display == 0)
disable_mclk_switching = false;
else
- disable_mclk_switching = (hwmgr->display_config->num_display > 1) ||
+ disable_mclk_switching = ((1 < hwmgr->display_config->num_display) &&
+ !hwmgr->display_config->multi_monitor_in_sync) ||
disable_mclk_switching_for_frame_lock ||
disable_mclk_switching_for_vr ||
force_mclk_high;
@@ -5219,6 +5220,30 @@ static int vega10_odn_edit_dpm_table(struct pp_hwmgr *hwmgr,
return 0;
}
+static int vega10_set_mp1_state(struct pp_hwmgr *hwmgr,
+ enum pp_mp1_state mp1_state)
+{
+ uint16_t msg;
+ int ret;
+
+ switch (mp1_state) {
+ case PP_MP1_STATE_UNLOAD:
+ msg = PPSMC_MSG_PrepareMp1ForUnload;
+ break;
+ case PP_MP1_STATE_SHUTDOWN:
+ case PP_MP1_STATE_RESET:
+ case PP_MP1_STATE_NONE:
+ default:
+ return 0;
+ }
+
+ PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr, msg)) == 0,
+ "[PrepareMp1] Failed!",
+ return ret);
+
+ return 0;
+}
+
static int vega10_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state,
PHM_PerformanceLevelDesignation designation, uint32_t index,
PHM_PerformanceLevel *level)
@@ -5308,6 +5333,7 @@ static const struct pp_hwmgr_func vega10_hwmgr_funcs = {
.enable_mgpu_fan_boost = vega10_enable_mgpu_fan_boost,
.get_ppfeature_status = vega10_get_ppfeature_status,
.set_ppfeature_status = vega10_set_ppfeature_status,
+ .set_mp1_state = vega10_set_mp1_state,
};
int vega10_hwmgr_init(struct pp_hwmgr *hwmgr)
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
index efb6d3762feb..7af9ad450ac4 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
@@ -2639,6 +2639,30 @@ static int vega12_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_
return 0;
}
+static int vega12_set_mp1_state(struct pp_hwmgr *hwmgr,
+ enum pp_mp1_state mp1_state)
+{
+ uint16_t msg;
+ int ret;
+
+ switch (mp1_state) {
+ case PP_MP1_STATE_UNLOAD:
+ msg = PPSMC_MSG_PrepareMp1ForUnload;
+ break;
+ case PP_MP1_STATE_SHUTDOWN:
+ case PP_MP1_STATE_RESET:
+ case PP_MP1_STATE_NONE:
+ default:
+ return 0;
+ }
+
+ PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr, msg)) == 0,
+ "[PrepareMp1] Failed!",
+ return ret);
+
+ return 0;
+}
+
static const struct pp_hwmgr_func vega12_hwmgr_funcs = {
.backend_init = vega12_hwmgr_backend_init,
.backend_fini = vega12_hwmgr_backend_fini,
@@ -2695,7 +2719,7 @@ static const struct pp_hwmgr_func vega12_hwmgr_funcs = {
.set_asic_baco_state = vega12_baco_set_state,
.get_ppfeature_status = vega12_get_ppfeature_status,
.set_ppfeature_status = vega12_set_ppfeature_status,
-
+ .set_mp1_state = vega12_set_mp1_state,
};
int vega12_hwmgr_init(struct pp_hwmgr *hwmgr)
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
index 90c4e87ac5ad..f5915308e643 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
@@ -3115,6 +3115,34 @@ static int vega20_odn_edit_dpm_table(struct pp_hwmgr *hwmgr,
return 0;
}
+static int vega20_set_mp1_state(struct pp_hwmgr *hwmgr,
+ enum pp_mp1_state mp1_state)
+{
+ uint16_t msg;
+ int ret;
+
+ switch (mp1_state) {
+ case PP_MP1_STATE_SHUTDOWN:
+ msg = PPSMC_MSG_PrepareMp1ForShutdown;
+ break;
+ case PP_MP1_STATE_UNLOAD:
+ msg = PPSMC_MSG_PrepareMp1ForUnload;
+ break;
+ case PP_MP1_STATE_RESET:
+ msg = PPSMC_MSG_PrepareMp1ForReset;
+ break;
+ case PP_MP1_STATE_NONE:
+ default:
+ return 0;
+ }
+
+ PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr, msg)) == 0,
+ "[PrepareMp1] Failed!",
+ return ret);
+
+ return 0;
+}
+
static int vega20_get_ppfeature_status(struct pp_hwmgr *hwmgr, char *buf)
{
static const char *ppfeature_name[] = {
@@ -4109,6 +4137,24 @@ static int vega20_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
return 0;
}
+static int vega20_smu_i2c_bus_access(struct pp_hwmgr *hwmgr, bool acquire)
+{
+ int res;
+
+ /* I2C bus access can happen very early, when SMU not loaded yet */
+ if (!vega20_is_smc_ram_running(hwmgr))
+ return 0;
+
+ res = smum_send_msg_to_smc_with_parameter(hwmgr,
+ (acquire ?
+ PPSMC_MSG_RequestI2CBus :
+ PPSMC_MSG_ReleaseI2CBus),
+ 0);
+
+ PP_ASSERT_WITH_CODE(!res, "[SmuI2CAccessBus] Failed to access bus!", return res);
+ return res;
+}
+
static const struct pp_hwmgr_func vega20_hwmgr_funcs = {
/* init/fini related */
.backend_init = vega20_hwmgr_backend_init,
@@ -4175,6 +4221,8 @@ static const struct pp_hwmgr_func vega20_hwmgr_funcs = {
.get_asic_baco_capability = vega20_baco_get_capability,
.get_asic_baco_state = vega20_baco_get_state,
.set_asic_baco_state = vega20_baco_set_state,
+ .set_mp1_state = vega20_set_mp1_state,
+ .smu_i2c_bus_access = vega20_smu_i2c_bus_access,
};
int vega20_hwmgr_init(struct pp_hwmgr *hwmgr)
diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
index a78b2e295895..6109815a0401 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
@@ -26,6 +26,7 @@
#include "kgd_pp_interface.h"
#include "dm_pp_interface.h"
#include "dm_pp_smu.h"
+#include "smu_types.h"
#define SMU_THERMAL_MINIMUM_ALERT_TEMP 0
#define SMU_THERMAL_MAXIMUM_ALERT_TEMP 255
@@ -150,124 +151,6 @@ struct smu_power_state {
struct smu_hw_power_state hardware;
};
-enum smu_message_type
-{
- SMU_MSG_TestMessage = 0,
- SMU_MSG_GetSmuVersion,
- SMU_MSG_GetDriverIfVersion,
- SMU_MSG_SetAllowedFeaturesMaskLow,
- SMU_MSG_SetAllowedFeaturesMaskHigh,
- SMU_MSG_EnableAllSmuFeatures,
- SMU_MSG_DisableAllSmuFeatures,
- SMU_MSG_EnableSmuFeaturesLow,
- SMU_MSG_EnableSmuFeaturesHigh,
- SMU_MSG_DisableSmuFeaturesLow,
- SMU_MSG_DisableSmuFeaturesHigh,
- SMU_MSG_GetEnabledSmuFeaturesLow,
- SMU_MSG_GetEnabledSmuFeaturesHigh,
- SMU_MSG_SetWorkloadMask,
- SMU_MSG_SetPptLimit,
- SMU_MSG_SetDriverDramAddrHigh,
- SMU_MSG_SetDriverDramAddrLow,
- SMU_MSG_SetToolsDramAddrHigh,
- SMU_MSG_SetToolsDramAddrLow,
- SMU_MSG_TransferTableSmu2Dram,
- SMU_MSG_TransferTableDram2Smu,
- SMU_MSG_UseDefaultPPTable,
- SMU_MSG_UseBackupPPTable,
- SMU_MSG_RunBtc,
- SMU_MSG_RequestI2CBus,
- SMU_MSG_ReleaseI2CBus,
- SMU_MSG_SetFloorSocVoltage,
- SMU_MSG_SoftReset,
- SMU_MSG_StartBacoMonitor,
- SMU_MSG_CancelBacoMonitor,
- SMU_MSG_EnterBaco,
- SMU_MSG_SetSoftMinByFreq,
- SMU_MSG_SetSoftMaxByFreq,
- SMU_MSG_SetHardMinByFreq,
- SMU_MSG_SetHardMaxByFreq,
- SMU_MSG_GetMinDpmFreq,
- SMU_MSG_GetMaxDpmFreq,
- SMU_MSG_GetDpmFreqByIndex,
- SMU_MSG_GetDpmClockFreq,
- SMU_MSG_GetSsVoltageByDpm,
- SMU_MSG_SetMemoryChannelConfig,
- SMU_MSG_SetGeminiMode,
- SMU_MSG_SetGeminiApertureHigh,
- SMU_MSG_SetGeminiApertureLow,
- SMU_MSG_SetMinLinkDpmByIndex,
- SMU_MSG_OverridePcieParameters,
- SMU_MSG_OverDriveSetPercentage,
- SMU_MSG_SetMinDeepSleepDcefclk,
- SMU_MSG_ReenableAcDcInterrupt,
- SMU_MSG_NotifyPowerSource,
- SMU_MSG_SetUclkFastSwitch,
- SMU_MSG_SetUclkDownHyst,
- SMU_MSG_GfxDeviceDriverReset,
- SMU_MSG_GetCurrentRpm,
- SMU_MSG_SetVideoFps,
- SMU_MSG_SetTjMax,
- SMU_MSG_SetFanTemperatureTarget,
- SMU_MSG_PrepareMp1ForUnload,
- SMU_MSG_DramLogSetDramAddrHigh,
- SMU_MSG_DramLogSetDramAddrLow,
- SMU_MSG_DramLogSetDramSize,
- SMU_MSG_SetFanMaxRpm,
- SMU_MSG_SetFanMinPwm,
- SMU_MSG_ConfigureGfxDidt,
- SMU_MSG_NumOfDisplays,
- SMU_MSG_RemoveMargins,
- SMU_MSG_ReadSerialNumTop32,
- SMU_MSG_ReadSerialNumBottom32,
- SMU_MSG_SetSystemVirtualDramAddrHigh,
- SMU_MSG_SetSystemVirtualDramAddrLow,
- SMU_MSG_WaflTest,
- SMU_MSG_SetFclkGfxClkRatio,
- SMU_MSG_AllowGfxOff,
- SMU_MSG_DisallowGfxOff,
- SMU_MSG_GetPptLimit,
- SMU_MSG_GetDcModeMaxDpmFreq,
- SMU_MSG_GetDebugData,
- SMU_MSG_SetXgmiMode,
- SMU_MSG_RunAfllBtc,
- SMU_MSG_ExitBaco,
- SMU_MSG_PrepareMp1ForReset,
- SMU_MSG_PrepareMp1ForShutdown,
- SMU_MSG_SetMGpuFanBoostLimitRpm,
- SMU_MSG_GetAVFSVoltageByDpm,
- SMU_MSG_PowerUpVcn,
- SMU_MSG_PowerDownVcn,
- SMU_MSG_PowerUpJpeg,
- SMU_MSG_PowerDownJpeg,
- SMU_MSG_BacoAudioD3PME,
- SMU_MSG_ArmD3,
- SMU_MSG_MAX_COUNT,
-};
-
-enum smu_clk_type
-{
- SMU_GFXCLK,
- SMU_VCLK,
- SMU_DCLK,
- SMU_ECLK,
- SMU_SOCCLK,
- SMU_UCLK,
- SMU_DCEFCLK,
- SMU_DISPCLK,
- SMU_PIXCLK,
- SMU_PHYCLK,
- SMU_FCLK,
- SMU_SCLK,
- SMU_MCLK,
- SMU_PCIE,
- SMU_OD_SCLK,
- SMU_OD_MCLK,
- SMU_OD_VDDC_CURVE,
- SMU_OD_RANGE,
- SMU_CLK_COUNT,
-};
-
enum smu_power_src_type
{
SMU_POWER_SOURCE_AC,
@@ -275,63 +158,6 @@ enum smu_power_src_type
SMU_POWER_SOURCE_COUNT,
};
-enum smu_feature_mask
-{
- SMU_FEATURE_DPM_PREFETCHER_BIT,
- SMU_FEATURE_DPM_GFXCLK_BIT,
- SMU_FEATURE_DPM_UCLK_BIT,
- SMU_FEATURE_DPM_SOCCLK_BIT,
- SMU_FEATURE_DPM_UVD_BIT,
- SMU_FEATURE_DPM_VCE_BIT,
- SMU_FEATURE_ULV_BIT,
- SMU_FEATURE_DPM_MP0CLK_BIT,
- SMU_FEATURE_DPM_LINK_BIT,
- SMU_FEATURE_DPM_DCEFCLK_BIT,
- SMU_FEATURE_DS_GFXCLK_BIT,
- SMU_FEATURE_DS_SOCCLK_BIT,
- SMU_FEATURE_DS_LCLK_BIT,
- SMU_FEATURE_PPT_BIT,
- SMU_FEATURE_TDC_BIT,
- SMU_FEATURE_THERMAL_BIT,
- SMU_FEATURE_GFX_PER_CU_CG_BIT,
- SMU_FEATURE_RM_BIT,
- SMU_FEATURE_DS_DCEFCLK_BIT,
- SMU_FEATURE_ACDC_BIT,
- SMU_FEATURE_VR0HOT_BIT,
- SMU_FEATURE_VR1HOT_BIT,
- SMU_FEATURE_FW_CTF_BIT,
- SMU_FEATURE_LED_DISPLAY_BIT,
- SMU_FEATURE_FAN_CONTROL_BIT,
- SMU_FEATURE_GFX_EDC_BIT,
- SMU_FEATURE_GFXOFF_BIT,
- SMU_FEATURE_CG_BIT,
- SMU_FEATURE_DPM_FCLK_BIT,
- SMU_FEATURE_DS_FCLK_BIT,
- SMU_FEATURE_DS_MP1CLK_BIT,
- SMU_FEATURE_DS_MP0CLK_BIT,
- SMU_FEATURE_XGMI_BIT,
- SMU_FEATURE_DPM_GFX_PACE_BIT,
- SMU_FEATURE_MEM_VDDCI_SCALING_BIT,
- SMU_FEATURE_MEM_MVDD_SCALING_BIT,
- SMU_FEATURE_DS_UCLK_BIT,
- SMU_FEATURE_GFX_ULV_BIT,
- SMU_FEATURE_FW_DSTATE_BIT,
- SMU_FEATURE_BACO_BIT,
- SMU_FEATURE_VCN_PG_BIT,
- SMU_FEATURE_JPEG_PG_BIT,
- SMU_FEATURE_USB_PG_BIT,
- SMU_FEATURE_RSMU_SMN_CG_BIT,
- SMU_FEATURE_APCC_PLUS_BIT,
- SMU_FEATURE_GTHR_BIT,
- SMU_FEATURE_GFX_DCS_BIT,
- SMU_FEATURE_GFX_SS_BIT,
- SMU_FEATURE_OUT_OF_BAND_MONITOR_BIT,
- SMU_FEATURE_TEMP_DEPENDENT_VMIN_BIT,
- SMU_FEATURE_MMHUB_PG_BIT,
- SMU_FEATURE_ATHUB_PG_BIT,
- SMU_FEATURE_COUNT,
-};
-
enum smu_memory_pool_size
{
SMU_MEMORY_POOL_SIZE_ZERO = 0,
@@ -396,12 +222,17 @@ struct smu_bios_boot_up_values
uint16_t vdd_gfx;
uint8_t cooling_id;
uint32_t pp_table_id;
+ uint32_t format_revision;
+ uint32_t content_revision;
+ uint32_t fclk;
};
enum smu_table_id
{
SMU_TABLE_PPTABLE = 0,
SMU_TABLE_WATERMARKS,
+ SMU_TABLE_CUSTOM_DPM,
+ SMU_TABLE_DPMCLOCKS,
SMU_TABLE_AVFS,
SMU_TABLE_AVFS_PSM_DEBUG,
SMU_TABLE_AVFS_FUSE_OVERRIDE,
@@ -422,6 +253,7 @@ struct smu_table_context
void *hardcode_pptable;
unsigned long metrics_time;
void *metrics_table;
+ void *clocks_table;
void *max_sustainable_clocks;
struct smu_bios_boot_up_values boot_values;
@@ -540,6 +372,8 @@ struct smu_context
#define WATERMARKS_EXIST (1 << 0)
#define WATERMARKS_LOADED (1 << 1)
uint32_t watermarks_bitmap;
+ uint32_t hard_min_uclk_req_from_dal;
+ bool disable_uclk_switch;
uint32_t workload_mask;
uint32_t workload_prority[WORKLOAD_POLICY_MAX];
@@ -607,8 +441,6 @@ struct pptable_funcs {
uint32_t *mclk_mask,
uint32_t *soc_mask);
int (*set_cpu_power_state)(struct smu_context *smu);
- int (*set_ppfeature_status)(struct smu_context *smu, uint64_t ppfeatures);
- int (*get_ppfeature_status)(struct smu_context *smu, char *buf);
bool (*is_dpm_running)(struct smu_context *smu);
int (*tables_init)(struct smu_context *smu, struct smu_table *tables);
int (*set_thermal_fan_table)(struct smu_context *smu);
@@ -623,6 +455,10 @@ struct pptable_funcs {
int (*get_uclk_dpm_states)(struct smu_context *smu, uint32_t *clocks_in_khz, uint32_t *num_states);
int (*set_default_od_settings)(struct smu_context *smu, bool initialize);
int (*set_performance_level)(struct smu_context *smu, enum amd_dpm_forced_level level);
+ int (*display_disable_memory_clock_switch)(struct smu_context *smu, bool disable_memory_clock_switch);
+ void (*dump_pptable)(struct smu_context *smu);
+ int (*get_power_limit)(struct smu_context *smu, uint32_t *limit, bool asic_default);
+ int (*get_dpm_uclk_limited)(struct smu_context *smu, uint32_t *clock, bool max);
};
struct smu_funcs
@@ -639,8 +475,11 @@ struct smu_funcs
int (*get_clk_info_from_vbios)(struct smu_context *smu);
int (*check_pptable)(struct smu_context *smu);
int (*parse_pptable)(struct smu_context *smu);
- int (*populate_smc_pptable)(struct smu_context *smu);
+ int (*populate_smc_tables)(struct smu_context *smu);
int (*check_fw_version)(struct smu_context *smu);
+ int (*powergate_sdma)(struct smu_context *smu, bool gate);
+ int (*powergate_vcn)(struct smu_context *smu, bool gate);
+ int (*set_gfx_cgpg)(struct smu_context *smu, bool enable);
int (*write_pptable)(struct smu_context *smu);
int (*set_min_dcef_deep_sleep)(struct smu_context *smu);
int (*set_tool_table_location)(struct smu_context *smu);
@@ -654,9 +493,7 @@ struct smu_funcs
int (*init_display_count)(struct smu_context *smu, uint32_t count);
int (*set_allowed_mask)(struct smu_context *smu);
int (*get_enabled_mask)(struct smu_context *smu, uint32_t *feature_mask, uint32_t num);
- int (*update_feature_enable_state)(struct smu_context *smu, uint32_t feature_id, bool enabled);
int (*notify_display_change)(struct smu_context *smu);
- int (*get_power_limit)(struct smu_context *smu, uint32_t *limit, bool def);
int (*set_power_limit)(struct smu_context *smu, uint32_t n);
int (*get_current_clk_freq)(struct smu_context *smu, enum smu_clk_type clk_id, uint32_t *value);
int (*init_max_sustainable_clocks)(struct smu_context *smu);
@@ -700,7 +537,7 @@ struct smu_funcs
enum smu_baco_state (*baco_get_state)(struct smu_context *smu);
int (*baco_set_state)(struct smu_context *smu, enum smu_baco_state state);
int (*baco_reset)(struct smu_context *smu);
-
+ int (*get_dpm_ultimate_freq)(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t *min, uint32_t *max);
};
#define smu_init_microcode(smu) \
@@ -719,6 +556,12 @@ struct smu_funcs
((smu)->funcs->check_fw_status ? (smu)->funcs->check_fw_status((smu)) : 0)
#define smu_setup_pptable(smu) \
((smu)->funcs->setup_pptable ? (smu)->funcs->setup_pptable((smu)) : 0)
+#define smu_powergate_sdma(smu, gate) \
+ ((smu)->funcs->powergate_sdma ? (smu)->funcs->powergate_sdma((smu), (gate)) : 0)
+#define smu_powergate_vcn(smu, gate) \
+ ((smu)->funcs->powergate_vcn ? (smu)->funcs->powergate_vcn((smu), (gate)) : 0)
+#define smu_set_gfx_cgpg(smu, enabled) \
+ ((smu)->funcs->set_gfx_cgpg ? (smu)->funcs->set_gfx_cgpg((smu), (enabled)) : 0)
#define smu_get_vbios_bootup_values(smu) \
((smu)->funcs->get_vbios_bootup_values ? (smu)->funcs->get_vbios_bootup_values((smu)) : 0)
#define smu_get_clk_info_from_vbios(smu) \
@@ -727,8 +570,8 @@ struct smu_funcs
((smu)->funcs->check_pptable ? (smu)->funcs->check_pptable((smu)) : 0)
#define smu_parse_pptable(smu) \
((smu)->funcs->parse_pptable ? (smu)->funcs->parse_pptable((smu)) : 0)
-#define smu_populate_smc_pptable(smu) \
- ((smu)->funcs->populate_smc_pptable ? (smu)->funcs->populate_smc_pptable((smu)) : 0)
+#define smu_populate_smc_tables(smu) \
+ ((smu)->funcs->populate_smc_tables ? (smu)->funcs->populate_smc_tables((smu)) : 0)
#define smu_check_fw_version(smu) \
((smu)->funcs->check_fw_version ? (smu)->funcs->check_fw_version((smu)) : 0)
#define smu_write_pptable(smu) \
@@ -770,8 +613,6 @@ struct smu_funcs
((smu)->funcs->get_enabled_mask? (smu)->funcs->get_enabled_mask((smu), (mask), (num)) : 0)
#define smu_is_dpm_running(smu) \
((smu)->ppt_funcs->is_dpm_running ? (smu)->ppt_funcs->is_dpm_running((smu)) : 0)
-#define smu_feature_update_enable_state(smu, feature_id, enabled) \
- ((smu)->funcs->update_feature_enable_state? (smu)->funcs->update_feature_enable_state((smu), (feature_id), (enabled)) : 0)
#define smu_notify_display_change(smu) \
((smu)->funcs->notify_display_change? (smu)->funcs->notify_display_change((smu)) : 0)
#define smu_store_powerplay_table(smu) \
@@ -787,7 +628,7 @@ struct smu_funcs
#define smu_set_default_od8_settings(smu) \
((smu)->ppt_funcs->set_default_od8_settings ? (smu)->ppt_funcs->set_default_od8_settings((smu)) : 0)
#define smu_get_power_limit(smu, limit, def) \
- ((smu)->funcs->get_power_limit ? (smu)->funcs->get_power_limit((smu), (limit), (def)) : 0)
+ ((smu)->ppt_funcs->get_power_limit ? (smu)->ppt_funcs->get_power_limit((smu), (limit), (def)) : 0)
#define smu_set_power_limit(smu, limit) \
((smu)->funcs->set_power_limit ? (smu)->funcs->set_power_limit((smu), (limit)) : 0)
#define smu_get_current_clk_freq(smu, clk_id, value) \
@@ -809,9 +650,9 @@ struct smu_funcs
#define smu_start_thermal_control(smu) \
((smu)->funcs->start_thermal_control? (smu)->funcs->start_thermal_control((smu)) : 0)
#define smu_read_sensor(smu, sensor, data, size) \
- ((smu)->funcs->read_sensor? (smu)->funcs->read_sensor((smu), (sensor), (data), (size)) : 0)
-#define smu_asic_read_sensor(smu, sensor, data, size) \
((smu)->ppt_funcs->read_sensor? (smu)->ppt_funcs->read_sensor((smu), (sensor), (data), (size)) : 0)
+#define smu_smc_read_sensor(smu, sensor, data, size) \
+ ((smu)->funcs->read_sensor? (smu)->funcs->read_sensor((smu), (sensor), (data), (size)) : -EINVAL)
#define smu_get_power_profile_mode(smu, buf) \
((smu)->ppt_funcs->get_power_profile_mode ? (smu)->ppt_funcs->get_power_profile_mode((smu), buf) : 0)
#define smu_set_power_profile_mode(smu, param, param_size) \
@@ -875,6 +716,8 @@ struct smu_funcs
((smu)->ppt_funcs->get_clock_by_type_with_voltage ? (smu)->ppt_funcs->get_clock_by_type_with_voltage((smu), (type), (clocks)) : 0)
#define smu_display_clock_voltage_request(smu, clock_req) \
((smu)->funcs->display_clock_voltage_request ? (smu)->funcs->display_clock_voltage_request((smu), (clock_req)) : 0)
+#define smu_display_disable_memory_clock_switch(smu, disable_memory_clock_switch) \
+ ((smu)->ppt_funcs->display_disable_memory_clock_switch ? (smu)->ppt_funcs->display_disable_memory_clock_switch((smu), (disable_memory_clock_switch)) : -EINVAL)
#define smu_get_dal_power_level(smu, clocks) \
((smu)->funcs->get_dal_power_level ? (smu)->funcs->get_dal_power_level((smu), (clocks)) : 0)
#define smu_get_perf_level(smu, designation, level) \
@@ -891,10 +734,6 @@ struct smu_funcs
((smu)->ppt_funcs->dpm_set_vce_enable ? (smu)->ppt_funcs->dpm_set_vce_enable((smu), (enable)) : 0)
#define smu_set_xgmi_pstate(smu, pstate) \
((smu)->funcs->set_xgmi_pstate ? (smu)->funcs->set_xgmi_pstate((smu), (pstate)) : 0)
-#define smu_set_ppfeature_status(smu, ppfeatures) \
- ((smu)->ppt_funcs->set_ppfeature_status ? (smu)->ppt_funcs->set_ppfeature_status((smu), (ppfeatures)) : -EINVAL)
-#define smu_get_ppfeature_status(smu, buf) \
- ((smu)->ppt_funcs->get_ppfeature_status ? (smu)->ppt_funcs->get_ppfeature_status((smu), (buf)) : -EINVAL)
#define smu_set_watermarks_table(smu, tab, clock_ranges) \
((smu)->ppt_funcs->set_watermarks_table ? (smu)->ppt_funcs->set_watermarks_table((smu), (tab), (clock_ranges)) : 0)
#define smu_get_current_clk_freq_by_table(smu, clk_type, value) \
@@ -907,6 +746,8 @@ struct smu_funcs
((smu)->funcs->register_irq_handler ? (smu)->funcs->register_irq_handler(smu) : 0)
#define smu_set_azalia_d3_pme(smu) \
((smu)->funcs->set_azalia_d3_pme ? (smu)->funcs->set_azalia_d3_pme((smu)) : 0)
+#define smu_get_dpm_ultimate_freq(smu, param, min, max) \
+ ((smu)->funcs->get_dpm_ultimate_freq ? (smu)->funcs->get_dpm_ultimate_freq((smu), (param), (min), (max)) : 0)
#define smu_get_max_sustainable_clocks_by_dc(smu, max_clocks) \
((smu)->funcs->get_max_sustainable_clocks_by_dc ? (smu)->funcs->get_max_sustainable_clocks_by_dc((smu), (max_clocks)) : 0)
#define smu_get_uclk_dpm_states(smu, clocks_in_khz, num_states) \
@@ -919,6 +760,10 @@ struct smu_funcs
((smu)->funcs->baco_reset? (smu)->funcs->baco_reset((smu)) : 0)
#define smu_asic_set_performance_level(smu, level) \
((smu)->ppt_funcs->set_performance_level? (smu)->ppt_funcs->set_performance_level((smu), (level)) : -EINVAL);
+#define smu_dump_pptable(smu) \
+ ((smu)->ppt_funcs->dump_pptable ? (smu)->ppt_funcs->dump_pptable((smu)) : 0)
+#define smu_get_dpm_uclk_limited(smu, clock, max) \
+ ((smu)->ppt_funcs->get_dpm_uclk_limited ? (smu)->ppt_funcs->get_dpm_uclk_limited((smu), (clock), (max)) : -EINVAL)
extern int smu_get_atom_data_table(struct smu_context *smu, uint32_t table,
@@ -928,6 +773,8 @@ extern int smu_get_atom_data_table(struct smu_context *smu, uint32_t table,
extern const struct amd_ip_funcs smu_ip_funcs;
extern const struct amdgpu_ip_block_version smu_v11_0_ip_block;
+extern const struct amdgpu_ip_block_version smu_v12_0_ip_block;
+
extern int smu_feature_init_dpm(struct smu_context *smu);
extern int smu_feature_is_enabled(struct smu_context *smu,
@@ -943,6 +790,7 @@ int smu_update_table(struct smu_context *smu, enum smu_table_id table_index, int
void *table_data, bool drv2smu);
bool is_support_sw_smu(struct amdgpu_device *adev);
+bool is_support_sw_smu_xgmi(struct amdgpu_device *adev);
int smu_reset(struct smu_context *smu);
int smu_common_read_sensor(struct smu_context *smu, enum amd_pp_sensors sensor,
void *data, uint32_t *size);
@@ -961,6 +809,9 @@ extern int smu_dpm_set_power_gate(struct smu_context *smu,uint32_t block_type, b
extern int smu_handle_task(struct smu_context *smu,
enum amd_dpm_forced_level level,
enum amd_pp_task task_id);
+int smu_switch_power_profile(struct smu_context *smu,
+ enum PP_SMC_POWER_PROFILE type,
+ bool en);
int smu_get_smc_version(struct smu_context *smu, uint32_t *if_version, uint32_t *smu_version);
int smu_get_dpm_freq_by_index(struct smu_context *smu, enum smu_clk_type clk_type,
uint16_t level, uint32_t *value);
@@ -976,5 +827,10 @@ enum amd_dpm_forced_level smu_get_performance_level(struct smu_context *smu);
int smu_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level);
int smu_set_display_count(struct smu_context *smu, uint32_t count);
bool smu_clk_dpm_is_enabled(struct smu_context *smu, enum smu_clk_type clk_type);
+int smu_feature_update_enable_state(struct smu_context *smu, uint64_t feature_mask, bool enabled);
+const char *smu_get_message_name(struct smu_context *smu, enum smu_message_type type);
+const char *smu_get_feature_name(struct smu_context *smu, enum smu_feature_mask feature);
+size_t smu_sys_get_pp_feature_mask(struct smu_context *smu, char *buf);
+int smu_sys_set_pp_feature_mask(struct smu_context *smu, uint64_t new_mask);
#endif
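
The smu_* wrappers above all share one dispatch pattern: call the hook when the funcs/ppt_funcs table provides it, otherwise fall back to a default return value (0 where the step may be silently skipped, -EINVAL where the caller must learn that the operation is unimplemented). A minimal, standalone sketch of that pattern follows; the demo_* names are invented for illustration and are not the driver's real types.

#include <stdio.h>
#include <errno.h>

struct demo_context;

struct demo_funcs {
	int (*get_power_limit)(struct demo_context *smu, unsigned int *limit);
};

struct demo_context {
	const struct demo_funcs *ppt_funcs;
};

/* Same shape as the smu_* wrappers: use the hook if present, else a default. */
#define demo_get_power_limit(smu, limit) \
	((smu)->ppt_funcs->get_power_limit ? \
	 (smu)->ppt_funcs->get_power_limit((smu), (limit)) : -EINVAL)

static int fake_get_power_limit(struct demo_context *smu, unsigned int *limit)
{
	(void)smu;
	*limit = 250;	/* pretend the firmware reported 250 W */
	return 0;
}

int main(void)
{
	const struct demo_funcs funcs = { .get_power_limit = fake_get_power_limit };
	struct demo_context smu = { .ppt_funcs = &funcs };
	unsigned int limit = 0;
	int ret = demo_get_power_limit(&smu, &limit);

	printf("ret=%d limit=%u\n", ret, limit);
	return 0;
}

Keeping the fallback inside the wrapper lets common code call every hook unconditionally while the per-ASIC tables stay sparse.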
diff --git a/drivers/gpu/drm/amd/powerplay/inc/arcturus_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/arcturus_ppsmc.h
new file mode 100644
index 000000000000..78e5927b7711
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/inc/arcturus_ppsmc.h
@@ -0,0 +1,120 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef ARCTURUS_PP_SMC_H
+#define ARCTURUS_PP_SMC_H
+
+#pragma pack(push, 1)
+
+// SMU Response Codes:
+#define PPSMC_Result_OK 0x1
+#define PPSMC_Result_Failed 0xFF
+#define PPSMC_Result_UnknownCmd 0xFE
+#define PPSMC_Result_CmdRejectedPrereq 0xFD
+#define PPSMC_Result_CmdRejectedBusy 0xFC
+
+// Message Definitions:
+// BASIC
+#define PPSMC_MSG_TestMessage 0x1
+#define PPSMC_MSG_GetSmuVersion 0x2
+#define PPSMC_MSG_GetDriverIfVersion 0x3
+#define PPSMC_MSG_SetAllowedFeaturesMaskLow 0x4
+#define PPSMC_MSG_SetAllowedFeaturesMaskHigh 0x5
+#define PPSMC_MSG_EnableAllSmuFeatures 0x6
+#define PPSMC_MSG_DisableAllSmuFeatures 0x7
+#define PPSMC_MSG_EnableSmuFeaturesLow 0x8
+#define PPSMC_MSG_EnableSmuFeaturesHigh 0x9
+#define PPSMC_MSG_DisableSmuFeaturesLow 0xA
+#define PPSMC_MSG_DisableSmuFeaturesHigh 0xB
+#define PPSMC_MSG_GetEnabledSmuFeaturesLow 0xC
+#define PPSMC_MSG_GetEnabledSmuFeaturesHigh 0xD
+#define PPSMC_MSG_SetDriverDramAddrHigh 0xE
+#define PPSMC_MSG_SetDriverDramAddrLow 0xF
+#define PPSMC_MSG_SetToolsDramAddrHigh 0x10
+#define PPSMC_MSG_SetToolsDramAddrLow 0x11
+#define PPSMC_MSG_TransferTableSmu2Dram 0x12
+#define PPSMC_MSG_TransferTableDram2Smu 0x13
+#define PPSMC_MSG_UseDefaultPPTable 0x14
+#define PPSMC_MSG_UseBackupPPTable 0x15
+#define PPSMC_MSG_SetSystemVirtualDramAddrHigh 0x16
+#define PPSMC_MSG_SetSystemVirtualDramAddrLow 0x17
+
+//BACO/BAMACO/BOMACO
+#define PPSMC_MSG_EnterBaco 0x18
+#define PPSMC_MSG_ExitBaco 0x19
+#define PPSMC_MSG_ArmD3 0x1A
+
+//DPM
+#define PPSMC_MSG_SetSoftMinByFreq 0x1B
+#define PPSMC_MSG_SetSoftMaxByFreq 0x1C
+#define PPSMC_MSG_SetHardMinByFreq 0x1D
+#define PPSMC_MSG_SetHardMaxByFreq 0x1E
+#define PPSMC_MSG_GetMinDpmFreq 0x1F
+#define PPSMC_MSG_GetMaxDpmFreq 0x20
+#define PPSMC_MSG_GetDpmFreqByIndex 0x21
+
+#define PPSMC_MSG_SetWorkloadMask 0x22
+#define PPSMC_MSG_SetDfSwitchType 0x23
+#define PPSMC_MSG_GetVoltageByDpm 0x24
+#define PPSMC_MSG_GetVoltageByDpmOverdrive 0x25
+
+#define PPSMC_MSG_SetPptLimit 0x26
+#define PPSMC_MSG_GetPptLimit 0x27
+
+//Power Gating
+#define PPSMC_MSG_PowerUpVcn0 0x28
+#define PPSMC_MSG_PowerDownVcn0 0x29
+#define PPSMC_MSG_PowerUpVcn1 0x2A
+#define PPSMC_MSG_PowerDownVcn1 0x2B
+
+//Resets and reload
+#define PPSMC_MSG_PrepareMp1ForUnload 0x2C
+#define PPSMC_MSG_PrepareMp1ForReset 0x2D
+#define PPSMC_MSG_PrepareMp1ForShutdown 0x2E
+#define PPSMC_MSG_SoftReset 0x2F
+
+//BTC
+#define PPSMC_MSG_RunAfllBtc 0x30
+#define PPSMC_MSG_RunGfxDcBtc 0x31
+#define PPSMC_MSG_RunSocDcBtc 0x32
+
+//Debug
+#define PPSMC_MSG_DramLogSetDramAddrHigh 0x33
+#define PPSMC_MSG_DramLogSetDramAddrLow 0x34
+#define PPSMC_MSG_DramLogSetDramSize 0x35
+#define PPSMC_MSG_GetDebugData 0x36
+
+//WAFL and XGMI
+#define PPSMC_MSG_WaflTest 0x37
+#define PPSMC_MSG_SetXgmiMode 0x38
+
+//Others
+#define PPSMC_MSG_SetMemoryChannelEnable 0x39
+
+#define PPSMC_Message_Count 0x3A
+
+typedef uint32_t PPSMC_Result;
+typedef uint32_t PPSMC_Msg;
+#pragma pack(pop)
+
+#endif
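
The response codes at the top of this header are what the SMU writes back after each PPSMC_MSG_*. A sender would typically retry only on PPSMC_Result_CmdRejectedBusy and treat the other non-OK codes as hard failures; the standalone sketch below illustrates that policy. send_message() here is a made-up stand-in for the real register-mailbox write/poll, not the driver's helper.

#include <stdint.h>
#include <stdio.h>

#define PPSMC_Result_OK                0x1
#define PPSMC_Result_Failed            0xFF
#define PPSMC_Result_UnknownCmd        0xFE
#define PPSMC_Result_CmdRejectedPrereq 0xFD
#define PPSMC_Result_CmdRejectedBusy   0xFC

typedef uint32_t PPSMC_Result;
typedef uint32_t PPSMC_Msg;

/* Fake mailbox: reports "busy" once, then succeeds. */
static PPSMC_Result send_message(PPSMC_Msg msg, uint32_t arg)
{
	static int calls;

	(void)msg; (void)arg;
	return (calls++ == 0) ? PPSMC_Result_CmdRejectedBusy : PPSMC_Result_OK;
}

static int send_with_retry(PPSMC_Msg msg, uint32_t arg, int max_tries)
{
	for (int i = 0; i < max_tries; i++) {
		PPSMC_Result r = send_message(msg, arg);

		if (r == PPSMC_Result_OK)
			return 0;
		if (r != PPSMC_Result_CmdRejectedBusy)
			return -1;	/* Failed/UnknownCmd/RejectedPrereq: give up */
	}
	return -1;
}

int main(void)
{
	printf("%d\n", send_with_retry(0x2 /* PPSMC_MSG_GetSmuVersion */, 0, 3));
	return 0;
}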
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
index c5989cb38b1b..7bf9a14bfa0b 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
@@ -189,6 +189,14 @@ struct phm_vce_clock_voltage_dependency_table {
struct phm_vce_clock_voltage_dependency_record entries[1];
};
+
+enum SMU_ASIC_RESET_MODE
+{
+ SMU_ASIC_RESET_MODE_0,
+ SMU_ASIC_RESET_MODE_1,
+ SMU_ASIC_RESET_MODE_2,
+};
+
struct pp_smumgr_func {
char *name;
int (*smu_init)(struct pp_hwmgr *hwmgr);
@@ -344,6 +352,9 @@ struct pp_hwmgr_func {
int (*set_asic_baco_state)(struct pp_hwmgr *hwmgr, enum BACO_STATE state);
int (*get_ppfeature_status)(struct pp_hwmgr *hwmgr, char *buf);
int (*set_ppfeature_status)(struct pp_hwmgr *hwmgr, uint64_t ppfeature_masks);
+ int (*set_mp1_state)(struct pp_hwmgr *hwmgr, enum pp_mp1_state mp1_state);
+ int (*asic_reset)(struct pp_hwmgr *hwmgr, enum SMU_ASIC_RESET_MODE mode);
+ int (*smu_i2c_bus_access)(struct pp_hwmgr *hwmgr, bool aquire);
};
struct pp_table_func {
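
The three callbacks added to pp_hwmgr_func above (set_mp1_state, asic_reset, smu_i2c_bus_access) are optional like the rest of the table, so core code has to test for them before dispatching. Below is a hedged, standalone sketch of how an asic_reset request could flow through such a table; the demo_* names are illustrative and not the kernel's.

#include <stdio.h>
#include <errno.h>

enum SMU_ASIC_RESET_MODE {
	SMU_ASIC_RESET_MODE_0,
	SMU_ASIC_RESET_MODE_1,
	SMU_ASIC_RESET_MODE_2,
};

struct demo_hwmgr;

struct demo_hwmgr_func {
	int (*asic_reset)(struct demo_hwmgr *hwmgr, enum SMU_ASIC_RESET_MODE mode);
};

struct demo_hwmgr {
	const struct demo_hwmgr_func *hwmgr_func;
};

/* A per-ASIC backend would translate the mode into the matching SMU message. */
static int demo_asic_reset(struct demo_hwmgr *hwmgr, enum SMU_ASIC_RESET_MODE mode)
{
	(void)hwmgr;
	printf("requesting reset mode %d\n", mode);
	return 0;
}

/* Core-side dispatch: report -EINVAL when the hook is not provided. */
static int hwmgr_asic_reset(struct demo_hwmgr *hwmgr, enum SMU_ASIC_RESET_MODE mode)
{
	if (!hwmgr->hwmgr_func->asic_reset)
		return -EINVAL;
	return hwmgr->hwmgr_func->asic_reset(hwmgr, mode);
}

int main(void)
{
	const struct demo_hwmgr_func funcs = { .asic_reset = demo_asic_reset };
	struct demo_hwmgr hwmgr = { .hwmgr_func = &funcs };

	return hwmgr_asic_reset(&hwmgr, SMU_ASIC_RESET_MODE_2);
}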
diff --git a/drivers/gpu/drm/amd/powerplay/inc/rv_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/rv_ppsmc.h
index 90879e4092a3..df4677da736c 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/rv_ppsmc.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/rv_ppsmc.h
@@ -59,7 +59,7 @@
#define PPSMC_MSG_SetDriverDramAddrLow 0x1B
#define PPSMC_MSG_TransferTableSmu2Dram 0x1C
#define PPSMC_MSG_TransferTableDram2Smu 0x1D
-#define PPSMC_MSG_ControlGfxRM 0x1E
+#define PPSMC_MSG_DeviceDriverReset 0x1E
#define PPSMC_MSG_SetGfxclkOverdriveByFreqVid 0x1F
#define PPSMC_MSG_SetHardMinDcefclkByFreq 0x20
#define PPSMC_MSG_SetHardMinSocclkByFreq 0x21
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h
index 755d51f9c6a9..fdc6b7a57bc9 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h
@@ -27,7 +27,9 @@
// *** IMPORTANT ***
// SMU TEAM: Always increment the interface version if
// any structure is changed in this file
-#define SMU11_DRIVER_IF_VERSION 0x13
+// Be aware that the version should be updated in
+// smu_v11_0.h; a rename is also needed.
+// #define SMU11_DRIVER_IF_VERSION 0x13
#define PPTABLE_V20_SMU_VERSION 3
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_arcturus.h b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_arcturus.h
new file mode 100644
index 000000000000..e02950b505fa
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_arcturus.h
@@ -0,0 +1,891 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef SMU11_DRIVER_IF_ARCTURUS_H
+#define SMU11_DRIVER_IF_ARCTURUS_H
+
+// *** IMPORTANT ***
+// SMU TEAM: Always increment the interface version if
+// any structure is changed in this file
+//#define SMU11_DRIVER_IF_VERSION 0x09
+
+#define PPTABLE_ARCTURUS_SMU_VERSION 4
+
+#define NUM_GFXCLK_DPM_LEVELS 16
+#define NUM_VCLK_DPM_LEVELS 8
+#define NUM_DCLK_DPM_LEVELS 8
+#define NUM_MP0CLK_DPM_LEVELS 2
+#define NUM_SOCCLK_DPM_LEVELS 8
+#define NUM_UCLK_DPM_LEVELS 4
+#define NUM_FCLK_DPM_LEVELS 8
+#define NUM_XGMI_LEVELS 2
+#define NUM_XGMI_PSTATE_LEVELS 4
+
+#define MAX_GFXCLK_DPM_LEVEL (NUM_GFXCLK_DPM_LEVELS - 1)
+#define MAX_VCLK_DPM_LEVEL (NUM_VCLK_DPM_LEVELS - 1)
+#define MAX_DCLK_DPM_LEVEL (NUM_DCLK_DPM_LEVELS - 1)
+#define MAX_MP0CLK_DPM_LEVEL (NUM_MP0CLK_DPM_LEVELS - 1)
+#define MAX_SOCCLK_DPM_LEVEL (NUM_SOCCLK_DPM_LEVELS - 1)
+#define MAX_UCLK_DPM_LEVEL (NUM_UCLK_DPM_LEVELS - 1)
+#define MAX_FCLK_DPM_LEVEL (NUM_FCLK_DPM_LEVELS - 1)
+#define MAX_XGMI_LEVEL (NUM_XGMI_LEVELS - 1)
+#define MAX_XGMI_PSTATE_LEVEL (NUM_XGMI_PSTATE_LEVELS - 1)
+
+// Feature Control Defines
+// DPM
+#define FEATURE_DPM_PREFETCHER_BIT 0
+#define FEATURE_DPM_GFXCLK_BIT 1
+#define FEATURE_DPM_UCLK_BIT 2
+#define FEATURE_DPM_SOCCLK_BIT 3
+#define FEATURE_DPM_FCLK_BIT 4
+#define FEATURE_DPM_MP0CLK_BIT 5
+#define FEATURE_DPM_XGMI_BIT 6
+// Idle
+#define FEATURE_DS_GFXCLK_BIT 7
+#define FEATURE_DS_SOCCLK_BIT 8
+#define FEATURE_DS_LCLK_BIT 9
+#define FEATURE_DS_FCLK_BIT 10
+#define FEATURE_DS_UCLK_BIT 11
+#define FEATURE_GFX_ULV_BIT 12
+#define FEATURE_DPM_VCN_BIT 13
+#define FEATURE_RSMU_SMN_CG_BIT 14
+#define FEATURE_WAFL_CG_BIT 15
+// Throttler/Response
+#define FEATURE_PPT_BIT 16
+#define FEATURE_TDC_BIT 17
+#define FEATURE_APCC_PLUS_BIT 18
+#define FEATURE_VR0HOT_BIT 19
+#define FEATURE_VR1HOT_BIT 20
+#define FEATURE_FW_CTF_BIT 21
+#define FEATURE_FAN_CONTROL_BIT 22
+#define FEATURE_THERMAL_BIT 23
+// Other
+#define FEATURE_OUT_OF_BAND_MONITOR_BIT 24
+#define FEATURE_TEMP_DEPENDENT_VMIN_BIT 25
+
+#define FEATURE_SPARE_26_BIT 26
+#define FEATURE_SPARE_27_BIT 27
+#define FEATURE_SPARE_28_BIT 28
+#define FEATURE_SPARE_29_BIT 29
+#define FEATURE_SPARE_30_BIT 30
+#define FEATURE_SPARE_31_BIT 31
+#define FEATURE_SPARE_32_BIT 32
+#define FEATURE_SPARE_33_BIT 33
+#define FEATURE_SPARE_34_BIT 34
+#define FEATURE_SPARE_35_BIT 35
+#define FEATURE_SPARE_36_BIT 36
+#define FEATURE_SPARE_37_BIT 37
+#define FEATURE_SPARE_38_BIT 38
+#define FEATURE_SPARE_39_BIT 39
+#define FEATURE_SPARE_40_BIT 40
+#define FEATURE_SPARE_41_BIT 41
+#define FEATURE_SPARE_42_BIT 42
+#define FEATURE_SPARE_43_BIT 43
+#define FEATURE_SPARE_44_BIT 44
+#define FEATURE_SPARE_45_BIT 45
+#define FEATURE_SPARE_46_BIT 46
+#define FEATURE_SPARE_47_BIT 47
+#define FEATURE_SPARE_48_BIT 48
+#define FEATURE_SPARE_49_BIT 49
+#define FEATURE_SPARE_50_BIT 50
+#define FEATURE_SPARE_51_BIT 51
+#define FEATURE_SPARE_52_BIT 52
+#define FEATURE_SPARE_53_BIT 53
+#define FEATURE_SPARE_54_BIT 54
+#define FEATURE_SPARE_55_BIT 55
+#define FEATURE_SPARE_56_BIT 56
+#define FEATURE_SPARE_57_BIT 57
+#define FEATURE_SPARE_58_BIT 58
+#define FEATURE_SPARE_59_BIT 59
+#define FEATURE_SPARE_60_BIT 60
+#define FEATURE_SPARE_61_BIT 61
+#define FEATURE_SPARE_62_BIT 62
+#define FEATURE_SPARE_63_BIT 63
+
+#define NUM_FEATURES 64
+
+
+#define FEATURE_DPM_PREFETCHER_MASK (1 << FEATURE_DPM_PREFETCHER_BIT )
+#define FEATURE_DPM_GFXCLK_MASK (1 << FEATURE_DPM_GFXCLK_BIT )
+#define FEATURE_DPM_UCLK_MASK (1 << FEATURE_DPM_UCLK_BIT )
+#define FEATURE_DPM_SOCCLK_MASK (1 << FEATURE_DPM_SOCCLK_BIT )
+#define FEATURE_DPM_FCLK_MASK (1 << FEATURE_DPM_FCLK_BIT )
+#define FEATURE_DPM_MP0CLK_MASK (1 << FEATURE_DPM_MP0CLK_BIT )
+#define FEATURE_DPM_XGMI_MASK (1 << FEATURE_DPM_XGMI_BIT )
+
+#define FEATURE_DS_GFXCLK_MASK (1 << FEATURE_DS_GFXCLK_BIT )
+#define FEATURE_DS_SOCCLK_MASK (1 << FEATURE_DS_SOCCLK_BIT )
+#define FEATURE_DS_LCLK_MASK (1 << FEATURE_DS_LCLK_BIT )
+#define FEATURE_DS_FCLK_MASK (1 << FEATURE_DS_FCLK_BIT )
+#define FEATURE_DS_UCLK_MASK (1 << FEATURE_DS_UCLK_BIT )
+#define FEATURE_GFX_ULV_MASK (1 << FEATURE_GFX_ULV_BIT )
+#define FEATURE_DPM_VCN_MASK (1 << FEATURE_DPM_VCN_BIT )
+#define FEATURE_RSMU_SMN_CG_MASK (1 << FEATURE_RSMU_SMN_CG_BIT )
+#define FEATURE_WAFL_CG_MASK (1 << FEATURE_WAFL_CG_BIT )
+
+#define FEATURE_PPT_MASK (1 << FEATURE_PPT_BIT )
+#define FEATURE_TDC_MASK (1 << FEATURE_TDC_BIT )
+#define FEATURE_APCC_PLUS_MASK (1 << FEATURE_APCC_PLUS_BIT )
+#define FEATURE_VR0HOT_MASK (1 << FEATURE_VR0HOT_BIT )
+#define FEATURE_VR1HOT_MASK (1 << FEATURE_VR1HOT_BIT )
+#define FEATURE_FW_CTF_MASK (1 << FEATURE_FW_CTF_BIT )
+#define FEATURE_FAN_CONTROL_MASK (1 << FEATURE_FAN_CONTROL_BIT )
+#define FEATURE_THERMAL_MASK (1 << FEATURE_THERMAL_BIT )
+
+#define FEATURE_OUT_OF_BAND_MONITOR_MASK (1 << FEATURE_OUT_OF_BAND_MONITOR_BIT )
+#define FEATURE_TEMP_DEPENDENT_VMIN_MASK (1 << FEATURE_TEMP_DEPENDENT_VMIN_BIT )
+
+
+//FIXME need updating
+// Debug Overrides Bitmask
+#define DPM_OVERRIDE_DISABLE_UCLK_PID 0x00000001
+#define DPM_OVERRIDE_ENABLE_VOLT_LINK_VCN_FCLK 0x00000002
+
+// I2C Config Bit Defines
+#define I2C_CONTROLLER_ENABLED 1
+#define I2C_CONTROLLER_DISABLED 0
+
+// VR Mapping Bit Defines
+#define VR_MAPPING_VR_SELECT_MASK 0x01
+#define VR_MAPPING_VR_SELECT_SHIFT 0x00
+
+#define VR_MAPPING_PLANE_SELECT_MASK 0x02
+#define VR_MAPPING_PLANE_SELECT_SHIFT 0x01
+
+// PSI Bit Defines
+#define PSI_SEL_VR0_PLANE0_PSI0 0x01
+#define PSI_SEL_VR0_PLANE0_PSI1 0x02
+#define PSI_SEL_VR0_PLANE1_PSI0 0x04
+#define PSI_SEL_VR0_PLANE1_PSI1 0x08
+#define PSI_SEL_VR1_PLANE0_PSI0 0x10
+#define PSI_SEL_VR1_PLANE0_PSI1 0x20
+#define PSI_SEL_VR1_PLANE1_PSI0 0x40
+#define PSI_SEL_VR1_PLANE1_PSI1 0x80
+
+// Throttler Control/Status Bits
+#define THROTTLER_PADDING_BIT 0
+#define THROTTLER_TEMP_EDGE_BIT 1
+#define THROTTLER_TEMP_HOTSPOT_BIT 2
+#define THROTTLER_TEMP_MEM_BIT 3
+#define THROTTLER_TEMP_VR_GFX_BIT 4
+#define THROTTLER_TEMP_VR_MEM_BIT 5
+#define THROTTLER_TEMP_VR_SOC_BIT 6
+#define THROTTLER_TDC_GFX_BIT 7
+#define THROTTLER_TDC_SOC_BIT 8
+#define THROTTLER_PPT0_BIT 9
+#define THROTTLER_PPT1_BIT 10
+#define THROTTLER_PPT2_BIT 11
+#define THROTTLER_PPT3_BIT 12
+#define THROTTLER_PPM_BIT 13
+#define THROTTLER_FIT_BIT 14
+#define THROTTLER_APCC_BIT 15
+
+// Table transfer status
+#define TABLE_TRANSFER_OK 0x0
+#define TABLE_TRANSFER_FAILED 0xFF
+#define TABLE_TRANSFER_PENDING 0xAB
+
+// Workload bits
+#define WORKLOAD_PPLIB_DEFAULT_BIT 0
+#define WORKLOAD_PPLIB_POWER_SAVING_BIT 1
+#define WORKLOAD_PPLIB_VIDEO_BIT 2
+#define WORKLOAD_PPLIB_COMPUTE_BIT 3
+#define WORKLOAD_PPLIB_CUSTOM_BIT 4
+#define WORKLOAD_PPLIB_COUNT 5
+
+//XGMI performance states
+#define XGMI_STATE_D0 1
+#define XGMI_STATE_D3 0
+
+#define NUM_I2C_CONTROLLERS 8
+
+#define I2C_CONTROLLER_ENABLED 1
+#define I2C_CONTROLLER_DISABLED 0
+
+#define MAX_SW_I2C_COMMANDS 8
+
+typedef enum {
+ I2C_CONTROLLER_PORT_0 = 0, //CKSVII2C0
+ I2C_CONTROLLER_PORT_1 = 1, //CKSVII2C1
+ I2C_CONTROLLER_PORT_COUNT,
+} I2cControllerPort_e;
+
+typedef enum {
+ I2C_CONTROLLER_NAME_VR_GFX = 0,
+ I2C_CONTROLLER_NAME_VR_SOC,
+ I2C_CONTROLLER_NAME_VR_MEM,
+ I2C_CONTROLLER_NAME_SPARE,
+ I2C_CONTROLLER_NAME_COUNT,
+} I2cControllerName_e;
+
+typedef enum {
+ I2C_CONTROLLER_THROTTLER_TYPE_NONE = 0,
+ I2C_CONTROLLER_THROTTLER_VR_GFX,
+ I2C_CONTROLLER_THROTTLER_VR_SOC,
+ I2C_CONTROLLER_THROTTLER_VR_MEM,
+ I2C_CONTROLLER_THROTTLER_COUNT,
+} I2cControllerThrottler_e;
+
+typedef enum {
+ I2C_CONTROLLER_PROTOCOL_VR_0,
+ I2C_CONTROLLER_PROTOCOL_VR_1,
+ I2C_CONTROLLER_PROTOCOL_TMP_0,
+ I2C_CONTROLLER_PROTOCOL_TMP_1,
+ I2C_CONTROLLER_PROTOCOL_SPARE_0,
+ I2C_CONTROLLER_PROTOCOL_SPARE_1,
+ I2C_CONTROLLER_PROTOCOL_COUNT,
+} I2cControllerProtocol_e;
+
+typedef struct {
+ uint8_t Enabled;
+ uint8_t Speed;
+ uint8_t Padding[2];
+ uint32_t SlaveAddress;
+ uint8_t ControllerPort;
+ uint8_t ControllerName;
+ uint8_t ThermalThrotter;
+ uint8_t I2cProtocol;
+} I2cControllerConfig_t;
+
+typedef enum {
+ I2C_PORT_SVD_SCL = 0,
+ I2C_PORT_GPIO,
+} I2cPort_e;
+
+typedef enum {
+ I2C_SPEED_FAST_50K = 0, //50 Kbits/s
+ I2C_SPEED_FAST_100K, //100 Kbits/s
+ I2C_SPEED_FAST_400K, //400 Kbits/s
+ I2C_SPEED_FAST_PLUS_1M, //1 Mbits/s (in fast mode)
+ I2C_SPEED_HIGH_1M, //1 Mbits/s (in high speed mode)
+ I2C_SPEED_HIGH_2M, //2.3 Mbits/s
+ I2C_SPEED_COUNT,
+} I2cSpeed_e;
+
+typedef enum {
+ I2C_CMD_READ = 0,
+ I2C_CMD_WRITE,
+ I2C_CMD_COUNT,
+} I2cCmdType_e;
+
+#define CMDCONFIG_STOP_BIT 0
+#define CMDCONFIG_RESTART_BIT 1
+
+#define CMDCONFIG_STOP_MASK (1 << CMDCONFIG_STOP_BIT)
+#define CMDCONFIG_RESTART_MASK (1 << CMDCONFIG_RESTART_BIT)
+
+typedef struct {
+ uint8_t RegisterAddr; ////only valid for write, ignored for read
+ uint8_t Cmd; //Read(0) or Write(1)
+ uint8_t Data; //Return data for read. Data to send for write
+ uint8_t CmdConfig; //Includes whether associated command should have a stop or restart command
+} SwI2cCmd_t; //SW I2C Command Table
+
+typedef struct {
+ uint8_t I2CcontrollerPort; //CKSVII2C0(0) or //CKSVII2C1(1)
+ uint8_t I2CSpeed; //Slow(0) or Fast(1)
+ uint16_t SlaveAddress;
+ uint8_t NumCmds; //Number of commands
+ uint8_t Padding[3];
+
+ SwI2cCmd_t SwI2cCmds[MAX_SW_I2C_COMMANDS];
+
+ uint32_t MmHubPadding[8]; // SMU internal use
+
+} SwI2cRequest_t; // SW I2C Request Table
+
+//D3HOT sequences
+typedef enum {
+ BACO_SEQUENCE,
+ MSR_SEQUENCE,
+ BAMACO_SEQUENCE,
+ ULPS_SEQUENCE,
+ D3HOT_SEQUENCE_COUNT,
+}D3HOTSequence_e;
+
+//This is aligned with RSMU PGFSM Register Mapping
+typedef enum {
+ PG_DYNAMIC_MODE = 0,
+ PG_STATIC_MODE,
+} PowerGatingMode_e;
+
+//This is aligned with RSMU PGFSM Register Mapping
+typedef enum {
+ PG_POWER_DOWN = 0,
+ PG_POWER_UP,
+} PowerGatingSettings_e;
+
+typedef struct {
+ uint32_t a; // store in IEEE float format in this variable
+ uint32_t b; // store in IEEE float format in this variable
+ uint32_t c; // store in IEEE float format in this variable
+} QuadraticInt_t;
+
+typedef struct {
+ uint32_t m; // store in IEEE float format in this variable
+ uint32_t b; // store in IEEE float format in this variable
+} LinearInt_t;
+
+typedef struct {
+ uint32_t a; // store in IEEE float format in this variable
+ uint32_t b; // store in IEEE float format in this variable
+ uint32_t c; // store in IEEE float format in this variable
+} DroopInt_t;
+
+typedef enum {
+ GFXCLK_SOURCE_PLL = 0,
+ GFXCLK_SOURCE_AFLL,
+ GFXCLK_SOURCE_COUNT,
+} GfxclkSrc_e;
+
+typedef enum {
+ PPCLK_GFXCLK,
+ PPCLK_VCLK,
+ PPCLK_DCLK,
+ PPCLK_SOCCLK,
+ PPCLK_UCLK,
+ PPCLK_FCLK,
+ PPCLK_COUNT,
+} PPCLK_e;
+
+typedef enum {
+ POWER_SOURCE_AC,
+ POWER_SOURCE_DC,
+ POWER_SOURCE_COUNT,
+} POWER_SOURCE_e;
+
+typedef enum {
+ TEMP_EDGE,
+ TEMP_HOTSPOT,
+ TEMP_MEM,
+ TEMP_VR_GFX,
+ TEMP_VR_SOC,
+ TEMP_VR_MEM,
+ TEMP_COUNT
+} TEMP_TYPE_e;
+
+typedef enum {
+ PPT_THROTTLER_PPT0,
+ PPT_THROTTLER_PPT1,
+ PPT_THROTTLER_PPT2,
+ PPT_THROTTLER_PPT3,
+ PPT_THROTTLER_COUNT
+} PPT_THROTTLER_e;
+
+typedef enum {
+ VOLTAGE_MODE_AVFS = 0,
+ VOLTAGE_MODE_AVFS_SS,
+ VOLTAGE_MODE_SS,
+ VOLTAGE_MODE_COUNT,
+} VOLTAGE_MODE_e;
+
+typedef enum {
+ AVFS_VOLTAGE_GFX = 0,
+ AVFS_VOLTAGE_SOC,
+ AVFS_VOLTAGE_COUNT,
+} AVFS_VOLTAGE_TYPE_e;
+
+typedef enum {
+ GPIO_INT_POLARITY_ACTIVE_LOW = 0,
+ GPIO_INT_POLARITY_ACTIVE_HIGH,
+} GpioIntPolarity_e;
+
+typedef enum {
+ MEMORY_TYPE_GDDR6 = 0,
+ MEMORY_TYPE_HBM,
+} MemoryType_e;
+
+typedef enum {
+ PWR_CONFIG_TDP = 0,
+ PWR_CONFIG_TGP,
+ PWR_CONFIG_TCP_ESTIMATED,
+ PWR_CONFIG_TCP_MEASURED,
+} PwrConfig_e;
+
+typedef enum {
+ XGMI_LINK_RATE_12 = 0, // 12Gbps
+ XGMI_LINK_RATE_16, // 16Gbps
+ XGMI_LINK_RATE_22, // 22Gbps
+ XGMI_LINK_RATE_25, // 25Gbps
+ XGMI_LINK_RATE_COUNT
+} XGMI_LINK_RATE_e;
+
+typedef enum {
+ XGMI_LINK_WIDTH_2 = 0, // x2
+ XGMI_LINK_WIDTH_4, // x4
+ XGMI_LINK_WIDTH_8, // x8
+ XGMI_LINK_WIDTH_16, // x16
+ XGMI_LINK_WIDTH_COUNT
+} XGMI_LINK_WIDTH_e;
+
+typedef struct {
+ uint8_t VoltageMode; // 0 - AVFS only, 1- min(AVFS,SS), 2-SS only
+ uint8_t SnapToDiscrete; // 0 - Fine grained DPM, 1 - Discrete DPM
+ uint8_t NumDiscreteLevels; // Set to 2 (Fmin, Fmax) when using fine grained DPM, otherwise set to # discrete levels used
+ uint8_t padding;
+ LinearInt_t ConversionToAvfsClk; // Transfer function to AVFS Clock (GHz->GHz)
+ QuadraticInt_t SsCurve; // Slow-slow curve (GHz->V)
+ uint16_t SsFmin; // Fmin for SS curve. If SS curve is selected, will use V@SSFmin for F <= Fmin
+ uint16_t Padding16;
+} DpmDescriptor_t;
+
+typedef struct {
+ uint32_t Version;
+
+ // SECTION: Feature Enablement
+ uint32_t FeaturesToRun[2];
+
+ // SECTION: Infrastructure Limits
+ uint16_t SocketPowerLimitAc[PPT_THROTTLER_COUNT];
+ uint16_t SocketPowerLimitAcTau[PPT_THROTTLER_COUNT];
+ uint16_t TdcLimitSoc; // Amps
+ uint16_t TdcLimitSocTau; // Time constant of LPF in ms
+ uint16_t TdcLimitGfx; // Amps
+ uint16_t TdcLimitGfxTau; // Time constant of LPF in ms
+
+ uint16_t TedgeLimit; // Celsius
+ uint16_t ThotspotLimit; // Celsius
+ uint16_t TmemLimit; // Celsius
+ uint16_t Tvr_gfxLimit; // Celsius
+ uint16_t Tvr_memLimit; // Celsius
+ uint16_t Tvr_socLimit; // Celsius
+ uint32_t FitLimit; // Failures in time (failures per million parts over the defined lifetime)
+
+ uint16_t PpmPowerLimit; // Switch to this power limit when temperature is above PpmTemperatureThreshold
+ uint16_t PpmTemperatureThreshold;
+
+ // SECTION: Throttler settings
+ uint32_t ThrottlerControlMask; // See Throttler mask defines
+
+ // SECTION: ULV Settings
+ uint16_t UlvVoltageOffsetGfx; // In mV(Q2)
+ uint16_t UlvPadding; // Padding
+
+ uint8_t UlvGfxclkBypass; // 1 to turn off/bypass Gfxclk during ULV, 0 to leave Gfxclk on during ULV
+ uint8_t Padding234[3];
+
+ // SECTION: Voltage Control Parameters
+ uint16_t MinVoltageGfx; // In mV(Q2) Minimum Voltage ("Vmin") of VDD_GFX
+ uint16_t MinVoltageSoc; // In mV(Q2) Minimum Voltage ("Vmin") of VDD_SOC
+ uint16_t MaxVoltageGfx; // In mV(Q2) Maximum Voltage allowable of VDD_GFX
+ uint16_t MaxVoltageSoc; // In mV(Q2) Maximum Voltage allowable of VDD_SOC
+
+ uint16_t LoadLineResistanceGfx; // In mOhms with 8 fractional bits
+ uint16_t LoadLineResistanceSoc; // In mOhms with 8 fractional bits
+
+ //SECTION: DPM Config 1
+ DpmDescriptor_t DpmDescriptor[PPCLK_COUNT];
+
+ uint16_t FreqTableGfx [NUM_GFXCLK_DPM_LEVELS ]; // In MHz
+ uint16_t FreqTableVclk [NUM_VCLK_DPM_LEVELS ]; // In MHz
+ uint16_t FreqTableDclk [NUM_DCLK_DPM_LEVELS ]; // In MHz
+ uint16_t FreqTableSocclk [NUM_SOCCLK_DPM_LEVELS ]; // In MHz
+ uint16_t FreqTableUclk [NUM_UCLK_DPM_LEVELS ]; // In MHz
+ uint16_t FreqTableFclk [NUM_FCLK_DPM_LEVELS ]; // In MHz
+
+ uint32_t Paddingclks[16];
+
+ // SECTION: DPM Config 2
+ uint16_t Mp0clkFreq [NUM_MP0CLK_DPM_LEVELS]; // in MHz
+ uint16_t Mp0DpmVoltage [NUM_MP0CLK_DPM_LEVELS]; // mV(Q2)
+
+ // GFXCLK DPM
+ uint16_t GfxclkFidle; // In MHz
+ uint16_t GfxclkSlewRate; // for PLL babystepping???
+ uint8_t Padding567[4];
+ uint16_t GfxclkDsMaxFreq; // In MHz
+ uint8_t GfxclkSource; // 0 = PLL, 1 = AFLL
+ uint8_t Padding456;
+
+ // GFXCLK Thermal DPM (formerly 'Boost' Settings)
+ uint16_t EnableTdpm;
+ uint16_t TdpmHighHystTemperature;
+ uint16_t TdpmLowHystTemperature;
+ uint16_t GfxclkFreqHighTempLimit; // High limit on GFXCLK when temperature is high, for reliability.
+
+ // SECTION: Fan Control
+ uint16_t FanStopTemp; //Celsius
+ uint16_t FanStartTemp; //Celsius
+
+ uint16_t FanGainEdge;
+ uint16_t FanGainHotspot;
+ uint16_t FanGainVrGfx;
+ uint16_t FanGainVrSoc;
+ uint16_t FanGainVrMem;
+ uint16_t FanGainHbm;
+ uint16_t FanPwmMin;
+ uint16_t FanAcousticLimitRpm;
+ uint16_t FanThrottlingRpm;
+ uint16_t FanMaximumRpm;
+ uint16_t FanTargetTemperature;
+ uint16_t FanTargetGfxclk;
+ uint8_t FanZeroRpmEnable;
+ uint8_t FanTachEdgePerRev;
+ uint8_t FanTempInputSelect;
+ uint8_t padding8_Fan;
+
+ // The following are AFC override parameters. Leave at 0 to use FW defaults.
+ int16_t FuzzyFan_ErrorSetDelta;
+ int16_t FuzzyFan_ErrorRateSetDelta;
+ int16_t FuzzyFan_PwmSetDelta;
+ uint16_t FuzzyFan_Reserved;
+
+
+ // SECTION: AVFS
+ // Overrides
+ uint8_t OverrideAvfsGb[AVFS_VOLTAGE_COUNT];
+ uint8_t Padding8_Avfs[2];
+
+ QuadraticInt_t qAvfsGb[AVFS_VOLTAGE_COUNT]; // GHz->V Override of fused curve
+ DroopInt_t dBtcGbGfxPll; // GHz->V BtcGb
+ DroopInt_t dBtcGbGfxAfll; // GHz->V BtcGb
+ DroopInt_t dBtcGbSoc; // GHz->V BtcGb
+ LinearInt_t qAgingGb[AVFS_VOLTAGE_COUNT]; // GHz->V
+
+ QuadraticInt_t qStaticVoltageOffset[AVFS_VOLTAGE_COUNT]; // GHz->V
+
+ uint16_t DcTol[AVFS_VOLTAGE_COUNT]; // mV Q2
+
+ uint8_t DcBtcEnabled[AVFS_VOLTAGE_COUNT];
+ uint8_t Padding8_GfxBtc[2];
+
+ uint16_t DcBtcMin[AVFS_VOLTAGE_COUNT]; // mV Q2
+ uint16_t DcBtcMax[AVFS_VOLTAGE_COUNT]; // mV Q2
+
+ uint16_t DcBtcGb[AVFS_VOLTAGE_COUNT]; // mV Q2
+
+ // SECTION: XGMI
+ uint8_t XgmiDpmPstates[NUM_XGMI_LEVELS]; // 2 DPM states, high and low. 0-P0, 1-P1, 2-P2, 3-P3.
+ uint8_t XgmiDpmSpare[2];
+
+ // Temperature Dependent Vmin
+ uint16_t VDDGFX_TVmin; //Celsius
+ uint16_t VDDSOC_TVmin; //Celsius
+ uint16_t VDDGFX_Vmin_HiTemp; // mV Q2
+ uint16_t VDDGFX_Vmin_LoTemp; // mV Q2
+ uint16_t VDDSOC_Vmin_HiTemp; // mV Q2
+ uint16_t VDDSOC_Vmin_LoTemp; // mV Q2
+
+ uint16_t VDDGFX_TVminHystersis; // Celsius
+ uint16_t VDDSOC_TVminHystersis; // Celsius
+
+
+ // SECTION: Advanced Options
+ uint32_t DebugOverrides;
+ QuadraticInt_t ReservedEquation0;
+ QuadraticInt_t ReservedEquation1;
+ QuadraticInt_t ReservedEquation2;
+ QuadraticInt_t ReservedEquation3;
+
+ uint16_t MinVoltageUlvGfx; // In mV(Q2) Minimum Voltage ("Vmin") of VDD_GFX in ULV mode
+ uint16_t PaddingUlv; // Padding
+
+ // Total Power configuration, use defines from PwrConfig_e
+ uint8_t TotalPowerConfig; //0-TDP, 1-TGP, 2-TCP Estimated, 3-TCP Measured
+ uint8_t TotalPowerSpare1;
+ uint16_t TotalPowerSpare2;
+
+ // APCC Settings
+ uint16_t PccThresholdLow;
+ uint16_t PccThresholdHigh;
+ uint32_t PaddingAPCC[6]; //FIXME pending SPEC
+
+ // SECTION: Reserved
+ uint32_t Reserved[11];
+
+ // SECTION: BOARD PARAMETERS
+
+ // SVI2 Board Parameters
+ uint16_t MaxVoltageStepGfx; // In mV(Q2) Max voltage step that SMU will request. Multiple steps are taken if voltage change exceeds this value.
+ uint16_t MaxVoltageStepSoc; // In mV(Q2) Max voltage step that SMU will request. Multiple steps are taken if voltage change exceeds this value.
+
+ uint8_t VddGfxVrMapping; // Use VR_MAPPING* bitfields
+ uint8_t VddSocVrMapping; // Use VR_MAPPING* bitfields
+ uint8_t VddMemVrMapping; // Use VR_MAPPING* bitfields
+ uint8_t BoardVrMapping; // Use VR_MAPPING* bitfields
+
+ uint8_t GfxUlvPhaseSheddingMask; // set this to 1 to set PSI0/1 to 1 in ULV mode
+ uint8_t ExternalSensorPresent; // External RDI connected to TMON (aka TEMP IN)
+ uint8_t Padding8_V[2];
+
+ // Telemetry Settings
+ uint16_t GfxMaxCurrent; // in Amps
+ int8_t GfxOffset; // in Amps
+ uint8_t Padding_TelemetryGfx;
+
+ uint16_t SocMaxCurrent; // in Amps
+ int8_t SocOffset; // in Amps
+ uint8_t Padding_TelemetrySoc;
+
+ uint16_t MemMaxCurrent; // in Amps
+ int8_t MemOffset; // in Amps
+ uint8_t Padding_TelemetryMem;
+
+ uint16_t BoardMaxCurrent; // in Amps
+ int8_t BoardOffset; // in Amps
+ uint8_t Padding_TelemetryBoardInput;
+
+ // GPIO Settings
+ uint8_t VR0HotGpio; // GPIO pin configured for VR0 HOT event
+ uint8_t VR0HotPolarity; // GPIO polarity for VR0 HOT event
+ uint8_t VR1HotGpio; // GPIO pin configured for VR1 HOT event
+ uint8_t VR1HotPolarity; // GPIO polarity for VR1 HOT event
+
+ // GFXCLK PLL Spread Spectrum
+ uint8_t PllGfxclkSpreadEnabled; // on or off
+ uint8_t PllGfxclkSpreadPercent; // Q4.4
+ uint16_t PllGfxclkSpreadFreq; // kHz
+
+ // UCLK Spread Spectrum
+ uint8_t UclkSpreadEnabled; // on or off
+ uint8_t UclkSpreadPercent; // Q4.4
+ uint16_t UclkSpreadFreq; // kHz
+
+ // FCLK Spread Spectrum
+ uint8_t FclkSpreadEnabled; // on or off
+ uint8_t FclkSpreadPercent; // Q4.4
+ uint16_t FclkSpreadFreq; // kHz
+
+ // GFXCLK Fll Spread Spectrum
+ uint8_t FllGfxclkSpreadEnabled; // on or off
+ uint8_t FllGfxclkSpreadPercent; // Q4.4
+ uint16_t FllGfxclkSpreadFreq; // kHz
+
+ // I2C Controller Structure
+ I2cControllerConfig_t I2cControllers[NUM_I2C_CONTROLLERS];
+
+ // Memory section
+ uint32_t MemoryChannelEnabled; // For DRAM use only, Max 32 channels enabled bit mask.
+
+ uint8_t DramBitWidth; // For DRAM use only. See Dram Bit width type defines
+ uint8_t PaddingMem[3];
+
+ // Total board power
+ uint16_t TotalBoardPower; //Only needed for TCP Estimated case, where TCP = TGP+Total Board Power
+ uint16_t BoardPadding;
+
+ // SECTION: XGMI Training
+ uint8_t XgmiLinkSpeed [NUM_XGMI_PSTATE_LEVELS];
+ uint8_t XgmiLinkWidth [NUM_XGMI_PSTATE_LEVELS];
+
+ uint16_t XgmiFclkFreq [NUM_XGMI_PSTATE_LEVELS];
+ uint16_t XgmiSocVoltage [NUM_XGMI_PSTATE_LEVELS];
+
+ // GPIO pins for I2C communications with 2nd controller for Input Telemetry Sequence
+ uint8_t GpioI2cScl; // Serial Clock
+ uint8_t GpioI2cSda; // Serial Data
+ uint16_t GpioPadding;
+
+ uint32_t BoardReserved[9];
+
+ // Padding for MMHUB - do not modify this
+ uint32_t MmHubPadding[8]; // SMU internal use
+
+} PPTable_t;
+
+typedef struct {
+ // Time constant parameters for clock averages in ms
+ uint16_t GfxclkAverageLpfTau;
+ uint16_t SocclkAverageLpfTau;
+ uint16_t UclkAverageLpfTau;
+ uint16_t GfxActivityLpfTau;
+ uint16_t UclkActivityLpfTau;
+
+ uint16_t SocketPowerLpfTau;
+
+ // Padding - ignore
+ uint32_t MmHubPadding[8]; // SMU internal use
+} DriverSmuConfig_t;
+
+typedef struct {
+ uint16_t CurrClock[PPCLK_COUNT];
+ uint16_t AverageGfxclkFrequency;
+ uint16_t AverageSocclkFrequency;
+ uint16_t AverageUclkFrequency ;
+ uint16_t AverageGfxActivity ;
+ uint16_t AverageUclkActivity ;
+ uint8_t CurrSocVoltageOffset ;
+ uint8_t CurrGfxVoltageOffset ;
+ uint8_t CurrMemVidOffset ;
+ uint8_t Padding8 ;
+ uint16_t AverageSocketPower ;
+ uint16_t TemperatureEdge ;
+ uint16_t TemperatureHotspot ;
+ uint16_t TemperatureHBM ;
+ uint16_t TemperatureVrGfx ;
+ uint16_t TemperatureVrSoc ;
+ uint16_t TemperatureVrMem ;
+ uint32_t ThrottlerStatus ;
+
+ uint16_t CurrFanSpeed ;
+ uint16_t Padding16;
+
+ uint32_t Padding[4];
+
+ // Padding - ignore
+ uint32_t MmHubPadding[8]; // SMU internal use
+} SmuMetrics_t;
+
+
+typedef struct {
+ uint16_t avgPsmCount[75];
+ uint16_t minPsmCount[75];
+ float avgPsmVoltage[75];
+ float minPsmVoltage[75];
+
+ uint32_t MmHubPadding[8]; // SMU internal use
+} AvfsDebugTable_t;
+
+typedef struct {
+ uint8_t AvfsVersion;
+ uint8_t Padding;
+ uint8_t AvfsEn[AVFS_VOLTAGE_COUNT];
+
+ uint8_t OverrideVFT[AVFS_VOLTAGE_COUNT];
+ uint8_t OverrideAvfsGb[AVFS_VOLTAGE_COUNT];
+
+ uint8_t OverrideTemperatures[AVFS_VOLTAGE_COUNT];
+ uint8_t OverrideVInversion[AVFS_VOLTAGE_COUNT];
+ uint8_t OverrideP2V[AVFS_VOLTAGE_COUNT];
+ uint8_t OverrideP2VCharzFreq[AVFS_VOLTAGE_COUNT];
+
+ int32_t VFT0_m1[AVFS_VOLTAGE_COUNT]; // Q8.24
+ int32_t VFT0_m2[AVFS_VOLTAGE_COUNT]; // Q12.12
+ int32_t VFT0_b[AVFS_VOLTAGE_COUNT]; // Q32
+
+ int32_t VFT1_m1[AVFS_VOLTAGE_COUNT]; // Q8.16
+ int32_t VFT1_m2[AVFS_VOLTAGE_COUNT]; // Q12.12
+ int32_t VFT1_b[AVFS_VOLTAGE_COUNT]; // Q32
+
+ int32_t VFT2_m1[AVFS_VOLTAGE_COUNT]; // Q8.16
+ int32_t VFT2_m2[AVFS_VOLTAGE_COUNT]; // Q12.12
+ int32_t VFT2_b[AVFS_VOLTAGE_COUNT]; // Q32
+
+ int32_t AvfsGb0_m1[AVFS_VOLTAGE_COUNT]; // Q8.24
+ int32_t AvfsGb0_m2[AVFS_VOLTAGE_COUNT]; // Q12.12
+ int32_t AvfsGb0_b[AVFS_VOLTAGE_COUNT]; // Q32
+
+ int32_t AcBtcGb_m1[AVFS_VOLTAGE_COUNT]; // Q8.24
+ int32_t AcBtcGb_m2[AVFS_VOLTAGE_COUNT]; // Q12.12
+ int32_t AcBtcGb_b[AVFS_VOLTAGE_COUNT]; // Q32
+
+ uint32_t AvfsTempCold[AVFS_VOLTAGE_COUNT];
+ uint32_t AvfsTempMid[AVFS_VOLTAGE_COUNT];
+ uint32_t AvfsTempHot[AVFS_VOLTAGE_COUNT];
+
+ uint32_t VInversion[AVFS_VOLTAGE_COUNT]; // in mV with 2 fractional bits
+
+
+ int32_t P2V_m1[AVFS_VOLTAGE_COUNT]; // Q8.24
+ int32_t P2V_m2[AVFS_VOLTAGE_COUNT]; // Q12.12
+ int32_t P2V_b[AVFS_VOLTAGE_COUNT]; // Q32
+
+ uint32_t P2VCharzFreq[AVFS_VOLTAGE_COUNT]; // in 10KHz units
+
+ uint32_t EnabledAvfsModules[2];
+
+ uint32_t MmHubPadding[8]; // SMU internal use
+} AvfsFuseOverride_t;
+
+/* NOT CURRENTLY USED
+typedef struct {
+ uint8_t Gfx_ActiveHystLimit;
+ uint8_t Gfx_IdleHystLimit;
+ uint8_t Gfx_FPS;
+ uint8_t Gfx_MinActiveFreqType;
+ uint8_t Gfx_BoosterFreqType;
+ uint8_t Gfx_MinFreqStep; // Minimum delta between current and target frequency in order for FW to change clock.
+ uint8_t Gfx_UseRlcBusy;
+ uint8_t PaddingGfx[3];
+ uint16_t Gfx_MinActiveFreq; // MHz
+ uint16_t Gfx_BoosterFreq; // MHz
+ uint16_t Gfx_PD_Data_time_constant; // Time constant of PD controller in ms
+ uint32_t Gfx_PD_Data_limit_a; // Q16
+ uint32_t Gfx_PD_Data_limit_b; // Q16
+ uint32_t Gfx_PD_Data_limit_c; // Q16
+ uint32_t Gfx_PD_Data_error_coeff; // Q16
+ uint32_t Gfx_PD_Data_error_rate_coeff; // Q16
+
+ uint8_t Mem_ActiveHystLimit;
+ uint8_t Mem_IdleHystLimit;
+ uint8_t Mem_FPS;
+ uint8_t Mem_MinActiveFreqType;
+ uint8_t Mem_BoosterFreqType;
+ uint8_t Mem_MinFreqStep; // Minimum delta between current and target frequency in order for FW to change clock.
+ uint8_t Mem_UseRlcBusy;
+ uint8_t PaddingMem[3];
+ uint16_t Mem_MinActiveFreq; // MHz
+ uint16_t Mem_BoosterFreq; // MHz
+ uint16_t Mem_PD_Data_time_constant; // Time constant of PD controller in ms
+ uint32_t Mem_PD_Data_limit_a; // Q16
+ uint32_t Mem_PD_Data_limit_b; // Q16
+ uint32_t Mem_PD_Data_limit_c; // Q16
+ uint32_t Mem_PD_Data_error_coeff; // Q16
+ uint32_t Mem_PD_Data_error_rate_coeff; // Q16
+
+ uint32_t Mem_UpThreshold_Limit; // Q16
+ uint8_t Mem_UpHystLimit;
+ uint8_t Mem_DownHystLimit;
+ uint16_t Mem_Fps;
+
+ uint32_t MmHubPadding[8]; // SMU internal use
+} DpmActivityMonitorCoeffInt_t;
+*/
+
+// These defines are used with the following messages:
+// SMC_MSG_TransferTableDram2Smu
+// SMC_MSG_TransferTableSmu2Dram
+#define TABLE_PPTABLE 0
+#define TABLE_AVFS 1
+#define TABLE_AVFS_PSM_DEBUG 2
+#define TABLE_AVFS_FUSE_OVERRIDE 3
+#define TABLE_PMSTATUSLOG 4
+#define TABLE_SMU_METRICS 5
+#define TABLE_DRIVER_SMU_CONFIG 6
+//#define TABLE_ACTIVITY_MONITOR_COEFF 7
+#define TABLE_OVERDRIVE 7
+#define TABLE_WAFL_XGMI_TOPOLOGY 8
+#define TABLE_COUNT 9
+
+// These defines are used with the SMC_MSG_SetUclkFastSwitch message.
+typedef enum {
+ DF_SWITCH_TYPE_FAST = 0,
+ DF_SWITCH_TYPE_SLOW,
+ DF_SWITCH_TYPE_COUNT,
+} DF_SWITCH_TYPE_e;
+
+typedef enum {
+ DRAM_BIT_WIDTH_DISABLED = 0,
+ DRAM_BIT_WIDTH_X_8,
+ DRAM_BIT_WIDTH_X_16,
+ DRAM_BIT_WIDTH_X_32,
+ DRAM_BIT_WIDTH_X_64, // NOT USED.
+ DRAM_BIT_WIDTH_X_128,
+ DRAM_BIT_WIDTH_COUNT,
+} DRAM_BIT_WIDTH_TYPE_e;
+
+#define REMOVE_FMAX_MARGIN_BIT 0x0
+#define REMOVE_DCTOL_MARGIN_BIT 0x1
+#define REMOVE_PLATFORM_MARGIN_BIT 0x2
+
+#endif
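
The SW I2C definitions above describe how a transaction is handed to the SMU firmware: one SwI2cRequest_t carrying up to MAX_SW_I2C_COMMANDS SwI2cCmd_t entries, where the final command should carry the STOP flag in CmdConfig. The following self-contained sketch builds a two-byte read request under that reading of the header; copying the table to the SMU and issuing the transfer message are omitted, and build_read_request() is an invented helper, not the driver's.

#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define MAX_SW_I2C_COMMANDS 8
#define CMDCONFIG_STOP_BIT   0
#define CMDCONFIG_STOP_MASK  (1 << CMDCONFIG_STOP_BIT)

enum { I2C_CMD_READ = 0, I2C_CMD_WRITE };

typedef struct {
	uint8_t RegisterAddr;	/* only valid for write, ignored for read */
	uint8_t Cmd;		/* Read(0) or Write(1) */
	uint8_t Data;		/* returned for read, payload for write */
	uint8_t CmdConfig;	/* STOP/RESTART flags */
} SwI2cCmd_t;

typedef struct {
	uint8_t  I2CcontrollerPort;
	uint8_t  I2CSpeed;
	uint16_t SlaveAddress;
	uint8_t  NumCmds;
	uint8_t  Padding[3];
	SwI2cCmd_t SwI2cCmds[MAX_SW_I2C_COMMANDS];
	uint32_t MmHubPadding[8];	/* SMU internal use */
} SwI2cRequest_t;

static void build_read_request(SwI2cRequest_t *req, uint16_t addr, uint8_t len)
{
	memset(req, 0, sizeof(*req));
	req->I2CcontrollerPort = 0;	/* CKSVII2C0 */
	req->SlaveAddress = addr;
	req->NumCmds = len;
	for (uint8_t i = 0; i < len && i < MAX_SW_I2C_COMMANDS; i++) {
		req->SwI2cCmds[i].Cmd = I2C_CMD_READ;
		if (i == len - 1)	/* terminate the transaction */
			req->SwI2cCmds[i].CmdConfig |= CMDCONFIG_STOP_MASK;
	}
}

int main(void)
{
	SwI2cRequest_t req;

	build_read_request(&req, 0x50, 2);
	printf("cmds=%d stop_on_last=%d\n", req.NumCmds,
	       !!(req.SwI2cCmds[1].CmdConfig & CMDCONFIG_STOP_MASK));
	return 0;
}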
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_navi10.h b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_navi10.h
index adbbfebbb1e5..ac0120e384be 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_navi10.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_navi10.h
@@ -26,7 +26,9 @@
// *** IMPORTANT ***
// SMU TEAM: Always increment the interface version if
// any structure is changed in this file
-#define SMU11_DRIVER_IF_VERSION 0x33
+// Be aware that the version should be updated in
+// smu_v11_0.h; maybe a rename is also needed.
+// #define SMU11_DRIVER_IF_VERSION 0x33
#define PPTABLE_NV10_SMU_VERSION 8
@@ -504,10 +506,11 @@ typedef struct {
uint32_t Status;
uint16_t DieTemperature;
- uint16_t MemoryTemperature;
+ uint16_t CurrentMemoryTemperature;
- uint16_t SelectedCardPower;
- uint16_t Reserved4;
+ uint16_t MemoryTemperature;
+ uint8_t MemoryHotspotPosition;
+ uint8_t Reserved4;
uint32_t BoardLevelEnergyAccumulator;
} OutOfBandMonitor_t;
@@ -799,7 +802,12 @@ typedef struct {
// Mvdd Svi2 Div Ratio Setting
uint32_t MvddRatio; // This is used for MVDD Vid workaround. It has 16 fractional bits (Q16.16)
- uint32_t BoardReserved[9];
+ uint8_t RenesesLoadLineEnabled;
+ uint8_t GfxLoadlineResistance;
+ uint8_t SocLoadlineResistance;
+ uint8_t Padding8_Loadline;
+
+ uint32_t BoardReserved[8];
// Padding for MMHUB - do not modify this
uint32_t MmHubPadding[8]; // SMU internal use
@@ -903,13 +911,22 @@ typedef struct {
} Watermarks_t;
typedef struct {
+ uint16_t avgPsmCount[28];
+ uint16_t minPsmCount[28];
+ float avgPsmVoltage[28];
+ float minPsmVoltage[28];
+
+ uint32_t MmHubPadding[32]; // SMU internal use
+} AvfsDebugTable_t_NV14;
+
+typedef struct {
uint16_t avgPsmCount[36];
uint16_t minPsmCount[36];
float avgPsmVoltage[36];
float minPsmVoltage[36];
uint32_t MmHubPadding[8]; // SMU internal use
-} AvfsDebugTable_t;
+} AvfsDebugTable_t_NV10;
typedef struct {
uint8_t AvfsVersion;
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu12_driver_if.h b/drivers/gpu/drm/amd/powerplay/inc/smu12_driver_if.h
new file mode 100644
index 000000000000..c27c82851468
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu12_driver_if.h
@@ -0,0 +1,217 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef SMU12_DRIVER_IF_H
+#define SMU12_DRIVER_IF_H
+
+// *** IMPORTANT ***
+// SMU TEAM: Always increment the interface version if
+// any structure is changed in this file
+#define SMU12_DRIVER_IF_VERSION 10
+
+typedef struct {
+ int32_t value;
+ uint32_t numFractionalBits;
+} FloatInIntFormat_t;
+
+typedef enum {
+ DSPCLK_DCFCLK = 0,
+ DSPCLK_DISPCLK,
+ DSPCLK_PIXCLK,
+ DSPCLK_PHYCLK,
+ DSPCLK_COUNT,
+} DSPCLK_e;
+
+typedef struct {
+ uint16_t Freq; // in MHz
+ uint16_t Vid; // min voltage in SVI2 VID
+} DisplayClockTable_t;
+
+typedef struct {
+ uint16_t MinClock; // This is either DCFCLK or SOCCLK (in MHz)
+ uint16_t MaxClock; // This is either DCFCLK or SOCCLK (in MHz)
+ uint16_t MinMclk;
+ uint16_t MaxMclk;
+
+ uint8_t WmSetting;
+ uint8_t WmType; // Used for normal pstate change or memory retraining
+ uint8_t Padding[2];
+} WatermarkRowGeneric_t;
+
+#define NUM_WM_RANGES 4
+#define WM_PSTATE_CHG 0
+#define WM_RETRAINING 1
+
+typedef enum {
+ WM_SOCCLK = 0,
+ WM_DCFCLK,
+ WM_COUNT,
+} WM_CLOCK_e;
+
+typedef struct {
+ // Watermarks
+ WatermarkRowGeneric_t WatermarkRow[WM_COUNT][NUM_WM_RANGES];
+
+ uint32_t MmHubPadding[7]; // SMU internal use
+} Watermarks_t;
+
+typedef enum {
+ CUSTOM_DPM_SETTING_GFXCLK,
+ CUSTOM_DPM_SETTING_CCLK,
+ CUSTOM_DPM_SETTING_FCLK_CCX,
+ CUSTOM_DPM_SETTING_FCLK_GFX,
+ CUSTOM_DPM_SETTING_FCLK_STALLS,
+ CUSTOM_DPM_SETTING_LCLK,
+ CUSTOM_DPM_SETTING_COUNT,
+} CUSTOM_DPM_SETTING_e;
+
+typedef struct {
+ uint8_t ActiveHystLimit;
+ uint8_t IdleHystLimit;
+ uint8_t FPS;
+ uint8_t MinActiveFreqType;
+ FloatInIntFormat_t MinActiveFreq;
+ FloatInIntFormat_t PD_Data_limit;
+ FloatInIntFormat_t PD_Data_time_constant;
+ FloatInIntFormat_t PD_Data_error_coeff;
+ FloatInIntFormat_t PD_Data_error_rate_coeff;
+} DpmActivityMonitorCoeffExt_t;
+
+typedef struct {
+ DpmActivityMonitorCoeffExt_t DpmActivityMonitorCoeff[CUSTOM_DPM_SETTING_COUNT];
+} CustomDpmSettings_t;
+
+
+#define NUM_DCFCLK_DPM_LEVELS 8
+#define NUM_SOCCLK_DPM_LEVELS 8
+#define NUM_FCLK_DPM_LEVELS 4
+#define NUM_MEMCLK_DPM_LEVELS 4
+#define NUM_VCN_DPM_LEVELS 8
+
+typedef struct {
+ uint32_t Freq; // In MHz
+ uint32_t Vol; // Millivolts with 2 fractional bits
+} DpmClock_t;
+
+typedef struct {
+ DpmClock_t DcfClocks[NUM_DCFCLK_DPM_LEVELS];
+ DpmClock_t SocClocks[NUM_SOCCLK_DPM_LEVELS];
+ DpmClock_t FClocks[NUM_FCLK_DPM_LEVELS];
+ DpmClock_t MemClocks[NUM_MEMCLK_DPM_LEVELS];
+ DpmClock_t VClocks[NUM_VCN_DPM_LEVELS];
+ DpmClock_t DClocks[NUM_VCN_DPM_LEVELS];
+
+ uint8_t NumDcfClkDpmEnabled;
+ uint8_t NumSocClkDpmEnabled;
+ uint8_t NumFClkDpmEnabled;
+ uint8_t NumMemClkDpmEnabled;
+ uint8_t NumVClkDpmEnabled;
+ uint8_t NumDClkDpmEnabled;
+ uint8_t spare[2];
+} DpmClocks_t;
+
+
+typedef enum {
+ CLOCK_SMNCLK = 0,
+ CLOCK_SOCCLK,
+ CLOCK_MP0CLK,
+ CLOCK_MP1CLK,
+ CLOCK_MP2CLK,
+ CLOCK_VCLK,
+ CLOCK_LCLK,
+ CLOCK_DCLK,
+ CLOCK_ACLK,
+ CLOCK_ISPCLK,
+ CLOCK_SHUBCLK,
+ CLOCK_DISPCLK,
+ CLOCK_DPPCLK,
+ CLOCK_DPREFCLK,
+ CLOCK_DCFCLK,
+ CLOCK_FCLK,
+ CLOCK_UMCCLK,
+ CLOCK_GFXCLK,
+ CLOCK_COUNT,
+} CLOCK_IDs_e;
+
+// Throttler Status Bitmask
+#define THROTTLER_STATUS_BIT_SPL 0
+#define THROTTLER_STATUS_BIT_FPPT 1
+#define THROTTLER_STATUS_BIT_SPPT 2
+#define THROTTLER_STATUS_BIT_SPPT_APU 3
+#define THROTTLER_STATUS_BIT_THM_CORE 4
+#define THROTTLER_STATUS_BIT_THM_GFX 5
+#define THROTTLER_STATUS_BIT_THM_SOC 6
+#define THROTTLER_STATUS_BIT_TDC_VDD 7
+#define THROTTLER_STATUS_BIT_TDC_SOC 8
+
+typedef struct {
+ uint16_t ClockFrequency[CLOCK_COUNT]; //[MHz]
+
+ uint16_t AverageGfxclkFrequency; //[MHz]
+ uint16_t AverageSocclkFrequency; //[MHz]
+ uint16_t AverageVclkFrequency; //[MHz]
+ uint16_t AverageFclkFrequency; //[MHz]
+
+ uint16_t AverageGfxActivity; //[centi]
+ uint16_t AverageUvdActivity; //[centi]
+
+ uint16_t Voltage[2]; //[mV] indices: VDDCR_VDD, VDDCR_SOC
+ uint16_t Current[2]; //[mA] indices: VDDCR_VDD, VDDCR_SOC
+ uint16_t Power[2]; //[mW] indices: VDDCR_VDD, VDDCR_SOC
+
+ uint16_t FanPwm; //[milli]
+ uint16_t CurrentSocketPower; //[mW]
+
+ uint16_t CoreFrequency[8]; //[MHz]
+ uint16_t CorePower[8]; //[mW]
+ uint16_t CoreTemperature[8]; //[centi-Celsius]
+ uint16_t L3Frequency[2]; //[MHz]
+ uint16_t L3Temperature[2]; //[centi-Celsius]
+
+ uint16_t GfxTemperature; //[centi-Celsius]
+ uint16_t SocTemperature; //[centi-Celsius]
+ uint16_t ThrottlerStatus;
+ uint16_t spare;
+} SmuMetrics_t;
+
+
+// Workload bits
+#define WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT 0
+#define WORKLOAD_PPLIB_VIDEO_BIT 2
+#define WORKLOAD_PPLIB_VR_BIT 3
+#define WORKLOAD_PPLIB_COMPUTE_BIT 4
+#define WORKLOAD_PPLIB_CUSTOM_BIT 5
+#define WORKLOAD_PPLIB_COUNT 6
+
+#define TABLE_BIOS_IF 0 // Called by BIOS
+#define TABLE_WATERMARKS 1 // Called by Driver
+#define TABLE_CUSTOM_DPM 2 // Called by Driver
+#define TABLE_SPARE1 3
+#define TABLE_DPMCLOCKS 4 // Called by Driver
+#define TABLE_MOMENTARY_PM 5 // Called by Tools
+#define TABLE_MODERN_STDBY 6 // Called by Tools for Modern Standby Log
+#define TABLE_SMU_METRICS 7 // Called by Driver
+#define TABLE_COUNT 8
+
+
+#endif
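
The SMU12 metrics table reports fixed-point integer units throughout (MHz, mW, centi-Celsius, milli for the fan PWM), so a consumer has to scale before presenting the values. A small standalone sketch of that conversion, using a trimmed-down copy of the struct and made-up sample values:

#include <stdint.h>
#include <stdio.h>

/* Trimmed-down view of SmuMetrics_t: just the fields used below. */
typedef struct {
	uint16_t AverageGfxclkFrequency;	/* [MHz] */
	uint16_t CurrentSocketPower;		/* [mW] */
	uint16_t GfxTemperature;		/* [centi-Celsius] */
	uint16_t FanPwm;			/* [milli] */
} DemoSmuMetrics_t;

int main(void)
{
	DemoSmuMetrics_t m = {
		.AverageGfxclkFrequency = 1400,
		.CurrentSocketPower = 15000,
		.GfxTemperature = 6523,
		.FanPwm = 450,
	};

	printf("gfxclk: %d MHz\n", m.AverageGfxclkFrequency);
	printf("socket power: %d.%03d W\n",
	       m.CurrentSocketPower / 1000, m.CurrentSocketPower % 1000);
	printf("gfx temp: %d.%02d C\n",
	       m.GfxTemperature / 100, m.GfxTemperature % 100);
	printf("fan: %d.%d %%\n", m.FanPwm / 10, m.FanPwm % 10);
	return 0;
}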
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_types.h b/drivers/gpu/drm/amd/powerplay/inc/smu_types.h
new file mode 100644
index 000000000000..b0dd05d431dd
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu_types.h
@@ -0,0 +1,263 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __SMU_TYPES_H__
+#define __SMU_TYPES_H__
+
+#define SMU_MESSAGE_TYPES \
+ __SMU_DUMMY_MAP(TestMessage), \
+ __SMU_DUMMY_MAP(GetSmuVersion), \
+ __SMU_DUMMY_MAP(GetDriverIfVersion), \
+ __SMU_DUMMY_MAP(SetAllowedFeaturesMaskLow), \
+ __SMU_DUMMY_MAP(SetAllowedFeaturesMaskHigh), \
+ __SMU_DUMMY_MAP(EnableAllSmuFeatures), \
+ __SMU_DUMMY_MAP(DisableAllSmuFeatures), \
+ __SMU_DUMMY_MAP(EnableSmuFeaturesLow), \
+ __SMU_DUMMY_MAP(EnableSmuFeaturesHigh), \
+ __SMU_DUMMY_MAP(DisableSmuFeaturesLow), \
+ __SMU_DUMMY_MAP(DisableSmuFeaturesHigh), \
+ __SMU_DUMMY_MAP(GetEnabledSmuFeaturesLow), \
+ __SMU_DUMMY_MAP(GetEnabledSmuFeaturesHigh), \
+ __SMU_DUMMY_MAP(SetWorkloadMask), \
+ __SMU_DUMMY_MAP(SetPptLimit), \
+ __SMU_DUMMY_MAP(SetDriverDramAddrHigh), \
+ __SMU_DUMMY_MAP(SetDriverDramAddrLow), \
+ __SMU_DUMMY_MAP(SetToolsDramAddrHigh), \
+ __SMU_DUMMY_MAP(SetToolsDramAddrLow), \
+ __SMU_DUMMY_MAP(TransferTableSmu2Dram), \
+ __SMU_DUMMY_MAP(TransferTableDram2Smu), \
+ __SMU_DUMMY_MAP(UseDefaultPPTable), \
+ __SMU_DUMMY_MAP(UseBackupPPTable), \
+ __SMU_DUMMY_MAP(RunBtc), \
+ __SMU_DUMMY_MAP(RequestI2CBus), \
+ __SMU_DUMMY_MAP(ReleaseI2CBus), \
+ __SMU_DUMMY_MAP(SetFloorSocVoltage), \
+ __SMU_DUMMY_MAP(SoftReset), \
+ __SMU_DUMMY_MAP(StartBacoMonitor), \
+ __SMU_DUMMY_MAP(CancelBacoMonitor), \
+ __SMU_DUMMY_MAP(EnterBaco), \
+ __SMU_DUMMY_MAP(SetSoftMinByFreq), \
+ __SMU_DUMMY_MAP(SetSoftMaxByFreq), \
+ __SMU_DUMMY_MAP(SetHardMinByFreq), \
+ __SMU_DUMMY_MAP(SetHardMaxByFreq), \
+ __SMU_DUMMY_MAP(GetMinDpmFreq), \
+ __SMU_DUMMY_MAP(GetMaxDpmFreq), \
+ __SMU_DUMMY_MAP(GetDpmFreqByIndex), \
+ __SMU_DUMMY_MAP(GetDpmClockFreq), \
+ __SMU_DUMMY_MAP(GetSsVoltageByDpm), \
+ __SMU_DUMMY_MAP(SetMemoryChannelConfig), \
+ __SMU_DUMMY_MAP(SetGeminiMode), \
+ __SMU_DUMMY_MAP(SetGeminiApertureHigh), \
+ __SMU_DUMMY_MAP(SetGeminiApertureLow), \
+ __SMU_DUMMY_MAP(SetMinLinkDpmByIndex), \
+ __SMU_DUMMY_MAP(OverridePcieParameters), \
+ __SMU_DUMMY_MAP(OverDriveSetPercentage), \
+ __SMU_DUMMY_MAP(SetMinDeepSleepDcefclk), \
+ __SMU_DUMMY_MAP(ReenableAcDcInterrupt), \
+ __SMU_DUMMY_MAP(NotifyPowerSource), \
+ __SMU_DUMMY_MAP(SetUclkFastSwitch), \
+ __SMU_DUMMY_MAP(SetUclkDownHyst), \
+ __SMU_DUMMY_MAP(GfxDeviceDriverReset), \
+ __SMU_DUMMY_MAP(GetCurrentRpm), \
+ __SMU_DUMMY_MAP(SetVideoFps), \
+ __SMU_DUMMY_MAP(SetTjMax), \
+ __SMU_DUMMY_MAP(SetFanTemperatureTarget), \
+ __SMU_DUMMY_MAP(PrepareMp1ForUnload), \
+ __SMU_DUMMY_MAP(DramLogSetDramAddrHigh), \
+ __SMU_DUMMY_MAP(DramLogSetDramAddrLow), \
+ __SMU_DUMMY_MAP(DramLogSetDramSize), \
+ __SMU_DUMMY_MAP(SetFanMaxRpm), \
+ __SMU_DUMMY_MAP(SetFanMinPwm), \
+ __SMU_DUMMY_MAP(ConfigureGfxDidt), \
+ __SMU_DUMMY_MAP(NumOfDisplays), \
+ __SMU_DUMMY_MAP(RemoveMargins), \
+ __SMU_DUMMY_MAP(ReadSerialNumTop32), \
+ __SMU_DUMMY_MAP(ReadSerialNumBottom32), \
+ __SMU_DUMMY_MAP(SetSystemVirtualDramAddrHigh), \
+ __SMU_DUMMY_MAP(SetSystemVirtualDramAddrLow), \
+ __SMU_DUMMY_MAP(WaflTest), \
+ __SMU_DUMMY_MAP(SetFclkGfxClkRatio), \
+ __SMU_DUMMY_MAP(AllowGfxOff), \
+ __SMU_DUMMY_MAP(DisallowGfxOff), \
+ __SMU_DUMMY_MAP(GetPptLimit), \
+ __SMU_DUMMY_MAP(GetDcModeMaxDpmFreq), \
+ __SMU_DUMMY_MAP(GetDebugData), \
+ __SMU_DUMMY_MAP(SetXgmiMode), \
+ __SMU_DUMMY_MAP(RunAfllBtc), \
+ __SMU_DUMMY_MAP(ExitBaco), \
+ __SMU_DUMMY_MAP(PrepareMp1ForReset), \
+ __SMU_DUMMY_MAP(PrepareMp1ForShutdown), \
+ __SMU_DUMMY_MAP(SetMGpuFanBoostLimitRpm), \
+ __SMU_DUMMY_MAP(GetAVFSVoltageByDpm), \
+ __SMU_DUMMY_MAP(PowerUpVcn), \
+ __SMU_DUMMY_MAP(PowerDownVcn), \
+ __SMU_DUMMY_MAP(PowerUpJpeg), \
+ __SMU_DUMMY_MAP(PowerDownJpeg), \
+ __SMU_DUMMY_MAP(BacoAudioD3PME), \
+ __SMU_DUMMY_MAP(ArmD3), \
+ __SMU_DUMMY_MAP(RunGfxDcBtc), \
+ __SMU_DUMMY_MAP(RunSocDcBtc), \
+ __SMU_DUMMY_MAP(SetMemoryChannelEnable), \
+ __SMU_DUMMY_MAP(SetDfSwitchType), \
+ __SMU_DUMMY_MAP(GetVoltageByDpm), \
+ __SMU_DUMMY_MAP(GetVoltageByDpmOverdrive), \
+ __SMU_DUMMY_MAP(PowerUpVcn0), \
+ __SMU_DUMMY_MAP(PowerDownVcn0), \
+ __SMU_DUMMY_MAP(PowerUpVcn1), \
+ __SMU_DUMMY_MAP(PowerDownVcn1), \
+ __SMU_DUMMY_MAP(PowerUpGfx), \
+ __SMU_DUMMY_MAP(PowerDownIspByTile), \
+ __SMU_DUMMY_MAP(PowerUpIspByTile), \
+ __SMU_DUMMY_MAP(PowerDownSdma), \
+ __SMU_DUMMY_MAP(PowerUpSdma), \
+ __SMU_DUMMY_MAP(SetHardMinIspclkByFreq), \
+ __SMU_DUMMY_MAP(SetHardMinVcn), \
+ __SMU_DUMMY_MAP(Spare1), \
+ __SMU_DUMMY_MAP(Spare2), \
+ __SMU_DUMMY_MAP(SetAllowFclkSwitch), \
+ __SMU_DUMMY_MAP(SetMinVideoGfxclkFreq), \
+ __SMU_DUMMY_MAP(ActiveProcessNotify), \
+ __SMU_DUMMY_MAP(SetCustomPolicy), \
+ __SMU_DUMMY_MAP(QueryPowerLimit), \
+ __SMU_DUMMY_MAP(SetGfxclkOverdriveByFreqVid), \
+ __SMU_DUMMY_MAP(SetHardMinDcfclkByFreq), \
+ __SMU_DUMMY_MAP(SetHardMinSocclkByFreq), \
+ __SMU_DUMMY_MAP(ControlIgpuATS), \
+ __SMU_DUMMY_MAP(SetMinVideoFclkFreq), \
+ __SMU_DUMMY_MAP(SetMinDeepSleepDcfclk), \
+ __SMU_DUMMY_MAP(ForcePowerDownGfx), \
+ __SMU_DUMMY_MAP(SetPhyclkVoltageByFreq), \
+ __SMU_DUMMY_MAP(SetDppclkVoltageByFreq), \
+ __SMU_DUMMY_MAP(SetSoftMinVcn), \
+ __SMU_DUMMY_MAP(EnablePostCode), \
+ __SMU_DUMMY_MAP(GetGfxclkFrequency), \
+ __SMU_DUMMY_MAP(GetFclkFrequency), \
+ __SMU_DUMMY_MAP(GetMinGfxclkFrequency), \
+ __SMU_DUMMY_MAP(GetMaxGfxclkFrequency), \
+ __SMU_DUMMY_MAP(SetGfxCGPG), \
+ __SMU_DUMMY_MAP(SetSoftMaxGfxClk), \
+ __SMU_DUMMY_MAP(SetHardMinGfxClk), \
+ __SMU_DUMMY_MAP(SetSoftMaxSocclkByFreq), \
+ __SMU_DUMMY_MAP(SetSoftMaxFclkByFreq), \
+ __SMU_DUMMY_MAP(SetSoftMaxVcn), \
+ __SMU_DUMMY_MAP(PowerGateMmHub), \
+ __SMU_DUMMY_MAP(UpdatePmeRestore), \
+ __SMU_DUMMY_MAP(GpuChangeState), \
+ __SMU_DUMMY_MAP(SetPowerLimitPercentage), \
+ __SMU_DUMMY_MAP(ForceGfxContentSave), \
+ __SMU_DUMMY_MAP(EnableTmdp48MHzRefclkPwrDown), \
+ __SMU_DUMMY_MAP(PowerGateAtHub), \
+ __SMU_DUMMY_MAP(SetSoftMinJpeg), \
+ __SMU_DUMMY_MAP(SetHardMinFclkByFreq), \
+
+#undef __SMU_DUMMY_MAP
+#define __SMU_DUMMY_MAP(type) SMU_MSG_##type
+enum smu_message_type {
+ SMU_MESSAGE_TYPES
+ SMU_MSG_MAX_COUNT,
+};
+
+enum smu_clk_type {
+ SMU_GFXCLK,
+ SMU_VCLK,
+ SMU_DCLK,
+ SMU_ECLK,
+ SMU_SOCCLK,
+ SMU_UCLK,
+ SMU_DCEFCLK,
+ SMU_DISPCLK,
+ SMU_PIXCLK,
+ SMU_PHYCLK,
+ SMU_FCLK,
+ SMU_SCLK,
+ SMU_MCLK,
+ SMU_PCIE,
+ SMU_OD_SCLK,
+ SMU_OD_MCLK,
+ SMU_OD_VDDC_CURVE,
+ SMU_OD_RANGE,
+ SMU_CLK_COUNT,
+};
+
+#define SMU_FEATURE_MASKS \
+ __SMU_DUMMY_MAP(DPM_PREFETCHER), \
+ __SMU_DUMMY_MAP(DPM_GFXCLK), \
+ __SMU_DUMMY_MAP(DPM_UCLK), \
+ __SMU_DUMMY_MAP(DPM_SOCCLK), \
+ __SMU_DUMMY_MAP(DPM_UVD), \
+ __SMU_DUMMY_MAP(DPM_VCE), \
+ __SMU_DUMMY_MAP(ULV), \
+ __SMU_DUMMY_MAP(DPM_MP0CLK), \
+ __SMU_DUMMY_MAP(DPM_LINK), \
+ __SMU_DUMMY_MAP(DPM_DCEFCLK), \
+ __SMU_DUMMY_MAP(DS_GFXCLK), \
+ __SMU_DUMMY_MAP(DS_SOCCLK), \
+ __SMU_DUMMY_MAP(DS_LCLK), \
+ __SMU_DUMMY_MAP(PPT), \
+ __SMU_DUMMY_MAP(TDC), \
+ __SMU_DUMMY_MAP(THERMAL), \
+ __SMU_DUMMY_MAP(GFX_PER_CU_CG), \
+ __SMU_DUMMY_MAP(RM), \
+ __SMU_DUMMY_MAP(DS_DCEFCLK), \
+ __SMU_DUMMY_MAP(ACDC), \
+ __SMU_DUMMY_MAP(VR0HOT), \
+ __SMU_DUMMY_MAP(VR1HOT), \
+ __SMU_DUMMY_MAP(FW_CTF), \
+ __SMU_DUMMY_MAP(LED_DISPLAY), \
+ __SMU_DUMMY_MAP(FAN_CONTROL), \
+ __SMU_DUMMY_MAP(GFX_EDC), \
+ __SMU_DUMMY_MAP(GFXOFF), \
+ __SMU_DUMMY_MAP(CG), \
+ __SMU_DUMMY_MAP(DPM_FCLK), \
+ __SMU_DUMMY_MAP(DS_FCLK), \
+ __SMU_DUMMY_MAP(DS_MP1CLK), \
+ __SMU_DUMMY_MAP(DS_MP0CLK), \
+ __SMU_DUMMY_MAP(XGMI), \
+ __SMU_DUMMY_MAP(DPM_GFX_PACE), \
+ __SMU_DUMMY_MAP(MEM_VDDCI_SCALING), \
+ __SMU_DUMMY_MAP(MEM_MVDD_SCALING), \
+ __SMU_DUMMY_MAP(DS_UCLK), \
+ __SMU_DUMMY_MAP(GFX_ULV), \
+ __SMU_DUMMY_MAP(FW_DSTATE), \
+ __SMU_DUMMY_MAP(BACO), \
+ __SMU_DUMMY_MAP(VCN_PG), \
+ __SMU_DUMMY_MAP(JPEG_PG), \
+ __SMU_DUMMY_MAP(USB_PG), \
+ __SMU_DUMMY_MAP(RSMU_SMN_CG), \
+ __SMU_DUMMY_MAP(APCC_PLUS), \
+ __SMU_DUMMY_MAP(GTHR), \
+ __SMU_DUMMY_MAP(GFX_DCS), \
+ __SMU_DUMMY_MAP(GFX_SS), \
+ __SMU_DUMMY_MAP(OUT_OF_BAND_MONITOR), \
+ __SMU_DUMMY_MAP(TEMP_DEPENDENT_VMIN), \
+ __SMU_DUMMY_MAP(MMHUB_PG), \
+ __SMU_DUMMY_MAP(ATHUB_PG), \
+ __SMU_DUMMY_MAP(WAFL_CG),
+
+#undef __SMU_DUMMY_MAP
+#define __SMU_DUMMY_MAP(feature) SMU_FEATURE_##feature##_BIT
+enum smu_feature_mask {
+ SMU_FEATURE_MASKS
+ SMU_FEATURE_COUNT,
+};
+
+#endif
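
SMU_MESSAGE_TYPES and SMU_FEATURE_MASKS are X-macro lists: the same list is expanded once with __SMU_DUMMY_MAP producing enumerators, and can be expanded again to produce a parallel name table, which is how helpers such as smu_get_message_name() can stay in sync with the enum. A minimal sketch of the technique with a shortened, illustrative list (DEMO_MESSAGE_TYPES and demo_* names are invented):

#include <stdio.h>

/* Shortened stand-in for SMU_MESSAGE_TYPES; the kernel list is much longer. */
#define DEMO_MESSAGE_TYPES              \
	__SMU_DUMMY_MAP(TestMessage),   \
	__SMU_DUMMY_MAP(GetSmuVersion), \
	__SMU_DUMMY_MAP(EnterBaco),

/* First expansion: enum values. */
#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(type) SMU_MSG_##type
enum demo_message_type {
	DEMO_MESSAGE_TYPES
	SMU_MSG_MAX_COUNT,
};

/* Second expansion: printable names, index-aligned with the enum. */
#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(type) #type
static const char *demo_message_names[] = {
	DEMO_MESSAGE_TYPES
};

int main(void)
{
	printf("%d -> %s\n", SMU_MSG_EnterBaco,
	       demo_message_names[SMU_MSG_EnterBaco]);
	return 0;
}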
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
index 2fff4b16cb4e..5bda8539447a 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
@@ -25,6 +25,12 @@
#include "amdgpu_smu.h"
+#define SMU11_DRIVER_IF_VERSION_INV 0xFFFFFFFF
+#define SMU11_DRIVER_IF_VERSION_VG20 0x13
+#define SMU11_DRIVER_IF_VERSION_ARCT 0x09
+#define SMU11_DRIVER_IF_VERSION_NV10 0x33
+#define SMU11_DRIVER_IF_VERSION_NV14 0x34
+
/* MP Apertures */
#define MP0_Public 0x03800000
#define MP0_SRAM 0x03900000
@@ -43,19 +49,30 @@
#define SMU11_TOOL_SIZE 0x19000
#define CLK_MAP(clk, index) \
- [SMU_##clk] = index
+ [SMU_##clk] = {1, (index)}
#define FEA_MAP(fea) \
- [SMU_FEATURE_##fea##_BIT] = FEATURE_##fea##_BIT
+ [SMU_FEATURE_##fea##_BIT] = {1, FEATURE_##fea##_BIT}
#define TAB_MAP(tab) \
- [SMU_TABLE_##tab] = TABLE_##tab
+ [SMU_TABLE_##tab] = {1, TABLE_##tab}
#define PWR_MAP(tab) \
- [SMU_POWER_SOURCE_##tab] = POWER_SOURCE_##tab
+ [SMU_POWER_SOURCE_##tab] = {1, POWER_SOURCE_##tab}
#define WORKLOAD_MAP(profile, workload) \
- [profile] = workload
+ [profile] = {1, (workload)}
+
+static const struct smu_temperature_range smu11_thermal_policy[] =
+{
+ {-273150, 99000, 99000, -273150, 99000, 99000, -273150, 99000, 99000},
+ { 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000},
+};
+
+struct smu_11_0_cmn2aisc_mapping {
+ int valid_mapping;
+ int map_to;
+};
struct smu_11_0_max_sustainable_clocks {
uint32_t display_clock;
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h b/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h
new file mode 100644
index 000000000000..acf3db12f59f
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __SMU_V12_0_H__
+#define __SMU_V12_0_H__
+
+#include "amdgpu_smu.h"
+
+/* MP Apertures */
+#define MP0_Public 0x03800000
+#define MP0_SRAM 0x03900000
+#define MP1_Public 0x03b00000
+#define MP1_SRAM 0x03c00004
+
+
+struct smu_12_0_cmn2aisc_mapping {
+ int valid_mapping;
+ int map_to;
+};
+
+void smu_v12_0_set_smu_funcs(struct smu_context *smu);
+
+#endif
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0_ppsmc.h
new file mode 100644
index 000000000000..9ac9f3bd3664
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0_ppsmc.h
@@ -0,0 +1,106 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef SMU_12_0_PPSMC_H
+#define SMU_12_0_PPSMC_H
+
+// SMU Response Codes:
+#define PPSMC_Result_OK 0x1
+#define PPSMC_Result_Failed 0xFF
+#define PPSMC_Result_UnknownCmd 0xFE
+#define PPSMC_Result_CmdRejectedPrereq 0xFD
+#define PPSMC_Result_CmdRejectedBusy 0xFC
+
+
+// Message Definitions:
+#define PPSMC_MSG_TestMessage 0x1
+#define PPSMC_MSG_GetSmuVersion 0x2
+#define PPSMC_MSG_GetDriverIfVersion 0x3
+#define PPSMC_MSG_PowerUpGfx 0x6
+#define PPSMC_MSG_EnableGfxOff 0x7
+#define PPSMC_MSG_DisableGfxOff 0x8
+#define PPSMC_MSG_PowerDownIspByTile 0x9 // ISP is power gated by default
+#define PPSMC_MSG_PowerUpIspByTile 0xA
+#define PPSMC_MSG_PowerDownVcn 0xB // VCN is power gated by default
+#define PPSMC_MSG_PowerUpVcn 0xC
+#define PPSMC_MSG_PowerDownSdma 0xD // SDMA is power gated by default
+#define PPSMC_MSG_PowerUpSdma 0xE
+#define PPSMC_MSG_SetHardMinIspclkByFreq 0xF
+#define PPSMC_MSG_SetHardMinVcn 0x10 // For wireless display
+#define PPSMC_MSG_spare1 0x11
+#define PPSMC_MSG_spare2 0x12
+#define PPSMC_MSG_SetAllowFclkSwitch 0x13
+#define PPSMC_MSG_SetMinVideoGfxclkFreq 0x14
+#define PPSMC_MSG_ActiveProcessNotify 0x15
+#define PPSMC_MSG_SetCustomPolicy 0x16
+#define PPSMC_MSG_SetVideoFps 0x17
+#define PPSMC_MSG_SetDisplayCount 0x18 // Moved to VBIOS
+#define PPSMC_MSG_QueryPowerLimit 0x19 //Driver to look up sustainable clocks for VQ
+#define PPSMC_MSG_SetDriverDramAddrHigh 0x1A
+#define PPSMC_MSG_SetDriverDramAddrLow 0x1B
+#define PPSMC_MSG_TransferTableSmu2Dram 0x1C
+#define PPSMC_MSG_TransferTableDram2Smu 0x1D
+#define PPSMC_MSG_GfxDeviceDriverReset 0x1E
+#define PPSMC_MSG_SetGfxclkOverdriveByFreqVid 0x1F
+#define PPSMC_MSG_SetHardMinDcfclkByFreq 0x20 // Moved to VBIOS
+#define PPSMC_MSG_SetHardMinSocclkByFreq 0x21
+#define PPSMC_MSG_ControlIgpuATS 0x22
+#define PPSMC_MSG_SetMinVideoFclkFreq 0x23
+#define PPSMC_MSG_SetMinDeepSleepDcfclk 0x24 // Moved to VBIOS
+#define PPSMC_MSG_ForcePowerDownGfx 0x25
+#define PPSMC_MSG_SetPhyclkVoltageByFreq 0x26 // Moved to VBIOS
+#define PPSMC_MSG_SetDppclkVoltageByFreq 0x27 // Moved to VBIOS and is SetDppclkFreq
+#define PPSMC_MSG_SetSoftMinVcn 0x28
+#define PPSMC_MSG_EnablePostCode 0x29
+#define PPSMC_MSG_GetGfxclkFrequency 0x2A
+#define PPSMC_MSG_GetFclkFrequency 0x2B
+#define PPSMC_MSG_GetMinGfxclkFrequency 0x2C
+#define PPSMC_MSG_GetMaxGfxclkFrequency 0x2D
+#define PPSMC_MSG_SoftReset 0x2E // Not supported
+#define PPSMC_MSG_SetGfxCGPG 0x2F
+#define PPSMC_MSG_SetSoftMaxGfxClk 0x30
+#define PPSMC_MSG_SetHardMinGfxClk 0x31
+#define PPSMC_MSG_SetSoftMaxSocclkByFreq 0x32
+#define PPSMC_MSG_SetSoftMaxFclkByFreq 0x33
+#define PPSMC_MSG_SetSoftMaxVcn 0x34
+#define PPSMC_MSG_PowerGateMmHub 0x35
+#define PPSMC_MSG_UpdatePmeRestore 0x36 // Moved to VBIOS
+#define PPSMC_MSG_GpuChangeState 0x37
+#define PPSMC_MSG_SetPowerLimitPercentage 0x38
+#define PPSMC_MSG_ForceGfxContentSave 0x39
+#define PPSMC_MSG_EnableTmdp48MHzRefclkPwrDown 0x3A // Moved to VBIOS
+#define PPSMC_MSG_PowerDownJpeg 0x3B
+#define PPSMC_MSG_PowerUpJpeg 0x3C
+#define PPSMC_MSG_PowerGateAtHub 0x3D
+#define PPSMC_MSG_SetSoftMinJpeg 0x3E
+#define PPSMC_MSG_SetHardMinFclkByFreq 0x3F
+#define PPSMC_Message_Count 0x40
+
+
+//Argument for PPSMC_MSG_GpuChangeState
+enum {
+ eGpuChangeState_D0Entry = 1,
+ eGpuChangeState_D3Entry,
+};
+
+#endif
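
For orientation, the response codes defined above drive a simple mailbox handshake: the driver clears the response register, writes a message index (plus an optional argument), and polls until the register goes non-zero, with only PPSMC_Result_OK counting as success. The stand-alone sketch below models just that handshake; the register variable and helpers are placeholders for the real MMIO accessors, and the message index used is illustrative.

#include <stdio.h>

#define PPSMC_Result_OK      0x1

static unsigned int response_reg;          /* models the SMU response register */

static void send_message(unsigned int msg_index)
{
	response_reg = 0;                   /* clear the response first */
	(void)msg_index;                    /* real code writes the message register here */
	response_reg = PPSMC_Result_OK;     /* pretend the firmware answered OK */
}

static int wait_for_response(void)
{
	int timeout = 1000;

	while (response_reg == 0 && --timeout)
		;                           /* real code delays between polls */
	if (!timeout)
		return -110;                /* -ETIME-style timeout */
	return response_reg == PPSMC_Result_OK ? 0 : -5;   /* -EIO-style failure */
}

int main(void)
{
	send_message(0x2);                  /* e.g. GetSmuVersion */
	printf("result: %d\n", wait_for_response());
	return 0;
}
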
diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
index b81c7e715dc9..12c0e469bf35 100644
--- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
@@ -50,9 +50,9 @@
FEATURE_MASK(FEATURE_DPM_DCEFCLK_BIT))
#define MSG_MAP(msg, index) \
- [SMU_MSG_##msg] = index
+ [SMU_MSG_##msg] = {1, (index)}
-static int navi10_message_map[SMU_MSG_MAX_COUNT] = {
+static struct smu_11_0_cmn2aisc_mapping navi10_message_map[SMU_MSG_MAX_COUNT] = {
MSG_MAP(TestMessage, PPSMC_MSG_TestMessage),
MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion),
MSG_MAP(GetDriverIfVersion, PPSMC_MSG_GetDriverIfVersion),
@@ -119,7 +119,7 @@ static int navi10_message_map[SMU_MSG_MAX_COUNT] = {
MSG_MAP(ArmD3, PPSMC_MSG_ArmD3),
};
-static int navi10_clk_map[SMU_CLK_COUNT] = {
+static struct smu_11_0_cmn2aisc_mapping navi10_clk_map[SMU_CLK_COUNT] = {
CLK_MAP(GFXCLK, PPCLK_GFXCLK),
CLK_MAP(SCLK, PPCLK_GFXCLK),
CLK_MAP(SOCCLK, PPCLK_SOCCLK),
@@ -134,7 +134,7 @@ static int navi10_clk_map[SMU_CLK_COUNT] = {
CLK_MAP(PHYCLK, PPCLK_PHYCLK),
};
-static int navi10_feature_mask_map[SMU_FEATURE_COUNT] = {
+static struct smu_11_0_cmn2aisc_mapping navi10_feature_mask_map[SMU_FEATURE_COUNT] = {
FEA_MAP(DPM_PREFETCHER),
FEA_MAP(DPM_GFXCLK),
FEA_MAP(DPM_GFX_PACE),
@@ -179,7 +179,7 @@ static int navi10_feature_mask_map[SMU_FEATURE_COUNT] = {
FEA_MAP(ATHUB_PG),
};
-static int navi10_table_map[SMU_TABLE_COUNT] = {
+static struct smu_11_0_cmn2aisc_mapping navi10_table_map[SMU_TABLE_COUNT] = {
TAB_MAP(PPTABLE),
TAB_MAP(WATERMARKS),
TAB_MAP(AVFS),
@@ -194,12 +194,12 @@ static int navi10_table_map[SMU_TABLE_COUNT] = {
TAB_MAP(PACE),
};
-static int navi10_pwr_src_map[SMU_POWER_SOURCE_COUNT] = {
+static struct smu_11_0_cmn2aisc_mapping navi10_pwr_src_map[SMU_POWER_SOURCE_COUNT] = {
PWR_MAP(AC),
PWR_MAP(DC),
};
-static int navi10_workload_map[] = {
+static struct smu_11_0_cmn2aisc_mapping navi10_workload_map[PP_SMC_POWER_PROFILE_COUNT] = {
WORKLOAD_MAP(PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT, WORKLOAD_PPLIB_DEFAULT_BIT),
WORKLOAD_MAP(PP_SMC_POWER_PROFILE_FULLSCREEN3D, WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT),
WORKLOAD_MAP(PP_SMC_POWER_PROFILE_POWERSAVING, WORKLOAD_PPLIB_POWER_SAVING_BIT),
@@ -211,79 +211,93 @@ static int navi10_workload_map[] = {
static int navi10_get_smu_msg_index(struct smu_context *smc, uint32_t index)
{
- int val;
- if (index > SMU_MSG_MAX_COUNT)
+ struct smu_11_0_cmn2aisc_mapping mapping;
+
+ if (index >= SMU_MSG_MAX_COUNT)
return -EINVAL;
- val = navi10_message_map[index];
- if (val > PPSMC_Message_Count)
+ mapping = navi10_message_map[index];
+ if (!(mapping.valid_mapping)) {
return -EINVAL;
+ }
- return val;
+ return mapping.map_to;
}
static int navi10_get_smu_clk_index(struct smu_context *smc, uint32_t index)
{
- int val;
+ struct smu_11_0_cmn2aisc_mapping mapping;
+
if (index >= SMU_CLK_COUNT)
return -EINVAL;
- val = navi10_clk_map[index];
- if (val >= PPCLK_COUNT)
+ mapping = navi10_clk_map[index];
+ if (!(mapping.valid_mapping)) {
return -EINVAL;
+ }
- return val;
+ return mapping.map_to;
}
static int navi10_get_smu_feature_index(struct smu_context *smc, uint32_t index)
{
- int val;
+ struct smu_11_0_cmn2aisc_mapping mapping;
+
if (index >= SMU_FEATURE_COUNT)
return -EINVAL;
- val = navi10_feature_mask_map[index];
- if (val > 64)
+ mapping = navi10_feature_mask_map[index];
+ if (!(mapping.valid_mapping)) {
return -EINVAL;
+ }
- return val;
+ return mapping.map_to;
}
static int navi10_get_smu_table_index(struct smu_context *smc, uint32_t index)
{
- int val;
+ struct smu_11_0_cmn2aisc_mapping mapping;
+
if (index >= SMU_TABLE_COUNT)
return -EINVAL;
- val = navi10_table_map[index];
- if (val >= TABLE_COUNT)
+ mapping = navi10_table_map[index];
+ if (!(mapping.valid_mapping)) {
return -EINVAL;
+ }
- return val;
+ return mapping.map_to;
}
static int navi10_get_pwr_src_index(struct smu_context *smc, uint32_t index)
{
- int val;
+ struct smu_11_0_cmn2aisc_mapping mapping;
+
if (index >= SMU_POWER_SOURCE_COUNT)
return -EINVAL;
- val = navi10_pwr_src_map[index];
- if (val >= POWER_SOURCE_COUNT)
+ mapping = navi10_pwr_src_map[index];
+ if (!(mapping.valid_mapping)) {
return -EINVAL;
+ }
- return val;
+ return mapping.map_to;
}
static int navi10_get_workload_type(struct smu_context *smu, enum PP_SMC_POWER_PROFILE profile)
{
- int val;
+ struct smu_11_0_cmn2aisc_mapping mapping;
+
if (profile > PP_SMC_POWER_PROFILE_CUSTOM)
return -EINVAL;
- val = navi10_workload_map[profile];
+ mapping = navi10_workload_map[profile];
+ if (!(mapping.valid_mapping)) {
+ return -EINVAL;
+ }
- return val;
+ return mapping.map_to;
}
static bool is_asic_secure(struct smu_context *smu)
@@ -355,7 +369,8 @@ navi10_get_allowed_feature_mask(struct smu_context *smu,
*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_ATHUB_PG_BIT);
if (smu->adev->pg_flags & AMD_PG_SUPPORT_VCN)
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_VCN_PG_BIT);
+ *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_VCN_PG_BIT)
+ | FEATURE_MASK(FEATURE_JPEG_PG_BIT);
/* disable DPM UCLK and DS SOCCLK on navi10 A0 secure board */
if (is_asic_secure(smu)) {
@@ -927,8 +942,6 @@ static int navi10_get_gpu_power(struct smu_context *smu, uint32_t *value)
ret = navi10_get_metrics_table(smu, &metrics);
if (ret)
return ret;
- if (ret)
- return ret;
*value = metrics.AverageSocketPower << 8;
@@ -987,8 +1000,6 @@ static int navi10_get_fan_speed_rpm(struct smu_context *smu,
ret = navi10_get_metrics_table(smu, &metrics);
if (ret)
return ret;
- if (ret)
- return ret;
*speed = metrics.CurrFanSpeed;
@@ -1017,7 +1028,7 @@ static int navi10_get_power_profile_mode(struct smu_context *smu, char *buf)
{
DpmActivityMonitorCoeffInt_t activity_monitor;
uint32_t i, size = 0;
- uint16_t workload_type = 0;
+ int16_t workload_type = 0;
static const char *profile_name[] = {
"BOOTUP_DEFAULT",
"3D_FULL_SCREEN",
@@ -1050,6 +1061,9 @@ static int navi10_get_power_profile_mode(struct smu_context *smu, char *buf)
for (i = 0; i <= PP_SMC_POWER_PROFILE_CUSTOM; i++) {
/* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
workload_type = smu_workload_get_type(smu, i);
+ if (workload_type < 0)
+ return -EINVAL;
+
result = smu_update_table(smu,
SMU_TABLE_ACTIVITY_MONITOR_COEFF, workload_type,
(void *)(&activity_monitor), false);
@@ -1178,6 +1192,8 @@ static int navi10_set_power_profile_mode(struct smu_context *smu, long *input, u
/* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
workload_type = smu_workload_get_type(smu, smu->power_profile_mode);
+ if (workload_type < 0)
+ return -EINVAL;
smu_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask,
1 << workload_type);
@@ -1367,6 +1383,9 @@ static int navi10_read_sensor(struct smu_context *smu,
struct smu_table_context *table_context = &smu->smu_table;
PPTable_t *pptable = table_context->driver_pptable;
+ if(!data || !size)
+ return -EINVAL;
+
switch (sensor) {
case AMDGPU_PP_SENSOR_MAX_FAN_RPM:
*(uint32_t *)data = pptable->FanMaximumRpm;
@@ -1388,7 +1407,7 @@ static int navi10_read_sensor(struct smu_context *smu,
*size = 4;
break;
default:
- return -EINVAL;
+ ret = smu_smc_read_sensor(smu, sensor, data, size);
}
return ret;
@@ -1423,169 +1442,6 @@ static int navi10_get_uclk_dpm_states(struct smu_context *smu, uint32_t *clocks_
return 0;
}
-static int navi10_get_ppfeature_status(struct smu_context *smu,
- char *buf)
-{
- static const char *ppfeature_name[] = {
- "DPM_PREFETCHER",
- "DPM_GFXCLK",
- "DPM_GFX_PACE",
- "DPM_UCLK",
- "DPM_SOCCLK",
- "DPM_MP0CLK",
- "DPM_LINK",
- "DPM_DCEFCLK",
- "MEM_VDDCI_SCALING",
- "MEM_MVDD_SCALING",
- "DS_GFXCLK",
- "DS_SOCCLK",
- "DS_LCLK",
- "DS_DCEFCLK",
- "DS_UCLK",
- "GFX_ULV",
- "FW_DSTATE",
- "GFXOFF",
- "BACO",
- "VCN_PG",
- "JPEG_PG",
- "USB_PG",
- "RSMU_SMN_CG",
- "PPT",
- "TDC",
- "GFX_EDC",
- "APCC_PLUS",
- "GTHR",
- "ACDC",
- "VR0HOT",
- "VR1HOT",
- "FW_CTF",
- "FAN_CONTROL",
- "THERMAL",
- "GFX_DCS",
- "RM",
- "LED_DISPLAY",
- "GFX_SS",
- "OUT_OF_BAND_MONITOR",
- "TEMP_DEPENDENT_VMIN",
- "MMHUB_PG",
- "ATHUB_PG"};
- static const char *output_title[] = {
- "FEATURES",
- "BITMASK",
- "ENABLEMENT"};
- uint64_t features_enabled;
- uint32_t feature_mask[2];
- int i;
- int ret = 0;
- int size = 0;
-
- ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
- PP_ASSERT_WITH_CODE(!ret,
- "[GetPPfeatureStatus] Failed to get enabled smc features!",
- return ret);
- features_enabled = (uint64_t)feature_mask[0] |
- (uint64_t)feature_mask[1] << 32;
-
- size += sprintf(buf + size, "Current ppfeatures: 0x%016llx\n", features_enabled);
- size += sprintf(buf + size, "%-19s %-22s %s\n",
- output_title[0],
- output_title[1],
- output_title[2]);
- for (i = 0; i < (sizeof(ppfeature_name) / sizeof(ppfeature_name[0])); i++) {
- size += sprintf(buf + size, "%-19s 0x%016llx %6s\n",
- ppfeature_name[i],
- 1ULL << i,
- (features_enabled & (1ULL << i)) ? "Y" : "N");
- }
-
- return size;
-}
-
-static int navi10_enable_smc_features(struct smu_context *smu,
- bool enabled,
- uint64_t feature_masks)
-{
- struct smu_feature *feature = &smu->smu_feature;
- uint32_t feature_low, feature_high;
- uint32_t feature_mask[2];
- int ret = 0;
-
- feature_low = (uint32_t)(feature_masks & 0xFFFFFFFF);
- feature_high = (uint32_t)((feature_masks & 0xFFFFFFFF00000000ULL) >> 32);
-
- if (enabled) {
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesLow,
- feature_low);
- if (ret)
- return ret;
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesHigh,
- feature_high);
- if (ret)
- return ret;
- } else {
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesLow,
- feature_low);
- if (ret)
- return ret;
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesHigh,
- feature_high);
- if (ret)
- return ret;
- }
-
- ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
- if (ret)
- return ret;
-
- mutex_lock(&feature->mutex);
- bitmap_copy(feature->enabled, (unsigned long *)&feature_mask,
- feature->feature_num);
- mutex_unlock(&feature->mutex);
-
- return 0;
-}
-
-static int navi10_set_ppfeature_status(struct smu_context *smu,
- uint64_t new_ppfeature_masks)
-{
- uint64_t features_enabled;
- uint32_t feature_mask[2];
- uint64_t features_to_enable;
- uint64_t features_to_disable;
- int ret = 0;
-
- ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
- PP_ASSERT_WITH_CODE(!ret,
- "[SetPPfeatureStatus] Failed to get enabled smc features!",
- return ret);
- features_enabled = (uint64_t)feature_mask[0] |
- (uint64_t)feature_mask[1] << 32;
-
- features_to_disable =
- features_enabled & ~new_ppfeature_masks;
- features_to_enable =
- ~features_enabled & new_ppfeature_masks;
-
- pr_debug("features_to_disable 0x%llx\n", features_to_disable);
- pr_debug("features_to_enable 0x%llx\n", features_to_enable);
-
- if (features_to_disable) {
- ret = navi10_enable_smc_features(smu, false, features_to_disable);
- PP_ASSERT_WITH_CODE(!ret,
- "[SetPPfeatureStatus] Failed to disable smc features!",
- return ret);
- }
-
- if (features_to_enable) {
- ret = navi10_enable_smc_features(smu, true, features_to_enable);
- PP_ASSERT_WITH_CODE(!ret,
- "[SetPPfeatureStatus] Failed to enable smc features!",
- return ret);
- }
-
- return 0;
-}
-
static int navi10_set_peak_clock_by_device(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
@@ -1627,6 +1483,10 @@ static int navi10_set_peak_clock_by_device(struct smu_context *smu)
static int navi10_set_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level)
{
int ret = 0;
+ struct amdgpu_device *adev = smu->adev;
+
+ if (adev->asic_type != CHIP_NAVI10)
+ return -EINVAL;
switch (level) {
case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
@@ -1649,9 +1509,82 @@ static int navi10_get_thermal_temperature_range(struct smu_context *smu,
if (!range || !powerplay_table)
return -EINVAL;
- /* The unit is temperature */
- range->min = 0;
- range->max = powerplay_table->software_shutdown_temp;
+ range->max = powerplay_table->software_shutdown_temp *
+ SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+
+ return 0;
+}
+
+static int navi10_display_disable_memory_clock_switch(struct smu_context *smu,
+ bool disable_memory_clock_switch)
+{
+ int ret = 0;
+ struct smu_11_0_max_sustainable_clocks *max_sustainable_clocks =
+ (struct smu_11_0_max_sustainable_clocks *)
+ smu->smu_table.max_sustainable_clocks;
+ uint32_t min_memory_clock = smu->hard_min_uclk_req_from_dal;
+ uint32_t max_memory_clock = max_sustainable_clocks->uclock;
+
+ if(smu->disable_uclk_switch == disable_memory_clock_switch)
+ return 0;
+
+ if(disable_memory_clock_switch)
+ ret = smu_set_hard_freq_range(smu, SMU_UCLK, max_memory_clock, 0);
+ else
+ ret = smu_set_hard_freq_range(smu, SMU_UCLK, min_memory_clock, 0);
+
+ if(!ret)
+ smu->disable_uclk_switch = disable_memory_clock_switch;
+
+ return ret;
+}
+
+static int navi10_get_power_limit(struct smu_context *smu,
+ uint32_t *limit,
+ bool asic_default)
+{
+ PPTable_t *pptable = smu->smu_table.driver_pptable;
+ uint32_t asic_default_power_limit = 0;
+ int ret = 0;
+ int power_src;
+
+ if (!smu->default_power_limit ||
+ !smu->power_limit) {
+ if (smu_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) {
+ power_src = smu_power_get_index(smu, SMU_POWER_SOURCE_AC);
+ if (power_src < 0)
+ return -EINVAL;
+
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetPptLimit,
+ power_src << 16);
+ if (ret) {
+ pr_err("[%s] get PPT limit failed!", __func__);
+ return ret;
+ }
+ smu_read_smc_arg(smu, &asic_default_power_limit);
+ } else {
+ /* fall back to the pptable value for the PPT limit */
+ if (!pptable) {
+ pr_err("Cannot get PPT limit due to pptable missing!");
+ return -EINVAL;
+ }
+ asic_default_power_limit =
+ pptable->SocketPowerLimitAc[PPT_THROTTLER_PPT0];
+ }
+
+ if (smu->od_enabled) {
+ asic_default_power_limit *= (100 + smu->smu_table.TDPODLimit);
+ asic_default_power_limit /= 100;
+ }
+
+ smu->default_power_limit = asic_default_power_limit;
+ smu->power_limit = asic_default_power_limit;
+ }
+
+ if (asic_default)
+ *limit = smu->default_power_limit;
+ else
+ *limit = smu->power_limit;
return 0;
}
@@ -1690,10 +1623,10 @@ static const struct pptable_funcs navi10_ppt_funcs = {
.set_watermarks_table = navi10_set_watermarks_table,
.read_sensor = navi10_read_sensor,
.get_uclk_dpm_states = navi10_get_uclk_dpm_states,
- .get_ppfeature_status = navi10_get_ppfeature_status,
- .set_ppfeature_status = navi10_set_ppfeature_status,
.set_performance_level = navi10_set_performance_level,
.get_thermal_temperature_range = navi10_get_thermal_temperature_range,
+ .display_disable_memory_clock_switch = navi10_display_disable_memory_clock_switch,
+ .get_power_limit = navi10_get_power_limit,
};
void navi10_set_ppt_funcs(struct smu_context *smu)
@@ -1701,6 +1634,5 @@ void navi10_set_ppt_funcs(struct smu_context *smu)
struct smu_table_context *smu_table = &smu->smu_table;
smu->ppt_funcs = &navi10_ppt_funcs;
- smu->smc_if_version = SMU11_DRIVER_IF_VERSION;
smu_table->table_count = TABLE_COUNT;
}
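
The conversion above replaces the plain int index arrays with struct smu_11_0_cmn2aisc_mapping entries, so an unmapped slot is flagged explicitly by valid_mapping instead of being inferred from a sentinel value. A minimal, self-contained sketch of that lookup pattern follows; the enum values and table contents are illustrative only, and -EINVAL is written out as -22 to keep it stand-alone.

#include <stdio.h>

struct cmn2aisc_mapping {
	int valid_mapping;
	int map_to;
};

#define MSG_MAP(cmn, aisc)	[cmn] = {1, (aisc)}

enum { MSG_TEST, MSG_GET_VERSION, MSG_UNUSED, MSG_COUNT };

static const struct cmn2aisc_mapping message_map[MSG_COUNT] = {
	MSG_MAP(MSG_TEST,        0x1),
	MSG_MAP(MSG_GET_VERSION, 0x2),
	/* MSG_UNUSED stays zero-initialized, i.e. valid_mapping == 0 */
};

static int get_msg_index(unsigned int index)
{
	if (index >= MSG_COUNT)
		return -22;                       /* -EINVAL */
	if (!message_map[index].valid_mapping)
		return -22;                       /* unmapped on this ASIC */
	return message_map[index].map_to;
}

int main(void)
{
	printf("TEST -> %d, UNUSED -> %d\n",
	       get_msg_index(MSG_TEST), get_msg_index(MSG_UNUSED));
	return 0;
}
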
diff --git a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
new file mode 100644
index 000000000000..2a6da546fb55
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
@@ -0,0 +1,195 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "amdgpu.h"
+#include "amdgpu_smu.h"
+#include "soc15_common.h"
+#include "smu_v12_0_ppsmc.h"
+#include "smu12_driver_if.h"
+#include "smu_v12_0.h"
+#include "renoir_ppt.h"
+
+
+#define MSG_MAP(msg, index) \
+ [SMU_MSG_##msg] = {1, (index)}
+
+#define TAB_MAP_VALID(tab) \
+ [SMU_TABLE_##tab] = {1, TABLE_##tab}
+
+#define TAB_MAP_INVALID(tab) \
+ [SMU_TABLE_##tab] = {0, TABLE_##tab}
+
+static struct smu_12_0_cmn2aisc_mapping renoir_message_map[SMU_MSG_MAX_COUNT] = {
+ MSG_MAP(TestMessage, PPSMC_MSG_TestMessage),
+ MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion),
+ MSG_MAP(GetDriverIfVersion, PPSMC_MSG_GetDriverIfVersion),
+ MSG_MAP(PowerUpGfx, PPSMC_MSG_PowerUpGfx),
+ MSG_MAP(AllowGfxOff, PPSMC_MSG_EnableGfxOff),
+ MSG_MAP(DisallowGfxOff, PPSMC_MSG_DisableGfxOff),
+ MSG_MAP(PowerDownIspByTile, PPSMC_MSG_PowerDownIspByTile),
+ MSG_MAP(PowerUpIspByTile, PPSMC_MSG_PowerUpIspByTile),
+ MSG_MAP(PowerDownVcn, PPSMC_MSG_PowerDownVcn),
+ MSG_MAP(PowerUpVcn, PPSMC_MSG_PowerUpVcn),
+ MSG_MAP(PowerDownSdma, PPSMC_MSG_PowerDownSdma),
+ MSG_MAP(PowerUpSdma, PPSMC_MSG_PowerUpSdma),
+ MSG_MAP(SetHardMinIspclkByFreq, PPSMC_MSG_SetHardMinIspclkByFreq),
+ MSG_MAP(SetHardMinVcn, PPSMC_MSG_SetHardMinVcn),
+ MSG_MAP(Spare1, PPSMC_MSG_spare1),
+ MSG_MAP(Spare2, PPSMC_MSG_spare2),
+ MSG_MAP(SetAllowFclkSwitch, PPSMC_MSG_SetAllowFclkSwitch),
+ MSG_MAP(SetMinVideoGfxclkFreq, PPSMC_MSG_SetMinVideoGfxclkFreq),
+ MSG_MAP(ActiveProcessNotify, PPSMC_MSG_ActiveProcessNotify),
+ MSG_MAP(SetCustomPolicy, PPSMC_MSG_SetCustomPolicy),
+ MSG_MAP(SetVideoFps, PPSMC_MSG_SetVideoFps),
+ MSG_MAP(NumOfDisplays, PPSMC_MSG_SetDisplayCount),
+ MSG_MAP(QueryPowerLimit, PPSMC_MSG_QueryPowerLimit),
+ MSG_MAP(SetDriverDramAddrHigh, PPSMC_MSG_SetDriverDramAddrHigh),
+ MSG_MAP(SetDriverDramAddrLow, PPSMC_MSG_SetDriverDramAddrLow),
+ MSG_MAP(TransferTableSmu2Dram, PPSMC_MSG_TransferTableSmu2Dram),
+ MSG_MAP(TransferTableDram2Smu, PPSMC_MSG_TransferTableDram2Smu),
+ MSG_MAP(GfxDeviceDriverReset, PPSMC_MSG_GfxDeviceDriverReset),
+ MSG_MAP(SetGfxclkOverdriveByFreqVid, PPSMC_MSG_SetGfxclkOverdriveByFreqVid),
+ MSG_MAP(SetHardMinDcfclkByFreq, PPSMC_MSG_SetHardMinDcfclkByFreq),
+ MSG_MAP(SetHardMinSocclkByFreq, PPSMC_MSG_SetHardMinSocclkByFreq),
+ MSG_MAP(ControlIgpuATS, PPSMC_MSG_ControlIgpuATS),
+ MSG_MAP(SetMinVideoFclkFreq, PPSMC_MSG_SetMinVideoFclkFreq),
+ MSG_MAP(SetMinDeepSleepDcfclk, PPSMC_MSG_SetMinDeepSleepDcfclk),
+ MSG_MAP(ForcePowerDownGfx, PPSMC_MSG_ForcePowerDownGfx),
+ MSG_MAP(SetPhyclkVoltageByFreq, PPSMC_MSG_SetPhyclkVoltageByFreq),
+ MSG_MAP(SetDppclkVoltageByFreq, PPSMC_MSG_SetDppclkVoltageByFreq),
+ MSG_MAP(SetSoftMinVcn, PPSMC_MSG_SetSoftMinVcn),
+ MSG_MAP(EnablePostCode, PPSMC_MSG_EnablePostCode),
+ MSG_MAP(GetGfxclkFrequency, PPSMC_MSG_GetGfxclkFrequency),
+ MSG_MAP(GetFclkFrequency, PPSMC_MSG_GetFclkFrequency),
+ MSG_MAP(GetMinGfxclkFrequency, PPSMC_MSG_GetMinGfxclkFrequency),
+ MSG_MAP(GetMaxGfxclkFrequency, PPSMC_MSG_GetMaxGfxclkFrequency),
+ MSG_MAP(SoftReset, PPSMC_MSG_SoftReset),
+ MSG_MAP(SetGfxCGPG, PPSMC_MSG_SetGfxCGPG),
+ MSG_MAP(SetSoftMaxGfxClk, PPSMC_MSG_SetSoftMaxGfxClk),
+ MSG_MAP(SetHardMinGfxClk, PPSMC_MSG_SetHardMinGfxClk),
+ MSG_MAP(SetSoftMaxSocclkByFreq, PPSMC_MSG_SetSoftMaxSocclkByFreq),
+ MSG_MAP(SetSoftMaxFclkByFreq, PPSMC_MSG_SetSoftMaxFclkByFreq),
+ MSG_MAP(SetSoftMaxVcn, PPSMC_MSG_SetSoftMaxVcn),
+ MSG_MAP(PowerGateMmHub, PPSMC_MSG_PowerGateMmHub),
+ MSG_MAP(UpdatePmeRestore, PPSMC_MSG_UpdatePmeRestore),
+ MSG_MAP(GpuChangeState, PPSMC_MSG_GpuChangeState),
+ MSG_MAP(SetPowerLimitPercentage, PPSMC_MSG_SetPowerLimitPercentage),
+ MSG_MAP(ForceGfxContentSave, PPSMC_MSG_ForceGfxContentSave),
+ MSG_MAP(EnableTmdp48MHzRefclkPwrDown, PPSMC_MSG_EnableTmdp48MHzRefclkPwrDown),
+ MSG_MAP(PowerDownJpeg, PPSMC_MSG_PowerDownJpeg),
+ MSG_MAP(PowerUpJpeg, PPSMC_MSG_PowerUpJpeg),
+ MSG_MAP(PowerGateAtHub, PPSMC_MSG_PowerGateAtHub),
+ MSG_MAP(SetSoftMinJpeg, PPSMC_MSG_SetSoftMinJpeg),
+ MSG_MAP(SetHardMinFclkByFreq, PPSMC_MSG_SetHardMinFclkByFreq),
+};
+
+static struct smu_12_0_cmn2aisc_mapping renoir_table_map[SMU_TABLE_COUNT] = {
+ TAB_MAP_VALID(WATERMARKS),
+ TAB_MAP_INVALID(CUSTOM_DPM),
+ TAB_MAP_VALID(DPMCLOCKS),
+ TAB_MAP_VALID(SMU_METRICS),
+};
+
+static int renoir_get_smu_msg_index(struct smu_context *smc, uint32_t index)
+{
+ struct smu_12_0_cmn2aisc_mapping mapping;
+
+ if (index >= SMU_MSG_MAX_COUNT)
+ return -EINVAL;
+
+ mapping = renoir_message_map[index];
+ if (!(mapping.valid_mapping))
+ return -EINVAL;
+
+ return mapping.map_to;
+}
+
+static int renoir_get_smu_table_index(struct smu_context *smc, uint32_t index)
+{
+ struct smu_12_0_cmn2aisc_mapping mapping;
+
+ if (index >= SMU_TABLE_COUNT)
+ return -EINVAL;
+
+ mapping = renoir_table_map[index];
+ if (!(mapping.valid_mapping))
+ return -EINVAL;
+
+ return mapping.map_to;
+}
+
+static int renoir_tables_init(struct smu_context *smu, struct smu_table *tables)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+
+ SMU_TABLE_INIT(tables, SMU_TABLE_WATERMARKS, sizeof(Watermarks_t),
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
+ SMU_TABLE_INIT(tables, SMU_TABLE_DPMCLOCKS, sizeof(DpmClocks_t),
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
+ SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_t),
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
+
+ smu_table->clocks_table = kzalloc(sizeof(DpmClocks_t), GFP_KERNEL);
+ if (!smu_table->clocks_table)
+ return -ENOMEM;
+
+ return 0;
+}
+
+/**
+ * This interface is only for getting the ultimate (boundary) uclk frequency;
+ * it should not be duplicated into many similar per-clock callbacks.
+ */
+static int renoir_get_dpm_uclk_limited(struct smu_context *smu, uint32_t *clock, bool max)
+{
+
+ DpmClocks_t *table = smu->smu_table.clocks_table;
+
+ if (!clock || !table)
+ return -EINVAL;
+
+ if (max)
+ *clock = table->FClocks[NUM_FCLK_DPM_LEVELS-1].Freq;
+ else
+ *clock = table->FClocks[0].Freq;
+
+ return 0;
+
+}
+
+static const struct pptable_funcs renoir_ppt_funcs = {
+ .get_smu_msg_index = renoir_get_smu_msg_index,
+ .get_smu_table_index = renoir_get_smu_table_index,
+ .tables_init = renoir_tables_init,
+ .set_power_state = NULL,
+ .get_dpm_uclk_limited = renoir_get_dpm_uclk_limited,
+};
+
+void renoir_set_ppt_funcs(struct smu_context *smu)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+
+ smu->ppt_funcs = &renoir_ppt_funcs;
+ smu->smc_if_version = SMU12_DRIVER_IF_VERSION;
+ smu_table->table_count = TABLE_COUNT;
+}
diff --git a/drivers/gpu/drm/amd/powerplay/renoir_ppt.h b/drivers/gpu/drm/amd/powerplay/renoir_ppt.h
new file mode 100644
index 000000000000..e9b7237c0f7f
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/renoir_ppt.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __RENOIR_PPT_H__
+#define __RENOIR_PPT_H__
+
+extern void renoir_set_ppt_funcs(struct smu_context *smu);
+
+#endif
diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
index 53097961bf2b..c5257ae3188a 100644
--- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
@@ -33,6 +33,7 @@
#include "soc15_common.h"
#include "atom.h"
#include "vega20_ppt.h"
+#include "arcturus_ppt.h"
#include "navi10_ppt.h"
#include "asic_reg/thm/thm_11_0_2_offset.h"
@@ -45,7 +46,10 @@
#include "asic_reg/smuio/smuio_11_0_0_sh_mask.h"
MODULE_FIRMWARE("amdgpu/vega20_smc.bin");
+MODULE_FIRMWARE("amdgpu/arcturus_smc.bin");
MODULE_FIRMWARE("amdgpu/navi10_smc.bin");
+MODULE_FIRMWARE("amdgpu/navi14_smc.bin");
+MODULE_FIRMWARE("amdgpu/navi12_smc.bin");
#define SMU11_VOLTAGE_SCALE 4
@@ -102,8 +106,8 @@ static int smu_v11_0_send_msg(struct smu_context *smu, uint16_t msg)
ret = smu_v11_0_wait_for_response(smu);
if (ret)
- pr_err("Failed to send message 0x%x, response 0x%x\n", index,
- ret);
+ pr_err("failed send message: %10s (%d) response %#x\n",
+ smu_get_message_name(smu, msg), index, ret);
return ret;
@@ -123,8 +127,8 @@ smu_v11_0_send_msg_with_param(struct smu_context *smu, uint16_t msg,
ret = smu_v11_0_wait_for_response(smu);
if (ret)
- pr_err("Failed to send message 0x%x, response 0x%x, param 0x%x\n",
- index, ret, param);
+ pr_err("failed send message: %10s (%d) \tparam: 0x%08x response %#x\n",
+ smu_get_message_name(smu, msg), index, param, ret);
WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
@@ -134,8 +138,8 @@ smu_v11_0_send_msg_with_param(struct smu_context *smu, uint16_t msg,
ret = smu_v11_0_wait_for_response(smu);
if (ret)
- pr_err("Failed to send message 0x%x, response 0x%x param 0x%x\n",
- index, ret, param);
+ pr_err("failed send message: %10s (%d) \tparam: 0x%08x response %#x\n",
+ smu_get_message_name(smu, msg), index, param, ret);
return ret;
}
@@ -154,9 +158,18 @@ static int smu_v11_0_init_microcode(struct smu_context *smu)
case CHIP_VEGA20:
chip_name = "vega20";
break;
+ case CHIP_ARCTURUS:
+ chip_name = "arcturus";
+ break;
case CHIP_NAVI10:
chip_name = "navi10";
break;
+ case CHIP_NAVI14:
+ chip_name = "navi14";
+ break;
+ case CHIP_NAVI12:
+ chip_name = "navi12";
+ break;
default:
BUG();
}
@@ -202,7 +215,7 @@ static int smu_v11_0_load_microcode(struct smu_context *smu)
uint32_t i;
uint32_t mp1_fw_flags;
- hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
+ hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
src = (const uint32_t *)(adev->pm.fw->data +
le32_to_cpu(hdr->header.ucode_array_offset_bytes));
@@ -261,6 +274,25 @@ static int smu_v11_0_check_fw_version(struct smu_context *smu)
smu_minor = (smu_version >> 8) & 0xff;
smu_debug = (smu_version >> 0) & 0xff;
+ switch (smu->adev->asic_type) {
+ case CHIP_VEGA20:
+ smu->smc_if_version = SMU11_DRIVER_IF_VERSION_VG20;
+ break;
+ case CHIP_ARCTURUS:
+ smu->smc_if_version = SMU11_DRIVER_IF_VERSION_ARCT;
+ break;
+ case CHIP_NAVI10:
+ smu->smc_if_version = SMU11_DRIVER_IF_VERSION_NV10;
+ break;
+ case CHIP_NAVI14:
+ smu->smc_if_version = SMU11_DRIVER_IF_VERSION_NV14;
+ break;
+ default:
+ pr_err("smu unsupported asic type:%d.\n", smu->adev->asic_type);
+ smu->smc_if_version = SMU11_DRIVER_IF_VERSION_INV;
+ break;
+ }
+
/*
* 1. if_version mismatch is not critical as our fw is designed
* to be backward compatible.
@@ -295,7 +327,8 @@ static int smu_v11_0_set_pptable_v2_0(struct smu_context *smu, void **table, uin
return 0;
}
-static int smu_v11_0_set_pptable_v2_1(struct smu_context *smu, void **table, uint32_t *size, uint32_t pptable_id)
+static int smu_v11_0_set_pptable_v2_1(struct smu_context *smu, void **table,
+ uint32_t *size, uint32_t pptable_id)
{
struct amdgpu_device *adev = smu->adev;
const struct smc_firmware_header_v2_1 *v2_1;
@@ -537,6 +570,9 @@ int smu_v11_0_get_vbios_bootup_values(struct smu_context *smu)
smu->smu_table.boot_values.pp_table_id = v_3_3->pplib_pptable_id;
}
+ smu->smu_table.boot_values.format_revision = header->format_revision;
+ smu->smu_table.boot_values.content_revision = header->content_revision;
+
return 0;
}
@@ -616,6 +652,24 @@ static int smu_v11_0_get_clk_info_from_vbios(struct smu_context *smu)
output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input;
smu->smu_table.boot_values.dclk = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;
+ if ((smu->smu_table.boot_values.format_revision == 3) &&
+ (smu->smu_table.boot_values.content_revision >= 2)) {
+ memset(&input, 0, sizeof(input));
+ input.clk_id = SMU11_SYSPLL1_0_FCLK_ID;
+ input.syspll_id = SMU11_SYSPLL1_2_ID;
+ input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
+ index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
+ getsmuclockinfo);
+
+ ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
+ (uint32_t *)&input);
+ if (ret)
+ return -EINVAL;
+
+ output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input;
+ smu->smu_table.boot_values.fclk = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;
+ }
+
return 0;
}
@@ -724,8 +778,6 @@ static int smu_v11_0_write_watermarks_table(struct smu_context *smu)
struct smu_table *table = NULL;
table = &smu_table->tables[SMU_TABLE_WATERMARKS];
- if (!table)
- return -EINVAL;
if (!table->cpu_addr)
return -EINVAL;
@@ -790,44 +842,6 @@ static int smu_v11_0_init_display_count(struct smu_context *smu, uint32_t count)
return ret;
}
-static int smu_v11_0_update_feature_enable_state(struct smu_context *smu, uint32_t feature_id, bool enabled)
-{
- uint32_t feature_low = 0, feature_high = 0;
- int ret = 0;
-
- if (!smu->pm_enabled)
- return ret;
- if (feature_id >= 0 && feature_id < 31)
- feature_low = (1 << feature_id);
- else if (feature_id > 31 && feature_id < 63)
- feature_high = (1 << feature_id);
- else
- return -EINVAL;
-
- if (enabled) {
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesLow,
- feature_low);
- if (ret)
- return ret;
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesHigh,
- feature_high);
- if (ret)
- return ret;
-
- } else {
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesLow,
- feature_low);
- if (ret)
- return ret;
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesHigh,
- feature_high);
- if (ret)
- return ret;
-
- }
-
- return ret;
-}
static int smu_v11_0_set_allowed_mask(struct smu_context *smu)
{
@@ -929,11 +943,21 @@ smu_v11_0_get_max_sustainable_clock(struct smu_context *smu, uint32_t *clock,
enum smu_clk_type clock_select)
{
int ret = 0;
+ int clk_id;
if (!smu->pm_enabled)
return ret;
+
+ if ((smu_msg_get_index(smu, SMU_MSG_GetDcModeMaxDpmFreq) < 0) ||
+ (smu_msg_get_index(smu, SMU_MSG_GetMaxDpmFreq) < 0))
+ return 0;
+
+ clk_id = smu_clk_get_index(smu, clock_select);
+ if (clk_id < 0)
+ return -EINVAL;
+
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDcModeMaxDpmFreq,
- smu_clk_get_index(smu, clock_select) << 16);
+ clk_id << 16);
if (ret) {
pr_err("[GetMaxSustainableClock] Failed to get max DC clock from SMC!");
return ret;
@@ -948,7 +972,7 @@ smu_v11_0_get_max_sustainable_clock(struct smu_context *smu, uint32_t *clock,
/* if DC limit is zero, return AC limit */
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq,
- smu_clk_get_index(smu, clock_select) << 16);
+ clk_id << 16);
if (ret) {
pr_err("[GetMaxSustainableClock] failed to get max AC clock from SMC!");
return ret;
@@ -1039,57 +1063,32 @@ static int smu_v11_0_init_max_sustainable_clocks(struct smu_context *smu)
return 0;
}
-static int smu_v11_0_get_power_limit(struct smu_context *smu,
- uint32_t *limit,
- bool get_default)
+static int smu_v11_0_set_power_limit(struct smu_context *smu, uint32_t n)
{
int ret = 0;
- if (get_default) {
- mutex_lock(&smu->mutex);
- *limit = smu->default_power_limit;
- if (smu->od_enabled) {
- *limit *= (100 + smu->smu_table.TDPODLimit);
- *limit /= 100;
- }
- mutex_unlock(&smu->mutex);
- } else {
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetPptLimit,
- smu_power_get_index(smu, SMU_POWER_SOURCE_AC) << 16);
- if (ret) {
- pr_err("[%s] get PPT limit failed!", __func__);
- return ret;
- }
- smu_read_smc_arg(smu, limit);
- smu->power_limit = *limit;
+ if (n > smu->default_power_limit) {
+ pr_err("New power limit is over the max allowed %d\n",
+ smu->default_power_limit);
+ return -EINVAL;
}
- return ret;
-}
-
-static int smu_v11_0_set_power_limit(struct smu_context *smu, uint32_t n)
-{
- uint32_t max_power_limit;
- int ret = 0;
-
if (n == 0)
n = smu->default_power_limit;
- max_power_limit = smu->default_power_limit;
-
- if (smu->od_enabled) {
- max_power_limit *= (100 + smu->smu_table.TDPODLimit);
- max_power_limit /= 100;
+ if (!smu_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) {
+ pr_err("Setting new power limit is not supported!\n");
+ return -EOPNOTSUPP;
}
- if (smu_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT))
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetPptLimit, n);
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetPptLimit, n);
if (ret) {
- pr_err("[%s] Set power limit Failed!", __func__);
+ pr_err("[%s] Set power limit Failed!\n", __func__);
return ret;
}
+ smu->power_limit = n;
- return ret;
+ return 0;
}
static int smu_v11_0_get_current_clk_freq(struct smu_context *smu,
@@ -1098,16 +1097,21 @@ static int smu_v11_0_get_current_clk_freq(struct smu_context *smu,
{
int ret = 0;
uint32_t freq = 0;
+ int asic_clk_id;
if (clk_id >= SMU_CLK_COUNT || !value)
return -EINVAL;
+ asic_clk_id = smu_clk_get_index(smu, clk_id);
+ if (asic_clk_id < 0)
+ return -EINVAL;
+
/* if don't has GetDpmClockFreq Message, try get current clock by SmuMetrics_t */
- if (smu_msg_get_index(smu, SMU_MSG_GetDpmClockFreq) == 0)
+ if (smu_msg_get_index(smu, SMU_MSG_GetDpmClockFreq) < 0)
ret = smu_get_current_clk_freq_by_table(smu, clk_id, &freq);
else {
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDpmClockFreq,
- (smu_clk_get_index(smu, clk_id) << 16));
+ (asic_clk_id << 16));
if (ret)
return ret;
@@ -1123,23 +1127,17 @@ static int smu_v11_0_get_current_clk_freq(struct smu_context *smu,
}
static int smu_v11_0_set_thermal_range(struct smu_context *smu,
- struct smu_temperature_range *range)
+ struct smu_temperature_range range)
{
struct amdgpu_device *adev = smu->adev;
int low = SMU_THERMAL_MINIMUM_ALERT_TEMP;
int high = SMU_THERMAL_MAXIMUM_ALERT_TEMP;
uint32_t val;
- if (!range)
- return -EINVAL;
-
- if (low < range->min)
- low = range->min;
- if (high > range->max)
- high = range->max;
-
- low = max(SMU_THERMAL_MINIMUM_ALERT_TEMP, range->min);
- high = min(SMU_THERMAL_MAXIMUM_ALERT_TEMP, range->max);
+ low = max(SMU_THERMAL_MINIMUM_ALERT_TEMP,
+ range.min / SMU_TEMPERATURE_UNITS_PER_CENTIGRADES);
+ high = min(SMU_THERMAL_MAXIMUM_ALERT_TEMP,
+ range.max / SMU_TEMPERATURE_UNITS_PER_CENTIGRADES);
if (low > high)
return -EINVAL;
@@ -1175,27 +1173,20 @@ static int smu_v11_0_enable_thermal_alert(struct smu_context *smu)
static int smu_v11_0_start_thermal_control(struct smu_context *smu)
{
int ret = 0;
- struct smu_temperature_range range = {
- TEMP_RANGE_MIN,
- TEMP_RANGE_MAX,
- TEMP_RANGE_MAX,
- TEMP_RANGE_MIN,
- TEMP_RANGE_MAX,
- TEMP_RANGE_MAX,
- TEMP_RANGE_MIN,
- TEMP_RANGE_MAX,
- TEMP_RANGE_MAX};
+ struct smu_temperature_range range;
struct amdgpu_device *adev = smu->adev;
if (!smu->pm_enabled)
return ret;
+ memcpy(&range, &smu11_thermal_policy[0], sizeof(struct smu_temperature_range));
+
ret = smu_get_thermal_temperature_range(smu, &range);
if (ret)
return ret;
if (smu->smu_table.thermal_controller_type) {
- ret = smu_v11_0_set_thermal_range(smu, &range);
+ ret = smu_v11_0_set_thermal_range(smu, range);
if (ret)
return ret;
@@ -1208,17 +1199,15 @@ static int smu_v11_0_start_thermal_control(struct smu_context *smu)
return ret;
}
- adev->pm.dpm.thermal.min_temp = range.min * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
- adev->pm.dpm.thermal.max_temp = range.max * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
- adev->pm.dpm.thermal.max_edge_emergency_temp = range.edge_emergency_max * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
- adev->pm.dpm.thermal.min_hotspot_temp = range.hotspot_min * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
- adev->pm.dpm.thermal.max_hotspot_crit_temp = range.hotspot_crit_max * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
- adev->pm.dpm.thermal.max_hotspot_emergency_temp = range.hotspot_emergency_max * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
- adev->pm.dpm.thermal.min_mem_temp = range.mem_min * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
- adev->pm.dpm.thermal.max_mem_crit_temp = range.mem_crit_max * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
- adev->pm.dpm.thermal.max_mem_emergency_temp = range.mem_emergency_max * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
- adev->pm.dpm.thermal.min_temp = range.min * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
- adev->pm.dpm.thermal.max_temp = range.max * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ adev->pm.dpm.thermal.min_temp = range.min;
+ adev->pm.dpm.thermal.max_temp = range.max;
+ adev->pm.dpm.thermal.max_edge_emergency_temp = range.edge_emergency_max;
+ adev->pm.dpm.thermal.min_hotspot_temp = range.hotspot_min;
+ adev->pm.dpm.thermal.max_hotspot_crit_temp = range.hotspot_crit_max;
+ adev->pm.dpm.thermal.max_hotspot_emergency_temp = range.hotspot_emergency_max;
+ adev->pm.dpm.thermal.min_mem_temp = range.mem_min;
+ adev->pm.dpm.thermal.max_mem_crit_temp = range.mem_crit_max;
+ adev->pm.dpm.thermal.max_mem_emergency_temp = range.mem_emergency_max;
return ret;
}
@@ -1252,6 +1241,10 @@ static int smu_v11_0_read_sensor(struct smu_context *smu,
void *data, uint32_t *size)
{
int ret = 0;
+
+ if(!data || !size)
+ return -EINVAL;
+
switch (sensor) {
case AMDGPU_PP_SENSOR_GFX_MCLK:
ret = smu_get_current_clk_freq(smu, SMU_UCLK, (uint32_t *)data);
@@ -1274,10 +1267,6 @@ static int smu_v11_0_read_sensor(struct smu_context *smu,
break;
}
- /* try get sensor data by asic */
- if (ret)
- ret = smu_asic_read_sensor(smu, sensor, data, size);
-
if (ret)
*size = 0;
@@ -1324,10 +1313,15 @@ smu_v11_0_display_clock_voltage_request(struct smu_context *smu,
if (ret)
goto failed;
+ if (clk_select == SMU_UCLK && smu->disable_uclk_switch)
+ return 0;
+
mutex_lock(&smu->mutex);
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinByFreq,
- (smu_clk_get_index(smu, clk_select) << 16) | clk_freq);
+ ret = smu_set_hard_freq_range(smu, clk_select, clk_freq, 0);
mutex_unlock(&smu->mutex);
+
+ if(clk_select == SMU_UCLK)
+ smu->hard_min_uclk_req_from_dal = clk_freq;
}
failed:
@@ -1363,6 +1357,8 @@ static int smu_v11_0_gfx_off_control(struct smu_context *smu, bool enable)
case CHIP_VEGA20:
break;
case CHIP_NAVI10:
+ case CHIP_NAVI14:
+ case CHIP_NAVI12:
if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
return 0;
mutex_lock(&smu->mutex);
@@ -1389,17 +1385,17 @@ smu_v11_0_get_fan_control_mode(struct smu_context *smu)
}
static int
-smu_v11_0_smc_fan_control(struct smu_context *smu, bool start)
+smu_v11_0_auto_fan_control(struct smu_context *smu, bool auto_fan_control)
{
int ret = 0;
if (!smu_feature_is_supported(smu, SMU_FEATURE_FAN_CONTROL_BIT))
return 0;
- ret = smu_feature_set_enabled(smu, SMU_FEATURE_FAN_CONTROL_BIT, start);
+ ret = smu_feature_set_enabled(smu, SMU_FEATURE_FAN_CONTROL_BIT, auto_fan_control);
if (ret)
pr_err("[%s]%s smc FAN CONTROL feature failed!",
- __func__, (start ? "Start" : "Stop"));
+ __func__, (auto_fan_control ? "Start" : "Stop"));
return ret;
}
@@ -1423,16 +1419,15 @@ static int
smu_v11_0_set_fan_speed_percent(struct smu_context *smu, uint32_t speed)
{
struct amdgpu_device *adev = smu->adev;
- uint32_t duty100;
- uint32_t duty;
+ uint32_t duty100, duty;
uint64_t tmp64;
- bool stop = 0;
if (speed > 100)
speed = 100;
- if (smu_v11_0_smc_fan_control(smu, stop))
+ if (smu_v11_0_auto_fan_control(smu, 0))
return -EINVAL;
+
duty100 = REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL1),
CG_FDO_CTRL1, FMAX_DUTY100);
if (!duty100)
@@ -1454,18 +1449,16 @@ smu_v11_0_set_fan_control_mode(struct smu_context *smu,
uint32_t mode)
{
int ret = 0;
- bool start = 1;
- bool stop = 0;
switch (mode) {
case AMD_FAN_CTRL_NONE:
ret = smu_v11_0_set_fan_speed_percent(smu, 100);
break;
case AMD_FAN_CTRL_MANUAL:
- ret = smu_v11_0_smc_fan_control(smu, stop);
+ ret = smu_v11_0_auto_fan_control(smu, 0);
break;
case AMD_FAN_CTRL_AUTO:
- ret = smu_v11_0_smc_fan_control(smu, start);
+ ret = smu_v11_0_auto_fan_control(smu, 1);
break;
default:
break;
@@ -1485,13 +1478,12 @@ static int smu_v11_0_set_fan_speed_rpm(struct smu_context *smu,
struct amdgpu_device *adev = smu->adev;
int ret;
uint32_t tach_period, crystal_clock_freq;
- bool stop = 0;
if (!speed)
return -EINVAL;
mutex_lock(&(smu->mutex));
- ret = smu_v11_0_smc_fan_control(smu, stop);
+ ret = smu_v11_0_auto_fan_control(smu, 0);
if (ret)
goto set_fan_speed_rpm_failed;
@@ -1672,7 +1664,7 @@ static bool smu_v11_0_baco_is_support(struct smu_context *smu)
static enum smu_baco_state smu_v11_0_baco_get_state(struct smu_context *smu)
{
struct smu_baco_context *smu_baco = &smu->smu_baco;
- enum smu_baco_state baco_state = SMU_BACO_STATE_EXIT;
+ enum smu_baco_state baco_state;
mutex_lock(&smu_baco->mutex);
baco_state = smu_baco->state;
@@ -1726,6 +1718,43 @@ static int smu_v11_0_baco_reset(struct smu_context *smu)
return ret;
}
+static int smu_v11_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type clk_type,
+ uint32_t *min, uint32_t *max)
+{
+ int ret = 0, clk_id = 0;
+ uint32_t param = 0;
+
+ mutex_lock(&smu->mutex);
+ clk_id = smu_clk_get_index(smu, clk_type);
+ if (clk_id < 0) {
+ ret = -EINVAL;
+ goto failed;
+ }
+ param = (clk_id & 0xffff) << 16;
+
+ if (max) {
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq, param);
+ if (ret)
+ goto failed;
+ ret = smu_read_smc_arg(smu, max);
+ if (ret)
+ goto failed;
+ }
+
+ if (min) {
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMinDpmFreq, param);
+ if (ret)
+ goto failed;
+ ret = smu_read_smc_arg(smu, min);
+ if (ret)
+ goto failed;
+ }
+
+failed:
+ mutex_unlock(&smu->mutex);
+ return ret;
+}
+
static const struct smu_funcs smu_v11_0_funcs = {
.init_microcode = smu_v11_0_init_microcode,
.load_microcode = smu_v11_0_load_microcode,
@@ -1744,7 +1773,7 @@ static const struct smu_funcs smu_v11_0_funcs = {
.notify_memory_pool_location = smu_v11_0_notify_memory_pool_location,
.check_pptable = smu_v11_0_check_pptable,
.parse_pptable = smu_v11_0_parse_pptable,
- .populate_smc_pptable = smu_v11_0_populate_smc_pptable,
+ .populate_smc_tables = smu_v11_0_populate_smc_pptable,
.write_pptable = smu_v11_0_write_pptable,
.write_watermarks_table = smu_v11_0_write_watermarks_table,
.set_min_dcef_deep_sleep = smu_v11_0_set_min_dcef_deep_sleep,
@@ -1753,9 +1782,7 @@ static const struct smu_funcs smu_v11_0_funcs = {
.set_allowed_mask = smu_v11_0_set_allowed_mask,
.get_enabled_mask = smu_v11_0_get_enabled_mask,
.system_features_control = smu_v11_0_system_features_control,
- .update_feature_enable_state = smu_v11_0_update_feature_enable_state,
.notify_display_change = smu_v11_0_notify_display_change,
- .get_power_limit = smu_v11_0_get_power_limit,
.set_power_limit = smu_v11_0_set_power_limit,
.get_current_clk_freq = smu_v11_0_get_current_clk_freq,
.init_max_sustainable_clocks = smu_v11_0_init_max_sustainable_clocks,
@@ -1777,6 +1804,7 @@ static const struct smu_funcs smu_v11_0_funcs = {
.baco_get_state = smu_v11_0_baco_get_state,
.baco_set_state = smu_v11_0_baco_set_state,
.baco_reset = smu_v11_0_baco_reset,
+ .get_dpm_ultimate_freq = smu_v11_0_get_dpm_ultimate_freq,
};
void smu_v11_0_set_smu_funcs(struct smu_context *smu)
@@ -1788,7 +1816,12 @@ void smu_v11_0_set_smu_funcs(struct smu_context *smu)
case CHIP_VEGA20:
vega20_set_ppt_funcs(smu);
break;
+ case CHIP_ARCTURUS:
+ arcturus_set_ppt_funcs(smu);
+ break;
case CHIP_NAVI10:
+ case CHIP_NAVI14:
+ case CHIP_NAVI12:
navi10_set_ppt_funcs(smu);
break;
default:
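
One behavioral detail worth noting from the smu_v11_0.c changes above: the smu11_thermal_policy entries are stored pre-scaled in the fine-grained units the rest of the driver now consumes directly (so -273150 would be -273.15 C at the usual 1000 units per degree), and smu_v11_0_set_thermal_range() divides back to whole degrees before clamping against the alert limits. The stand-alone sketch below shows only that clamp; the 1000 units-per-degree scale and the 0/255 alert bounds are assumptions for illustration, not values taken from this hunk.

#include <stdio.h>

/* assumed scale and alert bounds; check the driver headers for the real ones */
#define UNITS_PER_CENTIGRADE	1000
#define MIN_ALERT_TEMP		0	/* degrees C */
#define MAX_ALERT_TEMP		255	/* degrees C */

#define MAX(a, b)	((a) > (b) ? (a) : (b))
#define MIN(a, b)	((a) < (b) ? (a) : (b))

/* clamp a {min,max} range given in scaled units to the alert window */
static int thermal_range_to_alert(int range_min, int range_max,
				  int *low, int *high)
{
	*low  = MAX(MIN_ALERT_TEMP, range_min / UNITS_PER_CENTIGRADE);
	*high = MIN(MAX_ALERT_TEMP, range_max / UNITS_PER_CENTIGRADE);
	return (*low > *high) ? -22 : 0;	/* -EINVAL on an empty range */
}

int main(void)
{
	int low, high;

	/* first smu11_thermal_policy entry above: -273150 .. 99000 */
	thermal_range_to_alert(-273150, 99000, &low, &high);
	printf("alert range: %d..%d C\n", low, high);	/* prints 0..99 */
	return 0;
}
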
diff --git a/drivers/gpu/drm/amd/powerplay/smu_v12_0.c b/drivers/gpu/drm/amd/powerplay/smu_v12_0.c
new file mode 100644
index 000000000000..9d2280ca1f4b
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/smu_v12_0.c
@@ -0,0 +1,412 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "pp_debug.h"
+#include <linux/firmware.h>
+#include "amdgpu.h"
+#include "amdgpu_smu.h"
+#include "atomfirmware.h"
+#include "amdgpu_atomfirmware.h"
+#include "smu_v12_0.h"
+#include "soc15_common.h"
+#include "atom.h"
+#include "renoir_ppt.h"
+
+#include "asic_reg/mp/mp_12_0_0_offset.h"
+#include "asic_reg/mp/mp_12_0_0_sh_mask.h"
+
+#define smnMP1_FIRMWARE_FLAGS 0x3010024
+
+#define mmSMUIO_GFX_MISC_CNTL 0x00c8
+#define mmSMUIO_GFX_MISC_CNTL_BASE_IDX 0
+#define SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS_MASK 0x00000006L
+#define SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS__SHIFT 0x1
+
+static int smu_v12_0_send_msg_without_waiting(struct smu_context *smu,
+ uint16_t msg)
+{
+ struct amdgpu_device *adev = smu->adev;
+
+ WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66, msg);
+ return 0;
+}
+
+static int smu_v12_0_read_arg(struct smu_context *smu, uint32_t *arg)
+{
+ struct amdgpu_device *adev = smu->adev;
+
+ *arg = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82);
+ return 0;
+}
+
+static int smu_v12_0_wait_for_response(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t cur_value, i;
+
+ for (i = 0; i < adev->usec_timeout; i++) {
+ cur_value = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90);
+ if ((cur_value & MP1_C2PMSG_90__CONTENT_MASK) != 0)
+ break;
+ udelay(1);
+ }
+
+ /* timeout means wrong logic */
+ if (i == adev->usec_timeout)
+ return -ETIME;
+
+ return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90) == 0x1 ? 0 : -EIO;
+}
+
+static int smu_v12_0_send_msg(struct smu_context *smu, uint16_t msg)
+{
+ struct amdgpu_device *adev = smu->adev;
+ int ret = 0, index = 0;
+
+ index = smu_msg_get_index(smu, msg);
+ if (index < 0)
+ return index;
+
+ smu_v12_0_wait_for_response(smu);
+
+ WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
+
+ smu_v12_0_send_msg_without_waiting(smu, (uint16_t)index);
+
+ ret = smu_v12_0_wait_for_response(smu);
+
+ if (ret)
+ pr_err("Failed to send message 0x%x, response 0x%x\n", index,
+ ret);
+
+ return ret;
+
+}
+
+static int
+smu_v12_0_send_msg_with_param(struct smu_context *smu, uint16_t msg,
+ uint32_t param)
+{
+ struct amdgpu_device *adev = smu->adev;
+ int ret = 0, index = 0;
+
+ index = smu_msg_get_index(smu, msg);
+ if (index < 0)
+ return index;
+
+ ret = smu_v12_0_wait_for_response(smu);
+ if (ret)
+ pr_err("Failed to send message 0x%x, response 0x%x, param 0x%x\n",
+ index, ret, param);
+
+ WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
+
+ WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82, param);
+
+ smu_v12_0_send_msg_without_waiting(smu, (uint16_t)index);
+
+ ret = smu_v12_0_wait_for_response(smu);
+ if (ret)
+ pr_err("Failed to send message 0x%x, response 0x%x param 0x%x\n",
+ index, ret, param);
+
+ return ret;
+}
+
+static int smu_v12_0_check_fw_status(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t mp1_fw_flags;
+
+ mp1_fw_flags = RREG32_PCIE(MP1_Public |
+ (smnMP1_FIRMWARE_FLAGS & 0xffffffff));
+
+ if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
+ MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
+ return 0;
+
+ return -EIO;
+}
+
+static int smu_v12_0_check_fw_version(struct smu_context *smu)
+{
+ uint32_t if_version = 0xff, smu_version = 0xff;
+ uint16_t smu_major;
+ uint8_t smu_minor, smu_debug;
+ int ret = 0;
+
+ ret = smu_get_smc_version(smu, &if_version, &smu_version);
+ if (ret)
+ return ret;
+
+ smu_major = (smu_version >> 16) & 0xffff;
+ smu_minor = (smu_version >> 8) & 0xff;
+ smu_debug = (smu_version >> 0) & 0xff;
+
+ /*
+ * 1. if_version mismatch is not critical as our fw is designed
+ * to be backward compatible.
+ * 2. New fw usually brings some optimizations. But that's visible
+ * only on the paired driver.
+ * Considering the above, we just print a warning message instead
+ * of halting driver loading.
+ */
+ if (if_version != smu->smc_if_version) {
+ pr_info("smu driver if version = 0x%08x, smu fw if version = 0x%08x, "
+ "smu fw version = 0x%08x (%d.%d.%d)\n",
+ smu->smc_if_version, if_version,
+ smu_version, smu_major, smu_minor, smu_debug);
+ pr_warn("SMU driver if version not matched\n");
+ }
+
+ return ret;
+}
+
+static int smu_v12_0_powergate_sdma(struct smu_context *smu, bool gate)
+{
+ if (!(smu->adev->flags & AMD_IS_APU))
+ return 0;
+
+ if (gate)
+ return smu_send_smc_msg(smu, SMU_MSG_PowerDownSdma);
+ else
+ return smu_send_smc_msg(smu, SMU_MSG_PowerUpSdma);
+}
+
+static int smu_v12_0_powergate_vcn(struct smu_context *smu, bool gate)
+{
+ if (!(smu->adev->flags & AMD_IS_APU))
+ return 0;
+
+ if (gate)
+ return smu_send_smc_msg(smu, SMU_MSG_PowerDownVcn);
+ else
+ return smu_send_smc_msg(smu, SMU_MSG_PowerUpVcn);
+}
+
+static int smu_v12_0_set_gfx_cgpg(struct smu_context *smu, bool enable)
+{
+ if (!(smu->adev->pg_flags & AMD_PG_SUPPORT_GFX_PG))
+ return 0;
+
+ return smu_v12_0_send_msg_with_param(smu,
+ SMU_MSG_SetGfxCGPG, enable ? 1 : 0);
+}
+
+/**
+ * smu_v12_0_get_gfxoff_status - query the current GFXOFF status
+ *
+ * @smu: smu_context pointer
+ *
+ * Reads the GFXOFF status field from SMUIO_GFX_MISC_CNTL.
+ *
+ * Returns 0 = in GFXOFF (default).
+ * Returns 1 = transitioning out of GFX state.
+ * Returns 2 = not in GFXOFF.
+ * Returns 3 = transitioning into GFXOFF.
+ */
+static uint32_t smu_v12_0_get_gfxoff_status(struct smu_context *smu)
+{
+ uint32_t reg;
+ uint32_t gfxOff_Status = 0;
+ struct amdgpu_device *adev = smu->adev;
+
+ reg = RREG32_SOC15(SMUIO, 0, mmSMUIO_GFX_MISC_CNTL);
+ gfxOff_Status = (reg & SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS_MASK)
+ >> SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS__SHIFT;
+
+ return gfxOff_Status;
+}
+
+static int smu_v12_0_gfx_off_control(struct smu_context *smu, bool enable)
+{
+ int ret = 0, timeout = 500;
+
+ if (enable) {
+ ret = smu_send_smc_msg(smu, SMU_MSG_AllowGfxOff);
+
+ /* confirm gfx is back to "off" state, timeout is 5 seconds */
+ while (smu_v12_0_get_gfxoff_status(smu) != 0) {
+ msleep(10);
+ timeout--;
+ if (timeout == 0) {
+ DRM_ERROR("enable gfxoff timeout and failed!\n");
+ break;
+ }
+ }
+ } else {
+ ret = smu_send_smc_msg(smu, SMU_MSG_DisallowGfxOff);
+
+ /* confirm gfx is back to "on" state, timeout is 0.5 second */
+ while (smu_v12_0_get_gfxoff_status(smu) != 2) {
+ msleep(1);
+ timeout--;
+ if (timeout == 0) {
+ DRM_ERROR("disable gfxoff timeout and failed!\n");
+ break;
+ }
+ }
+ }
+
+ return ret;
+}
+
+static int smu_v12_0_init_smc_tables(struct smu_context *smu)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct smu_table *tables = NULL;
+
+ if (smu_table->tables || smu_table->table_count == 0)
+ return -EINVAL;
+
+ tables = kcalloc(SMU_TABLE_COUNT, sizeof(struct smu_table),
+ GFP_KERNEL);
+ if (!tables)
+ return -ENOMEM;
+
+ smu_table->tables = tables;
+
+ return smu_tables_init(smu, tables);
+}
+
+static int smu_v12_0_fini_smc_tables(struct smu_context *smu)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+
+ if (!smu_table->tables || smu_table->table_count == 0)
+ return -EINVAL;
+
+ kfree(smu_table->clocks_table);
+ kfree(smu_table->tables);
+
+ smu_table->clocks_table = NULL;
+ smu_table->tables = NULL;
+
+ return 0;
+}
+
+static int smu_v12_0_populate_smc_tables(struct smu_context *smu)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct smu_table *table = NULL;
+
+ table = &smu_table->tables[SMU_TABLE_DPMCLOCKS];
+ if (!table)
+ return -EINVAL;
+
+ if (!table->cpu_addr)
+ return -EINVAL;
+
+ return smu_update_table(smu, SMU_TABLE_DPMCLOCKS, 0, smu_table->clocks_table, false);
+}
+
+static int smu_v12_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type clk_type,
+ uint32_t *min, uint32_t *max)
+{
+ int ret = 0;
+
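+ /* either @min or @max may be NULL when the caller only needs one bound */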
+ mutex_lock(&smu->mutex);
+
+ if (max) {
+ switch (clk_type) {
+ case SMU_GFXCLK:
+ case SMU_SCLK:
+ ret = smu_send_smc_msg(smu, SMU_MSG_GetMaxGfxclkFrequency);
+ if (ret) {
+ pr_err("Attempt to get max GX frequency from SMC Failed !\n");
+ goto failed;
+ }
+ ret = smu_read_smc_arg(smu, max);
+ if (ret)
+ goto failed;
+ break;
+ case SMU_UCLK:
+ ret = smu_get_dpm_uclk_limited(smu, max, true);
+ if (ret)
+ goto failed;
+ break;
+ default:
+ ret = -EINVAL;
+ goto failed;
+ }
+ }
+
+ if (min) {
+ switch (clk_type) {
+ case SMU_GFXCLK:
+ case SMU_SCLK:
+ ret = smu_send_smc_msg(smu, SMU_MSG_GetMinGfxclkFrequency);
+ if (ret) {
+ pr_err("Attempt to get min GX frequency from SMC Failed !\n");
+ goto failed;
+ }
+ ret = smu_read_smc_arg(smu, min);
+ if (ret)
+ goto failed;
+ break;
+ case SMU_UCLK:
+ ret = smu_get_dpm_uclk_limited(smu, min, false);
+ if (ret)
+ goto failed;
+ break;
+ default:
+ ret = -EINVAL;
+ goto failed;
+ }
+ }
+failed:
+ mutex_unlock(&smu->mutex);
+ return ret;
+}
+
+static const struct smu_funcs smu_v12_0_funcs = {
+ .check_fw_status = smu_v12_0_check_fw_status,
+ .check_fw_version = smu_v12_0_check_fw_version,
+ .powergate_sdma = smu_v12_0_powergate_sdma,
+ .powergate_vcn = smu_v12_0_powergate_vcn,
+ .send_smc_msg = smu_v12_0_send_msg,
+ .send_smc_msg_with_param = smu_v12_0_send_msg_with_param,
+ .read_smc_arg = smu_v12_0_read_arg,
+ .set_gfx_cgpg = smu_v12_0_set_gfx_cgpg,
+ .gfx_off_control = smu_v12_0_gfx_off_control,
+ .init_smc_tables = smu_v12_0_init_smc_tables,
+ .fini_smc_tables = smu_v12_0_fini_smc_tables,
+ .populate_smc_tables = smu_v12_0_populate_smc_tables,
+ .get_dpm_ultimate_freq = smu_v12_0_get_dpm_ultimate_freq,
+};
+
+void smu_v12_0_set_smu_funcs(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+
+ smu->funcs = &smu_v12_0_funcs;
+
+ switch (adev->asic_type) {
+ case CHIP_RENOIR:
+ renoir_set_ppt_funcs(smu);
+ break;
+ default:
+ pr_warn("Unknown asic for smu12\n");
+ }
+}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
index 7fb3e57cfc41..3f12cf341511 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
@@ -118,6 +118,7 @@ static int smu10_copy_table_from_smc(struct pp_hwmgr *hwmgr,
{
struct smu10_smumgr *priv =
(struct smu10_smumgr *)(hwmgr->smu_backend);
+ struct amdgpu_device *adev = hwmgr->adev;
PP_ASSERT_WITH_CODE(table_id < MAX_SMU_TABLE,
"Invalid SMU Table ID!", return -EINVAL;);
@@ -135,6 +136,9 @@ static int smu10_copy_table_from_smc(struct pp_hwmgr *hwmgr,
PPSMC_MSG_TransferTableSmu2Dram,
priv->smu_tables.entry[table_id].table_id);
+ /* flush hdp cache */
+ adev->nbio_funcs->hdp_flush(adev, NULL);
+
memcpy(table, (uint8_t *)priv->smu_tables.entry[table_id].table,
priv->smu_tables.entry[table_id].size);
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c
index 8189fe402c6d..4728aa23a818 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c
@@ -722,16 +722,17 @@ static int smu8_request_smu_load_fw(struct pp_hwmgr *hwmgr)
static int smu8_start_smu(struct pp_hwmgr *hwmgr)
{
- struct amdgpu_device *adev = hwmgr->adev;
+ struct amdgpu_device *adev;
uint32_t index = SMN_MP1_SRAM_START_ADDR +
SMU8_FIRMWARE_HEADER_LOCATION +
offsetof(struct SMU8_Firmware_Header, Version);
-
if (hwmgr == NULL || hwmgr->device == NULL)
return -EINVAL;
+ adev = hwmgr->adev;
+
cgs_write_register(hwmgr->device, mmMP0PUB_IND_INDEX, index);
hwmgr->smu_version = cgs_read_register(hwmgr->device, mmMP0PUB_IND_DATA);
pr_info("smu version %02d.%02d.%02d\n",
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
index 967d34b1dc51..0dbdde69f2d9 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
@@ -39,6 +39,7 @@ static int vega10_copy_table_from_smc(struct pp_hwmgr *hwmgr,
uint8_t *table, int16_t table_id)
{
struct vega10_smumgr *priv = hwmgr->smu_backend;
+ struct amdgpu_device *adev = hwmgr->adev;
PP_ASSERT_WITH_CODE(table_id < MAX_SMU_TABLE,
"Invalid SMU Table ID!", return -EINVAL);
@@ -56,6 +57,9 @@ static int vega10_copy_table_from_smc(struct pp_hwmgr *hwmgr,
PPSMC_MSG_TransferTableSmu2Dram,
priv->smu_tables.entry[table_id].table_id);
+ /* flush hdp cache */
+ adev->nbio_funcs->hdp_flush(adev, NULL);
+
memcpy(table, priv->smu_tables.entry[table_id].table,
priv->smu_tables.entry[table_id].size);
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c
index bab3df85fdcd..f9589806bf83 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c
@@ -42,6 +42,7 @@ static int vega12_copy_table_from_smc(struct pp_hwmgr *hwmgr,
{
struct vega12_smumgr *priv =
(struct vega12_smumgr *)(hwmgr->smu_backend);
+ struct amdgpu_device *adev = hwmgr->adev;
PP_ASSERT_WITH_CODE(table_id < TABLE_COUNT,
"Invalid SMU Table ID!", return -EINVAL);
@@ -64,6 +65,9 @@ static int vega12_copy_table_from_smc(struct pp_hwmgr *hwmgr,
"[CopyTableFromSMC] Attempt to Transfer Table From SMU Failed!",
return -EINVAL);
+ /* flush hdp cache */
+ adev->nbio_funcs->hdp_flush(adev, NULL);
+
memcpy(table, priv->smu_tables.entry[table_id].table,
priv->smu_tables.entry[table_id].size);
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c
index 957446cf467e..b9089c6bea85 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c
@@ -44,7 +44,7 @@
#define smnMP0_FW_INTF 0x30101c0
#define smnMP1_PUB_CTRL 0x3010b14
-static bool vega20_is_smc_ram_running(struct pp_hwmgr *hwmgr)
+bool vega20_is_smc_ram_running(struct pp_hwmgr *hwmgr)
{
struct amdgpu_device *adev = hwmgr->adev;
uint32_t mp1_fw_flags;
@@ -163,6 +163,7 @@ static int vega20_copy_table_from_smc(struct pp_hwmgr *hwmgr,
{
struct vega20_smumgr *priv =
(struct vega20_smumgr *)(hwmgr->smu_backend);
+ struct amdgpu_device *adev = hwmgr->adev;
int ret = 0;
PP_ASSERT_WITH_CODE(table_id < TABLE_COUNT,
@@ -187,6 +188,9 @@ static int vega20_copy_table_from_smc(struct pp_hwmgr *hwmgr,
"[CopyTableFromSMC] Attempt to Transfer Table From SMU Failed!",
return ret);
+ /* flush hdp cache */
+ adev->nbio_funcs->hdp_flush(adev, NULL);
+
memcpy(table, priv->smu_tables.entry[table_id].table,
priv->smu_tables.entry[table_id].size);
@@ -266,6 +270,7 @@ int vega20_get_activity_monitor_coeff(struct pp_hwmgr *hwmgr,
{
struct vega20_smumgr *priv =
(struct vega20_smumgr *)(hwmgr->smu_backend);
+ struct amdgpu_device *adev = hwmgr->adev;
int ret = 0;
PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
@@ -284,6 +289,9 @@ int vega20_get_activity_monitor_coeff(struct pp_hwmgr *hwmgr,
"[GetActivityMonitor] Attempt to Transfer Table From SMU Failed!",
return ret);
+ /* flush hdp cache */
+ adev->nbio_funcs->hdp_flush(adev, NULL);
+
memcpy(table, priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].table,
priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].size);
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.h
index ec953ab13e87..62ebbfd6068f 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.h
@@ -57,5 +57,7 @@ int vega20_get_activity_monitor_coeff(struct pp_hwmgr *hwmgr,
uint8_t *table, uint16_t workload_type);
int vega20_set_pptable_driver_address(struct pp_hwmgr *hwmgr);
+bool vega20_is_smc_ram_running(struct pp_hwmgr *hwmgr);
+
#endif
diff --git a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
index 6a14497257e4..64386ee3f878 100644
--- a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
@@ -47,7 +47,7 @@
#define CTF_OFFSET_HBM 5
#define MSG_MAP(msg) \
- [SMU_MSG_##msg] = PPSMC_MSG_##msg
+ [SMU_MSG_##msg] = {1, PPSMC_MSG_##msg}
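+/* each map entry is {valid_mapping, map_to}; entries left zero-initialized are rejected by the lookup helpers below */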
#define SMC_DPM_FEATURE (FEATURE_DPM_PREFETCHER_MASK | \
FEATURE_DPM_GFXCLK_MASK | \
@@ -59,7 +59,7 @@
FEATURE_DPM_LINK_MASK | \
FEATURE_DPM_DCEFCLK_MASK)
-static int vega20_message_map[SMU_MSG_MAX_COUNT] = {
+static struct smu_11_0_cmn2aisc_mapping vega20_message_map[SMU_MSG_MAX_COUNT] = {
MSG_MAP(TestMessage),
MSG_MAP(GetSmuVersion),
MSG_MAP(GetDriverIfVersion),
@@ -145,7 +145,7 @@ static int vega20_message_map[SMU_MSG_MAX_COUNT] = {
MSG_MAP(GetAVFSVoltageByDpm),
};
-static int vega20_clk_map[SMU_CLK_COUNT] = {
+static struct smu_11_0_cmn2aisc_mapping vega20_clk_map[SMU_CLK_COUNT] = {
CLK_MAP(GFXCLK, PPCLK_GFXCLK),
CLK_MAP(VCLK, PPCLK_VCLK),
CLK_MAP(DCLK, PPCLK_DCLK),
@@ -159,7 +159,7 @@ static int vega20_clk_map[SMU_CLK_COUNT] = {
CLK_MAP(FCLK, PPCLK_FCLK),
};
-static int vega20_feature_mask_map[SMU_FEATURE_COUNT] = {
+static struct smu_11_0_cmn2aisc_mapping vega20_feature_mask_map[SMU_FEATURE_COUNT] = {
FEA_MAP(DPM_PREFETCHER),
FEA_MAP(DPM_GFXCLK),
FEA_MAP(DPM_UCLK),
@@ -195,7 +195,7 @@ static int vega20_feature_mask_map[SMU_FEATURE_COUNT] = {
FEA_MAP(XGMI),
};
-static int vega20_table_map[SMU_TABLE_COUNT] = {
+static struct smu_11_0_cmn2aisc_mapping vega20_table_map[SMU_TABLE_COUNT] = {
TAB_MAP(PPTABLE),
TAB_MAP(WATERMARKS),
TAB_MAP(AVFS),
@@ -208,12 +208,12 @@ static int vega20_table_map[SMU_TABLE_COUNT] = {
TAB_MAP(OVERDRIVE),
};
-static int vega20_pwr_src_map[SMU_POWER_SOURCE_COUNT] = {
+static struct smu_11_0_cmn2aisc_mapping vega20_pwr_src_map[SMU_POWER_SOURCE_COUNT] = {
PWR_MAP(AC),
PWR_MAP(DC),
};
-static int vega20_workload_map[] = {
+static struct smu_11_0_cmn2aisc_mapping vega20_workload_map[PP_SMC_POWER_PROFILE_COUNT] = {
WORKLOAD_MAP(PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT, WORKLOAD_DEFAULT_BIT),
WORKLOAD_MAP(PP_SMC_POWER_PROFILE_FULLSCREEN3D, WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT),
WORKLOAD_MAP(PP_SMC_POWER_PROFILE_POWERSAVING, WORKLOAD_PPLIB_POWER_SAVING_BIT),
@@ -225,79 +225,92 @@ static int vega20_workload_map[] = {
static int vega20_get_smu_table_index(struct smu_context *smc, uint32_t index)
{
- int val;
+ struct smu_11_0_cmn2aisc_mapping mapping;
+
if (index >= SMU_TABLE_COUNT)
return -EINVAL;
- val = vega20_table_map[index];
- if (val >= TABLE_COUNT)
+ mapping = vega20_table_map[index];
+ if (!(mapping.valid_mapping)) {
return -EINVAL;
+ }
- return val;
+ return mapping.map_to;
}
static int vega20_get_pwr_src_index(struct smu_context *smc, uint32_t index)
{
- int val;
+ struct smu_11_0_cmn2aisc_mapping mapping;
+
if (index >= SMU_POWER_SOURCE_COUNT)
return -EINVAL;
- val = vega20_pwr_src_map[index];
- if (val >= POWER_SOURCE_COUNT)
+ mapping = vega20_pwr_src_map[index];
+ if (!(mapping.valid_mapping)) {
return -EINVAL;
+ }
- return val;
+ return mapping.map_to;
}
static int vega20_get_smu_feature_index(struct smu_context *smc, uint32_t index)
{
- int val;
+ struct smu_11_0_cmn2aisc_mapping mapping;
+
if (index >= SMU_FEATURE_COUNT)
return -EINVAL;
- val = vega20_feature_mask_map[index];
- if (val > 64)
+ mapping = vega20_feature_mask_map[index];
+ if (!(mapping.valid_mapping)) {
return -EINVAL;
+ }
- return val;
+ return mapping.map_to;
}
static int vega20_get_smu_clk_index(struct smu_context *smc, uint32_t index)
{
- int val;
+ struct smu_11_0_cmn2aisc_mapping mapping;
+
if (index >= SMU_CLK_COUNT)
return -EINVAL;
- val = vega20_clk_map[index];
- if (val >= PPCLK_COUNT)
+ mapping = vega20_clk_map[index];
+ if (!(mapping.valid_mapping)) {
return -EINVAL;
+ }
- return val;
+ return mapping.map_to;
}
static int vega20_get_smu_msg_index(struct smu_context *smc, uint32_t index)
{
- int val;
+ struct smu_11_0_cmn2aisc_mapping mapping;
if (index >= SMU_MSG_MAX_COUNT)
return -EINVAL;
- val = vega20_message_map[index];
- if (val > PPSMC_Message_Count)
+ mapping = vega20_message_map[index];
+ if (!(mapping.valid_mapping)) {
return -EINVAL;
+ }
- return val;
+ return mapping.map_to;
}
static int vega20_get_workload_type(struct smu_context *smu, enum PP_SMC_POWER_PROFILE profile)
{
- int val;
+ struct smu_11_0_cmn2aisc_mapping mapping;
+
if (profile > PP_SMC_POWER_PROFILE_CUSTOM)
return -EINVAL;
- val = vega20_workload_map[profile];
+ mapping = vega20_workload_map[profile];
+ if (!(mapping.valid_mapping)) {
+ return -EINVAL;
+ }
- return val;
+ return mapping.map_to;
}
static int vega20_tables_init(struct smu_context *smu, struct smu_table *tables)
@@ -1770,7 +1783,7 @@ static int vega20_get_power_profile_mode(struct smu_context *smu, char *buf)
{
DpmActivityMonitorCoeffInt_t activity_monitor;
uint32_t i, size = 0;
- uint16_t workload_type = 0;
+ int16_t workload_type = 0;
static const char *profile_name[] = {
"BOOTUP_DEFAULT",
"3D_FULL_SCREEN",
@@ -1803,6 +1816,9 @@ static int vega20_get_power_profile_mode(struct smu_context *smu, char *buf)
for (i = 0; i <= PP_SMC_POWER_PROFILE_CUSTOM; i++) {
/* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
workload_type = smu_workload_get_type(smu, i);
+ if (workload_type < 0)
+ return -EINVAL;
+
result = smu_update_table(smu,
SMU_TABLE_ACTIVITY_MONITOR_COEFF, workload_type,
(void *)(&activity_monitor), false);
@@ -1955,6 +1971,8 @@ static int vega20_set_power_profile_mode(struct smu_context *smu, long *input, u
/* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
workload_type = smu_workload_get_type(smu, smu->power_profile_mode);
+ if (workload_type < 0)
+ return -EINVAL;
smu_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask,
1 << workload_type);
@@ -2840,157 +2858,6 @@ static int vega20_dpm_set_vce_enable(struct smu_context *smu, bool enable)
return smu_feature_set_enabled(smu, SMU_FEATURE_DPM_VCE_BIT, enable);
}
-static int vega20_get_enabled_smc_features(struct smu_context *smu,
- uint64_t *features_enabled)
-{
- uint32_t feature_mask[2] = {0, 0};
- int ret = 0;
-
- ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
- if (ret)
- return ret;
-
- *features_enabled = ((((uint64_t)feature_mask[0] << SMU_FEATURES_LOW_SHIFT) & SMU_FEATURES_LOW_MASK) |
- (((uint64_t)feature_mask[1] << SMU_FEATURES_HIGH_SHIFT) & SMU_FEATURES_HIGH_MASK));
-
- return ret;
-}
-
-static int vega20_enable_smc_features(struct smu_context *smu,
- bool enable, uint64_t feature_mask)
-{
- uint32_t smu_features_low, smu_features_high;
- int ret = 0;
-
- smu_features_low = (uint32_t)((feature_mask & SMU_FEATURES_LOW_MASK) >> SMU_FEATURES_LOW_SHIFT);
- smu_features_high = (uint32_t)((feature_mask & SMU_FEATURES_HIGH_MASK) >> SMU_FEATURES_HIGH_SHIFT);
-
- if (enable) {
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesLow,
- smu_features_low);
- if (ret)
- return ret;
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesHigh,
- smu_features_high);
- if (ret)
- return ret;
- } else {
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesLow,
- smu_features_low);
- if (ret)
- return ret;
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesHigh,
- smu_features_high);
- if (ret)
- return ret;
- }
-
- return 0;
-
-}
-
-static int vega20_get_ppfeature_status(struct smu_context *smu, char *buf)
-{
- static const char *ppfeature_name[] = {
- "DPM_PREFETCHER",
- "GFXCLK_DPM",
- "UCLK_DPM",
- "SOCCLK_DPM",
- "UVD_DPM",
- "VCE_DPM",
- "ULV",
- "MP0CLK_DPM",
- "LINK_DPM",
- "DCEFCLK_DPM",
- "GFXCLK_DS",
- "SOCCLK_DS",
- "LCLK_DS",
- "PPT",
- "TDC",
- "THERMAL",
- "GFX_PER_CU_CG",
- "RM",
- "DCEFCLK_DS",
- "ACDC",
- "VR0HOT",
- "VR1HOT",
- "FW_CTF",
- "LED_DISPLAY",
- "FAN_CONTROL",
- "GFX_EDC",
- "GFXOFF",
- "CG",
- "FCLK_DPM",
- "FCLK_DS",
- "MP1CLK_DS",
- "MP0CLK_DS",
- "XGMI",
- "ECC"};
- static const char *output_title[] = {
- "FEATURES",
- "BITMASK",
- "ENABLEMENT"};
- uint64_t features_enabled;
- int i;
- int ret = 0;
- int size = 0;
-
- ret = vega20_get_enabled_smc_features(smu, &features_enabled);
- if (ret)
- return ret;
-
- size += sprintf(buf + size, "Current ppfeatures: 0x%016llx\n", features_enabled);
- size += sprintf(buf + size, "%-19s %-22s %s\n",
- output_title[0],
- output_title[1],
- output_title[2]);
- for (i = 0; i < GNLD_FEATURES_MAX; i++) {
- size += sprintf(buf + size, "%-19s 0x%016llx %6s\n",
- ppfeature_name[i],
- 1ULL << i,
- (features_enabled & (1ULL << i)) ? "Y" : "N");
- }
-
- return size;
-}
-
-static int vega20_set_ppfeature_status(struct smu_context *smu, uint64_t new_ppfeature_masks)
-{
- uint64_t features_enabled;
- uint64_t features_to_enable;
- uint64_t features_to_disable;
- int ret = 0;
-
- if (new_ppfeature_masks >= (1ULL << GNLD_FEATURES_MAX))
- return -EINVAL;
-
- ret = vega20_get_enabled_smc_features(smu, &features_enabled);
- if (ret)
- return ret;
-
- features_to_disable =
- features_enabled & ~new_ppfeature_masks;
- features_to_enable =
- ~features_enabled & new_ppfeature_masks;
-
- pr_debug("features_to_disable 0x%llx\n", features_to_disable);
- pr_debug("features_to_enable 0x%llx\n", features_to_enable);
-
- if (features_to_disable) {
- ret = vega20_enable_smc_features(smu, false, features_to_disable);
- if (ret)
- return ret;
- }
-
- if (features_to_enable) {
- ret = vega20_enable_smc_features(smu, true, features_to_enable);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
static bool vega20_is_dpm_running(struct smu_context *smu)
{
int ret = 0;
@@ -3153,6 +3020,9 @@ static int vega20_read_sensor(struct smu_context *smu,
struct smu_table_context *table_context = &smu->smu_table;
PPTable_t *pptable = table_context->driver_pptable;
+ if (!data || !size)
+ return -EINVAL;
+
switch (sensor) {
case AMDGPU_PP_SENSOR_MAX_FAN_RPM:
*(uint32_t *)data = pptable->FanMaximumRpm;
@@ -3176,7 +3046,7 @@ static int vega20_read_sensor(struct smu_context *smu,
*size = 4;
break;
default:
- return -EINVAL;
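+ /* defer unknown sensors to the generic SMC sensor read */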
+ ret = smu_smc_read_sensor(smu, sensor, data, size);
}
return ret;
@@ -3252,14 +3122,18 @@ static int vega20_get_thermal_temperature_range(struct smu_context *smu,
if (!range || !powerplay_table)
return -EINVAL;
- /* The unit is temperature */
- range->min = 0;
- range->max = powerplay_table->usSoftwareShutdownTemp;
- range->edge_emergency_max = (pptable->TedgeLimit + CTF_OFFSET_EDGE);
- range->hotspot_crit_max = pptable->ThotspotLimit;
- range->hotspot_emergency_max = (pptable->ThotspotLimit + CTF_OFFSET_HOTSPOT);
- range->mem_crit_max = pptable->ThbmLimit;
- range->mem_emergency_max = (pptable->ThbmLimit + CTF_OFFSET_HBM);
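+ /* the pptable limits are in degrees Celsius; scale them to the driver's internal temperature units */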
+ range->max = powerplay_table->usSoftwareShutdownTemp *
+ SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ range->edge_emergency_max = (pptable->TedgeLimit + CTF_OFFSET_EDGE) *
+ SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ range->hotspot_crit_max = pptable->ThotspotLimit *
+ SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ range->hotspot_emergency_max = (pptable->ThotspotLimit + CTF_OFFSET_HOTSPOT) *
+ SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ range->mem_crit_max = pptable->ThbmLimit *
+ SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ range->mem_emergency_max = (pptable->ThbmLimit + CTF_OFFSET_HBM) *
+ SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
return 0;
@@ -3302,8 +3176,6 @@ static const struct pptable_funcs vega20_ppt_funcs = {
.force_dpm_limit_value = vega20_force_dpm_limit_value,
.unforce_dpm_levels = vega20_unforce_dpm_levels,
.get_profiling_clk_mask = vega20_get_profiling_clk_mask,
- .set_ppfeature_status = vega20_set_ppfeature_status,
- .get_ppfeature_status = vega20_get_ppfeature_status,
.is_dpm_running = vega20_is_dpm_running,
.set_thermal_fan_table = vega20_set_thermal_fan_table,
.get_fan_speed_percent = vega20_get_fan_speed_percent,
@@ -3317,6 +3189,5 @@ void vega20_set_ppt_funcs(struct smu_context *smu)
struct smu_table_context *smu_table = &smu->smu_table;
smu->ppt_funcs = &vega20_ppt_funcs;
- smu->smc_if_version = SMU11_DRIVER_IF_VERSION;
smu_table->table_count = TABLE_COUNT;
}