Diffstat (limited to 'drivers/gpu/drm/amd/powerplay/smumgr')
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/Makefile | 6
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c (renamed from drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c) | 2198
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.h | 78
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c | 308
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c | 2498
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.h | 53
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c | 2537
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.h | 9
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.h | 40
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c | 2567
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.h | 2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c | 2364
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.h | 44
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c | 2380
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.c | 130
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.h | 8
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c | 263
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h | 40
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c | 261
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.c | 3275
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.h | 62
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c | 3181
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.h | 20
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c | 194
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.h | 16
25 files changed, 12253 insertions, 10281 deletions
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/Makefile b/drivers/gpu/drm/amd/powerplay/smumgr/Makefile
index e7ad45297b1d..30d3089d7dba 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/Makefile
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/Makefile
@@ -3,9 +3,9 @@
# Makefile for the 'smu manager' sub-component of powerplay.
# It provides the smu management services for the driver.
-SMU_MGR = smumgr.o cz_smumgr.o tonga_smumgr.o fiji_smumgr.o fiji_smc.o \
- polaris10_smumgr.o iceland_smumgr.o polaris10_smc.o tonga_smc.o \
- smu7_smumgr.o iceland_smc.o vega10_smumgr.o rv_smumgr.o
+SMU_MGR = smumgr.o cz_smumgr.o tonga_smumgr.o fiji_smumgr.o \
+ polaris10_smumgr.o iceland_smumgr.o \
+ smu7_smumgr.o vega10_smumgr.o rv_smumgr.o ci_smumgr.o
AMD_PP_SMUMGR = $(addprefix $(AMD_PP_PATH)/smumgr/,$(SMU_MGR))
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
index 51adf04ab4b3..4d672cd15785 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
@@ -1,16 +1,9 @@
/*
- * Copyright 2015 Advanced Micro Devices, Inc.
+ * Copyright 2017 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
@@ -18,90 +11,231 @@
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
*
*/
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/fb.h>
+#include "linux/delay.h"
+#include <linux/types.h>
+#include "smumgr.h"
#include "pp_debug.h"
-#include "iceland_smc.h"
-#include "smu7_dyn_defaults.h"
-
+#include "ci_smumgr.h"
+#include "ppsmc.h"
#include "smu7_hwmgr.h"
#include "hardwaremanager.h"
#include "ppatomctrl.h"
#include "cgs_common.h"
#include "atombios.h"
#include "pppcielanes.h"
-#include "pp_endian.h"
-#include "smu7_ppsmc.h"
-#include "smu71_discrete.h"
+#include "smu/smu_7_0_1_d.h"
+#include "smu/smu_7_0_1_sh_mask.h"
-#include "smu/smu_7_1_1_d.h"
-#include "smu/smu_7_1_1_sh_mask.h"
+#include "dce/dce_8_0_d.h"
+#include "dce/dce_8_0_sh_mask.h"
-#include "gmc/gmc_8_1_d.h"
-#include "gmc/gmc_8_1_sh_mask.h"
+#include "bif/bif_4_1_d.h"
+#include "bif/bif_4_1_sh_mask.h"
-#include "bif/bif_5_0_d.h"
-#include "bif/bif_5_0_sh_mask.h"
+#include "gca/gfx_7_2_d.h"
+#include "gca/gfx_7_2_sh_mask.h"
-#include "dce/dce_10_0_d.h"
-#include "dce/dce_10_0_sh_mask.h"
-#include "processpptables.h"
+#include "gmc/gmc_7_1_d.h"
+#include "gmc/gmc_7_1_sh_mask.h"
-#include "iceland_smumgr.h"
+#include "processpptables.h"
-#define VOLTAGE_SCALE 4
-#define POWERTUNE_DEFAULT_SET_MAX 1
-#define VOLTAGE_VID_OFFSET_SCALE1 625
-#define VOLTAGE_VID_OFFSET_SCALE2 100
+#define MC_CG_ARB_FREQ_F0 0x0a
#define MC_CG_ARB_FREQ_F1 0x0b
-#define VDDC_VDDCI_DELTA 200
-
-#define DEVICE_ID_VI_ICELAND_M_6900 0x6900
-#define DEVICE_ID_VI_ICELAND_M_6901 0x6901
-#define DEVICE_ID_VI_ICELAND_M_6902 0x6902
-#define DEVICE_ID_VI_ICELAND_M_6903 0x6903
-
-static const struct iceland_pt_defaults defaults_iceland = {
- /*
- * sviLoadLIneEn, SviLoadLineVddC, TDC_VDDC_ThrottleReleaseLimitPerc,
- * TDC_MAWt, TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac, BAPM_TEMP_GRADIENT
- */
+#define MC_CG_ARB_FREQ_F2 0x0c
+#define MC_CG_ARB_FREQ_F3 0x0d
+
+#define SMC_RAM_END 0x40000
+
+#define VOLTAGE_SCALE 4
+#define VOLTAGE_VID_OFFSET_SCALE1 625
+#define VOLTAGE_VID_OFFSET_SCALE2 100
+#define CISLAND_MINIMUM_ENGINE_CLOCK 800
+#define CISLAND_MAX_DEEPSLEEP_DIVIDER_ID 5
+
+static const struct ci_pt_defaults defaults_hawaii_xt = {
+ 1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0xB0000,
+ { 0x2E, 0x00, 0x00, 0x88, 0x00, 0x00, 0x72, 0x60, 0x51, 0xA7, 0x79, 0x6B, 0x90, 0xBD, 0x79 },
+ { 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 }
+};
+
+static const struct ci_pt_defaults defaults_hawaii_pro = {
+ 1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0x65062,
+ { 0x2E, 0x00, 0x00, 0x88, 0x00, 0x00, 0x72, 0x60, 0x51, 0xA7, 0x79, 0x6B, 0x90, 0xBD, 0x79 },
+ { 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 }
+};
+
+static const struct ci_pt_defaults defaults_bonaire_xt = {
1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000,
{ 0x79, 0x253, 0x25D, 0xAE, 0x72, 0x80, 0x83, 0x86, 0x6F, 0xC8, 0xC9, 0xC9, 0x2F, 0x4D, 0x61 },
{ 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 }
};
-/* 35W - XT, XTL */
-static const struct iceland_pt_defaults defaults_icelandxt = {
- /*
- * sviLoadLIneEn, SviLoadLineVddC,
- * TDC_VDDC_ThrottleReleaseLimitPerc, TDC_MAWt,
- * TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac,
- * BAPM_TEMP_GRADIENT
- */
- 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0x0,
- { 0xA7, 0x0, 0x0, 0xB5, 0x0, 0x0, 0x9F, 0x0, 0x0, 0xD6, 0x0, 0x0, 0xD7, 0x0, 0x0},
- { 0x1EA, 0x0, 0x0, 0x224, 0x0, 0x0, 0x25E, 0x0, 0x0, 0x28E, 0x0, 0x0, 0x2AB, 0x0, 0x0}
-};
-/* 25W - PRO, LE */
-static const struct iceland_pt_defaults defaults_icelandpro = {
- /*
- * sviLoadLIneEn, SviLoadLineVddC,
- * TDC_VDDC_ThrottleReleaseLimitPerc, TDC_MAWt,
- * TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac,
- * BAPM_TEMP_GRADIENT
- */
- 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0x0,
- { 0xB7, 0x0, 0x0, 0xC3, 0x0, 0x0, 0xB5, 0x0, 0x0, 0xEA, 0x0, 0x0, 0xE6, 0x0, 0x0},
- { 0x1EA, 0x0, 0x0, 0x224, 0x0, 0x0, 0x25E, 0x0, 0x0, 0x28E, 0x0, 0x0, 0x2AB, 0x0, 0x0}
+static const struct ci_pt_defaults defaults_saturn_xt = {
+ 1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x70000,
+ { 0x8C, 0x247, 0x249, 0xA6, 0x80, 0x81, 0x8B, 0x89, 0x86, 0xC9, 0xCA, 0xC9, 0x4D, 0x4D, 0x4D },
+ { 0x187, 0x187, 0x187, 0x1C7, 0x1C7, 0x1C7, 0x210, 0x210, 0x210, 0x266, 0x266, 0x266, 0x2C9, 0x2C9, 0x2C9 }
};
-static void iceland_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
+
+static int ci_set_smc_sram_address(struct pp_hwmgr *hwmgr,
+ uint32_t smc_addr, uint32_t limit)
+{
+ if ((0 != (3 & smc_addr))
+ || ((smc_addr + 3) >= limit)) {
+ pr_err("smc_addr invalid \n");
+ return -EINVAL;
+ }
+
+ cgs_write_register(hwmgr->device, mmSMC_IND_INDEX_0, smc_addr);
+ PHM_WRITE_FIELD(hwmgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
+ return 0;
+}
+
+static int ci_copy_bytes_to_smc(struct pp_hwmgr *hwmgr, uint32_t smc_start_address,
+ const uint8_t *src, uint32_t byte_count, uint32_t limit)
+{
+ int result;
+ uint32_t data = 0;
+ uint32_t original_data;
+ uint32_t addr = 0;
+ uint32_t extra_shift;
+
+ if ((3 & smc_start_address)
+ || ((smc_start_address + byte_count) >= limit)) {
+ pr_err("smc_start_address invalid \n");
+ return -EINVAL;
+ }
+
+ addr = smc_start_address;
+
+ while (byte_count >= 4) {
+ /* Bytes are written into the SMC address space with the MSB first. */
+ data = src[0] * 0x1000000 + src[1] * 0x10000 + src[2] * 0x100 + src[3];
+
+ result = ci_set_smc_sram_address(hwmgr, addr, limit);
+
+ if (0 != result)
+ return result;
+
+ cgs_write_register(hwmgr->device, mmSMC_IND_DATA_0, data);
+
+ src += 4;
+ byte_count -= 4;
+ addr += 4;
+ }
+
+ if (0 != byte_count) {
+
+ data = 0;
+
+ result = ci_set_smc_sram_address(hwmgr, addr, limit);
+
+ if (0 != result)
+ return result;
+
+
+ original_data = cgs_read_register(hwmgr->device, mmSMC_IND_DATA_0);
+
+ extra_shift = 8 * (4 - byte_count);
+
+ while (byte_count > 0) {
+ /* Bytes are written into the SMC address space with the MSB first. */
+ data = (0x100 * data) + *src++;
+ byte_count--;
+ }
+
+ data <<= extra_shift;
+
+ data |= (original_data & ~((~0UL) << extra_shift));
+
+ result = ci_set_smc_sram_address(hwmgr, addr, limit);
+
+ if (0 != result)
+ return result;
+
+ cgs_write_register(hwmgr->device, mmSMC_IND_DATA_0, data);
+ }
+
+ return 0;
+}
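The copy routine above packs source bytes into 32-bit words most-significant byte first before pushing them through the SMC_IND_DATA_0 window. A minimal standalone sketch of that packing (illustrative only; pack_msb_first is not part of the patch, and uint32_t/uint8_t come from linux/types.h, which the file already includes):

static inline uint32_t pack_msb_first(const uint8_t *src)
{
	/* Equivalent to src[0] * 0x1000000 + src[1] * 0x10000 +
	 * src[2] * 0x100 + src[3] in ci_copy_bytes_to_smc() above,
	 * written with shifts instead of multiplications.
	 */
	return ((uint32_t)src[0] << 24) | ((uint32_t)src[1] << 16) |
	       ((uint32_t)src[2] << 8)  |  (uint32_t)src[3];
}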
+
+
+static int ci_program_jump_on_start(struct pp_hwmgr *hwmgr)
+{
+ static const unsigned char data[4] = { 0xE0, 0x00, 0x80, 0x40 };
+
+ ci_copy_bytes_to_smc(hwmgr, 0x0, data, 4, sizeof(data)+1);
+
+ return 0;
+}
+
+bool ci_is_smc_ram_running(struct pp_hwmgr *hwmgr)
+{
+ return ((0 == PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
+ CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable))
+ && (0x20100 <= cgs_read_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC, ixSMC_PC_C)));
+}
+
+static int ci_read_smc_sram_dword(struct pp_hwmgr *hwmgr, uint32_t smc_addr,
+ uint32_t *value, uint32_t limit)
+{
+ int result;
+
+ result = ci_set_smc_sram_address(hwmgr, smc_addr, limit);
+
+ if (result)
+ return result;
+
+ *value = cgs_read_register(hwmgr->device, mmSMC_IND_DATA_0);
+ return 0;
+}
+
+static int ci_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
{
- struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
+ int ret;
+
+ if (!ci_is_smc_ram_running(hwmgr))
+ return -EINVAL;
+
+ cgs_write_register(hwmgr->device, mmSMC_MESSAGE_0, msg);
+
+ PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0);
+
+ ret = PHM_READ_FIELD(hwmgr->device, SMC_RESP_0, SMC_RESP);
+
+ if (ret != 1)
+ pr_info("\n failed to send message %x ret is %d\n", msg, ret);
+
+ return 0;
+}
+
+static int ci_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
+ uint16_t msg, uint32_t parameter)
+{
+ cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, parameter);
+ return ci_send_msg_to_smc(hwmgr, msg);
+}
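A minimal usage sketch of the message path (EXAMPLE_MSG is a placeholder, not a real PPSMC message ID): the parameter is latched into SMC_MSG_ARG_0 before the message register is written, and ci_send_msg_to_smc() then busy-waits on SMC_RESP_0; a response other than 1 is only logged, not propagated as an error.

#define EXAMPLE_MSG 0x1	/* placeholder only, not a real PPSMC message ID */

static int example_notify_smc(struct pp_hwmgr *hwmgr, uint32_t arg)
{
	/* Parameter register first, then the message that consumes it. */
	return ci_send_msg_to_smc_with_parameter(hwmgr, EXAMPLE_MSG, arg);
}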
+
+static void ci_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
+{
+ struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
struct cgs_system_info sys_info = {0};
uint32_t dev_id;
@@ -111,27 +245,282 @@ static void iceland_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
dev_id = (uint32_t)sys_info.value;
switch (dev_id) {
- case DEVICE_ID_VI_ICELAND_M_6900:
- case DEVICE_ID_VI_ICELAND_M_6903:
- smu_data->power_tune_defaults = &defaults_icelandxt;
+ case 0x67BA:
+ case 0x66B1:
+ smu_data->power_tune_defaults = &defaults_hawaii_pro;
break;
-
- case DEVICE_ID_VI_ICELAND_M_6901:
- case DEVICE_ID_VI_ICELAND_M_6902:
- smu_data->power_tune_defaults = &defaults_icelandpro;
+ case 0x67B8:
+ case 0x66B0:
+ smu_data->power_tune_defaults = &defaults_hawaii_xt;
+ break;
+ case 0x6640:
+ case 0x6641:
+ case 0x6646:
+ case 0x6647:
+ smu_data->power_tune_defaults = &defaults_saturn_xt;
break;
+ case 0x6649:
+ case 0x6650:
+ case 0x6651:
+ case 0x6658:
+ case 0x665C:
+ case 0x665D:
+ case 0x67A0:
+ case 0x67A1:
+ case 0x67A2:
+ case 0x67A8:
+ case 0x67A9:
+ case 0x67AA:
+ case 0x67B9:
+ case 0x67BE:
default:
- smu_data->power_tune_defaults = &defaults_iceland;
- pr_warn("Unknown V.I. Device ID.\n");
+ smu_data->power_tune_defaults = &defaults_bonaire_xt;
break;
}
- return;
}
-static int iceland_populate_svi_load_line(struct pp_hwmgr *hwmgr)
+static int ci_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr,
+ struct phm_clock_voltage_dependency_table *allowed_clock_voltage_table,
+ uint32_t clock, uint32_t *vol)
+{
+ uint32_t i = 0;
+
+ if (allowed_clock_voltage_table->count == 0)
+ return -EINVAL;
+
+ for (i = 0; i < allowed_clock_voltage_table->count; i++) {
+ if (allowed_clock_voltage_table->entries[i].clk >= clock) {
+ *vol = allowed_clock_voltage_table->entries[i].v;
+ return 0;
+ }
+ }
+
+ *vol = allowed_clock_voltage_table->entries[i - 1].v;
+ return 0;
+}
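The dependency lookup above returns the voltage of the first table entry whose clock is at least the requested clock, and clamps to the last entry when the request exceeds the whole table. A self-contained sketch of that behaviour (simplified stand-in types, illustrative only):

struct example_cv_entry { uint32_t clk, v; };

/* Simplified stand-in for the phm_clock_voltage_dependency_table lookup. */
static uint32_t example_volt_for_clk(const struct example_cv_entry *e,
				     uint32_t count, uint32_t clock)
{
	uint32_t i;

	for (i = 0; i < count; i++)
		if (e[i].clk >= clock)
			return e[i].v;	/* first entry at or above request */
	return e[count - 1].v;		/* clamp to the highest entry */
}

/* e.g. with {{30000, 900}, {60000, 1000}, {90000, 1100}} (clocks in 10 kHz
 * units), a request of 70000 yields 1100, and 120000 clamps to 1100 as well.
 */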
+
+static int ci_calculate_sclk_params(struct pp_hwmgr *hwmgr,
+ uint32_t clock, struct SMU7_Discrete_GraphicsLevel *sclk)
{
- struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
- const struct iceland_pt_defaults *defaults = smu_data->power_tune_defaults;
+ const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct pp_atomctrl_clock_dividers_vi dividers;
+ uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL;
+ uint32_t spll_func_cntl_3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
+ uint32_t spll_func_cntl_4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
+ uint32_t cg_spll_spread_spectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
+ uint32_t cg_spll_spread_spectrum_2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
+ uint32_t ref_clock;
+ uint32_t ref_divider;
+ uint32_t fbdiv;
+ int result;
+
+ /* get the engine clock dividers for this clock value */
+ result = atomctrl_get_engine_pll_dividers_vi(hwmgr, clock, &dividers);
+
+ PP_ASSERT_WITH_CODE(result == 0,
+ "Error retrieving Engine Clock dividers from VBIOS.",
+ return result);
+
+ /* To get FBDIV we need to multiply this by 16384 and divide it by Fref. */
+ ref_clock = atomctrl_get_reference_clock(hwmgr);
+ ref_divider = 1 + dividers.uc_pll_ref_div;
+
+ /* low 14 bits is fraction and high 12 bits is divider */
+ fbdiv = dividers.ul_fb_div.ul_fb_divider & 0x3FFFFFF;
+
+ /* SPLL_FUNC_CNTL setup */
+ spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
+ SPLL_REF_DIV, dividers.uc_pll_ref_div);
+ spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
+ SPLL_PDIV_A, dividers.uc_pll_post_div);
+
+ /* SPLL_FUNC_CNTL_3 setup*/
+ spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3, CG_SPLL_FUNC_CNTL_3,
+ SPLL_FB_DIV, fbdiv);
+
+ /* set to use fractional accumulation*/
+ spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3, CG_SPLL_FUNC_CNTL_3,
+ SPLL_DITHEN, 1);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_EngineSpreadSpectrumSupport)) {
+ struct pp_atomctrl_internal_ss_info ss_info;
+ uint32_t vco_freq = clock * dividers.uc_pll_post_div;
+
+ if (!atomctrl_get_engine_clock_spread_spectrum(hwmgr,
+ vco_freq, &ss_info)) {
+ uint32_t clk_s = ref_clock * 5 /
+ (ref_divider * ss_info.speed_spectrum_rate);
+ uint32_t clk_v = 4 * ss_info.speed_spectrum_percentage *
+ fbdiv / (clk_s * 10000);
+
+ cg_spll_spread_spectrum = PHM_SET_FIELD(cg_spll_spread_spectrum,
+ CG_SPLL_SPREAD_SPECTRUM, CLKS, clk_s);
+ cg_spll_spread_spectrum = PHM_SET_FIELD(cg_spll_spread_spectrum,
+ CG_SPLL_SPREAD_SPECTRUM, SSEN, 1);
+ cg_spll_spread_spectrum_2 = PHM_SET_FIELD(cg_spll_spread_spectrum_2,
+ CG_SPLL_SPREAD_SPECTRUM_2, CLKV, clk_v);
+ }
+ }
+
+ sclk->SclkFrequency = clock;
+ sclk->CgSpllFuncCntl3 = spll_func_cntl_3;
+ sclk->CgSpllFuncCntl4 = spll_func_cntl_4;
+ sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum;
+ sclk->SpllSpreadSpectrum2 = cg_spll_spread_spectrum_2;
+ sclk->SclkDid = (uint8_t)dividers.pll_post_divider;
+
+ return 0;
+}
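For reference, the unit conventions behind the clk_s/clk_v terms above are the ones stated in the comments this patch drops further down: speed_spectrum_rate is in kHz, speed_spectrum_percentage is in units of 0.01%, and ref_divider is REFDIV + 1, so:

	clk_s = ref_clock * 10 / (REFDIV + 1) / speed_spectrum_rate / 2
	      = ref_clock * 5 / (ref_divider * speed_spectrum_rate)
	clk_v = 2 * D * fbdiv / NS
	      = 4 * speed_spectrum_percentage * fbdiv / (clk_s * 10000)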
+
+static void ci_populate_phase_value_based_on_sclk(struct pp_hwmgr *hwmgr,
+ const struct phm_phase_shedding_limits_table *pl,
+ uint32_t sclk, uint32_t *p_shed)
+{
+ unsigned int i;
+
+ /* use the minimum phase shedding */
+ *p_shed = 1;
+
+ for (i = 0; i < pl->count; i++) {
+ if (sclk < pl->entries[i].Sclk) {
+ *p_shed = i;
+ break;
+ }
+ }
+}
+
+static uint8_t ci_get_sleep_divider_id_from_clock(uint32_t clock,
+ uint32_t clock_insr)
+{
+ uint8_t i;
+ uint32_t temp;
+ uint32_t min = min_t(uint32_t, clock_insr, CISLAND_MINIMUM_ENGINE_CLOCK);
+
+ if (clock < min) {
+ pr_info("Engine clock can't satisfy stutter requirement!\n");
+ return 0;
+ }
+ for (i = CISLAND_MAX_DEEPSLEEP_DIVIDER_ID; ; i--) {
+ temp = clock >> i;
+
+ if (temp >= min || i == 0)
+ break;
+ }
+ return i;
+}
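The divider search walks down from CISLAND_MAX_DEEPSLEEP_DIVIDER_ID (5) until the divided clock stays at or above the 800 (10 kHz units) floor. A standalone sketch with worked numbers (illustrative only, mirroring the loop above):

static uint8_t example_sleep_divider(uint32_t clock, uint32_t floor)
{
	uint8_t i;

	for (i = 5; i > 0; i--)		/* CISLAND_MAX_DEEPSLEEP_DIVIDER_ID */
		if ((clock >> i) >= floor)
			break;
	/* clock = 30000 (300 MHz), floor = 800: 30000 >> 5 = 937, so ID 5.
	 * clock = 3000 (30 MHz): only 3000 >> 1 = 1500 clears 800, so ID 1.
	 */
	return i;
}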
+
+static int ci_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
+ uint32_t clock, uint16_t sclk_al_threshold,
+ struct SMU7_Discrete_GraphicsLevel *level)
+{
+ int result;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+
+ result = ci_calculate_sclk_params(hwmgr, clock, level);
+
+ /* populate graphics levels */
+ result = ci_get_dependency_volt_by_clk(hwmgr,
+ hwmgr->dyn_state.vddc_dependency_on_sclk, clock,
+ (uint32_t *)(&level->MinVddc));
+ if (result) {
+ pr_err("vdd_dep_on_sclk table is NULL\n");
+ return result;
+ }
+
+ level->SclkFrequency = clock;
+ level->MinVddcPhases = 1;
+
+ if (data->vddc_phase_shed_control)
+ ci_populate_phase_value_based_on_sclk(hwmgr,
+ hwmgr->dyn_state.vddc_phase_shed_limits_table,
+ clock,
+ &level->MinVddcPhases);
+
+ level->ActivityLevel = sclk_al_threshold;
+ level->CcPwrDynRm = 0;
+ level->CcPwrDynRm1 = 0;
+ level->EnabledForActivity = 0;
+ /* this level can be used for throttling.*/
+ level->EnabledForThrottle = 1;
+ level->UpH = 0;
+ level->DownH = 0;
+ level->VoltageDownH = 0;
+ level->PowerThrottle = 0;
+
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_SclkDeepSleep))
+ level->DeepSleepDivId =
+ ci_get_sleep_divider_id_from_clock(clock,
+ CISLAND_MINIMUM_ENGINE_CLOCK);
+
+ /* Default to slow, highest DPM level will be set to PPSMC_DISPLAY_WATERMARK_LOW later.*/
+ level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
+
+ if (0 == result) {
+ level->MinVddc = PP_HOST_TO_SMC_UL(level->MinVddc * VOLTAGE_SCALE);
+ CONVERT_FROM_HOST_TO_SMC_UL(level->MinVddcPhases);
+ CONVERT_FROM_HOST_TO_SMC_UL(level->SclkFrequency);
+ CONVERT_FROM_HOST_TO_SMC_US(level->ActivityLevel);
+ CONVERT_FROM_HOST_TO_SMC_UL(level->CgSpllFuncCntl3);
+ CONVERT_FROM_HOST_TO_SMC_UL(level->CgSpllFuncCntl4);
+ CONVERT_FROM_HOST_TO_SMC_UL(level->SpllSpreadSpectrum);
+ CONVERT_FROM_HOST_TO_SMC_UL(level->SpllSpreadSpectrum2);
+ CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm);
+ CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm1);
+ }
+
+ return result;
+}
+
+static int ci_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
+ struct smu7_dpm_table *dpm_table = &data->dpm_table;
+ int result = 0;
+ uint32_t array = smu_data->dpm_table_start +
+ offsetof(SMU7_Discrete_DpmTable, GraphicsLevel);
+ uint32_t array_size = sizeof(struct SMU7_Discrete_GraphicsLevel) *
+ SMU7_MAX_LEVELS_GRAPHICS;
+ struct SMU7_Discrete_GraphicsLevel *levels =
+ smu_data->smc_state_table.GraphicsLevel;
+ uint32_t i;
+
+ for (i = 0; i < dpm_table->sclk_table.count; i++) {
+ result = ci_populate_single_graphic_level(hwmgr,
+ dpm_table->sclk_table.dpm_levels[i].value,
+ (uint16_t)smu_data->activity_target[i],
+ &levels[i]);
+ if (result)
+ return result;
+ if (i > 1)
+ smu_data->smc_state_table.GraphicsLevel[i].DeepSleepDivId = 0;
+ if (i == (dpm_table->sclk_table.count - 1))
+ smu_data->smc_state_table.GraphicsLevel[i].DisplayWatermark =
+ PPSMC_DISPLAY_WATERMARK_HIGH;
+ }
+
+ smu_data->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1;
+
+ smu_data->smc_state_table.GraphicsDpmLevelCount = (u8)dpm_table->sclk_table.count;
+ data->dpm_level_enable_mask.sclk_dpm_enable_mask =
+ phm_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
+
+ result = ci_copy_bytes_to_smc(hwmgr, array,
+ (u8 *)levels, array_size,
+ SMC_RAM_END);
+
+ return result;
+
+}
+
+static int ci_populate_svi_load_line(struct pp_hwmgr *hwmgr)
+{
+ struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
+ const struct ci_pt_defaults *defaults = smu_data->power_tune_defaults;
smu_data->power_tune_table.SviLoadLineEn = defaults->svi_load_line_en;
smu_data->power_tune_table.SviLoadLineVddC = defaults->svi_load_line_vddc;
@@ -141,11 +530,11 @@ static int iceland_populate_svi_load_line(struct pp_hwmgr *hwmgr)
return 0;
}
-static int iceland_populate_tdc_limit(struct pp_hwmgr *hwmgr)
+static int ci_populate_tdc_limit(struct pp_hwmgr *hwmgr)
{
uint16_t tdc_limit;
- struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
- const struct iceland_pt_defaults *defaults = smu_data->power_tune_defaults;
+ struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
+ const struct ci_pt_defaults *defaults = smu_data->power_tune_defaults;
tdc_limit = (uint16_t)(hwmgr->dyn_state.cac_dtp_table->usTDC * 256);
smu_data->power_tune_table.TDC_VDDC_PkgLimit =
@@ -157,15 +546,15 @@ static int iceland_populate_tdc_limit(struct pp_hwmgr *hwmgr)
return 0;
}
-static int iceland_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
+static int ci_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
{
- struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
- const struct iceland_pt_defaults *defaults = smu_data->power_tune_defaults;
+ struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
+ const struct ci_pt_defaults *defaults = smu_data->power_tune_defaults;
uint32_t temp;
- if (smu7_read_smc_sram_dword(hwmgr->smumgr,
+ if (ci_read_smc_sram_dword(hwmgr,
fuse_table_offset +
- offsetof(SMU71_Discrete_PmFuses, TdcWaterfallCtl),
+ offsetof(SMU7_Discrete_PmFuses, TdcWaterfallCtl),
(uint32_t *)&temp, SMC_RAM_END))
PP_ASSERT_WITH_CODE(false,
"Attempt to read PmFuses.DW6 (SviLoadLineEn) from SMC Failed!",
@@ -176,52 +565,29 @@ static int iceland_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offs
return 0;
}
-static int iceland_populate_temperature_scaler(struct pp_hwmgr *hwmgr)
+static int ci_populate_fuzzy_fan(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
{
- return 0;
-}
-
-static int iceland_populate_gnb_lpml(struct pp_hwmgr *hwmgr)
-{
- int i;
- struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
-
- /* Currently not used. Set all to zero. */
- for (i = 0; i < 8; i++)
- smu_data->power_tune_table.GnbLPML[i] = 0;
+ uint16_t tmp;
+ struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
- return 0;
-}
-
-static int iceland_min_max_vgnb_lpml_id_from_bapm_vddc(struct pp_hwmgr *hwmgr)
-{
- return 0;
-}
-
-static int iceland_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr)
-{
- struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
- uint16_t HiSidd = smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd;
- uint16_t LoSidd = smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd;
- struct phm_cac_tdp_table *cac_table = hwmgr->dyn_state.cac_dtp_table;
+ if ((hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity & (1 << 15))
+ || 0 == hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity)
+ tmp = hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity;
+ else
+ tmp = hwmgr->thermal_controller.advanceFanControlParameters.usDefaultFanOutputSensitivity;
- HiSidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256);
- LoSidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256);
-
- smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd =
- CONVERT_FROM_HOST_TO_SMC_US(HiSidd);
- smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd =
- CONVERT_FROM_HOST_TO_SMC_US(LoSidd);
+ smu_data->power_tune_table.FuzzyFan_PwmSetDelta = CONVERT_FROM_HOST_TO_SMC_US(tmp);
return 0;
}
-static int iceland_populate_bapm_vddc_vid_sidd(struct pp_hwmgr *hwmgr)
+static int ci_populate_bapm_vddc_vid_sidd(struct pp_hwmgr *hwmgr)
{
int i;
- struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
+ struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
uint8_t *hi_vid = smu_data->power_tune_table.BapmVddCVidHiSidd;
uint8_t *lo_vid = smu_data->power_tune_table.BapmVddCVidLoSidd;
+ uint8_t *hi2_vid = smu_data->power_tune_table.BapmVddCVidHiSidd2;
PP_ASSERT_WITH_CODE(NULL != hwmgr->dyn_state.cac_leakage_table,
"The CAC Leakage table does not exist!", return -EINVAL);
@@ -230,22 +596,24 @@ static int iceland_populate_bapm_vddc_vid_sidd(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE(hwmgr->dyn_state.cac_leakage_table->count == hwmgr->dyn_state.vddc_dependency_on_sclk->count,
"CACLeakageTable->count and VddcDependencyOnSCLk->count not equal", return -EINVAL);
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_EVV)) {
- for (i = 0; (uint32_t) i < hwmgr->dyn_state.cac_leakage_table->count; i++) {
+ for (i = 0; (uint32_t) i < hwmgr->dyn_state.cac_leakage_table->count; i++) {
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_EVV)) {
lo_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Vddc1);
hi_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Vddc2);
+ hi2_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Vddc3);
+ } else {
+ lo_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Vddc);
+ hi_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Leakage);
}
- } else {
- PP_ASSERT_WITH_CODE(false, "Iceland should always support EVV", return -EINVAL);
}
return 0;
}
-static int iceland_populate_vddc_vid(struct pp_hwmgr *hwmgr)
+static int ci_populate_vddc_vid(struct pp_hwmgr *hwmgr)
{
int i;
- struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
+ struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
uint8_t *vid = smu_data->power_tune_table.VddCVid;
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
@@ -253,117 +621,154 @@ static int iceland_populate_vddc_vid(struct pp_hwmgr *hwmgr)
"There should never be more than 8 entries for VddcVid!!!",
return -EINVAL);
- for (i = 0; i < (int)data->vddc_voltage_table.count; i++) {
+ for (i = 0; i < (int)data->vddc_voltage_table.count; i++)
vid[i] = convert_to_vid(data->vddc_voltage_table.entries[i].value);
+
+ return 0;
+}
+
+static int ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(struct pp_hwmgr *hwmgr)
+{
+ struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
+ u8 *hi_vid = smu_data->power_tune_table.BapmVddCVidHiSidd;
+ u8 *lo_vid = smu_data->power_tune_table.BapmVddCVidLoSidd;
+ int i, min, max;
+
+ min = max = hi_vid[0];
+ for (i = 0; i < 8; i++) {
+ if (0 != hi_vid[i]) {
+ if (min > hi_vid[i])
+ min = hi_vid[i];
+ if (max < hi_vid[i])
+ max = hi_vid[i];
+ }
+
+ if (0 != lo_vid[i]) {
+ if (min > lo_vid[i])
+ min = lo_vid[i];
+ if (max < lo_vid[i])
+ max = lo_vid[i];
+ }
}
+ if ((min == 0) || (max == 0))
+ return -EINVAL;
+ smu_data->power_tune_table.GnbLPMLMaxVid = (u8)max;
+ smu_data->power_tune_table.GnbLPMLMinVid = (u8)min;
+
return 0;
}
+static int ci_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr)
+{
+ struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
+ uint16_t HiSidd = smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd;
+ uint16_t LoSidd = smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd;
+ struct phm_cac_tdp_table *cac_table = hwmgr->dyn_state.cac_dtp_table;
+
+ HiSidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256);
+ LoSidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256);
+ smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd =
+ CONVERT_FROM_HOST_TO_SMC_US(HiSidd);
+ smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd =
+ CONVERT_FROM_HOST_TO_SMC_US(LoSidd);
-static int iceland_populate_pm_fuses(struct pp_hwmgr *hwmgr)
+ return 0;
+}
+
+static int ci_populate_pm_fuses(struct pp_hwmgr *hwmgr)
{
- struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
+ struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
uint32_t pm_fuse_table_offset;
+ int ret = 0;
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_PowerContainment)) {
- if (smu7_read_smc_sram_dword(hwmgr->smumgr,
- SMU71_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU71_Firmware_Header, PmFuseTable),
- &pm_fuse_table_offset, SMC_RAM_END))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to get pm_fuse_table_offset Failed!",
- return -EINVAL);
+ if (ci_read_smc_sram_dword(hwmgr,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU7_Firmware_Header, PmFuseTable),
+ &pm_fuse_table_offset, SMC_RAM_END)) {
+ pr_err("Attempt to get pm_fuse_table_offset Failed!\n");
+ return -EINVAL;
+ }
/* DW0 - DW3 */
- if (iceland_populate_bapm_vddc_vid_sidd(hwmgr))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate bapm vddc vid Failed!",
- return -EINVAL);
-
+ ret = ci_populate_bapm_vddc_vid_sidd(hwmgr);
/* DW4 - DW5 */
- if (iceland_populate_vddc_vid(hwmgr))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate vddc vid Failed!",
- return -EINVAL);
-
+ ret |= ci_populate_vddc_vid(hwmgr);
/* DW6 */
- if (iceland_populate_svi_load_line(hwmgr))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate SviLoadLine Failed!",
- return -EINVAL);
+ ret |= ci_populate_svi_load_line(hwmgr);
/* DW7 */
- if (iceland_populate_tdc_limit(hwmgr))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate TDCLimit Failed!", return -EINVAL);
+ ret |= ci_populate_tdc_limit(hwmgr);
/* DW8 */
- if (iceland_populate_dw8(hwmgr, pm_fuse_table_offset))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate TdcWaterfallCtl, "
- "LPMLTemperature Min and Max Failed!",
- return -EINVAL);
-
- /* DW9-DW12 */
- if (0 != iceland_populate_temperature_scaler(hwmgr))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate LPMLTemperatureScaler Failed!",
- return -EINVAL);
-
- /* DW13-DW16 */
- if (iceland_populate_gnb_lpml(hwmgr))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate GnbLPML Failed!",
- return -EINVAL);
-
- /* DW17 */
- if (iceland_min_max_vgnb_lpml_id_from_bapm_vddc(hwmgr))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate GnbLPML Min and Max Vid Failed!",
- return -EINVAL);
-
- /* DW18 */
- if (iceland_populate_bapm_vddc_base_leakage_sidd(hwmgr))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate BapmVddCBaseLeakage Hi and Lo Sidd Failed!",
- return -EINVAL);
-
- if (smu7_copy_bytes_to_smc(hwmgr->smumgr, pm_fuse_table_offset,
+ ret |= ci_populate_dw8(hwmgr, pm_fuse_table_offset);
+
+ ret |= ci_populate_fuzzy_fan(hwmgr, pm_fuse_table_offset);
+
+ ret |= ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(hwmgr);
+
+ ret |= ci_populate_bapm_vddc_base_leakage_sidd(hwmgr);
+ if (ret)
+ return ret;
+
+ ret = ci_copy_bytes_to_smc(hwmgr, pm_fuse_table_offset,
(uint8_t *)&smu_data->power_tune_table,
- sizeof(struct SMU71_Discrete_PmFuses), SMC_RAM_END))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to download PmFuseTable Failed!",
- return -EINVAL);
+ sizeof(struct SMU7_Discrete_PmFuses), SMC_RAM_END);
}
- return 0;
+ return ret;
}
-static int iceland_get_dependecy_volt_by_clk(struct pp_hwmgr *hwmgr,
- struct phm_clock_voltage_dependency_table *allowed_clock_voltage_table,
- uint32_t clock, uint32_t *vol)
+static int ci_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr)
{
- uint32_t i = 0;
+ struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ const struct ci_pt_defaults *defaults = smu_data->power_tune_defaults;
+ SMU7_Discrete_DpmTable *dpm_table = &(smu_data->smc_state_table);
+ struct phm_cac_tdp_table *cac_dtp_table = hwmgr->dyn_state.cac_dtp_table;
+ struct phm_ppm_table *ppm = hwmgr->dyn_state.ppm_parameter_table;
+ const uint16_t *def1, *def2;
+ int i, j, k;
- /* clock - voltage dependency table is empty table */
- if (allowed_clock_voltage_table->count == 0)
- return -EINVAL;
+ dpm_table->DefaultTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 256));
+ dpm_table->TargetTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usConfigurableTDP * 256));
- for (i = 0; i < allowed_clock_voltage_table->count; i++) {
- /* find first sclk bigger than request */
- if (allowed_clock_voltage_table->entries[i].clk >= clock) {
- *vol = allowed_clock_voltage_table->entries[i].v;
- return 0;
- }
+ dpm_table->DTETjOffset = 0;
+ dpm_table->GpuTjMax = (uint8_t)(data->thermal_temp_setting.temperature_high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES);
+ dpm_table->GpuTjHyst = 8;
+
+ dpm_table->DTEAmbientTempBase = defaults->dte_ambient_temp_base;
+
+ if (ppm) {
+ dpm_table->PPM_PkgPwrLimit = (uint16_t)ppm->dgpu_tdp * 256 / 1000;
+ dpm_table->PPM_TemperatureLimit = (uint16_t)ppm->tj_max * 256;
+ } else {
+ dpm_table->PPM_PkgPwrLimit = 0;
+ dpm_table->PPM_TemperatureLimit = 0;
}
- /* sclk is bigger than max sclk in the dependence table */
- *vol = allowed_clock_voltage_table->entries[i - 1].v;
+ CONVERT_FROM_HOST_TO_SMC_US(dpm_table->PPM_PkgPwrLimit);
+ CONVERT_FROM_HOST_TO_SMC_US(dpm_table->PPM_TemperatureLimit);
+
+ dpm_table->BAPM_TEMP_GRADIENT = PP_HOST_TO_SMC_UL(defaults->bapm_temp_gradient);
+ def1 = defaults->bapmti_r;
+ def2 = defaults->bapmti_rc;
+
+ for (i = 0; i < SMU7_DTE_ITERATIONS; i++) {
+ for (j = 0; j < SMU7_DTE_SOURCES; j++) {
+ for (k = 0; k < SMU7_DTE_SINKS; k++) {
+ dpm_table->BAPMTI_R[i][j][k] = PP_HOST_TO_SMC_US(*def1);
+ dpm_table->BAPMTI_RC[i][j][k] = PP_HOST_TO_SMC_US(*def2);
+ def1++;
+ def2++;
+ }
+ }
+ }
return 0;
}
-static int iceland_get_std_voltage_value_sidd(struct pp_hwmgr *hwmgr,
+static int ci_get_std_voltage_value_sidd(struct pp_hwmgr *hwmgr,
pp_atomctrl_voltage_table_entry *tab, uint16_t *hi,
uint16_t *lo)
{
@@ -372,7 +777,6 @@ static int iceland_get_std_voltage_value_sidd(struct pp_hwmgr *hwmgr,
*hi = tab->value * VOLTAGE_SCALE;
*lo = tab->value * VOLTAGE_SCALE;
- /* SCLK/VDDC Dependency Table has to exist. */
PP_ASSERT_WITH_CODE(NULL != hwmgr->dyn_state.vddc_dependency_on_sclk,
"The SCLK/VDDC Dependency Table does not exist.\n",
return -EINVAL);
@@ -382,11 +786,6 @@ static int iceland_get_std_voltage_value_sidd(struct pp_hwmgr *hwmgr,
return 0;
}
- /*
- * Since voltage in the sclk/vddc dependency table is not
- * necessarily in ascending order because of ELB voltage
- * patching, loop through entire list to find exact voltage.
- */
for (v_index = 0; (uint32_t)v_index < hwmgr->dyn_state.vddc_dependency_on_sclk->count; v_index++) {
if (tab->value == hwmgr->dyn_state.vddc_dependency_on_sclk->entries[v_index].v) {
vol_found = true;
@@ -402,10 +801,6 @@ static int iceland_get_std_voltage_value_sidd(struct pp_hwmgr *hwmgr,
}
}
- /*
- * If voltage is not found in the first pass, loop again to
- * find the best match, equal or higher value.
- */
if (!vol_found) {
for (v_index = 0; (uint32_t)v_index < hwmgr->dyn_state.vddc_dependency_on_sclk->count; v_index++) {
if (tab->value <= hwmgr->dyn_state.vddc_dependency_on_sclk->entries[v_index].v) {
@@ -429,29 +824,29 @@ static int iceland_get_std_voltage_value_sidd(struct pp_hwmgr *hwmgr,
return 0;
}
-static int iceland_populate_smc_voltage_table(struct pp_hwmgr *hwmgr,
+static int ci_populate_smc_voltage_table(struct pp_hwmgr *hwmgr,
pp_atomctrl_voltage_table_entry *tab,
- SMU71_Discrete_VoltageLevel *smc_voltage_tab)
+ SMU7_Discrete_VoltageLevel *smc_voltage_tab)
{
int result;
- result = iceland_get_std_voltage_value_sidd(hwmgr, tab,
+ result = ci_get_std_voltage_value_sidd(hwmgr, tab,
&smc_voltage_tab->StdVoltageHiSidd,
&smc_voltage_tab->StdVoltageLoSidd);
- if (0 != result) {
+ if (result) {
smc_voltage_tab->StdVoltageHiSidd = tab->value * VOLTAGE_SCALE;
smc_voltage_tab->StdVoltageLoSidd = tab->value * VOLTAGE_SCALE;
}
smc_voltage_tab->Voltage = PP_HOST_TO_SMC_US(tab->value * VOLTAGE_SCALE);
CONVERT_FROM_HOST_TO_SMC_US(smc_voltage_tab->StdVoltageHiSidd);
- CONVERT_FROM_HOST_TO_SMC_US(smc_voltage_tab->StdVoltageHiSidd);
+ CONVERT_FROM_HOST_TO_SMC_US(smc_voltage_tab->StdVoltageLoSidd);
return 0;
}
-static int iceland_populate_smc_vddc_table(struct pp_hwmgr *hwmgr,
- SMU71_Discrete_DpmTable *table)
+static int ci_populate_smc_vddc_table(struct pp_hwmgr *hwmgr,
+ SMU7_Discrete_DpmTable *table)
{
unsigned int count;
int result;
@@ -459,7 +854,7 @@ static int iceland_populate_smc_vddc_table(struct pp_hwmgr *hwmgr,
table->VddcLevelCount = data->vddc_voltage_table.count;
for (count = 0; count < table->VddcLevelCount; count++) {
- result = iceland_populate_smc_voltage_table(hwmgr,
+ result = ci_populate_smc_voltage_table(hwmgr,
&(data->vddc_voltage_table.entries[count]),
&(table->VddcLevel[count]));
PP_ASSERT_WITH_CODE(0 == result, "do not populate SMC VDDC voltage table", return -EINVAL);
@@ -467,7 +862,7 @@ static int iceland_populate_smc_vddc_table(struct pp_hwmgr *hwmgr,
/* GPIO voltage control */
if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->voltage_control)
table->VddcLevel[count].Smio |= data->vddc_voltage_table.entries[count].smio_low;
- else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control)
+ else
table->VddcLevel[count].Smio = 0;
}
@@ -476,8 +871,8 @@ static int iceland_populate_smc_vddc_table(struct pp_hwmgr *hwmgr,
return 0;
}
-static int iceland_populate_smc_vdd_ci_table(struct pp_hwmgr *hwmgr,
- SMU71_Discrete_DpmTable *table)
+static int ci_populate_smc_vdd_ci_table(struct pp_hwmgr *hwmgr,
+ SMU7_Discrete_DpmTable *table)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
uint32_t count;
@@ -486,7 +881,7 @@ static int iceland_populate_smc_vdd_ci_table(struct pp_hwmgr *hwmgr,
table->VddciLevelCount = data->vddci_voltage_table.count;
for (count = 0; count < table->VddciLevelCount; count++) {
- result = iceland_populate_smc_voltage_table(hwmgr,
+ result = ci_populate_smc_voltage_table(hwmgr,
&(data->vddci_voltage_table.entries[count]),
&(table->VddciLevel[count]));
PP_ASSERT_WITH_CODE(result == 0, "do not populate SMC VDDCI voltage table", return -EINVAL);
@@ -501,8 +896,8 @@ static int iceland_populate_smc_vdd_ci_table(struct pp_hwmgr *hwmgr,
return 0;
}
-static int iceland_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
- SMU71_Discrete_DpmTable *table)
+static int ci_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
+ SMU7_Discrete_DpmTable *table)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
uint32_t count;
@@ -510,8 +905,8 @@ static int iceland_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
table->MvddLevelCount = data->mvdd_voltage_table.count;
- for (count = 0; count < table->VddciLevelCount; count++) {
- result = iceland_populate_smc_voltage_table(hwmgr,
+ for (count = 0; count < table->MvddLevelCount; count++) {
+ result = ci_populate_smc_voltage_table(hwmgr,
&(data->mvdd_voltage_table.entries[count]),
&table->MvddLevel[count]);
PP_ASSERT_WITH_CODE(result == 0, "do not populate SMC mvdd voltage table", return -EINVAL);
@@ -527,28 +922,28 @@ static int iceland_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
}
-static int iceland_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr,
- SMU71_Discrete_DpmTable *table)
+static int ci_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr,
+ SMU7_Discrete_DpmTable *table)
{
int result;
- result = iceland_populate_smc_vddc_table(hwmgr, table);
+ result = ci_populate_smc_vddc_table(hwmgr, table);
PP_ASSERT_WITH_CODE(0 == result,
"can not populate VDDC voltage table to SMC", return -EINVAL);
- result = iceland_populate_smc_vdd_ci_table(hwmgr, table);
+ result = ci_populate_smc_vdd_ci_table(hwmgr, table);
PP_ASSERT_WITH_CODE(0 == result,
"can not populate VDDCI voltage table to SMC", return -EINVAL);
- result = iceland_populate_smc_mvdd_table(hwmgr, table);
+ result = ci_populate_smc_mvdd_table(hwmgr, table);
PP_ASSERT_WITH_CODE(0 == result,
"can not populate MVDD voltage table to SMC", return -EINVAL);
return 0;
}
-static int iceland_populate_ulv_level(struct pp_hwmgr *hwmgr,
- struct SMU71_Discrete_Ulv *state)
+static int ci_populate_ulv_level(struct pp_hwmgr *hwmgr,
+ struct SMU7_Discrete_Ulv *state)
{
uint32_t voltage_response_time, ulv_voltage;
int result;
@@ -591,33 +986,28 @@ static int iceland_populate_ulv_level(struct pp_hwmgr *hwmgr,
return 0;
}
-static int iceland_populate_ulv_state(struct pp_hwmgr *hwmgr,
- SMU71_Discrete_Ulv *ulv_level)
+static int ci_populate_ulv_state(struct pp_hwmgr *hwmgr,
+ SMU7_Discrete_Ulv *ulv_level)
{
- return iceland_populate_ulv_level(hwmgr, ulv_level);
+ return ci_populate_ulv_level(hwmgr, ulv_level);
}
-static int iceland_populate_smc_link_level(struct pp_hwmgr *hwmgr, SMU71_Discrete_DpmTable *table)
+static int ci_populate_smc_link_level(struct pp_hwmgr *hwmgr, SMU7_Discrete_DpmTable *table)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
struct smu7_dpm_table *dpm_table = &data->dpm_table;
- struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
+ struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
uint32_t i;
- /* Index (dpm_table->pcie_speed_table.count) is reserved for PCIE boot level. */
+/* Index dpm_table->pcie_speed_table.count is reserved for PCIE boot level.*/
for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) {
table->LinkLevel[i].PcieGenSpeed =
(uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value;
table->LinkLevel[i].PcieLaneCount =
(uint8_t)encode_pcie_lane_width(dpm_table->pcie_speed_table.dpm_levels[i].param1);
- table->LinkLevel[i].EnabledForActivity =
- 1;
- table->LinkLevel[i].SPC =
- (uint8_t)(data->pcie_spc_cap & 0xff);
- table->LinkLevel[i].DownThreshold =
- PP_HOST_TO_SMC_UL(5);
- table->LinkLevel[i].UpThreshold =
- PP_HOST_TO_SMC_UL(30);
+ table->LinkLevel[i].EnabledForActivity = 1;
+ table->LinkLevel[i].DownT = PP_HOST_TO_SMC_UL(5);
+ table->LinkLevel[i].UpT = PP_HOST_TO_SMC_UL(30);
}
smu_data->smc_state_table.LinkLevelCount =
@@ -628,294 +1018,15 @@ static int iceland_populate_smc_link_level(struct pp_hwmgr *hwmgr, SMU71_Discret
return 0;
}
-/**
- * Calculates the SCLK dividers using the provided engine clock
- *
- * @param hwmgr the address of the hardware manager
- * @param engine_clock the engine clock to use to populate the structure
- * @param sclk the SMC SCLK structure to be populated
- */
-static int iceland_calculate_sclk_params(struct pp_hwmgr *hwmgr,
- uint32_t engine_clock, SMU71_Discrete_GraphicsLevel *sclk)
-{
- const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- pp_atomctrl_clock_dividers_vi dividers;
- uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL;
- uint32_t spll_func_cntl_3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
- uint32_t spll_func_cntl_4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
- uint32_t cg_spll_spread_spectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
- uint32_t cg_spll_spread_spectrum_2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
- uint32_t reference_clock;
- uint32_t reference_divider;
- uint32_t fbdiv;
- int result;
-
- /* get the engine clock dividers for this clock value*/
- result = atomctrl_get_engine_pll_dividers_vi(hwmgr, engine_clock, &dividers);
-
- PP_ASSERT_WITH_CODE(result == 0,
- "Error retrieving Engine Clock dividers from VBIOS.", return result);
-
- /* To get FBDIV we need to multiply this by 16384 and divide it by Fref.*/
- reference_clock = atomctrl_get_reference_clock(hwmgr);
-
- reference_divider = 1 + dividers.uc_pll_ref_div;
-
- /* low 14 bits is fraction and high 12 bits is divider*/
- fbdiv = dividers.ul_fb_div.ul_fb_divider & 0x3FFFFFF;
-
- /* SPLL_FUNC_CNTL setup*/
- spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
- CG_SPLL_FUNC_CNTL, SPLL_REF_DIV, dividers.uc_pll_ref_div);
- spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
- CG_SPLL_FUNC_CNTL, SPLL_PDIV_A, dividers.uc_pll_post_div);
-
- /* SPLL_FUNC_CNTL_3 setup*/
- spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3,
- CG_SPLL_FUNC_CNTL_3, SPLL_FB_DIV, fbdiv);
-
- /* set to use fractional accumulation*/
- spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3,
- CG_SPLL_FUNC_CNTL_3, SPLL_DITHEN, 1);
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_EngineSpreadSpectrumSupport)) {
- pp_atomctrl_internal_ss_info ss_info;
-
- uint32_t vcoFreq = engine_clock * dividers.uc_pll_post_div;
- if (0 == atomctrl_get_engine_clock_spread_spectrum(hwmgr, vcoFreq, &ss_info)) {
- /*
- * ss_info.speed_spectrum_percentage -- in unit of 0.01%
- * ss_info.speed_spectrum_rate -- in unit of khz
- */
- /* clks = reference_clock * 10 / (REFDIV + 1) / speed_spectrum_rate / 2 */
- uint32_t clkS = reference_clock * 5 / (reference_divider * ss_info.speed_spectrum_rate);
-
- /* clkv = 2 * D * fbdiv / NS */
- uint32_t clkV = 4 * ss_info.speed_spectrum_percentage * fbdiv / (clkS * 10000);
-
- cg_spll_spread_spectrum =
- PHM_SET_FIELD(cg_spll_spread_spectrum, CG_SPLL_SPREAD_SPECTRUM, CLKS, clkS);
- cg_spll_spread_spectrum =
- PHM_SET_FIELD(cg_spll_spread_spectrum, CG_SPLL_SPREAD_SPECTRUM, SSEN, 1);
- cg_spll_spread_spectrum_2 =
- PHM_SET_FIELD(cg_spll_spread_spectrum_2, CG_SPLL_SPREAD_SPECTRUM_2, CLKV, clkV);
- }
- }
-
- sclk->SclkFrequency = engine_clock;
- sclk->CgSpllFuncCntl3 = spll_func_cntl_3;
- sclk->CgSpllFuncCntl4 = spll_func_cntl_4;
- sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum;
- sclk->SpllSpreadSpectrum2 = cg_spll_spread_spectrum_2;
- sclk->SclkDid = (uint8_t)dividers.pll_post_divider;
-
- return 0;
-}
-
-static int iceland_populate_phase_value_based_on_sclk(struct pp_hwmgr *hwmgr,
- const struct phm_phase_shedding_limits_table *pl,
- uint32_t sclk, uint32_t *p_shed)
-{
- unsigned int i;
-
- /* use the minimum phase shedding */
- *p_shed = 1;
-
- for (i = 0; i < pl->count; i++) {
- if (sclk < pl->entries[i].Sclk) {
- *p_shed = i;
- break;
- }
- }
- return 0;
-}
-
-/**
- * Populates single SMC SCLK structure using the provided engine clock
- *
- * @param hwmgr the address of the hardware manager
- * @param engine_clock the engine clock to use to populate the structure
- * @param sclk the SMC SCLK structure to be populated
- */
-static int iceland_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
- uint32_t engine_clock,
- uint16_t sclk_activity_level_threshold,
- SMU71_Discrete_GraphicsLevel *graphic_level)
-{
- int result;
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-
- result = iceland_calculate_sclk_params(hwmgr, engine_clock, graphic_level);
-
- /* populate graphics levels*/
- result = iceland_get_dependecy_volt_by_clk(hwmgr,
- hwmgr->dyn_state.vddc_dependency_on_sclk, engine_clock,
- &graphic_level->MinVddc);
- PP_ASSERT_WITH_CODE((0 == result),
- "can not find VDDC voltage value for VDDC \
- engine clock dependency table", return result);
-
- /* SCLK frequency in units of 10KHz*/
- graphic_level->SclkFrequency = engine_clock;
- graphic_level->MinVddcPhases = 1;
-
- if (data->vddc_phase_shed_control)
- iceland_populate_phase_value_based_on_sclk(hwmgr,
- hwmgr->dyn_state.vddc_phase_shed_limits_table,
- engine_clock,
- &graphic_level->MinVddcPhases);
-
- /* Indicates maximum activity level for this performance level. 50% for now*/
- graphic_level->ActivityLevel = sclk_activity_level_threshold;
-
- graphic_level->CcPwrDynRm = 0;
- graphic_level->CcPwrDynRm1 = 0;
- /* this level can be used if activity is high enough.*/
- graphic_level->EnabledForActivity = 0;
- /* this level can be used for throttling.*/
- graphic_level->EnabledForThrottle = 1;
- graphic_level->UpHyst = 0;
- graphic_level->DownHyst = 100;
- graphic_level->VoltageDownHyst = 0;
- graphic_level->PowerThrottle = 0;
-
- data->display_timing.min_clock_in_sr =
- hwmgr->display_config.min_core_set_clock_in_sr;
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_SclkDeepSleep))
- graphic_level->DeepSleepDivId =
- smu7_get_sleep_divider_id_from_clock(engine_clock,
- data->display_timing.min_clock_in_sr);
-
- /* Default to slow, highest DPM level will be set to PPSMC_DISPLAY_WATERMARK_LOW later.*/
- graphic_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
-
- if (0 == result) {
- graphic_level->MinVddc = PP_HOST_TO_SMC_UL(graphic_level->MinVddc * VOLTAGE_SCALE);
- CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->MinVddcPhases);
- CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SclkFrequency);
- CONVERT_FROM_HOST_TO_SMC_US(graphic_level->ActivityLevel);
- CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CgSpllFuncCntl3);
- CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CgSpllFuncCntl4);
- CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SpllSpreadSpectrum);
- CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SpllSpreadSpectrum2);
- CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CcPwrDynRm);
- CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CcPwrDynRm1);
- }
-
- return result;
-}
-
-/**
- * Populates all SMC SCLK levels' structure based on the trimmed allowed dpm engine clock states
- *
- * @param hwmgr the address of the hardware manager
- */
-int iceland_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
-{
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
- struct smu7_dpm_table *dpm_table = &data->dpm_table;
- uint32_t level_array_adress = smu_data->smu7_data.dpm_table_start +
- offsetof(SMU71_Discrete_DpmTable, GraphicsLevel);
-
- uint32_t level_array_size = sizeof(SMU71_Discrete_GraphicsLevel) *
- SMU71_MAX_LEVELS_GRAPHICS;
-
- SMU71_Discrete_GraphicsLevel *levels = smu_data->smc_state_table.GraphicsLevel;
-
- uint32_t i;
- uint8_t highest_pcie_level_enabled = 0;
- uint8_t lowest_pcie_level_enabled = 0, mid_pcie_level_enabled = 0;
- uint8_t count = 0;
- int result = 0;
-
- memset(levels, 0x00, level_array_size);
-
- for (i = 0; i < dpm_table->sclk_table.count; i++) {
- result = iceland_populate_single_graphic_level(hwmgr,
- dpm_table->sclk_table.dpm_levels[i].value,
- (uint16_t)smu_data->activity_target[i],
- &(smu_data->smc_state_table.GraphicsLevel[i]));
- if (result != 0)
- return result;
-
- /* Making sure only DPM level 0-1 have Deep Sleep Div ID populated. */
- if (i > 1)
- smu_data->smc_state_table.GraphicsLevel[i].DeepSleepDivId = 0;
- }
-
- /* Only enable level 0 for now. */
- smu_data->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1;
-
- /* set highest level watermark to high */
- if (dpm_table->sclk_table.count > 1)
- smu_data->smc_state_table.GraphicsLevel[dpm_table->sclk_table.count-1].DisplayWatermark =
- PPSMC_DISPLAY_WATERMARK_HIGH;
-
- smu_data->smc_state_table.GraphicsDpmLevelCount =
- (uint8_t)dpm_table->sclk_table.count;
- data->dpm_level_enable_mask.sclk_dpm_enable_mask =
- phm_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
-
- while ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
- (1 << (highest_pcie_level_enabled + 1))) != 0) {
- highest_pcie_level_enabled++;
- }
-
- while ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
- (1 << lowest_pcie_level_enabled)) == 0) {
- lowest_pcie_level_enabled++;
- }
-
- while ((count < highest_pcie_level_enabled) &&
- ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
- (1 << (lowest_pcie_level_enabled + 1 + count))) == 0)) {
- count++;
- }
-
- mid_pcie_level_enabled = (lowest_pcie_level_enabled+1+count) < highest_pcie_level_enabled ?
- (lowest_pcie_level_enabled+1+count) : highest_pcie_level_enabled;
-
-
- /* set pcieDpmLevel to highest_pcie_level_enabled*/
- for (i = 2; i < dpm_table->sclk_table.count; i++) {
- smu_data->smc_state_table.GraphicsLevel[i].pcieDpmLevel = highest_pcie_level_enabled;
- }
-
- /* set pcieDpmLevel to lowest_pcie_level_enabled*/
- smu_data->smc_state_table.GraphicsLevel[0].pcieDpmLevel = lowest_pcie_level_enabled;
-
- /* set pcieDpmLevel to mid_pcie_level_enabled*/
- smu_data->smc_state_table.GraphicsLevel[1].pcieDpmLevel = mid_pcie_level_enabled;
-
- /* level count will send to smc once at init smc table and never change*/
- result = smu7_copy_bytes_to_smc(hwmgr->smumgr, level_array_adress,
- (uint8_t *)levels, (uint32_t)level_array_size,
- SMC_RAM_END);
-
- return result;
-}
-
-/**
- * Populates the SMC MCLK structure using the provided memory clock
- *
- * @param hwmgr the address of the hardware manager
- * @param memory_clock the memory clock to use to populate the structure
- * @param sclk the SMC SCLK structure to be populated
- */
-static int iceland_calculate_mclk_params(
+static int ci_calculate_mclk_params(
struct pp_hwmgr *hwmgr,
uint32_t memory_clock,
- SMU71_Discrete_MemoryLevel *mclk,
+ SMU7_Discrete_MemoryLevel *mclk,
bool strobe_mode,
bool dllStateOn
)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-
uint32_t dll_cntl = data->clock_registers.vDLL_CNTL;
uint32_t mclk_pwrmgt_cntl = data->clock_registers.vMCLK_PWRMGT_CNTL;
uint32_t mpll_ad_func_cntl = data->clock_registers.vMPLL_AD_FUNC_CNTL;
@@ -934,10 +1045,8 @@ static int iceland_calculate_mclk_params(
PP_ASSERT_WITH_CODE(0 == result,
"Error retrieving Memory Clock Parameters from VBIOS.", return result);
- /* MPLL_FUNC_CNTL setup*/
mpll_func_cntl = PHM_SET_FIELD(mpll_func_cntl, MPLL_FUNC_CNTL, BWCTRL, mpll_param.bw_ctrl);
- /* MPLL_FUNC_CNTL_1 setup*/
mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1,
MPLL_FUNC_CNTL_1, CLKF, mpll_param.mpll_fb_divider.cl_kf);
mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1,
@@ -945,12 +1054,10 @@ static int iceland_calculate_mclk_params(
mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1,
MPLL_FUNC_CNTL_1, VCO_MODE, mpll_param.vco_mode);
- /* MPLL_AD_FUNC_CNTL setup*/
mpll_ad_func_cntl = PHM_SET_FIELD(mpll_ad_func_cntl,
MPLL_AD_FUNC_CNTL, YCLK_POST_DIV, mpll_param.mpll_post_divider);
if (data->is_memory_gddr5) {
- /* MPLL_DQ_FUNC_CNTL setup*/
mpll_dq_func_cntl = PHM_SET_FIELD(mpll_dq_func_cntl,
MPLL_DQ_FUNC_CNTL, YCLK_SEL, mpll_param.yclk_sel);
mpll_dq_func_cntl = PHM_SET_FIELD(mpll_dq_func_cntl,
@@ -959,21 +1066,6 @@ static int iceland_calculate_mclk_params(
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_MemorySpreadSpectrumSupport)) {
- /*
- ************************************
- Fref = Reference Frequency
- NF = Feedback divider ratio
- NR = Reference divider ratio
- Fnom = Nominal VCO output frequency = Fref * NF / NR
- Fs = Spreading Rate
- D = Percentage down-spread / 2
- Fint = Reference input frequency to PFD = Fref / NR
- NS = Spreading rate divider ratio = int(Fint / (2 * Fs))
- CLKS = NS - 1 = ISS_STEP_NUM[11:0]
- NV = D * Fs / Fnom * 4 * ((Fnom/Fref * NR) ^ 2)
- CLKV = 65536 * NV = ISS_STEP_SIZE[25:0]
- *************************************
- */
pp_atomctrl_internal_ss_info ss_info;
uint32_t freq_nom;
uint32_t tmp;
@@ -990,14 +1082,7 @@ static int iceland_calculate_mclk_params(
tmp = tmp * tmp;
if (0 == atomctrl_get_memory_clock_spread_spectrum(hwmgr, freq_nom, &ss_info)) {
- /* ss_info.speed_spectrum_percentage -- in unit of 0.01% */
- /* ss.Info.speed_spectrum_rate -- in unit of khz */
- /* CLKS = reference_clock / (2 * speed_spectrum_rate * reference_divider) * 10 */
- /* = reference_clock * 5 / speed_spectrum_rate */
uint32_t clks = reference_clock * 5 / ss_info.speed_spectrum_rate;
-
- /* CLKV = 65536 * speed_spectrum_percentage / 2 * spreadSpecrumRate / freq_nom * 4 / 100000 * ((freq_nom / reference_clock) ^ 2) */
- /* = 131 * speed_spectrum_percentage * speed_spectrum_rate / 100 * ((freq_nom / reference_clock) ^ 2) / freq_nom */
uint32_t clkv =
(uint32_t)((((131 * ss_info.speed_spectrum_percentage *
ss_info.speed_spectrum_rate) / 100) * tmp) / freq_nom);
@@ -1007,7 +1092,6 @@ static int iceland_calculate_mclk_params(
}
}
- /* MCLK_PWRMGT_CNTL setup */
mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
MCLK_PWRMGT_CNTL, DLL_SPEED, mpll_param.dll_speed);
mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
@@ -1016,7 +1100,6 @@ static int iceland_calculate_mclk_params(
MCLK_PWRMGT_CNTL, MRDCK1_PDNB, dllStateOn);
- /* Save the result data to outpupt memory level structure */
mclk->MclkFrequency = memory_clock;
mclk->MpllFuncCntl = mpll_func_cntl;
mclk->MpllFuncCntl_1 = mpll_func_cntl_1;
@@ -1031,48 +1114,45 @@ static int iceland_calculate_mclk_params(
return 0;
}
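
The hunk above drops the long derivation comment for the memory spread-spectrum setup but keeps the CLKS/CLKV math. For reference, here is a standalone sketch of that arithmetic with illustrative input values; the variable names, units and numbers are assumptions for the example only, the real inputs come from atomctrl_get_memory_clock_spread_spectrum().

/* Standalone sketch of the MPLL spread-spectrum arithmetic used above.
 * All input values are assumed for illustration. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t reference_clock = 2700;   /* reference clock (assumed units) */
	uint32_t freq_nom        = 80000;  /* nominal VCO output (assumed units) */
	uint32_t ss_rate         = 300;    /* speed_spectrum_rate, kHz (assumed) */
	uint32_t ss_percentage   = 200;    /* speed_spectrum_percentage, 0.01% units (assumed) */

	/* CLKS = reference_clock * 5 / speed_spectrum_rate */
	uint32_t clks = reference_clock * 5 / ss_rate;

	/* CLKV = 131 * pct * rate / 100 * ((freq_nom / reference_clock)^2) / freq_nom */
	uint32_t tmp  = freq_nom / reference_clock;
	uint32_t clkv;

	tmp = tmp * tmp;
	clkv = (((131 * ss_percentage * ss_rate) / 100) * tmp) / freq_nom;

	printf("CLKS (ISS_STEP_NUM)  = %u\n", (unsigned)clks);
	printf("CLKV (ISS_STEP_SIZE) = %u\n", (unsigned)clkv);
	return 0;
}
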
-static uint8_t iceland_get_mclk_frequency_ratio(uint32_t memory_clock,
+static uint8_t ci_get_mclk_frequency_ratio(uint32_t memory_clock,
bool strobe_mode)
{
uint8_t mc_para_index;
if (strobe_mode) {
- if (memory_clock < 12500) {
+ if (memory_clock < 12500)
mc_para_index = 0x00;
- } else if (memory_clock > 47500) {
+ else if (memory_clock > 47500)
mc_para_index = 0x0f;
- } else {
+ else
mc_para_index = (uint8_t)((memory_clock - 10000) / 2500);
- }
} else {
- if (memory_clock < 65000) {
+ if (memory_clock < 65000)
mc_para_index = 0x00;
- } else if (memory_clock > 135000) {
+ else if (memory_clock > 135000)
mc_para_index = 0x0f;
- } else {
+ else
mc_para_index = (uint8_t)((memory_clock - 60000) / 5000);
- }
}
return mc_para_index;
}
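
The mapping above converts a memory clock into the 4-bit mc_para_index used for the MC register lookup. A minimal self-check of the formula follows; the clock values are illustrative and assume the same 10 kHz units as the DPM tables.

/* Minimal check of the strobe/non-strobe mc_para_index mapping above.
 * Clock values are illustrative only. */
#include <assert.h>
#include <stdint.h>

static uint8_t ratio(uint32_t memory_clock, int strobe_mode)
{
	if (strobe_mode) {
		if (memory_clock < 12500)
			return 0x00;
		if (memory_clock > 47500)
			return 0x0f;
		return (uint8_t)((memory_clock - 10000) / 2500);
	}
	if (memory_clock < 65000)
		return 0x00;
	if (memory_clock > 135000)
		return 0x0f;
	return (uint8_t)((memory_clock - 60000) / 5000);
}

int main(void)
{
	assert(ratio(30000, 1) == 8);    /* strobe: (30000 - 10000) / 2500 */
	assert(ratio(100000, 0) == 8);   /* non-strobe: (100000 - 60000) / 5000 */
	assert(ratio(150000, 0) == 0x0f);
	return 0;
}
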
-static uint8_t iceland_get_ddr3_mclk_frequency_ratio(uint32_t memory_clock)
+static uint8_t ci_get_ddr3_mclk_frequency_ratio(uint32_t memory_clock)
{
uint8_t mc_para_index;
- if (memory_clock < 10000) {
+ if (memory_clock < 10000)
mc_para_index = 0;
- } else if (memory_clock >= 80000) {
+ else if (memory_clock >= 80000)
mc_para_index = 0x0f;
- } else {
+ else
mc_para_index = (uint8_t)((memory_clock - 10000) / 5000 + 1);
- }
return mc_para_index;
}
-static int iceland_populate_phase_value_based_on_mclk(struct pp_hwmgr *hwmgr, const struct phm_phase_shedding_limits_table *pl,
+static int ci_populate_phase_value_based_on_mclk(struct pp_hwmgr *hwmgr, const struct phm_phase_shedding_limits_table *pl,
uint32_t memory_clock, uint32_t *p_shed)
{
unsigned int i;
@@ -1089,10 +1169,10 @@ static int iceland_populate_phase_value_based_on_mclk(struct pp_hwmgr *hwmgr, co
return 0;
}
-static int iceland_populate_single_memory_level(
+static int ci_populate_single_memory_level(
struct pp_hwmgr *hwmgr,
uint32_t memory_clock,
- SMU71_Discrete_MemoryLevel *memory_level
+ SMU7_Discrete_MemoryLevel *memory_level
)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
@@ -1104,16 +1184,14 @@ static int iceland_populate_single_memory_level(
uint32_t mclk_strobe_mode_threshold = 40000;
if (hwmgr->dyn_state.vddc_dependency_on_mclk != NULL) {
- result = iceland_get_dependecy_volt_by_clk(hwmgr,
+ result = ci_get_dependency_volt_by_clk(hwmgr,
hwmgr->dyn_state.vddc_dependency_on_mclk, memory_clock, &memory_level->MinVddc);
PP_ASSERT_WITH_CODE((0 == result),
"can not find MinVddc voltage value from memory VDDC voltage dependency table", return result);
}
- if (data->vddci_control == SMU7_VOLTAGE_CONTROL_NONE) {
- memory_level->MinVddci = memory_level->MinVddc;
- } else if (NULL != hwmgr->dyn_state.vddci_dependency_on_mclk) {
- result = iceland_get_dependecy_volt_by_clk(hwmgr,
+ if (NULL != hwmgr->dyn_state.vddci_dependency_on_mclk) {
+ result = ci_get_dependency_volt_by_clk(hwmgr,
hwmgr->dyn_state.vddci_dependency_on_mclk,
memory_clock,
&memory_level->MinVddci);
@@ -1121,18 +1199,27 @@ static int iceland_populate_single_memory_level(
"can not find MinVddci voltage value from memory VDDCI voltage dependency table", return result);
}
+ if (NULL != hwmgr->dyn_state.mvdd_dependency_on_mclk) {
+ result = ci_get_dependency_volt_by_clk(hwmgr,
+ hwmgr->dyn_state.mvdd_dependency_on_mclk,
+ memory_clock,
+ &memory_level->MinMvdd);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "can not find MinVddci voltage value from memory MVDD voltage dependency table", return result);
+ }
+
memory_level->MinVddcPhases = 1;
if (data->vddc_phase_shed_control) {
- iceland_populate_phase_value_based_on_mclk(hwmgr, hwmgr->dyn_state.vddc_phase_shed_limits_table,
+ ci_populate_phase_value_based_on_mclk(hwmgr, hwmgr->dyn_state.vddc_phase_shed_limits_table,
memory_clock, &memory_level->MinVddcPhases);
}
memory_level->EnabledForThrottle = 1;
- memory_level->EnabledForActivity = 0;
- memory_level->UpHyst = 0;
- memory_level->DownHyst = 100;
- memory_level->VoltageDownHyst = 0;
+ memory_level->EnabledForActivity = 1;
+ memory_level->UpH = 0;
+ memory_level->DownH = 100;
+ memory_level->VoltageDownH = 0;
/* Indicates maximum activity level for this performance level.*/
memory_level->ActivityLevel = (uint16_t)data->mclk_activity_target;
@@ -1148,7 +1235,7 @@ static int iceland_populate_single_memory_level(
cgs_get_active_displays_info(hwmgr->device, &info);
data->display_timing.num_existing_displays = info.display_count;
- /* stutter mode not support on iceland */
+ /* stutter mode not supported on ci */
/* decide strobe mode*/
memory_level->StrobeEnable = (mclk_strobe_mode_threshold != 0) &&
@@ -1156,7 +1243,7 @@ static int iceland_populate_single_memory_level(
/* decide EDC mode and memory clock ratio*/
if (data->is_memory_gddr5) {
- memory_level->StrobeRatio = iceland_get_mclk_frequency_ratio(memory_clock,
+ memory_level->StrobeRatio = ci_get_mclk_frequency_ratio(memory_clock,
memory_level->StrobeEnable);
if ((mclk_edc_enable_threshold != 0) &&
@@ -1170,7 +1257,7 @@ static int iceland_populate_single_memory_level(
}
if (memory_level->StrobeEnable) {
- if (iceland_get_mclk_frequency_ratio(memory_clock, 1) >=
+ if (ci_get_mclk_frequency_ratio(memory_clock, 1) >=
((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC7) >> 16) & 0xf))
dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 1 : 0;
else
@@ -1179,11 +1266,11 @@ static int iceland_populate_single_memory_level(
dll_state_on = data->dll_default_on;
} else {
memory_level->StrobeRatio =
- iceland_get_ddr3_mclk_frequency_ratio(memory_clock);
+ ci_get_ddr3_mclk_frequency_ratio(memory_clock);
dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 1 : 0;
}
- result = iceland_calculate_mclk_params(hwmgr,
+ result = ci_calculate_mclk_params(hwmgr,
memory_clock, memory_level, memory_level->StrobeEnable, dll_state_on);
if (0 == result) {
@@ -1209,23 +1296,18 @@ static int iceland_populate_single_memory_level(
return result;
}
-/**
- * Populates all SMC MCLK levels' structure based on the trimmed allowed dpm memory clock states
- *
- * @param hwmgr the address of the hardware manager
- */
-
-int iceland_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
+static int ci_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
+ struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
struct smu7_dpm_table *dpm_table = &data->dpm_table;
int result;
+ struct cgs_system_info sys_info = {0};
+ uint32_t dev_id;
- /* populate MCLK dpm table to SMU7 */
- uint32_t level_array_adress = smu_data->smu7_data.dpm_table_start + offsetof(SMU71_Discrete_DpmTable, MemoryLevel);
- uint32_t level_array_size = sizeof(SMU71_Discrete_MemoryLevel) * SMU71_MAX_LEVELS_MEMORY;
- SMU71_Discrete_MemoryLevel *levels = smu_data->smc_state_table.MemoryLevel;
+ uint32_t level_array_address = smu_data->dpm_table_start + offsetof(SMU7_Discrete_DpmTable, MemoryLevel);
+ uint32_t level_array_size = sizeof(SMU7_Discrete_MemoryLevel) * SMU7_MAX_LEVELS_MEMORY;
+ SMU7_Discrete_MemoryLevel *levels = smu_data->smc_state_table.MemoryLevel;
uint32_t i;
memset(levels, 0x00, level_array_size);
@@ -1233,39 +1315,42 @@ int iceland_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
for (i = 0; i < dpm_table->mclk_table.count; i++) {
PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value),
"can not populate memory level as memory clock is zero", return -EINVAL);
- result = iceland_populate_single_memory_level(hwmgr, dpm_table->mclk_table.dpm_levels[i].value,
+ result = ci_populate_single_memory_level(hwmgr, dpm_table->mclk_table.dpm_levels[i].value,
&(smu_data->smc_state_table.MemoryLevel[i]));
- if (0 != result) {
+ if (0 != result)
return result;
- }
}
- /* Only enable level 0 for now.*/
smu_data->smc_state_table.MemoryLevel[0].EnabledForActivity = 1;
- /*
- * in order to prevent MC activity from stutter mode to push DPM up.
- * the UVD change complements this by putting the MCLK in a higher state
- * by default such that we are not effected by up threshold or and MCLK DPM latency.
- */
+ sys_info.size = sizeof(struct cgs_system_info);
+ sys_info.info_id = CGS_SYSTEM_INFO_PCIE_DEV;
+ cgs_query_system_info(hwmgr->device, &sys_info);
+ dev_id = (uint32_t)sys_info.value;
+
+ if ((dpm_table->mclk_table.count >= 2)
+ && ((dev_id == 0x67B0) || (dev_id == 0x67B1))) {
+ smu_data->smc_state_table.MemoryLevel[1].MinVddci =
+ smu_data->smc_state_table.MemoryLevel[0].MinVddci;
+ smu_data->smc_state_table.MemoryLevel[1].MinMvdd =
+ smu_data->smc_state_table.MemoryLevel[0].MinMvdd;
+ }
smu_data->smc_state_table.MemoryLevel[0].ActivityLevel = 0x1F;
CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.MemoryLevel[0].ActivityLevel);
smu_data->smc_state_table.MemoryDpmLevelCount = (uint8_t)dpm_table->mclk_table.count;
data->dpm_level_enable_mask.mclk_dpm_enable_mask = phm_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
- /* set highest level watermark to high*/
smu_data->smc_state_table.MemoryLevel[dpm_table->mclk_table.count-1].DisplayWatermark = PPSMC_DISPLAY_WATERMARK_HIGH;
- /* level count will send to smc once at init smc table and never change*/
- result = smu7_copy_bytes_to_smc(hwmgr->smumgr,
- level_array_adress, (uint8_t *)levels, (uint32_t)level_array_size,
+ result = ci_copy_bytes_to_smc(hwmgr,
+ level_array_address, (uint8_t *)levels, (uint32_t)level_array_size,
SMC_RAM_END);
return result;
}
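
ci_populate_all_memory_levels() writes the whole MemoryLevel array into SMC RAM at dpm_table_start plus the offset of the member inside SMU7_Discrete_DpmTable, sized for SMU7_MAX_LEVELS_MEMORY entries rather than for the populated count. Below is a toy sketch of that addressing pattern; the struct layout, constants and base offset are made up and only stand in for the real firmware layout.

/* Toy illustration of the offsetof-based SMC addressing pattern above.
 * Struct layout and constants are invented for the example. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define TOY_MAX_LEVELS_MEMORY 6

struct toy_memory_level {
	uint32_t MclkFrequency;
	uint16_t ActivityLevel;
	uint8_t  EnabledForActivity;
	uint8_t  pad;
};

struct toy_dpm_table {
	uint32_t SystemFlags;
	struct toy_memory_level MemoryLevel[TOY_MAX_LEVELS_MEMORY];
};

int main(void)
{
	uint32_t dpm_table_start = 0x20000;  /* assumed SMC RAM offset */
	uint32_t level_array_address = dpm_table_start +
		(uint32_t)offsetof(struct toy_dpm_table, MemoryLevel);
	uint32_t level_array_size = (uint32_t)sizeof(struct toy_memory_level) *
		TOY_MAX_LEVELS_MEMORY;

	/* The full array is copied so unused levels stay zeroed on the SMC side. */
	printf("write %u bytes at 0x%x\n",
	       (unsigned)level_array_size, (unsigned)level_array_address);
	return 0;
}
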
-static int iceland_populate_mvdd_value(struct pp_hwmgr *hwmgr, uint32_t mclk,
- SMU71_Discrete_VoltageLevel *voltage)
+static int ci_populate_mvdd_value(struct pp_hwmgr *hwmgr, uint32_t mclk,
+ SMU7_Discrete_VoltageLevel *voltage)
{
const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
@@ -1291,15 +1376,14 @@ static int iceland_populate_mvdd_value(struct pp_hwmgr *hwmgr, uint32_t mclk,
return 0;
}
-static int iceland_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
- SMU71_Discrete_DpmTable *table)
+static int ci_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
+ SMU7_Discrete_DpmTable *table)
{
int result = 0;
const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
struct pp_atomctrl_clock_dividers_vi dividers;
- uint32_t vddc_phase_shed_control = 0;
- SMU71_Discrete_VoltageLevel voltage_level;
+ SMU7_Discrete_VoltageLevel voltage_level;
uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL;
uint32_t spll_func_cntl_2 = data->clock_registers.vCG_SPLL_FUNC_CNTL_2;
uint32_t dll_cntl = data->clock_registers.vDLL_CNTL;
@@ -1314,7 +1398,7 @@ static int iceland_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
else
table->ACPILevel.MinVddc = PP_HOST_TO_SMC_UL(data->min_vddc_in_pptable * VOLTAGE_SCALE);
- table->ACPILevel.MinVddcPhases = vddc_phase_shed_control ? 0 : 1;
+ table->ACPILevel.MinVddcPhases = data->vddc_phase_shed_control ? 0 : 1;
/* assign zero for now*/
table->ACPILevel.SclkFrequency = atomctrl_get_reference_clock(hwmgr);
@@ -1346,7 +1430,6 @@ static int iceland_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
table->ACPILevel.CcPwrDynRm = 0;
table->ACPILevel.CcPwrDynRm1 = 0;
-
/* For various features to be enabled/disabled while this level is active.*/
CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags);
/* SCLK frequency in units of 10KHz*/
@@ -1360,6 +1443,7 @@ static int iceland_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm);
CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1);
+
/* table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;*/
table->MemoryACPILevel.MinVddc = table->ACPILevel.MinVddc;
table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;
@@ -1373,7 +1457,7 @@ static int iceland_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
table->MemoryACPILevel.MinVddci = PP_HOST_TO_SMC_UL(data->min_vddci_in_pptable * VOLTAGE_SCALE);
}
- if (0 == iceland_populate_mvdd_value(hwmgr, 0, &voltage_level))
+ if (0 == ci_populate_mvdd_value(hwmgr, 0, &voltage_level))
table->MemoryACPILevel.MinMvdd =
PP_HOST_TO_SMC_UL(voltage_level.Voltage * VOLTAGE_SCALE);
else
@@ -1418,9 +1502,9 @@ static int iceland_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
table->MemoryACPILevel.EnabledForThrottle = 0;
table->MemoryACPILevel.EnabledForActivity = 0;
- table->MemoryACPILevel.UpHyst = 0;
- table->MemoryACPILevel.DownHyst = 100;
- table->MemoryACPILevel.VoltageDownHyst = 0;
+ table->MemoryACPILevel.UpH = 0;
+ table->MemoryACPILevel.DownH = 100;
+ table->MemoryACPILevel.VoltageDownH = 0;
/* Indicates maximum activity level for this performance level.*/
table->MemoryACPILevel.ActivityLevel = PP_HOST_TO_SMC_US((uint16_t)data->mclk_activity_target);
@@ -1433,35 +1517,145 @@ static int iceland_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
return result;
}
-static int iceland_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
- SMU71_Discrete_DpmTable *table)
+static int ci_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
+ SMU7_Discrete_DpmTable *table)
{
- return 0;
+ int result = 0;
+ uint8_t count;
+ struct pp_atomctrl_clock_dividers_vi dividers;
+ struct phm_uvd_clock_voltage_dependency_table *uvd_table =
+ hwmgr->dyn_state.uvd_clock_voltage_dependency_table;
+
+ table->UvdLevelCount = (uint8_t)(uvd_table->count);
+
+ for (count = 0; count < table->UvdLevelCount; count++) {
+ table->UvdLevel[count].VclkFrequency =
+ uvd_table->entries[count].vclk;
+ table->UvdLevel[count].DclkFrequency =
+ uvd_table->entries[count].dclk;
+ table->UvdLevel[count].MinVddc =
+ uvd_table->entries[count].v * VOLTAGE_SCALE;
+ table->UvdLevel[count].MinVddcPhases = 1;
+
+ result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+ table->UvdLevel[count].VclkFrequency, &dividers);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "can not find divide id for Vclk clock", return result);
+
+ table->UvdLevel[count].VclkDivider = (uint8_t)dividers.pll_post_divider;
+
+ result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+ table->UvdLevel[count].DclkFrequency, &dividers);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "can not find divide id for Dclk clock", return result);
+
+ table->UvdLevel[count].DclkDivider = (uint8_t)dividers.pll_post_divider;
+ CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency);
+ CONVERT_FROM_HOST_TO_SMC_US(table->UvdLevel[count].MinVddc);
+ }
+
+ return result;
}
-static int iceland_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
- SMU71_Discrete_DpmTable *table)
+static int ci_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
+ SMU7_Discrete_DpmTable *table)
{
- return 0;
+ int result = -EINVAL;
+ uint8_t count;
+ struct pp_atomctrl_clock_dividers_vi dividers;
+ struct phm_vce_clock_voltage_dependency_table *vce_table =
+ hwmgr->dyn_state.vce_clock_voltage_dependency_table;
+
+ table->VceLevelCount = (uint8_t)(vce_table->count);
+ table->VceBootLevel = 0;
+
+ for (count = 0; count < table->VceLevelCount; count++) {
+ table->VceLevel[count].Frequency = vce_table->entries[count].evclk;
+ table->VceLevel[count].MinVoltage =
+ vce_table->entries[count].v * VOLTAGE_SCALE;
+ table->VceLevel[count].MinPhases = 1;
+
+ result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+ table->VceLevel[count].Frequency, &dividers);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "can not find divide id for VCE engine clock",
+ return result);
+
+ table->VceLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].Frequency);
+ CONVERT_FROM_HOST_TO_SMC_US(table->VceLevel[count].MinVoltage);
+ }
+ return result;
}
-static int iceland_populate_smc_acp_level(struct pp_hwmgr *hwmgr,
- SMU71_Discrete_DpmTable *table)
+static int ci_populate_smc_acp_level(struct pp_hwmgr *hwmgr,
+ SMU7_Discrete_DpmTable *table)
{
- return 0;
+ int result = -EINVAL;
+ uint8_t count;
+ struct pp_atomctrl_clock_dividers_vi dividers;
+ struct phm_acp_clock_voltage_dependency_table *acp_table =
+ hwmgr->dyn_state.acp_clock_voltage_dependency_table;
+
+ table->AcpLevelCount = (uint8_t)(acp_table->count);
+ table->AcpBootLevel = 0;
+
+ for (count = 0; count < table->AcpLevelCount; count++) {
+ table->AcpLevel[count].Frequency = acp_table->entries[count].acpclk;
+ table->AcpLevel[count].MinVoltage = acp_table->entries[count].v;
+ table->AcpLevel[count].MinPhases = 1;
+
+ result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+ table->AcpLevel[count].Frequency, &dividers);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "can not find divide id for engine clock", return result);
+
+ table->AcpLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->AcpLevel[count].Frequency);
+ CONVERT_FROM_HOST_TO_SMC_US(table->AcpLevel[count].MinVoltage);
+ }
+ return result;
}
-static int iceland_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
- SMU71_Discrete_DpmTable *table)
+static int ci_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
+ SMU7_Discrete_DpmTable *table)
{
- return 0;
+ int result = -EINVAL;
+ uint8_t count;
+ struct pp_atomctrl_clock_dividers_vi dividers;
+ struct phm_samu_clock_voltage_dependency_table *samu_table =
+ hwmgr->dyn_state.samu_clock_voltage_dependency_table;
+
+ table->SamuBootLevel = 0;
+ table->SamuLevelCount = (uint8_t)(samu_table->count);
+
+ for (count = 0; count < table->SamuLevelCount; count++) {
+ table->SamuLevel[count].Frequency = samu_table->entries[count].samclk;
+ table->SamuLevel[count].MinVoltage = samu_table->entries[count].v * VOLTAGE_SCALE;
+ table->SamuLevel[count].MinPhases = 1;
+
+ /* retrieve divider value from VBIOS */
+ result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+ table->SamuLevel[count].Frequency, &dividers);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "can not find divide id for samu clock", return result);
+
+ table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency);
+ CONVERT_FROM_HOST_TO_SMC_US(table->SamuLevel[count].MinVoltage);
+ }
+ return result;
}
-static int iceland_populate_memory_timing_parameters(
+static int ci_populate_memory_timing_parameters(
struct pp_hwmgr *hwmgr,
uint32_t engine_clock,
uint32_t memory_clock,
- struct SMU71_Discrete_MCArbDramTimingTableEntry *arb_regs
+ struct SMU7_Discrete_MCArbDramTimingTableEntry *arb_regs
)
{
uint32_t dramTiming;
@@ -1486,42 +1680,34 @@ static int iceland_populate_memory_timing_parameters(
return 0;
}
-/**
- * Setup parameters for the MC ARB.
- *
- * @param hwmgr the address of the powerplay hardware manager.
- * @return always 0
- * This function is to be called from the SetPowerState table.
- */
-static int iceland_program_memory_timing_parameters(struct pp_hwmgr *hwmgr)
+static int ci_program_memory_timing_parameters(struct pp_hwmgr *hwmgr)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
+ struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
int result = 0;
- SMU71_Discrete_MCArbDramTimingTable arb_regs;
+ SMU7_Discrete_MCArbDramTimingTable arb_regs;
uint32_t i, j;
- memset(&arb_regs, 0x00, sizeof(SMU71_Discrete_MCArbDramTimingTable));
+ memset(&arb_regs, 0x00, sizeof(SMU7_Discrete_MCArbDramTimingTable));
for (i = 0; i < data->dpm_table.sclk_table.count; i++) {
for (j = 0; j < data->dpm_table.mclk_table.count; j++) {
- result = iceland_populate_memory_timing_parameters
+ result = ci_populate_memory_timing_parameters
(hwmgr, data->dpm_table.sclk_table.dpm_levels[i].value,
data->dpm_table.mclk_table.dpm_levels[j].value,
&arb_regs.entries[i][j]);
- if (0 != result) {
+ if (0 != result)
break;
- }
}
}
if (0 == result) {
- result = smu7_copy_bytes_to_smc(
- hwmgr->smumgr,
- smu_data->smu7_data.arb_table_start,
+ result = ci_copy_bytes_to_smc(
+ hwmgr,
+ smu_data->arb_table_start,
(uint8_t *)&arb_regs,
- sizeof(SMU71_Discrete_MCArbDramTimingTable),
+ sizeof(SMU7_Discrete_MCArbDramTimingTable),
SMC_RAM_END
);
}
@@ -1529,12 +1715,13 @@ static int iceland_program_memory_timing_parameters(struct pp_hwmgr *hwmgr)
return result;
}
-static int iceland_populate_smc_boot_level(struct pp_hwmgr *hwmgr,
- SMU71_Discrete_DpmTable *table)
+static int ci_populate_smc_boot_level(struct pp_hwmgr *hwmgr,
+ SMU7_Discrete_DpmTable *table)
{
int result = 0;
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
+ struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
+
table->GraphicsBootLevel = 0;
table->MemoryBootLevel = 0;
@@ -1562,26 +1749,22 @@ static int iceland_populate_smc_boot_level(struct pp_hwmgr *hwmgr,
}
table->BootVddc = data->vbios_boot_state.vddc_bootup_value;
- if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control)
- table->BootVddci = table->BootVddc;
- else
- table->BootVddci = data->vbios_boot_state.vddci_bootup_value;
-
+ table->BootVddci = data->vbios_boot_state.vddci_bootup_value;
table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value;
return result;
}
-static int iceland_populate_mc_reg_address(struct pp_smumgr *smumgr,
- SMU71_Discrete_MCRegisters *mc_reg_table)
+static int ci_populate_mc_reg_address(struct pp_hwmgr *hwmgr,
+ SMU7_Discrete_MCRegisters *mc_reg_table)
{
- const struct iceland_smumgr *smu_data = (struct iceland_smumgr *)smumgr->backend;
+ const struct ci_smumgr *smu_data = (struct ci_smumgr *)hwmgr->smu_backend;
uint32_t i, j;
for (i = 0, j = 0; j < smu_data->mc_reg_table.last; j++) {
if (smu_data->mc_reg_table.validflag & 1<<j) {
- PP_ASSERT_WITH_CODE(i < SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE,
+ PP_ASSERT_WITH_CODE(i < SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE,
"Index of mc_reg_table->address[] array out of boundary", return -EINVAL);
mc_reg_table->address[i].s0 =
PP_HOST_TO_SMC_US(smu_data->mc_reg_table.mc_reg_address[j].s0);
@@ -1596,10 +1779,9 @@ static int iceland_populate_mc_reg_address(struct pp_smumgr *smumgr,
return 0;
}
-/*convert register values from driver to SMC format */
-static void iceland_convert_mc_registers(
- const struct iceland_mc_reg_entry *entry,
- SMU71_Discrete_MCRegisterSet *data,
+static void ci_convert_mc_registers(
+ const struct ci_mc_reg_entry *entry,
+ SMU7_Discrete_MCRegisterSet *data,
uint32_t num_entries, uint32_t valid_flag)
{
uint32_t i, j;
@@ -1612,13 +1794,13 @@ static void iceland_convert_mc_registers(
}
}
-static int iceland_convert_mc_reg_table_entry_to_smc(
- struct pp_smumgr *smumgr,
+static int ci_convert_mc_reg_table_entry_to_smc(
+ struct pp_hwmgr *hwmgr,
const uint32_t memory_clock,
- SMU71_Discrete_MCRegisterSet *mc_reg_table_data
+ SMU7_Discrete_MCRegisterSet *mc_reg_table_data
)
{
- struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(smumgr->backend);
+ struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
uint32_t i = 0;
for (i = 0; i < smu_data->mc_reg_table.num_entries; i++) {
@@ -1631,15 +1813,15 @@ static int iceland_convert_mc_reg_table_entry_to_smc(
if ((i == smu_data->mc_reg_table.num_entries) && (i > 0))
--i;
- iceland_convert_mc_registers(&smu_data->mc_reg_table.mc_reg_table_entry[i],
+ ci_convert_mc_registers(&smu_data->mc_reg_table.mc_reg_table_entry[i],
mc_reg_table_data, smu_data->mc_reg_table.last,
smu_data->mc_reg_table.validflag);
return 0;
}
-static int iceland_convert_mc_reg_table_to_smc(struct pp_hwmgr *hwmgr,
- SMU71_Discrete_MCRegisters *mc_regs)
+static int ci_convert_mc_reg_table_to_smc(struct pp_hwmgr *hwmgr,
+ SMU7_Discrete_MCRegisters *mc_regs)
{
int result = 0;
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
@@ -1647,8 +1829,8 @@ static int iceland_convert_mc_reg_table_to_smc(struct pp_hwmgr *hwmgr,
uint32_t i;
for (i = 0; i < data->dpm_table.mclk_table.count; i++) {
- res = iceland_convert_mc_reg_table_entry_to_smc(
- hwmgr->smumgr,
+ res = ci_convert_mc_reg_table_entry_to_smc(
+ hwmgr,
data->dpm_table.mclk_table.dpm_levels[i].value,
&mc_regs->data[i]
);
@@ -1660,10 +1842,9 @@ static int iceland_convert_mc_reg_table_to_smc(struct pp_hwmgr *hwmgr,
return result;
}
-static int iceland_update_and_upload_mc_reg_table(struct pp_hwmgr *hwmgr)
+static int ci_update_and_upload_mc_reg_table(struct pp_hwmgr *hwmgr)
{
- struct pp_smumgr *smumgr = hwmgr->smumgr;
- struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(smumgr->backend);
+ struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
uint32_t address;
int32_t result;
@@ -1672,45 +1853,43 @@ static int iceland_update_and_upload_mc_reg_table(struct pp_hwmgr *hwmgr)
return 0;
- memset(&smu_data->mc_regs, 0, sizeof(SMU71_Discrete_MCRegisters));
+ memset(&smu_data->mc_regs, 0, sizeof(SMU7_Discrete_MCRegisters));
- result = iceland_convert_mc_reg_table_to_smc(hwmgr, &(smu_data->mc_regs));
+ result = ci_convert_mc_reg_table_to_smc(hwmgr, &(smu_data->mc_regs));
if (result != 0)
return result;
+ address = smu_data->mc_reg_table_start + (uint32_t)offsetof(SMU7_Discrete_MCRegisters, data[0]);
- address = smu_data->smu7_data.mc_reg_table_start + (uint32_t)offsetof(SMU71_Discrete_MCRegisters, data[0]);
-
- return smu7_copy_bytes_to_smc(hwmgr->smumgr, address,
+ return ci_copy_bytes_to_smc(hwmgr, address,
(uint8_t *)&smu_data->mc_regs.data[0],
- sizeof(SMU71_Discrete_MCRegisterSet) * data->dpm_table.mclk_table.count,
+ sizeof(SMU7_Discrete_MCRegisterSet) * data->dpm_table.mclk_table.count,
SMC_RAM_END);
}
-static int iceland_populate_initial_mc_reg_table(struct pp_hwmgr *hwmgr)
+static int ci_populate_initial_mc_reg_table(struct pp_hwmgr *hwmgr)
{
int result;
- struct pp_smumgr *smumgr = hwmgr->smumgr;
- struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(smumgr->backend);
+ struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
- memset(&smu_data->mc_regs, 0x00, sizeof(SMU71_Discrete_MCRegisters));
- result = iceland_populate_mc_reg_address(smumgr, &(smu_data->mc_regs));
+ memset(&smu_data->mc_regs, 0x00, sizeof(SMU7_Discrete_MCRegisters));
+ result = ci_populate_mc_reg_address(hwmgr, &(smu_data->mc_regs));
PP_ASSERT_WITH_CODE(0 == result,
"Failed to initialize MCRegTable for the MC register addresses!", return result;);
- result = iceland_convert_mc_reg_table_to_smc(hwmgr, &smu_data->mc_regs);
+ result = ci_convert_mc_reg_table_to_smc(hwmgr, &smu_data->mc_regs);
PP_ASSERT_WITH_CODE(0 == result,
"Failed to initialize MCRegTable for driver state!", return result;);
- return smu7_copy_bytes_to_smc(smumgr, smu_data->smu7_data.mc_reg_table_start,
- (uint8_t *)&smu_data->mc_regs, sizeof(SMU71_Discrete_MCRegisters), SMC_RAM_END);
+ return ci_copy_bytes_to_smc(hwmgr, smu_data->mc_reg_table_start,
+ (uint8_t *)&smu_data->mc_regs, sizeof(SMU7_Discrete_MCRegisters), SMC_RAM_END);
}
-static int iceland_populate_smc_initial_state(struct pp_hwmgr *hwmgr)
+static int ci_populate_smc_initial_state(struct pp_hwmgr *hwmgr)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
+ struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
uint8_t count, level;
count = (uint8_t)(hwmgr->dyn_state.vddc_dependency_on_sclk->count);
@@ -1736,107 +1915,48 @@ static int iceland_populate_smc_initial_state(struct pp_hwmgr *hwmgr)
return 0;
}
-static int iceland_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr)
+static int ci_populate_smc_svi2_config(struct pp_hwmgr *hwmgr,
+ SMU7_Discrete_DpmTable *table)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
- const struct iceland_pt_defaults *defaults = smu_data->power_tune_defaults;
- SMU71_Discrete_DpmTable *dpm_table = &(smu_data->smc_state_table);
- struct phm_cac_tdp_table *cac_dtp_table = hwmgr->dyn_state.cac_dtp_table;
- struct phm_ppm_table *ppm = hwmgr->dyn_state.ppm_parameter_table;
- const uint16_t *def1, *def2;
- int i, j, k;
-
-
- /*
- * TDP number of fraction bits are changed from 8 to 7 for Iceland
- * as requested by SMC team
- */
-
- dpm_table->DefaultTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 256));
- dpm_table->TargetTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usConfigurableTDP * 256));
-
-
- dpm_table->DTETjOffset = 0;
-
- dpm_table->GpuTjMax = (uint8_t)(data->thermal_temp_setting.temperature_high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES);
- dpm_table->GpuTjHyst = 8;
-
- dpm_table->DTEAmbientTempBase = defaults->dte_ambient_temp_base;
-
- /* The following are for new Iceland Multi-input fan/thermal control */
- if (NULL != ppm) {
- dpm_table->PPM_PkgPwrLimit = (uint16_t)ppm->dgpu_tdp * 256 / 1000;
- dpm_table->PPM_TemperatureLimit = (uint16_t)ppm->tj_max * 256;
- } else {
- dpm_table->PPM_PkgPwrLimit = 0;
- dpm_table->PPM_TemperatureLimit = 0;
- }
-
- CONVERT_FROM_HOST_TO_SMC_US(dpm_table->PPM_PkgPwrLimit);
- CONVERT_FROM_HOST_TO_SMC_US(dpm_table->PPM_TemperatureLimit);
-
- dpm_table->BAPM_TEMP_GRADIENT = PP_HOST_TO_SMC_UL(defaults->bamp_temp_gradient);
- def1 = defaults->bapmti_r;
- def2 = defaults->bapmti_rc;
-
- for (i = 0; i < SMU71_DTE_ITERATIONS; i++) {
- for (j = 0; j < SMU71_DTE_SOURCES; j++) {
- for (k = 0; k < SMU71_DTE_SINKS; k++) {
- dpm_table->BAPMTI_R[i][j][k] = PP_HOST_TO_SMC_US(*def1);
- dpm_table->BAPMTI_RC[i][j][k] = PP_HOST_TO_SMC_US(*def2);
- def1++;
- def2++;
- }
- }
- }
+ if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control)
+ table->SVI2Enable = 1;
+ else
+ table->SVI2Enable = 0;
return 0;
}
-static int iceland_populate_smc_svi2_config(struct pp_hwmgr *hwmgr,
- SMU71_Discrete_DpmTable *tab)
+static int ci_start_smc(struct pp_hwmgr *hwmgr)
{
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ /* set smc instruct start point at 0x0 */
+ ci_program_jump_on_start(hwmgr);
- if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control)
- tab->SVI2Enable |= VDDC_ON_SVI2;
-
- if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
- tab->SVI2Enable |= VDDCI_ON_SVI2;
- else
- tab->MergedVddci = 1;
+ /* enable smc clock */
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0);
- if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control)
- tab->SVI2Enable |= MVDD_ON_SVI2;
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_RESET_CNTL, rst_reg, 0);
- PP_ASSERT_WITH_CODE(tab->SVI2Enable != (VDDC_ON_SVI2 | VDDCI_ON_SVI2 | MVDD_ON_SVI2) &&
- (tab->SVI2Enable & VDDC_ON_SVI2), "SVI2 domain configuration is incorrect!", return -EINVAL);
+ PHM_WAIT_INDIRECT_FIELD(hwmgr, SMC_IND, FIRMWARE_FLAGS,
+ INTERRUPTS_ENABLED, 1);
return 0;
}
-/**
- * Initializes the SMC table and uploads it
- *
- * @param hwmgr the address of the powerplay hardware manager.
- * @param pInput the pointer to input data (PowerState)
- * @return always 0
- */
-int iceland_init_smc_table(struct pp_hwmgr *hwmgr)
+static int ci_init_smc_table(struct pp_hwmgr *hwmgr)
{
int result;
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
- SMU71_Discrete_DpmTable *table = &(smu_data->smc_state_table);
-
+ struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
+ SMU7_Discrete_DpmTable *table = &(smu_data->smc_state_table);
+ struct pp_atomctrl_gpio_pin_assignment gpio_pin;
+ u32 i;
- iceland_initialize_power_tune_defaults(hwmgr);
+ ci_initialize_power_tune_defaults(hwmgr);
memset(&(smu_data->smc_state_table), 0x00, sizeof(smu_data->smc_state_table));
- if (SMU7_VOLTAGE_CONTROL_NONE != data->voltage_control) {
- iceland_populate_smc_voltage_tables(hwmgr, table);
- }
+ if (SMU7_VOLTAGE_CONTROL_NONE != data->voltage_control)
+ ci_populate_smc_voltage_tables(hwmgr, table);
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_AutomaticDCTransition))
@@ -1850,67 +1970,75 @@ int iceland_init_smc_table(struct pp_hwmgr *hwmgr)
if (data->is_memory_gddr5)
table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
-
if (data->ulv_supported) {
- result = iceland_populate_ulv_state(hwmgr, &(smu_data->ulv_setting));
+ result = ci_populate_ulv_state(hwmgr, &(table->Ulv));
PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize ULV state!", return result;);
+ "Failed to initialize ULV state!", return result);
cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
ixCG_ULV_PARAMETER, 0x40035);
}
- result = iceland_populate_smc_link_level(hwmgr, table);
+ result = ci_populate_all_graphic_levels(hwmgr);
PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize Link Level!", return result;);
+ "Failed to initialize Graphics Level!", return result);
- result = iceland_populate_all_graphic_levels(hwmgr);
+ result = ci_populate_all_memory_levels(hwmgr);
PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize Graphics Level!", return result;);
+ "Failed to initialize Memory Level!", return result);
- result = iceland_populate_all_memory_levels(hwmgr);
+ result = ci_populate_smc_link_level(hwmgr, table);
PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize Memory Level!", return result;);
+ "Failed to initialize Link Level!", return result);
- result = iceland_populate_smc_acpi_level(hwmgr, table);
+ result = ci_populate_smc_acpi_level(hwmgr, table);
PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize ACPI Level!", return result;);
+ "Failed to initialize ACPI Level!", return result);
- result = iceland_populate_smc_vce_level(hwmgr, table);
+ result = ci_populate_smc_vce_level(hwmgr, table);
PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize VCE Level!", return result;);
+ "Failed to initialize VCE Level!", return result);
- result = iceland_populate_smc_acp_level(hwmgr, table);
+ result = ci_populate_smc_acp_level(hwmgr, table);
PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize ACP Level!", return result;);
+ "Failed to initialize ACP Level!", return result);
- result = iceland_populate_smc_samu_level(hwmgr, table);
+ result = ci_populate_smc_samu_level(hwmgr, table);
PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize SAMU Level!", return result;);
+ "Failed to initialize SAMU Level!", return result);
/* Since only the initial state is completely set up at this point (the other states are just copies of the boot state) we only */
/* need to populate the ARB settings for the initial state. */
- result = iceland_program_memory_timing_parameters(hwmgr);
+ result = ci_program_memory_timing_parameters(hwmgr);
PP_ASSERT_WITH_CODE(0 == result,
- "Failed to Write ARB settings for the initial state.", return result;);
+ "Failed to Write ARB settings for the initial state.", return result);
- result = iceland_populate_smc_uvd_level(hwmgr, table);
+ result = ci_populate_smc_uvd_level(hwmgr, table);
PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize UVD Level!", return result;);
+ "Failed to initialize UVD Level!", return result);
+
+ table->UvdBootLevel = 0;
+ table->VceBootLevel = 0;
+ table->AcpBootLevel = 0;
+ table->SamuBootLevel = 0;
table->GraphicsBootLevel = 0;
table->MemoryBootLevel = 0;
- result = iceland_populate_smc_boot_level(hwmgr, table);
+ result = ci_populate_smc_boot_level(hwmgr, table);
PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize Boot Level!", return result;);
+ "Failed to initialize Boot Level!", return result);
- result = iceland_populate_smc_initial_state(hwmgr);
+ result = ci_populate_smc_initial_state(hwmgr);
PP_ASSERT_WITH_CODE(0 == result, "Failed to initialize Boot State!", return result);
- result = iceland_populate_bapm_parameters_in_dpm_table(hwmgr);
+ result = ci_populate_bapm_parameters_in_dpm_table(hwmgr);
PP_ASSERT_WITH_CODE(0 == result, "Failed to populate BAPM Parameters!", return result);
+ table->UVDInterval = 1;
+ table->VCEInterval = 1;
+ table->ACPInterval = 1;
+ table->SAMUInterval = 1;
table->GraphicsVoltageChangeEnable = 1;
table->GraphicsThermThrottleEnable = 1;
table->GraphicsInterval = 1;
@@ -1927,17 +2055,35 @@ int iceland_init_smc_table(struct pp_hwmgr *hwmgr)
table->MemoryVoltageChangeEnable = 1;
table->MemoryInterval = 1;
table->VoltageResponseTime = 0;
+ table->VddcVddciDelta = 4000;
table->PhaseResponseTime = 0;
table->MemoryThermThrottleEnable = 1;
- table->PCIeBootLinkLevel = 0;
+
+ PP_ASSERT_WITH_CODE((1 <= data->dpm_table.pcie_speed_table.count),
+ "There must be 1 or more PCIE levels defined in PPTable.",
+ return -EINVAL);
+
+ table->PCIeBootLinkLevel = (uint8_t)data->dpm_table.pcie_speed_table.count;
table->PCIeGenInterval = 1;
- result = iceland_populate_smc_svi2_config(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to populate SVI2 setting!", return result);
+ ci_populate_smc_svi2_config(hwmgr, table);
+
+ for (i = 0; i < SMU7_MAX_ENTRIES_SMIO; i++)
+ CONVERT_FROM_HOST_TO_SMC_UL(table->Smio[i]);
table->ThermGpio = 17;
table->SclkStepSize = 0x4000;
+ if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_VRHOT_GPIO_PINID, &gpio_pin)) {
+ table->VRHotGpio = gpio_pin.uc_gpio_pin_bit_shift;
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_RegulatorHot);
+ } else {
+ table->VRHotGpio = SMU7_UNUSED_GPIO_PIN;
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_RegulatorHot);
+ }
+
+ table->AcDcGpio = SMU7_UNUSED_GPIO_PIN;
CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags);
CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddcVid);
@@ -1947,6 +2093,7 @@ int iceland_init_smc_table(struct pp_hwmgr *hwmgr)
CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize);
CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh);
CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow);
+ table->VddcVddciDelta = PP_HOST_TO_SMC_US(table->VddcVddciDelta);
CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime);
CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime);
@@ -1955,47 +2102,32 @@ int iceland_init_smc_table(struct pp_hwmgr *hwmgr)
table->BootMVdd = PP_HOST_TO_SMC_US(table->BootMVdd * VOLTAGE_SCALE);
/* Upload all dpm data to SMC memory.(dpm level, dpm level count etc) */
- result = smu7_copy_bytes_to_smc(hwmgr->smumgr, smu_data->smu7_data.dpm_table_start +
- offsetof(SMU71_Discrete_DpmTable, SystemFlags),
- (uint8_t *)&(table->SystemFlags),
- sizeof(SMU71_Discrete_DpmTable)-3 * sizeof(SMU71_PIDController),
- SMC_RAM_END);
+ result = ci_copy_bytes_to_smc(hwmgr, smu_data->dpm_table_start +
+ offsetof(SMU7_Discrete_DpmTable, SystemFlags),
+ (uint8_t *)&(table->SystemFlags),
+ sizeof(SMU7_Discrete_DpmTable)-3 * sizeof(SMU7_PIDController),
+ SMC_RAM_END);
PP_ASSERT_WITH_CODE(0 == result,
"Failed to upload dpm data to SMC memory!", return result;);
- /* Upload all ulv setting to SMC memory.(dpm level, dpm level count etc) */
- result = smu7_copy_bytes_to_smc(hwmgr->smumgr,
- smu_data->smu7_data.ulv_setting_starts,
- (uint8_t *)&(smu_data->ulv_setting),
- sizeof(SMU71_Discrete_Ulv),
- SMC_RAM_END);
-
-
- result = iceland_populate_initial_mc_reg_table(hwmgr);
+ result = ci_populate_initial_mc_reg_table(hwmgr);
PP_ASSERT_WITH_CODE((0 == result),
"Failed to populate initialize MC Reg table!", return result);
- result = iceland_populate_pm_fuses(hwmgr);
+ result = ci_populate_pm_fuses(hwmgr);
PP_ASSERT_WITH_CODE(0 == result,
"Failed to populate PM fuses to SMC memory!", return result);
+ ci_start_smc(hwmgr);
+
return 0;
}
-/**
-* Set up the fan table to control the fan using the SMC.
-* @param hwmgr the address of the powerplay hardware manager.
-* @param pInput the pointer to input data
-* @param pOutput the pointer to output data
-* @param pStorage the pointer to temporary storage
-* @param Result the last failure code
-* @return result from set temperature range routine
-*/
-int iceland_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
+static int ci_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
{
- struct smu7_smumgr *smu7_data = (struct smu7_smumgr *)(hwmgr->smumgr->backend);
- SMU71_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
+ struct ci_smumgr *ci_data = (struct ci_smumgr *)(hwmgr->smu_backend);
+ SMU7_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
uint32_t duty100;
uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2;
uint16_t fdo_min, slope1, slope2;
@@ -2012,7 +2144,7 @@ int iceland_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
return 0;
}
- if (0 == smu7_data->fan_table_start) {
+ if (0 == ci_data->fan_table_start) {
phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl);
return 0;
}
@@ -2062,29 +2194,26 @@ int iceland_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_MULT_THERMAL_CTRL, TEMP_SEL);
- /* fan_table.FanControl_GL_Flag = 1; */
-
- res = smu7_copy_bytes_to_smc(hwmgr->smumgr, smu7_data->fan_table_start, (uint8_t *)&fan_table, (uint32_t)sizeof(fan_table), SMC_RAM_END);
+ res = ci_copy_bytes_to_smc(hwmgr, ci_data->fan_table_start, (uint8_t *)&fan_table, (uint32_t)sizeof(fan_table), SMC_RAM_END);
return 0;
}
-
-static int iceland_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
+static int ci_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
if (data->need_update_smu7_dpm_table &
- (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_OD_UPDATE_MCLK))
- return iceland_program_memory_timing_parameters(hwmgr);
+ (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_OD_UPDATE_MCLK))
+ return ci_program_memory_timing_parameters(hwmgr);
return 0;
}
-int iceland_update_sclk_threshold(struct pp_hwmgr *hwmgr)
+static int ci_update_sclk_threshold(struct pp_hwmgr *hwmgr)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
+ struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
int result = 0;
uint32_t low_sclk_interrupt_threshold = 0;
@@ -2100,21 +2229,21 @@ int iceland_update_sclk_threshold(struct pp_hwmgr *hwmgr)
CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold);
- result = smu7_copy_bytes_to_smc(
- hwmgr->smumgr,
- smu_data->smu7_data.dpm_table_start +
- offsetof(SMU71_Discrete_DpmTable,
- LowSclkInterruptThreshold),
+ result = ci_copy_bytes_to_smc(
+ hwmgr,
+ smu_data->dpm_table_start +
+ offsetof(SMU7_Discrete_DpmTable,
+ LowSclkInterruptT),
(uint8_t *)&low_sclk_interrupt_threshold,
sizeof(uint32_t),
SMC_RAM_END);
}
- result = iceland_update_and_upload_mc_reg_table(hwmgr);
+ result = ci_update_and_upload_mc_reg_table(hwmgr);
PP_ASSERT_WITH_CODE((0 == result), "Failed to upload MC reg table!", return result);
- result = iceland_program_mem_timing_parameters(hwmgr);
+ result = ci_program_mem_timing_parameters(hwmgr);
PP_ASSERT_WITH_CODE((result == 0),
"Failed to program memory timing parameters!",
);
@@ -2122,161 +2251,200 @@ int iceland_update_sclk_threshold(struct pp_hwmgr *hwmgr)
return result;
}
-uint32_t iceland_get_offsetof(uint32_t type, uint32_t member)
+static uint32_t ci_get_offsetof(uint32_t type, uint32_t member)
{
switch (type) {
case SMU_SoftRegisters:
switch (member) {
case HandshakeDisables:
- return offsetof(SMU71_SoftRegisters, HandshakeDisables);
+ return offsetof(SMU7_SoftRegisters, HandshakeDisables);
case VoltageChangeTimeout:
- return offsetof(SMU71_SoftRegisters, VoltageChangeTimeout);
+ return offsetof(SMU7_SoftRegisters, VoltageChangeTimeout);
case AverageGraphicsActivity:
- return offsetof(SMU71_SoftRegisters, AverageGraphicsActivity);
+ return offsetof(SMU7_SoftRegisters, AverageGraphicsA);
case PreVBlankGap:
- return offsetof(SMU71_SoftRegisters, PreVBlankGap);
+ return offsetof(SMU7_SoftRegisters, PreVBlankGap);
case VBlankTimeout:
- return offsetof(SMU71_SoftRegisters, VBlankTimeout);
- case UcodeLoadStatus:
- return offsetof(SMU71_SoftRegisters, UcodeLoadStatus);
+ return offsetof(SMU7_SoftRegisters, VBlankTimeout);
+ case DRAM_LOG_ADDR_H:
+ return offsetof(SMU7_SoftRegisters, DRAM_LOG_ADDR_H);
+ case DRAM_LOG_ADDR_L:
+ return offsetof(SMU7_SoftRegisters, DRAM_LOG_ADDR_L);
+ case DRAM_LOG_PHY_ADDR_H:
+ return offsetof(SMU7_SoftRegisters, DRAM_LOG_PHY_ADDR_H);
+ case DRAM_LOG_PHY_ADDR_L:
+ return offsetof(SMU7_SoftRegisters, DRAM_LOG_PHY_ADDR_L);
+ case DRAM_LOG_BUFF_SIZE:
+ return offsetof(SMU7_SoftRegisters, DRAM_LOG_BUFF_SIZE);
}
case SMU_Discrete_DpmTable:
switch (member) {
case LowSclkInterruptThreshold:
- return offsetof(SMU71_Discrete_DpmTable, LowSclkInterruptThreshold);
+ return offsetof(SMU7_Discrete_DpmTable, LowSclkInterruptT);
}
}
- pr_warn("can't get the offset of type %x member %x\n", type, member);
+ pr_debug("can't get the offset of type %x member %x\n", type, member);
return 0;
}
-uint32_t iceland_get_mac_definition(uint32_t value)
+static uint32_t ci_get_mac_definition(uint32_t value)
{
switch (value) {
case SMU_MAX_LEVELS_GRAPHICS:
- return SMU71_MAX_LEVELS_GRAPHICS;
+ return SMU7_MAX_LEVELS_GRAPHICS;
case SMU_MAX_LEVELS_MEMORY:
- return SMU71_MAX_LEVELS_MEMORY;
+ return SMU7_MAX_LEVELS_MEMORY;
case SMU_MAX_LEVELS_LINK:
- return SMU71_MAX_LEVELS_LINK;
+ return SMU7_MAX_LEVELS_LINK;
case SMU_MAX_ENTRIES_SMIO:
- return SMU71_MAX_ENTRIES_SMIO;
+ return SMU7_MAX_ENTRIES_SMIO;
case SMU_MAX_LEVELS_VDDC:
- return SMU71_MAX_LEVELS_VDDC;
+ return SMU7_MAX_LEVELS_VDDC;
case SMU_MAX_LEVELS_VDDCI:
- return SMU71_MAX_LEVELS_VDDCI;
+ return SMU7_MAX_LEVELS_VDDCI;
case SMU_MAX_LEVELS_MVDD:
- return SMU71_MAX_LEVELS_MVDD;
+ return SMU7_MAX_LEVELS_MVDD;
}
- pr_warn("can't get the mac of %x\n", value);
+ pr_debug("can't get the mac of %x\n", value);
return 0;
}
-/**
- * Get the location of various tables inside the FW image.
- *
- * @param hwmgr the address of the powerplay hardware manager.
- * @return always 0
- */
-int iceland_process_firmware_header(struct pp_hwmgr *hwmgr)
+static int ci_load_smc_ucode(struct pp_hwmgr *hwmgr)
+{
+ uint32_t byte_count, start_addr;
+ uint8_t *src;
+ uint32_t data;
+
+ struct cgs_firmware_info info = {0};
+
+ cgs_get_firmware_info(hwmgr->device, CGS_UCODE_ID_SMU, &info);
+
+ hwmgr->is_kicker = info.is_kicker;
+ byte_count = info.image_size;
+ src = (uint8_t *)info.kptr;
+ start_addr = info.ucode_start_address;
+
+ if (byte_count > SMC_RAM_END) {
+ pr_err("SMC address is beyond the SMC RAM area.\n");
+ return -EINVAL;
+ }
+
+ cgs_write_register(hwmgr->device, mmSMC_IND_INDEX_0, start_addr);
+ PHM_WRITE_FIELD(hwmgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 1);
+
+ for (; byte_count >= 4; byte_count -= 4) {
+ data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3];
+ cgs_write_register(hwmgr->device, mmSMC_IND_DATA_0, data);
+ src += 4;
+ }
+ PHM_WRITE_FIELD(hwmgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
+
+ if (0 != byte_count) {
+ pr_err("SMC size must be divisible by 4\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
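
ci_load_smc_ucode() streams the firmware image through the auto-incrementing SMC_IND_DATA_0 register four bytes at a time, packing each group most-significant byte first. A standalone check of that packing is sketched below; the example image bytes are arbitrary.

/* Standalone check of the 4-byte packing used when streaming the SMC
 * firmware image: bytes go out most-significant first (big-endian order). */
#include <assert.h>
#include <stdint.h>

static uint32_t pack_be32(const uint8_t *src)
{
	return ((uint32_t)src[0] << 24) | ((uint32_t)src[1] << 16) |
	       ((uint32_t)src[2] << 8)  |  (uint32_t)src[3];
}

int main(void)
{
	const uint8_t img[4] = { 0x12, 0x34, 0x56, 0x78 };

	/* Matches the (src[0] << 24) | ... expression in ci_load_smc_ucode(). */
	assert(pack_be32(img) == 0x12345678u);
	return 0;
}
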
+
+static int ci_upload_firmware(struct pp_hwmgr *hwmgr)
+{
+ if (ci_is_smc_ram_running(hwmgr)) {
+ pr_info("smc is running, no need to load smc firmware\n");
+ return 0;
+ }
+ PHM_WAIT_INDIRECT_FIELD(hwmgr, SMC_IND, RCU_UC_EVENTS,
+ boot_seq_done, 1);
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_MISC_CNTL,
+ pre_fetcher_en, 1);
+
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 1);
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_RESET_CNTL, rst_reg, 1);
+ return ci_load_smc_ucode(hwmgr);
+}
+
+static int ci_process_firmware_header(struct pp_hwmgr *hwmgr)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct smu7_smumgr *smu7_data = (struct smu7_smumgr *)(hwmgr->smumgr->backend);
+ struct ci_smumgr *ci_data = (struct ci_smumgr *)(hwmgr->smu_backend);
- uint32_t tmp;
+ uint32_t tmp = 0;
int result;
bool error = false;
- result = smu7_read_smc_sram_dword(hwmgr->smumgr,
- SMU71_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU71_Firmware_Header, DpmTable),
+ if (ci_upload_firmware(hwmgr))
+ return -EINVAL;
+
+ result = ci_read_smc_sram_dword(hwmgr,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU7_Firmware_Header, DpmTable),
&tmp, SMC_RAM_END);
- if (0 == result) {
- smu7_data->dpm_table_start = tmp;
- }
+ if (0 == result)
+ ci_data->dpm_table_start = tmp;
error |= (0 != result);
- result = smu7_read_smc_sram_dword(hwmgr->smumgr,
- SMU71_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU71_Firmware_Header, SoftRegisters),
+ result = ci_read_smc_sram_dword(hwmgr,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU7_Firmware_Header, SoftRegisters),
&tmp, SMC_RAM_END);
if (0 == result) {
data->soft_regs_start = tmp;
- smu7_data->soft_regs_start = tmp;
+ ci_data->soft_regs_start = tmp;
}
error |= (0 != result);
-
- result = smu7_read_smc_sram_dword(hwmgr->smumgr,
- SMU71_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU71_Firmware_Header, mcRegisterTable),
+ result = ci_read_smc_sram_dword(hwmgr,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU7_Firmware_Header, mcRegisterTable),
&tmp, SMC_RAM_END);
- if (0 == result) {
- smu7_data->mc_reg_table_start = tmp;
- }
+ if (0 == result)
+ ci_data->mc_reg_table_start = tmp;
- result = smu7_read_smc_sram_dword(hwmgr->smumgr,
- SMU71_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU71_Firmware_Header, FanTable),
+ result = ci_read_smc_sram_dword(hwmgr,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU7_Firmware_Header, FanTable),
&tmp, SMC_RAM_END);
- if (0 == result) {
- smu7_data->fan_table_start = tmp;
- }
+ if (0 == result)
+ ci_data->fan_table_start = tmp;
error |= (0 != result);
- result = smu7_read_smc_sram_dword(hwmgr->smumgr,
- SMU71_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU71_Firmware_Header, mcArbDramTimingTable),
+ result = ci_read_smc_sram_dword(hwmgr,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU7_Firmware_Header, mcArbDramTimingTable),
&tmp, SMC_RAM_END);
- if (0 == result) {
- smu7_data->arb_table_start = tmp;
- }
+ if (0 == result)
+ ci_data->arb_table_start = tmp;
error |= (0 != result);
-
- result = smu7_read_smc_sram_dword(hwmgr->smumgr,
- SMU71_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU71_Firmware_Header, Version),
+ result = ci_read_smc_sram_dword(hwmgr,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU7_Firmware_Header, Version),
&tmp, SMC_RAM_END);
- if (0 == result) {
+ if (0 == result)
hwmgr->microcode_version_info.SMC = tmp;
- }
-
- error |= (0 != result);
-
- result = smu7_read_smc_sram_dword(hwmgr->smumgr,
- SMU71_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU71_Firmware_Header, UlvSettings),
- &tmp, SMC_RAM_END);
-
- if (0 == result) {
- smu7_data->ulv_setting_starts = tmp;
- }
error |= (0 != result);
return error ? 1 : 0;
}
-/*---------------------------MC----------------------------*/
-
-static uint8_t iceland_get_memory_modile_index(struct pp_hwmgr *hwmgr)
+static uint8_t ci_get_memory_modile_index(struct pp_hwmgr *hwmgr)
{
return (uint8_t) (0xFF & (cgs_read_register(hwmgr->device, mmBIOS_SCRATCH_4) >> 16));
}
-static bool iceland_check_s0_mc_reg_index(uint16_t in_reg, uint16_t *out_reg)
+static bool ci_check_s0_mc_reg_index(uint16_t in_reg, uint16_t *out_reg)
{
bool result = true;
@@ -2369,32 +2537,32 @@ static bool iceland_check_s0_mc_reg_index(uint16_t in_reg, uint16_t *out_reg)
return result;
}
-static int iceland_set_s0_mc_reg_index(struct iceland_mc_reg_table *table)
+static int ci_set_s0_mc_reg_index(struct ci_mc_reg_table *table)
{
uint32_t i;
uint16_t address;
for (i = 0; i < table->last; i++) {
table->mc_reg_address[i].s0 =
- iceland_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address)
+ ci_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address)
? address : table->mc_reg_address[i].s1;
}
return 0;
}
-static int iceland_copy_vbios_smc_reg_table(const pp_atomctrl_mc_reg_table *table,
- struct iceland_mc_reg_table *ni_table)
+static int ci_copy_vbios_smc_reg_table(const pp_atomctrl_mc_reg_table *table,
+ struct ci_mc_reg_table *ni_table)
{
uint8_t i, j;
- PP_ASSERT_WITH_CODE((table->last <= SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+ PP_ASSERT_WITH_CODE((table->last <= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE),
"Invalid VramInfo table.", return -EINVAL);
PP_ASSERT_WITH_CODE((table->num_entries <= MAX_AC_TIMING_ENTRIES),
"Invalid VramInfo table.", return -EINVAL);
- for (i = 0; i < table->last; i++) {
+ for (i = 0; i < table->last; i++)
ni_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
- }
+
ni_table->last = table->last;
for (i = 0; i < table->num_entries; i++) {
@@ -2411,26 +2579,15 @@ static int iceland_copy_vbios_smc_reg_table(const pp_atomctrl_mc_reg_table *tabl
return 0;
}
-/**
- * VBIOS omits some information to reduce size, we need to recover them here.
- * 1. when we see mmMC_SEQ_MISC1, bit[31:16] EMRS1, need to be write to mmMC_PMG_CMD_EMRS /_LP[15:0].
- * Bit[15:0] MRS, need to be update mmMC_PMG_CMD_MRS/_LP[15:0]
- * 2. when we see mmMC_SEQ_RESERVE_M, bit[15:0] EMRS2, need to be write to mmMC_PMG_CMD_MRS1/_LP[15:0].
- * 3. need to set these data for each clock range
- *
- * @param hwmgr the address of the powerplay hardware manager.
- * @param table the address of MCRegTable
- * @return always 0
- */
-static int iceland_set_mc_special_registers(struct pp_hwmgr *hwmgr,
- struct iceland_mc_reg_table *table)
+static int ci_set_mc_special_registers(struct pp_hwmgr *hwmgr,
+ struct ci_mc_reg_table *table)
{
uint8_t i, j, k;
uint32_t temp_reg;
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
for (i = 0, j = table->last; i < table->last; i++) {
- PP_ASSERT_WITH_CODE((j < SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+ PP_ASSERT_WITH_CODE((j < SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE),
"Invalid VramInfo table.", return -EINVAL);
switch (table->mc_reg_address[i].s1) {
@@ -2445,7 +2602,7 @@ static int iceland_set_mc_special_registers(struct pp_hwmgr *hwmgr,
((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
}
j++;
- PP_ASSERT_WITH_CODE((j < SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+ PP_ASSERT_WITH_CODE((j < SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE),
"Invalid VramInfo table.", return -EINVAL);
temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS);
@@ -2456,15 +2613,14 @@ static int iceland_set_mc_special_registers(struct pp_hwmgr *hwmgr,
(temp_reg & 0xffff0000) |
(table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
- if (!data->is_memory_gddr5) {
+ if (!data->is_memory_gddr5)
table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
- }
}
j++;
- PP_ASSERT_WITH_CODE((j <= SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+ PP_ASSERT_WITH_CODE((j <= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE),
"Invalid VramInfo table.", return -EINVAL);
- if (!data->is_memory_gddr5 && j < SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE) {
+ if (!data->is_memory_gddr5 && j < SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE) {
table->mc_reg_address[j].s1 = mmMC_PMG_AUTO_CMD;
table->mc_reg_address[j].s0 = mmMC_PMG_AUTO_CMD;
for (k = 0; k < table->num_entries; k++) {
@@ -2472,7 +2628,7 @@ static int iceland_set_mc_special_registers(struct pp_hwmgr *hwmgr,
(table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
}
j++;
- PP_ASSERT_WITH_CODE((j <= SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+ PP_ASSERT_WITH_CODE((j <= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE),
"Invalid VramInfo table.", return -EINVAL);
}
@@ -2488,7 +2644,7 @@ static int iceland_set_mc_special_registers(struct pp_hwmgr *hwmgr,
(table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
}
j++;
- PP_ASSERT_WITH_CODE((j <= SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+ PP_ASSERT_WITH_CODE((j <= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE),
"Invalid VramInfo table.", return -EINVAL);
break;
@@ -2503,14 +2659,15 @@ static int iceland_set_mc_special_registers(struct pp_hwmgr *hwmgr,
return 0;
}
-static int iceland_set_valid_flag(struct iceland_mc_reg_table *table)
+static int ci_set_valid_flag(struct ci_mc_reg_table *table)
{
uint8_t i, j;
+
for (i = 0; i < table->last; i++) {
for (j = 1; j < table->num_entries; j++) {
if (table->mc_reg_table_entry[j-1].mc_data[i] !=
table->mc_reg_table_entry[j].mc_data[i]) {
- table->validflag |= (1<<i);
+ table->validflag |= (1 << i);
break;
}
}
@@ -2519,13 +2676,13 @@ static int iceland_set_valid_flag(struct iceland_mc_reg_table *table)
return 0;
}
-int iceland_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
+static int ci_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
{
int result;
- struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
+ struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
pp_atomctrl_mc_reg_table *table;
- struct iceland_mc_reg_table *ni_table = &smu_data->mc_reg_table;
- uint8_t module_index = iceland_get_memory_modile_index(hwmgr);
+ struct ci_mc_reg_table *ni_table = &smu_data->mc_reg_table;
+ uint8_t module_index = ci_get_memory_modile_index(hwmgr);
table = kzalloc(sizeof(pp_atomctrl_mc_reg_table), GFP_KERNEL);
@@ -2559,24 +2716,103 @@ int iceland_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
result = atomctrl_initialize_mc_reg_table(hwmgr, module_index, table);
if (0 == result)
- result = iceland_copy_vbios_smc_reg_table(table, ni_table);
+ result = ci_copy_vbios_smc_reg_table(table, ni_table);
if (0 == result) {
- iceland_set_s0_mc_reg_index(ni_table);
- result = iceland_set_mc_special_registers(hwmgr, ni_table);
+ ci_set_s0_mc_reg_index(ni_table);
+ result = ci_set_mc_special_registers(hwmgr, ni_table);
}
if (0 == result)
- iceland_set_valid_flag(ni_table);
+ ci_set_valid_flag(ni_table);
kfree(table);
return result;
}
-bool iceland_is_dpm_running(struct pp_hwmgr *hwmgr)
+static bool ci_is_dpm_running(struct pp_hwmgr *hwmgr)
{
- return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device,
- CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON))
- ? true : false;
+ return ci_is_smc_ram_running(hwmgr);
}
+
+static int ci_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr,
+ struct amd_pp_profile *request)
+{
+ struct ci_smumgr *smu_data = (struct ci_smumgr *)
+ (hwmgr->smu_backend);
+ struct SMU7_Discrete_GraphicsLevel *levels =
+ smu_data->smc_state_table.GraphicsLevel;
+ uint32_t array = smu_data->dpm_table_start +
+ offsetof(SMU7_Discrete_DpmTable, GraphicsLevel);
+ uint32_t array_size = sizeof(struct SMU7_Discrete_GraphicsLevel) *
+ SMU7_MAX_LEVELS_GRAPHICS;
+ uint32_t i;
+
+ for (i = 0; i < smu_data->smc_state_table.GraphicsDpmLevelCount; i++) {
+ levels[i].ActivityLevel =
+ cpu_to_be16(request->activity_threshold);
+ levels[i].EnabledForActivity = 1;
+ levels[i].UpH = request->up_hyst;
+ levels[i].DownH = request->down_hyst;
+ }
+
+ return ci_copy_bytes_to_smc(hwmgr, array, (uint8_t *)levels,
+ array_size, SMC_RAM_END);
+}
+
+
+static int ci_smu_init(struct pp_hwmgr *hwmgr)
+{
+ int i;
+ struct ci_smumgr *ci_priv = NULL;
+
+ ci_priv = kzalloc(sizeof(struct ci_smumgr), GFP_KERNEL);
+
+ if (ci_priv == NULL)
+ return -ENOMEM;
+
+ for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++)
+ ci_priv->activity_target[i] = 30;
+
+ hwmgr->smu_backend = ci_priv;
+
+ return 0;
+}
+
+static int ci_smu_fini(struct pp_hwmgr *hwmgr)
+{
+ kfree(hwmgr->smu_backend);
+ hwmgr->smu_backend = NULL;
+ cgs_rel_firmware(hwmgr->device, CGS_UCODE_ID_SMU);
+ return 0;
+}
+
+static int ci_start_smu(struct pp_hwmgr *hwmgr)
+{
+ return 0;
+}
+
+const struct pp_smumgr_func ci_smu_funcs = {
+ .smu_init = ci_smu_init,
+ .smu_fini = ci_smu_fini,
+ .start_smu = ci_start_smu,
+ .check_fw_load_finish = NULL,
+ .request_smu_load_fw = NULL,
+ .request_smu_load_specific_fw = NULL,
+ .send_msg_to_smc = ci_send_msg_to_smc,
+ .send_msg_to_smc_with_parameter = ci_send_msg_to_smc_with_parameter,
+ .download_pptable_settings = NULL,
+ .upload_pptable_settings = NULL,
+ .get_offsetof = ci_get_offsetof,
+ .process_firmware_header = ci_process_firmware_header,
+ .init_smc_table = ci_init_smc_table,
+ .update_sclk_threshold = ci_update_sclk_threshold,
+ .thermal_setup_fan_table = ci_thermal_setup_fan_table,
+ .populate_all_graphic_levels = ci_populate_all_graphic_levels,
+ .populate_all_memory_levels = ci_populate_all_memory_levels,
+ .get_mac_definition = ci_get_mac_definition,
+ .initialize_mc_reg_table = ci_initialize_mc_reg_table,
+ .is_dpm_running = ci_is_dpm_running,
+ .populate_requested_graphic_levels = ci_populate_requested_graphic_levels,
+};
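
Aside: the ci_set_valid_flag() hunk above sets bit i of validflag whenever two adjacent entries of the MC register table disagree in column i, i.e. whenever that register actually changes across memory clock levels. A minimal standalone sketch of that bitmask logic follows; the toy struct, sizes and sample values are stand-ins, not the kernel's ci_mc_reg_table.

/* toy_validflag.c - the column-varies bitmask from ci_set_valid_flag(), in isolation */
#include <stdint.h>
#include <stdio.h>

#define NUM_REGS	4	/* stand-in for SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE */
#define NUM_ENTRIES	3	/* stand-in for the number of mclk levels in use */

struct toy_mc_reg_table {
	uint8_t  last;				/* register columns in use */
	uint8_t  num_entries;			/* mclk levels */
	uint16_t validflag;			/* bit i set => column i varies */
	uint32_t mc_data[NUM_ENTRIES][NUM_REGS];
};

static void toy_set_valid_flag(struct toy_mc_reg_table *t)
{
	uint8_t i, j;

	for (i = 0; i < t->last; i++) {
		for (j = 1; j < t->num_entries; j++) {
			/* flag the column as soon as two consecutive levels differ */
			if (t->mc_data[j - 1][i] != t->mc_data[j][i]) {
				t->validflag |= (1 << i);
				break;
			}
		}
	}
}

int main(void)
{
	struct toy_mc_reg_table t = {
		.last = 3, .num_entries = 3, .validflag = 0,
		.mc_data = {
			{ 0x10, 0x20, 0x30, 0 },
			{ 0x10, 0x21, 0x30, 0 },	/* column 1 changed */
			{ 0x10, 0x22, 0x31, 0 },	/* columns 1 and 2 changed */
		},
	};

	toy_set_valid_flag(&t);
	printf("validflag = 0x%04x\n", t.validflag);	/* prints 0x0006: columns 1 and 2 */
	return 0;
}

The resulting mask presumably lets later upload code restrict per-level programming to the registers that really change, instead of resending every column for every memory clock level.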
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.h
new file mode 100644
index 000000000000..8189cfa17c46
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef _CI_SMUMANAGER_H_
+#define _CI_SMUMANAGER_H_
+
+#define SMU__NUM_SCLK_DPM_STATE 8
+#define SMU__NUM_MCLK_DPM_LEVELS 6
+#define SMU__NUM_LCLK_DPM_LEVELS 8
+#define SMU__NUM_PCIE_DPM_LEVELS 8
+
+#include "smu7_discrete.h"
+#include <pp_endian.h>
+#include "ppatomctrl.h"
+
+struct ci_pt_defaults {
+ u8 svi_load_line_en;
+ u8 svi_load_line_vddc;
+ u8 tdc_vddc_throttle_release_limit_perc;
+ u8 tdc_mawt;
+ u8 tdc_waterfall_ctl;
+ u8 dte_ambient_temp_base;
+ u32 display_cac;
+ u32 bapm_temp_gradient;
+ u16 bapmti_r[SMU7_DTE_ITERATIONS * SMU7_DTE_SOURCES * SMU7_DTE_SINKS];
+ u16 bapmti_rc[SMU7_DTE_ITERATIONS * SMU7_DTE_SOURCES * SMU7_DTE_SINKS];
+};
+
+struct ci_mc_reg_entry {
+ uint32_t mclk_max;
+ uint32_t mc_data[SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE];
+};
+
+struct ci_mc_reg_table {
+ uint8_t last;
+ uint8_t num_entries;
+ uint16_t validflag;
+ struct ci_mc_reg_entry mc_reg_table_entry[MAX_AC_TIMING_ENTRIES];
+ SMU7_Discrete_MCRegisterAddress mc_reg_address[SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE];
+};
+
+struct ci_smumgr {
+ uint32_t soft_regs_start;
+ uint32_t dpm_table_start;
+ uint32_t mc_reg_table_start;
+ uint32_t fan_table_start;
+ uint32_t arb_table_start;
+ uint32_t ulv_setting_starts;
+ struct SMU7_Discrete_DpmTable smc_state_table;
+ struct SMU7_Discrete_PmFuses power_tune_table;
+ const struct ci_pt_defaults *power_tune_defaults;
+ SMU7_Discrete_MCRegisters mc_regs;
+ struct ci_mc_reg_table mc_reg_table;
+ uint32_t activity_target[SMU7_MAX_LEVELS_GRAPHICS];
+
+};
+
+#endif
+
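
Aside: the new ci_populate_requested_graphic_levels() computes its SMC RAM destination as dpm_table_start (presumably filled in by ci_process_firmware_header()) plus the C offsetof() of GraphicsLevel inside the discrete DPM table, converts 16-bit fields with cpu_to_be16() since the SMC consumes them big-endian, and copies the whole level array out in one call. The sketch below shows that addressing and endianness pattern with simplified, hypothetical structs; the table layout, start address and upload helper are placeholders, not the real SMU7 definitions.

/* toy_dpm_upload.c - offsetof()-based SMC RAM addressing plus big-endian field prep */
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>
#include <arpa/inet.h>	/* htons() as a portable stand-in for cpu_to_be16() */

#define TOY_MAX_LEVELS	4	/* stand-in for SMU7_MAX_LEVELS_GRAPHICS */

struct toy_graphics_level {
	uint16_t ActivityLevel;	/* stored big-endian for the SMC */
	uint8_t  UpH;
	uint8_t  DownH;
};

struct toy_dpm_table {
	uint32_t SystemFlags;
	struct toy_graphics_level GraphicsLevel[TOY_MAX_LEVELS];
};

/* pretend copy_bytes_to_smc(): just report where the data would land */
static int toy_copy_bytes_to_smc(uint32_t smc_addr, const void *src, uint32_t size)
{
	(void)src;
	printf("upload %u bytes to SMC RAM at 0x%08x\n", size, smc_addr);
	return 0;
}

int main(void)
{
	uint32_t dpm_table_start = 0x0003f000;	/* placeholder for the firmware-header value */
	struct toy_graphics_level levels[TOY_MAX_LEVELS] = {{ 0 }};
	uint32_t array = dpm_table_start +
			 offsetof(struct toy_dpm_table, GraphicsLevel);
	uint32_t array_size = sizeof(levels);
	int i;

	for (i = 0; i < TOY_MAX_LEVELS; i++) {
		levels[i].ActivityLevel = htons(3000);	/* host -> big-endian */
		levels[i].UpH = 0;
		levels[i].DownH = 100;
	}

	return toy_copy_bytes_to_smc(array, levels, array_size);
}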
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c
index 652aaa43e95c..78ab0556e48f 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c
@@ -52,53 +52,52 @@ static const enum cz_scratch_entry firmware_list[] = {
CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G,
};
-static int cz_smum_get_argument(struct pp_smumgr *smumgr)
+static int cz_smum_get_argument(struct pp_hwmgr *hwmgr)
{
- if (smumgr == NULL || smumgr->device == NULL)
+ if (hwmgr == NULL || hwmgr->device == NULL)
return -EINVAL;
- return cgs_read_register(smumgr->device,
+ return cgs_read_register(hwmgr->device,
mmSMU_MP1_SRBM2P_ARG_0);
}
-static int cz_send_msg_to_smc_async(struct pp_smumgr *smumgr,
- uint16_t msg)
+static int cz_send_msg_to_smc_async(struct pp_hwmgr *hwmgr, uint16_t msg)
{
int result = 0;
- if (smumgr == NULL || smumgr->device == NULL)
+ if (hwmgr == NULL || hwmgr->device == NULL)
return -EINVAL;
- result = SMUM_WAIT_FIELD_UNEQUAL(smumgr,
+ result = PHM_WAIT_FIELD_UNEQUAL(hwmgr,
SMU_MP1_SRBM2P_RESP_0, CONTENT, 0);
if (result != 0) {
pr_err("cz_send_msg_to_smc_async (0x%04x) failed\n", msg);
return result;
}
- cgs_write_register(smumgr->device, mmSMU_MP1_SRBM2P_RESP_0, 0);
- cgs_write_register(smumgr->device, mmSMU_MP1_SRBM2P_MSG_0, msg);
+ cgs_write_register(hwmgr->device, mmSMU_MP1_SRBM2P_RESP_0, 0);
+ cgs_write_register(hwmgr->device, mmSMU_MP1_SRBM2P_MSG_0, msg);
return 0;
}
/* Send a message to the SMC, and wait for its response.*/
-static int cz_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg)
+static int cz_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
{
int result = 0;
- result = cz_send_msg_to_smc_async(smumgr, msg);
+ result = cz_send_msg_to_smc_async(hwmgr, msg);
if (result != 0)
return result;
- return SMUM_WAIT_FIELD_UNEQUAL(smumgr,
+ return PHM_WAIT_FIELD_UNEQUAL(hwmgr,
SMU_MP1_SRBM2P_RESP_0, CONTENT, 0);
}
-static int cz_set_smc_sram_address(struct pp_smumgr *smumgr,
+static int cz_set_smc_sram_address(struct pp_hwmgr *hwmgr,
uint32_t smc_address, uint32_t limit)
{
- if (smumgr == NULL || smumgr->device == NULL)
+ if (hwmgr == NULL || hwmgr->device == NULL)
return -EINVAL;
if (0 != (3 & smc_address)) {
@@ -111,39 +110,39 @@ static int cz_set_smc_sram_address(struct pp_smumgr *smumgr,
return -EINVAL;
}
- cgs_write_register(smumgr->device, mmMP0PUB_IND_INDEX_0,
+ cgs_write_register(hwmgr->device, mmMP0PUB_IND_INDEX_0,
SMN_MP1_SRAM_START_ADDR + smc_address);
return 0;
}
-static int cz_write_smc_sram_dword(struct pp_smumgr *smumgr,
+static int cz_write_smc_sram_dword(struct pp_hwmgr *hwmgr,
uint32_t smc_address, uint32_t value, uint32_t limit)
{
int result;
- if (smumgr == NULL || smumgr->device == NULL)
+ if (hwmgr == NULL || hwmgr->device == NULL)
return -EINVAL;
- result = cz_set_smc_sram_address(smumgr, smc_address, limit);
+ result = cz_set_smc_sram_address(hwmgr, smc_address, limit);
if (!result)
- cgs_write_register(smumgr->device, mmMP0PUB_IND_DATA_0, value);
+ cgs_write_register(hwmgr->device, mmMP0PUB_IND_DATA_0, value);
return result;
}
-static int cz_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr,
+static int cz_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
uint16_t msg, uint32_t parameter)
{
- if (smumgr == NULL || smumgr->device == NULL)
+ if (hwmgr == NULL || hwmgr->device == NULL)
return -EINVAL;
- cgs_write_register(smumgr->device, mmSMU_MP1_SRBM2P_ARG_0, parameter);
+ cgs_write_register(hwmgr->device, mmSMU_MP1_SRBM2P_ARG_0, parameter);
- return cz_send_msg_to_smc(smumgr, msg);
+ return cz_send_msg_to_smc(hwmgr, msg);
}
-static int cz_check_fw_load_finish(struct pp_smumgr *smumgr,
+static int cz_check_fw_load_finish(struct pp_hwmgr *hwmgr,
uint32_t firmware)
{
int i;
@@ -151,19 +150,19 @@ static int cz_check_fw_load_finish(struct pp_smumgr *smumgr,
SMU8_FIRMWARE_HEADER_LOCATION +
offsetof(struct SMU8_Firmware_Header, UcodeLoadStatus);
- if (smumgr == NULL || smumgr->device == NULL)
+ if (hwmgr == NULL || hwmgr->device == NULL)
return -EINVAL;
- cgs_write_register(smumgr->device, mmMP0PUB_IND_INDEX, index);
+ cgs_write_register(hwmgr->device, mmMP0PUB_IND_INDEX, index);
- for (i = 0; i < smumgr->usec_timeout; i++) {
+ for (i = 0; i < hwmgr->usec_timeout; i++) {
if (firmware ==
- (cgs_read_register(smumgr->device, mmMP0PUB_IND_DATA) & firmware))
+ (cgs_read_register(hwmgr->device, mmMP0PUB_IND_DATA) & firmware))
break;
udelay(1);
}
- if (i >= smumgr->usec_timeout) {
+ if (i >= hwmgr->usec_timeout) {
pr_err("SMU check loaded firmware failed.\n");
return -EINVAL;
}
@@ -171,7 +170,7 @@ static int cz_check_fw_load_finish(struct pp_smumgr *smumgr,
return 0;
}
-static int cz_load_mec_firmware(struct pp_smumgr *smumgr)
+static int cz_load_mec_firmware(struct pp_hwmgr *hwmgr)
{
uint32_t reg_data;
uint32_t tmp;
@@ -179,44 +178,44 @@ static int cz_load_mec_firmware(struct pp_smumgr *smumgr)
struct cgs_firmware_info info = {0};
struct cz_smumgr *cz_smu;
- if (smumgr == NULL || smumgr->device == NULL)
+ if (hwmgr == NULL || hwmgr->device == NULL)
return -EINVAL;
- cz_smu = (struct cz_smumgr *)smumgr->backend;
- ret = cgs_get_firmware_info(smumgr->device,
+ cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
+ ret = cgs_get_firmware_info(hwmgr->device,
CGS_UCODE_ID_CP_MEC, &info);
if (ret)
return -EINVAL;
/* Disable MEC parsing/prefetching */
- tmp = cgs_read_register(smumgr->device,
+ tmp = cgs_read_register(hwmgr->device,
mmCP_MEC_CNTL);
- tmp = SMUM_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME1_HALT, 1);
- tmp = SMUM_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME2_HALT, 1);
- cgs_write_register(smumgr->device, mmCP_MEC_CNTL, tmp);
+ tmp = PHM_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME1_HALT, 1);
+ tmp = PHM_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME2_HALT, 1);
+ cgs_write_register(hwmgr->device, mmCP_MEC_CNTL, tmp);
- tmp = cgs_read_register(smumgr->device,
+ tmp = cgs_read_register(hwmgr->device,
mmCP_CPC_IC_BASE_CNTL);
- tmp = SMUM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0);
- tmp = SMUM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, ATC, 0);
- tmp = SMUM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
- tmp = SMUM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, MTYPE, 1);
- cgs_write_register(smumgr->device, mmCP_CPC_IC_BASE_CNTL, tmp);
+ tmp = PHM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0);
+ tmp = PHM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, ATC, 0);
+ tmp = PHM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
+ tmp = PHM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, MTYPE, 1);
+ cgs_write_register(hwmgr->device, mmCP_CPC_IC_BASE_CNTL, tmp);
reg_data = smu_lower_32_bits(info.mc_addr) &
- SMUM_FIELD_MASK(CP_CPC_IC_BASE_LO, IC_BASE_LO);
- cgs_write_register(smumgr->device, mmCP_CPC_IC_BASE_LO, reg_data);
+ PHM_FIELD_MASK(CP_CPC_IC_BASE_LO, IC_BASE_LO);
+ cgs_write_register(hwmgr->device, mmCP_CPC_IC_BASE_LO, reg_data);
reg_data = smu_upper_32_bits(info.mc_addr) &
- SMUM_FIELD_MASK(CP_CPC_IC_BASE_HI, IC_BASE_HI);
- cgs_write_register(smumgr->device, mmCP_CPC_IC_BASE_HI, reg_data);
+ PHM_FIELD_MASK(CP_CPC_IC_BASE_HI, IC_BASE_HI);
+ cgs_write_register(hwmgr->device, mmCP_CPC_IC_BASE_HI, reg_data);
return 0;
}
-static uint8_t cz_translate_firmware_enum_to_arg(struct pp_smumgr *smumgr,
+static uint8_t cz_translate_firmware_enum_to_arg(struct pp_hwmgr *hwmgr,
enum cz_scratch_entry firmware_enum)
{
uint8_t ret = 0;
@@ -226,7 +225,7 @@ static uint8_t cz_translate_firmware_enum_to_arg(struct pp_smumgr *smumgr,
ret = UCODE_ID_SDMA0;
break;
case CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1:
- if (smumgr->chip_id == CHIP_STONEY)
+ if (hwmgr->chip_id == CHIP_STONEY)
ret = UCODE_ID_SDMA0;
else
ret = UCODE_ID_SDMA1;
@@ -244,7 +243,7 @@ static uint8_t cz_translate_firmware_enum_to_arg(struct pp_smumgr *smumgr,
ret = UCODE_ID_CP_MEC_JT1;
break;
case CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2:
- if (smumgr->chip_id == CHIP_STONEY)
+ if (hwmgr->chip_id == CHIP_STONEY)
ret = UCODE_ID_CP_MEC_JT1;
else
ret = UCODE_ID_CP_MEC_JT2;
@@ -326,17 +325,17 @@ static enum cgs_ucode_id cz_convert_fw_type_to_cgs(uint32_t fw_type)
}
static int cz_smu_populate_single_scratch_task(
- struct pp_smumgr *smumgr,
+ struct pp_hwmgr *hwmgr,
enum cz_scratch_entry fw_enum,
uint8_t type, bool is_last)
{
uint8_t i;
- struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend;
+ struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
struct TOC *toc = (struct TOC *)cz_smu->toc_buffer.kaddr;
struct SMU_Task *task = &toc->tasks[cz_smu->toc_entry_used_count++];
task->type = type;
- task->arg = cz_translate_firmware_enum_to_arg(smumgr, fw_enum);
+ task->arg = cz_translate_firmware_enum_to_arg(hwmgr, fw_enum);
task->next = is_last ? END_OF_TASK_LIST : cz_smu->toc_entry_used_count;
for (i = 0; i < cz_smu->scratch_buffer_length; i++)
@@ -363,17 +362,17 @@ static int cz_smu_populate_single_scratch_task(
}
static int cz_smu_populate_single_ucode_load_task(
- struct pp_smumgr *smumgr,
+ struct pp_hwmgr *hwmgr,
enum cz_scratch_entry fw_enum,
bool is_last)
{
uint8_t i;
- struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend;
+ struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
struct TOC *toc = (struct TOC *)cz_smu->toc_buffer.kaddr;
struct SMU_Task *task = &toc->tasks[cz_smu->toc_entry_used_count++];
task->type = TASK_TYPE_UCODE_LOAD;
- task->arg = cz_translate_firmware_enum_to_arg(smumgr, fw_enum);
+ task->arg = cz_translate_firmware_enum_to_arg(hwmgr, fw_enum);
task->next = is_last ? END_OF_TASK_LIST : cz_smu->toc_entry_used_count;
for (i = 0; i < cz_smu->driver_buffer_length; i++)
@@ -392,22 +391,22 @@ static int cz_smu_populate_single_ucode_load_task(
return 0;
}
-static int cz_smu_construct_toc_for_rlc_aram_save(struct pp_smumgr *smumgr)
+static int cz_smu_construct_toc_for_rlc_aram_save(struct pp_hwmgr *hwmgr)
{
- struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend;
+ struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
cz_smu->toc_entry_aram = cz_smu->toc_entry_used_count;
- cz_smu_populate_single_scratch_task(smumgr,
+ cz_smu_populate_single_scratch_task(hwmgr,
CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM,
TASK_TYPE_UCODE_SAVE, true);
return 0;
}
-static int cz_smu_initialize_toc_empty_job_list(struct pp_smumgr *smumgr)
+static int cz_smu_initialize_toc_empty_job_list(struct pp_hwmgr *hwmgr)
{
int i;
- struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend;
+ struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
struct TOC *toc = (struct TOC *)cz_smu->toc_buffer.kaddr;
for (i = 0; i < NUM_JOBLIST_ENTRIES; i++)
@@ -416,17 +415,17 @@ static int cz_smu_initialize_toc_empty_job_list(struct pp_smumgr *smumgr)
return 0;
}
-static int cz_smu_construct_toc_for_vddgfx_enter(struct pp_smumgr *smumgr)
+static int cz_smu_construct_toc_for_vddgfx_enter(struct pp_hwmgr *hwmgr)
{
- struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend;
+ struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
struct TOC *toc = (struct TOC *)cz_smu->toc_buffer.kaddr;
toc->JobList[JOB_GFX_SAVE] = (uint8_t)cz_smu->toc_entry_used_count;
- cz_smu_populate_single_scratch_task(smumgr,
+ cz_smu_populate_single_scratch_task(hwmgr,
CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH,
TASK_TYPE_UCODE_SAVE, false);
- cz_smu_populate_single_scratch_task(smumgr,
+ cz_smu_populate_single_scratch_task(hwmgr,
CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM,
TASK_TYPE_UCODE_SAVE, true);
@@ -434,121 +433,120 @@ static int cz_smu_construct_toc_for_vddgfx_enter(struct pp_smumgr *smumgr)
}
-static int cz_smu_construct_toc_for_vddgfx_exit(struct pp_smumgr *smumgr)
+static int cz_smu_construct_toc_for_vddgfx_exit(struct pp_hwmgr *hwmgr)
{
- struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend;
+ struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
struct TOC *toc = (struct TOC *)cz_smu->toc_buffer.kaddr;
toc->JobList[JOB_GFX_RESTORE] = (uint8_t)cz_smu->toc_entry_used_count;
- cz_smu_populate_single_ucode_load_task(smumgr,
+ cz_smu_populate_single_ucode_load_task(hwmgr,
CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE, false);
- cz_smu_populate_single_ucode_load_task(smumgr,
+ cz_smu_populate_single_ucode_load_task(hwmgr,
CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP, false);
- cz_smu_populate_single_ucode_load_task(smumgr,
+ cz_smu_populate_single_ucode_load_task(hwmgr,
CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME, false);
- cz_smu_populate_single_ucode_load_task(smumgr,
+ cz_smu_populate_single_ucode_load_task(hwmgr,
CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
- if (smumgr->chip_id == CHIP_STONEY)
- cz_smu_populate_single_ucode_load_task(smumgr,
+ if (hwmgr->chip_id == CHIP_STONEY)
+ cz_smu_populate_single_ucode_load_task(hwmgr,
CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
else
- cz_smu_populate_single_ucode_load_task(smumgr,
+ cz_smu_populate_single_ucode_load_task(hwmgr,
CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, false);
- cz_smu_populate_single_ucode_load_task(smumgr,
+ cz_smu_populate_single_ucode_load_task(hwmgr,
CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G, false);
/* populate scratch */
- cz_smu_populate_single_scratch_task(smumgr,
+ cz_smu_populate_single_scratch_task(hwmgr,
CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH,
TASK_TYPE_UCODE_LOAD, false);
- cz_smu_populate_single_scratch_task(smumgr,
+ cz_smu_populate_single_scratch_task(hwmgr,
CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM,
TASK_TYPE_UCODE_LOAD, false);
- cz_smu_populate_single_scratch_task(smumgr,
+ cz_smu_populate_single_scratch_task(hwmgr,
CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM,
TASK_TYPE_UCODE_LOAD, true);
return 0;
}
-static int cz_smu_construct_toc_for_power_profiling(
- struct pp_smumgr *smumgr)
+static int cz_smu_construct_toc_for_power_profiling(struct pp_hwmgr *hwmgr)
{
- struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend;
+ struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
cz_smu->toc_entry_power_profiling_index = cz_smu->toc_entry_used_count;
- cz_smu_populate_single_scratch_task(smumgr,
+ cz_smu_populate_single_scratch_task(hwmgr,
CZ_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING,
TASK_TYPE_INITIALIZE, true);
return 0;
}
-static int cz_smu_construct_toc_for_bootup(struct pp_smumgr *smumgr)
+static int cz_smu_construct_toc_for_bootup(struct pp_hwmgr *hwmgr)
{
- struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend;
+ struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
cz_smu->toc_entry_initialize_index = cz_smu->toc_entry_used_count;
- cz_smu_populate_single_ucode_load_task(smumgr,
+ cz_smu_populate_single_ucode_load_task(hwmgr,
CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0, false);
- if (smumgr->chip_id != CHIP_STONEY)
- cz_smu_populate_single_ucode_load_task(smumgr,
+ if (hwmgr->chip_id != CHIP_STONEY)
+ cz_smu_populate_single_ucode_load_task(hwmgr,
CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1, false);
- cz_smu_populate_single_ucode_load_task(smumgr,
+ cz_smu_populate_single_ucode_load_task(hwmgr,
CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE, false);
- cz_smu_populate_single_ucode_load_task(smumgr,
+ cz_smu_populate_single_ucode_load_task(hwmgr,
CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP, false);
- cz_smu_populate_single_ucode_load_task(smumgr,
+ cz_smu_populate_single_ucode_load_task(hwmgr,
CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME, false);
- cz_smu_populate_single_ucode_load_task(smumgr,
+ cz_smu_populate_single_ucode_load_task(hwmgr,
CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
- if (smumgr->chip_id != CHIP_STONEY)
- cz_smu_populate_single_ucode_load_task(smumgr,
+ if (hwmgr->chip_id != CHIP_STONEY)
+ cz_smu_populate_single_ucode_load_task(hwmgr,
CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, false);
- cz_smu_populate_single_ucode_load_task(smumgr,
+ cz_smu_populate_single_ucode_load_task(hwmgr,
CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G, true);
return 0;
}
-static int cz_smu_construct_toc_for_clock_table(struct pp_smumgr *smumgr)
+static int cz_smu_construct_toc_for_clock_table(struct pp_hwmgr *hwmgr)
{
- struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend;
+ struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
cz_smu->toc_entry_clock_table = cz_smu->toc_entry_used_count;
- cz_smu_populate_single_scratch_task(smumgr,
+ cz_smu_populate_single_scratch_task(hwmgr,
CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE,
TASK_TYPE_INITIALIZE, true);
return 0;
}
-static int cz_smu_construct_toc(struct pp_smumgr *smumgr)
+static int cz_smu_construct_toc(struct pp_hwmgr *hwmgr)
{
- struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend;
+ struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
cz_smu->toc_entry_used_count = 0;
- cz_smu_initialize_toc_empty_job_list(smumgr);
- cz_smu_construct_toc_for_rlc_aram_save(smumgr);
- cz_smu_construct_toc_for_vddgfx_enter(smumgr);
- cz_smu_construct_toc_for_vddgfx_exit(smumgr);
- cz_smu_construct_toc_for_power_profiling(smumgr);
- cz_smu_construct_toc_for_bootup(smumgr);
- cz_smu_construct_toc_for_clock_table(smumgr);
+ cz_smu_initialize_toc_empty_job_list(hwmgr);
+ cz_smu_construct_toc_for_rlc_aram_save(hwmgr);
+ cz_smu_construct_toc_for_vddgfx_enter(hwmgr);
+ cz_smu_construct_toc_for_vddgfx_exit(hwmgr);
+ cz_smu_construct_toc_for_power_profiling(hwmgr);
+ cz_smu_construct_toc_for_bootup(hwmgr);
+ cz_smu_construct_toc_for_clock_table(hwmgr);
return 0;
}
-static int cz_smu_populate_firmware_entries(struct pp_smumgr *smumgr)
+static int cz_smu_populate_firmware_entries(struct pp_hwmgr *hwmgr)
{
- struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend;
+ struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
uint32_t firmware_type;
uint32_t i;
int ret;
@@ -559,12 +557,12 @@ static int cz_smu_populate_firmware_entries(struct pp_smumgr *smumgr)
for (i = 0; i < ARRAY_SIZE(firmware_list); i++) {
- firmware_type = cz_translate_firmware_enum_to_arg(smumgr,
+ firmware_type = cz_translate_firmware_enum_to_arg(hwmgr,
firmware_list[i]);
ucode_id = cz_convert_fw_type_to_cgs(firmware_type);
- ret = cgs_get_firmware_info(smumgr->device,
+ ret = cgs_get_firmware_info(hwmgr->device,
ucode_id, &info);
if (ret == 0) {
@@ -585,12 +583,12 @@ static int cz_smu_populate_firmware_entries(struct pp_smumgr *smumgr)
}
static int cz_smu_populate_single_scratch_entry(
- struct pp_smumgr *smumgr,
+ struct pp_hwmgr *hwmgr,
enum cz_scratch_entry scratch_type,
uint32_t ulsize_byte,
struct cz_buffer_entry *entry)
{
- struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend;
+ struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
long long mc_addr =
((long long)(cz_smu->smu_buffer.mc_addr_high) << 32)
| cz_smu->smu_buffer.mc_addr_low;
@@ -611,9 +609,9 @@ static int cz_smu_populate_single_scratch_entry(
return 0;
}
-static int cz_download_pptable_settings(struct pp_smumgr *smumgr, void **table)
+static int cz_download_pptable_settings(struct pp_hwmgr *hwmgr, void **table)
{
- struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend;
+ struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
unsigned long i;
for (i = 0; i < cz_smu->scratch_buffer_length; i++) {
@@ -624,25 +622,25 @@ static int cz_download_pptable_settings(struct pp_smumgr *smumgr, void **table)
*table = (struct SMU8_Fusion_ClkTable *)cz_smu->scratch_buffer[i].kaddr;
- cz_send_msg_to_smc_with_parameter(smumgr,
+ cz_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetClkTableAddrHi,
cz_smu->scratch_buffer[i].mc_addr_high);
- cz_send_msg_to_smc_with_parameter(smumgr,
+ cz_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetClkTableAddrLo,
cz_smu->scratch_buffer[i].mc_addr_low);
- cz_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_ExecuteJob,
+ cz_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ExecuteJob,
cz_smu->toc_entry_clock_table);
- cz_send_msg_to_smc(smumgr, PPSMC_MSG_ClkTableXferToDram);
+ cz_send_msg_to_smc(hwmgr, PPSMC_MSG_ClkTableXferToDram);
return 0;
}
-static int cz_upload_pptable_settings(struct pp_smumgr *smumgr)
+static int cz_upload_pptable_settings(struct pp_hwmgr *hwmgr)
{
- struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend;
+ struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
unsigned long i;
for (i = 0; i < cz_smu->scratch_buffer_length; i++) {
@@ -651,63 +649,63 @@ static int cz_upload_pptable_settings(struct pp_smumgr *smumgr)
break;
}
- cz_send_msg_to_smc_with_parameter(smumgr,
+ cz_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetClkTableAddrHi,
cz_smu->scratch_buffer[i].mc_addr_high);
- cz_send_msg_to_smc_with_parameter(smumgr,
+ cz_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetClkTableAddrLo,
cz_smu->scratch_buffer[i].mc_addr_low);
- cz_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_ExecuteJob,
+ cz_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ExecuteJob,
cz_smu->toc_entry_clock_table);
- cz_send_msg_to_smc(smumgr, PPSMC_MSG_ClkTableXferToSmu);
+ cz_send_msg_to_smc(hwmgr, PPSMC_MSG_ClkTableXferToSmu);
return 0;
}
-static int cz_request_smu_load_fw(struct pp_smumgr *smumgr)
+static int cz_request_smu_load_fw(struct pp_hwmgr *hwmgr)
{
- struct cz_smumgr *cz_smu = (struct cz_smumgr *)(smumgr->backend);
+ struct cz_smumgr *cz_smu = (struct cz_smumgr *)(hwmgr->smu_backend);
uint32_t smc_address;
- if (!smumgr->reload_fw) {
+ if (!hwmgr->reload_fw) {
pr_info("skip reloading...\n");
return 0;
}
- cz_smu_populate_firmware_entries(smumgr);
+ cz_smu_populate_firmware_entries(hwmgr);
- cz_smu_construct_toc(smumgr);
+ cz_smu_construct_toc(hwmgr);
smc_address = SMU8_FIRMWARE_HEADER_LOCATION +
offsetof(struct SMU8_Firmware_Header, UcodeLoadStatus);
- cz_write_smc_sram_dword(smumgr, smc_address, 0, smc_address+4);
+ cz_write_smc_sram_dword(hwmgr, smc_address, 0, smc_address+4);
- cz_send_msg_to_smc_with_parameter(smumgr,
+ cz_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_DriverDramAddrHi,
cz_smu->toc_buffer.mc_addr_high);
- cz_send_msg_to_smc_with_parameter(smumgr,
+ cz_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_DriverDramAddrLo,
cz_smu->toc_buffer.mc_addr_low);
- cz_send_msg_to_smc(smumgr, PPSMC_MSG_InitJobs);
+ cz_send_msg_to_smc(hwmgr, PPSMC_MSG_InitJobs);
- cz_send_msg_to_smc_with_parameter(smumgr,
+ cz_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_ExecuteJob,
cz_smu->toc_entry_aram);
- cz_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_ExecuteJob,
+ cz_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ExecuteJob,
cz_smu->toc_entry_power_profiling_index);
- return cz_send_msg_to_smc_with_parameter(smumgr,
+ return cz_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_ExecuteJob,
cz_smu->toc_entry_initialize_index);
}
-static int cz_start_smu(struct pp_smumgr *smumgr)
+static int cz_start_smu(struct pp_hwmgr *hwmgr)
{
int ret = 0;
uint32_t fw_to_check = 0;
@@ -721,23 +719,23 @@ static int cz_start_smu(struct pp_smumgr *smumgr)
UCODE_ID_CP_MEC_JT1_MASK |
UCODE_ID_CP_MEC_JT2_MASK;
- if (smumgr->chip_id == CHIP_STONEY)
+ if (hwmgr->chip_id == CHIP_STONEY)
fw_to_check &= ~(UCODE_ID_SDMA1_MASK | UCODE_ID_CP_MEC_JT2_MASK);
- ret = cz_request_smu_load_fw(smumgr);
+ ret = cz_request_smu_load_fw(hwmgr);
if (ret)
pr_err("SMU firmware load failed\n");
- cz_check_fw_load_finish(smumgr, fw_to_check);
+ cz_check_fw_load_finish(hwmgr, fw_to_check);
- ret = cz_load_mec_firmware(smumgr);
+ ret = cz_load_mec_firmware(hwmgr);
if (ret)
pr_err("Mec Firmware load failed\n");
return ret;
}
-static int cz_smu_init(struct pp_smumgr *smumgr)
+static int cz_smu_init(struct pp_hwmgr *hwmgr)
{
uint64_t mc_addr = 0;
int ret = 0;
@@ -747,7 +745,7 @@ static int cz_smu_init(struct pp_smumgr *smumgr)
if (cz_smu == NULL)
return -ENOMEM;
- smumgr->backend = cz_smu;
+ hwmgr->smu_backend = cz_smu;
cz_smu->toc_buffer.data_size = 4096;
cz_smu->smu_buffer.data_size =
@@ -757,7 +755,7 @@ static int cz_smu_init(struct pp_smumgr *smumgr)
ALIGN(sizeof(struct SMU8_MultimediaPowerLogData), 32) +
ALIGN(sizeof(struct SMU8_Fusion_ClkTable), 32);
- ret = smu_allocate_memory(smumgr->device,
+ ret = smu_allocate_memory(hwmgr->device,
cz_smu->toc_buffer.data_size,
CGS_GPU_MEM_TYPE__GART_CACHEABLE,
PAGE_SIZE,
@@ -770,7 +768,7 @@ static int cz_smu_init(struct pp_smumgr *smumgr)
cz_smu->toc_buffer.mc_addr_high = smu_upper_32_bits(mc_addr);
cz_smu->toc_buffer.mc_addr_low = smu_lower_32_bits(mc_addr);
- ret = smu_allocate_memory(smumgr->device,
+ ret = smu_allocate_memory(hwmgr->device,
cz_smu->smu_buffer.data_size,
CGS_GPU_MEM_TYPE__GART_CACHEABLE,
PAGE_SIZE,
@@ -783,7 +781,7 @@ static int cz_smu_init(struct pp_smumgr *smumgr)
cz_smu->smu_buffer.mc_addr_high = smu_upper_32_bits(mc_addr);
cz_smu->smu_buffer.mc_addr_low = smu_lower_32_bits(mc_addr);
- if (0 != cz_smu_populate_single_scratch_entry(smumgr,
+ if (0 != cz_smu_populate_single_scratch_entry(hwmgr,
CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH,
UCODE_ID_RLC_SCRATCH_SIZE_BYTE,
&cz_smu->scratch_buffer[cz_smu->scratch_buffer_length++])) {
@@ -791,14 +789,14 @@ static int cz_smu_init(struct pp_smumgr *smumgr)
return -1;
}
- if (0 != cz_smu_populate_single_scratch_entry(smumgr,
+ if (0 != cz_smu_populate_single_scratch_entry(hwmgr,
CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM,
UCODE_ID_RLC_SRM_ARAM_SIZE_BYTE,
&cz_smu->scratch_buffer[cz_smu->scratch_buffer_length++])) {
pr_err("Error when Populate Firmware Entry.\n");
return -1;
}
- if (0 != cz_smu_populate_single_scratch_entry(smumgr,
+ if (0 != cz_smu_populate_single_scratch_entry(hwmgr,
CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM,
UCODE_ID_RLC_SRM_DRAM_SIZE_BYTE,
&cz_smu->scratch_buffer[cz_smu->scratch_buffer_length++])) {
@@ -806,7 +804,7 @@ static int cz_smu_init(struct pp_smumgr *smumgr)
return -1;
}
- if (0 != cz_smu_populate_single_scratch_entry(smumgr,
+ if (0 != cz_smu_populate_single_scratch_entry(hwmgr,
CZ_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING,
sizeof(struct SMU8_MultimediaPowerLogData),
&cz_smu->scratch_buffer[cz_smu->scratch_buffer_length++])) {
@@ -814,7 +812,7 @@ static int cz_smu_init(struct pp_smumgr *smumgr)
return -1;
}
- if (0 != cz_smu_populate_single_scratch_entry(smumgr,
+ if (0 != cz_smu_populate_single_scratch_entry(hwmgr,
CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE,
sizeof(struct SMU8_Fusion_ClkTable),
&cz_smu->scratch_buffer[cz_smu->scratch_buffer_length++])) {
@@ -825,18 +823,18 @@ static int cz_smu_init(struct pp_smumgr *smumgr)
return 0;
}
-static int cz_smu_fini(struct pp_smumgr *smumgr)
+static int cz_smu_fini(struct pp_hwmgr *hwmgr)
{
struct cz_smumgr *cz_smu;
- if (smumgr == NULL || smumgr->device == NULL)
+ if (hwmgr == NULL || hwmgr->device == NULL)
return -EINVAL;
- cz_smu = (struct cz_smumgr *)smumgr->backend;
+ cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
if (cz_smu) {
- cgs_free_gpu_mem(smumgr->device,
+ cgs_free_gpu_mem(hwmgr->device,
cz_smu->toc_buffer.handle);
- cgs_free_gpu_mem(smumgr->device,
+ cgs_free_gpu_mem(hwmgr->device,
cz_smu->smu_buffer.handle);
kfree(cz_smu);
}
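
Aside: the Carrizo hunks above reach SMC SRAM through an index/data register pair: cz_set_smc_sram_address() rejects byte offsets that are not dword-aligned and programs mmMP0PUB_IND_INDEX_0 with SMN_MP1_SRAM_START_ADDR plus the offset, after which cz_write_smc_sram_dword() pushes the value through mmMP0PUB_IND_DATA_0. A standalone sketch of that access pattern with stubbed register writes follows; the register numbers, base address and limit handling are illustrative only.

/* toy_sram_write.c - indirect (index/data) SRAM write with dword-alignment check */
#include <stdint.h>
#include <stdio.h>
#include <errno.h>

#define TOY_IND_INDEX_REG	0x180		/* stand-in for mmMP0PUB_IND_INDEX_0 */
#define TOY_IND_DATA_REG	0x181		/* stand-in for mmMP0PUB_IND_DATA_0 */
#define TOY_SRAM_BASE		0x10000000u	/* stand-in for SMN_MP1_SRAM_START_ADDR */

static void toy_write_register(uint32_t reg, uint32_t val)
{
	printf("write reg 0x%03x <= 0x%08x\n", reg, val);
}

static int toy_set_sram_address(uint32_t smc_address, uint32_t limit)
{
	if (smc_address & 3)		/* same dword-alignment requirement as the kernel code */
		return -EINVAL;
	if (smc_address >= limit)	/* simplified bounds check */
		return -EINVAL;

	toy_write_register(TOY_IND_INDEX_REG, TOY_SRAM_BASE + smc_address);
	return 0;
}

static int toy_write_sram_dword(uint32_t smc_address, uint32_t value, uint32_t limit)
{
	int ret = toy_set_sram_address(smc_address, limit);

	if (!ret)
		toy_write_register(TOY_IND_DATA_REG, value);
	return ret;
}

int main(void)
{
	/* clear a hypothetical UcodeLoadStatus dword at byte offset 0x20 */
	return toy_write_sram_dword(0x20, 0, 0x20 + 4);
}

In the hunk above, cz_request_smu_load_fw() uses cz_write_smc_sram_dword() in exactly this way to zero the UcodeLoadStatus word before handing the TOC address to the SMU and kicking off the firmware load jobs.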
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c
deleted file mode 100644
index 8712f093d6d9..000000000000
--- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c
+++ /dev/null
@@ -1,2498 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include "pp_debug.h"
-#include "fiji_smc.h"
-#include "smu7_dyn_defaults.h"
-
-#include "smu7_hwmgr.h"
-#include "hardwaremanager.h"
-#include "ppatomctrl.h"
-#include "cgs_common.h"
-#include "atombios.h"
-#include "fiji_smumgr.h"
-#include "pppcielanes.h"
-#include "smu7_ppsmc.h"
-#include "smu73.h"
-#include "smu/smu_7_1_3_d.h"
-#include "smu/smu_7_1_3_sh_mask.h"
-#include "gmc/gmc_8_1_d.h"
-#include "gmc/gmc_8_1_sh_mask.h"
-#include "bif/bif_5_0_d.h"
-#include "bif/bif_5_0_sh_mask.h"
-#include "dce/dce_10_0_d.h"
-#include "dce/dce_10_0_sh_mask.h"
-#include "smu7_smumgr.h"
-
-#define VOLTAGE_SCALE 4
-#define POWERTUNE_DEFAULT_SET_MAX 1
-#define VOLTAGE_VID_OFFSET_SCALE1 625
-#define VOLTAGE_VID_OFFSET_SCALE2 100
-#define VDDC_VDDCI_DELTA 300
-#define MC_CG_ARB_FREQ_F1 0x0b
-
-/* [2.5%,~2.5%] Clock stretched is multiple of 2.5% vs
- * not and [Fmin, Fmax, LDO_REFSEL, USE_FOR_LOW_FREQ]
- */
-static const uint16_t fiji_clock_stretcher_lookup_table[2][4] = {
- {600, 1050, 3, 0}, {600, 1050, 6, 1} };
-
-/* [FF, SS] type, [] 4 voltage ranges, and
- * [Floor Freq, Boundary Freq, VID min , VID max]
- */
-static const uint32_t fiji_clock_stretcher_ddt_table[2][4][4] = {
- { {265, 529, 120, 128}, {325, 650, 96, 119}, {430, 860, 32, 95}, {0, 0, 0, 31} },
- { {275, 550, 104, 112}, {319, 638, 96, 103}, {360, 720, 64, 95}, {384, 768, 32, 63} } };
-
-/* [Use_For_Low_freq] value, [0%, 5%, 10%, 7.14%, 14.28%, 20%]
- * (coming from PWR_CKS_CNTL.stretch_amount reg spec)
- */
-static const uint8_t fiji_clock_stretch_amount_conversion[2][6] = {
- {0, 1, 3, 2, 4, 5}, {0, 2, 4, 5, 6, 5} };
-
-static const struct fiji_pt_defaults fiji_power_tune_data_set_array[POWERTUNE_DEFAULT_SET_MAX] = {
- /*sviLoadLIneEn, SviLoadLineVddC, TDC_VDDC_ThrottleReleaseLimitPerc */
- {1, 0xF, 0xFD,
- /* TDC_MAWt, TdcWaterfallCtl, DTEAmbientTempBase */
- 0x19, 5, 45}
-};
-
-/* PPGen has the gain setting generated in x * 100 unit
- * This function is to convert the unit to x * 4096(0x1000) unit.
- * This is the unit expected by SMC firmware
- */
-static int fiji_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr,
- struct phm_ppt_v1_clock_voltage_dependency_table *dep_table,
- uint32_t clock, uint32_t *voltage, uint32_t *mvdd)
-{
- uint32_t i;
- uint16_t vddci;
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- *voltage = *mvdd = 0;
-
-
- /* clock - voltage dependency table is empty table */
- if (dep_table->count == 0)
- return -EINVAL;
-
- for (i = 0; i < dep_table->count; i++) {
- /* find first sclk bigger than request */
- if (dep_table->entries[i].clk >= clock) {
- *voltage |= (dep_table->entries[i].vddc *
- VOLTAGE_SCALE) << VDDC_SHIFT;
- if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control)
- *voltage |= (data->vbios_boot_state.vddci_bootup_value *
- VOLTAGE_SCALE) << VDDCI_SHIFT;
- else if (dep_table->entries[i].vddci)
- *voltage |= (dep_table->entries[i].vddci *
- VOLTAGE_SCALE) << VDDCI_SHIFT;
- else {
- vddci = phm_find_closest_vddci(&(data->vddci_voltage_table),
- (dep_table->entries[i].vddc -
- VDDC_VDDCI_DELTA));
- *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
- }
-
- if (SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control)
- *mvdd = data->vbios_boot_state.mvdd_bootup_value *
- VOLTAGE_SCALE;
- else if (dep_table->entries[i].mvdd)
- *mvdd = (uint32_t) dep_table->entries[i].mvdd *
- VOLTAGE_SCALE;
-
- *voltage |= 1 << PHASES_SHIFT;
- return 0;
- }
- }
-
- /* sclk is bigger than max sclk in the dependence table */
- *voltage |= (dep_table->entries[i - 1].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
-
- if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control)
- *voltage |= (data->vbios_boot_state.vddci_bootup_value *
- VOLTAGE_SCALE) << VDDCI_SHIFT;
- else if (dep_table->entries[i-1].vddci) {
- vddci = phm_find_closest_vddci(&(data->vddci_voltage_table),
- (dep_table->entries[i].vddc -
- VDDC_VDDCI_DELTA));
- *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
- }
-
- if (SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control)
- *mvdd = data->vbios_boot_state.mvdd_bootup_value * VOLTAGE_SCALE;
- else if (dep_table->entries[i].mvdd)
- *mvdd = (uint32_t) dep_table->entries[i - 1].mvdd * VOLTAGE_SCALE;
-
- return 0;
-}
-
-
-static uint16_t scale_fan_gain_settings(uint16_t raw_setting)
-{
- uint32_t tmp;
- tmp = raw_setting * 4096 / 100;
- return (uint16_t)tmp;
-}
-
-static void get_scl_sda_value(uint8_t line, uint8_t *scl, uint8_t *sda)
-{
- switch (line) {
- case SMU7_I2CLineID_DDC1:
- *scl = SMU7_I2C_DDC1CLK;
- *sda = SMU7_I2C_DDC1DATA;
- break;
- case SMU7_I2CLineID_DDC2:
- *scl = SMU7_I2C_DDC2CLK;
- *sda = SMU7_I2C_DDC2DATA;
- break;
- case SMU7_I2CLineID_DDC3:
- *scl = SMU7_I2C_DDC3CLK;
- *sda = SMU7_I2C_DDC3DATA;
- break;
- case SMU7_I2CLineID_DDC4:
- *scl = SMU7_I2C_DDC4CLK;
- *sda = SMU7_I2C_DDC4DATA;
- break;
- case SMU7_I2CLineID_DDC5:
- *scl = SMU7_I2C_DDC5CLK;
- *sda = SMU7_I2C_DDC5DATA;
- break;
- case SMU7_I2CLineID_DDC6:
- *scl = SMU7_I2C_DDC6CLK;
- *sda = SMU7_I2C_DDC6DATA;
- break;
- case SMU7_I2CLineID_SCLSDA:
- *scl = SMU7_I2C_SCL;
- *sda = SMU7_I2C_SDA;
- break;
- case SMU7_I2CLineID_DDCVGA:
- *scl = SMU7_I2C_DDCVGACLK;
- *sda = SMU7_I2C_DDCVGADATA;
- break;
- default:
- *scl = 0;
- *sda = 0;
- break;
- }
-}
-
-static void fiji_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
-{
- struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
- if (table_info &&
- table_info->cac_dtp_table->usPowerTuneDataSetID <= POWERTUNE_DEFAULT_SET_MAX &&
- table_info->cac_dtp_table->usPowerTuneDataSetID)
- smu_data->power_tune_defaults =
- &fiji_power_tune_data_set_array
- [table_info->cac_dtp_table->usPowerTuneDataSetID - 1];
- else
- smu_data->power_tune_defaults = &fiji_power_tune_data_set_array[0];
-
-}
-
-static int fiji_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr)
-{
-
- struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
- const struct fiji_pt_defaults *defaults = smu_data->power_tune_defaults;
-
- SMU73_Discrete_DpmTable *dpm_table = &(smu_data->smc_state_table);
-
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct phm_cac_tdp_table *cac_dtp_table = table_info->cac_dtp_table;
- struct pp_advance_fan_control_parameters *fan_table =
- &hwmgr->thermal_controller.advanceFanControlParameters;
- uint8_t uc_scl, uc_sda;
-
- /* TDP number of fraction bits are changed from 8 to 7 for Fiji
- * as requested by SMC team
- */
- dpm_table->DefaultTdp = PP_HOST_TO_SMC_US(
- (uint16_t)(cac_dtp_table->usTDP * 128));
- dpm_table->TargetTdp = PP_HOST_TO_SMC_US(
- (uint16_t)(cac_dtp_table->usTDP * 128));
-
- PP_ASSERT_WITH_CODE(cac_dtp_table->usTargetOperatingTemp <= 255,
- "Target Operating Temp is out of Range!",
- );
-
- dpm_table->GpuTjMax = (uint8_t)(cac_dtp_table->usTargetOperatingTemp);
- dpm_table->GpuTjHyst = 8;
-
- dpm_table->DTEAmbientTempBase = defaults->DTEAmbientTempBase;
-
- /* The following are for new Fiji Multi-input fan/thermal control */
- dpm_table->TemperatureLimitEdge = PP_HOST_TO_SMC_US(
- cac_dtp_table->usTargetOperatingTemp * 256);
- dpm_table->TemperatureLimitHotspot = PP_HOST_TO_SMC_US(
- cac_dtp_table->usTemperatureLimitHotspot * 256);
- dpm_table->TemperatureLimitLiquid1 = PP_HOST_TO_SMC_US(
- cac_dtp_table->usTemperatureLimitLiquid1 * 256);
- dpm_table->TemperatureLimitLiquid2 = PP_HOST_TO_SMC_US(
- cac_dtp_table->usTemperatureLimitLiquid2 * 256);
- dpm_table->TemperatureLimitVrVddc = PP_HOST_TO_SMC_US(
- cac_dtp_table->usTemperatureLimitVrVddc * 256);
- dpm_table->TemperatureLimitVrMvdd = PP_HOST_TO_SMC_US(
- cac_dtp_table->usTemperatureLimitVrMvdd * 256);
- dpm_table->TemperatureLimitPlx = PP_HOST_TO_SMC_US(
- cac_dtp_table->usTemperatureLimitPlx * 256);
-
- dpm_table->FanGainEdge = PP_HOST_TO_SMC_US(
- scale_fan_gain_settings(fan_table->usFanGainEdge));
- dpm_table->FanGainHotspot = PP_HOST_TO_SMC_US(
- scale_fan_gain_settings(fan_table->usFanGainHotspot));
- dpm_table->FanGainLiquid = PP_HOST_TO_SMC_US(
- scale_fan_gain_settings(fan_table->usFanGainLiquid));
- dpm_table->FanGainVrVddc = PP_HOST_TO_SMC_US(
- scale_fan_gain_settings(fan_table->usFanGainVrVddc));
- dpm_table->FanGainVrMvdd = PP_HOST_TO_SMC_US(
- scale_fan_gain_settings(fan_table->usFanGainVrMvdd));
- dpm_table->FanGainPlx = PP_HOST_TO_SMC_US(
- scale_fan_gain_settings(fan_table->usFanGainPlx));
- dpm_table->FanGainHbm = PP_HOST_TO_SMC_US(
- scale_fan_gain_settings(fan_table->usFanGainHbm));
-
- dpm_table->Liquid1_I2C_address = cac_dtp_table->ucLiquid1_I2C_address;
- dpm_table->Liquid2_I2C_address = cac_dtp_table->ucLiquid2_I2C_address;
- dpm_table->Vr_I2C_address = cac_dtp_table->ucVr_I2C_address;
- dpm_table->Plx_I2C_address = cac_dtp_table->ucPlx_I2C_address;
-
- get_scl_sda_value(cac_dtp_table->ucLiquid_I2C_Line, &uc_scl, &uc_sda);
- dpm_table->Liquid_I2C_LineSCL = uc_scl;
- dpm_table->Liquid_I2C_LineSDA = uc_sda;
-
- get_scl_sda_value(cac_dtp_table->ucVr_I2C_Line, &uc_scl, &uc_sda);
- dpm_table->Vr_I2C_LineSCL = uc_scl;
- dpm_table->Vr_I2C_LineSDA = uc_sda;
-
- get_scl_sda_value(cac_dtp_table->ucPlx_I2C_Line, &uc_scl, &uc_sda);
- dpm_table->Plx_I2C_LineSCL = uc_scl;
- dpm_table->Plx_I2C_LineSDA = uc_sda;
-
- return 0;
-}
-
-
-static int fiji_populate_svi_load_line(struct pp_hwmgr *hwmgr)
-{
- struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
- const struct fiji_pt_defaults *defaults = smu_data->power_tune_defaults;
-
- smu_data->power_tune_table.SviLoadLineEn = defaults->SviLoadLineEn;
- smu_data->power_tune_table.SviLoadLineVddC = defaults->SviLoadLineVddC;
- smu_data->power_tune_table.SviLoadLineTrimVddC = 3;
- smu_data->power_tune_table.SviLoadLineOffsetVddC = 0;
-
- return 0;
-}
-
-
-static int fiji_populate_tdc_limit(struct pp_hwmgr *hwmgr)
-{
- uint16_t tdc_limit;
- struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- const struct fiji_pt_defaults *defaults = smu_data->power_tune_defaults;
-
- /* TDC number of fraction bits are changed from 8 to 7
- * for Fiji as requested by SMC team
- */
- tdc_limit = (uint16_t)(table_info->cac_dtp_table->usTDC * 128);
- smu_data->power_tune_table.TDC_VDDC_PkgLimit =
- CONVERT_FROM_HOST_TO_SMC_US(tdc_limit);
- smu_data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
- defaults->TDC_VDDC_ThrottleReleaseLimitPerc;
- smu_data->power_tune_table.TDC_MAWt = defaults->TDC_MAWt;
-
- return 0;
-}
-
-static int fiji_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
-{
- struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
- const struct fiji_pt_defaults *defaults = smu_data->power_tune_defaults;
- uint32_t temp;
-
- if (smu7_read_smc_sram_dword(hwmgr->smumgr,
- fuse_table_offset +
- offsetof(SMU73_Discrete_PmFuses, TdcWaterfallCtl),
- (uint32_t *)&temp, SMC_RAM_END))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to read PmFuses.DW6 (SviLoadLineEn) from SMC Failed!",
- return -EINVAL);
- else {
- smu_data->power_tune_table.TdcWaterfallCtl = defaults->TdcWaterfallCtl;
- smu_data->power_tune_table.LPMLTemperatureMin =
- (uint8_t)((temp >> 16) & 0xff);
- smu_data->power_tune_table.LPMLTemperatureMax =
- (uint8_t)((temp >> 8) & 0xff);
- smu_data->power_tune_table.Reserved = (uint8_t)(temp & 0xff);
- }
- return 0;
-}
-
-static int fiji_populate_temperature_scaler(struct pp_hwmgr *hwmgr)
-{
- int i;
- struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
-
- /* Currently not used. Set all to zero. */
- for (i = 0; i < 16; i++)
- smu_data->power_tune_table.LPMLTemperatureScaler[i] = 0;
-
- return 0;
-}
-
-static int fiji_populate_fuzzy_fan(struct pp_hwmgr *hwmgr)
-{
- struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
-
- if ((hwmgr->thermal_controller.advanceFanControlParameters.
- usFanOutputSensitivity & (1 << 15)) ||
- 0 == hwmgr->thermal_controller.advanceFanControlParameters.
- usFanOutputSensitivity)
- hwmgr->thermal_controller.advanceFanControlParameters.
- usFanOutputSensitivity = hwmgr->thermal_controller.
- advanceFanControlParameters.usDefaultFanOutputSensitivity;
-
- smu_data->power_tune_table.FuzzyFan_PwmSetDelta =
- PP_HOST_TO_SMC_US(hwmgr->thermal_controller.
- advanceFanControlParameters.usFanOutputSensitivity);
- return 0;
-}
-
-static int fiji_populate_gnb_lpml(struct pp_hwmgr *hwmgr)
-{
- int i;
- struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
-
- /* Currently not used. Set all to zero. */
- for (i = 0; i < 16; i++)
- smu_data->power_tune_table.GnbLPML[i] = 0;
-
- return 0;
-}
-
-static int fiji_min_max_vgnb_lpml_id_from_bapm_vddc(struct pp_hwmgr *hwmgr)
-{
- return 0;
-}
-
-static int fiji_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr)
-{
- struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- uint16_t HiSidd = smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd;
- uint16_t LoSidd = smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd;
- struct phm_cac_tdp_table *cac_table = table_info->cac_dtp_table;
-
- HiSidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256);
- LoSidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256);
-
- smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd =
- CONVERT_FROM_HOST_TO_SMC_US(HiSidd);
- smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd =
- CONVERT_FROM_HOST_TO_SMC_US(LoSidd);
-
- return 0;
-}
-
-static int fiji_populate_pm_fuses(struct pp_hwmgr *hwmgr)
-{
- uint32_t pm_fuse_table_offset;
- struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_PowerContainment)) {
- if (smu7_read_smc_sram_dword(hwmgr->smumgr,
- SMU7_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU73_Firmware_Header, PmFuseTable),
- &pm_fuse_table_offset, SMC_RAM_END))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to get pm_fuse_table_offset Failed!",
- return -EINVAL);
-
- /* DW6 */
- if (fiji_populate_svi_load_line(hwmgr))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate SviLoadLine Failed!",
- return -EINVAL);
- /* DW7 */
- if (fiji_populate_tdc_limit(hwmgr))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate TDCLimit Failed!", return -EINVAL);
- /* DW8 */
- if (fiji_populate_dw8(hwmgr, pm_fuse_table_offset))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate TdcWaterfallCtl, "
- "LPMLTemperature Min and Max Failed!",
- return -EINVAL);
-
- /* DW9-DW12 */
- if (0 != fiji_populate_temperature_scaler(hwmgr))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate LPMLTemperatureScaler Failed!",
- return -EINVAL);
-
- /* DW13-DW14 */
- if (fiji_populate_fuzzy_fan(hwmgr))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate Fuzzy Fan Control parameters Failed!",
- return -EINVAL);
-
- /* DW15-DW18 */
- if (fiji_populate_gnb_lpml(hwmgr))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate GnbLPML Failed!",
- return -EINVAL);
-
- /* DW19 */
- if (fiji_min_max_vgnb_lpml_id_from_bapm_vddc(hwmgr))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate GnbLPML Min and Max Vid Failed!",
- return -EINVAL);
-
- /* DW20 */
- if (fiji_populate_bapm_vddc_base_leakage_sidd(hwmgr))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate BapmVddCBaseLeakage Hi and Lo "
- "Sidd Failed!", return -EINVAL);
-
- if (smu7_copy_bytes_to_smc(hwmgr->smumgr, pm_fuse_table_offset,
- (uint8_t *)&smu_data->power_tune_table,
- sizeof(struct SMU73_Discrete_PmFuses), SMC_RAM_END))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to download PmFuseTable Failed!",
- return -EINVAL);
- }
- return 0;
-}
-
-/**
-* Preparation of vddc and vddgfx CAC tables for SMC.
-*
-* @param hwmgr the address of the hardware manager
-* @param table the SMC DPM table structure to be populated
-* @return always 0
-*/
-static int fiji_populate_cac_table(struct pp_hwmgr *hwmgr,
- struct SMU73_Discrete_DpmTable *table)
-{
- uint32_t count;
- uint8_t index;
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct phm_ppt_v1_voltage_lookup_table *lookup_table =
- table_info->vddc_lookup_table;
- /* tables is already swapped, so in order to use the value from it,
- * we need to swap it back.
- * We are populating vddc CAC data to BapmVddc table
- * in split and merged mode
- */
-
- for (count = 0; count < lookup_table->count; count++) {
- index = phm_get_voltage_index(lookup_table,
- data->vddc_voltage_table.entries[count].value);
- table->BapmVddcVidLoSidd[count] =
- convert_to_vid(lookup_table->entries[index].us_cac_low);
- table->BapmVddcVidHiSidd[count] =
- convert_to_vid(lookup_table->entries[index].us_cac_high);
- }
-
- return 0;
-}
-
-/**
-* Preparation of voltage tables for SMC.
-*
-* @param hwmgr the address of the hardware manager
-* @param table the SMC DPM table structure to be populated
-* @return always 0
-*/
-
-static int fiji_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr,
- struct SMU73_Discrete_DpmTable *table)
-{
- int result;
-
- result = fiji_populate_cac_table(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "can not populate CAC voltage tables to SMC",
- return -EINVAL);
-
- return 0;
-}
-
-static int fiji_populate_ulv_level(struct pp_hwmgr *hwmgr,
- struct SMU73_Discrete_Ulv *state)
-{
- int result = 0;
-
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
- state->CcPwrDynRm = 0;
- state->CcPwrDynRm1 = 0;
-
- state->VddcOffset = (uint16_t) table_info->us_ulv_voltage_offset;
- state->VddcOffsetVid = (uint8_t)(table_info->us_ulv_voltage_offset *
- VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
-
- state->VddcPhase = 1;
-
- if (!result) {
- CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm);
- CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm1);
- CONVERT_FROM_HOST_TO_SMC_US(state->VddcOffset);
- }
- return result;
-}
-
-static int fiji_populate_ulv_state(struct pp_hwmgr *hwmgr,
- struct SMU73_Discrete_DpmTable *table)
-{
- return fiji_populate_ulv_level(hwmgr, &table->Ulv);
-}
-
-static int fiji_populate_smc_link_level(struct pp_hwmgr *hwmgr,
- struct SMU73_Discrete_DpmTable *table)
-{
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct smu7_dpm_table *dpm_table = &data->dpm_table;
- struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
- int i;
-
- /* Index (dpm_table->pcie_speed_table.count)
- * is reserved for PCIE boot level. */
- for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) {
- table->LinkLevel[i].PcieGenSpeed =
- (uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value;
- table->LinkLevel[i].PcieLaneCount = (uint8_t)encode_pcie_lane_width(
- dpm_table->pcie_speed_table.dpm_levels[i].param1);
- table->LinkLevel[i].EnabledForActivity = 1;
- table->LinkLevel[i].SPC = (uint8_t)(data->pcie_spc_cap & 0xff);
- table->LinkLevel[i].DownThreshold = PP_HOST_TO_SMC_UL(5);
- table->LinkLevel[i].UpThreshold = PP_HOST_TO_SMC_UL(30);
- }
-
- smu_data->smc_state_table.LinkLevelCount =
- (uint8_t)dpm_table->pcie_speed_table.count;
- data->dpm_level_enable_mask.pcie_dpm_enable_mask =
- phm_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
-
- return 0;
-}
-
-
-/**
-* Calculates the SCLK dividers using the provided engine clock
-*
-* @param hwmgr the address of the hardware manager
-* @param clock the engine clock to use to populate the structure
-* @param sclk the SMC SCLK structure to be populated
-*/
-static int fiji_calculate_sclk_params(struct pp_hwmgr *hwmgr,
- uint32_t clock, struct SMU73_Discrete_GraphicsLevel *sclk)
-{
- const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct pp_atomctrl_clock_dividers_vi dividers;
- uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL;
- uint32_t spll_func_cntl_3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
- uint32_t spll_func_cntl_4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
- uint32_t cg_spll_spread_spectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
- uint32_t cg_spll_spread_spectrum_2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
- uint32_t ref_clock;
- uint32_t ref_divider;
- uint32_t fbdiv;
- int result;
-
- /* get the engine clock dividers for this clock value */
- result = atomctrl_get_engine_pll_dividers_vi(hwmgr, clock, &dividers);
-
- PP_ASSERT_WITH_CODE(result == 0,
- "Error retrieving Engine Clock dividers from VBIOS.",
- return result);
-
- /* To get FBDIV we need to multiply this by 16384 and divide it by Fref. */
- ref_clock = atomctrl_get_reference_clock(hwmgr);
- ref_divider = 1 + dividers.uc_pll_ref_div;
-
- /* low 14 bits is fraction and high 12 bits is divider */
- fbdiv = dividers.ul_fb_div.ul_fb_divider & 0x3FFFFFF;
-
- /* SPLL_FUNC_CNTL setup */
- spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
- SPLL_REF_DIV, dividers.uc_pll_ref_div);
- spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
- SPLL_PDIV_A, dividers.uc_pll_post_div);
-
- /* SPLL_FUNC_CNTL_3 setup*/
- spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3, CG_SPLL_FUNC_CNTL_3,
- SPLL_FB_DIV, fbdiv);
-
- /* set to use fractional accumulation*/
- spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3, CG_SPLL_FUNC_CNTL_3,
- SPLL_DITHEN, 1);
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_EngineSpreadSpectrumSupport)) {
- struct pp_atomctrl_internal_ss_info ssInfo;
-
- uint32_t vco_freq = clock * dividers.uc_pll_post_div;
- if (!atomctrl_get_engine_clock_spread_spectrum(hwmgr,
- vco_freq, &ssInfo)) {
- /*
- * ss_info.speed_spectrum_percentage -- in unit of 0.01%
- * ss_info.speed_spectrum_rate -- in unit of khz
- *
- * clks = reference_clock * 10 / (REFDIV + 1) / speed_spectrum_rate / 2
- */
- uint32_t clk_s = ref_clock * 5 /
- (ref_divider * ssInfo.speed_spectrum_rate);
- /* clkv = 2 * D * fbdiv / NS */
- uint32_t clk_v = 4 * ssInfo.speed_spectrum_percentage *
- fbdiv / (clk_s * 10000);
-
- cg_spll_spread_spectrum = PHM_SET_FIELD(cg_spll_spread_spectrum,
- CG_SPLL_SPREAD_SPECTRUM, CLKS, clk_s);
- cg_spll_spread_spectrum = PHM_SET_FIELD(cg_spll_spread_spectrum,
- CG_SPLL_SPREAD_SPECTRUM, SSEN, 1);
- cg_spll_spread_spectrum_2 = PHM_SET_FIELD(cg_spll_spread_spectrum_2,
- CG_SPLL_SPREAD_SPECTRUM_2, CLKV, clk_v);
- }
- }
-
- sclk->SclkFrequency = clock;
- sclk->CgSpllFuncCntl3 = spll_func_cntl_3;
- sclk->CgSpllFuncCntl4 = spll_func_cntl_4;
- sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum;
- sclk->SpllSpreadSpectrum2 = cg_spll_spread_spectrum_2;
- sclk->SclkDid = (uint8_t)dividers.pll_post_divider;
-
- return 0;
-}
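/*
 * Editorial sketch, not part of the original change: the spread-spectrum
 * arithmetic above, pulled into a standalone helper so the fixed-point
 * scaling is easier to follow.  The helper name and parameters are
 * hypothetical; units follow the comment in the code above
 * (spread-spectrum rate in kHz, percentage in 0.01% steps).
 */
static void example_spll_ss_params(uint32_t ref_clock, uint32_t ref_divider,
				   uint32_t ss_rate_khz, uint32_t ss_pct_0_01,
				   uint32_t fbdiv,
				   uint32_t *clk_s, uint32_t *clk_v)
{
	/* clk_s = reference_clock * 10 / (REFDIV + 1) / rate / 2
	 *       = reference_clock * 5 / (ref_divider * rate)
	 */
	*clk_s = ref_clock * 5 / (ref_divider * ss_rate_khz);

	/* clk_v = 2 * D * fbdiv / NS, with D in 0.01% steps and the divide
	 * by 10000 folding the percentage scaling back out
	 */
	*clk_v = 4 * ss_pct_0_01 * fbdiv / (*clk_s * 10000);
}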
-
-/**
-* Populates a single SMC SCLK structure using the provided engine clock
-*
-* @param hwmgr the address of the hardware manager
-* @param clock the engine clock to use to populate the structure
-* @param level the SMC graphics level structure to be populated
-*/
-
-static int fiji_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
- uint32_t clock, uint16_t sclk_al_threshold,
- struct SMU73_Discrete_GraphicsLevel *level)
-{
- int result;
- /* PP_Clocks minClocks; */
- uint32_t threshold, mvdd;
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
- result = fiji_calculate_sclk_params(hwmgr, clock, level);
-
- /* populate graphics levels */
- result = fiji_get_dependency_volt_by_clk(hwmgr,
- table_info->vdd_dep_on_sclk, clock,
- (uint32_t *)(&level->MinVoltage), &mvdd);
- PP_ASSERT_WITH_CODE((0 == result),
- "can not find VDDC voltage value for "
- "VDDC engine clock dependency table",
- return result);
-
- level->SclkFrequency = clock;
- level->ActivityLevel = sclk_al_threshold;
- level->CcPwrDynRm = 0;
- level->CcPwrDynRm1 = 0;
- level->EnabledForActivity = 0;
- level->EnabledForThrottle = 1;
- level->UpHyst = 10;
- level->DownHyst = 0;
- level->VoltageDownHyst = 0;
- level->PowerThrottle = 0;
-
- threshold = clock * data->fast_watermark_threshold / 100;
-
- data->display_timing.min_clock_in_sr = hwmgr->display_config.min_core_set_clock_in_sr;
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep))
- level->DeepSleepDivId = smu7_get_sleep_divider_id_from_clock(clock,
- hwmgr->display_config.min_core_set_clock_in_sr);
-
-
- /* Default to slow, highest DPM level will be
- * set to PPSMC_DISPLAY_WATERMARK_LOW later.
- */
- level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
-
- CONVERT_FROM_HOST_TO_SMC_UL(level->MinVoltage);
- CONVERT_FROM_HOST_TO_SMC_UL(level->SclkFrequency);
- CONVERT_FROM_HOST_TO_SMC_US(level->ActivityLevel);
- CONVERT_FROM_HOST_TO_SMC_UL(level->CgSpllFuncCntl3);
- CONVERT_FROM_HOST_TO_SMC_UL(level->CgSpllFuncCntl4);
- CONVERT_FROM_HOST_TO_SMC_UL(level->SpllSpreadSpectrum);
- CONVERT_FROM_HOST_TO_SMC_UL(level->SpllSpreadSpectrum2);
- CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm);
- CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm1);
-
- return 0;
-}
-/**
-* Populates all SMC SCLK level structures based on the trimmed allowed DPM engine clock states
-*
-* @param hwmgr the address of the hardware manager
-*/
-int fiji_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
-{
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
-
- struct smu7_dpm_table *dpm_table = &data->dpm_table;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct phm_ppt_v1_pcie_table *pcie_table = table_info->pcie_table;
- uint8_t pcie_entry_cnt = (uint8_t) data->dpm_table.pcie_speed_table.count;
- int result = 0;
- uint32_t array = smu_data->smu7_data.dpm_table_start +
- offsetof(SMU73_Discrete_DpmTable, GraphicsLevel);
- uint32_t array_size = sizeof(struct SMU73_Discrete_GraphicsLevel) *
- SMU73_MAX_LEVELS_GRAPHICS;
- struct SMU73_Discrete_GraphicsLevel *levels =
- smu_data->smc_state_table.GraphicsLevel;
- uint32_t i, max_entry;
- uint8_t hightest_pcie_level_enabled = 0,
- lowest_pcie_level_enabled = 0,
- mid_pcie_level_enabled = 0,
- count = 0;
-
- for (i = 0; i < dpm_table->sclk_table.count; i++) {
- result = fiji_populate_single_graphic_level(hwmgr,
- dpm_table->sclk_table.dpm_levels[i].value,
- (uint16_t)smu_data->activity_target[i],
- &levels[i]);
- if (result)
- return result;
-
- /* Making sure only DPM level 0-1 have Deep Sleep Div ID populated. */
- if (i > 1)
- levels[i].DeepSleepDivId = 0;
- }
-
- /* Only enable level 0 for now.*/
- levels[0].EnabledForActivity = 1;
-
- /* set highest level watermark to high */
- levels[dpm_table->sclk_table.count - 1].DisplayWatermark =
- PPSMC_DISPLAY_WATERMARK_HIGH;
-
- smu_data->smc_state_table.GraphicsDpmLevelCount =
- (uint8_t)dpm_table->sclk_table.count;
- data->dpm_level_enable_mask.sclk_dpm_enable_mask =
- phm_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
-
- if (pcie_table != NULL) {
- PP_ASSERT_WITH_CODE((1 <= pcie_entry_cnt),
- "There must be 1 or more PCIE levels defined in PPTable.",
- return -EINVAL);
- max_entry = pcie_entry_cnt - 1;
- for (i = 0; i < dpm_table->sclk_table.count; i++)
- levels[i].pcieDpmLevel =
- (uint8_t) ((i < max_entry) ? i : max_entry);
- } else {
- while (data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
- ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
- (1 << (hightest_pcie_level_enabled + 1))) != 0))
- hightest_pcie_level_enabled++;
-
- while (data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
- ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
- (1 << lowest_pcie_level_enabled)) == 0))
- lowest_pcie_level_enabled++;
-
- while ((count < hightest_pcie_level_enabled) &&
- ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
- (1 << (lowest_pcie_level_enabled + 1 + count))) == 0))
- count++;
-
- mid_pcie_level_enabled = (lowest_pcie_level_enabled + 1 + count) <
- hightest_pcie_level_enabled ?
- (lowest_pcie_level_enabled + 1 + count) :
- hightest_pcie_level_enabled;
-
- /* set pcieDpmLevel to hightest_pcie_level_enabled */
- for (i = 2; i < dpm_table->sclk_table.count; i++)
- levels[i].pcieDpmLevel = hightest_pcie_level_enabled;
-
- /* set pcieDpmLevel to lowest_pcie_level_enabled */
- levels[0].pcieDpmLevel = lowest_pcie_level_enabled;
-
- /* set pcieDpmLevel to mid_pcie_level_enabled */
- levels[1].pcieDpmLevel = mid_pcie_level_enabled;
- }
- /* level count will send to smc once at init smc table and never change */
- result = smu7_copy_bytes_to_smc(hwmgr->smumgr, array, (uint8_t *)levels,
- (uint32_t)array_size, SMC_RAM_END);
-
- return result;
-}
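/*
 * Editorial note, not part of the original change: a worked example of the
 * enable-mask walk above, assuming pcie_dpm_enable_mask == 0x7 (PCIe levels
 * 0, 1 and 2 enabled):
 *
 *	hightest_pcie_level_enabled = 2  (bits 1 and 2 are set, bit 3 is not)
 *	lowest_pcie_level_enabled   = 0  (bit 0 is set)
 *	mid_pcie_level_enabled      = 1  (lowest + 1 + count, still below the highest)
 *
 * so SCLK level 0 gets the lowest PCIe level, level 1 the mid level, and
 * every higher SCLK level the highest PCIe level.
 */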
-
-
-/**
- * MCLK Frequency Ratio
- * SEQ_CG_RESP Bit[31:24] - 0x0
- * Bit[27:24] - DDR3 Frequency ratio
- * 0x0 <= 100MHz, 450 < 0x8 <= 500MHz
- * 100 < 0x1 <= 150MHz, 500 < 0x9 <= 550MHz
- * 150 < 0x2 <= 200MHz, 550 < 0xA <= 600MHz
- * 200 < 0x3 <= 250MHz, 600 < 0xB <= 650MHz
- * 250 < 0x4 <= 300MHz, 650 < 0xC <= 700MHz
- * 300 < 0x5 <= 350MHz, 700 < 0xD <= 750MHz
- * 350 < 0x6 <= 400MHz, 750 < 0xE <= 800MHz
- * 400 < 0x7 <= 450MHz, 800 < 0xF
- */
-static uint8_t fiji_get_mclk_frequency_ratio(uint32_t mem_clock)
-{
- if (mem_clock <= 10000)
- return 0x0;
- if (mem_clock <= 15000)
- return 0x1;
- if (mem_clock <= 20000)
- return 0x2;
- if (mem_clock <= 25000)
- return 0x3;
- if (mem_clock <= 30000)
- return 0x4;
- if (mem_clock <= 35000)
- return 0x5;
- if (mem_clock <= 40000)
- return 0x6;
- if (mem_clock <= 45000)
- return 0x7;
- if (mem_clock <= 50000)
- return 0x8;
- if (mem_clock <= 55000)
- return 0x9;
- if (mem_clock <= 60000)
- return 0xa;
- if (mem_clock <= 65000)
- return 0xb;
- if (mem_clock <= 70000)
- return 0xc;
- if (mem_clock <= 75000)
- return 0xd;
- if (mem_clock <= 80000)
- return 0xe;
- /* mem_clock > 800MHz */
- return 0xf;
-}
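/*
 * Editorial note, not part of the original change: mem_clock is in 10 kHz
 * units, so the thresholds above line up with the MHz table in the comment,
 * for example:
 *
 *	fiji_get_mclk_frequency_ratio(30000);	// 300 MHz -> 0x4
 *	fiji_get_mclk_frequency_ratio(80001);	// above 800 MHz -> 0xf
 */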
-
-/**
-* Populates the SMC MCLK structure using the provided memory clock
-*
-* @param hwmgr the address of the hardware manager
-* @param clock the memory clock to use to populate the structure
-* @param mclk the SMC MCLK structure to be populated
-*/
-static int fiji_calculate_mclk_params(struct pp_hwmgr *hwmgr,
- uint32_t clock, struct SMU73_Discrete_MemoryLevel *mclk)
-{
- struct pp_atomctrl_memory_clock_param mem_param;
- int result;
-
- result = atomctrl_get_memory_pll_dividers_vi(hwmgr, clock, &mem_param);
- PP_ASSERT_WITH_CODE((0 == result),
- "Failed to get Memory PLL Dividers.",
- );
-
-	/* Save the result data to the output memory level structure */
- mclk->MclkFrequency = clock;
- mclk->MclkDivider = (uint8_t)mem_param.mpll_post_divider;
- mclk->FreqRange = fiji_get_mclk_frequency_ratio(clock);
-
- return result;
-}
-
-static int fiji_populate_single_memory_level(struct pp_hwmgr *hwmgr,
- uint32_t clock, struct SMU73_Discrete_MemoryLevel *mem_level)
-{
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- int result = 0;
- uint32_t mclk_stutter_mode_threshold = 60000;
-
- if (table_info->vdd_dep_on_mclk) {
- result = fiji_get_dependency_volt_by_clk(hwmgr,
- table_info->vdd_dep_on_mclk, clock,
- (uint32_t *)(&mem_level->MinVoltage), &mem_level->MinMvdd);
- PP_ASSERT_WITH_CODE((0 == result),
- "can not find MinVddc voltage value from memory "
- "VDDC voltage dependency table", return result);
- }
-
- mem_level->EnabledForThrottle = 1;
- mem_level->EnabledForActivity = 0;
- mem_level->UpHyst = 0;
- mem_level->DownHyst = 100;
- mem_level->VoltageDownHyst = 0;
- mem_level->ActivityLevel = (uint16_t)data->mclk_activity_target;
- mem_level->StutterEnable = false;
-
- mem_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
-
-	/* enable stutter mode if all of the following conditions apply:
- * PECI_GetNumberOfActiveDisplays(hwmgr->pPECI,
- * &(data->DisplayTiming.numExistingDisplays));
- */
- data->display_timing.num_existing_displays = 1;
-
- if (mclk_stutter_mode_threshold &&
- (clock <= mclk_stutter_mode_threshold) &&
- (!data->is_uvd_enabled) &&
- (PHM_READ_FIELD(hwmgr->device, DPG_PIPE_STUTTER_CONTROL,
- STUTTER_ENABLE) & 0x1))
- mem_level->StutterEnable = true;
-
- result = fiji_calculate_mclk_params(hwmgr, clock, mem_level);
- if (!result) {
- CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinMvdd);
- CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MclkFrequency);
- CONVERT_FROM_HOST_TO_SMC_US(mem_level->ActivityLevel);
- CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinVoltage);
- }
- return result;
-}
-
-/**
-* Populates all SMC MCLK level structures based on the trimmed allowed DPM memory clock states
-*
-* @param hwmgr the address of the hardware manager
-*/
-int fiji_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
-{
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
- struct smu7_dpm_table *dpm_table = &data->dpm_table;
- int result;
- /* populate MCLK dpm table to SMU7 */
- uint32_t array = smu_data->smu7_data.dpm_table_start +
- offsetof(SMU73_Discrete_DpmTable, MemoryLevel);
- uint32_t array_size = sizeof(SMU73_Discrete_MemoryLevel) *
- SMU73_MAX_LEVELS_MEMORY;
- struct SMU73_Discrete_MemoryLevel *levels =
- smu_data->smc_state_table.MemoryLevel;
- uint32_t i;
-
- for (i = 0; i < dpm_table->mclk_table.count; i++) {
- PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value),
- "can not populate memory level as memory clock is zero",
- return -EINVAL);
- result = fiji_populate_single_memory_level(hwmgr,
- dpm_table->mclk_table.dpm_levels[i].value,
- &levels[i]);
- if (result)
- return result;
- }
-
- /* Only enable level 0 for now. */
- levels[0].EnabledForActivity = 1;
-
-	/* In order to prevent MC activity in stutter mode from pushing DPM up,
-	 * the UVD change complements this by putting the MCLK in
-	 * a higher state by default, such that we are not affected by
-	 * the up threshold or MCLK DPM latency.
- */
- levels[0].ActivityLevel = (uint16_t)data->mclk_dpm0_activity_target;
- CONVERT_FROM_HOST_TO_SMC_US(levels[0].ActivityLevel);
-
- smu_data->smc_state_table.MemoryDpmLevelCount =
- (uint8_t)dpm_table->mclk_table.count;
- data->dpm_level_enable_mask.mclk_dpm_enable_mask =
- phm_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
- /* set highest level watermark to high */
- levels[dpm_table->mclk_table.count - 1].DisplayWatermark =
- PPSMC_DISPLAY_WATERMARK_HIGH;
-
- /* level count will send to smc once at init smc table and never change */
- result = smu7_copy_bytes_to_smc(hwmgr->smumgr, array, (uint8_t *)levels,
- (uint32_t)array_size, SMC_RAM_END);
-
- return result;
-}
-
-
-/**
-* Populates the SMC MVDD structure using the provided memory clock.
-*
-* @param hwmgr the address of the hardware manager
-* @param mclk the MCLK value to be used in the decision if MVDD should be high or low.
-* @param smio_pat the SMIO pattern structure to be populated
-*/
-static int fiji_populate_mvdd_value(struct pp_hwmgr *hwmgr,
- uint32_t mclk, SMIO_Pattern *smio_pat)
-{
- const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- uint32_t i = 0;
-
- if (SMU7_VOLTAGE_CONTROL_NONE != data->mvdd_control) {
-		/* find the first MVDD entry whose clock is greater than or equal to the requested clock */
- for (i = 0; i < table_info->vdd_dep_on_mclk->count; i++) {
- if (mclk <= table_info->vdd_dep_on_mclk->entries[i].clk) {
- smio_pat->Voltage = data->mvdd_voltage_table.entries[i].value;
- break;
- }
- }
- PP_ASSERT_WITH_CODE(i < table_info->vdd_dep_on_mclk->count,
- "MVDD Voltage is outside the supported range.",
- return -EINVAL);
- } else
- return -EINVAL;
-
- return 0;
-}
-
-static int fiji_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
- SMU73_Discrete_DpmTable *table)
-{
- int result = 0;
- const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct pp_atomctrl_clock_dividers_vi dividers;
- SMIO_Pattern vol_level;
- uint32_t mvdd;
- uint16_t us_mvdd;
- uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL;
- uint32_t spll_func_cntl_2 = data->clock_registers.vCG_SPLL_FUNC_CNTL_2;
-
- table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
-
- if (!data->sclk_dpm_key_disabled) {
- /* Get MinVoltage and Frequency from DPM0,
- * already converted to SMC_UL */
- table->ACPILevel.SclkFrequency =
- data->dpm_table.sclk_table.dpm_levels[0].value;
- result = fiji_get_dependency_volt_by_clk(hwmgr,
- table_info->vdd_dep_on_sclk,
- table->ACPILevel.SclkFrequency,
- (uint32_t *)(&table->ACPILevel.MinVoltage), &mvdd);
- PP_ASSERT_WITH_CODE((0 == result),
- "Cannot find ACPI VDDC voltage value " \
- "in Clock Dependency Table",
- );
- } else {
- table->ACPILevel.SclkFrequency =
- data->vbios_boot_state.sclk_bootup_value;
- table->ACPILevel.MinVoltage =
- data->vbios_boot_state.vddc_bootup_value * VOLTAGE_SCALE;
- }
-
- /* get the engine clock dividers for this clock value */
- result = atomctrl_get_engine_pll_dividers_vi(hwmgr,
- table->ACPILevel.SclkFrequency, &dividers);
- PP_ASSERT_WITH_CODE(result == 0,
- "Error retrieving Engine Clock dividers from VBIOS.",
- return result);
-
- table->ACPILevel.SclkDid = (uint8_t)dividers.pll_post_divider;
- table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
- table->ACPILevel.DeepSleepDivId = 0;
-
- spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
- SPLL_PWRON, 0);
- spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
- SPLL_RESET, 1);
- spll_func_cntl_2 = PHM_SET_FIELD(spll_func_cntl_2, CG_SPLL_FUNC_CNTL_2,
- SCLK_MUX_SEL, 4);
-
- table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
- table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
- table->ACPILevel.CgSpllFuncCntl3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
- table->ACPILevel.CgSpllFuncCntl4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
- table->ACPILevel.SpllSpreadSpectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
- table->ACPILevel.SpllSpreadSpectrum2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
- table->ACPILevel.CcPwrDynRm = 0;
- table->ACPILevel.CcPwrDynRm1 = 0;
-
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags);
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkFrequency);
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.MinVoltage);
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl);
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl2);
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl3);
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl4);
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum);
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum2);
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm);
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1);
-
- if (!data->mclk_dpm_key_disabled) {
- /* Get MinVoltage and Frequency from DPM0, already converted to SMC_UL */
- table->MemoryACPILevel.MclkFrequency =
- data->dpm_table.mclk_table.dpm_levels[0].value;
- result = fiji_get_dependency_volt_by_clk(hwmgr,
- table_info->vdd_dep_on_mclk,
- table->MemoryACPILevel.MclkFrequency,
- (uint32_t *)(&table->MemoryACPILevel.MinVoltage), &mvdd);
- PP_ASSERT_WITH_CODE((0 == result),
- "Cannot find ACPI VDDCI voltage value in Clock Dependency Table",
- );
- } else {
- table->MemoryACPILevel.MclkFrequency =
- data->vbios_boot_state.mclk_bootup_value;
- table->MemoryACPILevel.MinVoltage =
- data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE;
- }
-
- us_mvdd = 0;
- if ((SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control) ||
- (data->mclk_dpm_key_disabled))
- us_mvdd = data->vbios_boot_state.mvdd_bootup_value;
- else {
- if (!fiji_populate_mvdd_value(hwmgr,
- data->dpm_table.mclk_table.dpm_levels[0].value,
- &vol_level))
- us_mvdd = vol_level.Voltage;
- }
-
- table->MemoryACPILevel.MinMvdd =
- PP_HOST_TO_SMC_UL(us_mvdd * VOLTAGE_SCALE);
-
- table->MemoryACPILevel.EnabledForThrottle = 0;
- table->MemoryACPILevel.EnabledForActivity = 0;
- table->MemoryACPILevel.UpHyst = 0;
- table->MemoryACPILevel.DownHyst = 100;
- table->MemoryACPILevel.VoltageDownHyst = 0;
- table->MemoryACPILevel.ActivityLevel =
- PP_HOST_TO_SMC_US((uint16_t)data->mclk_activity_target);
-
- table->MemoryACPILevel.StutterEnable = false;
- CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MclkFrequency);
- CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MinVoltage);
-
- return result;
-}
-
-static int fiji_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
- SMU73_Discrete_DpmTable *table)
-{
- int result = -EINVAL;
- uint8_t count;
- struct pp_atomctrl_clock_dividers_vi dividers;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
- table_info->mm_dep_table;
-
- table->VceLevelCount = (uint8_t)(mm_table->count);
- table->VceBootLevel = 0;
-
- for (count = 0; count < table->VceLevelCount; count++) {
- table->VceLevel[count].Frequency = mm_table->entries[count].eclk;
- table->VceLevel[count].MinVoltage = 0;
- table->VceLevel[count].MinVoltage |=
- (mm_table->entries[count].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
- table->VceLevel[count].MinVoltage |=
- ((mm_table->entries[count].vddc - VDDC_VDDCI_DELTA) *
- VOLTAGE_SCALE) << VDDCI_SHIFT;
- table->VceLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
-
-		/* retrieve divider value from VBIOS */
- result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
- table->VceLevel[count].Frequency, &dividers);
- PP_ASSERT_WITH_CODE((0 == result),
- "can not find divide id for VCE engine clock",
- return result);
-
- table->VceLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
-
- CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].Frequency);
- CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].MinVoltage);
- }
- return result;
-}
-
-static int fiji_populate_smc_acp_level(struct pp_hwmgr *hwmgr,
- SMU73_Discrete_DpmTable *table)
-{
- int result = -EINVAL;
- uint8_t count;
- struct pp_atomctrl_clock_dividers_vi dividers;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
- table_info->mm_dep_table;
-
- table->AcpLevelCount = (uint8_t)(mm_table->count);
- table->AcpBootLevel = 0;
-
- for (count = 0; count < table->AcpLevelCount; count++) {
- table->AcpLevel[count].Frequency = mm_table->entries[count].aclk;
- table->AcpLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
- VOLTAGE_SCALE) << VDDC_SHIFT;
- table->AcpLevel[count].MinVoltage |= ((mm_table->entries[count].vddc -
- VDDC_VDDCI_DELTA) * VOLTAGE_SCALE) << VDDCI_SHIFT;
- table->AcpLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
-
-		/* retrieve divider value from VBIOS */
- result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
- table->AcpLevel[count].Frequency, &dividers);
- PP_ASSERT_WITH_CODE((0 == result),
- "can not find divide id for engine clock", return result);
-
- table->AcpLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
-
- CONVERT_FROM_HOST_TO_SMC_UL(table->AcpLevel[count].Frequency);
- CONVERT_FROM_HOST_TO_SMC_UL(table->AcpLevel[count].MinVoltage);
- }
- return result;
-}
-
-static int fiji_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
- SMU73_Discrete_DpmTable *table)
-{
- int result = -EINVAL;
- uint8_t count;
- struct pp_atomctrl_clock_dividers_vi dividers;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
- table_info->mm_dep_table;
-
- table->SamuBootLevel = 0;
- table->SamuLevelCount = (uint8_t)(mm_table->count);
-
- for (count = 0; count < table->SamuLevelCount; count++) {
- /* not sure whether we need evclk or not */
- table->SamuLevel[count].MinVoltage = 0;
- table->SamuLevel[count].Frequency = mm_table->entries[count].samclock;
- table->SamuLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
- VOLTAGE_SCALE) << VDDC_SHIFT;
- table->SamuLevel[count].MinVoltage |= ((mm_table->entries[count].vddc -
- VDDC_VDDCI_DELTA) * VOLTAGE_SCALE) << VDDCI_SHIFT;
- table->SamuLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
-
-		/* retrieve divider value from VBIOS */
- result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
- table->SamuLevel[count].Frequency, &dividers);
- PP_ASSERT_WITH_CODE((0 == result),
- "can not find divide id for samu clock", return result);
-
- table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
-
- CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency);
- CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].MinVoltage);
- }
- return result;
-}
-
-static int fiji_populate_memory_timing_parameters(struct pp_hwmgr *hwmgr,
- int32_t eng_clock, int32_t mem_clock,
- struct SMU73_Discrete_MCArbDramTimingTableEntry *arb_regs)
-{
- uint32_t dram_timing;
- uint32_t dram_timing2;
- uint32_t burstTime;
- ULONG state, trrds, trrdl;
- int result;
-
- result = atomctrl_set_engine_dram_timings_rv770(hwmgr,
- eng_clock, mem_clock);
- PP_ASSERT_WITH_CODE(result == 0,
- "Error calling VBIOS to set DRAM_TIMING.", return result);
-
- dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
- dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
- burstTime = cgs_read_register(hwmgr->device, mmMC_ARB_BURST_TIME);
-
- state = PHM_GET_FIELD(burstTime, MC_ARB_BURST_TIME, STATE0);
- trrds = PHM_GET_FIELD(burstTime, MC_ARB_BURST_TIME, TRRDS0);
- trrdl = PHM_GET_FIELD(burstTime, MC_ARB_BURST_TIME, TRRDL0);
-
- arb_regs->McArbDramTiming = PP_HOST_TO_SMC_UL(dram_timing);
- arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dram_timing2);
- arb_regs->McArbBurstTime = (uint8_t)burstTime;
- arb_regs->TRRDS = (uint8_t)trrds;
- arb_regs->TRRDL = (uint8_t)trrdl;
-
- return 0;
-}
-
-static int fiji_program_memory_timing_parameters(struct pp_hwmgr *hwmgr)
-{
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
- struct SMU73_Discrete_MCArbDramTimingTable arb_regs;
- uint32_t i, j;
- int result = 0;
-
- for (i = 0; i < data->dpm_table.sclk_table.count; i++) {
- for (j = 0; j < data->dpm_table.mclk_table.count; j++) {
- result = fiji_populate_memory_timing_parameters(hwmgr,
- data->dpm_table.sclk_table.dpm_levels[i].value,
- data->dpm_table.mclk_table.dpm_levels[j].value,
- &arb_regs.entries[i][j]);
- if (result)
- break;
- }
- }
-
- if (!result)
- result = smu7_copy_bytes_to_smc(
- hwmgr->smumgr,
- smu_data->smu7_data.arb_table_start,
- (uint8_t *)&arb_regs,
- sizeof(SMU73_Discrete_MCArbDramTimingTable),
- SMC_RAM_END);
- return result;
-}
-
-static int fiji_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
- struct SMU73_Discrete_DpmTable *table)
-{
- int result = -EINVAL;
- uint8_t count;
- struct pp_atomctrl_clock_dividers_vi dividers;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
- table_info->mm_dep_table;
-
- table->UvdLevelCount = (uint8_t)(mm_table->count);
- table->UvdBootLevel = 0;
-
- for (count = 0; count < table->UvdLevelCount; count++) {
- table->UvdLevel[count].MinVoltage = 0;
- table->UvdLevel[count].VclkFrequency = mm_table->entries[count].vclk;
- table->UvdLevel[count].DclkFrequency = mm_table->entries[count].dclk;
- table->UvdLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
- VOLTAGE_SCALE) << VDDC_SHIFT;
- table->UvdLevel[count].MinVoltage |= ((mm_table->entries[count].vddc -
- VDDC_VDDCI_DELTA) * VOLTAGE_SCALE) << VDDCI_SHIFT;
- table->UvdLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
-
-		/* retrieve divider value from VBIOS */
- result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
- table->UvdLevel[count].VclkFrequency, &dividers);
- PP_ASSERT_WITH_CODE((0 == result),
- "can not find divide id for Vclk clock", return result);
-
- table->UvdLevel[count].VclkDivider = (uint8_t)dividers.pll_post_divider;
-
- result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
- table->UvdLevel[count].DclkFrequency, &dividers);
- PP_ASSERT_WITH_CODE((0 == result),
- "can not find divide id for Dclk clock", return result);
-
- table->UvdLevel[count].DclkDivider = (uint8_t)dividers.pll_post_divider;
-
- CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency);
- CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency);
- CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].MinVoltage);
-
- }
- return result;
-}
-
-static int fiji_populate_smc_boot_level(struct pp_hwmgr *hwmgr,
- struct SMU73_Discrete_DpmTable *table)
-{
- int result = 0;
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-
- table->GraphicsBootLevel = 0;
- table->MemoryBootLevel = 0;
-
- /* find boot level from dpm table */
- result = phm_find_boot_level(&(data->dpm_table.sclk_table),
- data->vbios_boot_state.sclk_bootup_value,
- (uint32_t *)&(table->GraphicsBootLevel));
-
- result = phm_find_boot_level(&(data->dpm_table.mclk_table),
- data->vbios_boot_state.mclk_bootup_value,
- (uint32_t *)&(table->MemoryBootLevel));
-
- table->BootVddc = data->vbios_boot_state.vddc_bootup_value *
- VOLTAGE_SCALE;
- table->BootVddci = data->vbios_boot_state.vddci_bootup_value *
- VOLTAGE_SCALE;
- table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value *
- VOLTAGE_SCALE;
-
- CONVERT_FROM_HOST_TO_SMC_US(table->BootVddc);
- CONVERT_FROM_HOST_TO_SMC_US(table->BootVddci);
- CONVERT_FROM_HOST_TO_SMC_US(table->BootMVdd);
-
- return 0;
-}
-
-static int fiji_populate_smc_initailial_state(struct pp_hwmgr *hwmgr)
-{
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- uint8_t count, level;
-
- count = (uint8_t)(table_info->vdd_dep_on_sclk->count);
- for (level = 0; level < count; level++) {
- if (table_info->vdd_dep_on_sclk->entries[level].clk >=
- data->vbios_boot_state.sclk_bootup_value) {
- smu_data->smc_state_table.GraphicsBootLevel = level;
- break;
- }
- }
-
- count = (uint8_t)(table_info->vdd_dep_on_mclk->count);
- for (level = 0; level < count; level++) {
- if (table_info->vdd_dep_on_mclk->entries[level].clk >=
- data->vbios_boot_state.mclk_bootup_value) {
- smu_data->smc_state_table.MemoryBootLevel = level;
- break;
- }
- }
-
- return 0;
-}
-
-static int fiji_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
-{
- uint32_t ro, efuse, efuse2, clock_freq, volt_without_cks,
- volt_with_cks, value;
- uint16_t clock_freq_u16;
- struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
- uint8_t type, i, j, cks_setting, stretch_amount, stretch_amount2,
- volt_offset = 0;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
- table_info->vdd_dep_on_sclk;
-
- stretch_amount = (uint8_t)table_info->cac_dtp_table->usClockStretchAmount;
-
-	/* Read the SMU efuse to calculate RO and determine
-	 * whether the part is SS or FF; if RO >= 1660 MHz, the part is FF.
- */
- efuse = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixSMU_EFUSE_0 + (146 * 4));
- efuse2 = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixSMU_EFUSE_0 + (148 * 4));
- efuse &= 0xFF000000;
- efuse = efuse >> 24;
- efuse2 &= 0xF;
-
- if (efuse2 == 1)
- ro = (2300 - 1350) * efuse / 255 + 1350;
- else
- ro = (2500 - 1000) * efuse / 255 + 1000;
-
- if (ro >= 1660)
- type = 0;
- else
- type = 1;
-
- /* Populate Stretch amount */
- smu_data->smc_state_table.ClockStretcherAmount = stretch_amount;
-
- /* Populate Sclk_CKS_masterEn0_7 and Sclk_voltageOffset */
- for (i = 0; i < sclk_table->count; i++) {
- smu_data->smc_state_table.Sclk_CKS_masterEn0_7 |=
- sclk_table->entries[i].cks_enable << i;
- volt_without_cks = (uint32_t)((14041 *
- (sclk_table->entries[i].clk/100) / 10000 + 3571 + 75 - ro) * 1000 /
- (4026 - (13924 * (sclk_table->entries[i].clk/100) / 10000)));
- volt_with_cks = (uint32_t)((13946 *
- (sclk_table->entries[i].clk/100) / 10000 + 3320 + 45 - ro) * 1000 /
- (3664 - (11454 * (sclk_table->entries[i].clk/100) / 10000)));
- if (volt_without_cks >= volt_with_cks)
- volt_offset = (uint8_t)(((volt_without_cks - volt_with_cks +
- sclk_table->entries[i].cks_voffset) * 100 / 625) + 1);
- smu_data->smc_state_table.Sclk_voltageOffset[i] = volt_offset;
- }
-
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
- STRETCH_ENABLE, 0x0);
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
- masterReset, 0x1);
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
- staticEnable, 0x1);
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
- masterReset, 0x0);
-
- /* Populate CKS Lookup Table */
- if (stretch_amount == 1 || stretch_amount == 2 || stretch_amount == 5)
- stretch_amount2 = 0;
- else if (stretch_amount == 3 || stretch_amount == 4)
- stretch_amount2 = 1;
- else {
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ClockStretcher);
- PP_ASSERT_WITH_CODE(false,
- "Stretch Amount in PPTable not supported\n",
- return -EINVAL);
- }
-
- value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixPWR_CKS_CNTL);
- value &= 0xFFC2FF87;
- smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].minFreq =
- fiji_clock_stretcher_lookup_table[stretch_amount2][0];
- smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].maxFreq =
- fiji_clock_stretcher_lookup_table[stretch_amount2][1];
- clock_freq_u16 = (uint16_t)(PP_SMC_TO_HOST_UL(smu_data->smc_state_table.
- GraphicsLevel[smu_data->smc_state_table.GraphicsDpmLevelCount - 1].
- SclkFrequency) / 100);
- if (fiji_clock_stretcher_lookup_table[stretch_amount2][0] <
- clock_freq_u16 &&
- fiji_clock_stretcher_lookup_table[stretch_amount2][1] >
- clock_freq_u16) {
- /* Program PWR_CKS_CNTL. CKS_USE_FOR_LOW_FREQ */
- value |= (fiji_clock_stretcher_lookup_table[stretch_amount2][3]) << 16;
- /* Program PWR_CKS_CNTL. CKS_LDO_REFSEL */
- value |= (fiji_clock_stretcher_lookup_table[stretch_amount2][2]) << 18;
- /* Program PWR_CKS_CNTL. CKS_STRETCH_AMOUNT */
- value |= (fiji_clock_stretch_amount_conversion
- [fiji_clock_stretcher_lookup_table[stretch_amount2][3]]
- [stretch_amount]) << 3;
- }
- CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.CKS_LOOKUPTable.
- CKS_LOOKUPTableEntry[0].minFreq);
- CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.CKS_LOOKUPTable.
- CKS_LOOKUPTableEntry[0].maxFreq);
- smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].setting =
- fiji_clock_stretcher_lookup_table[stretch_amount2][2] & 0x7F;
- smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].setting |=
- (fiji_clock_stretcher_lookup_table[stretch_amount2][3]) << 7;
-
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixPWR_CKS_CNTL, value);
-
- /* Populate DDT Lookup Table */
- for (i = 0; i < 4; i++) {
- /* Assign the minimum and maximum VID stored
- * in the last row of Clock Stretcher Voltage Table.
- */
- smu_data->smc_state_table.ClockStretcherDataTable.
- ClockStretcherDataTableEntry[i].minVID =
- (uint8_t) fiji_clock_stretcher_ddt_table[type][i][2];
- smu_data->smc_state_table.ClockStretcherDataTable.
- ClockStretcherDataTableEntry[i].maxVID =
- (uint8_t) fiji_clock_stretcher_ddt_table[type][i][3];
- /* Loop through each SCLK and check the frequency
- * to see if it lies within the frequency for clock stretcher.
- */
- for (j = 0; j < smu_data->smc_state_table.GraphicsDpmLevelCount; j++) {
- cks_setting = 0;
- clock_freq = PP_SMC_TO_HOST_UL(
- smu_data->smc_state_table.GraphicsLevel[j].SclkFrequency);
-			/* Check the allowed frequency against sclk level[j].
-			 * The SCLK's endianness has already been converted,
-			 * and it is in 10 kHz units,
-			 * as opposed to the data table, which is in MHz units.
- */
- if (clock_freq >=
- (fiji_clock_stretcher_ddt_table[type][i][0]) * 100) {
- cks_setting |= 0x2;
- if (clock_freq <
- (fiji_clock_stretcher_ddt_table[type][i][1]) * 100)
- cks_setting |= 0x1;
- }
- smu_data->smc_state_table.ClockStretcherDataTable.
- ClockStretcherDataTableEntry[i].setting |= cks_setting << (j * 2);
- }
- CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.
- ClockStretcherDataTable.
- ClockStretcherDataTableEntry[i].setting);
- }
-
- value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL);
- value &= 0xFFFFFFFE;
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL, value);
-
- return 0;
-}
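/*
 * Editorial note, not part of the original change: the efuse byte is mapped
 * linearly onto an RO range (1350..2300 when efuse2 == 1, 1000..2500
 * otherwise) and the result decides SS versus FF.  With an assumed efuse
 * value of 128 and efuse2 == 1:
 *
 *	ro = (2300 - 1350) * 128 / 255 + 1350 = 476 + 1350 = 1826
 *
 * which is >= 1660, so the part is treated as FF (type = 0).
 */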
-
-/**
-* Populates the SMC VRConfig field in DPM table.
-*
-* @param hwmgr the address of the hardware manager
-* @param table the SMC DPM table structure to be populated
-* @return always 0
-*/
-static int fiji_populate_vr_config(struct pp_hwmgr *hwmgr,
- struct SMU73_Discrete_DpmTable *table)
-{
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- uint16_t config;
-
- config = VR_MERGED_WITH_VDDC;
- table->VRConfig |= (config << VRCONF_VDDGFX_SHIFT);
-
- /* Set Vddc Voltage Controller */
- if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
- config = VR_SVI2_PLANE_1;
- table->VRConfig |= config;
- } else {
- PP_ASSERT_WITH_CODE(false,
- "VDDC should be on SVI2 control in merged mode!",
- );
- }
- /* Set Vddci Voltage Controller */
- if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
- config = VR_SVI2_PLANE_2; /* only in merged mode */
- table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
- } else if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
- config = VR_SMIO_PATTERN_1;
- table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
- } else {
- config = VR_STATIC_VOLTAGE;
- table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
- }
- /* Set Mvdd Voltage Controller */
- if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) {
- config = VR_SVI2_PLANE_2;
- table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
- } else if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
- config = VR_SMIO_PATTERN_2;
- table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
- } else {
- config = VR_STATIC_VOLTAGE;
- table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
- }
-
- return 0;
-}
-
-static int fiji_init_arb_table_index(struct pp_smumgr *smumgr)
-{
- struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(smumgr->backend);
- uint32_t tmp;
- int result;
-
- /* This is a read-modify-write on the first byte of the ARB table.
- * The first byte in the SMU73_Discrete_MCArbDramTimingTable structure
- * is the field 'current'.
- * This solution is ugly, but we never write the whole table only
- * individual fields in it.
- * In reality this field should not be in that structure
- * but in a soft register.
- */
- result = smu7_read_smc_sram_dword(smumgr,
- smu_data->smu7_data.arb_table_start, &tmp, SMC_RAM_END);
-
- if (result)
- return result;
-
- tmp &= 0x00FFFFFF;
- tmp |= ((uint32_t)MC_CG_ARB_FREQ_F1) << 24;
-
- return smu7_write_smc_sram_dword(smumgr,
- smu_data->smu7_data.arb_table_start, tmp, SMC_RAM_END);
-}
-
-static int fiji_save_default_power_profile(struct pp_hwmgr *hwmgr)
-{
- struct fiji_smumgr *data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
- struct SMU73_Discrete_GraphicsLevel *levels =
- data->smc_state_table.GraphicsLevel;
- unsigned min_level = 1;
-
- hwmgr->default_gfx_power_profile.activity_threshold =
- be16_to_cpu(levels[0].ActivityLevel);
- hwmgr->default_gfx_power_profile.up_hyst = levels[0].UpHyst;
- hwmgr->default_gfx_power_profile.down_hyst = levels[0].DownHyst;
- hwmgr->default_gfx_power_profile.type = AMD_PP_GFX_PROFILE;
-
- hwmgr->default_compute_power_profile = hwmgr->default_gfx_power_profile;
- hwmgr->default_compute_power_profile.type = AMD_PP_COMPUTE_PROFILE;
-
- /* Workaround compute SDMA instability: disable lowest SCLK
- * DPM level. Optimize compute power profile: Use only highest
- * 2 power levels (if more than 2 are available), Hysteresis:
- * 0ms up, 5ms down
- */
- if (data->smc_state_table.GraphicsDpmLevelCount > 2)
- min_level = data->smc_state_table.GraphicsDpmLevelCount - 2;
- else if (data->smc_state_table.GraphicsDpmLevelCount == 2)
- min_level = 1;
- else
- min_level = 0;
- hwmgr->default_compute_power_profile.min_sclk =
- be32_to_cpu(levels[min_level].SclkFrequency);
- hwmgr->default_compute_power_profile.up_hyst = 0;
- hwmgr->default_compute_power_profile.down_hyst = 5;
-
- hwmgr->gfx_power_profile = hwmgr->default_gfx_power_profile;
- hwmgr->compute_power_profile = hwmgr->default_compute_power_profile;
-
- return 0;
-}
-
-static int fiji_setup_dpm_led_config(struct pp_hwmgr *hwmgr)
-{
- pp_atomctrl_voltage_table param_led_dpm;
- int result = 0;
- u32 mask = 0;
-
- result = atomctrl_get_voltage_table_v3(hwmgr,
- VOLTAGE_TYPE_LEDDPM, VOLTAGE_OBJ_GPIO_LUT,
- &param_led_dpm);
- if (result == 0) {
- int i, j;
- u32 tmp = param_led_dpm.mask_low;
-
- for (i = 0, j = 0; i < 32; i++) {
- if (tmp & 1) {
- mask |= (i << (8 * j));
- if (++j >= 3)
- break;
- }
- tmp >>= 1;
- }
- }
- if (mask)
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_LedConfig,
- mask);
- return 0;
-}
-
-/**
-* Initializes the SMC table and uploads it
-*
-* @param hwmgr the address of the powerplay hardware manager.
-* @return 0 on success, otherwise an error code
-*/
-int fiji_init_smc_table(struct pp_hwmgr *hwmgr)
-{
- int result;
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct SMU73_Discrete_DpmTable *table = &(smu_data->smc_state_table);
- uint8_t i;
- struct pp_atomctrl_gpio_pin_assignment gpio_pin;
-
- fiji_initialize_power_tune_defaults(hwmgr);
-
- if (SMU7_VOLTAGE_CONTROL_NONE != data->voltage_control)
- fiji_populate_smc_voltage_tables(hwmgr, table);
-
- table->SystemFlags = 0;
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_AutomaticDCTransition))
- table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_StepVddc))
- table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
-
- if (data->is_memory_gddr5)
- table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
-
- if (data->ulv_supported && table_info->us_ulv_voltage_offset) {
- result = fiji_populate_ulv_state(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize ULV state!", return result);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_ULV_PARAMETER, 0x40035);
- }
-
- result = fiji_populate_smc_link_level(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize Link Level!", return result);
-
- result = fiji_populate_all_graphic_levels(hwmgr);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize Graphics Level!", return result);
-
- result = fiji_populate_all_memory_levels(hwmgr);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize Memory Level!", return result);
-
- result = fiji_populate_smc_acpi_level(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize ACPI Level!", return result);
-
- result = fiji_populate_smc_vce_level(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize VCE Level!", return result);
-
- result = fiji_populate_smc_acp_level(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize ACP Level!", return result);
-
- result = fiji_populate_smc_samu_level(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize SAMU Level!", return result);
-
- /* Since only the initial state is completely set up at this point
- * (the other states are just copies of the boot state) we only
- * need to populate the ARB settings for the initial state.
- */
- result = fiji_program_memory_timing_parameters(hwmgr);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to Write ARB settings for the initial state.", return result);
-
- result = fiji_populate_smc_uvd_level(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize UVD Level!", return result);
-
- result = fiji_populate_smc_boot_level(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize Boot Level!", return result);
-
- result = fiji_populate_smc_initailial_state(hwmgr);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize Boot State!", return result);
-
- result = fiji_populate_bapm_parameters_in_dpm_table(hwmgr);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to populate BAPM Parameters!", return result);
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ClockStretcher)) {
- result = fiji_populate_clock_stretcher_data_table(hwmgr);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to populate Clock Stretcher Data Table!",
- return result);
- }
-
- table->GraphicsVoltageChangeEnable = 1;
- table->GraphicsThermThrottleEnable = 1;
- table->GraphicsInterval = 1;
- table->VoltageInterval = 1;
- table->ThermalInterval = 1;
- table->TemperatureLimitHigh =
- table_info->cac_dtp_table->usTargetOperatingTemp *
- SMU7_Q88_FORMAT_CONVERSION_UNIT;
- table->TemperatureLimitLow =
- (table_info->cac_dtp_table->usTargetOperatingTemp - 1) *
- SMU7_Q88_FORMAT_CONVERSION_UNIT;
- table->MemoryVoltageChangeEnable = 1;
- table->MemoryInterval = 1;
- table->VoltageResponseTime = 0;
- table->PhaseResponseTime = 0;
- table->MemoryThermThrottleEnable = 1;
- table->PCIeBootLinkLevel = 0; /* 0:Gen1 1:Gen2 2:Gen3*/
- table->PCIeGenInterval = 1;
- table->VRConfig = 0;
-
- result = fiji_populate_vr_config(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to populate VRConfig setting!", return result);
-
- table->ThermGpio = 17;
- table->SclkStepSize = 0x4000;
-
- if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_VRHOT_GPIO_PINID, &gpio_pin)) {
- table->VRHotGpio = gpio_pin.uc_gpio_pin_bit_shift;
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_RegulatorHot);
- } else {
- table->VRHotGpio = SMU7_UNUSED_GPIO_PIN;
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_RegulatorHot);
- }
-
- if (atomctrl_get_pp_assign_pin(hwmgr, PP_AC_DC_SWITCH_GPIO_PINID,
- &gpio_pin)) {
- table->AcDcGpio = gpio_pin.uc_gpio_pin_bit_shift;
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_AutomaticDCTransition);
- } else {
- table->AcDcGpio = SMU7_UNUSED_GPIO_PIN;
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_AutomaticDCTransition);
- }
-
- /* Thermal Output GPIO */
- if (atomctrl_get_pp_assign_pin(hwmgr, THERMAL_INT_OUTPUT_GPIO_PINID,
- &gpio_pin)) {
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ThermalOutGPIO);
-
- table->ThermOutGpio = gpio_pin.uc_gpio_pin_bit_shift;
-
-		/* For polarity, read GPIOPAD_A with the assigned GPIO pin:
-		 * since the VBIOS programs this register to the 'inactive state',
-		 * the driver can then determine the 'active state' from it and
-		 * program the SMU with the correct polarity.
- */
- table->ThermOutPolarity = (0 == (cgs_read_register(hwmgr->device, mmGPIOPAD_A) &
- (1 << gpio_pin.uc_gpio_pin_bit_shift))) ? 1:0;
- table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_ONLY;
-
- /* if required, combine VRHot/PCC with thermal out GPIO */
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_RegulatorHot) &&
- phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_CombinePCCWithThermalSignal))
- table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_VRHOT;
- } else {
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ThermalOutGPIO);
- table->ThermOutGpio = 17;
- table->ThermOutPolarity = 1;
- table->ThermOutMode = SMU7_THERM_OUT_MODE_DISABLE;
- }
-
- for (i = 0; i < SMU73_MAX_ENTRIES_SMIO; i++)
- table->Smio[i] = PP_HOST_TO_SMC_UL(table->Smio[i]);
-
- CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags);
- CONVERT_FROM_HOST_TO_SMC_UL(table->VRConfig);
- CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask1);
- CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask2);
- CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize);
- CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh);
- CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow);
- CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime);
- CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime);
-
-	/* Upload all DPM data to SMC memory (DPM level, DPM level count, etc.) */
- result = smu7_copy_bytes_to_smc(hwmgr->smumgr,
- smu_data->smu7_data.dpm_table_start +
- offsetof(SMU73_Discrete_DpmTable, SystemFlags),
- (uint8_t *)&(table->SystemFlags),
- sizeof(SMU73_Discrete_DpmTable) - 3 * sizeof(SMU73_PIDController),
- SMC_RAM_END);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to upload dpm data to SMC memory!", return result);
-
- result = fiji_init_arb_table_index(hwmgr->smumgr);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to upload arb data to SMC memory!", return result);
-
- result = fiji_populate_pm_fuses(hwmgr);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to populate PM fuses to SMC memory!", return result);
-
- result = fiji_setup_dpm_led_config(hwmgr);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to setup dpm led config", return result);
-
- fiji_save_default_power_profile(hwmgr);
-
- return 0;
-}
-
-/**
-* Set up the fan table to control the fan using the SMC.
-* @param hwmgr the address of the powerplay hardware manager.
-* @return always 0; on failure the MicrocodeFanControl capability is cleared
-*/
-int fiji_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
-{
- struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
-
- SMU73_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
- uint32_t duty100;
- uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2;
- uint16_t fdo_min, slope1, slope2;
- uint32_t reference_clock;
- int res;
- uint64_t tmp64;
-
- if (hwmgr->thermal_controller.fanInfo.bNoFan) {
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_MicrocodeFanControl);
- return 0;
- }
-
- if (smu_data->smu7_data.fan_table_start == 0) {
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_MicrocodeFanControl);
- return 0;
- }
-
- duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- CG_FDO_CTRL1, FMAX_DUTY100);
-
- if (duty100 == 0) {
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_MicrocodeFanControl);
- return 0;
- }
-
- tmp64 = hwmgr->thermal_controller.advanceFanControlParameters.
- usPWMMin * duty100;
- do_div(tmp64, 10000);
- fdo_min = (uint16_t)tmp64;
-
- t_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usTMed -
- hwmgr->thermal_controller.advanceFanControlParameters.usTMin;
- t_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usTHigh -
- hwmgr->thermal_controller.advanceFanControlParameters.usTMed;
-
- pwm_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed -
- hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin;
- pwm_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh -
- hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed;
-
- slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
- slope2 = (uint16_t)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
-
- fan_table.TempMin = cpu_to_be16((50 + hwmgr->
- thermal_controller.advanceFanControlParameters.usTMin) / 100);
- fan_table.TempMed = cpu_to_be16((50 + hwmgr->
- thermal_controller.advanceFanControlParameters.usTMed) / 100);
- fan_table.TempMax = cpu_to_be16((50 + hwmgr->
- thermal_controller.advanceFanControlParameters.usTMax) / 100);
-
- fan_table.Slope1 = cpu_to_be16(slope1);
- fan_table.Slope2 = cpu_to_be16(slope2);
-
- fan_table.FdoMin = cpu_to_be16(fdo_min);
-
- fan_table.HystDown = cpu_to_be16(hwmgr->
- thermal_controller.advanceFanControlParameters.ucTHyst);
-
- fan_table.HystUp = cpu_to_be16(1);
-
- fan_table.HystSlope = cpu_to_be16(1);
-
- fan_table.TempRespLim = cpu_to_be16(5);
-
- reference_clock = smu7_get_xclk(hwmgr);
-
- fan_table.RefreshPeriod = cpu_to_be32((hwmgr->
- thermal_controller.advanceFanControlParameters.ulCycleDelay *
- reference_clock) / 1600);
-
- fan_table.FdoMax = cpu_to_be16((uint16_t)duty100);
-
- fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD(
- hwmgr->device, CGS_IND_REG__SMC,
- CG_MULT_THERMAL_CTRL, TEMP_SEL);
-
- res = smu7_copy_bytes_to_smc(hwmgr->smumgr, smu_data->smu7_data.fan_table_start,
- (uint8_t *)&fan_table, (uint32_t)sizeof(fan_table),
- SMC_RAM_END);
-
- if (!res && hwmgr->thermal_controller.
- advanceFanControlParameters.ucMinimumPWMLimit)
- res = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_SetFanMinPwm,
- hwmgr->thermal_controller.
- advanceFanControlParameters.ucMinimumPWMLimit);
-
- if (!res && hwmgr->thermal_controller.
- advanceFanControlParameters.ulMinFanSCLKAcousticLimit)
- res = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_SetFanSclkTarget,
- hwmgr->thermal_controller.
- advanceFanControlParameters.ulMinFanSCLKAcousticLimit);
-
- if (res)
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_MicrocodeFanControl);
-
- return 0;
-}
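/*
 * Editorial note, not part of the original change: in the slope1/slope2
 * computation above, the "+ 50" before the final "/ 100" rounds the scaled
 * ratio to the nearest integer.  With assumed example values duty100 = 255,
 * pwm_diff1 = 1000 and t_diff1 = 2000:
 *
 *	slope1 = (50 + (16 * 255 * 1000) / 2000) / 100
 *	       = (50 + 2040) / 100
 *	       = 20
 */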
-
-
-int fiji_thermal_avfs_enable(struct pp_hwmgr *hwmgr)
-{
- int ret;
- struct pp_smumgr *smumgr = (struct pp_smumgr *)(hwmgr->smumgr);
- struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend);
-
- if (smu_data->avfs.avfs_btc_status != AVFS_BTC_ENABLEAVFS)
- return 0;
-
- ret = smum_send_msg_to_smc(smumgr, PPSMC_MSG_EnableAvfs);
-
- if (!ret)
- /* If this param is not changed, this function could fire unnecessarily */
- smu_data->avfs.avfs_btc_status = AVFS_BTC_COMPLETED_PREVIOUSLY;
-
- return ret;
-}
-
-static int fiji_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
-{
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-
- if (data->need_update_smu7_dpm_table &
- (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_OD_UPDATE_MCLK))
- return fiji_program_memory_timing_parameters(hwmgr);
-
- return 0;
-}
-
-int fiji_update_sclk_threshold(struct pp_hwmgr *hwmgr)
-{
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
-
- int result = 0;
- uint32_t low_sclk_interrupt_threshold = 0;
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_SclkThrottleLowNotification)
- && (hwmgr->gfx_arbiter.sclk_threshold !=
- data->low_sclk_interrupt_threshold)) {
- data->low_sclk_interrupt_threshold =
- hwmgr->gfx_arbiter.sclk_threshold;
- low_sclk_interrupt_threshold =
- data->low_sclk_interrupt_threshold;
-
- CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold);
-
- result = smu7_copy_bytes_to_smc(
- hwmgr->smumgr,
- smu_data->smu7_data.dpm_table_start +
- offsetof(SMU73_Discrete_DpmTable,
- LowSclkInterruptThreshold),
- (uint8_t *)&low_sclk_interrupt_threshold,
- sizeof(uint32_t),
- SMC_RAM_END);
- }
- result = fiji_program_mem_timing_parameters(hwmgr);
- PP_ASSERT_WITH_CODE((result == 0),
- "Failed to program memory timing parameters!",
- );
- return result;
-}
-
-uint32_t fiji_get_offsetof(uint32_t type, uint32_t member)
-{
- switch (type) {
- case SMU_SoftRegisters:
- switch (member) {
- case HandshakeDisables:
- return offsetof(SMU73_SoftRegisters, HandshakeDisables);
- case VoltageChangeTimeout:
- return offsetof(SMU73_SoftRegisters, VoltageChangeTimeout);
- case AverageGraphicsActivity:
- return offsetof(SMU73_SoftRegisters, AverageGraphicsActivity);
- case PreVBlankGap:
- return offsetof(SMU73_SoftRegisters, PreVBlankGap);
- case VBlankTimeout:
- return offsetof(SMU73_SoftRegisters, VBlankTimeout);
- case UcodeLoadStatus:
- return offsetof(SMU73_SoftRegisters, UcodeLoadStatus);
- }
- case SMU_Discrete_DpmTable:
- switch (member) {
- case UvdBootLevel:
- return offsetof(SMU73_Discrete_DpmTable, UvdBootLevel);
- case VceBootLevel:
- return offsetof(SMU73_Discrete_DpmTable, VceBootLevel);
- case SamuBootLevel:
- return offsetof(SMU73_Discrete_DpmTable, SamuBootLevel);
- case LowSclkInterruptThreshold:
- return offsetof(SMU73_Discrete_DpmTable, LowSclkInterruptThreshold);
- }
- }
- pr_warn("can't get the offset of type %x member %x\n", type, member);
- return 0;
-}
-
-uint32_t fiji_get_mac_definition(uint32_t value)
-{
- switch (value) {
- case SMU_MAX_LEVELS_GRAPHICS:
- return SMU73_MAX_LEVELS_GRAPHICS;
- case SMU_MAX_LEVELS_MEMORY:
- return SMU73_MAX_LEVELS_MEMORY;
- case SMU_MAX_LEVELS_LINK:
- return SMU73_MAX_LEVELS_LINK;
- case SMU_MAX_ENTRIES_SMIO:
- return SMU73_MAX_ENTRIES_SMIO;
- case SMU_MAX_LEVELS_VDDC:
- return SMU73_MAX_LEVELS_VDDC;
- case SMU_MAX_LEVELS_VDDGFX:
- return SMU73_MAX_LEVELS_VDDGFX;
- case SMU_MAX_LEVELS_VDDCI:
- return SMU73_MAX_LEVELS_VDDCI;
- case SMU_MAX_LEVELS_MVDD:
- return SMU73_MAX_LEVELS_MVDD;
- }
-
- pr_warn("can't get the mac of %x\n", value);
- return 0;
-}
-
-
-static int fiji_update_uvd_smc_table(struct pp_hwmgr *hwmgr)
-{
- struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
- uint32_t mm_boot_level_offset, mm_boot_level_value;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
- smu_data->smc_state_table.UvdBootLevel = 0;
- if (table_info->mm_dep_table->count > 0)
- smu_data->smc_state_table.UvdBootLevel =
- (uint8_t) (table_info->mm_dep_table->count - 1);
- mm_boot_level_offset = smu_data->smu7_data.dpm_table_start + offsetof(SMU73_Discrete_DpmTable,
- UvdBootLevel);
- mm_boot_level_offset /= 4;
- mm_boot_level_offset *= 4;
- mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, mm_boot_level_offset);
- mm_boot_level_value &= 0x00FFFFFF;
- mm_boot_level_value |= smu_data->smc_state_table.UvdBootLevel << 24;
- cgs_write_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
-
- if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_UVDDPM) ||
- phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_StablePState))
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_UVDDPM_SetEnabledMask,
- (uint32_t)(1 << smu_data->smc_state_table.UvdBootLevel));
- return 0;
-}
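/*
 * Editorial sketch, not part of the original change: the divide-then-multiply
 * by 4 above rounds the byte offset of UvdBootLevel down to its containing
 * dword, and the 0x00FFFFFF mask plus the shift by 24 then rewrite only the
 * top byte of that dword.  A minimal helper showing the same pattern, with a
 * hypothetical name:
 */
static uint32_t example_replace_top_byte(uint32_t dword, uint8_t boot_level)
{
	/* keep the low three bytes, replace the high byte */
	return (dword & 0x00FFFFFF) | ((uint32_t)boot_level << 24);
}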
-
-static int fiji_update_vce_smc_table(struct pp_hwmgr *hwmgr)
-{
- struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
- uint32_t mm_boot_level_offset, mm_boot_level_value;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_StablePState))
- smu_data->smc_state_table.VceBootLevel =
- (uint8_t) (table_info->mm_dep_table->count - 1);
- else
- smu_data->smc_state_table.VceBootLevel = 0;
-
- mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
- offsetof(SMU73_Discrete_DpmTable, VceBootLevel);
- mm_boot_level_offset /= 4;
- mm_boot_level_offset *= 4;
- mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, mm_boot_level_offset);
- mm_boot_level_value &= 0xFF00FFFF;
- mm_boot_level_value |= smu_data->smc_state_table.VceBootLevel << 16;
- cgs_write_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState))
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_VCEDPM_SetEnabledMask,
- (uint32_t)1 << smu_data->smc_state_table.VceBootLevel);
- return 0;
-}
-
-static int fiji_update_samu_smc_table(struct pp_hwmgr *hwmgr)
-{
- struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
- uint32_t mm_boot_level_offset, mm_boot_level_value;
-
-
- smu_data->smc_state_table.SamuBootLevel = 0;
- mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
- offsetof(SMU73_Discrete_DpmTable, SamuBootLevel);
-
- mm_boot_level_offset /= 4;
- mm_boot_level_offset *= 4;
- mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, mm_boot_level_offset);
- mm_boot_level_value &= 0xFFFFFF00;
- mm_boot_level_value |= smu_data->smc_state_table.SamuBootLevel << 0;
- cgs_write_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_StablePState))
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_SAMUDPM_SetEnabledMask,
- (uint32_t)(1 << smu_data->smc_state_table.SamuBootLevel));
- return 0;
-}
-
-int fiji_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type)
-{
- switch (type) {
- case SMU_UVD_TABLE:
- fiji_update_uvd_smc_table(hwmgr);
- break;
- case SMU_VCE_TABLE:
- fiji_update_vce_smc_table(hwmgr);
- break;
- case SMU_SAMU_TABLE:
- fiji_update_samu_smc_table(hwmgr);
- break;
- default:
- break;
- }
- return 0;
-}
-
-
-/**
-* Get the location of various tables inside the FW image.
-*
-* @param hwmgr the address of the powerplay hardware manager.
-* @return always 0
-*/
-int fiji_process_firmware_header(struct pp_hwmgr *hwmgr)
-{
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
- uint32_t tmp;
- int result;
- bool error = false;
-
- result = smu7_read_smc_sram_dword(hwmgr->smumgr,
- SMU7_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU73_Firmware_Header, DpmTable),
- &tmp, SMC_RAM_END);
-
- if (0 == result)
- smu_data->smu7_data.dpm_table_start = tmp;
-
- error |= (0 != result);
-
- result = smu7_read_smc_sram_dword(hwmgr->smumgr,
- SMU7_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU73_Firmware_Header, SoftRegisters),
- &tmp, SMC_RAM_END);
-
- if (!result) {
- data->soft_regs_start = tmp;
- smu_data->smu7_data.soft_regs_start = tmp;
- }
-
- error |= (0 != result);
-
- result = smu7_read_smc_sram_dword(hwmgr->smumgr,
- SMU7_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU73_Firmware_Header, mcRegisterTable),
- &tmp, SMC_RAM_END);
-
- if (!result)
- smu_data->smu7_data.mc_reg_table_start = tmp;
-
- result = smu7_read_smc_sram_dword(hwmgr->smumgr,
- SMU7_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU73_Firmware_Header, FanTable),
- &tmp, SMC_RAM_END);
-
- if (!result)
- smu_data->smu7_data.fan_table_start = tmp;
-
- error |= (0 != result);
-
- result = smu7_read_smc_sram_dword(hwmgr->smumgr,
- SMU7_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU73_Firmware_Header, mcArbDramTimingTable),
- &tmp, SMC_RAM_END);
-
- if (!result)
- smu_data->smu7_data.arb_table_start = tmp;
-
- error |= (0 != result);
-
- result = smu7_read_smc_sram_dword(hwmgr->smumgr,
- SMU7_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU73_Firmware_Header, Version),
- &tmp, SMC_RAM_END);
-
- if (!result)
- hwmgr->microcode_version_info.SMC = tmp;
-
- error |= (0 != result);
-
- return error ? -1 : 0;
-}
-
-int fiji_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
-{
-
- /* Program additional LP registers
- * that are no longer programmed by VBIOS
- */
- cgs_write_register(hwmgr->device, mmMC_SEQ_RAS_TIMING_LP,
- cgs_read_register(hwmgr->device, mmMC_SEQ_RAS_TIMING));
- cgs_write_register(hwmgr->device, mmMC_SEQ_CAS_TIMING_LP,
- cgs_read_register(hwmgr->device, mmMC_SEQ_CAS_TIMING));
- cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2_LP,
- cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2));
- cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1_LP,
- cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1));
- cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0_LP,
- cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0));
- cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1_LP,
- cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1));
- cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_TIMING_LP,
- cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_TIMING));
-
- return 0;
-}
-
-bool fiji_is_dpm_running(struct pp_hwmgr *hwmgr)
-{
- return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device,
- CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON))
- ? true : false;
-}
-
-int fiji_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr,
- struct amd_pp_profile *request)
-{
- struct fiji_smumgr *smu_data = (struct fiji_smumgr *)
- (hwmgr->smumgr->backend);
- struct SMU73_Discrete_GraphicsLevel *levels =
- smu_data->smc_state_table.GraphicsLevel;
- uint32_t array = smu_data->smu7_data.dpm_table_start +
- offsetof(SMU73_Discrete_DpmTable, GraphicsLevel);
- uint32_t array_size = sizeof(struct SMU73_Discrete_GraphicsLevel) *
- SMU73_MAX_LEVELS_GRAPHICS;
- uint32_t i;
-
- for (i = 0; i < smu_data->smc_state_table.GraphicsDpmLevelCount; i++) {
- levels[i].ActivityLevel =
- cpu_to_be16(request->activity_threshold);
- levels[i].EnabledForActivity = 1;
- levels[i].UpHyst = request->up_hyst;
- levels[i].DownHyst = request->down_hyst;
- }
-
- return smu7_copy_bytes_to_smc(hwmgr->smumgr, array, (uint8_t *)levels,
- array_size, SMC_RAM_END);
-}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.h b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.h
deleted file mode 100644
index d9c72d992e30..000000000000
--- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.h
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#ifndef FIJI_SMC_H
-#define FIJI_SMC_H
-
-#include "smumgr.h"
-#include "smu73.h"
-
-struct fiji_pt_defaults {
- uint8_t SviLoadLineEn;
- uint8_t SviLoadLineVddC;
- uint8_t TDC_VDDC_ThrottleReleaseLimitPerc;
- uint8_t TDC_MAWt;
- uint8_t TdcWaterfallCtl;
- uint8_t DTEAmbientTempBase;
-};
-
-int fiji_populate_all_graphic_levels(struct pp_hwmgr *hwmgr);
-int fiji_populate_all_memory_levels(struct pp_hwmgr *hwmgr);
-int fiji_init_smc_table(struct pp_hwmgr *hwmgr);
-int fiji_thermal_setup_fan_table(struct pp_hwmgr *hwmgr);
-int fiji_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type);
-int fiji_update_sclk_threshold(struct pp_hwmgr *hwmgr);
-uint32_t fiji_get_offsetof(uint32_t type, uint32_t member);
-uint32_t fiji_get_mac_definition(uint32_t value);
-int fiji_process_firmware_header(struct pp_hwmgr *hwmgr);
-int fiji_initialize_mc_reg_table(struct pp_hwmgr *hwmgr);
-bool fiji_is_dpm_running(struct pp_hwmgr *hwmgr);
-int fiji_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr,
- struct amd_pp_profile *request);
-int fiji_thermal_avfs_enable(struct pp_hwmgr *hwmgr);
-#endif
-
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
index 6ae948fc524f..f572beff197f 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
@@ -23,6 +23,7 @@
#include "pp_debug.h"
#include "smumgr.h"
+#include "smu7_dyn_defaults.h"
#include "smu73.h"
#include "smu_ucode_xfer_vi.h"
#include "fiji_smumgr.h"
@@ -37,14 +38,54 @@
#include "gca/gfx_8_0_d.h"
#include "bif/bif_5_0_d.h"
#include "bif/bif_5_0_sh_mask.h"
-#include "fiji_pwrvirus.h"
-#include "fiji_smc.h"
+#include "dce/dce_10_0_d.h"
+#include "dce/dce_10_0_sh_mask.h"
+#include "hardwaremanager.h"
+#include "cgs_common.h"
+#include "atombios.h"
+#include "pppcielanes.h"
+#include "hwmgr.h"
+#include "smu7_hwmgr.h"
+
#define AVFS_EN_MSB 1568
#define AVFS_EN_LSB 1568
#define FIJI_SMC_SIZE 0x20000
+#define VOLTAGE_SCALE 4
+#define POWERTUNE_DEFAULT_SET_MAX 1
+#define VOLTAGE_VID_OFFSET_SCALE1 625
+#define VOLTAGE_VID_OFFSET_SCALE2 100
+#define VDDC_VDDCI_DELTA 300
+#define MC_CG_ARB_FREQ_F1 0x0b
+
+/* [2.5%,~2.5%] Clock stretched is multiple of 2.5% vs
+ * not and [Fmin, Fmax, LDO_REFSEL, USE_FOR_LOW_FREQ]
+ */
+static const uint16_t fiji_clock_stretcher_lookup_table[2][4] = {
+ {600, 1050, 3, 0}, {600, 1050, 6, 1} };
+
+/* [FF, SS] type, [] 4 voltage ranges, and
+ * [Floor Freq, Boundary Freq, VID min , VID max]
+ */
+static const uint32_t fiji_clock_stretcher_ddt_table[2][4][4] = {
+ { {265, 529, 120, 128}, {325, 650, 96, 119}, {430, 860, 32, 95}, {0, 0, 0, 31} },
+ { {275, 550, 104, 112}, {319, 638, 96, 103}, {360, 720, 64, 95}, {384, 768, 32, 63} } };
+
+/* [Use_For_Low_freq] value, [0%, 5%, 10%, 7.14%, 14.28%, 20%]
+ * (coming from PWR_CKS_CNTL.stretch_amount reg spec)
+ */
+static const uint8_t fiji_clock_stretch_amount_conversion[2][6] = {
+ {0, 1, 3, 2, 4, 5}, {0, 2, 4, 5, 6, 5} };
+
+static const struct fiji_pt_defaults fiji_power_tune_data_set_array[POWERTUNE_DEFAULT_SET_MAX] = {
+ /*sviLoadLIneEn, SviLoadLineVddC, TDC_VDDC_ThrottleReleaseLimitPerc */
+ {1, 0xF, 0xFD,
+ /* TDC_MAWt, TdcWaterfallCtl, DTEAmbientTempBase */
+ 0x19, 5, 45}
+};
+
static const struct SMU73_Discrete_GraphicsLevel avfs_graphics_level[8] = {
/* Min Sclk pcie DeepSleep Activity CgSpll CgSpll spllSpread SpllSpread CcPwr CcPwr Sclk Display Enabled Enabled Voltage Power */
/* Voltage, Frequency, DpmLevel, DivId, Level, FuncCntl3, FuncCntl4, Spectrum, Spectrum2, DynRm, DynRm1 Did, Watermark, ForActivity, ForThrottle, UpHyst, DownHyst, DownHyst, Throttle */
@@ -58,147 +99,114 @@ static const struct SMU73_Discrete_GraphicsLevel avfs_graphics_level[8] = {
{ 0xf811d047, 0x80380100, 0x01, 0x00, 0x1e00, 0x00000610, 0x87020000, 0x21680000, 0x12000000, 0, 0, 0x0c, 0x01, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00 }
};
-static int fiji_start_smu_in_protection_mode(struct pp_smumgr *smumgr)
+static int fiji_start_smu_in_protection_mode(struct pp_hwmgr *hwmgr)
{
int result = 0;
/* Wait for smc boot up */
- /* SMUM_WAIT_INDIRECT_FIELD_UNEQUAL(smumgr, SMC_IND,
+ /* PHM_WAIT_INDIRECT_FIELD_UNEQUAL(hwmgr, SMC_IND,
RCU_UC_EVENTS, boot_seq_done, 0); */
- SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
SMC_SYSCON_RESET_CNTL, rst_reg, 1);
- result = smu7_upload_smu_firmware_image(smumgr);
+ result = smu7_upload_smu_firmware_image(hwmgr);
if (result)
return result;
/* Clear status */
- cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC,
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
ixSMU_STATUS, 0);
- SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0);
/* De-assert reset */
- SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
SMC_SYSCON_RESET_CNTL, rst_reg, 0);
	/* Wait for ROM firmware to initialize interrupt handler */
- /*SMUM_WAIT_VFPF_INDIRECT_REGISTER(smumgr, SMC_IND,
+ /*SMUM_WAIT_VFPF_INDIRECT_REGISTER(hwmgr, SMC_IND,
SMC_INTR_CNTL_MASK_0, 0x10040, 0xFFFFFFFF); */
/* Set SMU Auto Start */
- SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
SMU_INPUT_DATA, AUTO_START, 1);
/* Clear firmware interrupt enable flag */
- cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC,
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
ixFIRMWARE_FLAGS, 0);
- SMUM_WAIT_VFPF_INDIRECT_FIELD(smumgr, SMC_IND, RCU_UC_EVENTS,
+ PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND, RCU_UC_EVENTS,
INTERRUPTS_ENABLED, 1);
- cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, 0x20000);
- cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, PPSMC_MSG_Test);
- SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
+ cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, 0x20000);
+ cgs_write_register(hwmgr->device, mmSMC_MESSAGE_0, PPSMC_MSG_Test);
+ PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0);
/* Wait for done bit to be set */
- SMUM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(smumgr, SMC_IND,
+ PHM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(hwmgr, SMC_IND,
SMU_STATUS, SMU_DONE, 0);
/* Check pass/failed indicator */
- if (SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ if (PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
SMU_STATUS, SMU_PASS) != 1) {
PP_ASSERT_WITH_CODE(false,
"SMU Firmware start failed!", return -1);
}
/* Wait for firmware to initialize */
- SMUM_WAIT_VFPF_INDIRECT_FIELD(smumgr, SMC_IND,
+ PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND,
FIRMWARE_FLAGS, INTERRUPTS_ENABLED, 1);
return result;
}
-static int fiji_start_smu_in_non_protection_mode(struct pp_smumgr *smumgr)
+static int fiji_start_smu_in_non_protection_mode(struct pp_hwmgr *hwmgr)
{
int result = 0;
/* wait for smc boot up */
- SMUM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(smumgr, SMC_IND,
+ PHM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(hwmgr, SMC_IND,
RCU_UC_EVENTS, boot_seq_done, 0);
/* Clear firmware interrupt enable flag */
- cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC,
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
ixFIRMWARE_FLAGS, 0);
/* Assert reset */
- SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
SMC_SYSCON_RESET_CNTL, rst_reg, 1);
- result = smu7_upload_smu_firmware_image(smumgr);
+ result = smu7_upload_smu_firmware_image(hwmgr);
if (result)
return result;
/* Set smc instruct start point at 0x0 */
- smu7_program_jump_on_start(smumgr);
+ smu7_program_jump_on_start(hwmgr);
/* Enable clock */
- SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0);
/* De-assert reset */
- SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
SMC_SYSCON_RESET_CNTL, rst_reg, 0);
/* Wait for firmware to initialize */
- SMUM_WAIT_VFPF_INDIRECT_FIELD(smumgr, SMC_IND,
+ PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND,
FIRMWARE_FLAGS, INTERRUPTS_ENABLED, 1);
return result;
}
-static int fiji_setup_pwr_virus(struct pp_smumgr *smumgr)
-{
- int i;
- int result = -EINVAL;
- uint32_t reg, data;
-
- const PWR_Command_Table *pvirus = PwrVirusTable;
- struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend);
-
- for (i = 0; i < PWR_VIRUS_TABLE_SIZE; i++) {
- switch (pvirus->command) {
- case PwrCmdWrite:
- reg = pvirus->reg;
- data = pvirus->data;
- cgs_write_register(smumgr->device, reg, data);
- break;
-
- case PwrCmdEnd:
- result = 0;
- break;
-
- default:
- pr_info("Table Exit with Invalid Command!");
- smu_data->avfs.avfs_btc_status = AVFS_BTC_VIRUS_FAIL;
- result = -EINVAL;
- break;
- }
- pvirus++;
- }
-
- return result;
-}
-
-static int fiji_start_avfs_btc(struct pp_smumgr *smumgr)
+static int fiji_start_avfs_btc(struct pp_hwmgr *hwmgr)
{
int result = 0;
- struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend);
+ struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
if (0 != smu_data->avfs.avfs_btc_param) {
- if (0 != smu7_send_msg_to_smc_with_parameter(smumgr,
+ if (0 != smu7_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_PerformBtc, smu_data->avfs.avfs_btc_param)) {
pr_info("[AVFS][Fiji_PerformBtc] PerformBTC SMU msg failed");
result = -EINVAL;
@@ -206,23 +214,23 @@ static int fiji_start_avfs_btc(struct pp_smumgr *smumgr)
}
/* Soft-Reset to reset the engine before loading uCode */
/* halt */
- cgs_write_register(smumgr->device, mmCP_MEC_CNTL, 0x50000000);
+ cgs_write_register(hwmgr->device, mmCP_MEC_CNTL, 0x50000000);
/* reset everything */
- cgs_write_register(smumgr->device, mmGRBM_SOFT_RESET, 0xffffffff);
+ cgs_write_register(hwmgr->device, mmGRBM_SOFT_RESET, 0xffffffff);
/* clear reset */
- cgs_write_register(smumgr->device, mmGRBM_SOFT_RESET, 0);
+ cgs_write_register(hwmgr->device, mmGRBM_SOFT_RESET, 0);
return result;
}
-static int fiji_setup_graphics_level_structure(struct pp_smumgr *smumgr)
+static int fiji_setup_graphics_level_structure(struct pp_hwmgr *hwmgr)
{
int32_t vr_config;
uint32_t table_start;
uint32_t level_addr, vr_config_addr;
uint32_t level_size = sizeof(avfs_graphics_level);
- PP_ASSERT_WITH_CODE(0 == smu7_read_smc_sram_dword(smumgr,
+ PP_ASSERT_WITH_CODE(0 == smu7_read_smc_sram_dword(hwmgr,
SMU7_FIRMWARE_HEADER_LOCATION +
offsetof(SMU73_Firmware_Header, DpmTable),
&table_start, 0x40000),
@@ -237,7 +245,7 @@ static int fiji_setup_graphics_level_structure(struct pp_smumgr *smumgr)
vr_config_addr = table_start +
offsetof(SMU73_Discrete_DpmTable, VRConfig);
- PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(smumgr, vr_config_addr,
+ PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(hwmgr, vr_config_addr,
(uint8_t *)&vr_config, sizeof(int32_t), 0x40000),
"[AVFS][Fiji_SetupGfxLvlStruct] Problems copying "
"vr_config value over to SMC",
@@ -245,7 +253,7 @@ static int fiji_setup_graphics_level_structure(struct pp_smumgr *smumgr)
level_addr = table_start + offsetof(SMU73_Discrete_DpmTable, GraphicsLevel);
- PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(smumgr, level_addr,
+ PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(hwmgr, level_addr,
(uint8_t *)(&avfs_graphics_level), level_size, 0x40000),
"[AVFS][Fiji_SetupGfxLvlStruct] Copying of DPM table failed!",
return -1;);
@@ -253,9 +261,9 @@ static int fiji_setup_graphics_level_structure(struct pp_smumgr *smumgr)
return 0;
}
-static int fiji_avfs_event_mgr(struct pp_smumgr *smumgr, bool smu_started)
+static int fiji_avfs_event_mgr(struct pp_hwmgr *hwmgr, bool smu_started)
{
- struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend);
+ struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
switch (smu_data->avfs.avfs_btc_status) {
case AVFS_BTC_COMPLETED_PREVIOUSLY:
@@ -265,17 +273,17 @@ static int fiji_avfs_event_mgr(struct pp_smumgr *smumgr, bool smu_started)
if (!smu_started)
break;
smu_data->avfs.avfs_btc_status = AVFS_BTC_FAILED;
- PP_ASSERT_WITH_CODE(0 == fiji_setup_graphics_level_structure(smumgr),
+ PP_ASSERT_WITH_CODE(0 == fiji_setup_graphics_level_structure(hwmgr),
"[AVFS][fiji_avfs_event_mgr] Could not Copy Graphics Level"
" table over to SMU",
return -EINVAL;);
smu_data->avfs.avfs_btc_status = AVFS_BTC_VIRUS_FAIL;
- PP_ASSERT_WITH_CODE(0 == fiji_setup_pwr_virus(smumgr),
+ PP_ASSERT_WITH_CODE(0 == smu7_setup_pwr_virus(hwmgr),
"[AVFS][fiji_avfs_event_mgr] Could not setup "
"Pwr Virus for AVFS ",
return -EINVAL;);
smu_data->avfs.avfs_btc_status = AVFS_BTC_FAILED;
- PP_ASSERT_WITH_CODE(0 == fiji_start_avfs_btc(smumgr),
+ PP_ASSERT_WITH_CODE(0 == fiji_start_avfs_btc(hwmgr),
"[AVFS][fiji_avfs_event_mgr] Failure at "
"fiji_start_avfs_btc. AVFS Disabled",
return -EINVAL;);
@@ -293,64 +301,64 @@ static int fiji_avfs_event_mgr(struct pp_smumgr *smumgr, bool smu_started)
return 0;
}
-static int fiji_start_smu(struct pp_smumgr *smumgr)
+static int fiji_start_smu(struct pp_hwmgr *hwmgr)
{
int result = 0;
- struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend);
+ struct fiji_smumgr *priv = (struct fiji_smumgr *)(hwmgr->smu_backend);
/* Only start SMC if SMC RAM is not running */
- if (!(smu7_is_smc_ram_running(smumgr)
- || cgs_is_virtualization_enabled(smumgr->device))) {
- fiji_avfs_event_mgr(smumgr, false);
+ if (!(smu7_is_smc_ram_running(hwmgr)
+ || cgs_is_virtualization_enabled(hwmgr->device))) {
+ fiji_avfs_event_mgr(hwmgr, false);
/* Check if SMU is running in protected mode */
- if (0 == SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device,
+ if (0 == PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
CGS_IND_REG__SMC,
SMU_FIRMWARE, SMU_MODE)) {
- result = fiji_start_smu_in_non_protection_mode(smumgr);
+ result = fiji_start_smu_in_non_protection_mode(hwmgr);
if (result)
return result;
} else {
- result = fiji_start_smu_in_protection_mode(smumgr);
+ result = fiji_start_smu_in_protection_mode(hwmgr);
if (result)
return result;
}
- fiji_avfs_event_mgr(smumgr, true);
+ fiji_avfs_event_mgr(hwmgr, true);
}
/* To initialize all clock gating before RLC loaded and running.*/
- cgs_set_clockgating_state(smumgr->device,
+ cgs_set_clockgating_state(hwmgr->device,
AMD_IP_BLOCK_TYPE_GFX, AMD_CG_STATE_GATE);
- cgs_set_clockgating_state(smumgr->device,
+ cgs_set_clockgating_state(hwmgr->device,
AMD_IP_BLOCK_TYPE_GMC, AMD_CG_STATE_GATE);
- cgs_set_clockgating_state(smumgr->device,
+ cgs_set_clockgating_state(hwmgr->device,
AMD_IP_BLOCK_TYPE_SDMA, AMD_CG_STATE_GATE);
- cgs_set_clockgating_state(smumgr->device,
+ cgs_set_clockgating_state(hwmgr->device,
AMD_IP_BLOCK_TYPE_COMMON, AMD_CG_STATE_GATE);
/* Setup SoftRegsStart here for register lookup in case
* DummyBackEnd is used and ProcessFirmwareHeader is not executed
*/
- smu7_read_smc_sram_dword(smumgr,
+ smu7_read_smc_sram_dword(hwmgr,
SMU7_FIRMWARE_HEADER_LOCATION +
offsetof(SMU73_Firmware_Header, SoftRegisters),
&(priv->smu7_data.soft_regs_start), 0x40000);
- result = smu7_request_smu_load_fw(smumgr);
+ result = smu7_request_smu_load_fw(hwmgr);
return result;
}
-static bool fiji_is_hw_avfs_present(struct pp_smumgr *smumgr)
+static bool fiji_is_hw_avfs_present(struct pp_hwmgr *hwmgr)
{
uint32_t efuse = 0;
uint32_t mask = (1 << ((AVFS_EN_MSB - AVFS_EN_LSB) + 1)) - 1;
- if (cgs_is_virtualization_enabled(smumgr->device))
+ if (cgs_is_virtualization_enabled(hwmgr->device))
return 0;
- if (!atomctrl_read_efuse(smumgr->device, AVFS_EN_LSB, AVFS_EN_MSB,
+ if (!atomctrl_read_efuse(hwmgr->device, AVFS_EN_LSB, AVFS_EN_MSB,
mask, &efuse)) {
if (efuse)
return true;
@@ -358,14 +366,7 @@ static bool fiji_is_hw_avfs_present(struct pp_smumgr *smumgr)
return false;
}
-/**
-* Write a 32bit value to the SMC SRAM space.
-* ALL PARAMETERS ARE IN HOST BYTE ORDER.
-* @param smumgr the address of the powerplay hardware manager.
-* @param smc_addr the address in the SMC RAM to access.
-* @param value to write to the SMC SRAM.
-*/
-static int fiji_smu_init(struct pp_smumgr *smumgr)
+static int fiji_smu_init(struct pp_hwmgr *hwmgr)
{
int i;
struct fiji_smumgr *fiji_priv = NULL;
@@ -375,9 +376,9 @@ static int fiji_smu_init(struct pp_smumgr *smumgr)
if (fiji_priv == NULL)
return -ENOMEM;
- smumgr->backend = fiji_priv;
+ hwmgr->smu_backend = fiji_priv;
- if (smu7_init(smumgr))
+ if (smu7_init(hwmgr))
return -EINVAL;
for (i = 0; i < SMU73_MAX_LEVELS_GRAPHICS; i++)
@@ -386,6 +387,2334 @@ static int fiji_smu_init(struct pp_smumgr *smumgr)
return 0;
}
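+
+/* Look up the VDDC/VDDCI operating voltage (and MVDD) for a given clock from a
+ * clock-voltage dependency table.  The result packs VDDC, VDDCI and a phase
+ * count into one 32-bit word, each voltage scaled by VOLTAGE_SCALE, while MVDD
+ * is returned separately through *mvdd.
+ */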
+static int fiji_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr,
+ struct phm_ppt_v1_clock_voltage_dependency_table *dep_table,
+ uint32_t clock, uint32_t *voltage, uint32_t *mvdd)
+{
+ uint32_t i;
+ uint16_t vddci;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ *voltage = *mvdd = 0;
+
+
+	/* the clock-voltage dependency table is empty */
+ if (dep_table->count == 0)
+ return -EINVAL;
+
+ for (i = 0; i < dep_table->count; i++) {
+ /* find first sclk bigger than request */
+ if (dep_table->entries[i].clk >= clock) {
+ *voltage |= (dep_table->entries[i].vddc *
+ VOLTAGE_SCALE) << VDDC_SHIFT;
+ if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control)
+ *voltage |= (data->vbios_boot_state.vddci_bootup_value *
+ VOLTAGE_SCALE) << VDDCI_SHIFT;
+ else if (dep_table->entries[i].vddci)
+ *voltage |= (dep_table->entries[i].vddci *
+ VOLTAGE_SCALE) << VDDCI_SHIFT;
+ else {
+ vddci = phm_find_closest_vddci(&(data->vddci_voltage_table),
+ (dep_table->entries[i].vddc -
+ VDDC_VDDCI_DELTA));
+ *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
+ }
+
+ if (SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control)
+ *mvdd = data->vbios_boot_state.mvdd_bootup_value *
+ VOLTAGE_SCALE;
+ else if (dep_table->entries[i].mvdd)
+ *mvdd = (uint32_t) dep_table->entries[i].mvdd *
+ VOLTAGE_SCALE;
+
+ *voltage |= 1 << PHASES_SHIFT;
+ return 0;
+ }
+ }
+
+	/* sclk is bigger than the max sclk in the dependency table */
+ *voltage |= (dep_table->entries[i - 1].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
+
+ if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control)
+ *voltage |= (data->vbios_boot_state.vddci_bootup_value *
+ VOLTAGE_SCALE) << VDDCI_SHIFT;
+ else if (dep_table->entries[i-1].vddci) {
+ vddci = phm_find_closest_vddci(&(data->vddci_voltage_table),
+ (dep_table->entries[i].vddc -
+ VDDC_VDDCI_DELTA));
+ *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
+ }
+
+ if (SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control)
+ *mvdd = data->vbios_boot_state.mvdd_bootup_value * VOLTAGE_SCALE;
+ else if (dep_table->entries[i].mvdd)
+ *mvdd = (uint32_t) dep_table->entries[i - 1].mvdd * VOLTAGE_SCALE;
+
+ return 0;
+}
+
+
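+/* Scale a raw fan-gain setting by 4096 / 100, i.e. map a percentage onto a
+ * 0..4096 fixed-point range, which appears to be the unit the SMU73 fan-gain
+ * fields expect.
+ */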
+static uint16_t scale_fan_gain_settings(uint16_t raw_setting)
+{
+ uint32_t tmp;
+ tmp = raw_setting * 4096 / 100;
+ return (uint16_t)tmp;
+}
+
+static void get_scl_sda_value(uint8_t line, uint8_t *scl, uint8_t *sda)
+{
+ switch (line) {
+ case SMU7_I2CLineID_DDC1:
+ *scl = SMU7_I2C_DDC1CLK;
+ *sda = SMU7_I2C_DDC1DATA;
+ break;
+ case SMU7_I2CLineID_DDC2:
+ *scl = SMU7_I2C_DDC2CLK;
+ *sda = SMU7_I2C_DDC2DATA;
+ break;
+ case SMU7_I2CLineID_DDC3:
+ *scl = SMU7_I2C_DDC3CLK;
+ *sda = SMU7_I2C_DDC3DATA;
+ break;
+ case SMU7_I2CLineID_DDC4:
+ *scl = SMU7_I2C_DDC4CLK;
+ *sda = SMU7_I2C_DDC4DATA;
+ break;
+ case SMU7_I2CLineID_DDC5:
+ *scl = SMU7_I2C_DDC5CLK;
+ *sda = SMU7_I2C_DDC5DATA;
+ break;
+ case SMU7_I2CLineID_DDC6:
+ *scl = SMU7_I2C_DDC6CLK;
+ *sda = SMU7_I2C_DDC6DATA;
+ break;
+ case SMU7_I2CLineID_SCLSDA:
+ *scl = SMU7_I2C_SCL;
+ *sda = SMU7_I2C_SDA;
+ break;
+ case SMU7_I2CLineID_DDCVGA:
+ *scl = SMU7_I2C_DDCVGACLK;
+ *sda = SMU7_I2C_DDCVGADATA;
+ break;
+ default:
+ *scl = 0;
+ *sda = 0;
+ break;
+ }
+}
+
+static void fiji_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
+{
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+ if (table_info &&
+ table_info->cac_dtp_table->usPowerTuneDataSetID <= POWERTUNE_DEFAULT_SET_MAX &&
+ table_info->cac_dtp_table->usPowerTuneDataSetID)
+ smu_data->power_tune_defaults =
+ &fiji_power_tune_data_set_array
+ [table_info->cac_dtp_table->usPowerTuneDataSetID - 1];
+ else
+ smu_data->power_tune_defaults = &fiji_power_tune_data_set_array[0];
+
+}
+
+static int fiji_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr)
+{
+
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
+ const struct fiji_pt_defaults *defaults = smu_data->power_tune_defaults;
+
+ SMU73_Discrete_DpmTable *dpm_table = &(smu_data->smc_state_table);
+
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct phm_cac_tdp_table *cac_dtp_table = table_info->cac_dtp_table;
+ struct pp_advance_fan_control_parameters *fan_table =
+ &hwmgr->thermal_controller.advanceFanControlParameters;
+ uint8_t uc_scl, uc_sda;
+
+	/* The TDP number of fraction bits is changed from 8 to 7 for Fiji
+ * as requested by SMC team
+ */
+ dpm_table->DefaultTdp = PP_HOST_TO_SMC_US(
+ (uint16_t)(cac_dtp_table->usTDP * 128));
+ dpm_table->TargetTdp = PP_HOST_TO_SMC_US(
+ (uint16_t)(cac_dtp_table->usTDP * 128));
+
+ PP_ASSERT_WITH_CODE(cac_dtp_table->usTargetOperatingTemp <= 255,
+ "Target Operating Temp is out of Range!",
+ );
+
+ dpm_table->GpuTjMax = (uint8_t)(cac_dtp_table->usTargetOperatingTemp);
+ dpm_table->GpuTjHyst = 8;
+
+ dpm_table->DTEAmbientTempBase = defaults->DTEAmbientTempBase;
+
+ /* The following are for new Fiji Multi-input fan/thermal control */
+ dpm_table->TemperatureLimitEdge = PP_HOST_TO_SMC_US(
+ cac_dtp_table->usTargetOperatingTemp * 256);
+ dpm_table->TemperatureLimitHotspot = PP_HOST_TO_SMC_US(
+ cac_dtp_table->usTemperatureLimitHotspot * 256);
+ dpm_table->TemperatureLimitLiquid1 = PP_HOST_TO_SMC_US(
+ cac_dtp_table->usTemperatureLimitLiquid1 * 256);
+ dpm_table->TemperatureLimitLiquid2 = PP_HOST_TO_SMC_US(
+ cac_dtp_table->usTemperatureLimitLiquid2 * 256);
+ dpm_table->TemperatureLimitVrVddc = PP_HOST_TO_SMC_US(
+ cac_dtp_table->usTemperatureLimitVrVddc * 256);
+ dpm_table->TemperatureLimitVrMvdd = PP_HOST_TO_SMC_US(
+ cac_dtp_table->usTemperatureLimitVrMvdd * 256);
+ dpm_table->TemperatureLimitPlx = PP_HOST_TO_SMC_US(
+ cac_dtp_table->usTemperatureLimitPlx * 256);
+
+ dpm_table->FanGainEdge = PP_HOST_TO_SMC_US(
+ scale_fan_gain_settings(fan_table->usFanGainEdge));
+ dpm_table->FanGainHotspot = PP_HOST_TO_SMC_US(
+ scale_fan_gain_settings(fan_table->usFanGainHotspot));
+ dpm_table->FanGainLiquid = PP_HOST_TO_SMC_US(
+ scale_fan_gain_settings(fan_table->usFanGainLiquid));
+ dpm_table->FanGainVrVddc = PP_HOST_TO_SMC_US(
+ scale_fan_gain_settings(fan_table->usFanGainVrVddc));
+ dpm_table->FanGainVrMvdd = PP_HOST_TO_SMC_US(
+ scale_fan_gain_settings(fan_table->usFanGainVrMvdd));
+ dpm_table->FanGainPlx = PP_HOST_TO_SMC_US(
+ scale_fan_gain_settings(fan_table->usFanGainPlx));
+ dpm_table->FanGainHbm = PP_HOST_TO_SMC_US(
+ scale_fan_gain_settings(fan_table->usFanGainHbm));
+
+ dpm_table->Liquid1_I2C_address = cac_dtp_table->ucLiquid1_I2C_address;
+ dpm_table->Liquid2_I2C_address = cac_dtp_table->ucLiquid2_I2C_address;
+ dpm_table->Vr_I2C_address = cac_dtp_table->ucVr_I2C_address;
+ dpm_table->Plx_I2C_address = cac_dtp_table->ucPlx_I2C_address;
+
+ get_scl_sda_value(cac_dtp_table->ucLiquid_I2C_Line, &uc_scl, &uc_sda);
+ dpm_table->Liquid_I2C_LineSCL = uc_scl;
+ dpm_table->Liquid_I2C_LineSDA = uc_sda;
+
+ get_scl_sda_value(cac_dtp_table->ucVr_I2C_Line, &uc_scl, &uc_sda);
+ dpm_table->Vr_I2C_LineSCL = uc_scl;
+ dpm_table->Vr_I2C_LineSDA = uc_sda;
+
+ get_scl_sda_value(cac_dtp_table->ucPlx_I2C_Line, &uc_scl, &uc_sda);
+ dpm_table->Plx_I2C_LineSCL = uc_scl;
+ dpm_table->Plx_I2C_LineSDA = uc_sda;
+
+ return 0;
+}
+
+
+static int fiji_populate_svi_load_line(struct pp_hwmgr *hwmgr)
+{
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
+ const struct fiji_pt_defaults *defaults = smu_data->power_tune_defaults;
+
+ smu_data->power_tune_table.SviLoadLineEn = defaults->SviLoadLineEn;
+ smu_data->power_tune_table.SviLoadLineVddC = defaults->SviLoadLineVddC;
+ smu_data->power_tune_table.SviLoadLineTrimVddC = 3;
+ smu_data->power_tune_table.SviLoadLineOffsetVddC = 0;
+
+ return 0;
+}
+
+
+static int fiji_populate_tdc_limit(struct pp_hwmgr *hwmgr)
+{
+ uint16_t tdc_limit;
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ const struct fiji_pt_defaults *defaults = smu_data->power_tune_defaults;
+
+	/* The TDC number of fraction bits is changed from 8 to 7
+ * for Fiji as requested by SMC team
+ */
+ tdc_limit = (uint16_t)(table_info->cac_dtp_table->usTDC * 128);
+ smu_data->power_tune_table.TDC_VDDC_PkgLimit =
+ CONVERT_FROM_HOST_TO_SMC_US(tdc_limit);
+ smu_data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
+ defaults->TDC_VDDC_ThrottleReleaseLimitPerc;
+ smu_data->power_tune_table.TDC_MAWt = defaults->TDC_MAWt;
+
+ return 0;
+}
+
+static int fiji_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
+{
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
+ const struct fiji_pt_defaults *defaults = smu_data->power_tune_defaults;
+ uint32_t temp;
+
+ if (smu7_read_smc_sram_dword(hwmgr,
+ fuse_table_offset +
+ offsetof(SMU73_Discrete_PmFuses, TdcWaterfallCtl),
+ (uint32_t *)&temp, SMC_RAM_END))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to read PmFuses.DW6 (SviLoadLineEn) from SMC Failed!",
+ return -EINVAL);
+ else {
+ smu_data->power_tune_table.TdcWaterfallCtl = defaults->TdcWaterfallCtl;
+ smu_data->power_tune_table.LPMLTemperatureMin =
+ (uint8_t)((temp >> 16) & 0xff);
+ smu_data->power_tune_table.LPMLTemperatureMax =
+ (uint8_t)((temp >> 8) & 0xff);
+ smu_data->power_tune_table.Reserved = (uint8_t)(temp & 0xff);
+ }
+ return 0;
+}
+
+static int fiji_populate_temperature_scaler(struct pp_hwmgr *hwmgr)
+{
+ int i;
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
+
+ /* Currently not used. Set all to zero. */
+ for (i = 0; i < 16; i++)
+ smu_data->power_tune_table.LPMLTemperatureScaler[i] = 0;
+
+ return 0;
+}
+
+static int fiji_populate_fuzzy_fan(struct pp_hwmgr *hwmgr)
+{
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
+
+ if ((hwmgr->thermal_controller.advanceFanControlParameters.
+ usFanOutputSensitivity & (1 << 15)) ||
+ 0 == hwmgr->thermal_controller.advanceFanControlParameters.
+ usFanOutputSensitivity)
+ hwmgr->thermal_controller.advanceFanControlParameters.
+ usFanOutputSensitivity = hwmgr->thermal_controller.
+ advanceFanControlParameters.usDefaultFanOutputSensitivity;
+
+ smu_data->power_tune_table.FuzzyFan_PwmSetDelta =
+ PP_HOST_TO_SMC_US(hwmgr->thermal_controller.
+ advanceFanControlParameters.usFanOutputSensitivity);
+ return 0;
+}
+
+static int fiji_populate_gnb_lpml(struct pp_hwmgr *hwmgr)
+{
+ int i;
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
+
+ /* Currently not used. Set all to zero. */
+ for (i = 0; i < 16; i++)
+ smu_data->power_tune_table.GnbLPML[i] = 0;
+
+ return 0;
+}
+
+static int fiji_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr)
+{
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ uint16_t HiSidd = smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd;
+ uint16_t LoSidd = smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd;
+ struct phm_cac_tdp_table *cac_table = table_info->cac_dtp_table;
+
+ HiSidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256);
+ LoSidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256);
+
+ smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd =
+ CONVERT_FROM_HOST_TO_SMC_US(HiSidd);
+ smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd =
+ CONVERT_FROM_HOST_TO_SMC_US(LoSidd);
+
+ return 0;
+}
+
+static int fiji_populate_pm_fuses(struct pp_hwmgr *hwmgr)
+{
+ uint32_t pm_fuse_table_offset;
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_PowerContainment)) {
+ if (smu7_read_smc_sram_dword(hwmgr,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU73_Firmware_Header, PmFuseTable),
+ &pm_fuse_table_offset, SMC_RAM_END))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to get pm_fuse_table_offset Failed!",
+ return -EINVAL);
+
+ /* DW6 */
+ if (fiji_populate_svi_load_line(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate SviLoadLine Failed!",
+ return -EINVAL);
+ /* DW7 */
+ if (fiji_populate_tdc_limit(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate TDCLimit Failed!", return -EINVAL);
+ /* DW8 */
+ if (fiji_populate_dw8(hwmgr, pm_fuse_table_offset))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate TdcWaterfallCtl, "
+ "LPMLTemperature Min and Max Failed!",
+ return -EINVAL);
+
+ /* DW9-DW12 */
+ if (0 != fiji_populate_temperature_scaler(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate LPMLTemperatureScaler Failed!",
+ return -EINVAL);
+
+ /* DW13-DW14 */
+ if (fiji_populate_fuzzy_fan(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate Fuzzy Fan Control parameters Failed!",
+ return -EINVAL);
+
+ /* DW15-DW18 */
+ if (fiji_populate_gnb_lpml(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate GnbLPML Failed!",
+ return -EINVAL);
+
+ /* DW20 */
+ if (fiji_populate_bapm_vddc_base_leakage_sidd(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate BapmVddCBaseLeakage Hi and Lo "
+ "Sidd Failed!", return -EINVAL);
+
+ if (smu7_copy_bytes_to_smc(hwmgr, pm_fuse_table_offset,
+ (uint8_t *)&smu_data->power_tune_table,
+ sizeof(struct SMU73_Discrete_PmFuses), SMC_RAM_END))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to download PmFuseTable Failed!",
+ return -EINVAL);
+ }
+ return 0;
+}
+
+static int fiji_populate_cac_table(struct pp_hwmgr *hwmgr,
+ struct SMU73_Discrete_DpmTable *table)
+{
+ uint32_t count;
+ uint8_t index;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct phm_ppt_v1_voltage_lookup_table *lookup_table =
+ table_info->vddc_lookup_table;
+	/* The table is already swapped, so in order to use the value from it,
+ * we need to swap it back.
+ * We are populating vddc CAC data to BapmVddc table
+ * in split and merged mode
+ */
+
+ for (count = 0; count < lookup_table->count; count++) {
+ index = phm_get_voltage_index(lookup_table,
+ data->vddc_voltage_table.entries[count].value);
+ table->BapmVddcVidLoSidd[count] =
+ convert_to_vid(lookup_table->entries[index].us_cac_low);
+ table->BapmVddcVidHiSidd[count] =
+ convert_to_vid(lookup_table->entries[index].us_cac_high);
+ }
+
+ return 0;
+}
+
+static int fiji_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr,
+ struct SMU73_Discrete_DpmTable *table)
+{
+ int result;
+
+ result = fiji_populate_cac_table(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "can not populate CAC voltage tables to SMC",
+ return -EINVAL);
+
+ return 0;
+}
+
+static int fiji_populate_ulv_level(struct pp_hwmgr *hwmgr,
+ struct SMU73_Discrete_Ulv *state)
+{
+ int result = 0;
+
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+ state->CcPwrDynRm = 0;
+ state->CcPwrDynRm1 = 0;
+
+ state->VddcOffset = (uint16_t) table_info->us_ulv_voltage_offset;
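+	/* VddcOffsetVid expresses the same offset in VID steps: the
+	 * VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1 (100 / 625)
+	 * factor divides the offset by 6.25.
+	 */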
+ state->VddcOffsetVid = (uint8_t)(table_info->us_ulv_voltage_offset *
+ VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
+
+ state->VddcPhase = 1;
+
+ if (!result) {
+ CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm);
+ CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm1);
+ CONVERT_FROM_HOST_TO_SMC_US(state->VddcOffset);
+ }
+ return result;
+}
+
+static int fiji_populate_ulv_state(struct pp_hwmgr *hwmgr,
+ struct SMU73_Discrete_DpmTable *table)
+{
+ return fiji_populate_ulv_level(hwmgr, &table->Ulv);
+}
+
+static int fiji_populate_smc_link_level(struct pp_hwmgr *hwmgr,
+ struct SMU73_Discrete_DpmTable *table)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct smu7_dpm_table *dpm_table = &data->dpm_table;
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
+ int i;
+
+ /* Index (dpm_table->pcie_speed_table.count)
+ * is reserved for PCIE boot level. */
+ for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) {
+ table->LinkLevel[i].PcieGenSpeed =
+ (uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value;
+ table->LinkLevel[i].PcieLaneCount = (uint8_t)encode_pcie_lane_width(
+ dpm_table->pcie_speed_table.dpm_levels[i].param1);
+ table->LinkLevel[i].EnabledForActivity = 1;
+ table->LinkLevel[i].SPC = (uint8_t)(data->pcie_spc_cap & 0xff);
+ table->LinkLevel[i].DownThreshold = PP_HOST_TO_SMC_UL(5);
+ table->LinkLevel[i].UpThreshold = PP_HOST_TO_SMC_UL(30);
+ }
+
+ smu_data->smc_state_table.LinkLevelCount =
+ (uint8_t)dpm_table->pcie_speed_table.count;
+ data->dpm_level_enable_mask.pcie_dpm_enable_mask =
+ phm_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
+
+ return 0;
+}
+
+static int fiji_calculate_sclk_params(struct pp_hwmgr *hwmgr,
+ uint32_t clock, struct SMU73_Discrete_GraphicsLevel *sclk)
+{
+ const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct pp_atomctrl_clock_dividers_vi dividers;
+ uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL;
+ uint32_t spll_func_cntl_3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
+ uint32_t spll_func_cntl_4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
+ uint32_t cg_spll_spread_spectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
+ uint32_t cg_spll_spread_spectrum_2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
+ uint32_t ref_clock;
+ uint32_t ref_divider;
+ uint32_t fbdiv;
+ int result;
+
+ /* get the engine clock dividers for this clock value */
+ result = atomctrl_get_engine_pll_dividers_vi(hwmgr, clock, &dividers);
+
+ PP_ASSERT_WITH_CODE(result == 0,
+ "Error retrieving Engine Clock dividers from VBIOS.",
+ return result);
+
+ /* To get FBDIV we need to multiply this by 16384 and divide it by Fref. */
+ ref_clock = atomctrl_get_reference_clock(hwmgr);
+ ref_divider = 1 + dividers.uc_pll_ref_div;
+
+ /* low 14 bits is fraction and high 12 bits is divider */
+ fbdiv = dividers.ul_fb_div.ul_fb_divider & 0x3FFFFFF;
+
+ /* SPLL_FUNC_CNTL setup */
+ spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
+ SPLL_REF_DIV, dividers.uc_pll_ref_div);
+ spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
+ SPLL_PDIV_A, dividers.uc_pll_post_div);
+
+ /* SPLL_FUNC_CNTL_3 setup*/
+ spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3, CG_SPLL_FUNC_CNTL_3,
+ SPLL_FB_DIV, fbdiv);
+
+ /* set to use fractional accumulation*/
+ spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3, CG_SPLL_FUNC_CNTL_3,
+ SPLL_DITHEN, 1);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_EngineSpreadSpectrumSupport)) {
+ struct pp_atomctrl_internal_ss_info ssInfo;
+
+ uint32_t vco_freq = clock * dividers.uc_pll_post_div;
+ if (!atomctrl_get_engine_clock_spread_spectrum(hwmgr,
+ vco_freq, &ssInfo)) {
+ /*
+ * ss_info.speed_spectrum_percentage -- in unit of 0.01%
+ * ss_info.speed_spectrum_rate -- in unit of khz
+ *
+ * clks = reference_clock * 10 / (REFDIV + 1) / speed_spectrum_rate / 2
+ */
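+			/* ref_clock * 5 folds together the "* 10" and "/ 2"
+			 * from the formula above.
+			 */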
+ uint32_t clk_s = ref_clock * 5 /
+ (ref_divider * ssInfo.speed_spectrum_rate);
+ /* clkv = 2 * D * fbdiv / NS */
+ uint32_t clk_v = 4 * ssInfo.speed_spectrum_percentage *
+ fbdiv / (clk_s * 10000);
+
+ cg_spll_spread_spectrum = PHM_SET_FIELD(cg_spll_spread_spectrum,
+ CG_SPLL_SPREAD_SPECTRUM, CLKS, clk_s);
+ cg_spll_spread_spectrum = PHM_SET_FIELD(cg_spll_spread_spectrum,
+ CG_SPLL_SPREAD_SPECTRUM, SSEN, 1);
+ cg_spll_spread_spectrum_2 = PHM_SET_FIELD(cg_spll_spread_spectrum_2,
+ CG_SPLL_SPREAD_SPECTRUM_2, CLKV, clk_v);
+ }
+ }
+
+ sclk->SclkFrequency = clock;
+ sclk->CgSpllFuncCntl3 = spll_func_cntl_3;
+ sclk->CgSpllFuncCntl4 = spll_func_cntl_4;
+ sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum;
+ sclk->SpllSpreadSpectrum2 = cg_spll_spread_spectrum_2;
+ sclk->SclkDid = (uint8_t)dividers.pll_post_divider;
+
+ return 0;
+}
+
+static int fiji_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
+ uint32_t clock, uint16_t sclk_al_threshold,
+ struct SMU73_Discrete_GraphicsLevel *level)
+{
+ int result;
+ /* PP_Clocks minClocks; */
+ uint32_t threshold, mvdd;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+ result = fiji_calculate_sclk_params(hwmgr, clock, level);
+
+ /* populate graphics levels */
+ result = fiji_get_dependency_volt_by_clk(hwmgr,
+ table_info->vdd_dep_on_sclk, clock,
+ (uint32_t *)(&level->MinVoltage), &mvdd);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "can not find VDDC voltage value for "
+ "VDDC engine clock dependency table",
+ return result);
+
+ level->SclkFrequency = clock;
+ level->ActivityLevel = sclk_al_threshold;
+ level->CcPwrDynRm = 0;
+ level->CcPwrDynRm1 = 0;
+ level->EnabledForActivity = 0;
+ level->EnabledForThrottle = 1;
+ level->UpHyst = 10;
+ level->DownHyst = 0;
+ level->VoltageDownHyst = 0;
+ level->PowerThrottle = 0;
+
+ threshold = clock * data->fast_watermark_threshold / 100;
+
+ data->display_timing.min_clock_in_sr = hwmgr->display_config.min_core_set_clock_in_sr;
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep))
+ level->DeepSleepDivId = smu7_get_sleep_divider_id_from_clock(clock,
+ hwmgr->display_config.min_core_set_clock_in_sr);
+
+
+ /* Default to slow, highest DPM level will be
+ * set to PPSMC_DISPLAY_WATERMARK_LOW later.
+ */
+ level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(level->MinVoltage);
+ CONVERT_FROM_HOST_TO_SMC_UL(level->SclkFrequency);
+ CONVERT_FROM_HOST_TO_SMC_US(level->ActivityLevel);
+ CONVERT_FROM_HOST_TO_SMC_UL(level->CgSpllFuncCntl3);
+ CONVERT_FROM_HOST_TO_SMC_UL(level->CgSpllFuncCntl4);
+ CONVERT_FROM_HOST_TO_SMC_UL(level->SpllSpreadSpectrum);
+ CONVERT_FROM_HOST_TO_SMC_UL(level->SpllSpreadSpectrum2);
+ CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm);
+ CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm1);
+
+ return 0;
+}
+
+static int fiji_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
+
+ struct smu7_dpm_table *dpm_table = &data->dpm_table;
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct phm_ppt_v1_pcie_table *pcie_table = table_info->pcie_table;
+ uint8_t pcie_entry_cnt = (uint8_t) data->dpm_table.pcie_speed_table.count;
+ int result = 0;
+ uint32_t array = smu_data->smu7_data.dpm_table_start +
+ offsetof(SMU73_Discrete_DpmTable, GraphicsLevel);
+ uint32_t array_size = sizeof(struct SMU73_Discrete_GraphicsLevel) *
+ SMU73_MAX_LEVELS_GRAPHICS;
+ struct SMU73_Discrete_GraphicsLevel *levels =
+ smu_data->smc_state_table.GraphicsLevel;
+ uint32_t i, max_entry;
+ uint8_t hightest_pcie_level_enabled = 0,
+ lowest_pcie_level_enabled = 0,
+ mid_pcie_level_enabled = 0,
+ count = 0;
+
+ for (i = 0; i < dpm_table->sclk_table.count; i++) {
+ result = fiji_populate_single_graphic_level(hwmgr,
+ dpm_table->sclk_table.dpm_levels[i].value,
+ (uint16_t)smu_data->activity_target[i],
+ &levels[i]);
+ if (result)
+ return result;
+
+ /* Making sure only DPM level 0-1 have Deep Sleep Div ID populated. */
+ if (i > 1)
+ levels[i].DeepSleepDivId = 0;
+ }
+
+ /* Only enable level 0 for now.*/
+ levels[0].EnabledForActivity = 1;
+
+ /* set highest level watermark to high */
+ levels[dpm_table->sclk_table.count - 1].DisplayWatermark =
+ PPSMC_DISPLAY_WATERMARK_HIGH;
+
+ smu_data->smc_state_table.GraphicsDpmLevelCount =
+ (uint8_t)dpm_table->sclk_table.count;
+ data->dpm_level_enable_mask.sclk_dpm_enable_mask =
+ phm_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
+
+ if (pcie_table != NULL) {
+ PP_ASSERT_WITH_CODE((1 <= pcie_entry_cnt),
+ "There must be 1 or more PCIE levels defined in PPTable.",
+ return -EINVAL);
+ max_entry = pcie_entry_cnt - 1;
+ for (i = 0; i < dpm_table->sclk_table.count; i++)
+ levels[i].pcieDpmLevel =
+ (uint8_t) ((i < max_entry) ? i : max_entry);
+ } else {
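+		/* No PCIe table in the PPTable: derive the highest, lowest and
+		 * middle enabled PCIe DPM levels from the enable mask and
+		 * spread them across the sclk levels below.
+		 */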
+ while (data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
+ ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
+ (1 << (hightest_pcie_level_enabled + 1))) != 0))
+ hightest_pcie_level_enabled++;
+
+ while (data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
+ ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
+ (1 << lowest_pcie_level_enabled)) == 0))
+ lowest_pcie_level_enabled++;
+
+ while ((count < hightest_pcie_level_enabled) &&
+ ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
+ (1 << (lowest_pcie_level_enabled + 1 + count))) == 0))
+ count++;
+
+ mid_pcie_level_enabled = (lowest_pcie_level_enabled + 1 + count) <
+ hightest_pcie_level_enabled ?
+ (lowest_pcie_level_enabled + 1 + count) :
+ hightest_pcie_level_enabled;
+
+ /* set pcieDpmLevel to hightest_pcie_level_enabled */
+ for (i = 2; i < dpm_table->sclk_table.count; i++)
+ levels[i].pcieDpmLevel = hightest_pcie_level_enabled;
+
+ /* set pcieDpmLevel to lowest_pcie_level_enabled */
+ levels[0].pcieDpmLevel = lowest_pcie_level_enabled;
+
+ /* set pcieDpmLevel to mid_pcie_level_enabled */
+ levels[1].pcieDpmLevel = mid_pcie_level_enabled;
+ }
+	/* The level count is sent to the SMC once at SMC table init and never changes. */
+ result = smu7_copy_bytes_to_smc(hwmgr, array, (uint8_t *)levels,
+ (uint32_t)array_size, SMC_RAM_END);
+
+ return result;
+}
+
+
+/**
+ * MCLK Frequency Ratio
+ * SEQ_CG_RESP Bit[31:24] - 0x0
+ * Bit[27:24] - DDR3 Frequency ratio
+ * 0x0 <= 100MHz, 450 < 0x8 <= 500MHz
+ * 100 < 0x1 <= 150MHz, 500 < 0x9 <= 550MHz
+ * 150 < 0x2 <= 200MHz, 550 < 0xA <= 600MHz
+ * 200 < 0x3 <= 250MHz, 600 < 0xB <= 650MHz
+ * 250 < 0x4 <= 300MHz, 650 < 0xC <= 700MHz
+ * 300 < 0x5 <= 350MHz, 700 < 0xD <= 750MHz
+ * 350 < 0x6 <= 400MHz, 750 < 0xE <= 800MHz
+ * 400 < 0x7 <= 450MHz, 800 < 0xF
+ */
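+/* mem_clock is given in 10 kHz units, so e.g. a value of 40000 (400 MHz)
+ * maps to ratio 0x6 in the table above.
+ */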
+static uint8_t fiji_get_mclk_frequency_ratio(uint32_t mem_clock)
+{
+ if (mem_clock <= 10000)
+ return 0x0;
+ if (mem_clock <= 15000)
+ return 0x1;
+ if (mem_clock <= 20000)
+ return 0x2;
+ if (mem_clock <= 25000)
+ return 0x3;
+ if (mem_clock <= 30000)
+ return 0x4;
+ if (mem_clock <= 35000)
+ return 0x5;
+ if (mem_clock <= 40000)
+ return 0x6;
+ if (mem_clock <= 45000)
+ return 0x7;
+ if (mem_clock <= 50000)
+ return 0x8;
+ if (mem_clock <= 55000)
+ return 0x9;
+ if (mem_clock <= 60000)
+ return 0xa;
+ if (mem_clock <= 65000)
+ return 0xb;
+ if (mem_clock <= 70000)
+ return 0xc;
+ if (mem_clock <= 75000)
+ return 0xd;
+ if (mem_clock <= 80000)
+ return 0xe;
+ /* mem_clock > 800MHz */
+ return 0xf;
+}
+
+static int fiji_calculate_mclk_params(struct pp_hwmgr *hwmgr,
+ uint32_t clock, struct SMU73_Discrete_MemoryLevel *mclk)
+{
+ struct pp_atomctrl_memory_clock_param mem_param;
+ int result;
+
+ result = atomctrl_get_memory_pll_dividers_vi(hwmgr, clock, &mem_param);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "Failed to get Memory PLL Dividers.",
+ );
+
+	/* Save the result data to the output memory level structure */
+ mclk->MclkFrequency = clock;
+ mclk->MclkDivider = (uint8_t)mem_param.mpll_post_divider;
+ mclk->FreqRange = fiji_get_mclk_frequency_ratio(clock);
+
+ return result;
+}
+
+static int fiji_populate_single_memory_level(struct pp_hwmgr *hwmgr,
+ uint32_t clock, struct SMU73_Discrete_MemoryLevel *mem_level)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ int result = 0;
+ uint32_t mclk_stutter_mode_threshold = 60000;
+
+ if (table_info->vdd_dep_on_mclk) {
+ result = fiji_get_dependency_volt_by_clk(hwmgr,
+ table_info->vdd_dep_on_mclk, clock,
+ (uint32_t *)(&mem_level->MinVoltage), &mem_level->MinMvdd);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "can not find MinVddc voltage value from memory "
+ "VDDC voltage dependency table", return result);
+ }
+
+ mem_level->EnabledForThrottle = 1;
+ mem_level->EnabledForActivity = 0;
+ mem_level->UpHyst = 0;
+ mem_level->DownHyst = 100;
+ mem_level->VoltageDownHyst = 0;
+ mem_level->ActivityLevel = (uint16_t)data->mclk_activity_target;
+ mem_level->StutterEnable = false;
+
+ mem_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
+
+	/* enable stutter mode if all the following conditions apply
+ * PECI_GetNumberOfActiveDisplays(hwmgr->pPECI,
+ * &(data->DisplayTiming.numExistingDisplays));
+ */
+ data->display_timing.num_existing_displays = 1;
+
+ if (mclk_stutter_mode_threshold &&
+ (clock <= mclk_stutter_mode_threshold) &&
+ (!data->is_uvd_enabled) &&
+ (PHM_READ_FIELD(hwmgr->device, DPG_PIPE_STUTTER_CONTROL,
+ STUTTER_ENABLE) & 0x1))
+ mem_level->StutterEnable = true;
+
+ result = fiji_calculate_mclk_params(hwmgr, clock, mem_level);
+ if (!result) {
+ CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinMvdd);
+ CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MclkFrequency);
+ CONVERT_FROM_HOST_TO_SMC_US(mem_level->ActivityLevel);
+ CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinVoltage);
+ }
+ return result;
+}
+
+static int fiji_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
+ struct smu7_dpm_table *dpm_table = &data->dpm_table;
+ int result;
+ /* populate MCLK dpm table to SMU7 */
+ uint32_t array = smu_data->smu7_data.dpm_table_start +
+ offsetof(SMU73_Discrete_DpmTable, MemoryLevel);
+ uint32_t array_size = sizeof(SMU73_Discrete_MemoryLevel) *
+ SMU73_MAX_LEVELS_MEMORY;
+ struct SMU73_Discrete_MemoryLevel *levels =
+ smu_data->smc_state_table.MemoryLevel;
+ uint32_t i;
+
+ for (i = 0; i < dpm_table->mclk_table.count; i++) {
+ PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value),
+ "can not populate memory level as memory clock is zero",
+ return -EINVAL);
+ result = fiji_populate_single_memory_level(hwmgr,
+ dpm_table->mclk_table.dpm_levels[i].value,
+ &levels[i]);
+ if (result)
+ return result;
+ }
+
+ /* Only enable level 0 for now. */
+ levels[0].EnabledForActivity = 1;
+
+	/* In order to prevent MC activity in stutter mode from pushing DPM up,
+	 * the UVD change complements this by putting the MCLK in a higher
+	 * state by default such that we are not affected by the up threshold
+	 * or MCLK DPM latency.
+ */
+ levels[0].ActivityLevel = (uint16_t)data->mclk_dpm0_activity_target;
+ CONVERT_FROM_HOST_TO_SMC_US(levels[0].ActivityLevel);
+
+ smu_data->smc_state_table.MemoryDpmLevelCount =
+ (uint8_t)dpm_table->mclk_table.count;
+ data->dpm_level_enable_mask.mclk_dpm_enable_mask =
+ phm_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
+ /* set highest level watermark to high */
+ levels[dpm_table->mclk_table.count - 1].DisplayWatermark =
+ PPSMC_DISPLAY_WATERMARK_HIGH;
+
+	/* The level count is sent to the SMC once at SMC table init and never changes. */
+ result = smu7_copy_bytes_to_smc(hwmgr, array, (uint8_t *)levels,
+ (uint32_t)array_size, SMC_RAM_END);
+
+ return result;
+}
+
+static int fiji_populate_mvdd_value(struct pp_hwmgr *hwmgr,
+ uint32_t mclk, SMIO_Pattern *smio_pat)
+{
+ const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ uint32_t i = 0;
+
+ if (SMU7_VOLTAGE_CONTROL_NONE != data->mvdd_control) {
+		/* find the mvdd entry whose clock is not lower than the requested mclk */
+ for (i = 0; i < table_info->vdd_dep_on_mclk->count; i++) {
+ if (mclk <= table_info->vdd_dep_on_mclk->entries[i].clk) {
+ smio_pat->Voltage = data->mvdd_voltage_table.entries[i].value;
+ break;
+ }
+ }
+ PP_ASSERT_WITH_CODE(i < table_info->vdd_dep_on_mclk->count,
+ "MVDD Voltage is outside the supported range.",
+ return -EINVAL);
+ } else
+ return -EINVAL;
+
+ return 0;
+}
+
+static int fiji_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
+ SMU73_Discrete_DpmTable *table)
+{
+ int result = 0;
+ const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct pp_atomctrl_clock_dividers_vi dividers;
+ SMIO_Pattern vol_level;
+ uint32_t mvdd;
+ uint16_t us_mvdd;
+ uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL;
+ uint32_t spll_func_cntl_2 = data->clock_registers.vCG_SPLL_FUNC_CNTL_2;
+
+ table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
+
+ if (!data->sclk_dpm_key_disabled) {
+ /* Get MinVoltage and Frequency from DPM0,
+ * already converted to SMC_UL */
+ table->ACPILevel.SclkFrequency =
+ data->dpm_table.sclk_table.dpm_levels[0].value;
+ result = fiji_get_dependency_volt_by_clk(hwmgr,
+ table_info->vdd_dep_on_sclk,
+ table->ACPILevel.SclkFrequency,
+ (uint32_t *)(&table->ACPILevel.MinVoltage), &mvdd);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "Cannot find ACPI VDDC voltage value " \
+ "in Clock Dependency Table",
+ );
+ } else {
+ table->ACPILevel.SclkFrequency =
+ data->vbios_boot_state.sclk_bootup_value;
+ table->ACPILevel.MinVoltage =
+ data->vbios_boot_state.vddc_bootup_value * VOLTAGE_SCALE;
+ }
+
+ /* get the engine clock dividers for this clock value */
+ result = atomctrl_get_engine_pll_dividers_vi(hwmgr,
+ table->ACPILevel.SclkFrequency, &dividers);
+ PP_ASSERT_WITH_CODE(result == 0,
+ "Error retrieving Engine Clock dividers from VBIOS.",
+ return result);
+
+ table->ACPILevel.SclkDid = (uint8_t)dividers.pll_post_divider;
+ table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
+ table->ACPILevel.DeepSleepDivId = 0;
+
+ spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
+ SPLL_PWRON, 0);
+ spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
+ SPLL_RESET, 1);
+ spll_func_cntl_2 = PHM_SET_FIELD(spll_func_cntl_2, CG_SPLL_FUNC_CNTL_2,
+ SCLK_MUX_SEL, 4);
+
+ table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
+ table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
+ table->ACPILevel.CgSpllFuncCntl3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
+ table->ACPILevel.CgSpllFuncCntl4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
+ table->ACPILevel.SpllSpreadSpectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
+ table->ACPILevel.SpllSpreadSpectrum2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
+ table->ACPILevel.CcPwrDynRm = 0;
+ table->ACPILevel.CcPwrDynRm1 = 0;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkFrequency);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.MinVoltage);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl2);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl3);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl4);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum2);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1);
+
+ if (!data->mclk_dpm_key_disabled) {
+ /* Get MinVoltage and Frequency from DPM0, already converted to SMC_UL */
+ table->MemoryACPILevel.MclkFrequency =
+ data->dpm_table.mclk_table.dpm_levels[0].value;
+ result = fiji_get_dependency_volt_by_clk(hwmgr,
+ table_info->vdd_dep_on_mclk,
+ table->MemoryACPILevel.MclkFrequency,
+ (uint32_t *)(&table->MemoryACPILevel.MinVoltage), &mvdd);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "Cannot find ACPI VDDCI voltage value in Clock Dependency Table",
+ );
+ } else {
+ table->MemoryACPILevel.MclkFrequency =
+ data->vbios_boot_state.mclk_bootup_value;
+ table->MemoryACPILevel.MinVoltage =
+ data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE;
+ }
+
+ us_mvdd = 0;
+ if ((SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control) ||
+ (data->mclk_dpm_key_disabled))
+ us_mvdd = data->vbios_boot_state.mvdd_bootup_value;
+ else {
+ if (!fiji_populate_mvdd_value(hwmgr,
+ data->dpm_table.mclk_table.dpm_levels[0].value,
+ &vol_level))
+ us_mvdd = vol_level.Voltage;
+ }
+
+ table->MemoryACPILevel.MinMvdd =
+ PP_HOST_TO_SMC_UL(us_mvdd * VOLTAGE_SCALE);
+
+ table->MemoryACPILevel.EnabledForThrottle = 0;
+ table->MemoryACPILevel.EnabledForActivity = 0;
+ table->MemoryACPILevel.UpHyst = 0;
+ table->MemoryACPILevel.DownHyst = 100;
+ table->MemoryACPILevel.VoltageDownHyst = 0;
+ table->MemoryACPILevel.ActivityLevel =
+ PP_HOST_TO_SMC_US((uint16_t)data->mclk_activity_target);
+
+ table->MemoryACPILevel.StutterEnable = false;
+ CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MclkFrequency);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MinVoltage);
+
+ return result;
+}
+
+static int fiji_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
+ SMU73_Discrete_DpmTable *table)
+{
+ int result = -EINVAL;
+ uint8_t count;
+ struct pp_atomctrl_clock_dividers_vi dividers;
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
+ table_info->mm_dep_table;
+
+ table->VceLevelCount = (uint8_t)(mm_table->count);
+ table->VceBootLevel = 0;
+
+ for (count = 0; count < table->VceLevelCount; count++) {
+ table->VceLevel[count].Frequency = mm_table->entries[count].eclk;
+ table->VceLevel[count].MinVoltage = 0;
+ table->VceLevel[count].MinVoltage |=
+ (mm_table->entries[count].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
+ table->VceLevel[count].MinVoltage |=
+ ((mm_table->entries[count].vddc - VDDC_VDDCI_DELTA) *
+ VOLTAGE_SCALE) << VDDCI_SHIFT;
+ table->VceLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
+
+ /* retrieve divider value for VBIOS */
+ result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+ table->VceLevel[count].Frequency, &dividers);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "can not find divide id for VCE engine clock",
+ return result);
+
+ table->VceLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].Frequency);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].MinVoltage);
+ }
+ return result;
+}
+
+static int fiji_populate_smc_acp_level(struct pp_hwmgr *hwmgr,
+ SMU73_Discrete_DpmTable *table)
+{
+ int result = -EINVAL;
+ uint8_t count;
+ struct pp_atomctrl_clock_dividers_vi dividers;
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
+ table_info->mm_dep_table;
+
+ table->AcpLevelCount = (uint8_t)(mm_table->count);
+ table->AcpBootLevel = 0;
+
+ for (count = 0; count < table->AcpLevelCount; count++) {
+ table->AcpLevel[count].Frequency = mm_table->entries[count].aclk;
+ table->AcpLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
+ VOLTAGE_SCALE) << VDDC_SHIFT;
+ table->AcpLevel[count].MinVoltage |= ((mm_table->entries[count].vddc -
+ VDDC_VDDCI_DELTA) * VOLTAGE_SCALE) << VDDCI_SHIFT;
+ table->AcpLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
+
+ /* retrieve divider value for VBIOS */
+ result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+ table->AcpLevel[count].Frequency, &dividers);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "can not find divide id for engine clock", return result);
+
+ table->AcpLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->AcpLevel[count].Frequency);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->AcpLevel[count].MinVoltage);
+ }
+ return result;
+}
+
+static int fiji_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
+ SMU73_Discrete_DpmTable *table)
+{
+ int result = -EINVAL;
+ uint8_t count;
+ struct pp_atomctrl_clock_dividers_vi dividers;
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
+ table_info->mm_dep_table;
+
+ table->SamuBootLevel = 0;
+ table->SamuLevelCount = (uint8_t)(mm_table->count);
+
+ for (count = 0; count < table->SamuLevelCount; count++) {
+ /* not sure whether we need evclk or not */
+ table->SamuLevel[count].MinVoltage = 0;
+ table->SamuLevel[count].Frequency = mm_table->entries[count].samclock;
+ table->SamuLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
+ VOLTAGE_SCALE) << VDDC_SHIFT;
+ table->SamuLevel[count].MinVoltage |= ((mm_table->entries[count].vddc -
+ VDDC_VDDCI_DELTA) * VOLTAGE_SCALE) << VDDCI_SHIFT;
+ table->SamuLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
+
+ /* retrieve divider value for VBIOS */
+ result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+ table->SamuLevel[count].Frequency, &dividers);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "can not find divide id for samu clock", return result);
+
+ table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].MinVoltage);
+ }
+ return result;
+}
+
+static int fiji_populate_memory_timing_parameters(struct pp_hwmgr *hwmgr,
+ int32_t eng_clock, int32_t mem_clock,
+ struct SMU73_Discrete_MCArbDramTimingTableEntry *arb_regs)
+{
+ uint32_t dram_timing;
+ uint32_t dram_timing2;
+ uint32_t burstTime;
+ ULONG state, trrds, trrdl;
+ int result;
+
+ result = atomctrl_set_engine_dram_timings_rv770(hwmgr,
+ eng_clock, mem_clock);
+ PP_ASSERT_WITH_CODE(result == 0,
+ "Error calling VBIOS to set DRAM_TIMING.", return result);
+
+ dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
+ dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
+ burstTime = cgs_read_register(hwmgr->device, mmMC_ARB_BURST_TIME);
+
+ state = PHM_GET_FIELD(burstTime, MC_ARB_BURST_TIME, STATE0);
+ trrds = PHM_GET_FIELD(burstTime, MC_ARB_BURST_TIME, TRRDS0);
+ trrdl = PHM_GET_FIELD(burstTime, MC_ARB_BURST_TIME, TRRDL0);
+
+ arb_regs->McArbDramTiming = PP_HOST_TO_SMC_UL(dram_timing);
+ arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dram_timing2);
+ arb_regs->McArbBurstTime = (uint8_t)burstTime;
+ arb_regs->TRRDS = (uint8_t)trrds;
+ arb_regs->TRRDL = (uint8_t)trrdl;
+
+ return 0;
+}
+
+static int fiji_program_memory_timing_parameters(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
+ struct SMU73_Discrete_MCArbDramTimingTable arb_regs;
+ uint32_t i, j;
+ int result = 0;
+
+ for (i = 0; i < data->dpm_table.sclk_table.count; i++) {
+ for (j = 0; j < data->dpm_table.mclk_table.count; j++) {
+ result = fiji_populate_memory_timing_parameters(hwmgr,
+ data->dpm_table.sclk_table.dpm_levels[i].value,
+ data->dpm_table.mclk_table.dpm_levels[j].value,
+ &arb_regs.entries[i][j]);
+ if (result)
+ break;
+ }
+ }
+
+ if (!result)
+ result = smu7_copy_bytes_to_smc(
+ hwmgr,
+ smu_data->smu7_data.arb_table_start,
+ (uint8_t *)&arb_regs,
+ sizeof(SMU73_Discrete_MCArbDramTimingTable),
+ SMC_RAM_END);
+ return result;
+}
+
+static int fiji_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
+ struct SMU73_Discrete_DpmTable *table)
+{
+ int result = -EINVAL;
+ uint8_t count;
+ struct pp_atomctrl_clock_dividers_vi dividers;
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
+ table_info->mm_dep_table;
+
+ table->UvdLevelCount = (uint8_t)(mm_table->count);
+ table->UvdBootLevel = 0;
+
+ for (count = 0; count < table->UvdLevelCount; count++) {
+ table->UvdLevel[count].MinVoltage = 0;
+ table->UvdLevel[count].VclkFrequency = mm_table->entries[count].vclk;
+ table->UvdLevel[count].DclkFrequency = mm_table->entries[count].dclk;
+ table->UvdLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
+ VOLTAGE_SCALE) << VDDC_SHIFT;
+ table->UvdLevel[count].MinVoltage |= ((mm_table->entries[count].vddc -
+ VDDC_VDDCI_DELTA) * VOLTAGE_SCALE) << VDDCI_SHIFT;
+ table->UvdLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
+
+ /* retrieve divider value for VBIOS */
+ result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+ table->UvdLevel[count].VclkFrequency, &dividers);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "can not find divide id for Vclk clock", return result);
+
+ table->UvdLevel[count].VclkDivider = (uint8_t)dividers.pll_post_divider;
+
+ result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+ table->UvdLevel[count].DclkFrequency, &dividers);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "can not find divide id for Dclk clock", return result);
+
+ table->UvdLevel[count].DclkDivider = (uint8_t)dividers.pll_post_divider;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].MinVoltage);
+
+ }
+ return result;
+}
+
+static int fiji_populate_smc_boot_level(struct pp_hwmgr *hwmgr,
+ struct SMU73_Discrete_DpmTable *table)
+{
+ int result = 0;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ table->GraphicsBootLevel = 0;
+ table->MemoryBootLevel = 0;
+
+ /* find boot level from dpm table */
+ result = phm_find_boot_level(&(data->dpm_table.sclk_table),
+ data->vbios_boot_state.sclk_bootup_value,
+ (uint32_t *)&(table->GraphicsBootLevel));
+
+ result = phm_find_boot_level(&(data->dpm_table.mclk_table),
+ data->vbios_boot_state.mclk_bootup_value,
+ (uint32_t *)&(table->MemoryBootLevel));
+
+ table->BootVddc = data->vbios_boot_state.vddc_bootup_value *
+ VOLTAGE_SCALE;
+ table->BootVddci = data->vbios_boot_state.vddci_bootup_value *
+ VOLTAGE_SCALE;
+ table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value *
+ VOLTAGE_SCALE;
+
+ CONVERT_FROM_HOST_TO_SMC_US(table->BootVddc);
+ CONVERT_FROM_HOST_TO_SMC_US(table->BootVddci);
+ CONVERT_FROM_HOST_TO_SMC_US(table->BootMVdd);
+
+ return 0;
+}
+
+static int fiji_populate_smc_initailial_state(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ uint8_t count, level;
+
+ count = (uint8_t)(table_info->vdd_dep_on_sclk->count);
+ for (level = 0; level < count; level++) {
+ if (table_info->vdd_dep_on_sclk->entries[level].clk >=
+ data->vbios_boot_state.sclk_bootup_value) {
+ smu_data->smc_state_table.GraphicsBootLevel = level;
+ break;
+ }
+ }
+
+ count = (uint8_t)(table_info->vdd_dep_on_mclk->count);
+ for (level = 0; level < count; level++) {
+ if (table_info->vdd_dep_on_mclk->entries[level].clk >=
+ data->vbios_boot_state.mclk_bootup_value) {
+ smu_data->smc_state_table.MemoryBootLevel = level;
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int fiji_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
+{
+ uint32_t ro, efuse, efuse2, clock_freq, volt_without_cks,
+ volt_with_cks, value;
+ uint16_t clock_freq_u16;
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
+ uint8_t type, i, j, cks_setting, stretch_amount, stretch_amount2,
+ volt_offset = 0;
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
+ table_info->vdd_dep_on_sclk;
+
+ stretch_amount = (uint8_t)table_info->cac_dtp_table->usClockStretchAmount;
+
+ /* Read SMU_EFUSE to calculate RO and determine
+ * whether the part is SS or FF; if RO >= 1660 MHz, the part is FF.
+ */
+ efuse = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixSMU_EFUSE_0 + (146 * 4));
+ efuse2 = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixSMU_EFUSE_0 + (148 * 4));
+ efuse &= 0xFF000000;
+ efuse = efuse >> 24;
+ efuse2 &= 0xF;
+
+ if (efuse2 == 1)
+ ro = (2300 - 1350) * efuse / 255 + 1350;
+ else
+ ro = (2500 - 1000) * efuse / 255 + 1000;
+
+ if (ro >= 1660)
+ type = 0;
+ else
+ type = 1;
+
+ /* Populate Stretch amount */
+ smu_data->smc_state_table.ClockStretcherAmount = stretch_amount;
+
+ /* Populate Sclk_CKS_masterEn0_7 and Sclk_voltageOffset */
+ for (i = 0; i < sclk_table->count; i++) {
+ smu_data->smc_state_table.Sclk_CKS_masterEn0_7 |=
+ sclk_table->entries[i].cks_enable << i;
+ volt_without_cks = (uint32_t)((14041 *
+ (sclk_table->entries[i].clk/100) / 10000 + 3571 + 75 - ro) * 1000 /
+ (4026 - (13924 * (sclk_table->entries[i].clk/100) / 10000)));
+ volt_with_cks = (uint32_t)((13946 *
+ (sclk_table->entries[i].clk/100) / 10000 + 3320 + 45 - ro) * 1000 /
+ (3664 - (11454 * (sclk_table->entries[i].clk/100) / 10000)));
+ if (volt_without_cks >= volt_with_cks)
+ volt_offset = (uint8_t)(((volt_without_cks - volt_with_cks +
+ sclk_table->entries[i].cks_voffset) * 100 / 625) + 1);
+ smu_data->smc_state_table.Sclk_voltageOffset[i] = volt_offset;
+ }
+
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
+ STRETCH_ENABLE, 0x0);
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
+ masterReset, 0x1);
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
+ staticEnable, 0x1);
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
+ masterReset, 0x0);
+
+ /* Populate CKS Lookup Table */
+ if (stretch_amount == 1 || stretch_amount == 2 || stretch_amount == 5)
+ stretch_amount2 = 0;
+ else if (stretch_amount == 3 || stretch_amount == 4)
+ stretch_amount2 = 1;
+ else {
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_ClockStretcher);
+ PP_ASSERT_WITH_CODE(false,
+ "Stretch Amount in PPTable not supported\n",
+ return -EINVAL);
+ }
+
+ value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixPWR_CKS_CNTL);
+ value &= 0xFFC2FF87;
+ smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].minFreq =
+ fiji_clock_stretcher_lookup_table[stretch_amount2][0];
+ smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].maxFreq =
+ fiji_clock_stretcher_lookup_table[stretch_amount2][1];
+ clock_freq_u16 = (uint16_t)(PP_SMC_TO_HOST_UL(smu_data->smc_state_table.
+ GraphicsLevel[smu_data->smc_state_table.GraphicsDpmLevelCount - 1].
+ SclkFrequency) / 100);
+ if (fiji_clock_stretcher_lookup_table[stretch_amount2][0] <
+ clock_freq_u16 &&
+ fiji_clock_stretcher_lookup_table[stretch_amount2][1] >
+ clock_freq_u16) {
+ /* Program PWR_CKS_CNTL. CKS_USE_FOR_LOW_FREQ */
+ value |= (fiji_clock_stretcher_lookup_table[stretch_amount2][3]) << 16;
+ /* Program PWR_CKS_CNTL. CKS_LDO_REFSEL */
+ value |= (fiji_clock_stretcher_lookup_table[stretch_amount2][2]) << 18;
+ /* Program PWR_CKS_CNTL. CKS_STRETCH_AMOUNT */
+ value |= (fiji_clock_stretch_amount_conversion
+ [fiji_clock_stretcher_lookup_table[stretch_amount2][3]]
+ [stretch_amount]) << 3;
+ }
+ CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.CKS_LOOKUPTable.
+ CKS_LOOKUPTableEntry[0].minFreq);
+ CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.CKS_LOOKUPTable.
+ CKS_LOOKUPTableEntry[0].maxFreq);
+ smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].setting =
+ fiji_clock_stretcher_lookup_table[stretch_amount2][2] & 0x7F;
+ smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].setting |=
+ (fiji_clock_stretcher_lookup_table[stretch_amount2][3]) << 7;
+
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixPWR_CKS_CNTL, value);
+
+ /* Populate DDT Lookup Table */
+ for (i = 0; i < 4; i++) {
+ /* Assign the minimum and maximum VID stored
+ * in the last row of Clock Stretcher Voltage Table.
+ */
+ smu_data->smc_state_table.ClockStretcherDataTable.
+ ClockStretcherDataTableEntry[i].minVID =
+ (uint8_t) fiji_clock_stretcher_ddt_table[type][i][2];
+ smu_data->smc_state_table.ClockStretcherDataTable.
+ ClockStretcherDataTableEntry[i].maxVID =
+ (uint8_t) fiji_clock_stretcher_ddt_table[type][i][3];
+ /* Loop through each SCLK and check whether its frequency
+ * lies within the clock stretcher's frequency range.
+ */
+ for (j = 0; j < smu_data->smc_state_table.GraphicsDpmLevelCount; j++) {
+ cks_setting = 0;
+ clock_freq = PP_SMC_TO_HOST_UL(
+ smu_data->smc_state_table.GraphicsLevel[j].SclkFrequency);
+ /* Check the allowed frequency against sclk level[j].
+ * The sclk's endianness has already been converted,
+ * and it is in 10 kHz units,
+ * whereas the data table is in MHz units.
+ */
+ if (clock_freq >=
+ (fiji_clock_stretcher_ddt_table[type][i][0]) * 100) {
+ cks_setting |= 0x2;
+ if (clock_freq <
+ (fiji_clock_stretcher_ddt_table[type][i][1]) * 100)
+ cks_setting |= 0x1;
+ }
+ smu_data->smc_state_table.ClockStretcherDataTable.
+ ClockStretcherDataTableEntry[i].setting |= cks_setting << (j * 2);
+ }
+ CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.
+ ClockStretcherDataTable.
+ ClockStretcherDataTableEntry[i].setting);
+ }
+
+ value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL);
+ value &= 0xFFFFFFFE;
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL, value);
+
+ return 0;
+}
+
+static int fiji_populate_vr_config(struct pp_hwmgr *hwmgr,
+ struct SMU73_Discrete_DpmTable *table)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ uint16_t config;
+
+ config = VR_MERGED_WITH_VDDC;
+ table->VRConfig |= (config << VRCONF_VDDGFX_SHIFT);
+
+ /* Set Vddc Voltage Controller */
+ if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
+ config = VR_SVI2_PLANE_1;
+ table->VRConfig |= config;
+ } else {
+ PP_ASSERT_WITH_CODE(false,
+ "VDDC should be on SVI2 control in merged mode!",
+ );
+ }
+ /* Set Vddci Voltage Controller */
+ if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
+ config = VR_SVI2_PLANE_2; /* only in merged mode */
+ table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
+ } else if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
+ config = VR_SMIO_PATTERN_1;
+ table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
+ } else {
+ config = VR_STATIC_VOLTAGE;
+ table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
+ }
+ /* Set Mvdd Voltage Controller */
+ if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) {
+ config = VR_SVI2_PLANE_2;
+ table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
+ } else if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
+ config = VR_SMIO_PATTERN_2;
+ table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
+ } else {
+ config = VR_STATIC_VOLTAGE;
+ table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
+ }
+
+ return 0;
+}
+
+static int fiji_init_arb_table_index(struct pp_hwmgr *hwmgr)
+{
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
+ uint32_t tmp;
+ int result;
+
+ /* This is a read-modify-write on the first byte of the ARB table.
+ * The first byte in the SMU73_Discrete_MCArbDramTimingTable structure
+ * is the field 'current'.
+ * This solution is ugly, but we never write the whole table,
+ * only individual fields in it.
+ * In reality this field should not be in that structure
+ * but in a soft register.
+ */
+ result = smu7_read_smc_sram_dword(hwmgr,
+ smu_data->smu7_data.arb_table_start, &tmp, SMC_RAM_END);
+
+ if (result)
+ return result;
+
+ tmp &= 0x00FFFFFF;
+ tmp |= ((uint32_t)MC_CG_ARB_FREQ_F1) << 24;
+
+ return smu7_write_smc_sram_dword(hwmgr,
+ smu_data->smu7_data.arb_table_start, tmp, SMC_RAM_END);
+}
+
+static int fiji_save_default_power_profile(struct pp_hwmgr *hwmgr)
+{
+ struct fiji_smumgr *data = (struct fiji_smumgr *)(hwmgr->smu_backend);
+ struct SMU73_Discrete_GraphicsLevel *levels =
+ data->smc_state_table.GraphicsLevel;
+ unsigned min_level = 1;
+
+ hwmgr->default_gfx_power_profile.activity_threshold =
+ be16_to_cpu(levels[0].ActivityLevel);
+ hwmgr->default_gfx_power_profile.up_hyst = levels[0].UpHyst;
+ hwmgr->default_gfx_power_profile.down_hyst = levels[0].DownHyst;
+ hwmgr->default_gfx_power_profile.type = AMD_PP_GFX_PROFILE;
+
+ hwmgr->default_compute_power_profile = hwmgr->default_gfx_power_profile;
+ hwmgr->default_compute_power_profile.type = AMD_PP_COMPUTE_PROFILE;
+
+ /* Workaround compute SDMA instability: disable lowest SCLK
+ * DPM level. Optimize compute power profile: Use only highest
+ * 2 power levels (if more than 2 are available), Hysteresis:
+ * 0ms up, 5ms down
+ */
+ if (data->smc_state_table.GraphicsDpmLevelCount > 2)
+ min_level = data->smc_state_table.GraphicsDpmLevelCount - 2;
+ else if (data->smc_state_table.GraphicsDpmLevelCount == 2)
+ min_level = 1;
+ else
+ min_level = 0;
+ hwmgr->default_compute_power_profile.min_sclk =
+ be32_to_cpu(levels[min_level].SclkFrequency);
+ hwmgr->default_compute_power_profile.up_hyst = 0;
+ hwmgr->default_compute_power_profile.down_hyst = 5;
+
+ hwmgr->gfx_power_profile = hwmgr->default_gfx_power_profile;
+ hwmgr->compute_power_profile = hwmgr->default_compute_power_profile;
+
+ return 0;
+}
+
+static int fiji_setup_dpm_led_config(struct pp_hwmgr *hwmgr)
+{
+ pp_atomctrl_voltage_table param_led_dpm;
+ int result = 0;
+ u32 mask = 0;
+
+ result = atomctrl_get_voltage_table_v3(hwmgr,
+ VOLTAGE_TYPE_LEDDPM, VOLTAGE_OBJ_GPIO_LUT,
+ &param_led_dpm);
+ if (result == 0) {
+ int i, j;
+ u32 tmp = param_led_dpm.mask_low;
+
+ for (i = 0, j = 0; i < 32; i++) {
+ if (tmp & 1) {
+ mask |= (i << (8 * j));
+ if (++j >= 3)
+ break;
+ }
+ tmp >>= 1;
+ }
+ }
+ if (mask)
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_LedConfig,
+ mask);
+ return 0;
+}
+
+static int fiji_init_smc_table(struct pp_hwmgr *hwmgr)
+{
+ int result;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct SMU73_Discrete_DpmTable *table = &(smu_data->smc_state_table);
+ uint8_t i;
+ struct pp_atomctrl_gpio_pin_assignment gpio_pin;
+
+ fiji_initialize_power_tune_defaults(hwmgr);
+
+ if (SMU7_VOLTAGE_CONTROL_NONE != data->voltage_control)
+ fiji_populate_smc_voltage_tables(hwmgr, table);
+
+ table->SystemFlags = 0;
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_AutomaticDCTransition))
+ table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_StepVddc))
+ table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
+
+ if (data->is_memory_gddr5)
+ table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
+
+ if (data->ulv_supported && table_info->us_ulv_voltage_offset) {
+ result = fiji_populate_ulv_state(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize ULV state!", return result);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixCG_ULV_PARAMETER, 0x40035);
+ }
+
+ result = fiji_populate_smc_link_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize Link Level!", return result);
+
+ result = fiji_populate_all_graphic_levels(hwmgr);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize Graphics Level!", return result);
+
+ result = fiji_populate_all_memory_levels(hwmgr);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize Memory Level!", return result);
+
+ result = fiji_populate_smc_acpi_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize ACPI Level!", return result);
+
+ result = fiji_populate_smc_vce_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize VCE Level!", return result);
+
+ result = fiji_populate_smc_acp_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize ACP Level!", return result);
+
+ result = fiji_populate_smc_samu_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize SAMU Level!", return result);
+
+ /* Since only the initial state is completely set up at this point
+ * (the other states are just copies of the boot state) we only
+ * need to populate the ARB settings for the initial state.
+ */
+ result = fiji_program_memory_timing_parameters(hwmgr);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to Write ARB settings for the initial state.", return result);
+
+ result = fiji_populate_smc_uvd_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize UVD Level!", return result);
+
+ result = fiji_populate_smc_boot_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize Boot Level!", return result);
+
+ result = fiji_populate_smc_initailial_state(hwmgr);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize Boot State!", return result);
+
+ result = fiji_populate_bapm_parameters_in_dpm_table(hwmgr);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to populate BAPM Parameters!", return result);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_ClockStretcher)) {
+ result = fiji_populate_clock_stretcher_data_table(hwmgr);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to populate Clock Stretcher Data Table!",
+ return result);
+ }
+
+ table->GraphicsVoltageChangeEnable = 1;
+ table->GraphicsThermThrottleEnable = 1;
+ table->GraphicsInterval = 1;
+ table->VoltageInterval = 1;
+ table->ThermalInterval = 1;
+ table->TemperatureLimitHigh =
+ table_info->cac_dtp_table->usTargetOperatingTemp *
+ SMU7_Q88_FORMAT_CONVERSION_UNIT;
+ table->TemperatureLimitLow =
+ (table_info->cac_dtp_table->usTargetOperatingTemp - 1) *
+ SMU7_Q88_FORMAT_CONVERSION_UNIT;
+ table->MemoryVoltageChangeEnable = 1;
+ table->MemoryInterval = 1;
+ table->VoltageResponseTime = 0;
+ table->PhaseResponseTime = 0;
+ table->MemoryThermThrottleEnable = 1;
+ table->PCIeBootLinkLevel = 0; /* 0:Gen1 1:Gen2 2:Gen3 */
+ table->PCIeGenInterval = 1;
+ table->VRConfig = 0;
+
+ result = fiji_populate_vr_config(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to populate VRConfig setting!", return result);
+
+ table->ThermGpio = 17;
+ table->SclkStepSize = 0x4000;
+
+ if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_VRHOT_GPIO_PINID, &gpio_pin)) {
+ table->VRHotGpio = gpio_pin.uc_gpio_pin_bit_shift;
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_RegulatorHot);
+ } else {
+ table->VRHotGpio = SMU7_UNUSED_GPIO_PIN;
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_RegulatorHot);
+ }
+
+ if (atomctrl_get_pp_assign_pin(hwmgr, PP_AC_DC_SWITCH_GPIO_PINID,
+ &gpio_pin)) {
+ table->AcDcGpio = gpio_pin.uc_gpio_pin_bit_shift;
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_AutomaticDCTransition);
+ } else {
+ table->AcDcGpio = SMU7_UNUSED_GPIO_PIN;
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_AutomaticDCTransition);
+ }
+
+ /* Thermal Output GPIO */
+ if (atomctrl_get_pp_assign_pin(hwmgr, THERMAL_INT_OUTPUT_GPIO_PINID,
+ &gpio_pin)) {
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_ThermalOutGPIO);
+
+ table->ThermOutGpio = gpio_pin.uc_gpio_pin_bit_shift;
+
+ /* For polarity, read GPIOPAD_A with the assigned GPIO pin;
+ * since the VBIOS programs this register to set the 'inactive state',
+ * the driver can then determine the 'active state' from it and
+ * program the SMU with the correct polarity.
+ */
+ table->ThermOutPolarity = (0 == (cgs_read_register(hwmgr->device, mmGPIOPAD_A) &
+ (1 << gpio_pin.uc_gpio_pin_bit_shift))) ? 1:0;
+ table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_ONLY;
+
+ /* if required, combine VRHot/PCC with thermal out GPIO */
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_RegulatorHot) &&
+ phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_CombinePCCWithThermalSignal))
+ table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_VRHOT;
+ } else {
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_ThermalOutGPIO);
+ table->ThermOutGpio = 17;
+ table->ThermOutPolarity = 1;
+ table->ThermOutMode = SMU7_THERM_OUT_MODE_DISABLE;
+ }
+
+ for (i = 0; i < SMU73_MAX_ENTRIES_SMIO; i++)
+ table->Smio[i] = PP_HOST_TO_SMC_UL(table->Smio[i]);
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->VRConfig);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask1);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask2);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize);
+ CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh);
+ CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow);
+ CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime);
+ CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime);
+
+ /* Upload all dpm data to SMC memory (dpm level, dpm level count, etc.) */
+ result = smu7_copy_bytes_to_smc(hwmgr,
+ smu_data->smu7_data.dpm_table_start +
+ offsetof(SMU73_Discrete_DpmTable, SystemFlags),
+ (uint8_t *)&(table->SystemFlags),
+ sizeof(SMU73_Discrete_DpmTable) - 3 * sizeof(SMU73_PIDController),
+ SMC_RAM_END);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to upload dpm data to SMC memory!", return result);
+
+ result = fiji_init_arb_table_index(hwmgr);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to upload arb data to SMC memory!", return result);
+
+ result = fiji_populate_pm_fuses(hwmgr);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to populate PM fuses to SMC memory!", return result);
+
+ result = fiji_setup_dpm_led_config(hwmgr);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to setup dpm led config", return result);
+
+ fiji_save_default_power_profile(hwmgr);
+
+ return 0;
+}
+
+static int fiji_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
+{
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
+
+ SMU73_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
+ uint32_t duty100;
+ uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2;
+ uint16_t fdo_min, slope1, slope2;
+ uint32_t reference_clock;
+ int res;
+ uint64_t tmp64;
+
+ if (hwmgr->thermal_controller.fanInfo.bNoFan) {
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_MicrocodeFanControl);
+ return 0;
+ }
+
+ if (smu_data->smu7_data.fan_table_start == 0) {
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_MicrocodeFanControl);
+ return 0;
+ }
+
+ duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+ CG_FDO_CTRL1, FMAX_DUTY100);
+
+ if (duty100 == 0) {
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_MicrocodeFanControl);
+ return 0;
+ }
+
+ tmp64 = hwmgr->thermal_controller.advanceFanControlParameters.
+ usPWMMin * duty100;
+ do_div(tmp64, 10000);
+ fdo_min = (uint16_t)tmp64;
+
+ t_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usTMed -
+ hwmgr->thermal_controller.advanceFanControlParameters.usTMin;
+ t_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usTHigh -
+ hwmgr->thermal_controller.advanceFanControlParameters.usTMed;
+
+ pwm_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed -
+ hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin;
+ pwm_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh -
+ hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed;
+
+ slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
+ slope2 = (uint16_t)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
+
+ fan_table.TempMin = cpu_to_be16((50 + hwmgr->
+ thermal_controller.advanceFanControlParameters.usTMin) / 100);
+ fan_table.TempMed = cpu_to_be16((50 + hwmgr->
+ thermal_controller.advanceFanControlParameters.usTMed) / 100);
+ fan_table.TempMax = cpu_to_be16((50 + hwmgr->
+ thermal_controller.advanceFanControlParameters.usTMax) / 100);
+
+ fan_table.Slope1 = cpu_to_be16(slope1);
+ fan_table.Slope2 = cpu_to_be16(slope2);
+
+ fan_table.FdoMin = cpu_to_be16(fdo_min);
+
+ fan_table.HystDown = cpu_to_be16(hwmgr->
+ thermal_controller.advanceFanControlParameters.ucTHyst);
+
+ fan_table.HystUp = cpu_to_be16(1);
+
+ fan_table.HystSlope = cpu_to_be16(1);
+
+ fan_table.TempRespLim = cpu_to_be16(5);
+
+ reference_clock = smu7_get_xclk(hwmgr);
+
+ fan_table.RefreshPeriod = cpu_to_be32((hwmgr->
+ thermal_controller.advanceFanControlParameters.ulCycleDelay *
+ reference_clock) / 1600);
+
+ fan_table.FdoMax = cpu_to_be16((uint16_t)duty100);
+
+ fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD(
+ hwmgr->device, CGS_IND_REG__SMC,
+ CG_MULT_THERMAL_CTRL, TEMP_SEL);
+
+ res = smu7_copy_bytes_to_smc(hwmgr, smu_data->smu7_data.fan_table_start,
+ (uint8_t *)&fan_table, (uint32_t)sizeof(fan_table),
+ SMC_RAM_END);
+
+ if (!res && hwmgr->thermal_controller.
+ advanceFanControlParameters.ucMinimumPWMLimit)
+ res = smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetFanMinPwm,
+ hwmgr->thermal_controller.
+ advanceFanControlParameters.ucMinimumPWMLimit);
+
+ if (!res && hwmgr->thermal_controller.
+ advanceFanControlParameters.ulMinFanSCLKAcousticLimit)
+ res = smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetFanSclkTarget,
+ hwmgr->thermal_controller.
+ advanceFanControlParameters.ulMinFanSCLKAcousticLimit);
+
+ if (res)
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_MicrocodeFanControl);
+
+ return 0;
+}
+
+
+static int fiji_thermal_avfs_enable(struct pp_hwmgr *hwmgr)
+{
+ int ret;
+ struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
+
+ if (smu_data->avfs.avfs_btc_status != AVFS_BTC_ENABLEAVFS)
+ return 0;
+
+ ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableAvfs);
+
+ if (!ret)
+ /* If this param is not changed, this function could fire unnecessarily */
+ smu_data->avfs.avfs_btc_status = AVFS_BTC_COMPLETED_PREVIOUSLY;
+
+ return ret;
+}
+
+static int fiji_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ if (data->need_update_smu7_dpm_table &
+ (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_OD_UPDATE_MCLK))
+ return fiji_program_memory_timing_parameters(hwmgr);
+
+ return 0;
+}
+
+static int fiji_update_sclk_threshold(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
+
+ int result = 0;
+ uint32_t low_sclk_interrupt_threshold = 0;
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_SclkThrottleLowNotification)
+ && (hwmgr->gfx_arbiter.sclk_threshold !=
+ data->low_sclk_interrupt_threshold)) {
+ data->low_sclk_interrupt_threshold =
+ hwmgr->gfx_arbiter.sclk_threshold;
+ low_sclk_interrupt_threshold =
+ data->low_sclk_interrupt_threshold;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold);
+
+ result = smu7_copy_bytes_to_smc(
+ hwmgr,
+ smu_data->smu7_data.dpm_table_start +
+ offsetof(SMU73_Discrete_DpmTable,
+ LowSclkInterruptThreshold),
+ (uint8_t *)&low_sclk_interrupt_threshold,
+ sizeof(uint32_t),
+ SMC_RAM_END);
+ }
+ result = fiji_program_mem_timing_parameters(hwmgr);
+ PP_ASSERT_WITH_CODE((result == 0),
+ "Failed to program memory timing parameters!",
+ );
+ return result;
+}
+
+static uint32_t fiji_get_offsetof(uint32_t type, uint32_t member)
+{
+ switch (type) {
+ case SMU_SoftRegisters:
+ switch (member) {
+ case HandshakeDisables:
+ return offsetof(SMU73_SoftRegisters, HandshakeDisables);
+ case VoltageChangeTimeout:
+ return offsetof(SMU73_SoftRegisters, VoltageChangeTimeout);
+ case AverageGraphicsActivity:
+ return offsetof(SMU73_SoftRegisters, AverageGraphicsActivity);
+ case PreVBlankGap:
+ return offsetof(SMU73_SoftRegisters, PreVBlankGap);
+ case VBlankTimeout:
+ return offsetof(SMU73_SoftRegisters, VBlankTimeout);
+ case UcodeLoadStatus:
+ return offsetof(SMU73_SoftRegisters, UcodeLoadStatus);
+ case DRAM_LOG_ADDR_H:
+ return offsetof(SMU73_SoftRegisters, DRAM_LOG_ADDR_H);
+ case DRAM_LOG_ADDR_L:
+ return offsetof(SMU73_SoftRegisters, DRAM_LOG_ADDR_L);
+ case DRAM_LOG_PHY_ADDR_H:
+ return offsetof(SMU73_SoftRegisters, DRAM_LOG_PHY_ADDR_H);
+ case DRAM_LOG_PHY_ADDR_L:
+ return offsetof(SMU73_SoftRegisters, DRAM_LOG_PHY_ADDR_L);
+ case DRAM_LOG_BUFF_SIZE:
+ return offsetof(SMU73_SoftRegisters, DRAM_LOG_BUFF_SIZE);
+ }
+ case SMU_Discrete_DpmTable:
+ switch (member) {
+ case UvdBootLevel:
+ return offsetof(SMU73_Discrete_DpmTable, UvdBootLevel);
+ case VceBootLevel:
+ return offsetof(SMU73_Discrete_DpmTable, VceBootLevel);
+ case SamuBootLevel:
+ return offsetof(SMU73_Discrete_DpmTable, SamuBootLevel);
+ case LowSclkInterruptThreshold:
+ return offsetof(SMU73_Discrete_DpmTable, LowSclkInterruptThreshold);
+ }
+ }
+ pr_warn("can't get the offset of type %x member %x\n", type, member);
+ return 0;
+}
+
+static uint32_t fiji_get_mac_definition(uint32_t value)
+{
+ switch (value) {
+ case SMU_MAX_LEVELS_GRAPHICS:
+ return SMU73_MAX_LEVELS_GRAPHICS;
+ case SMU_MAX_LEVELS_MEMORY:
+ return SMU73_MAX_LEVELS_MEMORY;
+ case SMU_MAX_LEVELS_LINK:
+ return SMU73_MAX_LEVELS_LINK;
+ case SMU_MAX_ENTRIES_SMIO:
+ return SMU73_MAX_ENTRIES_SMIO;
+ case SMU_MAX_LEVELS_VDDC:
+ return SMU73_MAX_LEVELS_VDDC;
+ case SMU_MAX_LEVELS_VDDGFX:
+ return SMU73_MAX_LEVELS_VDDGFX;
+ case SMU_MAX_LEVELS_VDDCI:
+ return SMU73_MAX_LEVELS_VDDCI;
+ case SMU_MAX_LEVELS_MVDD:
+ return SMU73_MAX_LEVELS_MVDD;
+ }
+
+ pr_warn("can't get the mac of %x\n", value);
+ return 0;
+}
+
+
+static int fiji_update_uvd_smc_table(struct pp_hwmgr *hwmgr)
+{
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
+ uint32_t mm_boot_level_offset, mm_boot_level_value;
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+ smu_data->smc_state_table.UvdBootLevel = 0;
+ if (table_info->mm_dep_table->count > 0)
+ smu_data->smc_state_table.UvdBootLevel =
+ (uint8_t) (table_info->mm_dep_table->count - 1);
+ mm_boot_level_offset = smu_data->smu7_data.dpm_table_start + offsetof(SMU73_Discrete_DpmTable,
+ UvdBootLevel);
+ mm_boot_level_offset /= 4;
+ mm_boot_level_offset *= 4;
+ mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC, mm_boot_level_offset);
+ mm_boot_level_value &= 0x00FFFFFF;
+ mm_boot_level_value |= smu_data->smc_state_table.UvdBootLevel << 24;
+ cgs_write_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
+
+ if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_UVDDPM) ||
+ phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_StablePState))
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_UVDDPM_SetEnabledMask,
+ (uint32_t)(1 << smu_data->smc_state_table.UvdBootLevel));
+ return 0;
+}
+
+static int fiji_update_vce_smc_table(struct pp_hwmgr *hwmgr)
+{
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
+ uint32_t mm_boot_level_offset, mm_boot_level_value;
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_StablePState))
+ smu_data->smc_state_table.VceBootLevel =
+ (uint8_t) (table_info->mm_dep_table->count - 1);
+ else
+ smu_data->smc_state_table.VceBootLevel = 0;
+
+ mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
+ offsetof(SMU73_Discrete_DpmTable, VceBootLevel);
+ mm_boot_level_offset /= 4;
+ mm_boot_level_offset *= 4;
+ mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC, mm_boot_level_offset);
+ mm_boot_level_value &= 0xFF00FFFF;
+ mm_boot_level_value |= smu_data->smc_state_table.VceBootLevel << 16;
+ cgs_write_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState))
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_VCEDPM_SetEnabledMask,
+ (uint32_t)1 << smu_data->smc_state_table.VceBootLevel);
+ return 0;
+}
+
+static int fiji_update_samu_smc_table(struct pp_hwmgr *hwmgr)
+{
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
+ uint32_t mm_boot_level_offset, mm_boot_level_value;
+
+
+ smu_data->smc_state_table.SamuBootLevel = 0;
+ mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
+ offsetof(SMU73_Discrete_DpmTable, SamuBootLevel);
+
+ mm_boot_level_offset /= 4;
+ mm_boot_level_offset *= 4;
+ mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC, mm_boot_level_offset);
+ mm_boot_level_value &= 0xFFFFFF00;
+ mm_boot_level_value |= smu_data->smc_state_table.SamuBootLevel << 0;
+ cgs_write_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_StablePState))
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SAMUDPM_SetEnabledMask,
+ (uint32_t)(1 << smu_data->smc_state_table.SamuBootLevel));
+ return 0;
+}
+
+static int fiji_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type)
+{
+ switch (type) {
+ case SMU_UVD_TABLE:
+ fiji_update_uvd_smc_table(hwmgr);
+ break;
+ case SMU_VCE_TABLE:
+ fiji_update_vce_smc_table(hwmgr);
+ break;
+ case SMU_SAMU_TABLE:
+ fiji_update_samu_smc_table(hwmgr);
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static int fiji_process_firmware_header(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
+ uint32_t tmp;
+ int result;
+ bool error = false;
+
+ result = smu7_read_smc_sram_dword(hwmgr,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU73_Firmware_Header, DpmTable),
+ &tmp, SMC_RAM_END);
+
+ if (0 == result)
+ smu_data->smu7_data.dpm_table_start = tmp;
+
+ error |= (0 != result);
+
+ result = smu7_read_smc_sram_dword(hwmgr,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU73_Firmware_Header, SoftRegisters),
+ &tmp, SMC_RAM_END);
+
+ if (!result) {
+ data->soft_regs_start = tmp;
+ smu_data->smu7_data.soft_regs_start = tmp;
+ }
+
+ error |= (0 != result);
+
+ result = smu7_read_smc_sram_dword(hwmgr,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU73_Firmware_Header, mcRegisterTable),
+ &tmp, SMC_RAM_END);
+
+ if (!result)
+ smu_data->smu7_data.mc_reg_table_start = tmp;
+
+ result = smu7_read_smc_sram_dword(hwmgr,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU73_Firmware_Header, FanTable),
+ &tmp, SMC_RAM_END);
+
+ if (!result)
+ smu_data->smu7_data.fan_table_start = tmp;
+
+ error |= (0 != result);
+
+ result = smu7_read_smc_sram_dword(hwmgr,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU73_Firmware_Header, mcArbDramTimingTable),
+ &tmp, SMC_RAM_END);
+
+ if (!result)
+ smu_data->smu7_data.arb_table_start = tmp;
+
+ error |= (0 != result);
+
+ result = smu7_read_smc_sram_dword(hwmgr,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU73_Firmware_Header, Version),
+ &tmp, SMC_RAM_END);
+
+ if (!result)
+ hwmgr->microcode_version_info.SMC = tmp;
+
+ error |= (0 != result);
+
+ return error ? -1 : 0;
+}
+
+static int fiji_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
+{
+
+ /* Program additional LP registers
+ * that are no longer programmed by VBIOS
+ */
+ cgs_write_register(hwmgr->device, mmMC_SEQ_RAS_TIMING_LP,
+ cgs_read_register(hwmgr->device, mmMC_SEQ_RAS_TIMING));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_CAS_TIMING_LP,
+ cgs_read_register(hwmgr->device, mmMC_SEQ_CAS_TIMING));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2_LP,
+ cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1_LP,
+ cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0_LP,
+ cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1_LP,
+ cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_TIMING_LP,
+ cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_TIMING));
+
+ return 0;
+}
+
+static bool fiji_is_dpm_running(struct pp_hwmgr *hwmgr)
+{
+ return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device,
+ CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON))
+ ? true : false;
+}
+
+static int fiji_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr,
+ struct amd_pp_profile *request)
+{
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)
+ (hwmgr->smu_backend);
+ struct SMU73_Discrete_GraphicsLevel *levels =
+ smu_data->smc_state_table.GraphicsLevel;
+ uint32_t array = smu_data->smu7_data.dpm_table_start +
+ offsetof(SMU73_Discrete_DpmTable, GraphicsLevel);
+ uint32_t array_size = sizeof(struct SMU73_Discrete_GraphicsLevel) *
+ SMU73_MAX_LEVELS_GRAPHICS;
+ uint32_t i;
+
+ for (i = 0; i < smu_data->smc_state_table.GraphicsDpmLevelCount; i++) {
+ levels[i].ActivityLevel =
+ cpu_to_be16(request->activity_threshold);
+ levels[i].EnabledForActivity = 1;
+ levels[i].UpHyst = request->up_hyst;
+ levels[i].DownHyst = request->down_hyst;
+ }
+
+ return smu7_copy_bytes_to_smc(hwmgr, array, (uint8_t *)levels,
+ array_size, SMC_RAM_END);
+}
const struct pp_smumgr_func fiji_smu_funcs = {
.smu_init = &fiji_smu_init,
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.h
index 175bf9f8ef9c..279647772578 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.h
@@ -28,6 +28,15 @@
#include "smu7_smumgr.h"
+struct fiji_pt_defaults {
+ uint8_t SviLoadLineEn;
+ uint8_t SviLoadLineVddC;
+ uint8_t TDC_VDDC_ThrottleReleaseLimitPerc;
+ uint8_t TDC_MAWt;
+ uint8_t TdcWaterfallCtl;
+ uint8_t DTEAmbientTempBase;
+};
+
struct fiji_smumgr {
struct smu7_smumgr smu7_data;
struct SMU73_Discrete_DpmTable smc_state_table;
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.h b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.h
deleted file mode 100644
index 13c8dbbccaf2..000000000000
--- a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.h
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#ifndef _ICELAND_SMC_H
-#define _ICELAND_SMC_H
-
-#include "smumgr.h"
-
-
-int iceland_populate_all_graphic_levels(struct pp_hwmgr *hwmgr);
-int iceland_populate_all_memory_levels(struct pp_hwmgr *hwmgr);
-int iceland_init_smc_table(struct pp_hwmgr *hwmgr);
-int iceland_thermal_setup_fan_table(struct pp_hwmgr *hwmgr);
-int iceland_update_sclk_threshold(struct pp_hwmgr *hwmgr);
-uint32_t iceland_get_offsetof(uint32_t type, uint32_t member);
-uint32_t iceland_get_mac_definition(uint32_t value);
-int iceland_process_firmware_header(struct pp_hwmgr *hwmgr);
-int iceland_initialize_mc_reg_table(struct pp_hwmgr *hwmgr);
-bool iceland_is_dpm_running(struct pp_hwmgr *hwmgr);
-#endif
-
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c
index 0bf2def3b659..34128822b8fb 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c
@@ -30,64 +30,133 @@
#include "smumgr.h"
#include "iceland_smumgr.h"
-#include "smu_ucode_xfer_vi.h"
+
#include "ppsmc.h"
+
+#include "cgs_common.h"
+
+#include "smu7_dyn_defaults.h"
+#include "smu7_hwmgr.h"
+#include "hardwaremanager.h"
+#include "ppatomctrl.h"
+#include "atombios.h"
+#include "pppcielanes.h"
+#include "pp_endian.h"
+#include "processpptables.h"
+
+
#include "smu/smu_7_1_1_d.h"
#include "smu/smu_7_1_1_sh_mask.h"
-#include "cgs_common.h"
-#include "iceland_smc.h"
+#include "smu71_discrete.h"
+
+#include "smu_ucode_xfer_vi.h"
+#include "gmc/gmc_8_1_d.h"
+#include "gmc/gmc_8_1_sh_mask.h"
+#include "bif/bif_5_0_d.h"
+#include "bif/bif_5_0_sh_mask.h"
+#include "dce/dce_10_0_d.h"
+#include "dce/dce_10_0_sh_mask.h"
+
#define ICELAND_SMC_SIZE 0x20000
-static int iceland_start_smc(struct pp_smumgr *smumgr)
+#define VOLTAGE_SCALE 4
+#define POWERTUNE_DEFAULT_SET_MAX 1
+#define VOLTAGE_VID_OFFSET_SCALE1 625
+#define VOLTAGE_VID_OFFSET_SCALE2 100
+#define MC_CG_ARB_FREQ_F1 0x0b
+#define VDDC_VDDCI_DELTA 200
+
+#define DEVICE_ID_VI_ICELAND_M_6900 0x6900
+#define DEVICE_ID_VI_ICELAND_M_6901 0x6901
+#define DEVICE_ID_VI_ICELAND_M_6902 0x6902
+#define DEVICE_ID_VI_ICELAND_M_6903 0x6903
+
+static const struct iceland_pt_defaults defaults_iceland = {
+ /*
+ * sviLoadLineEn, SviLoadLineVddC, TDC_VDDC_ThrottleReleaseLimitPerc,
+ * TDC_MAWt, TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac, BAPM_TEMP_GRADIENT
+ */
+ 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000,
+ { 0x79, 0x253, 0x25D, 0xAE, 0x72, 0x80, 0x83, 0x86, 0x6F, 0xC8, 0xC9, 0xC9, 0x2F, 0x4D, 0x61 },
+ { 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 }
+};
+
+/* 35W - XT, XTL */
+static const struct iceland_pt_defaults defaults_icelandxt = {
+ /*
+ * sviLoadLineEn, SviLoadLineVddC,
+ * TDC_VDDC_ThrottleReleaseLimitPerc, TDC_MAWt,
+ * TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac,
+ * BAPM_TEMP_GRADIENT
+ */
+ 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0x0,
+ { 0xA7, 0x0, 0x0, 0xB5, 0x0, 0x0, 0x9F, 0x0, 0x0, 0xD6, 0x0, 0x0, 0xD7, 0x0, 0x0},
+ { 0x1EA, 0x0, 0x0, 0x224, 0x0, 0x0, 0x25E, 0x0, 0x0, 0x28E, 0x0, 0x0, 0x2AB, 0x0, 0x0}
+};
+
+/* 25W - PRO, LE */
+static const struct iceland_pt_defaults defaults_icelandpro = {
+ /*
+ * sviLoadLineEn, SviLoadLineVddC,
+ * TDC_VDDC_ThrottleReleaseLimitPerc, TDC_MAWt,
+ * TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac,
+ * BAPM_TEMP_GRADIENT
+ */
+ 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0x0,
+ { 0xB7, 0x0, 0x0, 0xC3, 0x0, 0x0, 0xB5, 0x0, 0x0, 0xEA, 0x0, 0x0, 0xE6, 0x0, 0x0},
+ { 0x1EA, 0x0, 0x0, 0x224, 0x0, 0x0, 0x25E, 0x0, 0x0, 0x28E, 0x0, 0x0, 0x2AB, 0x0, 0x0}
+};
+
+static int iceland_start_smc(struct pp_hwmgr *hwmgr)
{
- SMUM_WRITE_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
SMC_SYSCON_RESET_CNTL, rst_reg, 0);
return 0;
}
-static void iceland_reset_smc(struct pp_smumgr *smumgr)
+static void iceland_reset_smc(struct pp_hwmgr *hwmgr)
{
- SMUM_WRITE_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
SMC_SYSCON_RESET_CNTL,
rst_reg, 1);
}
-static void iceland_stop_smc_clock(struct pp_smumgr *smumgr)
+static void iceland_stop_smc_clock(struct pp_hwmgr *hwmgr)
{
- SMUM_WRITE_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
SMC_SYSCON_CLOCK_CNTL_0,
ck_disable, 1);
}
-static void iceland_start_smc_clock(struct pp_smumgr *smumgr)
+static void iceland_start_smc_clock(struct pp_hwmgr *hwmgr)
{
- SMUM_WRITE_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
SMC_SYSCON_CLOCK_CNTL_0,
ck_disable, 0);
}
-static int iceland_smu_start_smc(struct pp_smumgr *smumgr)
+static int iceland_smu_start_smc(struct pp_hwmgr *hwmgr)
{
/* set smc instruct start point at 0x0 */
- smu7_program_jump_on_start(smumgr);
+ smu7_program_jump_on_start(hwmgr);
/* enable smc clock */
- iceland_start_smc_clock(smumgr);
+ iceland_start_smc_clock(hwmgr);
/* de-assert reset */
- iceland_start_smc(smumgr);
+ iceland_start_smc(hwmgr);
- SMUM_WAIT_INDIRECT_FIELD(smumgr, SMC_IND, FIRMWARE_FLAGS,
+ PHM_WAIT_INDIRECT_FIELD(hwmgr, SMC_IND, FIRMWARE_FLAGS,
INTERRUPTS_ENABLED, 1);
return 0;
}
-static int iceland_upload_smc_firmware_data(struct pp_smumgr *smumgr,
+static int iceland_upload_smc_firmware_data(struct pp_hwmgr *hwmgr,
uint32_t length, const uint8_t *src,
uint32_t limit, uint32_t start_addr)
{
@@ -96,34 +165,34 @@ static int iceland_upload_smc_firmware_data(struct pp_smumgr *smumgr,
PP_ASSERT_WITH_CODE((limit >= byte_count), "SMC address is beyond the SMC RAM area.", return -EINVAL);
- cgs_write_register(smumgr->device, mmSMC_IND_INDEX_0, start_addr);
- SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 1);
+ cgs_write_register(hwmgr->device, mmSMC_IND_INDEX_0, start_addr);
+ PHM_WRITE_FIELD(hwmgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 1);
while (byte_count >= 4) {
data = src[0] * 0x1000000 + src[1] * 0x10000 + src[2] * 0x100 + src[3];
- cgs_write_register(smumgr->device, mmSMC_IND_DATA_0, data);
+ cgs_write_register(hwmgr->device, mmSMC_IND_DATA_0, data);
src += 4;
byte_count -= 4;
}
- SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
+ PHM_WRITE_FIELD(hwmgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
- PP_ASSERT_WITH_CODE((0 == byte_count), "SMC size must be dividable by 4.", return -EINVAL);
+ PP_ASSERT_WITH_CODE((0 == byte_count), "SMC size must be divisible by 4.", return -EINVAL);
return 0;
}
-static int iceland_smu_upload_firmware_image(struct pp_smumgr *smumgr)
+static int iceland_smu_upload_firmware_image(struct pp_hwmgr *hwmgr)
{
uint32_t val;
struct cgs_firmware_info info = {0};
- if (smumgr == NULL || smumgr->device == NULL)
+ if (hwmgr == NULL || hwmgr->device == NULL)
return -EINVAL;
/* load SMC firmware */
- cgs_get_firmware_info(smumgr->device,
+ cgs_get_firmware_info(hwmgr->device,
smu7_convert_fw_type_to_cgs(UCODE_ID_SMU), &info);
if (info.image_size & 3) {
@@ -137,68 +206,61 @@ static int iceland_smu_upload_firmware_image(struct pp_smumgr *smumgr)
}
/* wait for smc boot up */
- SMUM_WAIT_INDIRECT_FIELD_UNEQUAL(smumgr, SMC_IND,
+ PHM_WAIT_INDIRECT_FIELD_UNEQUAL(hwmgr, SMC_IND,
RCU_UC_EVENTS, boot_seq_done, 0);
/* clear firmware interrupt enable flag */
- val = cgs_read_ind_register(smumgr->device, CGS_IND_REG__SMC,
+ val = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
ixSMC_SYSCON_MISC_CNTL);
- cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC,
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
ixSMC_SYSCON_MISC_CNTL, val | 1);
/* stop smc clock */
- iceland_stop_smc_clock(smumgr);
+ iceland_stop_smc_clock(hwmgr);
/* reset smc */
- iceland_reset_smc(smumgr);
- iceland_upload_smc_firmware_data(smumgr, info.image_size,
+ iceland_reset_smc(hwmgr);
+ iceland_upload_smc_firmware_data(hwmgr, info.image_size,
(uint8_t *)info.kptr, ICELAND_SMC_SIZE,
info.ucode_start_address);
return 0;
}
-static int iceland_request_smu_load_specific_fw(struct pp_smumgr *smumgr,
+static int iceland_request_smu_load_specific_fw(struct pp_hwmgr *hwmgr,
uint32_t firmwareType)
{
return 0;
}
-static int iceland_start_smu(struct pp_smumgr *smumgr)
+static int iceland_start_smu(struct pp_hwmgr *hwmgr)
{
int result;
- result = iceland_smu_upload_firmware_image(smumgr);
+ result = iceland_smu_upload_firmware_image(hwmgr);
if (result)
return result;
- result = iceland_smu_start_smc(smumgr);
+ result = iceland_smu_start_smc(hwmgr);
if (result)
return result;
- if (!smu7_is_smc_ram_running(smumgr)) {
+ if (!smu7_is_smc_ram_running(hwmgr)) {
pr_info("smu not running, upload firmware again \n");
- result = iceland_smu_upload_firmware_image(smumgr);
+ result = iceland_smu_upload_firmware_image(hwmgr);
if (result)
return result;
- result = iceland_smu_start_smc(smumgr);
+ result = iceland_smu_start_smc(hwmgr);
if (result)
return result;
}
- result = smu7_request_smu_load_fw(smumgr);
+ result = smu7_request_smu_load_fw(hwmgr);
return result;
}
-/**
- * Write a 32bit value to the SMC SRAM space.
- * ALL PARAMETERS ARE IN HOST BYTE ORDER.
- * @param smumgr the address of the powerplay hardware manager.
- * @param smcAddress the address in the SMC RAM to access.
- * @param value to write to the SMC SRAM.
- */
-static int iceland_smu_init(struct pp_smumgr *smumgr)
+static int iceland_smu_init(struct pp_hwmgr *hwmgr)
{
int i;
struct iceland_smumgr *iceland_priv = NULL;
@@ -208,9 +270,9 @@ static int iceland_smu_init(struct pp_smumgr *smumgr)
if (iceland_priv == NULL)
return -ENOMEM;
- smumgr->backend = iceland_priv;
+ hwmgr->smu_backend = iceland_priv;
- if (smu7_init(smumgr))
+ if (smu7_init(hwmgr))
return -EINVAL;
for (i = 0; i < SMU71_MAX_LEVELS_GRAPHICS; i++)
@@ -219,6 +281,2413 @@ static int iceland_smu_init(struct pp_smumgr *smumgr)
return 0;
}
+
+static void iceland_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
+{
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
+ struct cgs_system_info sys_info = {0};
+ uint32_t dev_id;
+
+ sys_info.size = sizeof(struct cgs_system_info);
+ sys_info.info_id = CGS_SYSTEM_INFO_PCIE_DEV;
+ cgs_query_system_info(hwmgr->device, &sys_info);
+ dev_id = (uint32_t)sys_info.value;
+
+ switch (dev_id) {
+ case DEVICE_ID_VI_ICELAND_M_6900:
+ case DEVICE_ID_VI_ICELAND_M_6903:
+ smu_data->power_tune_defaults = &defaults_icelandxt;
+ break;
+
+ case DEVICE_ID_VI_ICELAND_M_6901:
+ case DEVICE_ID_VI_ICELAND_M_6902:
+ smu_data->power_tune_defaults = &defaults_icelandpro;
+ break;
+ default:
+ smu_data->power_tune_defaults = &defaults_iceland;
+ pr_warn("Unknown V.I. Device ID.\n");
+ break;
+ }
+ return;
+}
+
+static int iceland_populate_svi_load_line(struct pp_hwmgr *hwmgr)
+{
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
+ const struct iceland_pt_defaults *defaults = smu_data->power_tune_defaults;
+
+ smu_data->power_tune_table.SviLoadLineEn = defaults->svi_load_line_en;
+ smu_data->power_tune_table.SviLoadLineVddC = defaults->svi_load_line_vddc;
+ smu_data->power_tune_table.SviLoadLineTrimVddC = 3;
+ smu_data->power_tune_table.SviLoadLineOffsetVddC = 0;
+
+ return 0;
+}
+
+static int iceland_populate_tdc_limit(struct pp_hwmgr *hwmgr)
+{
+ uint16_t tdc_limit;
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
+ const struct iceland_pt_defaults *defaults = smu_data->power_tune_defaults;
+
+ tdc_limit = (uint16_t)(hwmgr->dyn_state.cac_dtp_table->usTDC * 256);
+ smu_data->power_tune_table.TDC_VDDC_PkgLimit =
+ CONVERT_FROM_HOST_TO_SMC_US(tdc_limit);
+ smu_data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
+ defaults->tdc_vddc_throttle_release_limit_perc;
+ smu_data->power_tune_table.TDC_MAWt = defaults->tdc_mawt;
+
+ return 0;
+}
+
+static int iceland_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
+{
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
+ const struct iceland_pt_defaults *defaults = smu_data->power_tune_defaults;
+ uint32_t temp;
+
+ if (smu7_read_smc_sram_dword(hwmgr,
+ fuse_table_offset +
+ offsetof(SMU71_Discrete_PmFuses, TdcWaterfallCtl),
+ (uint32_t *)&temp, SMC_RAM_END))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to read PmFuses.DW6 (SviLoadLineEn) from SMC Failed!",
+ return -EINVAL);
+ else
+ smu_data->power_tune_table.TdcWaterfallCtl = defaults->tdc_waterfall_ctl;
+
+ return 0;
+}
+
+static int iceland_populate_temperature_scaler(struct pp_hwmgr *hwmgr)
+{
+ return 0;
+}
+
+static int iceland_populate_gnb_lpml(struct pp_hwmgr *hwmgr)
+{
+ int i;
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
+
+ /* Currently not used. Set all to zero. */
+ for (i = 0; i < 8; i++)
+ smu_data->power_tune_table.GnbLPML[i] = 0;
+
+ return 0;
+}
+
+static int iceland_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr)
+{
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
+ uint16_t HiSidd = smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd;
+ uint16_t LoSidd = smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd;
+ struct phm_cac_tdp_table *cac_table = hwmgr->dyn_state.cac_dtp_table;
+
+ HiSidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256);
+ LoSidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256);
+
+ smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd =
+ CONVERT_FROM_HOST_TO_SMC_US(HiSidd);
+ smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd =
+ CONVERT_FROM_HOST_TO_SMC_US(LoSidd);
+
+ return 0;
+}
+
+static int iceland_populate_bapm_vddc_vid_sidd(struct pp_hwmgr *hwmgr)
+{
+ int i;
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
+ uint8_t *hi_vid = smu_data->power_tune_table.BapmVddCVidHiSidd;
+ uint8_t *lo_vid = smu_data->power_tune_table.BapmVddCVidLoSidd;
+
+ PP_ASSERT_WITH_CODE(NULL != hwmgr->dyn_state.cac_leakage_table,
+ "The CAC Leakage table does not exist!", return -EINVAL);
+ PP_ASSERT_WITH_CODE(hwmgr->dyn_state.cac_leakage_table->count <= 8,
+ "There should never be more than 8 entries for BapmVddcVid!!!", return -EINVAL);
+ PP_ASSERT_WITH_CODE(hwmgr->dyn_state.cac_leakage_table->count == hwmgr->dyn_state.vddc_dependency_on_sclk->count,
+ "CACLeakageTable->count and VddcDependencyOnSCLk->count not equal", return -EINVAL);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_EVV)) {
+ for (i = 0; (uint32_t) i < hwmgr->dyn_state.cac_leakage_table->count; i++) {
+ lo_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Vddc1);
+ hi_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Vddc2);
+ }
+ } else {
+ PP_ASSERT_WITH_CODE(false, "Iceland should always support EVV", return -EINVAL);
+ }
+
+ return 0;
+}
+
+static int iceland_populate_vddc_vid(struct pp_hwmgr *hwmgr)
+{
+ int i;
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
+ uint8_t *vid = smu_data->power_tune_table.VddCVid;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ PP_ASSERT_WITH_CODE(data->vddc_voltage_table.count <= 8,
+ "There should never be more than 8 entries for VddcVid!!!",
+ return -EINVAL);
+
+ for (i = 0; i < (int)data->vddc_voltage_table.count; i++) {
+ vid[i] = convert_to_vid(data->vddc_voltage_table.entries[i].value);
+ }
+
+ return 0;
+}
+
+
+
+static int iceland_populate_pm_fuses(struct pp_hwmgr *hwmgr)
+{
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
+ uint32_t pm_fuse_table_offset;
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_PowerContainment)) {
+ if (smu7_read_smc_sram_dword(hwmgr,
+ SMU71_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU71_Firmware_Header, PmFuseTable),
+ &pm_fuse_table_offset, SMC_RAM_END))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to get pm_fuse_table_offset Failed!",
+ return -EINVAL);
+
+ /* DW0 - DW3 */
+ if (iceland_populate_bapm_vddc_vid_sidd(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate bapm vddc vid Failed!",
+ return -EINVAL);
+
+ /* DW4 - DW5 */
+ if (iceland_populate_vddc_vid(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate vddc vid Failed!",
+ return -EINVAL);
+
+ /* DW6 */
+ if (iceland_populate_svi_load_line(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate SviLoadLine Failed!",
+ return -EINVAL);
+ /* DW7 */
+ if (iceland_populate_tdc_limit(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate TDCLimit Failed!", return -EINVAL);
+ /* DW8 */
+ if (iceland_populate_dw8(hwmgr, pm_fuse_table_offset))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate TdcWaterfallCtl, "
+ "LPMLTemperature Min and Max Failed!",
+ return -EINVAL);
+
+ /* DW9-DW12 */
+ if (0 != iceland_populate_temperature_scaler(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate LPMLTemperatureScaler Failed!",
+ return -EINVAL);
+
+ /* DW13-DW16 */
+ if (iceland_populate_gnb_lpml(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate GnbLPML Failed!",
+ return -EINVAL);
+
+ /* DW18 */
+ if (iceland_populate_bapm_vddc_base_leakage_sidd(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate BapmVddCBaseLeakage Hi and Lo Sidd Failed!",
+ return -EINVAL);
+
+ if (smu7_copy_bytes_to_smc(hwmgr, pm_fuse_table_offset,
+ (uint8_t *)&smu_data->power_tune_table,
+ sizeof(struct SMU71_Discrete_PmFuses), SMC_RAM_END))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to download PmFuseTable Failed!",
+ return -EINVAL);
+ }
+ return 0;
+}
+
+static int iceland_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr,
+ struct phm_clock_voltage_dependency_table *allowed_clock_voltage_table,
+ uint32_t clock, uint32_t *vol)
+{
+ uint32_t i = 0;
+
+	/* clock/voltage dependency table is empty */
+ if (allowed_clock_voltage_table->count == 0)
+ return -EINVAL;
+
+ for (i = 0; i < allowed_clock_voltage_table->count; i++) {
+		/* find the first sclk greater than or equal to the requested clock */
+ if (allowed_clock_voltage_table->entries[i].clk >= clock) {
+ *vol = allowed_clock_voltage_table->entries[i].v;
+ return 0;
+ }
+ }
+
+	/* the requested sclk is higher than the maximum sclk in the dependency table */
+ *vol = allowed_clock_voltage_table->entries[i - 1].v;
+
+ return 0;
+}
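For reference, the lookup above returns the voltage of the first dependency entry whose clock is at least the requested clock, and clamps to the last entry otherwise. A minimal standalone sketch of that behaviour, using made-up table values (not taken from any real pptable):

	#include <stdint.h>
	#include <stdio.h>

	struct dep_entry { uint32_t clk; uint32_t v; };

	/* Mirrors the loop in iceland_get_dependency_volt_by_clk() for a flat array. */
	static uint32_t dep_volt_by_clk(const struct dep_entry *tab, uint32_t count, uint32_t clock)
	{
		uint32_t i;

		for (i = 0; i < count; i++)
			if (tab[i].clk >= clock)
				return tab[i].v;	/* first level that is fast enough */

		return tab[count - 1].v;		/* clamp to the highest level */
	}

	int main(void)
	{
		const struct dep_entry tab[] = { { 30000, 900 }, { 60000, 1000 }, { 90000, 1100 } };

		/* 45000 (10 kHz units) falls between levels 0 and 1, so level 1's voltage is used */
		printf("%u\n", dep_volt_by_clk(tab, 3, 45000));
		return 0;
	}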
+
+static int iceland_get_std_voltage_value_sidd(struct pp_hwmgr *hwmgr,
+ pp_atomctrl_voltage_table_entry *tab, uint16_t *hi,
+ uint16_t *lo)
+{
+ uint16_t v_index;
+ bool vol_found = false;
+ *hi = tab->value * VOLTAGE_SCALE;
+ *lo = tab->value * VOLTAGE_SCALE;
+
+ /* SCLK/VDDC Dependency Table has to exist. */
+ PP_ASSERT_WITH_CODE(NULL != hwmgr->dyn_state.vddc_dependency_on_sclk,
+ "The SCLK/VDDC Dependency Table does not exist.\n",
+ return -EINVAL);
+
+ if (NULL == hwmgr->dyn_state.cac_leakage_table) {
+ pr_warn("CAC Leakage Table does not exist, using vddc.\n");
+ return 0;
+ }
+
+ /*
+ * Since voltage in the sclk/vddc dependency table is not
+ * necessarily in ascending order because of ELB voltage
+ * patching, loop through entire list to find exact voltage.
+ */
+ for (v_index = 0; (uint32_t)v_index < hwmgr->dyn_state.vddc_dependency_on_sclk->count; v_index++) {
+ if (tab->value == hwmgr->dyn_state.vddc_dependency_on_sclk->entries[v_index].v) {
+ vol_found = true;
+ if ((uint32_t)v_index < hwmgr->dyn_state.cac_leakage_table->count) {
+ *lo = hwmgr->dyn_state.cac_leakage_table->entries[v_index].Vddc * VOLTAGE_SCALE;
+ *hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[v_index].Leakage * VOLTAGE_SCALE);
+ } else {
+ pr_warn("Index from SCLK/VDDC Dependency Table exceeds the CAC Leakage Table index, using maximum index from CAC table.\n");
+ *lo = hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Vddc * VOLTAGE_SCALE;
+ *hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Leakage * VOLTAGE_SCALE);
+ }
+ break;
+ }
+ }
+
+ /*
+ * If voltage is not found in the first pass, loop again to
+ * find the best match, equal or higher value.
+ */
+ if (!vol_found) {
+ for (v_index = 0; (uint32_t)v_index < hwmgr->dyn_state.vddc_dependency_on_sclk->count; v_index++) {
+ if (tab->value <= hwmgr->dyn_state.vddc_dependency_on_sclk->entries[v_index].v) {
+ vol_found = true;
+ if ((uint32_t)v_index < hwmgr->dyn_state.cac_leakage_table->count) {
+ *lo = hwmgr->dyn_state.cac_leakage_table->entries[v_index].Vddc * VOLTAGE_SCALE;
+ *hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[v_index].Leakage) * VOLTAGE_SCALE;
+ } else {
+ pr_warn("Index from SCLK/VDDC Dependency Table exceeds the CAC Leakage Table index in second look up, using maximum index from CAC table.");
+ *lo = hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Vddc * VOLTAGE_SCALE;
+ *hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Leakage * VOLTAGE_SCALE);
+ }
+ break;
+ }
+ }
+
+ if (!vol_found)
+ pr_warn("Unable to get std_vddc from SCLK/VDDC Dependency Table, using vddc.\n");
+ }
+
+ return 0;
+}
+
+static int iceland_populate_smc_voltage_table(struct pp_hwmgr *hwmgr,
+ pp_atomctrl_voltage_table_entry *tab,
+ SMU71_Discrete_VoltageLevel *smc_voltage_tab)
+{
+ int result;
+
+ result = iceland_get_std_voltage_value_sidd(hwmgr, tab,
+ &smc_voltage_tab->StdVoltageHiSidd,
+ &smc_voltage_tab->StdVoltageLoSidd);
+ if (0 != result) {
+ smc_voltage_tab->StdVoltageHiSidd = tab->value * VOLTAGE_SCALE;
+ smc_voltage_tab->StdVoltageLoSidd = tab->value * VOLTAGE_SCALE;
+ }
+
+ smc_voltage_tab->Voltage = PP_HOST_TO_SMC_US(tab->value * VOLTAGE_SCALE);
+ CONVERT_FROM_HOST_TO_SMC_US(smc_voltage_tab->StdVoltageHiSidd);
+	CONVERT_FROM_HOST_TO_SMC_US(smc_voltage_tab->StdVoltageLoSidd);
+
+ return 0;
+}
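A note on the conversion macros used above: the SMC consumes these tables in its own byte order, so PP_HOST_TO_SMC_US/CONVERT_FROM_HOST_TO_SMC_US repack 16-bit host values before upload. As a rough sketch only, assuming the macros amount to a 16-bit host-to-big-endian swap on a little-endian host:

	#include <stdint.h>
	#include <stdio.h>

	/* Hypothetical stand-in for PP_HOST_TO_SMC_US on a little-endian host. */
	static uint16_t host_to_smc_us(uint16_t v)
	{
		return (uint16_t)((v << 8) | (v >> 8));	/* byte swap */
	}

	int main(void)
	{
		uint16_t host_val = 0x1234;	/* arbitrary host-order value */

		printf("host 0x%04x -> smc 0x%04x\n", host_val, host_to_smc_us(host_val));
		return 0;
	}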
+
+static int iceland_populate_smc_vddc_table(struct pp_hwmgr *hwmgr,
+ SMU71_Discrete_DpmTable *table)
+{
+ unsigned int count;
+ int result;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ table->VddcLevelCount = data->vddc_voltage_table.count;
+ for (count = 0; count < table->VddcLevelCount; count++) {
+ result = iceland_populate_smc_voltage_table(hwmgr,
+ &(data->vddc_voltage_table.entries[count]),
+ &(table->VddcLevel[count]));
+ PP_ASSERT_WITH_CODE(0 == result, "do not populate SMC VDDC voltage table", return -EINVAL);
+
+ /* GPIO voltage control */
+ if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->voltage_control)
+ table->VddcLevel[count].Smio |= data->vddc_voltage_table.entries[count].smio_low;
+ else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control)
+ table->VddcLevel[count].Smio = 0;
+ }
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->VddcLevelCount);
+
+ return 0;
+}
+
+static int iceland_populate_smc_vdd_ci_table(struct pp_hwmgr *hwmgr,
+ SMU71_Discrete_DpmTable *table)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ uint32_t count;
+ int result;
+
+ table->VddciLevelCount = data->vddci_voltage_table.count;
+
+ for (count = 0; count < table->VddciLevelCount; count++) {
+ result = iceland_populate_smc_voltage_table(hwmgr,
+ &(data->vddci_voltage_table.entries[count]),
+ &(table->VddciLevel[count]));
+ PP_ASSERT_WITH_CODE(result == 0, "do not populate SMC VDDCI voltage table", return -EINVAL);
+ if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
+ table->VddciLevel[count].Smio |= data->vddci_voltage_table.entries[count].smio_low;
+ else
+ table->VddciLevel[count].Smio |= 0;
+ }
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->VddciLevelCount);
+
+ return 0;
+}
+
+static int iceland_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
+ SMU71_Discrete_DpmTable *table)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ uint32_t count;
+ int result;
+
+ table->MvddLevelCount = data->mvdd_voltage_table.count;
+
+	for (count = 0; count < table->MvddLevelCount; count++) {
+ result = iceland_populate_smc_voltage_table(hwmgr,
+ &(data->mvdd_voltage_table.entries[count]),
+ &table->MvddLevel[count]);
+ PP_ASSERT_WITH_CODE(result == 0, "do not populate SMC mvdd voltage table", return -EINVAL);
+ if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control)
+ table->MvddLevel[count].Smio |= data->mvdd_voltage_table.entries[count].smio_low;
+ else
+ table->MvddLevel[count].Smio |= 0;
+ }
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->MvddLevelCount);
+
+ return 0;
+}
+
+
+static int iceland_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr,
+ SMU71_Discrete_DpmTable *table)
+{
+ int result;
+
+ result = iceland_populate_smc_vddc_table(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "can not populate VDDC voltage table to SMC", return -EINVAL);
+
+ result = iceland_populate_smc_vdd_ci_table(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "can not populate VDDCI voltage table to SMC", return -EINVAL);
+
+ result = iceland_populate_smc_mvdd_table(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "can not populate MVDD voltage table to SMC", return -EINVAL);
+
+ return 0;
+}
+
+static int iceland_populate_ulv_level(struct pp_hwmgr *hwmgr,
+ struct SMU71_Discrete_Ulv *state)
+{
+ uint32_t voltage_response_time, ulv_voltage;
+ int result;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ state->CcPwrDynRm = 0;
+ state->CcPwrDynRm1 = 0;
+
+ result = pp_tables_get_response_times(hwmgr, &voltage_response_time, &ulv_voltage);
+ PP_ASSERT_WITH_CODE((0 == result), "can not get ULV voltage value", return result;);
+
+ if (ulv_voltage == 0) {
+ data->ulv_supported = false;
+ return 0;
+ }
+
+ if (data->voltage_control != SMU7_VOLTAGE_CONTROL_BY_SVID2) {
+ /* use minimum voltage if ulv voltage in pptable is bigger than minimum voltage */
+ if (ulv_voltage > hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v)
+ state->VddcOffset = 0;
+ else
+			/* Used in SMIO mode; not implemented for now. This is a backup only for CI. */
+ state->VddcOffset = (uint16_t)(hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v - ulv_voltage);
+ } else {
+ /* use minimum voltage if ulv voltage in pptable is bigger than minimum voltage */
+ if (ulv_voltage > hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v)
+ state->VddcOffsetVid = 0;
+ else /* used in SVI2 Mode */
+ state->VddcOffsetVid = (uint8_t)(
+ (hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v - ulv_voltage)
+ * VOLTAGE_VID_OFFSET_SCALE2
+ / VOLTAGE_VID_OFFSET_SCALE1);
+ }
+ state->VddcPhase = 1;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm);
+ CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm1);
+ CONVERT_FROM_HOST_TO_SMC_US(state->VddcOffset);
+
+ return 0;
+}
+
+static int iceland_populate_ulv_state(struct pp_hwmgr *hwmgr,
+ SMU71_Discrete_Ulv *ulv_level)
+{
+ return iceland_populate_ulv_level(hwmgr, ulv_level);
+}
+
+static int iceland_populate_smc_link_level(struct pp_hwmgr *hwmgr, SMU71_Discrete_DpmTable *table)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct smu7_dpm_table *dpm_table = &data->dpm_table;
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
+ uint32_t i;
+
+ /* Index (dpm_table->pcie_speed_table.count) is reserved for PCIE boot level. */
+ for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) {
+ table->LinkLevel[i].PcieGenSpeed =
+ (uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value;
+ table->LinkLevel[i].PcieLaneCount =
+ (uint8_t)encode_pcie_lane_width(dpm_table->pcie_speed_table.dpm_levels[i].param1);
+ table->LinkLevel[i].EnabledForActivity =
+ 1;
+ table->LinkLevel[i].SPC =
+ (uint8_t)(data->pcie_spc_cap & 0xff);
+ table->LinkLevel[i].DownThreshold =
+ PP_HOST_TO_SMC_UL(5);
+ table->LinkLevel[i].UpThreshold =
+ PP_HOST_TO_SMC_UL(30);
+ }
+
+ smu_data->smc_state_table.LinkLevelCount =
+ (uint8_t)dpm_table->pcie_speed_table.count;
+ data->dpm_level_enable_mask.pcie_dpm_enable_mask =
+ phm_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
+
+ return 0;
+}
+
+static int iceland_calculate_sclk_params(struct pp_hwmgr *hwmgr,
+ uint32_t engine_clock, SMU71_Discrete_GraphicsLevel *sclk)
+{
+ const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ pp_atomctrl_clock_dividers_vi dividers;
+ uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL;
+ uint32_t spll_func_cntl_3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
+ uint32_t spll_func_cntl_4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
+ uint32_t cg_spll_spread_spectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
+ uint32_t cg_spll_spread_spectrum_2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
+ uint32_t reference_clock;
+ uint32_t reference_divider;
+ uint32_t fbdiv;
+ int result;
+
+ /* get the engine clock dividers for this clock value*/
+ result = atomctrl_get_engine_pll_dividers_vi(hwmgr, engine_clock, &dividers);
+
+ PP_ASSERT_WITH_CODE(result == 0,
+ "Error retrieving Engine Clock dividers from VBIOS.", return result);
+
+ /* To get FBDIV we need to multiply this by 16384 and divide it by Fref.*/
+ reference_clock = atomctrl_get_reference_clock(hwmgr);
+
+ reference_divider = 1 + dividers.uc_pll_ref_div;
+
+ /* low 14 bits is fraction and high 12 bits is divider*/
+ fbdiv = dividers.ul_fb_div.ul_fb_divider & 0x3FFFFFF;
+
+ /* SPLL_FUNC_CNTL setup*/
+ spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
+ CG_SPLL_FUNC_CNTL, SPLL_REF_DIV, dividers.uc_pll_ref_div);
+ spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
+ CG_SPLL_FUNC_CNTL, SPLL_PDIV_A, dividers.uc_pll_post_div);
+
+ /* SPLL_FUNC_CNTL_3 setup*/
+ spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3,
+ CG_SPLL_FUNC_CNTL_3, SPLL_FB_DIV, fbdiv);
+
+ /* set to use fractional accumulation*/
+ spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3,
+ CG_SPLL_FUNC_CNTL_3, SPLL_DITHEN, 1);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_EngineSpreadSpectrumSupport)) {
+ pp_atomctrl_internal_ss_info ss_info;
+
+ uint32_t vcoFreq = engine_clock * dividers.uc_pll_post_div;
+ if (0 == atomctrl_get_engine_clock_spread_spectrum(hwmgr, vcoFreq, &ss_info)) {
+ /*
+ * ss_info.speed_spectrum_percentage -- in unit of 0.01%
+ * ss_info.speed_spectrum_rate -- in unit of khz
+ */
+ /* clks = reference_clock * 10 / (REFDIV + 1) / speed_spectrum_rate / 2 */
+ uint32_t clkS = reference_clock * 5 / (reference_divider * ss_info.speed_spectrum_rate);
+
+ /* clkv = 2 * D * fbdiv / NS */
+ uint32_t clkV = 4 * ss_info.speed_spectrum_percentage * fbdiv / (clkS * 10000);
+
+ cg_spll_spread_spectrum =
+ PHM_SET_FIELD(cg_spll_spread_spectrum, CG_SPLL_SPREAD_SPECTRUM, CLKS, clkS);
+ cg_spll_spread_spectrum =
+ PHM_SET_FIELD(cg_spll_spread_spectrum, CG_SPLL_SPREAD_SPECTRUM, SSEN, 1);
+ cg_spll_spread_spectrum_2 =
+ PHM_SET_FIELD(cg_spll_spread_spectrum_2, CG_SPLL_SPREAD_SPECTRUM_2, CLKV, clkV);
+ }
+ }
+
+ sclk->SclkFrequency = engine_clock;
+ sclk->CgSpllFuncCntl3 = spll_func_cntl_3;
+ sclk->CgSpllFuncCntl4 = spll_func_cntl_4;
+ sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum;
+ sclk->SpllSpreadSpectrum2 = cg_spll_spread_spectrum_2;
+ sclk->SclkDid = (uint8_t)dividers.pll_post_divider;
+
+ return 0;
+}
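To make the CLKS/CLKV arithmetic above concrete, here is a standalone sketch that plugs in made-up reference-clock and spread-spectrum numbers (units as described in the in-code comments; none of these values come from real hardware):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t reference_clock = 2700;	/* assumed reference clock */
		uint32_t reference_divider = 2;		/* 1 + uc_pll_ref_div, assumed */
		uint32_t ss_rate = 30;			/* assumed speed_spectrum_rate */
		uint32_t ss_percentage = 200;		/* assumed 2.00%, in 0.01% units */
		uint32_t fbdiv = 0x2000;		/* assumed feedback divider */

		/* same expressions as in iceland_calculate_sclk_params() */
		uint32_t clk_s = reference_clock * 5 / (reference_divider * ss_rate);
		uint32_t clk_v = 4 * ss_percentage * fbdiv / (clk_s * 10000);

		printf("CLKS=%u CLKV=%u\n", clk_s, clk_v);
		return 0;
	}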
+
+static int iceland_populate_phase_value_based_on_sclk(struct pp_hwmgr *hwmgr,
+ const struct phm_phase_shedding_limits_table *pl,
+ uint32_t sclk, uint32_t *p_shed)
+{
+ unsigned int i;
+
+ /* use the minimum phase shedding */
+ *p_shed = 1;
+
+ for (i = 0; i < pl->count; i++) {
+ if (sclk < pl->entries[i].Sclk) {
+ *p_shed = i;
+ break;
+ }
+ }
+ return 0;
+}
+
+static int iceland_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
+ uint32_t engine_clock,
+ uint16_t sclk_activity_level_threshold,
+ SMU71_Discrete_GraphicsLevel *graphic_level)
+{
+ int result;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ result = iceland_calculate_sclk_params(hwmgr, engine_clock, graphic_level);
+
+ /* populate graphics levels*/
+ result = iceland_get_dependency_volt_by_clk(hwmgr,
+ hwmgr->dyn_state.vddc_dependency_on_sclk, engine_clock,
+ &graphic_level->MinVddc);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "can not find VDDC voltage value for VDDC \
+ engine clock dependency table", return result);
+
+ /* SCLK frequency in units of 10KHz*/
+ graphic_level->SclkFrequency = engine_clock;
+ graphic_level->MinVddcPhases = 1;
+
+ if (data->vddc_phase_shed_control)
+ iceland_populate_phase_value_based_on_sclk(hwmgr,
+ hwmgr->dyn_state.vddc_phase_shed_limits_table,
+ engine_clock,
+ &graphic_level->MinVddcPhases);
+
+ /* Indicates maximum activity level for this performance level. 50% for now*/
+ graphic_level->ActivityLevel = sclk_activity_level_threshold;
+
+ graphic_level->CcPwrDynRm = 0;
+ graphic_level->CcPwrDynRm1 = 0;
+ /* this level can be used if activity is high enough.*/
+ graphic_level->EnabledForActivity = 0;
+ /* this level can be used for throttling.*/
+ graphic_level->EnabledForThrottle = 1;
+ graphic_level->UpHyst = 0;
+ graphic_level->DownHyst = 100;
+ graphic_level->VoltageDownHyst = 0;
+ graphic_level->PowerThrottle = 0;
+
+ data->display_timing.min_clock_in_sr =
+ hwmgr->display_config.min_core_set_clock_in_sr;
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_SclkDeepSleep))
+ graphic_level->DeepSleepDivId =
+ smu7_get_sleep_divider_id_from_clock(engine_clock,
+ data->display_timing.min_clock_in_sr);
+
+ /* Default to slow, highest DPM level will be set to PPSMC_DISPLAY_WATERMARK_LOW later.*/
+ graphic_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
+
+ if (0 == result) {
+ graphic_level->MinVddc = PP_HOST_TO_SMC_UL(graphic_level->MinVddc * VOLTAGE_SCALE);
+ CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->MinVddcPhases);
+ CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SclkFrequency);
+ CONVERT_FROM_HOST_TO_SMC_US(graphic_level->ActivityLevel);
+ CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CgSpllFuncCntl3);
+ CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CgSpllFuncCntl4);
+ CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SpllSpreadSpectrum);
+ CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SpllSpreadSpectrum2);
+ CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CcPwrDynRm);
+ CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CcPwrDynRm1);
+ }
+
+ return result;
+}
+
+static int iceland_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
+ struct smu7_dpm_table *dpm_table = &data->dpm_table;
+ uint32_t level_array_adress = smu_data->smu7_data.dpm_table_start +
+ offsetof(SMU71_Discrete_DpmTable, GraphicsLevel);
+
+ uint32_t level_array_size = sizeof(SMU71_Discrete_GraphicsLevel) *
+ SMU71_MAX_LEVELS_GRAPHICS;
+
+ SMU71_Discrete_GraphicsLevel *levels = smu_data->smc_state_table.GraphicsLevel;
+
+ uint32_t i;
+ uint8_t highest_pcie_level_enabled = 0;
+ uint8_t lowest_pcie_level_enabled = 0, mid_pcie_level_enabled = 0;
+ uint8_t count = 0;
+ int result = 0;
+
+ memset(levels, 0x00, level_array_size);
+
+ for (i = 0; i < dpm_table->sclk_table.count; i++) {
+ result = iceland_populate_single_graphic_level(hwmgr,
+ dpm_table->sclk_table.dpm_levels[i].value,
+ (uint16_t)smu_data->activity_target[i],
+ &(smu_data->smc_state_table.GraphicsLevel[i]));
+ if (result != 0)
+ return result;
+
+ /* Making sure only DPM level 0-1 have Deep Sleep Div ID populated. */
+ if (i > 1)
+ smu_data->smc_state_table.GraphicsLevel[i].DeepSleepDivId = 0;
+ }
+
+ /* Only enable level 0 for now. */
+ smu_data->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1;
+
+ /* set highest level watermark to high */
+ if (dpm_table->sclk_table.count > 1)
+ smu_data->smc_state_table.GraphicsLevel[dpm_table->sclk_table.count-1].DisplayWatermark =
+ PPSMC_DISPLAY_WATERMARK_HIGH;
+
+ smu_data->smc_state_table.GraphicsDpmLevelCount =
+ (uint8_t)dpm_table->sclk_table.count;
+ data->dpm_level_enable_mask.sclk_dpm_enable_mask =
+ phm_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
+
+ while ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
+ (1 << (highest_pcie_level_enabled + 1))) != 0) {
+ highest_pcie_level_enabled++;
+ }
+
+ while ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
+ (1 << lowest_pcie_level_enabled)) == 0) {
+ lowest_pcie_level_enabled++;
+ }
+
+ while ((count < highest_pcie_level_enabled) &&
+ ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
+ (1 << (lowest_pcie_level_enabled + 1 + count))) == 0)) {
+ count++;
+ }
+
+ mid_pcie_level_enabled = (lowest_pcie_level_enabled+1+count) < highest_pcie_level_enabled ?
+ (lowest_pcie_level_enabled+1+count) : highest_pcie_level_enabled;
+
+
+ /* set pcieDpmLevel to highest_pcie_level_enabled*/
+ for (i = 2; i < dpm_table->sclk_table.count; i++) {
+ smu_data->smc_state_table.GraphicsLevel[i].pcieDpmLevel = highest_pcie_level_enabled;
+ }
+
+ /* set pcieDpmLevel to lowest_pcie_level_enabled*/
+ smu_data->smc_state_table.GraphicsLevel[0].pcieDpmLevel = lowest_pcie_level_enabled;
+
+ /* set pcieDpmLevel to mid_pcie_level_enabled*/
+ smu_data->smc_state_table.GraphicsLevel[1].pcieDpmLevel = mid_pcie_level_enabled;
+
+ /* level count will send to smc once at init smc table and never change*/
+ result = smu7_copy_bytes_to_smc(hwmgr, level_array_adress,
+ (uint8_t *)levels, (uint32_t)level_array_size,
+ SMC_RAM_END);
+
+ return result;
+}
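The three while loops above derive the lowest, highest and "mid" enabled PCIe DPM levels from the enable bitmask. A standalone sketch of the same bit scan, with a hypothetical mask where only levels 1 and 2 are enabled:

	#include <stdio.h>

	int main(void)
	{
		unsigned int mask = 0x6;	/* hypothetical pcie_dpm_enable_mask: levels 1 and 2 */
		unsigned int highest = 0, lowest = 0, count = 0, mid;

		while (mask & (1 << (highest + 1)))	/* walk up while the next bit is set */
			highest++;
		while (!(mask & (1 << lowest)))		/* walk up to the first set bit */
			lowest++;
		while (count < highest && !(mask & (1 << (lowest + 1 + count))))
			count++;
		mid = (lowest + 1 + count) < highest ? (lowest + 1 + count) : highest;

		printf("lowest=%u mid=%u highest=%u\n", lowest, mid, highest);	/* 1 2 2 */
		return 0;
	}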
+
+static int iceland_calculate_mclk_params(
+ struct pp_hwmgr *hwmgr,
+ uint32_t memory_clock,
+ SMU71_Discrete_MemoryLevel *mclk,
+ bool strobe_mode,
+ bool dllStateOn
+ )
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ uint32_t dll_cntl = data->clock_registers.vDLL_CNTL;
+ uint32_t mclk_pwrmgt_cntl = data->clock_registers.vMCLK_PWRMGT_CNTL;
+ uint32_t mpll_ad_func_cntl = data->clock_registers.vMPLL_AD_FUNC_CNTL;
+ uint32_t mpll_dq_func_cntl = data->clock_registers.vMPLL_DQ_FUNC_CNTL;
+ uint32_t mpll_func_cntl = data->clock_registers.vMPLL_FUNC_CNTL;
+ uint32_t mpll_func_cntl_1 = data->clock_registers.vMPLL_FUNC_CNTL_1;
+ uint32_t mpll_func_cntl_2 = data->clock_registers.vMPLL_FUNC_CNTL_2;
+ uint32_t mpll_ss1 = data->clock_registers.vMPLL_SS1;
+ uint32_t mpll_ss2 = data->clock_registers.vMPLL_SS2;
+
+ pp_atomctrl_memory_clock_param mpll_param;
+ int result;
+
+ result = atomctrl_get_memory_pll_dividers_si(hwmgr,
+ memory_clock, &mpll_param, strobe_mode);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Error retrieving Memory Clock Parameters from VBIOS.", return result);
+
+ /* MPLL_FUNC_CNTL setup*/
+ mpll_func_cntl = PHM_SET_FIELD(mpll_func_cntl, MPLL_FUNC_CNTL, BWCTRL, mpll_param.bw_ctrl);
+
+ /* MPLL_FUNC_CNTL_1 setup*/
+ mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1,
+ MPLL_FUNC_CNTL_1, CLKF, mpll_param.mpll_fb_divider.cl_kf);
+ mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1,
+ MPLL_FUNC_CNTL_1, CLKFRAC, mpll_param.mpll_fb_divider.clk_frac);
+ mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1,
+ MPLL_FUNC_CNTL_1, VCO_MODE, mpll_param.vco_mode);
+
+ /* MPLL_AD_FUNC_CNTL setup*/
+ mpll_ad_func_cntl = PHM_SET_FIELD(mpll_ad_func_cntl,
+ MPLL_AD_FUNC_CNTL, YCLK_POST_DIV, mpll_param.mpll_post_divider);
+
+ if (data->is_memory_gddr5) {
+ /* MPLL_DQ_FUNC_CNTL setup*/
+ mpll_dq_func_cntl = PHM_SET_FIELD(mpll_dq_func_cntl,
+ MPLL_DQ_FUNC_CNTL, YCLK_SEL, mpll_param.yclk_sel);
+ mpll_dq_func_cntl = PHM_SET_FIELD(mpll_dq_func_cntl,
+ MPLL_DQ_FUNC_CNTL, YCLK_POST_DIV, mpll_param.mpll_post_divider);
+ }
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_MemorySpreadSpectrumSupport)) {
+ /*
+ ************************************
+ Fref = Reference Frequency
+ NF = Feedback divider ratio
+ NR = Reference divider ratio
+ Fnom = Nominal VCO output frequency = Fref * NF / NR
+ Fs = Spreading Rate
+ D = Percentage down-spread / 2
+ Fint = Reference input frequency to PFD = Fref / NR
+ NS = Spreading rate divider ratio = int(Fint / (2 * Fs))
+ CLKS = NS - 1 = ISS_STEP_NUM[11:0]
+ NV = D * Fs / Fnom * 4 * ((Fnom/Fref * NR) ^ 2)
+ CLKV = 65536 * NV = ISS_STEP_SIZE[25:0]
+ *************************************
+ */
+ pp_atomctrl_internal_ss_info ss_info;
+ uint32_t freq_nom;
+ uint32_t tmp;
+ uint32_t reference_clock = atomctrl_get_mpll_reference_clock(hwmgr);
+
+ /* for GDDR5 for all modes and DDR3 */
+ if (1 == mpll_param.qdr)
+ freq_nom = memory_clock * 4 * (1 << mpll_param.mpll_post_divider);
+ else
+ freq_nom = memory_clock * 2 * (1 << mpll_param.mpll_post_divider);
+
+ /* tmp = (freq_nom / reference_clock * reference_divider) ^ 2 Note: S.I. reference_divider = 1*/
+ tmp = (freq_nom / reference_clock);
+ tmp = tmp * tmp;
+
+ if (0 == atomctrl_get_memory_clock_spread_spectrum(hwmgr, freq_nom, &ss_info)) {
+ /* ss_info.speed_spectrum_percentage -- in unit of 0.01% */
+ /* ss.Info.speed_spectrum_rate -- in unit of khz */
+ /* CLKS = reference_clock / (2 * speed_spectrum_rate * reference_divider) * 10 */
+ /* = reference_clock * 5 / speed_spectrum_rate */
+ uint32_t clks = reference_clock * 5 / ss_info.speed_spectrum_rate;
+
+ /* CLKV = 65536 * speed_spectrum_percentage / 2 * spreadSpecrumRate / freq_nom * 4 / 100000 * ((freq_nom / reference_clock) ^ 2) */
+ /* = 131 * speed_spectrum_percentage * speed_spectrum_rate / 100 * ((freq_nom / reference_clock) ^ 2) / freq_nom */
+ uint32_t clkv =
+ (uint32_t)((((131 * ss_info.speed_spectrum_percentage *
+ ss_info.speed_spectrum_rate) / 100) * tmp) / freq_nom);
+
+ mpll_ss1 = PHM_SET_FIELD(mpll_ss1, MPLL_SS1, CLKV, clkv);
+ mpll_ss2 = PHM_SET_FIELD(mpll_ss2, MPLL_SS2, CLKS, clks);
+ }
+ }
+
+ /* MCLK_PWRMGT_CNTL setup */
+ mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
+ MCLK_PWRMGT_CNTL, DLL_SPEED, mpll_param.dll_speed);
+ mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
+ MCLK_PWRMGT_CNTL, MRDCK0_PDNB, dllStateOn);
+ mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
+ MCLK_PWRMGT_CNTL, MRDCK1_PDNB, dllStateOn);
+
+
+	/* Save the result data to output memory level structure */
+ mclk->MclkFrequency = memory_clock;
+ mclk->MpllFuncCntl = mpll_func_cntl;
+ mclk->MpllFuncCntl_1 = mpll_func_cntl_1;
+ mclk->MpllFuncCntl_2 = mpll_func_cntl_2;
+ mclk->MpllAdFuncCntl = mpll_ad_func_cntl;
+ mclk->MpllDqFuncCntl = mpll_dq_func_cntl;
+ mclk->MclkPwrmgtCntl = mclk_pwrmgt_cntl;
+ mclk->DllCntl = dll_cntl;
+ mclk->MpllSs1 = mpll_ss1;
+ mclk->MpllSs2 = mpll_ss2;
+
+ return 0;
+}
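The spread-spectrum comment block above defines CLKS and CLKV in terms of the nominal VCO frequency. The sketch below walks through that arithmetic with made-up memory-clock and SS parameters (all values assumed, units as in the comments):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t memory_clock = 30000;		/* assumed, 10 kHz units */
		uint32_t post_divider = 1;		/* assumed mpll_post_divider */
		uint32_t reference_clock = 2500;	/* assumed MPLL reference clock */
		uint32_t ss_rate = 25;			/* assumed speed_spectrum_rate */
		uint32_t ss_pct = 200;			/* assumed 2.00%, in 0.01% units */
		uint32_t qdr = 1;			/* GDDR5 quad data rate */

		/* same expressions as in iceland_calculate_mclk_params() */
		uint32_t freq_nom = qdr ? memory_clock * 4 * (1 << post_divider)
					: memory_clock * 2 * (1 << post_divider);
		uint32_t tmp = (freq_nom / reference_clock) * (freq_nom / reference_clock);
		uint32_t clks = reference_clock * 5 / ss_rate;
		uint32_t clkv = (((131 * ss_pct * ss_rate) / 100) * tmp) / freq_nom;

		printf("freq_nom=%u CLKS=%u CLKV=%u\n", freq_nom, clks, clkv);
		return 0;
	}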
+
+static uint8_t iceland_get_mclk_frequency_ratio(uint32_t memory_clock,
+ bool strobe_mode)
+{
+ uint8_t mc_para_index;
+
+ if (strobe_mode) {
+ if (memory_clock < 12500) {
+ mc_para_index = 0x00;
+ } else if (memory_clock > 47500) {
+ mc_para_index = 0x0f;
+ } else {
+ mc_para_index = (uint8_t)((memory_clock - 10000) / 2500);
+ }
+ } else {
+ if (memory_clock < 65000) {
+ mc_para_index = 0x00;
+ } else if (memory_clock > 135000) {
+ mc_para_index = 0x0f;
+ } else {
+ mc_para_index = (uint8_t)((memory_clock - 60000) / 5000);
+ }
+ }
+
+ return mc_para_index;
+}
+
+static uint8_t iceland_get_ddr3_mclk_frequency_ratio(uint32_t memory_clock)
+{
+ uint8_t mc_para_index;
+
+ if (memory_clock < 10000) {
+ mc_para_index = 0;
+ } else if (memory_clock >= 80000) {
+ mc_para_index = 0x0f;
+ } else {
+ mc_para_index = (uint8_t)((memory_clock - 10000) / 5000 + 1);
+ }
+
+ return mc_para_index;
+}
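Both ratio helpers above are simple piecewise mappings from memory clock to a 4-bit index. As a worked example of the strobe-mode branch (clock values assumed to be in 10 kHz units):

	#include <stdint.h>
	#include <stdio.h>

	/* Mirrors the strobe-mode branch of iceland_get_mclk_frequency_ratio(). */
	static uint8_t mclk_ratio_strobe(uint32_t mclk)
	{
		if (mclk < 12500)
			return 0x00;
		if (mclk > 47500)
			return 0x0f;
		return (uint8_t)((mclk - 10000) / 2500);
	}

	int main(void)
	{
		/* 30000 (i.e. 300 MHz) maps to index (30000 - 10000) / 2500 = 8 */
		printf("%u\n", (unsigned)mclk_ratio_strobe(30000));
		return 0;
	}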
+
+static int iceland_populate_phase_value_based_on_mclk(struct pp_hwmgr *hwmgr, const struct phm_phase_shedding_limits_table *pl,
+ uint32_t memory_clock, uint32_t *p_shed)
+{
+ unsigned int i;
+
+ *p_shed = 1;
+
+ for (i = 0; i < pl->count; i++) {
+ if (memory_clock < pl->entries[i].Mclk) {
+ *p_shed = i;
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int iceland_populate_single_memory_level(
+ struct pp_hwmgr *hwmgr,
+ uint32_t memory_clock,
+ SMU71_Discrete_MemoryLevel *memory_level
+ )
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ int result = 0;
+ bool dll_state_on;
+ struct cgs_display_info info = {0};
+ uint32_t mclk_edc_wr_enable_threshold = 40000;
+ uint32_t mclk_edc_enable_threshold = 40000;
+ uint32_t mclk_strobe_mode_threshold = 40000;
+
+ if (hwmgr->dyn_state.vddc_dependency_on_mclk != NULL) {
+ result = iceland_get_dependency_volt_by_clk(hwmgr,
+ hwmgr->dyn_state.vddc_dependency_on_mclk, memory_clock, &memory_level->MinVddc);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "can not find MinVddc voltage value from memory VDDC voltage dependency table", return result);
+ }
+
+ if (data->vddci_control == SMU7_VOLTAGE_CONTROL_NONE) {
+ memory_level->MinVddci = memory_level->MinVddc;
+ } else if (NULL != hwmgr->dyn_state.vddci_dependency_on_mclk) {
+ result = iceland_get_dependency_volt_by_clk(hwmgr,
+ hwmgr->dyn_state.vddci_dependency_on_mclk,
+ memory_clock,
+ &memory_level->MinVddci);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "can not find MinVddci voltage value from memory VDDCI voltage dependency table", return result);
+ }
+
+ memory_level->MinVddcPhases = 1;
+
+ if (data->vddc_phase_shed_control) {
+ iceland_populate_phase_value_based_on_mclk(hwmgr, hwmgr->dyn_state.vddc_phase_shed_limits_table,
+ memory_clock, &memory_level->MinVddcPhases);
+ }
+
+ memory_level->EnabledForThrottle = 1;
+ memory_level->EnabledForActivity = 0;
+ memory_level->UpHyst = 0;
+ memory_level->DownHyst = 100;
+ memory_level->VoltageDownHyst = 0;
+
+ /* Indicates maximum activity level for this performance level.*/
+ memory_level->ActivityLevel = (uint16_t)data->mclk_activity_target;
+ memory_level->StutterEnable = 0;
+ memory_level->StrobeEnable = 0;
+ memory_level->EdcReadEnable = 0;
+ memory_level->EdcWriteEnable = 0;
+ memory_level->RttEnable = 0;
+
+ /* default set to low watermark. Highest level will be set to high later.*/
+ memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
+
+ cgs_get_active_displays_info(hwmgr->device, &info);
+ data->display_timing.num_existing_displays = info.display_count;
+
+	/* stutter mode is not supported on iceland */
+
+ /* decide strobe mode*/
+ memory_level->StrobeEnable = (mclk_strobe_mode_threshold != 0) &&
+ (memory_clock <= mclk_strobe_mode_threshold);
+
+ /* decide EDC mode and memory clock ratio*/
+ if (data->is_memory_gddr5) {
+ memory_level->StrobeRatio = iceland_get_mclk_frequency_ratio(memory_clock,
+ memory_level->StrobeEnable);
+
+ if ((mclk_edc_enable_threshold != 0) &&
+ (memory_clock > mclk_edc_enable_threshold)) {
+ memory_level->EdcReadEnable = 1;
+ }
+
+ if ((mclk_edc_wr_enable_threshold != 0) &&
+ (memory_clock > mclk_edc_wr_enable_threshold)) {
+ memory_level->EdcWriteEnable = 1;
+ }
+
+ if (memory_level->StrobeEnable) {
+ if (iceland_get_mclk_frequency_ratio(memory_clock, 1) >=
+ ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC7) >> 16) & 0xf))
+ dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 1 : 0;
+ else
+ dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC6) >> 1) & 0x1) ? 1 : 0;
+ } else
+ dll_state_on = data->dll_default_on;
+ } else {
+ memory_level->StrobeRatio =
+ iceland_get_ddr3_mclk_frequency_ratio(memory_clock);
+ dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 1 : 0;
+ }
+
+ result = iceland_calculate_mclk_params(hwmgr,
+ memory_clock, memory_level, memory_level->StrobeEnable, dll_state_on);
+
+ if (0 == result) {
+ memory_level->MinVddc = PP_HOST_TO_SMC_UL(memory_level->MinVddc * VOLTAGE_SCALE);
+ CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MinVddcPhases);
+ memory_level->MinVddci = PP_HOST_TO_SMC_UL(memory_level->MinVddci * VOLTAGE_SCALE);
+ memory_level->MinMvdd = PP_HOST_TO_SMC_UL(memory_level->MinMvdd * VOLTAGE_SCALE);
+ /* MCLK frequency in units of 10KHz*/
+ CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkFrequency);
+ /* Indicates maximum activity level for this performance level.*/
+ CONVERT_FROM_HOST_TO_SMC_US(memory_level->ActivityLevel);
+ CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl);
+ CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_1);
+ CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_2);
+ CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllAdFuncCntl);
+ CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllDqFuncCntl);
+ CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkPwrmgtCntl);
+ CONVERT_FROM_HOST_TO_SMC_UL(memory_level->DllCntl);
+ CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs1);
+ CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs2);
+ }
+
+ return result;
+}
+
+static int iceland_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
+ struct smu7_dpm_table *dpm_table = &data->dpm_table;
+ int result;
+
+ /* populate MCLK dpm table to SMU7 */
+ uint32_t level_array_adress = smu_data->smu7_data.dpm_table_start + offsetof(SMU71_Discrete_DpmTable, MemoryLevel);
+ uint32_t level_array_size = sizeof(SMU71_Discrete_MemoryLevel) * SMU71_MAX_LEVELS_MEMORY;
+ SMU71_Discrete_MemoryLevel *levels = smu_data->smc_state_table.MemoryLevel;
+ uint32_t i;
+
+ memset(levels, 0x00, level_array_size);
+
+ for (i = 0; i < dpm_table->mclk_table.count; i++) {
+ PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value),
+ "can not populate memory level as memory clock is zero", return -EINVAL);
+ result = iceland_populate_single_memory_level(hwmgr, dpm_table->mclk_table.dpm_levels[i].value,
+ &(smu_data->smc_state_table.MemoryLevel[i]));
+ if (0 != result) {
+ return result;
+ }
+ }
+
+ /* Only enable level 0 for now.*/
+ smu_data->smc_state_table.MemoryLevel[0].EnabledForActivity = 1;
+
+	/*
+	 * In order to prevent MC activity in stutter mode from pushing DPM up,
+	 * the UVD change complements this by putting the MCLK in a higher state
+	 * by default, so that we are not affected by the up threshold or MCLK DPM latency.
+	 */
+ smu_data->smc_state_table.MemoryLevel[0].ActivityLevel = 0x1F;
+ CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.MemoryLevel[0].ActivityLevel);
+
+ smu_data->smc_state_table.MemoryDpmLevelCount = (uint8_t)dpm_table->mclk_table.count;
+ data->dpm_level_enable_mask.mclk_dpm_enable_mask = phm_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
+ /* set highest level watermark to high*/
+ smu_data->smc_state_table.MemoryLevel[dpm_table->mclk_table.count-1].DisplayWatermark = PPSMC_DISPLAY_WATERMARK_HIGH;
+
+ /* level count will send to smc once at init smc table and never change*/
+ result = smu7_copy_bytes_to_smc(hwmgr,
+ level_array_adress, (uint8_t *)levels, (uint32_t)level_array_size,
+ SMC_RAM_END);
+
+ return result;
+}
+
+static int iceland_populate_mvdd_value(struct pp_hwmgr *hwmgr, uint32_t mclk,
+ SMU71_Discrete_VoltageLevel *voltage)
+{
+ const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ uint32_t i = 0;
+
+ if (SMU7_VOLTAGE_CONTROL_NONE != data->mvdd_control) {
+		/* find the first mvdd entry whose clock is not lower than the requested clock */
+ for (i = 0; i < hwmgr->dyn_state.mvdd_dependency_on_mclk->count; i++) {
+ if (mclk <= hwmgr->dyn_state.mvdd_dependency_on_mclk->entries[i].clk) {
+ /* Always round to higher voltage. */
+ voltage->Voltage = data->mvdd_voltage_table.entries[i].value;
+ break;
+ }
+ }
+
+ PP_ASSERT_WITH_CODE(i < hwmgr->dyn_state.mvdd_dependency_on_mclk->count,
+ "MVDD Voltage is outside the supported range.", return -EINVAL);
+
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int iceland_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
+ SMU71_Discrete_DpmTable *table)
+{
+ int result = 0;
+ const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct pp_atomctrl_clock_dividers_vi dividers;
+ uint32_t vddc_phase_shed_control = 0;
+
+ SMU71_Discrete_VoltageLevel voltage_level;
+ uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL;
+ uint32_t spll_func_cntl_2 = data->clock_registers.vCG_SPLL_FUNC_CNTL_2;
+ uint32_t dll_cntl = data->clock_registers.vDLL_CNTL;
+ uint32_t mclk_pwrmgt_cntl = data->clock_registers.vMCLK_PWRMGT_CNTL;
+
+
+ /* The ACPI state should not do DPM on DC (or ever).*/
+ table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
+
+ if (data->acpi_vddc)
+ table->ACPILevel.MinVddc = PP_HOST_TO_SMC_UL(data->acpi_vddc * VOLTAGE_SCALE);
+ else
+ table->ACPILevel.MinVddc = PP_HOST_TO_SMC_UL(data->min_vddc_in_pptable * VOLTAGE_SCALE);
+
+ table->ACPILevel.MinVddcPhases = vddc_phase_shed_control ? 0 : 1;
+ /* assign zero for now*/
+ table->ACPILevel.SclkFrequency = atomctrl_get_reference_clock(hwmgr);
+
+ /* get the engine clock dividers for this clock value*/
+ result = atomctrl_get_engine_pll_dividers_vi(hwmgr,
+ table->ACPILevel.SclkFrequency, &dividers);
+
+ PP_ASSERT_WITH_CODE(result == 0,
+ "Error retrieving Engine Clock dividers from VBIOS.", return result);
+
+ /* divider ID for required SCLK*/
+ table->ACPILevel.SclkDid = (uint8_t)dividers.pll_post_divider;
+ table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
+ table->ACPILevel.DeepSleepDivId = 0;
+
+ spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
+ CG_SPLL_FUNC_CNTL, SPLL_PWRON, 0);
+ spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
+ CG_SPLL_FUNC_CNTL, SPLL_RESET, 1);
+ spll_func_cntl_2 = PHM_SET_FIELD(spll_func_cntl_2,
+ CG_SPLL_FUNC_CNTL_2, SCLK_MUX_SEL, 4);
+
+ table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
+ table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
+ table->ACPILevel.CgSpllFuncCntl3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
+ table->ACPILevel.CgSpllFuncCntl4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
+ table->ACPILevel.SpllSpreadSpectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
+ table->ACPILevel.SpllSpreadSpectrum2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
+ table->ACPILevel.CcPwrDynRm = 0;
+ table->ACPILevel.CcPwrDynRm1 = 0;
+
+
+ /* For various features to be enabled/disabled while this level is active.*/
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags);
+ /* SCLK frequency in units of 10KHz*/
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkFrequency);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl2);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl3);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl4);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum2);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1);
+
+ /* table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;*/
+ table->MemoryACPILevel.MinVddc = table->ACPILevel.MinVddc;
+ table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;
+
+ if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control)
+ table->MemoryACPILevel.MinVddci = table->MemoryACPILevel.MinVddc;
+ else {
+ if (data->acpi_vddci != 0)
+ table->MemoryACPILevel.MinVddci = PP_HOST_TO_SMC_UL(data->acpi_vddci * VOLTAGE_SCALE);
+ else
+ table->MemoryACPILevel.MinVddci = PP_HOST_TO_SMC_UL(data->min_vddci_in_pptable * VOLTAGE_SCALE);
+ }
+
+ if (0 == iceland_populate_mvdd_value(hwmgr, 0, &voltage_level))
+ table->MemoryACPILevel.MinMvdd =
+ PP_HOST_TO_SMC_UL(voltage_level.Voltage * VOLTAGE_SCALE);
+ else
+ table->MemoryACPILevel.MinMvdd = 0;
+
+ /* Force reset on DLL*/
+ mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
+ MCLK_PWRMGT_CNTL, MRDCK0_RESET, 0x1);
+ mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
+ MCLK_PWRMGT_CNTL, MRDCK1_RESET, 0x1);
+
+ /* Disable DLL in ACPIState*/
+ mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
+ MCLK_PWRMGT_CNTL, MRDCK0_PDNB, 0);
+ mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
+ MCLK_PWRMGT_CNTL, MRDCK1_PDNB, 0);
+
+ /* Enable DLL bypass signal*/
+ dll_cntl = PHM_SET_FIELD(dll_cntl,
+ DLL_CNTL, MRDCK0_BYPASS, 0);
+ dll_cntl = PHM_SET_FIELD(dll_cntl,
+ DLL_CNTL, MRDCK1_BYPASS, 0);
+
+ table->MemoryACPILevel.DllCntl =
+ PP_HOST_TO_SMC_UL(dll_cntl);
+ table->MemoryACPILevel.MclkPwrmgtCntl =
+ PP_HOST_TO_SMC_UL(mclk_pwrmgt_cntl);
+ table->MemoryACPILevel.MpllAdFuncCntl =
+ PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_AD_FUNC_CNTL);
+ table->MemoryACPILevel.MpllDqFuncCntl =
+ PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_DQ_FUNC_CNTL);
+ table->MemoryACPILevel.MpllFuncCntl =
+ PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL);
+ table->MemoryACPILevel.MpllFuncCntl_1 =
+ PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_1);
+ table->MemoryACPILevel.MpllFuncCntl_2 =
+ PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_2);
+ table->MemoryACPILevel.MpllSs1 =
+ PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS1);
+ table->MemoryACPILevel.MpllSs2 =
+ PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS2);
+
+ table->MemoryACPILevel.EnabledForThrottle = 0;
+ table->MemoryACPILevel.EnabledForActivity = 0;
+ table->MemoryACPILevel.UpHyst = 0;
+ table->MemoryACPILevel.DownHyst = 100;
+ table->MemoryACPILevel.VoltageDownHyst = 0;
+ /* Indicates maximum activity level for this performance level.*/
+ table->MemoryACPILevel.ActivityLevel = PP_HOST_TO_SMC_US((uint16_t)data->mclk_activity_target);
+
+ table->MemoryACPILevel.StutterEnable = 0;
+ table->MemoryACPILevel.StrobeEnable = 0;
+ table->MemoryACPILevel.EdcReadEnable = 0;
+ table->MemoryACPILevel.EdcWriteEnable = 0;
+ table->MemoryACPILevel.RttEnable = 0;
+
+ return result;
+}
+
+static int iceland_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
+ SMU71_Discrete_DpmTable *table)
+{
+ return 0;
+}
+
+static int iceland_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
+ SMU71_Discrete_DpmTable *table)
+{
+ return 0;
+}
+
+static int iceland_populate_smc_acp_level(struct pp_hwmgr *hwmgr,
+ SMU71_Discrete_DpmTable *table)
+{
+ return 0;
+}
+
+static int iceland_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
+ SMU71_Discrete_DpmTable *table)
+{
+ return 0;
+}
+
+static int iceland_populate_memory_timing_parameters(
+ struct pp_hwmgr *hwmgr,
+ uint32_t engine_clock,
+ uint32_t memory_clock,
+ struct SMU71_Discrete_MCArbDramTimingTableEntry *arb_regs
+ )
+{
+ uint32_t dramTiming;
+ uint32_t dramTiming2;
+ uint32_t burstTime;
+ int result;
+
+ result = atomctrl_set_engine_dram_timings_rv770(hwmgr,
+ engine_clock, memory_clock);
+
+ PP_ASSERT_WITH_CODE(result == 0,
+ "Error calling VBIOS to set DRAM_TIMING.", return result);
+
+ dramTiming = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
+ dramTiming2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
+ burstTime = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);
+
+ arb_regs->McArbDramTiming = PP_HOST_TO_SMC_UL(dramTiming);
+ arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dramTiming2);
+ arb_regs->McArbBurstTime = (uint8_t)burstTime;
+
+ return 0;
+}
+
+static int iceland_program_memory_timing_parameters(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
+ int result = 0;
+ SMU71_Discrete_MCArbDramTimingTable arb_regs;
+ uint32_t i, j;
+
+ memset(&arb_regs, 0x00, sizeof(SMU71_Discrete_MCArbDramTimingTable));
+
+ for (i = 0; i < data->dpm_table.sclk_table.count; i++) {
+ for (j = 0; j < data->dpm_table.mclk_table.count; j++) {
+ result = iceland_populate_memory_timing_parameters
+ (hwmgr, data->dpm_table.sclk_table.dpm_levels[i].value,
+ data->dpm_table.mclk_table.dpm_levels[j].value,
+ &arb_regs.entries[i][j]);
+
+ if (0 != result) {
+ break;
+ }
+ }
+ }
+
+ if (0 == result) {
+ result = smu7_copy_bytes_to_smc(
+ hwmgr,
+ smu_data->smu7_data.arb_table_start,
+ (uint8_t *)&arb_regs,
+ sizeof(SMU71_Discrete_MCArbDramTimingTable),
+ SMC_RAM_END
+ );
+ }
+
+ return result;
+}
+
+static int iceland_populate_smc_boot_level(struct pp_hwmgr *hwmgr,
+ SMU71_Discrete_DpmTable *table)
+{
+ int result = 0;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
+ table->GraphicsBootLevel = 0;
+ table->MemoryBootLevel = 0;
+
+ /* find boot level from dpm table*/
+ result = phm_find_boot_level(&(data->dpm_table.sclk_table),
+ data->vbios_boot_state.sclk_bootup_value,
+ (uint32_t *)&(smu_data->smc_state_table.GraphicsBootLevel));
+
+ if (0 != result) {
+ smu_data->smc_state_table.GraphicsBootLevel = 0;
+ pr_err("VBIOS did not find boot engine clock value \
+ in dependency table. Using Graphics DPM level 0!");
+ result = 0;
+ }
+
+ result = phm_find_boot_level(&(data->dpm_table.mclk_table),
+ data->vbios_boot_state.mclk_bootup_value,
+ (uint32_t *)&(smu_data->smc_state_table.MemoryBootLevel));
+
+ if (0 != result) {
+ smu_data->smc_state_table.MemoryBootLevel = 0;
+ pr_err("VBIOS did not find boot engine clock value \
+ in dependency table. Using Memory DPM level 0!");
+ result = 0;
+ }
+
+ table->BootVddc = data->vbios_boot_state.vddc_bootup_value;
+ if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control)
+ table->BootVddci = table->BootVddc;
+ else
+ table->BootVddci = data->vbios_boot_state.vddci_bootup_value;
+
+ table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value;
+
+ return result;
+}
+
+static int iceland_populate_mc_reg_address(struct pp_hwmgr *hwmgr,
+ SMU71_Discrete_MCRegisters *mc_reg_table)
+{
+ const struct iceland_smumgr *smu_data = (struct iceland_smumgr *)hwmgr->smu_backend;
+
+ uint32_t i, j;
+
+ for (i = 0, j = 0; j < smu_data->mc_reg_table.last; j++) {
+ if (smu_data->mc_reg_table.validflag & 1<<j) {
+ PP_ASSERT_WITH_CODE(i < SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE,
+ "Index of mc_reg_table->address[] array out of boundary", return -EINVAL);
+ mc_reg_table->address[i].s0 =
+ PP_HOST_TO_SMC_US(smu_data->mc_reg_table.mc_reg_address[j].s0);
+ mc_reg_table->address[i].s1 =
+ PP_HOST_TO_SMC_US(smu_data->mc_reg_table.mc_reg_address[j].s1);
+ i++;
+ }
+ }
+
+ mc_reg_table->last = (uint8_t)i;
+
+ return 0;
+}
+
+/*convert register values from driver to SMC format */
+static void iceland_convert_mc_registers(
+ const struct iceland_mc_reg_entry *entry,
+ SMU71_Discrete_MCRegisterSet *data,
+ uint32_t num_entries, uint32_t valid_flag)
+{
+ uint32_t i, j;
+
+ for (i = 0, j = 0; j < num_entries; j++) {
+ if (valid_flag & 1<<j) {
+ data->value[i] = PP_HOST_TO_SMC_UL(entry->mc_data[j]);
+ i++;
+ }
+ }
+}
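iceland_convert_mc_registers() compacts only the register slots whose bit is set in the valid flag, preserving their order. A minimal standalone sketch of that packing with made-up values:

	#include <stdint.h>
	#include <stdio.h>

	/* Copy only the entries whose bit is set in valid_flag; returns the packed count. */
	static uint32_t pack_valid(const uint32_t *src, uint32_t n, uint32_t valid_flag, uint32_t *dst)
	{
		uint32_t i = 0, j;

		for (j = 0; j < n; j++)
			if (valid_flag & (1 << j))
				dst[i++] = src[j];

		return i;
	}

	int main(void)
	{
		const uint32_t src[4] = { 0xA, 0xB, 0xC, 0xD };
		uint32_t dst[4] = { 0 };
		uint32_t n = pack_valid(src, 4, 0x5 /* bits 0 and 2 */, dst);

		printf("%u packed: 0x%X 0x%X\n", n, dst[0], dst[1]);	/* 2 packed: 0xA 0xC */
		return 0;
	}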
+
+static int iceland_convert_mc_reg_table_entry_to_smc(struct pp_hwmgr *hwmgr,
+ const uint32_t memory_clock,
+ SMU71_Discrete_MCRegisterSet *mc_reg_table_data
+ )
+{
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
+ uint32_t i = 0;
+
+ for (i = 0; i < smu_data->mc_reg_table.num_entries; i++) {
+ if (memory_clock <=
+ smu_data->mc_reg_table.mc_reg_table_entry[i].mclk_max) {
+ break;
+ }
+ }
+
+ if ((i == smu_data->mc_reg_table.num_entries) && (i > 0))
+ --i;
+
+ iceland_convert_mc_registers(&smu_data->mc_reg_table.mc_reg_table_entry[i],
+ mc_reg_table_data, smu_data->mc_reg_table.last,
+ smu_data->mc_reg_table.validflag);
+
+ return 0;
+}
+
+static int iceland_convert_mc_reg_table_to_smc(struct pp_hwmgr *hwmgr,
+ SMU71_Discrete_MCRegisters *mc_regs)
+{
+ int result = 0;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ int res;
+ uint32_t i;
+
+ for (i = 0; i < data->dpm_table.mclk_table.count; i++) {
+ res = iceland_convert_mc_reg_table_entry_to_smc(
+ hwmgr,
+ data->dpm_table.mclk_table.dpm_levels[i].value,
+ &mc_regs->data[i]
+ );
+
+ if (0 != res)
+ result = res;
+ }
+
+ return result;
+}
+
+static int iceland_update_and_upload_mc_reg_table(struct pp_hwmgr *hwmgr)
+{
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ uint32_t address;
+ int32_t result;
+
+ if (0 == (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK))
+ return 0;
+
+
+ memset(&smu_data->mc_regs, 0, sizeof(SMU71_Discrete_MCRegisters));
+
+ result = iceland_convert_mc_reg_table_to_smc(hwmgr, &(smu_data->mc_regs));
+
+ if (result != 0)
+ return result;
+
+
+ address = smu_data->smu7_data.mc_reg_table_start + (uint32_t)offsetof(SMU71_Discrete_MCRegisters, data[0]);
+
+ return smu7_copy_bytes_to_smc(hwmgr, address,
+ (uint8_t *)&smu_data->mc_regs.data[0],
+ sizeof(SMU71_Discrete_MCRegisterSet) * data->dpm_table.mclk_table.count,
+ SMC_RAM_END);
+}
+
+static int iceland_populate_initial_mc_reg_table(struct pp_hwmgr *hwmgr)
+{
+ int result;
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
+
+ memset(&smu_data->mc_regs, 0x00, sizeof(SMU71_Discrete_MCRegisters));
+ result = iceland_populate_mc_reg_address(hwmgr, &(smu_data->mc_regs));
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize MCRegTable for the MC register addresses!", return result;);
+
+ result = iceland_convert_mc_reg_table_to_smc(hwmgr, &smu_data->mc_regs);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize MCRegTable for driver state!", return result;);
+
+ return smu7_copy_bytes_to_smc(hwmgr, smu_data->smu7_data.mc_reg_table_start,
+ (uint8_t *)&smu_data->mc_regs, sizeof(SMU71_Discrete_MCRegisters), SMC_RAM_END);
+}
+
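+/*
+ * Boot levels are the first SCLK/MCLK dependency entries at or above the
+ * VBIOS boot-up clocks.
+ */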
+static int iceland_populate_smc_initial_state(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
+ uint8_t count, level;
+
+ count = (uint8_t)(hwmgr->dyn_state.vddc_dependency_on_sclk->count);
+
+ for (level = 0; level < count; level++) {
+ if (hwmgr->dyn_state.vddc_dependency_on_sclk->entries[level].clk
+ >= data->vbios_boot_state.sclk_bootup_value) {
+ smu_data->smc_state_table.GraphicsBootLevel = level;
+ break;
+ }
+ }
+
+ count = (uint8_t)(hwmgr->dyn_state.vddc_dependency_on_mclk->count);
+
+ for (level = 0; level < count; level++) {
+ if (hwmgr->dyn_state.vddc_dependency_on_mclk->entries[level].clk
+ >= data->vbios_boot_state.mclk_bootup_value) {
+ smu_data->smc_state_table.MemoryBootLevel = level;
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int iceland_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
+ const struct iceland_pt_defaults *defaults = smu_data->power_tune_defaults;
+ SMU71_Discrete_DpmTable *dpm_table = &(smu_data->smc_state_table);
+ struct phm_cac_tdp_table *cac_dtp_table = hwmgr->dyn_state.cac_dtp_table;
+ struct phm_ppm_table *ppm = hwmgr->dyn_state.ppm_parameter_table;
+ const uint16_t *def1, *def2;
+ int i, j, k;
+
+
+ /*
+ * TDP number of fraction bits are changed from 8 to 7 for Iceland
+ * as requested by SMC team
+ */
+
+ dpm_table->DefaultTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 256));
+ dpm_table->TargetTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usConfigurableTDP * 256));
+
+
+ dpm_table->DTETjOffset = 0;
+
+ dpm_table->GpuTjMax = (uint8_t)(data->thermal_temp_setting.temperature_high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES);
+ dpm_table->GpuTjHyst = 8;
+
+ dpm_table->DTEAmbientTempBase = defaults->dte_ambient_temp_base;
+
+ /* The following are for new Iceland Multi-input fan/thermal control */
+ if (NULL != ppm) {
+ dpm_table->PPM_PkgPwrLimit = (uint16_t)ppm->dgpu_tdp * 256 / 1000;
+ dpm_table->PPM_TemperatureLimit = (uint16_t)ppm->tj_max * 256;
+ } else {
+ dpm_table->PPM_PkgPwrLimit = 0;
+ dpm_table->PPM_TemperatureLimit = 0;
+ }
+
+ CONVERT_FROM_HOST_TO_SMC_US(dpm_table->PPM_PkgPwrLimit);
+ CONVERT_FROM_HOST_TO_SMC_US(dpm_table->PPM_TemperatureLimit);
+
+ dpm_table->BAPM_TEMP_GRADIENT = PP_HOST_TO_SMC_UL(defaults->bapm_temp_gradient);
+ def1 = defaults->bapmti_r;
+ def2 = defaults->bapmti_rc;
+
+ for (i = 0; i < SMU71_DTE_ITERATIONS; i++) {
+ for (j = 0; j < SMU71_DTE_SOURCES; j++) {
+ for (k = 0; k < SMU71_DTE_SINKS; k++) {
+ dpm_table->BAPMTI_R[i][j][k] = PP_HOST_TO_SMC_US(*def1);
+ dpm_table->BAPMTI_RC[i][j][k] = PP_HOST_TO_SMC_US(*def2);
+ def1++;
+ def2++;
+ }
+ }
+ }
+
+ return 0;
+}
+
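+/*
+ * Per the assert below, VDDC must be SVI2-controlled and not all three rails
+ * (VDDC, VDDCI, MVDD) may be on SVI2 at once; a non-SVI2 VDDCI is reported to
+ * the SMC as merged with VDDC.
+ */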
+static int iceland_populate_smc_svi2_config(struct pp_hwmgr *hwmgr,
+ SMU71_Discrete_DpmTable *tab)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control)
+ tab->SVI2Enable |= VDDC_ON_SVI2;
+
+ if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
+ tab->SVI2Enable |= VDDCI_ON_SVI2;
+ else
+ tab->MergedVddci = 1;
+
+ if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control)
+ tab->SVI2Enable |= MVDD_ON_SVI2;
+
+ PP_ASSERT_WITH_CODE(tab->SVI2Enable != (VDDC_ON_SVI2 | VDDCI_ON_SVI2 | MVDD_ON_SVI2) &&
+ (tab->SVI2Enable & VDDC_ON_SVI2), "SVI2 domain configuration is incorrect!", return -EINVAL);
+
+ return 0;
+}
+
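+/*
+ * Build the complete SMU71 discrete DPM table (voltage, link, graphics,
+ * memory, ACPI, VCE, ACP, SAMU and UVD levels plus boot state, BAPM and SVI2
+ * settings) and upload it, the ULV settings, the initial MC register table
+ * and the PM fuses to SMC RAM.
+ */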
+static int iceland_init_smc_table(struct pp_hwmgr *hwmgr)
+{
+ int result;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
+ SMU71_Discrete_DpmTable *table = &(smu_data->smc_state_table);
+
+
+ iceland_initialize_power_tune_defaults(hwmgr);
+ memset(&(smu_data->smc_state_table), 0x00, sizeof(smu_data->smc_state_table));
+
+ if (SMU7_VOLTAGE_CONTROL_NONE != data->voltage_control) {
+ iceland_populate_smc_voltage_tables(hwmgr, table);
+ }
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_AutomaticDCTransition))
+ table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
+
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_StepVddc))
+ table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
+
+ if (data->is_memory_gddr5)
+ table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
+
+
+ if (data->ulv_supported) {
+ result = iceland_populate_ulv_state(hwmgr, &(smu_data->ulv_setting));
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize ULV state!", return result;);
+
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixCG_ULV_PARAMETER, 0x40035);
+ }
+
+ result = iceland_populate_smc_link_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize Link Level!", return result;);
+
+ result = iceland_populate_all_graphic_levels(hwmgr);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize Graphics Level!", return result;);
+
+ result = iceland_populate_all_memory_levels(hwmgr);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize Memory Level!", return result;);
+
+ result = iceland_populate_smc_acpi_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize ACPI Level!", return result;);
+
+ result = iceland_populate_smc_vce_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize VCE Level!", return result;);
+
+ result = iceland_populate_smc_acp_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize ACP Level!", return result;);
+
+ result = iceland_populate_smc_samu_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize SAMU Level!", return result;);
+
+	/*
+	 * Since only the initial state is completely set up at this point
+	 * (the other states are just copies of the boot state) we only need
+	 * to populate the ARB settings for the initial state.
+	 */
+ result = iceland_program_memory_timing_parameters(hwmgr);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to Write ARB settings for the initial state.", return result;);
+
+ result = iceland_populate_smc_uvd_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize UVD Level!", return result;);
+
+ table->GraphicsBootLevel = 0;
+ table->MemoryBootLevel = 0;
+
+ result = iceland_populate_smc_boot_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize Boot Level!", return result;);
+
+ result = iceland_populate_smc_initial_state(hwmgr);
+ PP_ASSERT_WITH_CODE(0 == result, "Failed to initialize Boot State!", return result);
+
+ result = iceland_populate_bapm_parameters_in_dpm_table(hwmgr);
+ PP_ASSERT_WITH_CODE(0 == result, "Failed to populate BAPM Parameters!", return result);
+
+ table->GraphicsVoltageChangeEnable = 1;
+ table->GraphicsThermThrottleEnable = 1;
+ table->GraphicsInterval = 1;
+ table->VoltageInterval = 1;
+ table->ThermalInterval = 1;
+
+ table->TemperatureLimitHigh =
+ (data->thermal_temp_setting.temperature_high *
+ SMU7_Q88_FORMAT_CONVERSION_UNIT) / PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ table->TemperatureLimitLow =
+ (data->thermal_temp_setting.temperature_low *
+ SMU7_Q88_FORMAT_CONVERSION_UNIT) / PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+
+ table->MemoryVoltageChangeEnable = 1;
+ table->MemoryInterval = 1;
+ table->VoltageResponseTime = 0;
+ table->PhaseResponseTime = 0;
+ table->MemoryThermThrottleEnable = 1;
+ table->PCIeBootLinkLevel = 0;
+ table->PCIeGenInterval = 1;
+
+ result = iceland_populate_smc_svi2_config(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to populate SVI2 setting!", return result);
+
+ table->ThermGpio = 17;
+ table->SclkStepSize = 0x4000;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddcVid);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddcPhase);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddciVid);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskMvddVid);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize);
+ CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh);
+ CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow);
+ CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime);
+ CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime);
+
+ table->BootVddc = PP_HOST_TO_SMC_US(table->BootVddc * VOLTAGE_SCALE);
+ table->BootVddci = PP_HOST_TO_SMC_US(table->BootVddci * VOLTAGE_SCALE);
+ table->BootMVdd = PP_HOST_TO_SMC_US(table->BootMVdd * VOLTAGE_SCALE);
+
+	/* Upload all dpm data to SMC memory (dpm level, dpm level count, etc). */
+ result = smu7_copy_bytes_to_smc(hwmgr, smu_data->smu7_data.dpm_table_start +
+ offsetof(SMU71_Discrete_DpmTable, SystemFlags),
+ (uint8_t *)&(table->SystemFlags),
+ sizeof(SMU71_Discrete_DpmTable)-3 * sizeof(SMU71_PIDController),
+ SMC_RAM_END);
+
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to upload dpm data to SMC memory!", return result;);
+
+	/* Upload all ULV settings to SMC memory. */
+ result = smu7_copy_bytes_to_smc(hwmgr,
+ smu_data->smu7_data.ulv_setting_starts,
+ (uint8_t *)&(smu_data->ulv_setting),
+ sizeof(SMU71_Discrete_Ulv),
+ SMC_RAM_END);
+
+
+ result = iceland_populate_initial_mc_reg_table(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == result),
+			"Failed to populate initial MC Reg table!", return result);
+
+ result = iceland_populate_pm_fuses(hwmgr);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to populate PM fuses to SMC memory!", return result);
+
+ return 0;
+}
+
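+/*
+ * Convert the fan control parameters (min/med/max temperature and PWM points)
+ * into the SMU71 fan table and upload it; skipped when microcode fan control
+ * is disabled, no fan is present, or the fan table location is unknown.
+ */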
+int iceland_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_smumgr *smu7_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
+ SMU71_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
+ uint32_t duty100;
+ uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2;
+ uint16_t fdo_min, slope1, slope2;
+ uint32_t reference_clock;
+ int res;
+ uint64_t tmp64;
+
+ if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl))
+ return 0;
+
+ if (hwmgr->thermal_controller.fanInfo.bNoFan) {
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_MicrocodeFanControl);
+ return 0;
+ }
+
+ if (0 == smu7_data->fan_table_start) {
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl);
+ return 0;
+ }
+
+ duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL1, FMAX_DUTY100);
+
+ if (0 == duty100) {
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl);
+ return 0;
+ }
+
+ tmp64 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin * duty100;
+ do_div(tmp64, 10000);
+ fdo_min = (uint16_t)tmp64;
+
+ t_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usTMed - hwmgr->thermal_controller.advanceFanControlParameters.usTMin;
+ t_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usTHigh - hwmgr->thermal_controller.advanceFanControlParameters.usTMed;
+
+ pwm_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed - hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin;
+ pwm_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh - hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed;
+
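+	/*
+	 * The temperature parameters appear to be in 0.01 degC units (hence
+	 * the +50 before dividing by 100 to round to whole degrees); the
+	 * factor of 16 scales the slopes into the fixed-point form the SMC
+	 * firmware expects.
+	 */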
+ slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
+ slope2 = (uint16_t)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
+
+ fan_table.TempMin = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMin) / 100);
+ fan_table.TempMed = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMed) / 100);
+ fan_table.TempMax = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMax) / 100);
+
+ fan_table.Slope1 = cpu_to_be16(slope1);
+ fan_table.Slope2 = cpu_to_be16(slope2);
+
+ fan_table.FdoMin = cpu_to_be16(fdo_min);
+
+ fan_table.HystDown = cpu_to_be16(hwmgr->thermal_controller.advanceFanControlParameters.ucTHyst);
+
+ fan_table.HystUp = cpu_to_be16(1);
+
+ fan_table.HystSlope = cpu_to_be16(1);
+
+ fan_table.TempRespLim = cpu_to_be16(5);
+
+ reference_clock = smu7_get_xclk(hwmgr);
+
+ fan_table.RefreshPeriod = cpu_to_be32((hwmgr->thermal_controller.advanceFanControlParameters.ulCycleDelay * reference_clock) / 1600);
+
+ fan_table.FdoMax = cpu_to_be16((uint16_t)duty100);
+
+ fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_MULT_THERMAL_CTRL, TEMP_SEL);
+
+ /* fan_table.FanControl_GL_Flag = 1; */
+
+ res = smu7_copy_bytes_to_smc(hwmgr, smu7_data->fan_table_start, (uint8_t *)&fan_table, (uint32_t)sizeof(fan_table), SMC_RAM_END);
+
+ return 0;
+}
+
+
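+/*
+ * Reprogram the ARB/memory timing registers only when an overdrive SCLK or
+ * MCLK change is pending.
+ */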
+static int iceland_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ if (data->need_update_smu7_dpm_table &
+ (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_OD_UPDATE_MCLK))
+ return iceland_program_memory_timing_parameters(hwmgr);
+
+ return 0;
+}
+
+static int iceland_update_sclk_threshold(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
+
+ int result = 0;
+ uint32_t low_sclk_interrupt_threshold = 0;
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_SclkThrottleLowNotification)
+ && (hwmgr->gfx_arbiter.sclk_threshold !=
+ data->low_sclk_interrupt_threshold)) {
+ data->low_sclk_interrupt_threshold =
+ hwmgr->gfx_arbiter.sclk_threshold;
+ low_sclk_interrupt_threshold =
+ data->low_sclk_interrupt_threshold;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold);
+
+ result = smu7_copy_bytes_to_smc(
+ hwmgr,
+ smu_data->smu7_data.dpm_table_start +
+ offsetof(SMU71_Discrete_DpmTable,
+ LowSclkInterruptThreshold),
+ (uint8_t *)&low_sclk_interrupt_threshold,
+ sizeof(uint32_t),
+ SMC_RAM_END);
+ }
+
+ result = iceland_update_and_upload_mc_reg_table(hwmgr);
+
+ PP_ASSERT_WITH_CODE((0 == result), "Failed to upload MC reg table!", return result);
+
+ result = iceland_program_mem_timing_parameters(hwmgr);
+ PP_ASSERT_WITH_CODE((result == 0),
+ "Failed to program memory timing parameters!",
+ );
+
+ return result;
+}
+
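+/*
+ * Map the generic SMU member identifiers used by the common smu7 code onto
+ * SMU71 structure offsets.
+ */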
+static uint32_t iceland_get_offsetof(uint32_t type, uint32_t member)
+{
+ switch (type) {
+ case SMU_SoftRegisters:
+ switch (member) {
+ case HandshakeDisables:
+ return offsetof(SMU71_SoftRegisters, HandshakeDisables);
+ case VoltageChangeTimeout:
+ return offsetof(SMU71_SoftRegisters, VoltageChangeTimeout);
+ case AverageGraphicsActivity:
+ return offsetof(SMU71_SoftRegisters, AverageGraphicsActivity);
+ case PreVBlankGap:
+ return offsetof(SMU71_SoftRegisters, PreVBlankGap);
+ case VBlankTimeout:
+ return offsetof(SMU71_SoftRegisters, VBlankTimeout);
+ case UcodeLoadStatus:
+ return offsetof(SMU71_SoftRegisters, UcodeLoadStatus);
+ case DRAM_LOG_ADDR_H:
+ return offsetof(SMU71_SoftRegisters, DRAM_LOG_ADDR_H);
+ case DRAM_LOG_ADDR_L:
+ return offsetof(SMU71_SoftRegisters, DRAM_LOG_ADDR_L);
+ case DRAM_LOG_PHY_ADDR_H:
+ return offsetof(SMU71_SoftRegisters, DRAM_LOG_PHY_ADDR_H);
+ case DRAM_LOG_PHY_ADDR_L:
+ return offsetof(SMU71_SoftRegisters, DRAM_LOG_PHY_ADDR_L);
+ case DRAM_LOG_BUFF_SIZE:
+ return offsetof(SMU71_SoftRegisters, DRAM_LOG_BUFF_SIZE);
+ }
+ case SMU_Discrete_DpmTable:
+ switch (member) {
+ case LowSclkInterruptThreshold:
+ return offsetof(SMU71_Discrete_DpmTable, LowSclkInterruptThreshold);
+ }
+ }
+ pr_warn("can't get the offset of type %x member %x\n", type, member);
+ return 0;
+}
+
+static uint32_t iceland_get_mac_definition(uint32_t value)
+{
+ switch (value) {
+ case SMU_MAX_LEVELS_GRAPHICS:
+ return SMU71_MAX_LEVELS_GRAPHICS;
+ case SMU_MAX_LEVELS_MEMORY:
+ return SMU71_MAX_LEVELS_MEMORY;
+ case SMU_MAX_LEVELS_LINK:
+ return SMU71_MAX_LEVELS_LINK;
+ case SMU_MAX_ENTRIES_SMIO:
+ return SMU71_MAX_ENTRIES_SMIO;
+ case SMU_MAX_LEVELS_VDDC:
+ return SMU71_MAX_LEVELS_VDDC;
+ case SMU_MAX_LEVELS_VDDCI:
+ return SMU71_MAX_LEVELS_VDDCI;
+ case SMU_MAX_LEVELS_MVDD:
+ return SMU71_MAX_LEVELS_MVDD;
+ }
+
+ pr_warn("can't get the mac of %x\n", value);
+ return 0;
+}
+
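+/*
+ * Read the DPM table, soft register, MC register, fan table, ARB table and
+ * ULV setting start offsets, plus the SMC firmware version, from the firmware
+ * header in SMC RAM.
+ */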
+static int iceland_process_firmware_header(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct smu7_smumgr *smu7_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
+
+ uint32_t tmp;
+ int result;
+ bool error = false;
+
+ result = smu7_read_smc_sram_dword(hwmgr,
+ SMU71_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU71_Firmware_Header, DpmTable),
+ &tmp, SMC_RAM_END);
+
+ if (0 == result) {
+ smu7_data->dpm_table_start = tmp;
+ }
+
+ error |= (0 != result);
+
+ result = smu7_read_smc_sram_dword(hwmgr,
+ SMU71_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU71_Firmware_Header, SoftRegisters),
+ &tmp, SMC_RAM_END);
+
+ if (0 == result) {
+ data->soft_regs_start = tmp;
+ smu7_data->soft_regs_start = tmp;
+ }
+
+ error |= (0 != result);
+
+
+ result = smu7_read_smc_sram_dword(hwmgr,
+ SMU71_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU71_Firmware_Header, mcRegisterTable),
+ &tmp, SMC_RAM_END);
+
+ if (0 == result) {
+ smu7_data->mc_reg_table_start = tmp;
+ }
+
+ result = smu7_read_smc_sram_dword(hwmgr,
+ SMU71_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU71_Firmware_Header, FanTable),
+ &tmp, SMC_RAM_END);
+
+ if (0 == result) {
+ smu7_data->fan_table_start = tmp;
+ }
+
+ error |= (0 != result);
+
+ result = smu7_read_smc_sram_dword(hwmgr,
+ SMU71_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU71_Firmware_Header, mcArbDramTimingTable),
+ &tmp, SMC_RAM_END);
+
+ if (0 == result) {
+ smu7_data->arb_table_start = tmp;
+ }
+
+ error |= (0 != result);
+
+
+ result = smu7_read_smc_sram_dword(hwmgr,
+ SMU71_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU71_Firmware_Header, Version),
+ &tmp, SMC_RAM_END);
+
+ if (0 == result) {
+ hwmgr->microcode_version_info.SMC = tmp;
+ }
+
+ error |= (0 != result);
+
+ result = smu7_read_smc_sram_dword(hwmgr,
+ SMU71_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU71_Firmware_Header, UlvSettings),
+ &tmp, SMC_RAM_END);
+
+ if (0 == result) {
+ smu7_data->ulv_setting_starts = tmp;
+ }
+
+ error |= (0 != result);
+
+ return error ? 1 : 0;
+}
+
+/*---------------------------MC----------------------------*/
+
+static uint8_t iceland_get_memory_modile_index(struct pp_hwmgr *hwmgr)
+{
+ return (uint8_t) (0xFF & (cgs_read_register(hwmgr->device, mmBIOS_SCRATCH_4) >> 16));
+}
+
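+/*
+ * Map an MC sequencer register onto its low-power (_LP) shadow; the shadow
+ * (s0) address is what ends up in the SMC MC register table.
+ */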
+static bool iceland_check_s0_mc_reg_index(uint16_t in_reg, uint16_t *out_reg)
+{
+ bool result = true;
+
+ switch (in_reg) {
+ case mmMC_SEQ_RAS_TIMING:
+ *out_reg = mmMC_SEQ_RAS_TIMING_LP;
+ break;
+
+ case mmMC_SEQ_DLL_STBY:
+ *out_reg = mmMC_SEQ_DLL_STBY_LP;
+ break;
+
+ case mmMC_SEQ_G5PDX_CMD0:
+ *out_reg = mmMC_SEQ_G5PDX_CMD0_LP;
+ break;
+
+ case mmMC_SEQ_G5PDX_CMD1:
+ *out_reg = mmMC_SEQ_G5PDX_CMD1_LP;
+ break;
+
+ case mmMC_SEQ_G5PDX_CTRL:
+ *out_reg = mmMC_SEQ_G5PDX_CTRL_LP;
+ break;
+
+ case mmMC_SEQ_CAS_TIMING:
+ *out_reg = mmMC_SEQ_CAS_TIMING_LP;
+ break;
+
+ case mmMC_SEQ_MISC_TIMING:
+ *out_reg = mmMC_SEQ_MISC_TIMING_LP;
+ break;
+
+ case mmMC_SEQ_MISC_TIMING2:
+ *out_reg = mmMC_SEQ_MISC_TIMING2_LP;
+ break;
+
+ case mmMC_SEQ_PMG_DVS_CMD:
+ *out_reg = mmMC_SEQ_PMG_DVS_CMD_LP;
+ break;
+
+ case mmMC_SEQ_PMG_DVS_CTL:
+ *out_reg = mmMC_SEQ_PMG_DVS_CTL_LP;
+ break;
+
+ case mmMC_SEQ_RD_CTL_D0:
+ *out_reg = mmMC_SEQ_RD_CTL_D0_LP;
+ break;
+
+ case mmMC_SEQ_RD_CTL_D1:
+ *out_reg = mmMC_SEQ_RD_CTL_D1_LP;
+ break;
+
+ case mmMC_SEQ_WR_CTL_D0:
+ *out_reg = mmMC_SEQ_WR_CTL_D0_LP;
+ break;
+
+ case mmMC_SEQ_WR_CTL_D1:
+ *out_reg = mmMC_SEQ_WR_CTL_D1_LP;
+ break;
+
+ case mmMC_PMG_CMD_EMRS:
+ *out_reg = mmMC_SEQ_PMG_CMD_EMRS_LP;
+ break;
+
+ case mmMC_PMG_CMD_MRS:
+ *out_reg = mmMC_SEQ_PMG_CMD_MRS_LP;
+ break;
+
+ case mmMC_PMG_CMD_MRS1:
+ *out_reg = mmMC_SEQ_PMG_CMD_MRS1_LP;
+ break;
+
+ case mmMC_SEQ_PMG_TIMING:
+ *out_reg = mmMC_SEQ_PMG_TIMING_LP;
+ break;
+
+ case mmMC_PMG_CMD_MRS2:
+ *out_reg = mmMC_SEQ_PMG_CMD_MRS2_LP;
+ break;
+
+ case mmMC_SEQ_WR_CTL_2:
+ *out_reg = mmMC_SEQ_WR_CTL_2_LP;
+ break;
+
+ default:
+ result = false;
+ break;
+ }
+
+ return result;
+}
+
+static int iceland_set_s0_mc_reg_index(struct iceland_mc_reg_table *table)
+{
+ uint32_t i;
+ uint16_t address;
+
+ for (i = 0; i < table->last; i++) {
+ table->mc_reg_address[i].s0 =
+ iceland_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address)
+ ? address : table->mc_reg_address[i].s1;
+ }
+ return 0;
+}
+
+static int iceland_copy_vbios_smc_reg_table(const pp_atomctrl_mc_reg_table *table,
+ struct iceland_mc_reg_table *ni_table)
+{
+ uint8_t i, j;
+
+ PP_ASSERT_WITH_CODE((table->last <= SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+ "Invalid VramInfo table.", return -EINVAL);
+ PP_ASSERT_WITH_CODE((table->num_entries <= MAX_AC_TIMING_ENTRIES),
+ "Invalid VramInfo table.", return -EINVAL);
+
+ for (i = 0; i < table->last; i++) {
+ ni_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
+ }
+ ni_table->last = table->last;
+
+ for (i = 0; i < table->num_entries; i++) {
+ ni_table->mc_reg_table_entry[i].mclk_max =
+ table->mc_reg_table_entry[i].mclk_max;
+ for (j = 0; j < table->last; j++) {
+ ni_table->mc_reg_table_entry[i].mc_data[j] =
+ table->mc_reg_table_entry[i].mc_data[j];
+ }
+ }
+
+ ni_table->num_entries = table->num_entries;
+
+ return 0;
+}
+
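+/*
+ * Expand MC_SEQ_MISC1 and MC_SEQ_RESERVE_M entries into additional EMRS, MRS,
+ * MRS1 (and, for non-GDDR5, AUTO_CMD) entries, merging the currently
+ * programmed register contents with the per-entry timing data.
+ */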
+static int iceland_set_mc_special_registers(struct pp_hwmgr *hwmgr,
+ struct iceland_mc_reg_table *table)
+{
+ uint8_t i, j, k;
+ uint32_t temp_reg;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ for (i = 0, j = table->last; i < table->last; i++) {
+ PP_ASSERT_WITH_CODE((j < SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+ "Invalid VramInfo table.", return -EINVAL);
+
+ switch (table->mc_reg_address[i].s1) {
+
+ case mmMC_SEQ_MISC1:
+ temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_EMRS);
+ table->mc_reg_address[j].s1 = mmMC_PMG_CMD_EMRS;
+ table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_EMRS_LP;
+ for (k = 0; k < table->num_entries; k++) {
+ table->mc_reg_table_entry[k].mc_data[j] =
+ ((temp_reg & 0xffff0000)) |
+ ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
+ }
+ j++;
+ PP_ASSERT_WITH_CODE((j < SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+ "Invalid VramInfo table.", return -EINVAL);
+
+ temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS);
+ table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS;
+ table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS_LP;
+ for (k = 0; k < table->num_entries; k++) {
+ table->mc_reg_table_entry[k].mc_data[j] =
+ (temp_reg & 0xffff0000) |
+ (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
+
+ if (!data->is_memory_gddr5) {
+ table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
+ }
+ }
+ j++;
+ PP_ASSERT_WITH_CODE((j <= SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+ "Invalid VramInfo table.", return -EINVAL);
+
+ if (!data->is_memory_gddr5 && j < SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE) {
+ table->mc_reg_address[j].s1 = mmMC_PMG_AUTO_CMD;
+ table->mc_reg_address[j].s0 = mmMC_PMG_AUTO_CMD;
+ for (k = 0; k < table->num_entries; k++) {
+ table->mc_reg_table_entry[k].mc_data[j] =
+ (table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
+ }
+ j++;
+ PP_ASSERT_WITH_CODE((j <= SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+ "Invalid VramInfo table.", return -EINVAL);
+ }
+
+ break;
+
+ case mmMC_SEQ_RESERVE_M:
+ temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1);
+ table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS1;
+ table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS1_LP;
+ for (k = 0; k < table->num_entries; k++) {
+ table->mc_reg_table_entry[k].mc_data[j] =
+ (temp_reg & 0xffff0000) |
+ (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
+ }
+ j++;
+ PP_ASSERT_WITH_CODE((j <= SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+ "Invalid VramInfo table.", return -EINVAL);
+ break;
+
+ default:
+ break;
+ }
+
+ }
+
+ table->last = j;
+
+ return 0;
+}
+
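+/*
+ * Mark a register index valid only if its value differs between at least two
+ * entries, so that only registers which actually change with memory clock are
+ * uploaded to the SMC.
+ */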
+static int iceland_set_valid_flag(struct iceland_mc_reg_table *table)
+{
+ uint8_t i, j;
+ for (i = 0; i < table->last; i++) {
+ for (j = 1; j < table->num_entries; j++) {
+ if (table->mc_reg_table_entry[j-1].mc_data[i] !=
+ table->mc_reg_table_entry[j].mc_data[i]) {
+ table->validflag |= (1<<i);
+ break;
+ }
+ }
+ }
+
+ return 0;
+}
+
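+/*
+ * Mirror the MC sequencer registers into their _LP shadows, pull the AC
+ * timing table from the VBIOS via atomctrl and post-process it (s0 indices,
+ * special registers, valid flags) into the driver's MC register table.
+ */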
+static int iceland_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
+{
+ int result;
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
+ pp_atomctrl_mc_reg_table *table;
+ struct iceland_mc_reg_table *ni_table = &smu_data->mc_reg_table;
+ uint8_t module_index = iceland_get_memory_modile_index(hwmgr);
+
+ table = kzalloc(sizeof(pp_atomctrl_mc_reg_table), GFP_KERNEL);
+
+ if (NULL == table)
+ return -ENOMEM;
+
+ /* Program additional LP registers that are no longer programmed by VBIOS */
+ cgs_write_register(hwmgr->device, mmMC_SEQ_RAS_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RAS_TIMING));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_CAS_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_CAS_TIMING));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_DLL_STBY_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_DLL_STBY));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_EMRS_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_EMRS));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS1_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_TIMING));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS2_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS2));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_2_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_2));
+
+ memset(table, 0x00, sizeof(pp_atomctrl_mc_reg_table));
+
+ result = atomctrl_initialize_mc_reg_table(hwmgr, module_index, table);
+
+ if (0 == result)
+ result = iceland_copy_vbios_smc_reg_table(table, ni_table);
+
+ if (0 == result) {
+ iceland_set_s0_mc_reg_index(ni_table);
+ result = iceland_set_mc_special_registers(hwmgr, ni_table);
+ }
+
+ if (0 == result)
+ iceland_set_valid_flag(ni_table);
+
+ kfree(table);
+
+ return result;
+}
+
+static bool iceland_is_dpm_running(struct pp_hwmgr *hwmgr)
+{
+ return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device,
+ CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON))
+ ? true : false;
+}
+
const struct pp_smumgr_func iceland_smu_funcs = {
.smu_init = &iceland_smu_init,
.smu_fini = &smu7_smu_fini,
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.h
index 8eae01b37c40..802472530d34 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.h
@@ -39,7 +39,7 @@ struct iceland_pt_defaults {
uint8_t tdc_waterfall_ctl;
uint8_t dte_ambient_temp_base;
uint32_t display_cac;
- uint32_t bamp_temp_gradient;
+ uint32_t bapm_temp_gradient;
uint16_t bapmti_r[SMU71_DTE_ITERATIONS * SMU71_DTE_SOURCES * SMU71_DTE_SINKS];
uint16_t bapmti_rc[SMU71_DTE_ITERATIONS * SMU71_DTE_SOURCES * SMU71_DTE_SINKS];
};
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c
deleted file mode 100644
index 99a00bd39256..000000000000
--- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c
+++ /dev/null
@@ -1,2364 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include "pp_debug.h"
-#include "polaris10_smc.h"
-#include "smu7_dyn_defaults.h"
-
-#include "smu7_hwmgr.h"
-#include "hardwaremanager.h"
-#include "ppatomctrl.h"
-#include "cgs_common.h"
-#include "atombios.h"
-#include "polaris10_smumgr.h"
-#include "pppcielanes.h"
-
-#include "smu_ucode_xfer_vi.h"
-#include "smu74_discrete.h"
-#include "smu/smu_7_1_3_d.h"
-#include "smu/smu_7_1_3_sh_mask.h"
-#include "gmc/gmc_8_1_d.h"
-#include "gmc/gmc_8_1_sh_mask.h"
-#include "oss/oss_3_0_d.h"
-#include "gca/gfx_8_0_d.h"
-#include "bif/bif_5_0_d.h"
-#include "bif/bif_5_0_sh_mask.h"
-#include "dce/dce_10_0_d.h"
-#include "dce/dce_10_0_sh_mask.h"
-#include "polaris10_pwrvirus.h"
-#include "smu7_ppsmc.h"
-#include "smu7_smumgr.h"
-
-#define POLARIS10_SMC_SIZE 0x20000
-#define VOLTAGE_VID_OFFSET_SCALE1 625
-#define VOLTAGE_VID_OFFSET_SCALE2 100
-#define POWERTUNE_DEFAULT_SET_MAX 1
-#define VDDC_VDDCI_DELTA 200
-#define MC_CG_ARB_FREQ_F1 0x0b
-
-static const struct polaris10_pt_defaults polaris10_power_tune_data_set_array[POWERTUNE_DEFAULT_SET_MAX] = {
- /* sviLoadLIneEn, SviLoadLineVddC, TDC_VDDC_ThrottleReleaseLimitPerc, TDC_MAWt,
- * TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac, BAPM_TEMP_GRADIENT */
- { 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000,
- { 0x79, 0x253, 0x25D, 0xAE, 0x72, 0x80, 0x83, 0x86, 0x6F, 0xC8, 0xC9, 0xC9, 0x2F, 0x4D, 0x61},
- { 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 } },
-};
-
-static const sclkFcwRange_t Range_Table[NUM_SCLK_RANGE] = {
- {VCO_2_4, POSTDIV_DIV_BY_16, 75, 160, 112},
- {VCO_3_6, POSTDIV_DIV_BY_16, 112, 224, 160},
- {VCO_2_4, POSTDIV_DIV_BY_8, 75, 160, 112},
- {VCO_3_6, POSTDIV_DIV_BY_8, 112, 224, 160},
- {VCO_2_4, POSTDIV_DIV_BY_4, 75, 160, 112},
- {VCO_3_6, POSTDIV_DIV_BY_4, 112, 216, 160},
- {VCO_2_4, POSTDIV_DIV_BY_2, 75, 160, 108},
- {VCO_3_6, POSTDIV_DIV_BY_2, 112, 216, 160} };
-
-static int polaris10_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr,
- struct phm_ppt_v1_clock_voltage_dependency_table *dep_table,
- uint32_t clock, SMU_VoltageLevel *voltage, uint32_t *mvdd)
-{
- uint32_t i;
- uint16_t vddci;
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-
- *voltage = *mvdd = 0;
-
- /* clock - voltage dependency table is empty table */
- if (dep_table->count == 0)
- return -EINVAL;
-
- for (i = 0; i < dep_table->count; i++) {
- /* find first sclk bigger than request */
- if (dep_table->entries[i].clk >= clock) {
- *voltage |= (dep_table->entries[i].vddc *
- VOLTAGE_SCALE) << VDDC_SHIFT;
- if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control)
- *voltage |= (data->vbios_boot_state.vddci_bootup_value *
- VOLTAGE_SCALE) << VDDCI_SHIFT;
- else if (dep_table->entries[i].vddci)
- *voltage |= (dep_table->entries[i].vddci *
- VOLTAGE_SCALE) << VDDCI_SHIFT;
- else {
- vddci = phm_find_closest_vddci(&(data->vddci_voltage_table),
- (dep_table->entries[i].vddc -
- (uint16_t)VDDC_VDDCI_DELTA));
- *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
- }
-
- if (SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control)
- *mvdd = data->vbios_boot_state.mvdd_bootup_value *
- VOLTAGE_SCALE;
- else if (dep_table->entries[i].mvdd)
- *mvdd = (uint32_t) dep_table->entries[i].mvdd *
- VOLTAGE_SCALE;
-
- *voltage |= 1 << PHASES_SHIFT;
- return 0;
- }
- }
-
- /* sclk is bigger than max sclk in the dependence table */
- *voltage |= (dep_table->entries[i - 1].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
-
- if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control)
- *voltage |= (data->vbios_boot_state.vddci_bootup_value *
- VOLTAGE_SCALE) << VDDCI_SHIFT;
- else if (dep_table->entries[i-1].vddci) {
- vddci = phm_find_closest_vddci(&(data->vddci_voltage_table),
- (dep_table->entries[i].vddc -
- (uint16_t)VDDC_VDDCI_DELTA));
- *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
- }
-
- if (SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control)
- *mvdd = data->vbios_boot_state.mvdd_bootup_value * VOLTAGE_SCALE;
- else if (dep_table->entries[i].mvdd)
- *mvdd = (uint32_t) dep_table->entries[i - 1].mvdd * VOLTAGE_SCALE;
-
- return 0;
-}
-
-static uint16_t scale_fan_gain_settings(uint16_t raw_setting)
-{
- uint32_t tmp;
- tmp = raw_setting * 4096 / 100;
- return (uint16_t)tmp;
-}
-
-static int polaris10_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
-
- const struct polaris10_pt_defaults *defaults = smu_data->power_tune_defaults;
- SMU74_Discrete_DpmTable *table = &(smu_data->smc_state_table);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct phm_cac_tdp_table *cac_dtp_table = table_info->cac_dtp_table;
- struct pp_advance_fan_control_parameters *fan_table =
- &hwmgr->thermal_controller.advanceFanControlParameters;
- int i, j, k;
- const uint16_t *pdef1;
- const uint16_t *pdef2;
-
- table->DefaultTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 128));
- table->TargetTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 128));
-
- PP_ASSERT_WITH_CODE(cac_dtp_table->usTargetOperatingTemp <= 255,
- "Target Operating Temp is out of Range!",
- );
-
- table->TemperatureLimitEdge = PP_HOST_TO_SMC_US(
- cac_dtp_table->usTargetOperatingTemp * 256);
- table->TemperatureLimitHotspot = PP_HOST_TO_SMC_US(
- cac_dtp_table->usTemperatureLimitHotspot * 256);
- table->FanGainEdge = PP_HOST_TO_SMC_US(
- scale_fan_gain_settings(fan_table->usFanGainEdge));
- table->FanGainHotspot = PP_HOST_TO_SMC_US(
- scale_fan_gain_settings(fan_table->usFanGainHotspot));
-
- pdef1 = defaults->BAPMTI_R;
- pdef2 = defaults->BAPMTI_RC;
-
- for (i = 0; i < SMU74_DTE_ITERATIONS; i++) {
- for (j = 0; j < SMU74_DTE_SOURCES; j++) {
- for (k = 0; k < SMU74_DTE_SINKS; k++) {
- table->BAPMTI_R[i][j][k] = PP_HOST_TO_SMC_US(*pdef1);
- table->BAPMTI_RC[i][j][k] = PP_HOST_TO_SMC_US(*pdef2);
- pdef1++;
- pdef2++;
- }
- }
- }
-
- return 0;
-}
-
-static int polaris10_populate_svi_load_line(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
- const struct polaris10_pt_defaults *defaults = smu_data->power_tune_defaults;
-
- smu_data->power_tune_table.SviLoadLineEn = defaults->SviLoadLineEn;
- smu_data->power_tune_table.SviLoadLineVddC = defaults->SviLoadLineVddC;
- smu_data->power_tune_table.SviLoadLineTrimVddC = 3;
- smu_data->power_tune_table.SviLoadLineOffsetVddC = 0;
-
- return 0;
-}
-
-static int polaris10_populate_tdc_limit(struct pp_hwmgr *hwmgr)
-{
- uint16_t tdc_limit;
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- const struct polaris10_pt_defaults *defaults = smu_data->power_tune_defaults;
-
- tdc_limit = (uint16_t)(table_info->cac_dtp_table->usTDC * 128);
- smu_data->power_tune_table.TDC_VDDC_PkgLimit =
- CONVERT_FROM_HOST_TO_SMC_US(tdc_limit);
- smu_data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
- defaults->TDC_VDDC_ThrottleReleaseLimitPerc;
- smu_data->power_tune_table.TDC_MAWt = defaults->TDC_MAWt;
-
- return 0;
-}
-
-static int polaris10_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
-{
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
- const struct polaris10_pt_defaults *defaults = smu_data->power_tune_defaults;
- uint32_t temp;
-
- if (smu7_read_smc_sram_dword(hwmgr->smumgr,
- fuse_table_offset +
- offsetof(SMU74_Discrete_PmFuses, TdcWaterfallCtl),
- (uint32_t *)&temp, SMC_RAM_END))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to read PmFuses.DW6 (SviLoadLineEn) from SMC Failed!",
- return -EINVAL);
- else {
- smu_data->power_tune_table.TdcWaterfallCtl = defaults->TdcWaterfallCtl;
- smu_data->power_tune_table.LPMLTemperatureMin =
- (uint8_t)((temp >> 16) & 0xff);
- smu_data->power_tune_table.LPMLTemperatureMax =
- (uint8_t)((temp >> 8) & 0xff);
- smu_data->power_tune_table.Reserved = (uint8_t)(temp & 0xff);
- }
- return 0;
-}
-
-static int polaris10_populate_temperature_scaler(struct pp_hwmgr *hwmgr)
-{
- int i;
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
-
- /* Currently not used. Set all to zero. */
- for (i = 0; i < 16; i++)
- smu_data->power_tune_table.LPMLTemperatureScaler[i] = 0;
-
- return 0;
-}
-
-static int polaris10_populate_fuzzy_fan(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
-
-/* TO DO move to hwmgr */
- if ((hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity & (1 << 15))
- || 0 == hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity)
- hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity =
- hwmgr->thermal_controller.advanceFanControlParameters.usDefaultFanOutputSensitivity;
-
- smu_data->power_tune_table.FuzzyFan_PwmSetDelta = PP_HOST_TO_SMC_US(
- hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity);
- return 0;
-}
-
-static int polaris10_populate_gnb_lpml(struct pp_hwmgr *hwmgr)
-{
- int i;
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
-
- /* Currently not used. Set all to zero. */
- for (i = 0; i < 16; i++)
- smu_data->power_tune_table.GnbLPML[i] = 0;
-
- return 0;
-}
-
-static int polaris10_min_max_vgnb_lpml_id_from_bapm_vddc(struct pp_hwmgr *hwmgr)
-{
- return 0;
-}
-
-static int polaris10_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- uint16_t hi_sidd = smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd;
- uint16_t lo_sidd = smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd;
- struct phm_cac_tdp_table *cac_table = table_info->cac_dtp_table;
-
- hi_sidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256);
- lo_sidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256);
-
- smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd =
- CONVERT_FROM_HOST_TO_SMC_US(hi_sidd);
- smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd =
- CONVERT_FROM_HOST_TO_SMC_US(lo_sidd);
-
- return 0;
-}
-
-static int polaris10_populate_pm_fuses(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
- uint32_t pm_fuse_table_offset;
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_PowerContainment)) {
- if (smu7_read_smc_sram_dword(hwmgr->smumgr,
- SMU7_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU74_Firmware_Header, PmFuseTable),
- &pm_fuse_table_offset, SMC_RAM_END))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to get pm_fuse_table_offset Failed!",
- return -EINVAL);
-
- if (polaris10_populate_svi_load_line(hwmgr))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate SviLoadLine Failed!",
- return -EINVAL);
-
- if (polaris10_populate_tdc_limit(hwmgr))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate TDCLimit Failed!", return -EINVAL);
-
- if (polaris10_populate_dw8(hwmgr, pm_fuse_table_offset))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate TdcWaterfallCtl, "
- "LPMLTemperature Min and Max Failed!",
- return -EINVAL);
-
- if (0 != polaris10_populate_temperature_scaler(hwmgr))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate LPMLTemperatureScaler Failed!",
- return -EINVAL);
-
- if (polaris10_populate_fuzzy_fan(hwmgr))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate Fuzzy Fan Control parameters Failed!",
- return -EINVAL);
-
- if (polaris10_populate_gnb_lpml(hwmgr))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate GnbLPML Failed!",
- return -EINVAL);
-
- if (polaris10_min_max_vgnb_lpml_id_from_bapm_vddc(hwmgr))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate GnbLPML Min and Max Vid Failed!",
- return -EINVAL);
-
- if (polaris10_populate_bapm_vddc_base_leakage_sidd(hwmgr))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate BapmVddCBaseLeakage Hi and Lo "
- "Sidd Failed!", return -EINVAL);
-
- if (smu7_copy_bytes_to_smc(hwmgr->smumgr, pm_fuse_table_offset,
- (uint8_t *)&smu_data->power_tune_table,
- (sizeof(struct SMU74_Discrete_PmFuses) - 92), SMC_RAM_END))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to download PmFuseTable Failed!",
- return -EINVAL);
- }
- return 0;
-}
-
-/**
- * Mvdd table preparation for SMC.
- *
- * @param *hwmgr The address of the hardware manager.
- * @param *table The SMC DPM table structure to be populated.
- * @return 0
- */
-static int polaris10_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
- SMU74_Discrete_DpmTable *table)
-{
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- uint32_t count, level;
-
- if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
- count = data->mvdd_voltage_table.count;
- if (count > SMU_MAX_SMIO_LEVELS)
- count = SMU_MAX_SMIO_LEVELS;
- for (level = 0; level < count; level++) {
- table->SmioTable2.Pattern[level].Voltage =
- PP_HOST_TO_SMC_US(data->mvdd_voltage_table.entries[count].value * VOLTAGE_SCALE);
- /* Index into DpmTable.Smio. Drive bits from Smio entry to get this voltage level.*/
- table->SmioTable2.Pattern[level].Smio =
- (uint8_t) level;
- table->Smio[level] |=
- data->mvdd_voltage_table.entries[level].smio_low;
- }
- table->SmioMask2 = data->mvdd_voltage_table.mask_low;
-
- table->MvddLevelCount = (uint32_t) PP_HOST_TO_SMC_UL(count);
- }
-
- return 0;
-}
-
-static int polaris10_populate_smc_vddci_table(struct pp_hwmgr *hwmgr,
- struct SMU74_Discrete_DpmTable *table)
-{
- uint32_t count, level;
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-
- count = data->vddci_voltage_table.count;
-
- if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
- if (count > SMU_MAX_SMIO_LEVELS)
- count = SMU_MAX_SMIO_LEVELS;
- for (level = 0; level < count; ++level) {
- table->SmioTable1.Pattern[level].Voltage =
- PP_HOST_TO_SMC_US(data->vddci_voltage_table.entries[level].value * VOLTAGE_SCALE);
- table->SmioTable1.Pattern[level].Smio = (uint8_t) level;
-
- table->Smio[level] |= data->vddci_voltage_table.entries[level].smio_low;
- }
- }
-
- table->SmioMask1 = data->vddci_voltage_table.mask_low;
-
- return 0;
-}
-
-/**
-* Preparation of vddc and vddgfx CAC tables for SMC.
-*
-* @param hwmgr the address of the hardware manager
-* @param table the SMC DPM table structure to be populated
-* @return always 0
-*/
-static int polaris10_populate_cac_table(struct pp_hwmgr *hwmgr,
- struct SMU74_Discrete_DpmTable *table)
-{
- uint32_t count;
- uint8_t index;
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct phm_ppt_v1_voltage_lookup_table *lookup_table =
- table_info->vddc_lookup_table;
- /* tables is already swapped, so in order to use the value from it,
- * we need to swap it back.
- * We are populating vddc CAC data to BapmVddc table
- * in split and merged mode
- */
- for (count = 0; count < lookup_table->count; count++) {
- index = phm_get_voltage_index(lookup_table,
- data->vddc_voltage_table.entries[count].value);
- table->BapmVddcVidLoSidd[count] = convert_to_vid(lookup_table->entries[index].us_cac_low);
- table->BapmVddcVidHiSidd[count] = convert_to_vid(lookup_table->entries[index].us_cac_mid);
- table->BapmVddcVidHiSidd2[count] = convert_to_vid(lookup_table->entries[index].us_cac_high);
- }
-
- return 0;
-}
-
-/**
-* Preparation of voltage tables for SMC.
-*
-* @param hwmgr the address of the hardware manager
-* @param table the SMC DPM table structure to be populated
-* @return always 0
-*/
-
-static int polaris10_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr,
- struct SMU74_Discrete_DpmTable *table)
-{
- polaris10_populate_smc_vddci_table(hwmgr, table);
- polaris10_populate_smc_mvdd_table(hwmgr, table);
- polaris10_populate_cac_table(hwmgr, table);
-
- return 0;
-}
-
-static int polaris10_populate_ulv_level(struct pp_hwmgr *hwmgr,
- struct SMU74_Discrete_Ulv *state)
-{
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct pp_smumgr *smumgr = hwmgr->smumgr;
-
- state->CcPwrDynRm = 0;
- state->CcPwrDynRm1 = 0;
-
- state->VddcOffset = (uint16_t) table_info->us_ulv_voltage_offset;
- state->VddcOffsetVid = (uint8_t)(table_info->us_ulv_voltage_offset *
- VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
-
- if (smumgr->chip_id == CHIP_POLARIS12 || smumgr->is_kicker)
- state->VddcPhase = data->vddc_phase_shed_control ^ 0x3;
- else
- state->VddcPhase = (data->vddc_phase_shed_control) ? 0 : 1;
-
- CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm);
- CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm1);
- CONVERT_FROM_HOST_TO_SMC_US(state->VddcOffset);
-
- return 0;
-}
-
-static int polaris10_populate_ulv_state(struct pp_hwmgr *hwmgr,
- struct SMU74_Discrete_DpmTable *table)
-{
- return polaris10_populate_ulv_level(hwmgr, &table->Ulv);
-}
-
-static int polaris10_populate_smc_link_level(struct pp_hwmgr *hwmgr,
- struct SMU74_Discrete_DpmTable *table)
-{
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
- struct smu7_dpm_table *dpm_table = &data->dpm_table;
- int i;
-
- /* Index (dpm_table->pcie_speed_table.count)
- * is reserved for PCIE boot level. */
- for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) {
- table->LinkLevel[i].PcieGenSpeed =
- (uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value;
- table->LinkLevel[i].PcieLaneCount = (uint8_t)encode_pcie_lane_width(
- dpm_table->pcie_speed_table.dpm_levels[i].param1);
- table->LinkLevel[i].EnabledForActivity = 1;
- table->LinkLevel[i].SPC = (uint8_t)(data->pcie_spc_cap & 0xff);
- table->LinkLevel[i].DownThreshold = PP_HOST_TO_SMC_UL(5);
- table->LinkLevel[i].UpThreshold = PP_HOST_TO_SMC_UL(30);
- }
-
- smu_data->smc_state_table.LinkLevelCount =
- (uint8_t)dpm_table->pcie_speed_table.count;
-
-/* To Do move to hwmgr */
- data->dpm_level_enable_mask.pcie_dpm_enable_mask =
- phm_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
-
- return 0;
-}
-
-
-static void polaris10_get_sclk_range_table(struct pp_hwmgr *hwmgr,
- SMU74_Discrete_DpmTable *table)
-{
- struct pp_smumgr *smumgr = hwmgr->smumgr;
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
- uint32_t i, ref_clk;
-
- struct pp_atom_ctrl_sclk_range_table range_table_from_vbios = { { {0} } };
-
- ref_clk = smu7_get_xclk(hwmgr);
-
- if (0 == atomctrl_get_smc_sclk_range_table(hwmgr, &range_table_from_vbios)) {
- for (i = 0; i < NUM_SCLK_RANGE; i++) {
- table->SclkFcwRangeTable[i].vco_setting = range_table_from_vbios.entry[i].ucVco_setting;
- table->SclkFcwRangeTable[i].postdiv = range_table_from_vbios.entry[i].ucPostdiv;
- table->SclkFcwRangeTable[i].fcw_pcc = range_table_from_vbios.entry[i].usFcw_pcc;
-
- table->SclkFcwRangeTable[i].fcw_trans_upper = range_table_from_vbios.entry[i].usFcw_trans_upper;
- table->SclkFcwRangeTable[i].fcw_trans_lower = range_table_from_vbios.entry[i].usRcw_trans_lower;
-
- CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_pcc);
- CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_upper);
- CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_lower);
- }
- return;
- }
-
- for (i = 0; i < NUM_SCLK_RANGE; i++) {
- smu_data->range_table[i].trans_lower_frequency = (ref_clk * Range_Table[i].fcw_trans_lower) >> Range_Table[i].postdiv;
- smu_data->range_table[i].trans_upper_frequency = (ref_clk * Range_Table[i].fcw_trans_upper) >> Range_Table[i].postdiv;
-
- table->SclkFcwRangeTable[i].vco_setting = Range_Table[i].vco_setting;
- table->SclkFcwRangeTable[i].postdiv = Range_Table[i].postdiv;
- table->SclkFcwRangeTable[i].fcw_pcc = Range_Table[i].fcw_pcc;
-
- table->SclkFcwRangeTable[i].fcw_trans_upper = Range_Table[i].fcw_trans_upper;
- table->SclkFcwRangeTable[i].fcw_trans_lower = Range_Table[i].fcw_trans_lower;
-
- CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_pcc);
- CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_upper);
- CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_lower);
- }
-}
-
-/**
-* Calculates the SCLK dividers using the provided engine clock
-*
-* @param hwmgr the address of the hardware manager
-* @param clock the engine clock to use to populate the structure
-* @param sclk the SMC SCLK structure to be populated
-*/
-static int polaris10_calculate_sclk_params(struct pp_hwmgr *hwmgr,
- uint32_t clock, SMU_SclkSetting *sclk_setting)
-{
- struct pp_smumgr *smumgr = hwmgr->smumgr;
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
- const SMU74_Discrete_DpmTable *table = &(smu_data->smc_state_table);
- struct pp_atomctrl_clock_dividers_ai dividers;
- uint32_t ref_clock;
- uint32_t pcc_target_percent, pcc_target_freq, ss_target_percent, ss_target_freq;
- uint8_t i;
- int result;
- uint64_t temp;
-
- sclk_setting->SclkFrequency = clock;
- /* get the engine clock dividers for this clock value */
- result = atomctrl_get_engine_pll_dividers_ai(hwmgr, clock, &dividers);
- if (result == 0) {
- sclk_setting->Fcw_int = dividers.usSclk_fcw_int;
- sclk_setting->Fcw_frac = dividers.usSclk_fcw_frac;
- sclk_setting->Pcc_fcw_int = dividers.usPcc_fcw_int;
- sclk_setting->PllRange = dividers.ucSclkPllRange;
- sclk_setting->Sclk_slew_rate = 0x400;
- sclk_setting->Pcc_up_slew_rate = dividers.usPcc_fcw_slew_frac;
- sclk_setting->Pcc_down_slew_rate = 0xffff;
- sclk_setting->SSc_En = dividers.ucSscEnable;
- sclk_setting->Fcw1_int = dividers.usSsc_fcw1_int;
- sclk_setting->Fcw1_frac = dividers.usSsc_fcw1_frac;
- sclk_setting->Sclk_ss_slew_rate = dividers.usSsc_fcw_slew_frac;
- return result;
- }
-
- ref_clock = smu7_get_xclk(hwmgr);
-
- for (i = 0; i < NUM_SCLK_RANGE; i++) {
- if (clock > smu_data->range_table[i].trans_lower_frequency
- && clock <= smu_data->range_table[i].trans_upper_frequency) {
- sclk_setting->PllRange = i;
- break;
- }
- }
-
- sclk_setting->Fcw_int = (uint16_t)((clock << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv) / ref_clock);
- temp = clock << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv;
- temp <<= 0x10;
- do_div(temp, ref_clock);
- sclk_setting->Fcw_frac = temp & 0xffff;
-
- pcc_target_percent = 10; /* Hardcode 10% for now. */
- pcc_target_freq = clock - (clock * pcc_target_percent / 100);
- sclk_setting->Pcc_fcw_int = (uint16_t)((pcc_target_freq << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv) / ref_clock);
-
- ss_target_percent = 2; /* Hardcode 2% for now. */
- sclk_setting->SSc_En = 0;
- if (ss_target_percent) {
- sclk_setting->SSc_En = 1;
- ss_target_freq = clock - (clock * ss_target_percent / 100);
- sclk_setting->Fcw1_int = (uint16_t)((ss_target_freq << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv) / ref_clock);
- temp = ss_target_freq << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv;
- temp <<= 0x10;
- do_div(temp, ref_clock);
- sclk_setting->Fcw1_frac = temp & 0xffff;
- }
-
- return 0;
-}
-
-/**
-* Populates single SMC SCLK structure using the provided engine clock
-*
-* @param hwmgr the address of the hardware manager
-* @param clock the engine clock to use to populate the structure
-* @param sclk the SMC SCLK structure to be populated
-*/
-
-static int polaris10_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
- uint32_t clock, uint16_t sclk_al_threshold,
- struct SMU74_Discrete_GraphicsLevel *level)
-{
- int result;
- /* PP_Clocks minClocks; */
- uint32_t mvdd;
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- SMU_SclkSetting curr_sclk_setting = { 0 };
-
- result = polaris10_calculate_sclk_params(hwmgr, clock, &curr_sclk_setting);
-
- /* populate graphics levels */
- result = polaris10_get_dependency_volt_by_clk(hwmgr,
- table_info->vdd_dep_on_sclk, clock,
- &level->MinVoltage, &mvdd);
-
- PP_ASSERT_WITH_CODE((0 == result),
- "can not find VDDC voltage value for "
- "VDDC engine clock dependency table",
- return result);
- level->ActivityLevel = sclk_al_threshold;
-
- level->CcPwrDynRm = 0;
- level->CcPwrDynRm1 = 0;
- level->EnabledForActivity = 0;
- level->EnabledForThrottle = 1;
- level->UpHyst = 10;
- level->DownHyst = 0;
- level->VoltageDownHyst = 0;
- level->PowerThrottle = 0;
- data->display_timing.min_clock_in_sr = hwmgr->display_config.min_core_set_clock_in_sr;
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep))
- level->DeepSleepDivId = smu7_get_sleep_divider_id_from_clock(clock,
- hwmgr->display_config.min_core_set_clock_in_sr);
-
- /* Default to slow, highest DPM level will be
- * set to PPSMC_DISPLAY_WATERMARK_LOW later.
- */
- if (data->update_up_hyst)
- level->UpHyst = (uint8_t)data->up_hyst;
- if (data->update_down_hyst)
- level->DownHyst = (uint8_t)data->down_hyst;
-
- level->SclkSetting = curr_sclk_setting;
-
- CONVERT_FROM_HOST_TO_SMC_UL(level->MinVoltage);
- CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm);
- CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm1);
- CONVERT_FROM_HOST_TO_SMC_US(level->ActivityLevel);
- CONVERT_FROM_HOST_TO_SMC_UL(level->SclkSetting.SclkFrequency);
- CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw_int);
- CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw_frac);
- CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Pcc_fcw_int);
- CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Sclk_slew_rate);
- CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Pcc_up_slew_rate);
- CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Pcc_down_slew_rate);
- CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw1_int);
- CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw1_frac);
- CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Sclk_ss_slew_rate);
- return 0;
-}
-
-/**
-* Populates all SMC SCLK levels' structure based on the trimmed allowed dpm engine clock states
-*
-* @param hwmgr the address of the hardware manager
-*/
-int polaris10_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
-{
- struct pp_smumgr *smumgr = hwmgr->smumgr;
- struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
- struct smu7_dpm_table *dpm_table = &hw_data->dpm_table;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct phm_ppt_v1_pcie_table *pcie_table = table_info->pcie_table;
- uint8_t pcie_entry_cnt = (uint8_t) hw_data->dpm_table.pcie_speed_table.count;
- int result = 0;
- uint32_t array = smu_data->smu7_data.dpm_table_start +
- offsetof(SMU74_Discrete_DpmTable, GraphicsLevel);
- uint32_t array_size = sizeof(struct SMU74_Discrete_GraphicsLevel) *
- SMU74_MAX_LEVELS_GRAPHICS;
- struct SMU74_Discrete_GraphicsLevel *levels =
- smu_data->smc_state_table.GraphicsLevel;
- uint32_t i, max_entry;
- uint8_t hightest_pcie_level_enabled = 0,
- lowest_pcie_level_enabled = 0,
- mid_pcie_level_enabled = 0,
- count = 0;
-
- polaris10_get_sclk_range_table(hwmgr, &(smu_data->smc_state_table));
-
- for (i = 0; i < dpm_table->sclk_table.count; i++) {
-
- result = polaris10_populate_single_graphic_level(hwmgr,
- dpm_table->sclk_table.dpm_levels[i].value,
- (uint16_t)smu_data->activity_target[i],
- &(smu_data->smc_state_table.GraphicsLevel[i]));
- if (result)
- return result;
-
-		/* Make sure only DPM levels 0-1 have the Deep Sleep Div ID populated. */
- if (i > 1)
- levels[i].DeepSleepDivId = 0;
- }
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_SPLLShutdownSupport))
- smu_data->smc_state_table.GraphicsLevel[0].SclkSetting.SSc_En = 0;
-
- smu_data->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1;
- smu_data->smc_state_table.GraphicsDpmLevelCount =
- (uint8_t)dpm_table->sclk_table.count;
- hw_data->dpm_level_enable_mask.sclk_dpm_enable_mask =
- phm_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
-
-
- if (pcie_table != NULL) {
- PP_ASSERT_WITH_CODE((1 <= pcie_entry_cnt),
- "There must be 1 or more PCIE levels defined in PPTable.",
- return -EINVAL);
- max_entry = pcie_entry_cnt - 1;
- for (i = 0; i < dpm_table->sclk_table.count; i++)
- levels[i].pcieDpmLevel =
- (uint8_t) ((i < max_entry) ? i : max_entry);
- } else {
- while (hw_data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
- ((hw_data->dpm_level_enable_mask.pcie_dpm_enable_mask &
- (1 << (hightest_pcie_level_enabled + 1))) != 0))
- hightest_pcie_level_enabled++;
-
- while (hw_data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
- ((hw_data->dpm_level_enable_mask.pcie_dpm_enable_mask &
- (1 << lowest_pcie_level_enabled)) == 0))
- lowest_pcie_level_enabled++;
-
- while ((count < hightest_pcie_level_enabled) &&
- ((hw_data->dpm_level_enable_mask.pcie_dpm_enable_mask &
- (1 << (lowest_pcie_level_enabled + 1 + count))) == 0))
- count++;
-
- mid_pcie_level_enabled = (lowest_pcie_level_enabled + 1 + count) <
- hightest_pcie_level_enabled ?
- (lowest_pcie_level_enabled + 1 + count) :
- hightest_pcie_level_enabled;
-
- /* set pcieDpmLevel to hightest_pcie_level_enabled */
- for (i = 2; i < dpm_table->sclk_table.count; i++)
- levels[i].pcieDpmLevel = hightest_pcie_level_enabled;
-
- /* set pcieDpmLevel to lowest_pcie_level_enabled */
- levels[0].pcieDpmLevel = lowest_pcie_level_enabled;
-
- /* set pcieDpmLevel to mid_pcie_level_enabled */
- levels[1].pcieDpmLevel = mid_pcie_level_enabled;
- }
-	/* The level count is sent to the SMC once at SMC table init and never changes. */
- result = smu7_copy_bytes_to_smc(smumgr, array, (uint8_t *)levels,
- (uint32_t)array_size, SMC_RAM_END);
-
- return result;
-}
-
-
-static int polaris10_populate_single_memory_level(struct pp_hwmgr *hwmgr,
- uint32_t clock, struct SMU74_Discrete_MemoryLevel *mem_level)
-{
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- int result = 0;
- struct cgs_display_info info = {0, 0, NULL};
- uint32_t mclk_stutter_mode_threshold = 40000;
-
- cgs_get_active_displays_info(hwmgr->device, &info);
-
- if (table_info->vdd_dep_on_mclk) {
- result = polaris10_get_dependency_volt_by_clk(hwmgr,
- table_info->vdd_dep_on_mclk, clock,
- &mem_level->MinVoltage, &mem_level->MinMvdd);
- PP_ASSERT_WITH_CODE((0 == result),
- "can not find MinVddc voltage value from memory "
- "VDDC voltage dependency table", return result);
- }
-
- mem_level->MclkFrequency = clock;
- mem_level->EnabledForThrottle = 1;
- mem_level->EnabledForActivity = 0;
- mem_level->UpHyst = 0;
- mem_level->DownHyst = 100;
- mem_level->VoltageDownHyst = 0;
- mem_level->ActivityLevel = (uint16_t)data->mclk_activity_target;
- mem_level->StutterEnable = false;
- mem_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
-
- data->display_timing.num_existing_displays = info.display_count;
-
- if (mclk_stutter_mode_threshold &&
- (clock <= mclk_stutter_mode_threshold) &&
- (SMUM_READ_FIELD(hwmgr->device, DPG_PIPE_STUTTER_CONTROL,
- STUTTER_ENABLE) & 0x1))
- mem_level->StutterEnable = true;
-
- if (!result) {
- CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinMvdd);
- CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MclkFrequency);
- CONVERT_FROM_HOST_TO_SMC_US(mem_level->ActivityLevel);
- CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinVoltage);
- }
- return result;
-}
-
-/**
-* Populates all SMC MCLK level structures based on the trimmed list of allowed DPM memory clock states.
-*
-* @param hwmgr the address of the hardware manager
-*/
-int polaris10_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
-{
- struct pp_smumgr *smumgr = hwmgr->smumgr;
- struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
- struct smu7_dpm_table *dpm_table = &hw_data->dpm_table;
- int result;
- /* populate MCLK dpm table to SMU7 */
- uint32_t array = smu_data->smu7_data.dpm_table_start +
- offsetof(SMU74_Discrete_DpmTable, MemoryLevel);
- uint32_t array_size = sizeof(SMU74_Discrete_MemoryLevel) *
- SMU74_MAX_LEVELS_MEMORY;
- struct SMU74_Discrete_MemoryLevel *levels =
- smu_data->smc_state_table.MemoryLevel;
- uint32_t i;
-
- for (i = 0; i < dpm_table->mclk_table.count; i++) {
- PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value),
- "can not populate memory level as memory clock is zero",
- return -EINVAL);
- result = polaris10_populate_single_memory_level(hwmgr,
- dpm_table->mclk_table.dpm_levels[i].value,
- &levels[i]);
- if (i == dpm_table->mclk_table.count - 1) {
- levels[i].DisplayWatermark = PPSMC_DISPLAY_WATERMARK_HIGH;
- levels[i].EnabledForActivity = 1;
- }
- if (result)
- return result;
- }
-
-	/* In order to prevent MC activity in stutter mode from pushing DPM up,
-	 * the UVD change complements this by putting the MCLK in
-	 * a higher state by default such that we are not affected by
-	 * the up threshold or MCLK DPM latency.
-	 */
- levels[0].ActivityLevel = 0x1f;
- CONVERT_FROM_HOST_TO_SMC_US(levels[0].ActivityLevel);
-
- smu_data->smc_state_table.MemoryDpmLevelCount =
- (uint8_t)dpm_table->mclk_table.count;
- hw_data->dpm_level_enable_mask.mclk_dpm_enable_mask =
- phm_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
-
-	/* The level count is sent to the SMC once at SMC table init and never changes. */
- result = smu7_copy_bytes_to_smc(hwmgr->smumgr, array, (uint8_t *)levels,
- (uint32_t)array_size, SMC_RAM_END);
-
- return result;
-}
-
-/**
-* Populates the SMC MVDD structure using the provided memory clock.
-*
-* @param hwmgr the address of the hardware manager
-* @param mclk the MCLK value used to decide whether MVDD should be high or low
-* @param smio_pat the SMIO pattern structure to be populated
-*/
-static int polaris10_populate_mvdd_value(struct pp_hwmgr *hwmgr,
- uint32_t mclk, SMIO_Pattern *smio_pat)
-{
- const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- uint32_t i = 0;
-
- if (SMU7_VOLTAGE_CONTROL_NONE != data->mvdd_control) {
-		/* find the MVDD entry whose clock is greater than or equal to the requested clock */
- for (i = 0; i < table_info->vdd_dep_on_mclk->count; i++) {
- if (mclk <= table_info->vdd_dep_on_mclk->entries[i].clk) {
- smio_pat->Voltage = data->mvdd_voltage_table.entries[i].value;
- break;
- }
- }
- PP_ASSERT_WITH_CODE(i < table_info->vdd_dep_on_mclk->count,
- "MVDD Voltage is outside the supported range.",
- return -EINVAL);
- } else
- return -EINVAL;
-
- return 0;
-}
-
-static int polaris10_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
- SMU74_Discrete_DpmTable *table)
-{
- int result = 0;
- uint32_t sclk_frequency;
- const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- SMIO_Pattern vol_level;
- uint32_t mvdd;
- uint16_t us_mvdd;
-
- table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
-
- /* Get MinVoltage and Frequency from DPM0,
- * already converted to SMC_UL */
- sclk_frequency = data->vbios_boot_state.sclk_bootup_value;
- result = polaris10_get_dependency_volt_by_clk(hwmgr,
- table_info->vdd_dep_on_sclk,
- sclk_frequency,
- &table->ACPILevel.MinVoltage, &mvdd);
- PP_ASSERT_WITH_CODE((0 == result),
- "Cannot find ACPI VDDC voltage value "
- "in Clock Dependency Table",
- );
-
- result = polaris10_calculate_sclk_params(hwmgr, sclk_frequency, &(table->ACPILevel.SclkSetting));
- PP_ASSERT_WITH_CODE(result == 0, "Error retrieving Engine Clock dividers from VBIOS.", return result);
-
- table->ACPILevel.DeepSleepDivId = 0;
- table->ACPILevel.CcPwrDynRm = 0;
- table->ACPILevel.CcPwrDynRm1 = 0;
-
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags);
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.MinVoltage);
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm);
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1);
-
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkSetting.SclkFrequency);
- CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw_int);
- CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw_frac);
- CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Pcc_fcw_int);
- CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Sclk_slew_rate);
- CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Pcc_up_slew_rate);
- CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Pcc_down_slew_rate);
- CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw1_int);
- CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw1_frac);
- CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Sclk_ss_slew_rate);
-
-
- /* Get MinVoltage and Frequency from DPM0, already converted to SMC_UL */
- table->MemoryACPILevel.MclkFrequency = data->vbios_boot_state.mclk_bootup_value;
- result = polaris10_get_dependency_volt_by_clk(hwmgr,
- table_info->vdd_dep_on_mclk,
- table->MemoryACPILevel.MclkFrequency,
- &table->MemoryACPILevel.MinVoltage, &mvdd);
- PP_ASSERT_WITH_CODE((0 == result),
- "Cannot find ACPI VDDCI voltage value "
- "in Clock Dependency Table",
- );
-
- us_mvdd = 0;
- if ((SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control) ||
- (data->mclk_dpm_key_disabled))
- us_mvdd = data->vbios_boot_state.mvdd_bootup_value;
- else {
- if (!polaris10_populate_mvdd_value(hwmgr,
- data->dpm_table.mclk_table.dpm_levels[0].value,
- &vol_level))
- us_mvdd = vol_level.Voltage;
- }
-
- if (0 == polaris10_populate_mvdd_value(hwmgr, 0, &vol_level))
- table->MemoryACPILevel.MinMvdd = PP_HOST_TO_SMC_UL(vol_level.Voltage);
- else
- table->MemoryACPILevel.MinMvdd = 0;
-
- table->MemoryACPILevel.StutterEnable = false;
-
- table->MemoryACPILevel.EnabledForThrottle = 0;
- table->MemoryACPILevel.EnabledForActivity = 0;
- table->MemoryACPILevel.UpHyst = 0;
- table->MemoryACPILevel.DownHyst = 100;
- table->MemoryACPILevel.VoltageDownHyst = 0;
- table->MemoryACPILevel.ActivityLevel =
- PP_HOST_TO_SMC_US((uint16_t)data->mclk_activity_target);
-
- CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MclkFrequency);
- CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MinVoltage);
-
- return result;
-}
-
-static int polaris10_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
- SMU74_Discrete_DpmTable *table)
-{
- int result = -EINVAL;
- uint8_t count;
- struct pp_atomctrl_clock_dividers_vi dividers;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
- table_info->mm_dep_table;
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- uint32_t vddci;
-
- table->VceLevelCount = (uint8_t)(mm_table->count);
- table->VceBootLevel = 0;
-
- for (count = 0; count < table->VceLevelCount; count++) {
- table->VceLevel[count].Frequency = mm_table->entries[count].eclk;
- table->VceLevel[count].MinVoltage = 0;
- table->VceLevel[count].MinVoltage |=
- (mm_table->entries[count].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
-
- if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
- vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table),
- mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
- else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
- vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA;
- else
- vddci = (data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE) << VDDCI_SHIFT;
-
-
- table->VceLevel[count].MinVoltage |=
- (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
- table->VceLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
-
-		/* retrieve the divider value from the VBIOS */
- result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
- table->VceLevel[count].Frequency, &dividers);
- PP_ASSERT_WITH_CODE((0 == result),
- "can not find divide id for VCE engine clock",
- return result);
-
- table->VceLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
-
- CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].Frequency);
- CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].MinVoltage);
- }
- return result;
-}
-
-
-static int polaris10_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
- SMU74_Discrete_DpmTable *table)
-{
- int result = -EINVAL;
- uint8_t count;
- struct pp_atomctrl_clock_dividers_vi dividers;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
- table_info->mm_dep_table;
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- uint32_t vddci;
-
- table->SamuBootLevel = 0;
- table->SamuLevelCount = (uint8_t)(mm_table->count);
-
- for (count = 0; count < table->SamuLevelCount; count++) {
- /* not sure whether we need evclk or not */
- table->SamuLevel[count].MinVoltage = 0;
- table->SamuLevel[count].Frequency = mm_table->entries[count].samclock;
- table->SamuLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
- VOLTAGE_SCALE) << VDDC_SHIFT;
-
- if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
- vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table),
- mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
- else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
- vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA;
- else
- vddci = (data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE) << VDDCI_SHIFT;
-
- table->SamuLevel[count].MinVoltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
- table->SamuLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
-
-		/* retrieve the divider value from the VBIOS */
- result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
- table->SamuLevel[count].Frequency, &dividers);
- PP_ASSERT_WITH_CODE((0 == result),
- "can not find divide id for samu clock", return result);
-
- table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
-
- CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency);
- CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].MinVoltage);
- }
- return result;
-}
-
-static int polaris10_populate_memory_timing_parameters(struct pp_hwmgr *hwmgr,
- int32_t eng_clock, int32_t mem_clock,
- SMU74_Discrete_MCArbDramTimingTableEntry *arb_regs)
-{
- uint32_t dram_timing;
- uint32_t dram_timing2;
- uint32_t burst_time;
- int result;
-
- result = atomctrl_set_engine_dram_timings_rv770(hwmgr,
- eng_clock, mem_clock);
- PP_ASSERT_WITH_CODE(result == 0,
- "Error calling VBIOS to set DRAM_TIMING.", return result);
-
- dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
- dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
- burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);
-
-
- arb_regs->McArbDramTiming = PP_HOST_TO_SMC_UL(dram_timing);
- arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dram_timing2);
- arb_regs->McArbBurstTime = (uint8_t)burst_time;
-
- return 0;
-}
-
-static int polaris10_program_memory_timing_parameters(struct pp_hwmgr *hwmgr)
-{
- struct pp_smumgr *smumgr = hwmgr->smumgr;
- struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
- struct SMU74_Discrete_MCArbDramTimingTable arb_regs;
- uint32_t i, j;
- int result = 0;
-
- for (i = 0; i < hw_data->dpm_table.sclk_table.count; i++) {
- for (j = 0; j < hw_data->dpm_table.mclk_table.count; j++) {
- result = polaris10_populate_memory_timing_parameters(hwmgr,
- hw_data->dpm_table.sclk_table.dpm_levels[i].value,
- hw_data->dpm_table.mclk_table.dpm_levels[j].value,
- &arb_regs.entries[i][j]);
- if (result == 0)
- result = atomctrl_set_ac_timing_ai(hwmgr, hw_data->dpm_table.mclk_table.dpm_levels[j].value, j);
- if (result != 0)
- return result;
- }
- }
-
- result = smu7_copy_bytes_to_smc(
- hwmgr->smumgr,
- smu_data->smu7_data.arb_table_start,
- (uint8_t *)&arb_regs,
- sizeof(SMU74_Discrete_MCArbDramTimingTable),
- SMC_RAM_END);
- return result;
-}
-
-static int polaris10_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
- struct SMU74_Discrete_DpmTable *table)
-{
- int result = -EINVAL;
- uint8_t count;
- struct pp_atomctrl_clock_dividers_vi dividers;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
- table_info->mm_dep_table;
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- uint32_t vddci;
-
- table->UvdLevelCount = (uint8_t)(mm_table->count);
- table->UvdBootLevel = 0;
-
- for (count = 0; count < table->UvdLevelCount; count++) {
- table->UvdLevel[count].MinVoltage = 0;
- table->UvdLevel[count].VclkFrequency = mm_table->entries[count].vclk;
- table->UvdLevel[count].DclkFrequency = mm_table->entries[count].dclk;
- table->UvdLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
- VOLTAGE_SCALE) << VDDC_SHIFT;
-
- if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
- vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table),
- mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
- else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
- vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA;
- else
- vddci = (data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE) << VDDCI_SHIFT;
-
- table->UvdLevel[count].MinVoltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
- table->UvdLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
-
-		/* retrieve the divider value from the VBIOS */
- result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
- table->UvdLevel[count].VclkFrequency, &dividers);
- PP_ASSERT_WITH_CODE((0 == result),
- "can not find divide id for Vclk clock", return result);
-
- table->UvdLevel[count].VclkDivider = (uint8_t)dividers.pll_post_divider;
-
- result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
- table->UvdLevel[count].DclkFrequency, &dividers);
- PP_ASSERT_WITH_CODE((0 == result),
- "can not find divide id for Dclk clock", return result);
-
- table->UvdLevel[count].DclkDivider = (uint8_t)dividers.pll_post_divider;
-
- CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency);
- CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency);
- CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].MinVoltage);
- }
-
- return result;
-}
-
-static int polaris10_populate_smc_boot_level(struct pp_hwmgr *hwmgr,
- struct SMU74_Discrete_DpmTable *table)
-{
- int result = 0;
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-
- table->GraphicsBootLevel = 0;
- table->MemoryBootLevel = 0;
-
- /* find boot level from dpm table */
- result = phm_find_boot_level(&(data->dpm_table.sclk_table),
- data->vbios_boot_state.sclk_bootup_value,
- (uint32_t *)&(table->GraphicsBootLevel));
-
- result = phm_find_boot_level(&(data->dpm_table.mclk_table),
- data->vbios_boot_state.mclk_bootup_value,
- (uint32_t *)&(table->MemoryBootLevel));
-
- table->BootVddc = data->vbios_boot_state.vddc_bootup_value *
- VOLTAGE_SCALE;
- table->BootVddci = data->vbios_boot_state.vddci_bootup_value *
- VOLTAGE_SCALE;
- table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value *
- VOLTAGE_SCALE;
-
- CONVERT_FROM_HOST_TO_SMC_US(table->BootVddc);
- CONVERT_FROM_HOST_TO_SMC_US(table->BootVddci);
- CONVERT_FROM_HOST_TO_SMC_US(table->BootMVdd);
-
- return 0;
-}
-
-static int polaris10_populate_smc_initailial_state(struct pp_hwmgr *hwmgr)
-{
- struct pp_smumgr *smumgr = hwmgr->smumgr;
- struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- uint8_t count, level;
-
- count = (uint8_t)(table_info->vdd_dep_on_sclk->count);
-
- for (level = 0; level < count; level++) {
- if (table_info->vdd_dep_on_sclk->entries[level].clk >=
- hw_data->vbios_boot_state.sclk_bootup_value) {
- smu_data->smc_state_table.GraphicsBootLevel = level;
- break;
- }
- }
-
- count = (uint8_t)(table_info->vdd_dep_on_mclk->count);
- for (level = 0; level < count; level++) {
- if (table_info->vdd_dep_on_mclk->entries[level].clk >=
- hw_data->vbios_boot_state.mclk_bootup_value) {
- smu_data->smc_state_table.MemoryBootLevel = level;
- break;
- }
- }
-
- return 0;
-}
-
-
-static int polaris10_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
-{
- uint32_t ro, efuse, volt_without_cks, volt_with_cks, value, max, min;
- struct pp_smumgr *smumgr = hwmgr->smumgr;
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
-
- uint8_t i, stretch_amount, stretch_amount2, volt_offset = 0;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
- table_info->vdd_dep_on_sclk;
-
- stretch_amount = (uint8_t)table_info->cac_dtp_table->usClockStretchAmount;
-
-	/* Read the SMU efuse to calculate RO and determine
-	 * if the part is SS or FF. If RO >= 1660MHz, the part is FF.
-	 */
- efuse = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixSMU_EFUSE_0 + (67 * 4));
- efuse &= 0xFF000000;
- efuse = efuse >> 24;
-
- if (hwmgr->chip_id == CHIP_POLARIS10) {
- min = 1000;
- max = 2300;
- } else {
- min = 1100;
- max = 2100;
- }
-
- ro = efuse * (max - min) / 255 + min;
-
- /* Populate Sclk_CKS_masterEn0_7 and Sclk_voltageOffset */
- for (i = 0; i < sclk_table->count; i++) {
- smu_data->smc_state_table.Sclk_CKS_masterEn0_7 |=
- sclk_table->entries[i].cks_enable << i;
- if (hwmgr->chip_id == CHIP_POLARIS10) {
- volt_without_cks = (uint32_t)((2753594000U + (sclk_table->entries[i].clk/100) * 136418 - (ro - 70) * 1000000) / \
- (2424180 - (sclk_table->entries[i].clk/100) * 1132925/1000));
- volt_with_cks = (uint32_t)((2797202000U + sclk_table->entries[i].clk/100 * 3232 - (ro - 65) * 1000000) / \
- (2522480 - sclk_table->entries[i].clk/100 * 115764/100));
- } else {
- volt_without_cks = (uint32_t)((2416794800U + (sclk_table->entries[i].clk/100) * 1476925/10 - (ro - 50) * 1000000) / \
- (2625416 - (sclk_table->entries[i].clk/100) * (12586807/10000)));
- volt_with_cks = (uint32_t)((2999656000U - sclk_table->entries[i].clk/100 * 392803 - (ro - 44) * 1000000) / \
- (3422454 - sclk_table->entries[i].clk/100 * (18886376/10000)));
- }
-
- if (volt_without_cks >= volt_with_cks)
- volt_offset = (uint8_t)(((volt_without_cks - volt_with_cks +
- sclk_table->entries[i].cks_voffset) * 100 + 624) / 625);
-
- smu_data->smc_state_table.Sclk_voltageOffset[i] = volt_offset;
- }
-
- smu_data->smc_state_table.LdoRefSel = (table_info->cac_dtp_table->ucCKS_LDO_REFSEL != 0) ? table_info->cac_dtp_table->ucCKS_LDO_REFSEL : 6;
- /* Populate CKS Lookup Table */
- if (stretch_amount == 1 || stretch_amount == 2 || stretch_amount == 5)
- stretch_amount2 = 0;
- else if (stretch_amount == 3 || stretch_amount == 4)
- stretch_amount2 = 1;
- else {
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ClockStretcher);
- PP_ASSERT_WITH_CODE(false,
- "Stretch Amount in PPTable not supported\n",
- return -EINVAL);
- }
-
- value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL);
- value &= 0xFFFFFFFE;
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL, value);
-
- return 0;
-}
-
-/**
-* Populates the SMC VRConfig field in the DPM table.
-*
-* @param hwmgr the address of the hardware manager
-* @param table the SMC DPM table structure to be populated
-* @return always 0
-*/
-static int polaris10_populate_vr_config(struct pp_hwmgr *hwmgr,
- struct SMU74_Discrete_DpmTable *table)
-{
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
- uint16_t config;
-
- config = VR_MERGED_WITH_VDDC;
- table->VRConfig |= (config << VRCONF_VDDGFX_SHIFT);
-
- /* Set Vddc Voltage Controller */
- if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
- config = VR_SVI2_PLANE_1;
- table->VRConfig |= config;
- } else {
- PP_ASSERT_WITH_CODE(false,
- "VDDC should be on SVI2 control in merged mode!",
- );
- }
- /* Set Vddci Voltage Controller */
- if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
- config = VR_SVI2_PLANE_2; /* only in merged mode */
- table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
- } else if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
- config = VR_SMIO_PATTERN_1;
- table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
- } else {
- config = VR_STATIC_VOLTAGE;
- table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
- }
- /* Set Mvdd Voltage Controller */
- if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) {
- config = VR_SVI2_PLANE_2;
- table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, smu_data->smu7_data.soft_regs_start +
- offsetof(SMU74_SoftRegisters, AllowMvddSwitch), 0x1);
- } else {
- config = VR_STATIC_VOLTAGE;
- table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
- }
-
- return 0;
-}
-
-
-static int polaris10_populate_avfs_parameters(struct pp_hwmgr *hwmgr)
-{
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct pp_smumgr *smumgr = hwmgr->smumgr;
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
-
- SMU74_Discrete_DpmTable *table = &(smu_data->smc_state_table);
- int result = 0;
- struct pp_atom_ctrl__avfs_parameters avfs_params = {0};
- AVFS_meanNsigma_t AVFS_meanNsigma = { {0} };
- AVFS_Sclk_Offset_t AVFS_SclkOffset = { {0} };
- uint32_t tmp, i;
-
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)hwmgr->pptable;
- struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
- table_info->vdd_dep_on_sclk;
-
-
- if (((struct smu7_smumgr *)smu_data)->avfs.avfs_btc_status == AVFS_BTC_NOTSUPPORTED)
- return result;
-
- result = atomctrl_get_avfs_information(hwmgr, &avfs_params);
-
- if (0 == result) {
- table->BTCGB_VDROOP_TABLE[0].a0 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a0);
- table->BTCGB_VDROOP_TABLE[0].a1 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a1);
- table->BTCGB_VDROOP_TABLE[0].a2 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a2);
- table->BTCGB_VDROOP_TABLE[1].a0 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a0);
- table->BTCGB_VDROOP_TABLE[1].a1 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a1);
- table->BTCGB_VDROOP_TABLE[1].a2 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a2);
- table->AVFSGB_VDROOP_TABLE[0].m1 = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSON_m1);
- table->AVFSGB_VDROOP_TABLE[0].m2 = PP_HOST_TO_SMC_US(avfs_params.usAVFSGB_FUSE_TABLE_CKSON_m2);
- table->AVFSGB_VDROOP_TABLE[0].b = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSON_b);
- table->AVFSGB_VDROOP_TABLE[0].m1_shift = 24;
- table->AVFSGB_VDROOP_TABLE[0].m2_shift = 12;
- table->AVFSGB_VDROOP_TABLE[1].m1 = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_m1);
- table->AVFSGB_VDROOP_TABLE[1].m2 = PP_HOST_TO_SMC_US(avfs_params.usAVFSGB_FUSE_TABLE_CKSOFF_m2);
- table->AVFSGB_VDROOP_TABLE[1].b = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_b);
- table->AVFSGB_VDROOP_TABLE[1].m1_shift = 24;
- table->AVFSGB_VDROOP_TABLE[1].m2_shift = 12;
- table->MaxVoltage = PP_HOST_TO_SMC_US(avfs_params.usMaxVoltage_0_25mv);
- AVFS_meanNsigma.Aconstant[0] = PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant0);
- AVFS_meanNsigma.Aconstant[1] = PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant1);
- AVFS_meanNsigma.Aconstant[2] = PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant2);
- AVFS_meanNsigma.DC_tol_sigma = PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_DC_tol_sigma);
- AVFS_meanNsigma.Platform_mean = PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_Platform_mean);
- AVFS_meanNsigma.PSM_Age_CompFactor = PP_HOST_TO_SMC_US(avfs_params.usPSM_Age_ComFactor);
- AVFS_meanNsigma.Platform_sigma = PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_Platform_sigma);
-
- for (i = 0; i < NUM_VFT_COLUMNS; i++) {
- AVFS_meanNsigma.Static_Voltage_Offset[i] = (uint8_t)(sclk_table->entries[i].cks_voffset * 100 / 625);
- AVFS_SclkOffset.Sclk_Offset[i] = PP_HOST_TO_SMC_US((uint16_t)(sclk_table->entries[i].sclk_offset) / 100);
- }
-
- result = smu7_read_smc_sram_dword(smumgr,
- SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, AvfsMeanNSigma),
- &tmp, SMC_RAM_END);
-
- smu7_copy_bytes_to_smc(smumgr,
- tmp,
- (uint8_t *)&AVFS_meanNsigma,
- sizeof(AVFS_meanNsigma_t),
- SMC_RAM_END);
-
- result = smu7_read_smc_sram_dword(smumgr,
- SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, AvfsSclkOffsetTable),
- &tmp, SMC_RAM_END);
- smu7_copy_bytes_to_smc(smumgr,
- tmp,
- (uint8_t *)&AVFS_SclkOffset,
- sizeof(AVFS_Sclk_Offset_t),
- SMC_RAM_END);
-
- data->avfs_vdroop_override_setting = (avfs_params.ucEnableGB_VDROOP_TABLE_CKSON << BTCGB0_Vdroop_Enable_SHIFT) |
- (avfs_params.ucEnableGB_VDROOP_TABLE_CKSOFF << BTCGB1_Vdroop_Enable_SHIFT) |
- (avfs_params.ucEnableGB_FUSE_TABLE_CKSON << AVFSGB0_Vdroop_Enable_SHIFT) |
- (avfs_params.ucEnableGB_FUSE_TABLE_CKSOFF << AVFSGB1_Vdroop_Enable_SHIFT);
- data->apply_avfs_cks_off_voltage = (avfs_params.ucEnableApplyAVFS_CKS_OFF_Voltage == 1) ? true : false;
- }
- return result;
-}
-
-
-/**
-* Initialize the ARB DRAM timing table's index field.
-*
-* @param smumgr the address of the SMU manager.
-* @return 0 on success; otherwise the SMC SRAM read/write error code
-*/
-static int polaris10_init_arb_table_index(struct pp_smumgr *smumgr)
-{
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
- uint32_t tmp;
- int result;
-
-	/* This is a read-modify-write on the first byte of the ARB table.
-	 * The first byte in the SMU74_Discrete_MCArbDramTimingTable structure
-	 * is the field 'current'.
-	 * This solution is ugly, but we never write the whole table, only
-	 * individual fields in it.
-	 * In reality this field should not be in that structure
-	 * but in a soft register.
-	 */
- result = smu7_read_smc_sram_dword(smumgr,
- smu_data->smu7_data.arb_table_start, &tmp, SMC_RAM_END);
-
- if (result)
- return result;
-
- tmp &= 0x00FFFFFF;
- tmp |= ((uint32_t)MC_CG_ARB_FREQ_F1) << 24;
-
- return smu7_write_smc_sram_dword(smumgr,
- smu_data->smu7_data.arb_table_start, tmp, SMC_RAM_END);
-}
-
-static void polaris10_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
- if (table_info &&
- table_info->cac_dtp_table->usPowerTuneDataSetID <= POWERTUNE_DEFAULT_SET_MAX &&
- table_info->cac_dtp_table->usPowerTuneDataSetID)
- smu_data->power_tune_defaults =
- &polaris10_power_tune_data_set_array
- [table_info->cac_dtp_table->usPowerTuneDataSetID - 1];
- else
- smu_data->power_tune_defaults = &polaris10_power_tune_data_set_array[0];
-
-}
-
-static void polaris10_save_default_power_profile(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_smumgr *data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
- struct SMU74_Discrete_GraphicsLevel *levels =
- data->smc_state_table.GraphicsLevel;
- unsigned min_level = 1;
-
- hwmgr->default_gfx_power_profile.activity_threshold =
- be16_to_cpu(levels[0].ActivityLevel);
- hwmgr->default_gfx_power_profile.up_hyst = levels[0].UpHyst;
- hwmgr->default_gfx_power_profile.down_hyst = levels[0].DownHyst;
- hwmgr->default_gfx_power_profile.type = AMD_PP_GFX_PROFILE;
-
- hwmgr->default_compute_power_profile = hwmgr->default_gfx_power_profile;
- hwmgr->default_compute_power_profile.type = AMD_PP_COMPUTE_PROFILE;
-
-	/* Work around compute SDMA instability: disable the lowest SCLK
-	 * DPM level. Optimize the compute power profile: use only the highest
-	 * 2 power levels (if more than 2 are available). Hysteresis:
-	 * 0ms up, 5ms down.
-	 */
- if (data->smc_state_table.GraphicsDpmLevelCount > 2)
- min_level = data->smc_state_table.GraphicsDpmLevelCount - 2;
- else if (data->smc_state_table.GraphicsDpmLevelCount == 2)
- min_level = 1;
- else
- min_level = 0;
- hwmgr->default_compute_power_profile.min_sclk =
- be32_to_cpu(levels[min_level].SclkSetting.SclkFrequency);
- hwmgr->default_compute_power_profile.up_hyst = 0;
- hwmgr->default_compute_power_profile.down_hyst = 5;
-
- hwmgr->gfx_power_profile = hwmgr->default_gfx_power_profile;
- hwmgr->compute_power_profile = hwmgr->default_compute_power_profile;
-}
-
-/**
-* Initializes the SMC table and uploads it.
-*
-* @param hwmgr the address of the powerplay hardware manager.
-* @return 0 on success; otherwise the first error encountered
-*/
-int polaris10_init_smc_table(struct pp_hwmgr *hwmgr)
-{
- int result;
- struct pp_smumgr *smumgr = hwmgr->smumgr;
- struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct SMU74_Discrete_DpmTable *table = &(smu_data->smc_state_table);
- uint8_t i;
- struct pp_atomctrl_gpio_pin_assignment gpio_pin;
- pp_atomctrl_clock_dividers_vi dividers;
-
- polaris10_initialize_power_tune_defaults(hwmgr);
-
- if (SMU7_VOLTAGE_CONTROL_NONE != hw_data->voltage_control)
- polaris10_populate_smc_voltage_tables(hwmgr, table);
-
- table->SystemFlags = 0;
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_AutomaticDCTransition))
- table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_StepVddc))
- table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
-
- if (hw_data->is_memory_gddr5)
- table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
-
- if (hw_data->ulv_supported && table_info->us_ulv_voltage_offset) {
- result = polaris10_populate_ulv_state(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize ULV state!", return result);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_ULV_PARAMETER, SMU7_CGULVPARAMETER_DFLT);
- }
-
- result = polaris10_populate_smc_link_level(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize Link Level!", return result);
-
- result = polaris10_populate_all_graphic_levels(hwmgr);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize Graphics Level!", return result);
-
- result = polaris10_populate_all_memory_levels(hwmgr);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize Memory Level!", return result);
-
- result = polaris10_populate_smc_acpi_level(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize ACPI Level!", return result);
-
- result = polaris10_populate_smc_vce_level(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize VCE Level!", return result);
-
- result = polaris10_populate_smc_samu_level(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize SAMU Level!", return result);
-
- /* Since only the initial state is completely set up at this point
- * (the other states are just copies of the boot state) we only
- * need to populate the ARB settings for the initial state.
- */
- result = polaris10_program_memory_timing_parameters(hwmgr);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to Write ARB settings for the initial state.", return result);
-
- result = polaris10_populate_smc_uvd_level(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize UVD Level!", return result);
-
- result = polaris10_populate_smc_boot_level(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize Boot Level!", return result);
-
- result = polaris10_populate_smc_initailial_state(hwmgr);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize Boot State!", return result);
-
- result = polaris10_populate_bapm_parameters_in_dpm_table(hwmgr);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to populate BAPM Parameters!", return result);
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ClockStretcher)) {
- result = polaris10_populate_clock_stretcher_data_table(hwmgr);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to populate Clock Stretcher Data Table!",
- return result);
- }
-
- result = polaris10_populate_avfs_parameters(hwmgr);
- PP_ASSERT_WITH_CODE(0 == result, "Failed to populate AVFS Parameters!", return result;);
-
- table->CurrSclkPllRange = 0xff;
- table->GraphicsVoltageChangeEnable = 1;
- table->GraphicsThermThrottleEnable = 1;
- table->GraphicsInterval = 1;
- table->VoltageInterval = 1;
- table->ThermalInterval = 1;
- table->TemperatureLimitHigh =
- table_info->cac_dtp_table->usTargetOperatingTemp *
- SMU7_Q88_FORMAT_CONVERSION_UNIT;
- table->TemperatureLimitLow =
- (table_info->cac_dtp_table->usTargetOperatingTemp - 1) *
- SMU7_Q88_FORMAT_CONVERSION_UNIT;
- table->MemoryVoltageChangeEnable = 1;
- table->MemoryInterval = 1;
- table->VoltageResponseTime = 0;
- table->PhaseResponseTime = 0;
- table->MemoryThermThrottleEnable = 1;
- table->PCIeBootLinkLevel = 0;
- table->PCIeGenInterval = 1;
- table->VRConfig = 0;
-
- result = polaris10_populate_vr_config(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to populate VRConfig setting!", return result);
-
- table->ThermGpio = 17;
- table->SclkStepSize = 0x4000;
-
- if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_VRHOT_GPIO_PINID, &gpio_pin)) {
- table->VRHotGpio = gpio_pin.uc_gpio_pin_bit_shift;
- } else {
- table->VRHotGpio = SMU7_UNUSED_GPIO_PIN;
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_RegulatorHot);
- }
-
- if (atomctrl_get_pp_assign_pin(hwmgr, PP_AC_DC_SWITCH_GPIO_PINID,
- &gpio_pin)) {
- table->AcDcGpio = gpio_pin.uc_gpio_pin_bit_shift;
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_AutomaticDCTransition);
- } else {
- table->AcDcGpio = SMU7_UNUSED_GPIO_PIN;
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_AutomaticDCTransition);
- }
-
- /* Thermal Output GPIO */
- if (atomctrl_get_pp_assign_pin(hwmgr, THERMAL_INT_OUTPUT_GPIO_PINID,
- &gpio_pin)) {
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ThermalOutGPIO);
-
- table->ThermOutGpio = gpio_pin.uc_gpio_pin_bit_shift;
-
-		/* For polarity, read GPIOPAD_A with the assigned GPIO pin,
-		 * since the VBIOS will program this register to set the 'inactive state';
-		 * the driver can then determine the 'active state' from this and
-		 * program the SMU with the correct polarity.
-		 */
- table->ThermOutPolarity = (0 == (cgs_read_register(hwmgr->device, mmGPIOPAD_A)
- & (1 << gpio_pin.uc_gpio_pin_bit_shift))) ? 1:0;
- table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_ONLY;
-
- /* if required, combine VRHot/PCC with thermal out GPIO */
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_RegulatorHot)
- && phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_CombinePCCWithThermalSignal))
- table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_VRHOT;
- } else {
- table->ThermOutGpio = 17;
- table->ThermOutPolarity = 1;
- table->ThermOutMode = SMU7_THERM_OUT_MODE_DISABLE;
- }
-
- /* Populate BIF_SCLK levels into SMC DPM table */
- for (i = 0; i <= hw_data->dpm_table.pcie_speed_table.count; i++) {
- result = atomctrl_get_dfs_pll_dividers_vi(hwmgr, smu_data->bif_sclk_table[i], &dividers);
- PP_ASSERT_WITH_CODE((result == 0), "Can not find DFS divide id for Sclk", return result);
-
- if (i == 0)
- table->Ulv.BifSclkDfs = PP_HOST_TO_SMC_US((USHORT)(dividers.pll_post_divider));
- else
- table->LinkLevel[i-1].BifSclkDfs = PP_HOST_TO_SMC_US((USHORT)(dividers.pll_post_divider));
- }
-
- for (i = 0; i < SMU74_MAX_ENTRIES_SMIO; i++)
- table->Smio[i] = PP_HOST_TO_SMC_UL(table->Smio[i]);
-
- CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags);
- CONVERT_FROM_HOST_TO_SMC_UL(table->VRConfig);
- CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask1);
- CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask2);
- CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize);
- CONVERT_FROM_HOST_TO_SMC_UL(table->CurrSclkPllRange);
- CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh);
- CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow);
- CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime);
- CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime);
-
-	/* Upload all DPM data (DPM levels, DPM level count, etc.) to SMC memory. */
- result = smu7_copy_bytes_to_smc(hwmgr->smumgr,
- smu_data->smu7_data.dpm_table_start +
- offsetof(SMU74_Discrete_DpmTable, SystemFlags),
- (uint8_t *)&(table->SystemFlags),
- sizeof(SMU74_Discrete_DpmTable) - 3 * sizeof(SMU74_PIDController),
- SMC_RAM_END);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to upload dpm data to SMC memory!", return result);
-
- result = polaris10_init_arb_table_index(hwmgr->smumgr);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to upload arb data to SMC memory!", return result);
-
- result = polaris10_populate_pm_fuses(hwmgr);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to populate PM fuses to SMC memory!", return result);
-
- polaris10_save_default_power_profile(hwmgr);
-
- return 0;
-}
-
-static int polaris10_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
-{
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-
- if (data->need_update_smu7_dpm_table &
- (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_OD_UPDATE_MCLK))
- return polaris10_program_memory_timing_parameters(hwmgr);
-
- return 0;
-}
-
-int polaris10_thermal_avfs_enable(struct pp_hwmgr *hwmgr)
-{
- int ret;
- struct pp_smumgr *smumgr = (struct pp_smumgr *)(hwmgr->smumgr);
- struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend);
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-
- if (smu_data->avfs.avfs_btc_status == AVFS_BTC_NOTSUPPORTED)
- return 0;
-
- ret = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_SetGBDroopSettings, data->avfs_vdroop_override_setting);
-
- ret = (smum_send_msg_to_smc(smumgr, PPSMC_MSG_EnableAvfs) == 0) ?
- 0 : -1;
-
- if (!ret)
- /* If this param is not changed, this function could fire unnecessarily */
- smu_data->avfs.avfs_btc_status = AVFS_BTC_COMPLETED_PREVIOUSLY;
-
- return ret;
-}
-
-/**
-* Set up the fan table to control the fan using the SMC.
-* @param hwmgr the address of the powerplay hardware manager.
-* @return always 0
-*/
-int polaris10_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
- SMU74_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
- uint32_t duty100;
- uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2;
- uint16_t fdo_min, slope1, slope2;
- uint32_t reference_clock;
- int res;
- uint64_t tmp64;
-
- if (hwmgr->thermal_controller.fanInfo.bNoFan) {
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_MicrocodeFanControl);
- return 0;
- }
-
- if (smu_data->smu7_data.fan_table_start == 0) {
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_MicrocodeFanControl);
- return 0;
- }
-
- duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- CG_FDO_CTRL1, FMAX_DUTY100);
-
- if (duty100 == 0) {
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_MicrocodeFanControl);
- return 0;
- }
-
- tmp64 = hwmgr->thermal_controller.advanceFanControlParameters.
- usPWMMin * duty100;
- do_div(tmp64, 10000);
- fdo_min = (uint16_t)tmp64;
-
- t_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usTMed -
- hwmgr->thermal_controller.advanceFanControlParameters.usTMin;
- t_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usTHigh -
- hwmgr->thermal_controller.advanceFanControlParameters.usTMed;
-
- pwm_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed -
- hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin;
- pwm_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh -
- hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed;
-
- slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
- slope2 = (uint16_t)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
-
- fan_table.TempMin = cpu_to_be16((50 + hwmgr->
- thermal_controller.advanceFanControlParameters.usTMin) / 100);
- fan_table.TempMed = cpu_to_be16((50 + hwmgr->
- thermal_controller.advanceFanControlParameters.usTMed) / 100);
- fan_table.TempMax = cpu_to_be16((50 + hwmgr->
- thermal_controller.advanceFanControlParameters.usTMax) / 100);
-
- fan_table.Slope1 = cpu_to_be16(slope1);
- fan_table.Slope2 = cpu_to_be16(slope2);
-
- fan_table.FdoMin = cpu_to_be16(fdo_min);
-
- fan_table.HystDown = cpu_to_be16(hwmgr->
- thermal_controller.advanceFanControlParameters.ucTHyst);
-
- fan_table.HystUp = cpu_to_be16(1);
-
- fan_table.HystSlope = cpu_to_be16(1);
-
- fan_table.TempRespLim = cpu_to_be16(5);
-
- reference_clock = smu7_get_xclk(hwmgr);
-
- fan_table.RefreshPeriod = cpu_to_be32((hwmgr->
- thermal_controller.advanceFanControlParameters.ulCycleDelay *
- reference_clock) / 1600);
-
- fan_table.FdoMax = cpu_to_be16((uint16_t)duty100);
-
- fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD(
- hwmgr->device, CGS_IND_REG__SMC,
- CG_MULT_THERMAL_CTRL, TEMP_SEL);
-
- res = smu7_copy_bytes_to_smc(hwmgr->smumgr, smu_data->smu7_data.fan_table_start,
- (uint8_t *)&fan_table, (uint32_t)sizeof(fan_table),
- SMC_RAM_END);
-
- if (!res && hwmgr->thermal_controller.
- advanceFanControlParameters.ucMinimumPWMLimit)
- res = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_SetFanMinPwm,
- hwmgr->thermal_controller.
- advanceFanControlParameters.ucMinimumPWMLimit);
-
- if (!res && hwmgr->thermal_controller.
- advanceFanControlParameters.ulMinFanSCLKAcousticLimit)
- res = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_SetFanSclkTarget,
- hwmgr->thermal_controller.
- advanceFanControlParameters.ulMinFanSCLKAcousticLimit);
-
- if (res)
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_MicrocodeFanControl);
-
- return 0;
-}
-
-static int polaris10_update_uvd_smc_table(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
- uint32_t mm_boot_level_offset, mm_boot_level_value;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
- smu_data->smc_state_table.UvdBootLevel = 0;
- if (table_info->mm_dep_table->count > 0)
- smu_data->smc_state_table.UvdBootLevel =
- (uint8_t) (table_info->mm_dep_table->count - 1);
- mm_boot_level_offset = smu_data->smu7_data.dpm_table_start + offsetof(SMU74_Discrete_DpmTable,
- UvdBootLevel);
- mm_boot_level_offset /= 4;
- mm_boot_level_offset *= 4;
- mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, mm_boot_level_offset);
- mm_boot_level_value &= 0x00FFFFFF;
- mm_boot_level_value |= smu_data->smc_state_table.UvdBootLevel << 24;
- cgs_write_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
-
- if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_UVDDPM) ||
- phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_StablePState))
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_UVDDPM_SetEnabledMask,
- (uint32_t)(1 << smu_data->smc_state_table.UvdBootLevel));
- return 0;
-}
-
-static int polaris10_update_vce_smc_table(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
- uint32_t mm_boot_level_offset, mm_boot_level_value;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_StablePState))
- smu_data->smc_state_table.VceBootLevel =
- (uint8_t) (table_info->mm_dep_table->count - 1);
- else
- smu_data->smc_state_table.VceBootLevel = 0;
-
- mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
- offsetof(SMU74_Discrete_DpmTable, VceBootLevel);
- mm_boot_level_offset /= 4;
- mm_boot_level_offset *= 4;
- mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, mm_boot_level_offset);
- mm_boot_level_value &= 0xFF00FFFF;
- mm_boot_level_value |= smu_data->smc_state_table.VceBootLevel << 16;
- cgs_write_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState))
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_VCEDPM_SetEnabledMask,
- (uint32_t)1 << smu_data->smc_state_table.VceBootLevel);
- return 0;
-}
-
-static int polaris10_update_samu_smc_table(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
- uint32_t mm_boot_level_offset, mm_boot_level_value;
-
-
- smu_data->smc_state_table.SamuBootLevel = 0;
- mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
- offsetof(SMU74_Discrete_DpmTable, SamuBootLevel);
-
- mm_boot_level_offset /= 4;
- mm_boot_level_offset *= 4;
- mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, mm_boot_level_offset);
- mm_boot_level_value &= 0xFFFFFF00;
- mm_boot_level_value |= smu_data->smc_state_table.SamuBootLevel << 0;
- cgs_write_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_StablePState))
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_SAMUDPM_SetEnabledMask,
- (uint32_t)(1 << smu_data->smc_state_table.SamuBootLevel));
- return 0;
-}
-
-
-static int polaris10_update_bif_smc_table(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct phm_ppt_v1_pcie_table *pcie_table = table_info->pcie_table;
- int max_entry, i;
-
- max_entry = (SMU74_MAX_LEVELS_LINK < pcie_table->count) ?
- SMU74_MAX_LEVELS_LINK :
- pcie_table->count;
- /* Setup BIF_SCLK levels */
- for (i = 0; i < max_entry; i++)
- smu_data->bif_sclk_table[i] = pcie_table->entries[i].pcie_sclk;
- return 0;
-}
-
-int polaris10_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type)
-{
- switch (type) {
- case SMU_UVD_TABLE:
- polaris10_update_uvd_smc_table(hwmgr);
- break;
- case SMU_VCE_TABLE:
- polaris10_update_vce_smc_table(hwmgr);
- break;
- case SMU_SAMU_TABLE:
- polaris10_update_samu_smc_table(hwmgr);
- break;
- case SMU_BIF_TABLE:
- polaris10_update_bif_smc_table(hwmgr);
- default:
- break;
- }
- return 0;
-}
-
-int polaris10_update_sclk_threshold(struct pp_hwmgr *hwmgr)
-{
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
-
- int result = 0;
- uint32_t low_sclk_interrupt_threshold = 0;
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_SclkThrottleLowNotification)
- && (hwmgr->gfx_arbiter.sclk_threshold !=
- data->low_sclk_interrupt_threshold)) {
- data->low_sclk_interrupt_threshold =
- hwmgr->gfx_arbiter.sclk_threshold;
- low_sclk_interrupt_threshold =
- data->low_sclk_interrupt_threshold;
-
- CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold);
-
- result = smu7_copy_bytes_to_smc(
- hwmgr->smumgr,
- smu_data->smu7_data.dpm_table_start +
- offsetof(SMU74_Discrete_DpmTable,
- LowSclkInterruptThreshold),
- (uint8_t *)&low_sclk_interrupt_threshold,
- sizeof(uint32_t),
- SMC_RAM_END);
- }
- PP_ASSERT_WITH_CODE((result == 0),
- "Failed to update SCLK threshold!", return result);
-
- result = polaris10_program_mem_timing_parameters(hwmgr);
- PP_ASSERT_WITH_CODE((result == 0),
- "Failed to program memory timing parameters!",
- );
-
- return result;
-}
-
-uint32_t polaris10_get_offsetof(uint32_t type, uint32_t member)
-{
- switch (type) {
- case SMU_SoftRegisters:
- switch (member) {
- case HandshakeDisables:
- return offsetof(SMU74_SoftRegisters, HandshakeDisables);
- case VoltageChangeTimeout:
- return offsetof(SMU74_SoftRegisters, VoltageChangeTimeout);
- case AverageGraphicsActivity:
- return offsetof(SMU74_SoftRegisters, AverageGraphicsActivity);
- case PreVBlankGap:
- return offsetof(SMU74_SoftRegisters, PreVBlankGap);
- case VBlankTimeout:
- return offsetof(SMU74_SoftRegisters, VBlankTimeout);
- case UcodeLoadStatus:
- return offsetof(SMU74_SoftRegisters, UcodeLoadStatus);
- }
- case SMU_Discrete_DpmTable:
- switch (member) {
- case UvdBootLevel:
- return offsetof(SMU74_Discrete_DpmTable, UvdBootLevel);
- case VceBootLevel:
- return offsetof(SMU74_Discrete_DpmTable, VceBootLevel);
- case SamuBootLevel:
- return offsetof(SMU74_Discrete_DpmTable, SamuBootLevel);
- case LowSclkInterruptThreshold:
- return offsetof(SMU74_Discrete_DpmTable, LowSclkInterruptThreshold);
- }
- }
- pr_warn("can't get the offset of type %x member %x\n", type, member);
- return 0;
-}
-
-uint32_t polaris10_get_mac_definition(uint32_t value)
-{
- switch (value) {
- case SMU_MAX_LEVELS_GRAPHICS:
- return SMU74_MAX_LEVELS_GRAPHICS;
- case SMU_MAX_LEVELS_MEMORY:
- return SMU74_MAX_LEVELS_MEMORY;
- case SMU_MAX_LEVELS_LINK:
- return SMU74_MAX_LEVELS_LINK;
- case SMU_MAX_ENTRIES_SMIO:
- return SMU74_MAX_ENTRIES_SMIO;
- case SMU_MAX_LEVELS_VDDC:
- return SMU74_MAX_LEVELS_VDDC;
- case SMU_MAX_LEVELS_VDDGFX:
- return SMU74_MAX_LEVELS_VDDGFX;
- case SMU_MAX_LEVELS_VDDCI:
- return SMU74_MAX_LEVELS_VDDCI;
- case SMU_MAX_LEVELS_MVDD:
- return SMU74_MAX_LEVELS_MVDD;
- case SMU_UVD_MCLK_HANDSHAKE_DISABLE:
- return SMU7_UVD_MCLK_HANDSHAKE_DISABLE;
- }
-
- pr_warn("can't get the mac of %x\n", value);
- return 0;
-}
-
-/**
-* Get the location of various tables inside the FW image.
-*
-* @param hwmgr the address of the powerplay hardware manager.
-* @return 0 on success, -1 if any table location could not be read
-*/
-int polaris10_process_firmware_header(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- uint32_t tmp;
- int result;
- bool error = false;
-
- result = smu7_read_smc_sram_dword(hwmgr->smumgr,
- SMU7_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU74_Firmware_Header, DpmTable),
- &tmp, SMC_RAM_END);
-
- if (0 == result)
- smu_data->smu7_data.dpm_table_start = tmp;
-
- error |= (0 != result);
-
- result = smu7_read_smc_sram_dword(hwmgr->smumgr,
- SMU7_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU74_Firmware_Header, SoftRegisters),
- &tmp, SMC_RAM_END);
-
- if (!result) {
- data->soft_regs_start = tmp;
- smu_data->smu7_data.soft_regs_start = tmp;
- }
-
- error |= (0 != result);
-
- result = smu7_read_smc_sram_dword(hwmgr->smumgr,
- SMU7_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU74_Firmware_Header, mcRegisterTable),
- &tmp, SMC_RAM_END);
-
- if (!result)
- smu_data->smu7_data.mc_reg_table_start = tmp;
-
- result = smu7_read_smc_sram_dword(hwmgr->smumgr,
- SMU7_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU74_Firmware_Header, FanTable),
- &tmp, SMC_RAM_END);
-
- if (!result)
- smu_data->smu7_data.fan_table_start = tmp;
-
- error |= (0 != result);
-
- result = smu7_read_smc_sram_dword(hwmgr->smumgr,
- SMU7_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU74_Firmware_Header, mcArbDramTimingTable),
- &tmp, SMC_RAM_END);
-
- if (!result)
- smu_data->smu7_data.arb_table_start = tmp;
-
- error |= (0 != result);
-
- result = smu7_read_smc_sram_dword(hwmgr->smumgr,
- SMU7_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU74_Firmware_Header, Version),
- &tmp, SMC_RAM_END);
-
- if (!result)
- hwmgr->microcode_version_info.SMC = tmp;
-
- error |= (0 != result);
-
- return error ? -1 : 0;
-}
-
-bool polaris10_is_dpm_running(struct pp_hwmgr *hwmgr)
-{
- return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device,
- CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON))
- ? true : false;
-}
-
-int polaris10_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr,
- struct amd_pp_profile *request)
-{
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)
- (hwmgr->smumgr->backend);
- struct SMU74_Discrete_GraphicsLevel *levels =
- smu_data->smc_state_table.GraphicsLevel;
- uint32_t array = smu_data->smu7_data.dpm_table_start +
- offsetof(SMU74_Discrete_DpmTable, GraphicsLevel);
- uint32_t array_size = sizeof(struct SMU74_Discrete_GraphicsLevel) *
- SMU74_MAX_LEVELS_GRAPHICS;
- uint32_t i;
-
- for (i = 0; i < smu_data->smc_state_table.GraphicsDpmLevelCount; i++) {
- levels[i].ActivityLevel =
- cpu_to_be16(request->activity_threshold);
- levels[i].EnabledForActivity = 1;
- levels[i].UpHyst = request->up_hyst;
- levels[i].DownHyst = request->down_hyst;
- }
-
- return smu7_copy_bytes_to_smc(hwmgr->smumgr, array, (uint8_t *)levels,
- array_size, SMC_RAM_END);
-}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.h b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.h
deleted file mode 100644
index 1df8154d0626..000000000000
--- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.h
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#ifndef POLARIS10_SMC_H
-#define POLARIS10_SMC_H
-
-#include "smumgr.h"
-
-
-int polaris10_populate_all_graphic_levels(struct pp_hwmgr *hwmgr);
-int polaris10_populate_all_memory_levels(struct pp_hwmgr *hwmgr);
-int polaris10_init_smc_table(struct pp_hwmgr *hwmgr);
-int polaris10_thermal_setup_fan_table(struct pp_hwmgr *hwmgr);
-int polaris10_thermal_avfs_enable(struct pp_hwmgr *hwmgr);
-int polaris10_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type);
-int polaris10_update_sclk_threshold(struct pp_hwmgr *hwmgr);
-uint32_t polaris10_get_offsetof(uint32_t type, uint32_t member);
-uint32_t polaris10_get_mac_definition(uint32_t value);
-int polaris10_process_firmware_header(struct pp_hwmgr *hwmgr);
-bool polaris10_is_dpm_running(struct pp_hwmgr *hwmgr);
-int polaris10_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr,
- struct amd_pp_profile *request);
-
-#endif
-
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
index 75f43dadc56b..bd6be7793ca7 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
@@ -35,13 +35,47 @@
#include "gca/gfx_8_0_d.h"
#include "bif/bif_5_0_d.h"
#include "bif/bif_5_0_sh_mask.h"
-#include "polaris10_pwrvirus.h"
#include "ppatomctrl.h"
#include "cgs_common.h"
-#include "polaris10_smc.h"
#include "smu7_ppsmc.h"
#include "smu7_smumgr.h"
+#include "smu7_dyn_defaults.h"
+
+#include "smu7_hwmgr.h"
+#include "hardwaremanager.h"
+#include "ppatomctrl.h"
+#include "atombios.h"
+#include "pppcielanes.h"
+
+#include "dce/dce_10_0_d.h"
+#include "dce/dce_10_0_sh_mask.h"
+
+#define POLARIS10_SMC_SIZE 0x20000
+#define VOLTAGE_VID_OFFSET_SCALE1 625
+#define VOLTAGE_VID_OFFSET_SCALE2 100
+#define POWERTUNE_DEFAULT_SET_MAX 1
+#define VDDC_VDDCI_DELTA 200
+#define MC_CG_ARB_FREQ_F1 0x0b
+
+static const struct polaris10_pt_defaults polaris10_power_tune_data_set_array[POWERTUNE_DEFAULT_SET_MAX] = {
+ /* sviLoadLIneEn, SviLoadLineVddC, TDC_VDDC_ThrottleReleaseLimitPerc, TDC_MAWt,
+ * TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac, BAPM_TEMP_GRADIENT */
+ { 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000,
+ { 0x79, 0x253, 0x25D, 0xAE, 0x72, 0x80, 0x83, 0x86, 0x6F, 0xC8, 0xC9, 0xC9, 0x2F, 0x4D, 0x61},
+ { 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 } },
+};
+
+static const sclkFcwRange_t Range_Table[NUM_SCLK_RANGE] = {
+ {VCO_2_4, POSTDIV_DIV_BY_16, 75, 160, 112},
+ {VCO_3_6, POSTDIV_DIV_BY_16, 112, 224, 160},
+ {VCO_2_4, POSTDIV_DIV_BY_8, 75, 160, 112},
+ {VCO_3_6, POSTDIV_DIV_BY_8, 112, 224, 160},
+ {VCO_2_4, POSTDIV_DIV_BY_4, 75, 160, 112},
+ {VCO_3_6, POSTDIV_DIV_BY_4, 112, 216, 160},
+ {VCO_2_4, POSTDIV_DIV_BY_2, 75, 160, 108},
+ {VCO_3_6, POSTDIV_DIV_BY_2, 112, 216, 160} };
+
#define PPPOLARIS10_TARGETACTIVITY_DFLT 50
static const SMU74_Discrete_GraphicsLevel avfs_graphics_level_polaris10[8] = {
@@ -60,46 +94,13 @@ static const SMU74_Discrete_GraphicsLevel avfs_graphics_level_polaris10[8] = {
static const SMU74_Discrete_MemoryLevel avfs_memory_level_polaris10 = {
0x100ea446, 0, 0x30750000, 0x01, 0x01, 0x01, 0x00, 0x00, 0x64, 0x00, 0x00, 0x1f00, 0x00, 0x00};
-static int polaris10_setup_pwr_virus(struct pp_smumgr *smumgr)
-{
- int i;
- int result = -EINVAL;
- uint32_t reg, data;
-
- const PWR_Command_Table *pvirus = pwr_virus_table;
- struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend);
-
- for (i = 0; i < PWR_VIRUS_TABLE_SIZE; i++) {
- switch (pvirus->command) {
- case PwrCmdWrite:
- reg = pvirus->reg;
- data = pvirus->data;
- cgs_write_register(smumgr->device, reg, data);
- break;
-
- case PwrCmdEnd:
- result = 0;
- break;
-
- default:
- pr_info("Table Exit with Invalid Command!");
- smu_data->avfs.avfs_btc_status = AVFS_BTC_VIRUS_FAIL;
- result = -EINVAL;
- break;
- }
- pvirus++;
- }
-
- return result;
-}
-
-static int polaris10_perform_btc(struct pp_smumgr *smumgr)
+static int polaris10_perform_btc(struct pp_hwmgr *hwmgr)
{
int result = 0;
- struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend);
+ struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
if (0 != smu_data->avfs.avfs_btc_param) {
- if (0 != smu7_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_PerformBtc, smu_data->avfs.avfs_btc_param)) {
+ if (0 != smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_PerformBtc, smu_data->avfs.avfs_btc_param)) {
pr_info("[AVFS][SmuPolaris10_PerformBtc] PerformBTC SMU msg failed");
result = -1;
}
@@ -107,16 +108,16 @@ static int polaris10_perform_btc(struct pp_smumgr *smumgr)
if (smu_data->avfs.avfs_btc_param > 1) {
/* Soft-Reset to reset the engine before loading uCode */
/* halt */
- cgs_write_register(smumgr->device, mmCP_MEC_CNTL, 0x50000000);
+ cgs_write_register(hwmgr->device, mmCP_MEC_CNTL, 0x50000000);
/* reset everything */
- cgs_write_register(smumgr->device, mmGRBM_SOFT_RESET, 0xffffffff);
- cgs_write_register(smumgr->device, mmGRBM_SOFT_RESET, 0);
+ cgs_write_register(hwmgr->device, mmGRBM_SOFT_RESET, 0xffffffff);
+ cgs_write_register(hwmgr->device, mmGRBM_SOFT_RESET, 0);
}
return result;
}
-static int polaris10_setup_graphics_level_structure(struct pp_smumgr *smumgr)
+static int polaris10_setup_graphics_level_structure(struct pp_hwmgr *hwmgr)
{
uint32_t vr_config;
uint32_t dpm_table_start;
@@ -127,7 +128,7 @@ static int polaris10_setup_graphics_level_structure(struct pp_smumgr *smumgr)
graphics_level_size = sizeof(avfs_graphics_level_polaris10);
u16_boot_mvdd = PP_HOST_TO_SMC_US(1300 * VOLTAGE_SCALE);
- PP_ASSERT_WITH_CODE(0 == smu7_read_smc_sram_dword(smumgr,
+ PP_ASSERT_WITH_CODE(0 == smu7_read_smc_sram_dword(hwmgr,
SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, DpmTable),
&dpm_table_start, 0x40000),
"[AVFS][Polaris10_SetupGfxLvlStruct] SMU could not communicate starting address of DPM table",
@@ -138,14 +139,14 @@ static int polaris10_setup_graphics_level_structure(struct pp_smumgr *smumgr)
vr_config_address = dpm_table_start + offsetof(SMU74_Discrete_DpmTable, VRConfig);
- PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(smumgr, vr_config_address,
+ PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(hwmgr, vr_config_address,
(uint8_t *)&vr_config, sizeof(uint32_t), 0x40000),
"[AVFS][Polaris10_SetupGfxLvlStruct] Problems copying VRConfig value over to SMC",
return -1);
graphics_level_address = dpm_table_start + offsetof(SMU74_Discrete_DpmTable, GraphicsLevel);
- PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(smumgr, graphics_level_address,
+ PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(hwmgr, graphics_level_address,
(uint8_t *)(&avfs_graphics_level_polaris10),
graphics_level_size, 0x40000),
"[AVFS][Polaris10_SetupGfxLvlStruct] Copying of SCLK DPM table failed!",
@@ -153,7 +154,7 @@ static int polaris10_setup_graphics_level_structure(struct pp_smumgr *smumgr)
graphics_level_address = dpm_table_start + offsetof(SMU74_Discrete_DpmTable, MemoryLevel);
- PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(smumgr, graphics_level_address,
+ PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(hwmgr, graphics_level_address,
(uint8_t *)(&avfs_memory_level_polaris10), sizeof(avfs_memory_level_polaris10), 0x40000),
"[AVFS][Polaris10_SetupGfxLvlStruct] Copying of MCLK DPM table failed!",
return -1);
@@ -162,7 +163,7 @@ static int polaris10_setup_graphics_level_structure(struct pp_smumgr *smumgr)
graphics_level_address = dpm_table_start + offsetof(SMU74_Discrete_DpmTable, BootMVdd);
- PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(smumgr, graphics_level_address,
+ PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(hwmgr, graphics_level_address,
(uint8_t *)(&u16_boot_mvdd), sizeof(u16_boot_mvdd), 0x40000),
"[AVFS][Polaris10_SetupGfxLvlStruct] Copying of DPM table failed!",
return -1);
@@ -172,9 +173,9 @@ static int polaris10_setup_graphics_level_structure(struct pp_smumgr *smumgr)
static int
-polaris10_avfs_event_mgr(struct pp_smumgr *smumgr, bool SMU_VFT_INTACT)
+polaris10_avfs_event_mgr(struct pp_hwmgr *hwmgr, bool SMU_VFT_INTACT)
{
- struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend);
+ struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
switch (smu_data->avfs.avfs_btc_status) {
case AVFS_BTC_COMPLETED_PREVIOUSLY:
@@ -183,20 +184,20 @@ polaris10_avfs_event_mgr(struct pp_smumgr *smumgr, bool SMU_VFT_INTACT)
case AVFS_BTC_BOOT: /* Cold Boot State - Post SMU Start */
smu_data->avfs.avfs_btc_status = AVFS_BTC_DPMTABLESETUP_FAILED;
- PP_ASSERT_WITH_CODE(0 == polaris10_setup_graphics_level_structure(smumgr),
+ PP_ASSERT_WITH_CODE(0 == polaris10_setup_graphics_level_structure(hwmgr),
"[AVFS][Polaris10_AVFSEventMgr] Could not Copy Graphics Level table over to SMU",
return -EINVAL);
if (smu_data->avfs.avfs_btc_param > 1) {
pr_info("[AVFS][Polaris10_AVFSEventMgr] AC BTC has not been successfully verified on Fiji. There may be in this setting.");
smu_data->avfs.avfs_btc_status = AVFS_BTC_VIRUS_FAIL;
- PP_ASSERT_WITH_CODE(0 == polaris10_setup_pwr_virus(smumgr),
+ PP_ASSERT_WITH_CODE(0 == smu7_setup_pwr_virus(hwmgr),
"[AVFS][Polaris10_AVFSEventMgr] Could not setup Pwr Virus for AVFS ",
return -EINVAL);
}
smu_data->avfs.avfs_btc_status = AVFS_BTC_FAILED;
- PP_ASSERT_WITH_CODE(0 == polaris10_perform_btc(smumgr),
+ PP_ASSERT_WITH_CODE(0 == polaris10_perform_btc(hwmgr),
"[AVFS][Polaris10_AVFSEventMgr] Failure at SmuPolaris10_PerformBTC. AVFS Disabled",
return -EINVAL);
smu_data->avfs.avfs_btc_status = AVFS_BTC_ENABLEAVFS;
@@ -215,146 +216,146 @@ polaris10_avfs_event_mgr(struct pp_smumgr *smumgr, bool SMU_VFT_INTACT)
return 0;
}
-static int polaris10_start_smu_in_protection_mode(struct pp_smumgr *smumgr)
+static int polaris10_start_smu_in_protection_mode(struct pp_hwmgr *hwmgr)
{
int result = 0;
/* Wait for smc boot up */
- /* SMUM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(smumgr, SMC_IND, RCU_UC_EVENTS, boot_seq_done, 0) */
+ /* PHM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(smumgr, SMC_IND, RCU_UC_EVENTS, boot_seq_done, 0) */
/* Assert reset */
- SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
SMC_SYSCON_RESET_CNTL, rst_reg, 1);
- result = smu7_upload_smu_firmware_image(smumgr);
+ result = smu7_upload_smu_firmware_image(hwmgr);
if (result != 0)
return result;
/* Clear status */
- cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC, ixSMU_STATUS, 0);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMU_STATUS, 0);
- SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0);
/* De-assert reset */
- SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
SMC_SYSCON_RESET_CNTL, rst_reg, 0);
- SMUM_WAIT_VFPF_INDIRECT_FIELD(smumgr, SMC_IND, RCU_UC_EVENTS, INTERRUPTS_ENABLED, 1);
+ PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND, RCU_UC_EVENTS, INTERRUPTS_ENABLED, 1);
/* Call Test SMU message with 0x20000 offset to trigger SMU start */
- smu7_send_msg_to_smc_offset(smumgr);
+ smu7_send_msg_to_smc_offset(hwmgr);
/* Wait done bit to be set */
/* Check pass/failed indicator */
- SMUM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(smumgr, SMC_IND, SMU_STATUS, SMU_DONE, 0);
+ PHM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(hwmgr, SMC_IND, SMU_STATUS, SMU_DONE, 0);
- if (1 != SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ if (1 != PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
SMU_STATUS, SMU_PASS))
PP_ASSERT_WITH_CODE(false, "SMU Firmware start failed!", return -1);
- cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC, ixFIRMWARE_FLAGS, 0);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixFIRMWARE_FLAGS, 0);
- SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
SMC_SYSCON_RESET_CNTL, rst_reg, 1);
- SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
SMC_SYSCON_RESET_CNTL, rst_reg, 0);
/* Wait for firmware to initialize */
- SMUM_WAIT_VFPF_INDIRECT_FIELD(smumgr, SMC_IND, FIRMWARE_FLAGS, INTERRUPTS_ENABLED, 1);
+ PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND, FIRMWARE_FLAGS, INTERRUPTS_ENABLED, 1);
return result;
}
-static int polaris10_start_smu_in_non_protection_mode(struct pp_smumgr *smumgr)
+static int polaris10_start_smu_in_non_protection_mode(struct pp_hwmgr *hwmgr)
{
int result = 0;
/* wait for smc boot up */
- SMUM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(smumgr, SMC_IND, RCU_UC_EVENTS, boot_seq_done, 0);
+ PHM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(hwmgr, SMC_IND, RCU_UC_EVENTS, boot_seq_done, 0);
/* Clear firmware interrupt enable flag */
- /* SMUM_WRITE_VFPF_INDIRECT_FIELD(pSmuMgr, SMC_IND, SMC_SYSCON_MISC_CNTL, pre_fetcher_en, 1); */
- cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC,
+ /* PHM_WRITE_VFPF_INDIRECT_FIELD(pSmuMgr, SMC_IND, SMC_SYSCON_MISC_CNTL, pre_fetcher_en, 1); */
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
ixFIRMWARE_FLAGS, 0);
- SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
SMC_SYSCON_RESET_CNTL,
rst_reg, 1);
- result = smu7_upload_smu_firmware_image(smumgr);
+ result = smu7_upload_smu_firmware_image(hwmgr);
if (result != 0)
return result;
	/* Set smc instruction start point at 0x0 */
- smu7_program_jump_on_start(smumgr);
+ smu7_program_jump_on_start(hwmgr);
- SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0);
- SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
SMC_SYSCON_RESET_CNTL, rst_reg, 0);
/* Wait for firmware to initialize */
- SMUM_WAIT_VFPF_INDIRECT_FIELD(smumgr, SMC_IND,
+ PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND,
FIRMWARE_FLAGS, INTERRUPTS_ENABLED, 1);
return result;
}
-static int polaris10_start_smu(struct pp_smumgr *smumgr)
+static int polaris10_start_smu(struct pp_hwmgr *hwmgr)
{
int result = 0;
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
bool SMU_VFT_INTACT;
/* Only start SMC if SMC RAM is not running */
- if (!smu7_is_smc_ram_running(smumgr)) {
+ if (!smu7_is_smc_ram_running(hwmgr)) {
SMU_VFT_INTACT = false;
- smu_data->protected_mode = (uint8_t) (SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, SMU_FIRMWARE, SMU_MODE));
- smu_data->smu7_data.security_hard_key = (uint8_t) (SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, SMU_FIRMWARE, SMU_SEL));
+ smu_data->protected_mode = (uint8_t) (PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMU_FIRMWARE, SMU_MODE));
+ smu_data->smu7_data.security_hard_key = (uint8_t) (PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMU_FIRMWARE, SMU_SEL));
/* Check if SMU is running in protected mode */
if (smu_data->protected_mode == 0) {
- result = polaris10_start_smu_in_non_protection_mode(smumgr);
+ result = polaris10_start_smu_in_non_protection_mode(hwmgr);
} else {
- result = polaris10_start_smu_in_protection_mode(smumgr);
+ result = polaris10_start_smu_in_protection_mode(hwmgr);
/* If failed, try with different security Key. */
if (result != 0) {
smu_data->smu7_data.security_hard_key ^= 1;
- cgs_rel_firmware(smumgr->device, CGS_UCODE_ID_SMU);
- result = polaris10_start_smu_in_protection_mode(smumgr);
+ cgs_rel_firmware(hwmgr->device, CGS_UCODE_ID_SMU);
+ result = polaris10_start_smu_in_protection_mode(hwmgr);
}
}
if (result != 0)
PP_ASSERT_WITH_CODE(0, "Failed to load SMU ucode.", return result);
- polaris10_avfs_event_mgr(smumgr, true);
+ polaris10_avfs_event_mgr(hwmgr, true);
} else
SMU_VFT_INTACT = true; /*Driver went offline but SMU was still alive and contains the VFT table */
- polaris10_avfs_event_mgr(smumgr, SMU_VFT_INTACT);
+ polaris10_avfs_event_mgr(hwmgr, SMU_VFT_INTACT);
/* Setup SoftRegsStart here for register lookup in case DummyBackEnd is used and ProcessFirmwareHeader is not executed */
- smu7_read_smc_sram_dword(smumgr, SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, SoftRegisters),
+ smu7_read_smc_sram_dword(hwmgr, SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, SoftRegisters),
&(smu_data->smu7_data.soft_regs_start), 0x40000);
- result = smu7_request_smu_load_fw(smumgr);
+ result = smu7_request_smu_load_fw(hwmgr);
return result;
}
-static bool polaris10_is_hw_avfs_present(struct pp_smumgr *smumgr)
+static bool polaris10_is_hw_avfs_present(struct pp_hwmgr *hwmgr)
{
uint32_t efuse;
- efuse = cgs_read_ind_register(smumgr->device, CGS_IND_REG__SMC, ixSMU_EFUSE_0 + (49*4));
+ efuse = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMU_EFUSE_0 + (49*4));
efuse &= 0x00000001;
if (efuse)
return true;
@@ -362,7 +363,7 @@ static bool polaris10_is_hw_avfs_present(struct pp_smumgr *smumgr)
return false;
}
-static int polaris10_smu_init(struct pp_smumgr *smumgr)
+static int polaris10_smu_init(struct pp_hwmgr *hwmgr)
{
struct polaris10_smumgr *smu_data;
int i;
@@ -371,9 +372,9 @@ static int polaris10_smu_init(struct pp_smumgr *smumgr)
if (smu_data == NULL)
return -ENOMEM;
- smumgr->backend = smu_data;
+ hwmgr->smu_backend = smu_data;
- if (smu7_init(smumgr))
+ if (smu7_init(hwmgr))
return -EINVAL;
for (i = 0; i < SMU74_MAX_LEVELS_GRAPHICS; i++)
@@ -382,6 +383,2195 @@ static int polaris10_smu_init(struct pp_smumgr *smumgr)
return 0;
}
+static int polaris10_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr,
+ struct phm_ppt_v1_clock_voltage_dependency_table *dep_table,
+ uint32_t clock, SMU_VoltageLevel *voltage, uint32_t *mvdd)
+{
+ uint32_t i;
+ uint16_t vddci;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ *voltage = *mvdd = 0;
+
+	/* the clock-voltage dependency table is empty */
+ if (dep_table->count == 0)
+ return -EINVAL;
+
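+	/* The voltage returned here is the packed SMC encoding: VDDC and
+	 * VDDCI (both scaled by VOLTAGE_SCALE) in their VDDC_SHIFT/VDDCI_SHIFT
+	 * fields, plus a single phase in the PHASES_SHIFT field.
+	 */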
+ for (i = 0; i < dep_table->count; i++) {
+		/* find the first sclk at or above the requested clock */
+ if (dep_table->entries[i].clk >= clock) {
+ *voltage |= (dep_table->entries[i].vddc *
+ VOLTAGE_SCALE) << VDDC_SHIFT;
+ if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control)
+ *voltage |= (data->vbios_boot_state.vddci_bootup_value *
+ VOLTAGE_SCALE) << VDDCI_SHIFT;
+ else if (dep_table->entries[i].vddci)
+ *voltage |= (dep_table->entries[i].vddci *
+ VOLTAGE_SCALE) << VDDCI_SHIFT;
+ else {
+ vddci = phm_find_closest_vddci(&(data->vddci_voltage_table),
+ (dep_table->entries[i].vddc -
+ (uint16_t)VDDC_VDDCI_DELTA));
+ *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
+ }
+
+ if (SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control)
+ *mvdd = data->vbios_boot_state.mvdd_bootup_value *
+ VOLTAGE_SCALE;
+ else if (dep_table->entries[i].mvdd)
+ *mvdd = (uint32_t) dep_table->entries[i].mvdd *
+ VOLTAGE_SCALE;
+
+ *voltage |= 1 << PHASES_SHIFT;
+ return 0;
+ }
+ }
+
+	/* the requested sclk exceeds the highest sclk in the dependency table */
+ *voltage |= (dep_table->entries[i - 1].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
+
+ if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control)
+ *voltage |= (data->vbios_boot_state.vddci_bootup_value *
+ VOLTAGE_SCALE) << VDDCI_SHIFT;
+ else if (dep_table->entries[i-1].vddci) {
+ vddci = phm_find_closest_vddci(&(data->vddci_voltage_table),
+				(dep_table->entries[i-1].vddc -
+ (uint16_t)VDDC_VDDCI_DELTA));
+ *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
+ }
+
+ if (SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control)
+ *mvdd = data->vbios_boot_state.mvdd_bootup_value * VOLTAGE_SCALE;
+	else if (dep_table->entries[i-1].mvdd)
+ *mvdd = (uint32_t) dep_table->entries[i - 1].mvdd * VOLTAGE_SCALE;
+
+ return 0;
+}
+
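+/* Scale a fan gain given in percent to the SMC's 4096-based fixed-point
+ * representation (i.e. value * 4096 / 100).
+ */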
+static uint16_t scale_fan_gain_settings(uint16_t raw_setting)
+{
+ uint32_t tmp;
+ tmp = raw_setting * 4096 / 100;
+ return (uint16_t)tmp;
+}
+
+static int polaris10_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
+
+ const struct polaris10_pt_defaults *defaults = smu_data->power_tune_defaults;
+ SMU74_Discrete_DpmTable *table = &(smu_data->smc_state_table);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct phm_cac_tdp_table *cac_dtp_table = table_info->cac_dtp_table;
+ struct pp_advance_fan_control_parameters *fan_table =
+ &hwmgr->thermal_controller.advanceFanControlParameters;
+ int i, j, k;
+ const uint16_t *pdef1;
+ const uint16_t *pdef2;
+
+ table->DefaultTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 128));
+ table->TargetTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 128));
+
+ PP_ASSERT_WITH_CODE(cac_dtp_table->usTargetOperatingTemp <= 255,
+ "Target Operating Temp is out of Range!",
+ );
+
+ table->TemperatureLimitEdge = PP_HOST_TO_SMC_US(
+ cac_dtp_table->usTargetOperatingTemp * 256);
+ table->TemperatureLimitHotspot = PP_HOST_TO_SMC_US(
+ cac_dtp_table->usTemperatureLimitHotspot * 256);
+ table->FanGainEdge = PP_HOST_TO_SMC_US(
+ scale_fan_gain_settings(fan_table->usFanGainEdge));
+ table->FanGainHotspot = PP_HOST_TO_SMC_US(
+ scale_fan_gain_settings(fan_table->usFanGainHotspot));
+
+ pdef1 = defaults->BAPMTI_R;
+ pdef2 = defaults->BAPMTI_RC;
+
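+	/* Copy the flat BAPMTI_R/BAPMTI_RC default arrays into the
+	 * [iterations][sources][sinks] tables, converting each entry to SMC
+	 * byte order along the way.
+	 */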
+ for (i = 0; i < SMU74_DTE_ITERATIONS; i++) {
+ for (j = 0; j < SMU74_DTE_SOURCES; j++) {
+ for (k = 0; k < SMU74_DTE_SINKS; k++) {
+ table->BAPMTI_R[i][j][k] = PP_HOST_TO_SMC_US(*pdef1);
+ table->BAPMTI_RC[i][j][k] = PP_HOST_TO_SMC_US(*pdef2);
+ pdef1++;
+ pdef2++;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int polaris10_populate_svi_load_line(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
+ const struct polaris10_pt_defaults *defaults = smu_data->power_tune_defaults;
+
+ smu_data->power_tune_table.SviLoadLineEn = defaults->SviLoadLineEn;
+ smu_data->power_tune_table.SviLoadLineVddC = defaults->SviLoadLineVddC;
+ smu_data->power_tune_table.SviLoadLineTrimVddC = 3;
+ smu_data->power_tune_table.SviLoadLineOffsetVddC = 0;
+
+ return 0;
+}
+
+static int polaris10_populate_tdc_limit(struct pp_hwmgr *hwmgr)
+{
+ uint16_t tdc_limit;
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ const struct polaris10_pt_defaults *defaults = smu_data->power_tune_defaults;
+
+ tdc_limit = (uint16_t)(table_info->cac_dtp_table->usTDC * 128);
+ smu_data->power_tune_table.TDC_VDDC_PkgLimit =
+ CONVERT_FROM_HOST_TO_SMC_US(tdc_limit);
+ smu_data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
+ defaults->TDC_VDDC_ThrottleReleaseLimitPerc;
+ smu_data->power_tune_table.TDC_MAWt = defaults->TDC_MAWt;
+
+ return 0;
+}
+
+static int polaris10_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
+{
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
+ const struct polaris10_pt_defaults *defaults = smu_data->power_tune_defaults;
+ uint32_t temp;
+
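+	/* The DWORD at TdcWaterfallCtl also packs the LPML temperature
+	 * limits; keep the min/max/reserved bytes read back from the SMC and
+	 * only override the waterfall control with the platform default.
+	 */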
+ if (smu7_read_smc_sram_dword(hwmgr,
+ fuse_table_offset +
+ offsetof(SMU74_Discrete_PmFuses, TdcWaterfallCtl),
+ (uint32_t *)&temp, SMC_RAM_END))
+ PP_ASSERT_WITH_CODE(false,
+			"Attempt to read PmFuses.DW8 (TdcWaterfallCtl) from SMC Failed!",
+ return -EINVAL);
+ else {
+ smu_data->power_tune_table.TdcWaterfallCtl = defaults->TdcWaterfallCtl;
+ smu_data->power_tune_table.LPMLTemperatureMin =
+ (uint8_t)((temp >> 16) & 0xff);
+ smu_data->power_tune_table.LPMLTemperatureMax =
+ (uint8_t)((temp >> 8) & 0xff);
+ smu_data->power_tune_table.Reserved = (uint8_t)(temp & 0xff);
+ }
+ return 0;
+}
+
+static int polaris10_populate_temperature_scaler(struct pp_hwmgr *hwmgr)
+{
+ int i;
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
+
+ /* Currently not used. Set all to zero. */
+ for (i = 0; i < 16; i++)
+ smu_data->power_tune_table.LPMLTemperatureScaler[i] = 0;
+
+ return 0;
+}
+
+static int polaris10_populate_fuzzy_fan(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
+
+/* TO DO move to hwmgr */
+ if ((hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity & (1 << 15))
+ || 0 == hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity)
+ hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity =
+ hwmgr->thermal_controller.advanceFanControlParameters.usDefaultFanOutputSensitivity;
+
+ smu_data->power_tune_table.FuzzyFan_PwmSetDelta = PP_HOST_TO_SMC_US(
+ hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity);
+ return 0;
+}
+
+static int polaris10_populate_gnb_lpml(struct pp_hwmgr *hwmgr)
+{
+ int i;
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
+
+ /* Currently not used. Set all to zero. */
+ for (i = 0; i < 16; i++)
+ smu_data->power_tune_table.GnbLPML[i] = 0;
+
+ return 0;
+}
+
+static int polaris10_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ uint16_t hi_sidd = smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd;
+ uint16_t lo_sidd = smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd;
+ struct phm_cac_tdp_table *cac_table = table_info->cac_dtp_table;
+
+ hi_sidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256);
+ lo_sidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256);
+
+ smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd =
+ CONVERT_FROM_HOST_TO_SMC_US(hi_sidd);
+ smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd =
+ CONVERT_FROM_HOST_TO_SMC_US(lo_sidd);
+
+ return 0;
+}
+
+static int polaris10_populate_pm_fuses(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
+ uint32_t pm_fuse_table_offset;
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_PowerContainment)) {
+ if (smu7_read_smc_sram_dword(hwmgr,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU74_Firmware_Header, PmFuseTable),
+ &pm_fuse_table_offset, SMC_RAM_END))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to get pm_fuse_table_offset Failed!",
+ return -EINVAL);
+
+ if (polaris10_populate_svi_load_line(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate SviLoadLine Failed!",
+ return -EINVAL);
+
+ if (polaris10_populate_tdc_limit(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate TDCLimit Failed!", return -EINVAL);
+
+ if (polaris10_populate_dw8(hwmgr, pm_fuse_table_offset))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate TdcWaterfallCtl, "
+ "LPMLTemperature Min and Max Failed!",
+ return -EINVAL);
+
+ if (0 != polaris10_populate_temperature_scaler(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate LPMLTemperatureScaler Failed!",
+ return -EINVAL);
+
+ if (polaris10_populate_fuzzy_fan(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate Fuzzy Fan Control parameters Failed!",
+ return -EINVAL);
+
+ if (polaris10_populate_gnb_lpml(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate GnbLPML Failed!",
+ return -EINVAL);
+
+ if (polaris10_populate_bapm_vddc_base_leakage_sidd(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate BapmVddCBaseLeakage Hi and Lo "
+ "Sidd Failed!", return -EINVAL);
+
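+		/* Download the host-side fuse table; note that only
+		 * sizeof(SMU74_Discrete_PmFuses) - 92 bytes are written and the
+		 * trailing bytes are left untouched.
+		 */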
+ if (smu7_copy_bytes_to_smc(hwmgr, pm_fuse_table_offset,
+ (uint8_t *)&smu_data->power_tune_table,
+ (sizeof(struct SMU74_Discrete_PmFuses) - 92), SMC_RAM_END))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to download PmFuseTable Failed!",
+ return -EINVAL);
+ }
+ return 0;
+}
+
+static int polaris10_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
+ SMU74_Discrete_DpmTable *table)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ uint32_t count, level;
+
+ if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
+ count = data->mvdd_voltage_table.count;
+ if (count > SMU_MAX_SMIO_LEVELS)
+ count = SMU_MAX_SMIO_LEVELS;
+ for (level = 0; level < count; level++) {
+ table->SmioTable2.Pattern[level].Voltage =
+				PP_HOST_TO_SMC_US(data->mvdd_voltage_table.entries[level].value * VOLTAGE_SCALE);
+ /* Index into DpmTable.Smio. Drive bits from Smio entry to get this voltage level.*/
+ table->SmioTable2.Pattern[level].Smio =
+ (uint8_t) level;
+ table->Smio[level] |=
+ data->mvdd_voltage_table.entries[level].smio_low;
+ }
+ table->SmioMask2 = data->mvdd_voltage_table.mask_low;
+
+ table->MvddLevelCount = (uint32_t) PP_HOST_TO_SMC_UL(count);
+ }
+
+ return 0;
+}
+
+static int polaris10_populate_smc_vddci_table(struct pp_hwmgr *hwmgr,
+ struct SMU74_Discrete_DpmTable *table)
+{
+ uint32_t count, level;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ count = data->vddci_voltage_table.count;
+
+ if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
+ if (count > SMU_MAX_SMIO_LEVELS)
+ count = SMU_MAX_SMIO_LEVELS;
+ for (level = 0; level < count; ++level) {
+ table->SmioTable1.Pattern[level].Voltage =
+ PP_HOST_TO_SMC_US(data->vddci_voltage_table.entries[level].value * VOLTAGE_SCALE);
+ table->SmioTable1.Pattern[level].Smio = (uint8_t) level;
+
+ table->Smio[level] |= data->vddci_voltage_table.entries[level].smio_low;
+ }
+ }
+
+ table->SmioMask1 = data->vddci_voltage_table.mask_low;
+
+ return 0;
+}
+
+static int polaris10_populate_cac_table(struct pp_hwmgr *hwmgr,
+ struct SMU74_Discrete_DpmTable *table)
+{
+ uint32_t count;
+ uint8_t index;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct phm_ppt_v1_voltage_lookup_table *lookup_table =
+ table_info->vddc_lookup_table;
+	/* The lookup table is already byte-swapped, so we need to swap the
+	 * values back before using them.
+	 * We populate the vddc CAC data into the BapmVddc table for both
+	 * split and merged mode.
+	 */
+ for (count = 0; count < lookup_table->count; count++) {
+ index = phm_get_voltage_index(lookup_table,
+ data->vddc_voltage_table.entries[count].value);
+ table->BapmVddcVidLoSidd[count] = convert_to_vid(lookup_table->entries[index].us_cac_low);
+ table->BapmVddcVidHiSidd[count] = convert_to_vid(lookup_table->entries[index].us_cac_mid);
+ table->BapmVddcVidHiSidd2[count] = convert_to_vid(lookup_table->entries[index].us_cac_high);
+ }
+
+ return 0;
+}
+
+static int polaris10_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr,
+ struct SMU74_Discrete_DpmTable *table)
+{
+ polaris10_populate_smc_vddci_table(hwmgr, table);
+ polaris10_populate_smc_mvdd_table(hwmgr, table);
+ polaris10_populate_cac_table(hwmgr, table);
+
+ return 0;
+}
+
+static int polaris10_populate_ulv_level(struct pp_hwmgr *hwmgr,
+ struct SMU74_Discrete_Ulv *state)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+ state->CcPwrDynRm = 0;
+ state->CcPwrDynRm1 = 0;
+
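+	/* VddcOffsetVid expresses the same offset in VID steps; the 100/625
+	 * scaling assumes the pptable offset is given in mV with a VID step
+	 * of 6.25 mV.
+	 */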
+ state->VddcOffset = (uint16_t) table_info->us_ulv_voltage_offset;
+ state->VddcOffsetVid = (uint8_t)(table_info->us_ulv_voltage_offset *
+ VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
+
+ if (hwmgr->chip_id == CHIP_POLARIS12 || hwmgr->is_kicker)
+ state->VddcPhase = data->vddc_phase_shed_control ^ 0x3;
+ else
+ state->VddcPhase = (data->vddc_phase_shed_control) ? 0 : 1;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm);
+ CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm1);
+ CONVERT_FROM_HOST_TO_SMC_US(state->VddcOffset);
+
+ return 0;
+}
+
+static int polaris10_populate_ulv_state(struct pp_hwmgr *hwmgr,
+ struct SMU74_Discrete_DpmTable *table)
+{
+ return polaris10_populate_ulv_level(hwmgr, &table->Ulv);
+}
+
+static int polaris10_populate_smc_link_level(struct pp_hwmgr *hwmgr,
+ struct SMU74_Discrete_DpmTable *table)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
+ struct smu7_dpm_table *dpm_table = &data->dpm_table;
+ int i;
+
+ /* Index (dpm_table->pcie_speed_table.count)
+ * is reserved for PCIE boot level. */
+ for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) {
+ table->LinkLevel[i].PcieGenSpeed =
+ (uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value;
+ table->LinkLevel[i].PcieLaneCount = (uint8_t)encode_pcie_lane_width(
+ dpm_table->pcie_speed_table.dpm_levels[i].param1);
+ table->LinkLevel[i].EnabledForActivity = 1;
+ table->LinkLevel[i].SPC = (uint8_t)(data->pcie_spc_cap & 0xff);
+ table->LinkLevel[i].DownThreshold = PP_HOST_TO_SMC_UL(5);
+ table->LinkLevel[i].UpThreshold = PP_HOST_TO_SMC_UL(30);
+ }
+
+ smu_data->smc_state_table.LinkLevelCount =
+ (uint8_t)dpm_table->pcie_speed_table.count;
+
+/* To Do move to hwmgr */
+ data->dpm_level_enable_mask.pcie_dpm_enable_mask =
+ phm_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
+
+ return 0;
+}
+
+
+static void polaris10_get_sclk_range_table(struct pp_hwmgr *hwmgr,
+ SMU74_Discrete_DpmTable *table)
+{
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
+ uint32_t i, ref_clk;
+
+ struct pp_atom_ctrl_sclk_range_table range_table_from_vbios = { { {0} } };
+
+ ref_clk = smu7_get_xclk(hwmgr);
+
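+	/* Prefer the SCLK FCW range table provided by the VBIOS; if it is
+	 * not available, fall back to the hard-coded Range_Table and derive
+	 * the transition frequencies from the reference clock.
+	 */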
+ if (0 == atomctrl_get_smc_sclk_range_table(hwmgr, &range_table_from_vbios)) {
+ for (i = 0; i < NUM_SCLK_RANGE; i++) {
+ table->SclkFcwRangeTable[i].vco_setting = range_table_from_vbios.entry[i].ucVco_setting;
+ table->SclkFcwRangeTable[i].postdiv = range_table_from_vbios.entry[i].ucPostdiv;
+ table->SclkFcwRangeTable[i].fcw_pcc = range_table_from_vbios.entry[i].usFcw_pcc;
+
+ table->SclkFcwRangeTable[i].fcw_trans_upper = range_table_from_vbios.entry[i].usFcw_trans_upper;
+ table->SclkFcwRangeTable[i].fcw_trans_lower = range_table_from_vbios.entry[i].usRcw_trans_lower;
+
+ CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_pcc);
+ CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_upper);
+ CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_lower);
+ }
+ return;
+ }
+
+ for (i = 0; i < NUM_SCLK_RANGE; i++) {
+ smu_data->range_table[i].trans_lower_frequency = (ref_clk * Range_Table[i].fcw_trans_lower) >> Range_Table[i].postdiv;
+ smu_data->range_table[i].trans_upper_frequency = (ref_clk * Range_Table[i].fcw_trans_upper) >> Range_Table[i].postdiv;
+
+ table->SclkFcwRangeTable[i].vco_setting = Range_Table[i].vco_setting;
+ table->SclkFcwRangeTable[i].postdiv = Range_Table[i].postdiv;
+ table->SclkFcwRangeTable[i].fcw_pcc = Range_Table[i].fcw_pcc;
+
+ table->SclkFcwRangeTable[i].fcw_trans_upper = Range_Table[i].fcw_trans_upper;
+ table->SclkFcwRangeTable[i].fcw_trans_lower = Range_Table[i].fcw_trans_lower;
+
+ CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_pcc);
+ CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_upper);
+ CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_lower);
+ }
+}
+
+static int polaris10_calculate_sclk_params(struct pp_hwmgr *hwmgr,
+ uint32_t clock, SMU_SclkSetting *sclk_setting)
+{
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
+ const SMU74_Discrete_DpmTable *table = &(smu_data->smc_state_table);
+ struct pp_atomctrl_clock_dividers_ai dividers;
+ uint32_t ref_clock;
+ uint32_t pcc_target_percent, pcc_target_freq, ss_target_percent, ss_target_freq;
+ uint8_t i;
+ int result;
+ uint64_t temp;
+
+ sclk_setting->SclkFrequency = clock;
+ /* get the engine clock dividers for this clock value */
+ result = atomctrl_get_engine_pll_dividers_ai(hwmgr, clock, &dividers);
+ if (result == 0) {
+ sclk_setting->Fcw_int = dividers.usSclk_fcw_int;
+ sclk_setting->Fcw_frac = dividers.usSclk_fcw_frac;
+ sclk_setting->Pcc_fcw_int = dividers.usPcc_fcw_int;
+ sclk_setting->PllRange = dividers.ucSclkPllRange;
+ sclk_setting->Sclk_slew_rate = 0x400;
+ sclk_setting->Pcc_up_slew_rate = dividers.usPcc_fcw_slew_frac;
+ sclk_setting->Pcc_down_slew_rate = 0xffff;
+ sclk_setting->SSc_En = dividers.ucSscEnable;
+ sclk_setting->Fcw1_int = dividers.usSsc_fcw1_int;
+ sclk_setting->Fcw1_frac = dividers.usSsc_fcw1_frac;
+ sclk_setting->Sclk_ss_slew_rate = dividers.usSsc_fcw_slew_frac;
+ return result;
+ }
+
+ ref_clock = smu7_get_xclk(hwmgr);
+
+ for (i = 0; i < NUM_SCLK_RANGE; i++) {
+ if (clock > smu_data->range_table[i].trans_lower_frequency
+ && clock <= smu_data->range_table[i].trans_upper_frequency) {
+ sclk_setting->PllRange = i;
+ break;
+ }
+ }
+
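+	/* No AI PLL dividers from the VBIOS: compute the FCW manually. The
+	 * integer part is (clock << postdiv) / ref_clock and the fractional
+	 * part is the low 16 bits of the 16.16 fixed-point quotient.
+	 */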
+ sclk_setting->Fcw_int = (uint16_t)((clock << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv) / ref_clock);
+ temp = clock << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv;
+ temp <<= 0x10;
+ do_div(temp, ref_clock);
+ sclk_setting->Fcw_frac = temp & 0xffff;
+
+ pcc_target_percent = 10; /* Hardcode 10% for now. */
+ pcc_target_freq = clock - (clock * pcc_target_percent / 100);
+ sclk_setting->Pcc_fcw_int = (uint16_t)((pcc_target_freq << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv) / ref_clock);
+
+ ss_target_percent = 2; /* Hardcode 2% for now. */
+ sclk_setting->SSc_En = 0;
+ if (ss_target_percent) {
+ sclk_setting->SSc_En = 1;
+ ss_target_freq = clock - (clock * ss_target_percent / 100);
+ sclk_setting->Fcw1_int = (uint16_t)((ss_target_freq << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv) / ref_clock);
+ temp = ss_target_freq << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv;
+ temp <<= 0x10;
+ do_div(temp, ref_clock);
+ sclk_setting->Fcw1_frac = temp & 0xffff;
+ }
+
+ return 0;
+}
+
+static int polaris10_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
+ uint32_t clock, uint16_t sclk_al_threshold,
+ struct SMU74_Discrete_GraphicsLevel *level)
+{
+ int result;
+ /* PP_Clocks minClocks; */
+ uint32_t mvdd;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ SMU_SclkSetting curr_sclk_setting = { 0 };
+
+ result = polaris10_calculate_sclk_params(hwmgr, clock, &curr_sclk_setting);
+
+ /* populate graphics levels */
+ result = polaris10_get_dependency_volt_by_clk(hwmgr,
+ table_info->vdd_dep_on_sclk, clock,
+ &level->MinVoltage, &mvdd);
+
+ PP_ASSERT_WITH_CODE((0 == result),
+			"cannot find VDDC voltage value for "
+ "VDDC engine clock dependency table",
+ return result);
+ level->ActivityLevel = sclk_al_threshold;
+
+ level->CcPwrDynRm = 0;
+ level->CcPwrDynRm1 = 0;
+ level->EnabledForActivity = 0;
+ level->EnabledForThrottle = 1;
+ level->UpHyst = 10;
+ level->DownHyst = 0;
+ level->VoltageDownHyst = 0;
+ level->PowerThrottle = 0;
+ data->display_timing.min_clock_in_sr = hwmgr->display_config.min_core_set_clock_in_sr;
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep))
+ level->DeepSleepDivId = smu7_get_sleep_divider_id_from_clock(clock,
+ hwmgr->display_config.min_core_set_clock_in_sr);
+
+ /* Default to slow, highest DPM level will be
+ * set to PPSMC_DISPLAY_WATERMARK_LOW later.
+ */
+ if (data->update_up_hyst)
+ level->UpHyst = (uint8_t)data->up_hyst;
+ if (data->update_down_hyst)
+ level->DownHyst = (uint8_t)data->down_hyst;
+
+ level->SclkSetting = curr_sclk_setting;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(level->MinVoltage);
+ CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm);
+ CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm1);
+ CONVERT_FROM_HOST_TO_SMC_US(level->ActivityLevel);
+ CONVERT_FROM_HOST_TO_SMC_UL(level->SclkSetting.SclkFrequency);
+ CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw_int);
+ CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw_frac);
+ CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Pcc_fcw_int);
+ CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Sclk_slew_rate);
+ CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Pcc_up_slew_rate);
+ CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Pcc_down_slew_rate);
+ CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw1_int);
+ CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw1_frac);
+ CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Sclk_ss_slew_rate);
+ return 0;
+}
+
+static int polaris10_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
+ struct smu7_dpm_table *dpm_table = &hw_data->dpm_table;
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct phm_ppt_v1_pcie_table *pcie_table = table_info->pcie_table;
+ uint8_t pcie_entry_cnt = (uint8_t) hw_data->dpm_table.pcie_speed_table.count;
+ int result = 0;
+ uint32_t array = smu_data->smu7_data.dpm_table_start +
+ offsetof(SMU74_Discrete_DpmTable, GraphicsLevel);
+ uint32_t array_size = sizeof(struct SMU74_Discrete_GraphicsLevel) *
+ SMU74_MAX_LEVELS_GRAPHICS;
+ struct SMU74_Discrete_GraphicsLevel *levels =
+ smu_data->smc_state_table.GraphicsLevel;
+ uint32_t i, max_entry;
+ uint8_t hightest_pcie_level_enabled = 0,
+ lowest_pcie_level_enabled = 0,
+ mid_pcie_level_enabled = 0,
+ count = 0;
+
+ polaris10_get_sclk_range_table(hwmgr, &(smu_data->smc_state_table));
+
+ for (i = 0; i < dpm_table->sclk_table.count; i++) {
+
+ result = polaris10_populate_single_graphic_level(hwmgr,
+ dpm_table->sclk_table.dpm_levels[i].value,
+ (uint16_t)smu_data->activity_target[i],
+ &(smu_data->smc_state_table.GraphicsLevel[i]));
+ if (result)
+ return result;
+
+ /* Making sure only DPM level 0-1 have Deep Sleep Div ID populated. */
+ if (i > 1)
+ levels[i].DeepSleepDivId = 0;
+ }
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_SPLLShutdownSupport))
+ smu_data->smc_state_table.GraphicsLevel[0].SclkSetting.SSc_En = 0;
+
+ smu_data->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1;
+ smu_data->smc_state_table.GraphicsDpmLevelCount =
+ (uint8_t)dpm_table->sclk_table.count;
+ hw_data->dpm_level_enable_mask.sclk_dpm_enable_mask =
+ phm_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
+
+
+ if (pcie_table != NULL) {
+ PP_ASSERT_WITH_CODE((1 <= pcie_entry_cnt),
+ "There must be 1 or more PCIE levels defined in PPTable.",
+ return -EINVAL);
+ max_entry = pcie_entry_cnt - 1;
+ for (i = 0; i < dpm_table->sclk_table.count; i++)
+ levels[i].pcieDpmLevel =
+ (uint8_t) ((i < max_entry) ? i : max_entry);
+ } else {
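+		/* No PCIe table in the PPTable: derive the highest, lowest and
+		 * a mid PCIe DPM level from the enable mask and assign them to
+		 * the SCLK levels below.
+		 */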
+ while (hw_data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
+ ((hw_data->dpm_level_enable_mask.pcie_dpm_enable_mask &
+ (1 << (hightest_pcie_level_enabled + 1))) != 0))
+ hightest_pcie_level_enabled++;
+
+ while (hw_data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
+ ((hw_data->dpm_level_enable_mask.pcie_dpm_enable_mask &
+ (1 << lowest_pcie_level_enabled)) == 0))
+ lowest_pcie_level_enabled++;
+
+ while ((count < hightest_pcie_level_enabled) &&
+ ((hw_data->dpm_level_enable_mask.pcie_dpm_enable_mask &
+ (1 << (lowest_pcie_level_enabled + 1 + count))) == 0))
+ count++;
+
+ mid_pcie_level_enabled = (lowest_pcie_level_enabled + 1 + count) <
+ hightest_pcie_level_enabled ?
+ (lowest_pcie_level_enabled + 1 + count) :
+ hightest_pcie_level_enabled;
+
+ /* set pcieDpmLevel to hightest_pcie_level_enabled */
+ for (i = 2; i < dpm_table->sclk_table.count; i++)
+ levels[i].pcieDpmLevel = hightest_pcie_level_enabled;
+
+ /* set pcieDpmLevel to lowest_pcie_level_enabled */
+ levels[0].pcieDpmLevel = lowest_pcie_level_enabled;
+
+ /* set pcieDpmLevel to mid_pcie_level_enabled */
+ levels[1].pcieDpmLevel = mid_pcie_level_enabled;
+ }
+ /* level count will send to smc once at init smc table and never change */
+ result = smu7_copy_bytes_to_smc(hwmgr, array, (uint8_t *)levels,
+ (uint32_t)array_size, SMC_RAM_END);
+
+ return result;
+}
+
+
+static int polaris10_populate_single_memory_level(struct pp_hwmgr *hwmgr,
+ uint32_t clock, struct SMU74_Discrete_MemoryLevel *mem_level)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ int result = 0;
+ struct cgs_display_info info = {0, 0, NULL};
+ uint32_t mclk_stutter_mode_threshold = 40000;
+
+ cgs_get_active_displays_info(hwmgr->device, &info);
+
+ if (table_info->vdd_dep_on_mclk) {
+ result = polaris10_get_dependency_volt_by_clk(hwmgr,
+ table_info->vdd_dep_on_mclk, clock,
+ &mem_level->MinVoltage, &mem_level->MinMvdd);
+ PP_ASSERT_WITH_CODE((0 == result),
+			"cannot find MinVddc voltage value from memory "
+ "VDDC voltage dependency table", return result);
+ }
+
+ mem_level->MclkFrequency = clock;
+ mem_level->EnabledForThrottle = 1;
+ mem_level->EnabledForActivity = 0;
+ mem_level->UpHyst = 0;
+ mem_level->DownHyst = 100;
+ mem_level->VoltageDownHyst = 0;
+ mem_level->ActivityLevel = (uint16_t)data->mclk_activity_target;
+ mem_level->StutterEnable = false;
+ mem_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
+
+ data->display_timing.num_existing_displays = info.display_count;
+
+ if (mclk_stutter_mode_threshold &&
+ (clock <= mclk_stutter_mode_threshold) &&
+ (PHM_READ_FIELD(hwmgr->device, DPG_PIPE_STUTTER_CONTROL,
+ STUTTER_ENABLE) & 0x1))
+ mem_level->StutterEnable = true;
+
+ if (!result) {
+ CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinMvdd);
+ CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MclkFrequency);
+ CONVERT_FROM_HOST_TO_SMC_US(mem_level->ActivityLevel);
+ CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinVoltage);
+ }
+ return result;
+}
+
+static int polaris10_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
+ struct smu7_dpm_table *dpm_table = &hw_data->dpm_table;
+ int result;
+ /* populate MCLK dpm table to SMU7 */
+ uint32_t array = smu_data->smu7_data.dpm_table_start +
+ offsetof(SMU74_Discrete_DpmTable, MemoryLevel);
+ uint32_t array_size = sizeof(SMU74_Discrete_MemoryLevel) *
+ SMU74_MAX_LEVELS_MEMORY;
+ struct SMU74_Discrete_MemoryLevel *levels =
+ smu_data->smc_state_table.MemoryLevel;
+ uint32_t i;
+
+ for (i = 0; i < dpm_table->mclk_table.count; i++) {
+ PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value),
+			"cannot populate memory level as memory clock is zero",
+ return -EINVAL);
+ result = polaris10_populate_single_memory_level(hwmgr,
+ dpm_table->mclk_table.dpm_levels[i].value,
+ &levels[i]);
+ if (i == dpm_table->mclk_table.count - 1) {
+ levels[i].DisplayWatermark = PPSMC_DISPLAY_WATERMARK_HIGH;
+ levels[i].EnabledForActivity = 1;
+ }
+ if (result)
+ return result;
+ }
+
+	/* To prevent MC activity in stutter mode from pushing DPM up, the
+	 * UVD change complements this by putting the MCLK in a higher state
+	 * by default, so that we are not affected by the up threshold or by
+	 * MCLK DPM latency.
+	 */
+ levels[0].ActivityLevel = 0x1f;
+ CONVERT_FROM_HOST_TO_SMC_US(levels[0].ActivityLevel);
+
+ smu_data->smc_state_table.MemoryDpmLevelCount =
+ (uint8_t)dpm_table->mclk_table.count;
+ hw_data->dpm_level_enable_mask.mclk_dpm_enable_mask =
+ phm_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
+
+ /* level count will send to smc once at init smc table and never change */
+ result = smu7_copy_bytes_to_smc(hwmgr, array, (uint8_t *)levels,
+ (uint32_t)array_size, SMC_RAM_END);
+
+ return result;
+}
+
+static int polaris10_populate_mvdd_value(struct pp_hwmgr *hwmgr,
+ uint32_t mclk, SMIO_Pattern *smio_pat)
+{
+ const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ uint32_t i = 0;
+
+ if (SMU7_VOLTAGE_CONTROL_NONE != data->mvdd_control) {
+		/* find the mvdd entry whose clock is at or above the requested mclk */
+ for (i = 0; i < table_info->vdd_dep_on_mclk->count; i++) {
+ if (mclk <= table_info->vdd_dep_on_mclk->entries[i].clk) {
+ smio_pat->Voltage = data->mvdd_voltage_table.entries[i].value;
+ break;
+ }
+ }
+ PP_ASSERT_WITH_CODE(i < table_info->vdd_dep_on_mclk->count,
+ "MVDD Voltage is outside the supported range.",
+ return -EINVAL);
+ } else
+ return -EINVAL;
+
+ return 0;
+}
+
+static int polaris10_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
+ SMU74_Discrete_DpmTable *table)
+{
+ int result = 0;
+ uint32_t sclk_frequency;
+ const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ SMIO_Pattern vol_level;
+ uint32_t mvdd;
+ uint16_t us_mvdd;
+
+ table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
+
+ /* Get MinVoltage and Frequency from DPM0,
+ * already converted to SMC_UL */
+ sclk_frequency = data->vbios_boot_state.sclk_bootup_value;
+ result = polaris10_get_dependency_volt_by_clk(hwmgr,
+ table_info->vdd_dep_on_sclk,
+ sclk_frequency,
+ &table->ACPILevel.MinVoltage, &mvdd);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "Cannot find ACPI VDDC voltage value "
+ "in Clock Dependency Table",
+ );
+
+ result = polaris10_calculate_sclk_params(hwmgr, sclk_frequency, &(table->ACPILevel.SclkSetting));
+ PP_ASSERT_WITH_CODE(result == 0, "Error retrieving Engine Clock dividers from VBIOS.", return result);
+
+ table->ACPILevel.DeepSleepDivId = 0;
+ table->ACPILevel.CcPwrDynRm = 0;
+ table->ACPILevel.CcPwrDynRm1 = 0;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.MinVoltage);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1);
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkSetting.SclkFrequency);
+ CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw_int);
+ CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw_frac);
+ CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Pcc_fcw_int);
+ CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Sclk_slew_rate);
+ CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Pcc_up_slew_rate);
+ CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Pcc_down_slew_rate);
+ CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw1_int);
+ CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw1_frac);
+ CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Sclk_ss_slew_rate);
+
+
+ /* Get MinVoltage and Frequency from DPM0, already converted to SMC_UL */
+ table->MemoryACPILevel.MclkFrequency = data->vbios_boot_state.mclk_bootup_value;
+ result = polaris10_get_dependency_volt_by_clk(hwmgr,
+ table_info->vdd_dep_on_mclk,
+ table->MemoryACPILevel.MclkFrequency,
+ &table->MemoryACPILevel.MinVoltage, &mvdd);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "Cannot find ACPI VDDCI voltage value "
+ "in Clock Dependency Table",
+ );
+
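+	/* Select the MVDD for the ACPI level: the boot-up value when MVDD
+	 * control is disabled or MCLK DPM is key-disabled, otherwise the
+	 * value matching the lowest MCLK DPM level.
+	 */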
+ us_mvdd = 0;
+ if ((SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control) ||
+ (data->mclk_dpm_key_disabled))
+ us_mvdd = data->vbios_boot_state.mvdd_bootup_value;
+ else {
+ if (!polaris10_populate_mvdd_value(hwmgr,
+ data->dpm_table.mclk_table.dpm_levels[0].value,
+ &vol_level))
+ us_mvdd = vol_level.Voltage;
+ }
+
+ if (0 == polaris10_populate_mvdd_value(hwmgr, 0, &vol_level))
+ table->MemoryACPILevel.MinMvdd = PP_HOST_TO_SMC_UL(vol_level.Voltage);
+ else
+ table->MemoryACPILevel.MinMvdd = 0;
+
+ table->MemoryACPILevel.StutterEnable = false;
+
+ table->MemoryACPILevel.EnabledForThrottle = 0;
+ table->MemoryACPILevel.EnabledForActivity = 0;
+ table->MemoryACPILevel.UpHyst = 0;
+ table->MemoryACPILevel.DownHyst = 100;
+ table->MemoryACPILevel.VoltageDownHyst = 0;
+ table->MemoryACPILevel.ActivityLevel =
+ PP_HOST_TO_SMC_US((uint16_t)data->mclk_activity_target);
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MclkFrequency);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MinVoltage);
+
+ return result;
+}
+
+static int polaris10_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
+ SMU74_Discrete_DpmTable *table)
+{
+ int result = -EINVAL;
+ uint8_t count;
+ struct pp_atomctrl_clock_dividers_vi dividers;
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
+ table_info->mm_dep_table;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ uint32_t vddci;
+
+ table->VceLevelCount = (uint8_t)(mm_table->count);
+ table->VceBootLevel = 0;
+
+ for (count = 0; count < table->VceLevelCount; count++) {
+ table->VceLevel[count].Frequency = mm_table->entries[count].eclk;
+ table->VceLevel[count].MinVoltage = 0;
+ table->VceLevel[count].MinVoltage |=
+ (mm_table->entries[count].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
+
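+		/* Pick VDDCI according to the voltage-control method: the
+		 * closest entry in the GPIO table, a fixed VDDC - VDDCI delta
+		 * for SVID2, or the VBIOS boot-up value otherwise.
+		 */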
+ if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
+ vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table),
+ mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
+ else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
+ vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA;
+ else
+ vddci = (data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE) << VDDCI_SHIFT;
+
+
+ table->VceLevel[count].MinVoltage |=
+ (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
+ table->VceLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
+
+		/* retrieve divider value from VBIOS */
+ result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+ table->VceLevel[count].Frequency, &dividers);
+ PP_ASSERT_WITH_CODE((0 == result),
+			"cannot find divider id for VCE engine clock",
+ return result);
+
+ table->VceLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].Frequency);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].MinVoltage);
+ }
+ return result;
+}
+
+
+static int polaris10_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
+ SMU74_Discrete_DpmTable *table)
+{
+ int result = -EINVAL;
+ uint8_t count;
+ struct pp_atomctrl_clock_dividers_vi dividers;
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
+ table_info->mm_dep_table;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ uint32_t vddci;
+
+ table->SamuBootLevel = 0;
+ table->SamuLevelCount = (uint8_t)(mm_table->count);
+
+ for (count = 0; count < table->SamuLevelCount; count++) {
+ /* not sure whether we need evclk or not */
+ table->SamuLevel[count].MinVoltage = 0;
+ table->SamuLevel[count].Frequency = mm_table->entries[count].samclock;
+ table->SamuLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
+ VOLTAGE_SCALE) << VDDC_SHIFT;
+
+ if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
+ vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table),
+ mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
+ else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
+ vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA;
+ else
+ vddci = (data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE) << VDDCI_SHIFT;
+
+ table->SamuLevel[count].MinVoltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
+ table->SamuLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
+
+		/* retrieve divider value from VBIOS */
+ result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+ table->SamuLevel[count].Frequency, &dividers);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "can not find divide id for samu clock", return result);
+
+ table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].MinVoltage);
+ }
+ return result;
+}
+
+static int polaris10_populate_memory_timing_parameters(struct pp_hwmgr *hwmgr,
+ int32_t eng_clock, int32_t mem_clock,
+ SMU74_Discrete_MCArbDramTimingTableEntry *arb_regs)
+{
+ uint32_t dram_timing;
+ uint32_t dram_timing2;
+ uint32_t burst_time;
+ int result;
+
+ result = atomctrl_set_engine_dram_timings_rv770(hwmgr,
+ eng_clock, mem_clock);
+ PP_ASSERT_WITH_CODE(result == 0,
+ "Error calling VBIOS to set DRAM_TIMING.", return result);
+
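+	/* Read back the DRAM timing/burst registers that the VBIOS call above
+	 * just programmed, so they can be packed into the SMC arbitration entry.
+	 */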
+ dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
+ dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
+ burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);
+
+
+ arb_regs->McArbDramTiming = PP_HOST_TO_SMC_UL(dram_timing);
+ arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dram_timing2);
+ arb_regs->McArbBurstTime = (uint8_t)burst_time;
+
+ return 0;
+}
+
+static int polaris10_program_memory_timing_parameters(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
+ struct SMU74_Discrete_MCArbDramTimingTable arb_regs;
+ uint32_t i, j;
+ int result = 0;
+
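+	/* Build one MC arbitration entry per (sclk, mclk) pair, then copy the
+	 * whole table into SMC RAM at arb_table_start.
+	 */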
+ for (i = 0; i < hw_data->dpm_table.sclk_table.count; i++) {
+ for (j = 0; j < hw_data->dpm_table.mclk_table.count; j++) {
+ result = polaris10_populate_memory_timing_parameters(hwmgr,
+ hw_data->dpm_table.sclk_table.dpm_levels[i].value,
+ hw_data->dpm_table.mclk_table.dpm_levels[j].value,
+ &arb_regs.entries[i][j]);
+ if (result == 0)
+ result = atomctrl_set_ac_timing_ai(hwmgr, hw_data->dpm_table.mclk_table.dpm_levels[j].value, j);
+ if (result != 0)
+ return result;
+ }
+ }
+
+ result = smu7_copy_bytes_to_smc(
+ hwmgr,
+ smu_data->smu7_data.arb_table_start,
+ (uint8_t *)&arb_regs,
+ sizeof(SMU74_Discrete_MCArbDramTimingTable),
+ SMC_RAM_END);
+ return result;
+}
+
+static int polaris10_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
+ struct SMU74_Discrete_DpmTable *table)
+{
+ int result = -EINVAL;
+ uint8_t count;
+ struct pp_atomctrl_clock_dividers_vi dividers;
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
+ table_info->mm_dep_table;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ uint32_t vddci;
+
+ table->UvdLevelCount = (uint8_t)(mm_table->count);
+ table->UvdBootLevel = 0;
+
+ for (count = 0; count < table->UvdLevelCount; count++) {
+ table->UvdLevel[count].MinVoltage = 0;
+ table->UvdLevel[count].VclkFrequency = mm_table->entries[count].vclk;
+ table->UvdLevel[count].DclkFrequency = mm_table->entries[count].dclk;
+ table->UvdLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
+ VOLTAGE_SCALE) << VDDC_SHIFT;
+
+ if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
+ vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table),
+ mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
+ else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
+ vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA;
+ else
+ vddci = (data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE) << VDDCI_SHIFT;
+
+ table->UvdLevel[count].MinVoltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
+ table->UvdLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
+
+		/* retrieve divider value from VBIOS */
+ result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+ table->UvdLevel[count].VclkFrequency, &dividers);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "can not find divide id for Vclk clock", return result);
+
+ table->UvdLevel[count].VclkDivider = (uint8_t)dividers.pll_post_divider;
+
+ result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+ table->UvdLevel[count].DclkFrequency, &dividers);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "can not find divide id for Dclk clock", return result);
+
+ table->UvdLevel[count].DclkDivider = (uint8_t)dividers.pll_post_divider;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].MinVoltage);
+ }
+
+ return result;
+}
+
+static int polaris10_populate_smc_boot_level(struct pp_hwmgr *hwmgr,
+ struct SMU74_Discrete_DpmTable *table)
+{
+ int result = 0;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ table->GraphicsBootLevel = 0;
+ table->MemoryBootLevel = 0;
+
+ /* find boot level from dpm table */
+ result = phm_find_boot_level(&(data->dpm_table.sclk_table),
+ data->vbios_boot_state.sclk_bootup_value,
+ (uint32_t *)&(table->GraphicsBootLevel));
+
+ result = phm_find_boot_level(&(data->dpm_table.mclk_table),
+ data->vbios_boot_state.mclk_bootup_value,
+ (uint32_t *)&(table->MemoryBootLevel));
+
+ table->BootVddc = data->vbios_boot_state.vddc_bootup_value *
+ VOLTAGE_SCALE;
+ table->BootVddci = data->vbios_boot_state.vddci_bootup_value *
+ VOLTAGE_SCALE;
+ table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value *
+ VOLTAGE_SCALE;
+
+ CONVERT_FROM_HOST_TO_SMC_US(table->BootVddc);
+ CONVERT_FROM_HOST_TO_SMC_US(table->BootVddci);
+ CONVERT_FROM_HOST_TO_SMC_US(table->BootMVdd);
+
+ return 0;
+}
+
+static int polaris10_populate_smc_initailial_state(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ uint8_t count, level;
+
+ count = (uint8_t)(table_info->vdd_dep_on_sclk->count);
+
+ for (level = 0; level < count; level++) {
+ if (table_info->vdd_dep_on_sclk->entries[level].clk >=
+ hw_data->vbios_boot_state.sclk_bootup_value) {
+ smu_data->smc_state_table.GraphicsBootLevel = level;
+ break;
+ }
+ }
+
+ count = (uint8_t)(table_info->vdd_dep_on_mclk->count);
+ for (level = 0; level < count; level++) {
+ if (table_info->vdd_dep_on_mclk->entries[level].clk >=
+ hw_data->vbios_boot_state.mclk_bootup_value) {
+ smu_data->smc_state_table.MemoryBootLevel = level;
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int polaris10_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
+{
+ uint32_t ro, efuse, volt_without_cks, volt_with_cks, value, max, min;
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
+
+ uint8_t i, stretch_amount, stretch_amount2, volt_offset = 0;
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
+ table_info->vdd_dep_on_sclk;
+
+ stretch_amount = (uint8_t)table_info->cac_dtp_table->usClockStretchAmount;
+
+	/* Read the SMU_EFUSE register to calculate RO and determine
+	 * whether the part is SS or FF. If RO >= 1660MHz, the part is FF.
+	 */
+ efuse = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixSMU_EFUSE_0 + (67 * 4));
+ efuse &= 0xFF000000;
+ efuse = efuse >> 24;
+
+ if (hwmgr->chip_id == CHIP_POLARIS10) {
+ min = 1000;
+ max = 2300;
+ } else {
+ min = 1100;
+ max = 2100;
+ }
+
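+	/* Map the 8-bit fuse value linearly onto the chip-specific RO range
+	 * [min, max] MHz determined above.
+	 */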
+ ro = efuse * (max - min) / 255 + min;
+
+ /* Populate Sclk_CKS_masterEn0_7 and Sclk_voltageOffset */
+ for (i = 0; i < sclk_table->count; i++) {
+ smu_data->smc_state_table.Sclk_CKS_masterEn0_7 |=
+ sclk_table->entries[i].cks_enable << i;
+ if (hwmgr->chip_id == CHIP_POLARIS10) {
+ volt_without_cks = (uint32_t)((2753594000U + (sclk_table->entries[i].clk/100) * 136418 - (ro - 70) * 1000000) / \
+ (2424180 - (sclk_table->entries[i].clk/100) * 1132925/1000));
+ volt_with_cks = (uint32_t)((2797202000U + sclk_table->entries[i].clk/100 * 3232 - (ro - 65) * 1000000) / \
+ (2522480 - sclk_table->entries[i].clk/100 * 115764/100));
+ } else {
+ volt_without_cks = (uint32_t)((2416794800U + (sclk_table->entries[i].clk/100) * 1476925/10 - (ro - 50) * 1000000) / \
+ (2625416 - (sclk_table->entries[i].clk/100) * (12586807/10000)));
+ volt_with_cks = (uint32_t)((2999656000U - sclk_table->entries[i].clk/100 * 392803 - (ro - 44) * 1000000) / \
+ (3422454 - sclk_table->entries[i].clk/100 * (18886376/10000)));
+ }
+
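+		/* Convert the voltage delta (plus the per-entry cks_voffset) into
+		 * SMC units of 6.25 mV, rounding up.
+		 */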
+ if (volt_without_cks >= volt_with_cks)
+ volt_offset = (uint8_t)(((volt_without_cks - volt_with_cks +
+ sclk_table->entries[i].cks_voffset) * 100 + 624) / 625);
+
+ smu_data->smc_state_table.Sclk_voltageOffset[i] = volt_offset;
+ }
+
+ smu_data->smc_state_table.LdoRefSel = (table_info->cac_dtp_table->ucCKS_LDO_REFSEL != 0) ? table_info->cac_dtp_table->ucCKS_LDO_REFSEL : 6;
+ /* Populate CKS Lookup Table */
+ if (stretch_amount == 1 || stretch_amount == 2 || stretch_amount == 5)
+ stretch_amount2 = 0;
+ else if (stretch_amount == 3 || stretch_amount == 4)
+ stretch_amount2 = 1;
+ else {
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_ClockStretcher);
+ PP_ASSERT_WITH_CODE(false,
+ "Stretch Amount in PPTable not supported\n",
+ return -EINVAL);
+ }
+
+ value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL);
+ value &= 0xFFFFFFFE;
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL, value);
+
+ return 0;
+}
+
+static int polaris10_populate_vr_config(struct pp_hwmgr *hwmgr,
+ struct SMU74_Discrete_DpmTable *table)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
+ uint16_t config;
+
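+	/* VRConfig packs the regulator type for VDDGFX, VDDC, VDDCI and MVDD
+	 * into one 32-bit word via the VRCONF_*_SHIFT fields; in merged mode
+	 * VDDGFX shares the VDDC plane.
+	 */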
+ config = VR_MERGED_WITH_VDDC;
+ table->VRConfig |= (config << VRCONF_VDDGFX_SHIFT);
+
+ /* Set Vddc Voltage Controller */
+ if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
+ config = VR_SVI2_PLANE_1;
+ table->VRConfig |= config;
+ } else {
+ PP_ASSERT_WITH_CODE(false,
+ "VDDC should be on SVI2 control in merged mode!",
+ );
+ }
+ /* Set Vddci Voltage Controller */
+ if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
+ config = VR_SVI2_PLANE_2; /* only in merged mode */
+ table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
+ } else if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
+ config = VR_SMIO_PATTERN_1;
+ table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
+ } else {
+ config = VR_STATIC_VOLTAGE;
+ table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
+ }
+ /* Set Mvdd Voltage Controller */
+ if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) {
+ config = VR_SVI2_PLANE_2;
+ table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, smu_data->smu7_data.soft_regs_start +
+ offsetof(SMU74_SoftRegisters, AllowMvddSwitch), 0x1);
+ } else {
+ config = VR_STATIC_VOLTAGE;
+ table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
+ }
+
+ return 0;
+}
+
+
+static int polaris10_populate_avfs_parameters(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
+
+ SMU74_Discrete_DpmTable *table = &(smu_data->smc_state_table);
+ int result = 0;
+ struct pp_atom_ctrl__avfs_parameters avfs_params = {0};
+ AVFS_meanNsigma_t AVFS_meanNsigma = { {0} };
+ AVFS_Sclk_Offset_t AVFS_SclkOffset = { {0} };
+ uint32_t tmp, i;
+
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)hwmgr->pptable;
+ struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
+ table_info->vdd_dep_on_sclk;
+
+
+ if (((struct smu7_smumgr *)smu_data)->avfs.avfs_btc_status == AVFS_BTC_NOTSUPPORTED)
+ return result;
+
+ result = atomctrl_get_avfs_information(hwmgr, &avfs_params);
+
+ if (0 == result) {
+ table->BTCGB_VDROOP_TABLE[0].a0 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a0);
+ table->BTCGB_VDROOP_TABLE[0].a1 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a1);
+ table->BTCGB_VDROOP_TABLE[0].a2 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a2);
+ table->BTCGB_VDROOP_TABLE[1].a0 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a0);
+ table->BTCGB_VDROOP_TABLE[1].a1 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a1);
+ table->BTCGB_VDROOP_TABLE[1].a2 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a2);
+ table->AVFSGB_VDROOP_TABLE[0].m1 = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSON_m1);
+ table->AVFSGB_VDROOP_TABLE[0].m2 = PP_HOST_TO_SMC_US(avfs_params.usAVFSGB_FUSE_TABLE_CKSON_m2);
+ table->AVFSGB_VDROOP_TABLE[0].b = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSON_b);
+ table->AVFSGB_VDROOP_TABLE[0].m1_shift = 24;
+ table->AVFSGB_VDROOP_TABLE[0].m2_shift = 12;
+ table->AVFSGB_VDROOP_TABLE[1].m1 = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_m1);
+ table->AVFSGB_VDROOP_TABLE[1].m2 = PP_HOST_TO_SMC_US(avfs_params.usAVFSGB_FUSE_TABLE_CKSOFF_m2);
+ table->AVFSGB_VDROOP_TABLE[1].b = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_b);
+ table->AVFSGB_VDROOP_TABLE[1].m1_shift = 24;
+ table->AVFSGB_VDROOP_TABLE[1].m2_shift = 12;
+ table->MaxVoltage = PP_HOST_TO_SMC_US(avfs_params.usMaxVoltage_0_25mv);
+ AVFS_meanNsigma.Aconstant[0] = PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant0);
+ AVFS_meanNsigma.Aconstant[1] = PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant1);
+ AVFS_meanNsigma.Aconstant[2] = PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant2);
+ AVFS_meanNsigma.DC_tol_sigma = PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_DC_tol_sigma);
+ AVFS_meanNsigma.Platform_mean = PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_Platform_mean);
+ AVFS_meanNsigma.PSM_Age_CompFactor = PP_HOST_TO_SMC_US(avfs_params.usPSM_Age_ComFactor);
+ AVFS_meanNsigma.Platform_sigma = PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_Platform_sigma);
+
+ for (i = 0; i < NUM_VFT_COLUMNS; i++) {
+ AVFS_meanNsigma.Static_Voltage_Offset[i] = (uint8_t)(sclk_table->entries[i].cks_voffset * 100 / 625);
+ AVFS_SclkOffset.Sclk_Offset[i] = PP_HOST_TO_SMC_US((uint16_t)(sclk_table->entries[i].sclk_offset) / 100);
+ }
+
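+		/* The firmware header holds the SMC RAM addresses of the AVFS
+		 * tables; fetch each address, then copy the corresponding
+		 * structure into place.
+		 */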
+ result = smu7_read_smc_sram_dword(hwmgr,
+ SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, AvfsMeanNSigma),
+ &tmp, SMC_RAM_END);
+
+ smu7_copy_bytes_to_smc(hwmgr,
+ tmp,
+ (uint8_t *)&AVFS_meanNsigma,
+ sizeof(AVFS_meanNsigma_t),
+ SMC_RAM_END);
+
+ result = smu7_read_smc_sram_dword(hwmgr,
+ SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, AvfsSclkOffsetTable),
+ &tmp, SMC_RAM_END);
+ smu7_copy_bytes_to_smc(hwmgr,
+ tmp,
+ (uint8_t *)&AVFS_SclkOffset,
+ sizeof(AVFS_Sclk_Offset_t),
+ SMC_RAM_END);
+
+ data->avfs_vdroop_override_setting = (avfs_params.ucEnableGB_VDROOP_TABLE_CKSON << BTCGB0_Vdroop_Enable_SHIFT) |
+ (avfs_params.ucEnableGB_VDROOP_TABLE_CKSOFF << BTCGB1_Vdroop_Enable_SHIFT) |
+ (avfs_params.ucEnableGB_FUSE_TABLE_CKSON << AVFSGB0_Vdroop_Enable_SHIFT) |
+ (avfs_params.ucEnableGB_FUSE_TABLE_CKSOFF << AVFSGB1_Vdroop_Enable_SHIFT);
+ data->apply_avfs_cks_off_voltage = (avfs_params.ucEnableApplyAVFS_CKS_OFF_Voltage == 1) ? true : false;
+ }
+ return result;
+}
+
+static int polaris10_init_arb_table_index(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
+ uint32_t tmp;
+ int result;
+
+	/* This is a read-modify-write on the first byte of the ARB table.
+	 * The first byte in the SMU74_Discrete_MCArbDramTimingTable structure
+	 * is the field 'current'.
+	 * This solution is ugly, but we never write the whole table, only
+	 * individual fields in it.
+	 * In reality this field should not be in that structure
+	 * but in a soft register.
+	 */
+ result = smu7_read_smc_sram_dword(hwmgr,
+ smu_data->smu7_data.arb_table_start, &tmp, SMC_RAM_END);
+
+ if (result)
+ return result;
+
+ tmp &= 0x00FFFFFF;
+ tmp |= ((uint32_t)MC_CG_ARB_FREQ_F1) << 24;
+
+ return smu7_write_smc_sram_dword(hwmgr,
+ smu_data->smu7_data.arb_table_start, tmp, SMC_RAM_END);
+}
+
+static void polaris10_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+ if (table_info &&
+ table_info->cac_dtp_table->usPowerTuneDataSetID <= POWERTUNE_DEFAULT_SET_MAX &&
+ table_info->cac_dtp_table->usPowerTuneDataSetID)
+ smu_data->power_tune_defaults =
+ &polaris10_power_tune_data_set_array
+ [table_info->cac_dtp_table->usPowerTuneDataSetID - 1];
+ else
+ smu_data->power_tune_defaults = &polaris10_power_tune_data_set_array[0];
+
+}
+
+static void polaris10_save_default_power_profile(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_smumgr *data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
+ struct SMU74_Discrete_GraphicsLevel *levels =
+ data->smc_state_table.GraphicsLevel;
+ unsigned min_level = 1;
+
+ hwmgr->default_gfx_power_profile.activity_threshold =
+ be16_to_cpu(levels[0].ActivityLevel);
+ hwmgr->default_gfx_power_profile.up_hyst = levels[0].UpHyst;
+ hwmgr->default_gfx_power_profile.down_hyst = levels[0].DownHyst;
+ hwmgr->default_gfx_power_profile.type = AMD_PP_GFX_PROFILE;
+
+ hwmgr->default_compute_power_profile = hwmgr->default_gfx_power_profile;
+ hwmgr->default_compute_power_profile.type = AMD_PP_COMPUTE_PROFILE;
+
+	/* Work around compute SDMA instability: disable the lowest SCLK
+	 * DPM level. Optimize the compute power profile: use only the
+	 * highest 2 power levels (if more than 2 are available).
+	 * Hysteresis: 0 ms up, 5 ms down.
+	 */
+ if (data->smc_state_table.GraphicsDpmLevelCount > 2)
+ min_level = data->smc_state_table.GraphicsDpmLevelCount - 2;
+ else if (data->smc_state_table.GraphicsDpmLevelCount == 2)
+ min_level = 1;
+ else
+ min_level = 0;
+ hwmgr->default_compute_power_profile.min_sclk =
+ be32_to_cpu(levels[min_level].SclkSetting.SclkFrequency);
+ hwmgr->default_compute_power_profile.up_hyst = 0;
+ hwmgr->default_compute_power_profile.down_hyst = 5;
+
+ hwmgr->gfx_power_profile = hwmgr->default_gfx_power_profile;
+ hwmgr->compute_power_profile = hwmgr->default_compute_power_profile;
+}
+
+static int polaris10_init_smc_table(struct pp_hwmgr *hwmgr)
+{
+ int result;
+ struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
+
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct SMU74_Discrete_DpmTable *table = &(smu_data->smc_state_table);
+ uint8_t i;
+ struct pp_atomctrl_gpio_pin_assignment gpio_pin;
+ pp_atomctrl_clock_dividers_vi dividers;
+
+ polaris10_initialize_power_tune_defaults(hwmgr);
+
+ if (SMU7_VOLTAGE_CONTROL_NONE != hw_data->voltage_control)
+ polaris10_populate_smc_voltage_tables(hwmgr, table);
+
+ table->SystemFlags = 0;
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_AutomaticDCTransition))
+ table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_StepVddc))
+ table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
+
+ if (hw_data->is_memory_gddr5)
+ table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
+
+ if (hw_data->ulv_supported && table_info->us_ulv_voltage_offset) {
+ result = polaris10_populate_ulv_state(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize ULV state!", return result);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixCG_ULV_PARAMETER, SMU7_CGULVPARAMETER_DFLT);
+ }
+
+ result = polaris10_populate_smc_link_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize Link Level!", return result);
+
+ result = polaris10_populate_all_graphic_levels(hwmgr);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize Graphics Level!", return result);
+
+ result = polaris10_populate_all_memory_levels(hwmgr);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize Memory Level!", return result);
+
+ result = polaris10_populate_smc_acpi_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize ACPI Level!", return result);
+
+ result = polaris10_populate_smc_vce_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize VCE Level!", return result);
+
+ result = polaris10_populate_smc_samu_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize SAMU Level!", return result);
+
+ /* Since only the initial state is completely set up at this point
+ * (the other states are just copies of the boot state) we only
+ * need to populate the ARB settings for the initial state.
+ */
+ result = polaris10_program_memory_timing_parameters(hwmgr);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to Write ARB settings for the initial state.", return result);
+
+ result = polaris10_populate_smc_uvd_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize UVD Level!", return result);
+
+ result = polaris10_populate_smc_boot_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize Boot Level!", return result);
+
+ result = polaris10_populate_smc_initailial_state(hwmgr);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize Boot State!", return result);
+
+ result = polaris10_populate_bapm_parameters_in_dpm_table(hwmgr);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to populate BAPM Parameters!", return result);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_ClockStretcher)) {
+ result = polaris10_populate_clock_stretcher_data_table(hwmgr);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to populate Clock Stretcher Data Table!",
+ return result);
+ }
+
+ result = polaris10_populate_avfs_parameters(hwmgr);
+ PP_ASSERT_WITH_CODE(0 == result, "Failed to populate AVFS Parameters!", return result;);
+
+ table->CurrSclkPllRange = 0xff;
+ table->GraphicsVoltageChangeEnable = 1;
+ table->GraphicsThermThrottleEnable = 1;
+ table->GraphicsInterval = 1;
+ table->VoltageInterval = 1;
+ table->ThermalInterval = 1;
+ table->TemperatureLimitHigh =
+ table_info->cac_dtp_table->usTargetOperatingTemp *
+ SMU7_Q88_FORMAT_CONVERSION_UNIT;
+ table->TemperatureLimitLow =
+ (table_info->cac_dtp_table->usTargetOperatingTemp - 1) *
+ SMU7_Q88_FORMAT_CONVERSION_UNIT;
+ table->MemoryVoltageChangeEnable = 1;
+ table->MemoryInterval = 1;
+ table->VoltageResponseTime = 0;
+ table->PhaseResponseTime = 0;
+ table->MemoryThermThrottleEnable = 1;
+ table->PCIeBootLinkLevel = 0;
+ table->PCIeGenInterval = 1;
+ table->VRConfig = 0;
+
+ result = polaris10_populate_vr_config(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to populate VRConfig setting!", return result);
+
+ table->ThermGpio = 17;
+ table->SclkStepSize = 0x4000;
+
+ if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_VRHOT_GPIO_PINID, &gpio_pin)) {
+ table->VRHotGpio = gpio_pin.uc_gpio_pin_bit_shift;
+ } else {
+ table->VRHotGpio = SMU7_UNUSED_GPIO_PIN;
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_RegulatorHot);
+ }
+
+ if (atomctrl_get_pp_assign_pin(hwmgr, PP_AC_DC_SWITCH_GPIO_PINID,
+ &gpio_pin)) {
+ table->AcDcGpio = gpio_pin.uc_gpio_pin_bit_shift;
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_AutomaticDCTransition);
+ } else {
+ table->AcDcGpio = SMU7_UNUSED_GPIO_PIN;
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_AutomaticDCTransition);
+ }
+
+ /* Thermal Output GPIO */
+ if (atomctrl_get_pp_assign_pin(hwmgr, THERMAL_INT_OUTPUT_GPIO_PINID,
+ &gpio_pin)) {
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_ThermalOutGPIO);
+
+ table->ThermOutGpio = gpio_pin.uc_gpio_pin_bit_shift;
+
+		/* For polarity, read GPIOPAD_A with the assigned GPIO pin:
+		 * since the VBIOS programs this register to the 'inactive state',
+		 * the driver can derive the 'active state' from it and
+		 * program the SMU with the correct polarity.
+		 */
+ table->ThermOutPolarity = (0 == (cgs_read_register(hwmgr->device, mmGPIOPAD_A)
+ & (1 << gpio_pin.uc_gpio_pin_bit_shift))) ? 1:0;
+ table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_ONLY;
+
+ /* if required, combine VRHot/PCC with thermal out GPIO */
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_RegulatorHot)
+ && phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_CombinePCCWithThermalSignal))
+ table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_VRHOT;
+ } else {
+ table->ThermOutGpio = 17;
+ table->ThermOutPolarity = 1;
+ table->ThermOutMode = SMU7_THERM_OUT_MODE_DISABLE;
+ }
+
+ /* Populate BIF_SCLK levels into SMC DPM table */
+ for (i = 0; i <= hw_data->dpm_table.pcie_speed_table.count; i++) {
+ result = atomctrl_get_dfs_pll_dividers_vi(hwmgr, smu_data->bif_sclk_table[i], &dividers);
+ PP_ASSERT_WITH_CODE((result == 0), "Can not find DFS divide id for Sclk", return result);
+
+ if (i == 0)
+ table->Ulv.BifSclkDfs = PP_HOST_TO_SMC_US((USHORT)(dividers.pll_post_divider));
+ else
+ table->LinkLevel[i-1].BifSclkDfs = PP_HOST_TO_SMC_US((USHORT)(dividers.pll_post_divider));
+ }
+
+ for (i = 0; i < SMU74_MAX_ENTRIES_SMIO; i++)
+ table->Smio[i] = PP_HOST_TO_SMC_UL(table->Smio[i]);
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->VRConfig);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask1);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask2);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->CurrSclkPllRange);
+ CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh);
+ CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow);
+ CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime);
+ CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime);
+
+	/* Upload all DPM data (DPM levels, level count, etc.) to SMC memory. */
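+	/* The trailing 3 * sizeof(SMU74_PIDController) bytes are not uploaded;
+	 * those PID fields are presumably owned by the SMC itself.
+	 */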
+ result = smu7_copy_bytes_to_smc(hwmgr,
+ smu_data->smu7_data.dpm_table_start +
+ offsetof(SMU74_Discrete_DpmTable, SystemFlags),
+ (uint8_t *)&(table->SystemFlags),
+ sizeof(SMU74_Discrete_DpmTable) - 3 * sizeof(SMU74_PIDController),
+ SMC_RAM_END);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to upload dpm data to SMC memory!", return result);
+
+ result = polaris10_init_arb_table_index(hwmgr);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to upload arb data to SMC memory!", return result);
+
+ result = polaris10_populate_pm_fuses(hwmgr);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to populate PM fuses to SMC memory!", return result);
+
+ polaris10_save_default_power_profile(hwmgr);
+
+ return 0;
+}
+
+static int polaris10_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ if (data->need_update_smu7_dpm_table &
+ (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_OD_UPDATE_MCLK))
+ return polaris10_program_memory_timing_parameters(hwmgr);
+
+ return 0;
+}
+
+int polaris10_thermal_avfs_enable(struct pp_hwmgr *hwmgr)
+{
+ int ret;
+ struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ if (smu_data->avfs.avfs_btc_status == AVFS_BTC_NOTSUPPORTED)
+ return 0;
+
+ ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetGBDroopSettings, data->avfs_vdroop_override_setting);
+
+ ret = (smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableAvfs) == 0) ?
+ 0 : -1;
+
+ if (!ret)
+ /* If this param is not changed, this function could fire unnecessarily */
+ smu_data->avfs.avfs_btc_status = AVFS_BTC_COMPLETED_PREVIOUSLY;
+
+ return ret;
+}
+
+static int polaris10_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
+ SMU74_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
+ uint32_t duty100;
+ uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2;
+ uint16_t fdo_min, slope1, slope2;
+ uint32_t reference_clock;
+ int res;
+ uint64_t tmp64;
+
+ if (hwmgr->thermal_controller.fanInfo.bNoFan) {
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_MicrocodeFanControl);
+ return 0;
+ }
+
+ if (smu_data->smu7_data.fan_table_start == 0) {
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_MicrocodeFanControl);
+ return 0;
+ }
+
+ duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+ CG_FDO_CTRL1, FMAX_DUTY100);
+
+ if (duty100 == 0) {
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_MicrocodeFanControl);
+ return 0;
+ }
+
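+	/* usPWMMin appears to be in units of 0.01%, so scale it against
+	 * duty100 to get the minimum fan duty cycle (fdo_min).
+	 */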
+ tmp64 = hwmgr->thermal_controller.advanceFanControlParameters.
+ usPWMMin * duty100;
+ do_div(tmp64, 10000);
+ fdo_min = (uint16_t)tmp64;
+
+ t_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usTMed -
+ hwmgr->thermal_controller.advanceFanControlParameters.usTMin;
+ t_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usTHigh -
+ hwmgr->thermal_controller.advanceFanControlParameters.usTMed;
+
+ pwm_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed -
+ hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin;
+ pwm_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh -
+ hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed;
+
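+	/* Fan-curve slopes for the (TMin,PWMMin)->(TMed,PWMMed) and
+	 * (TMed,PWMMed)->(THigh,PWMHigh) segments, scaled by 16 with rounding.
+	 */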
+ slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
+ slope2 = (uint16_t)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
+
+ fan_table.TempMin = cpu_to_be16((50 + hwmgr->
+ thermal_controller.advanceFanControlParameters.usTMin) / 100);
+ fan_table.TempMed = cpu_to_be16((50 + hwmgr->
+ thermal_controller.advanceFanControlParameters.usTMed) / 100);
+ fan_table.TempMax = cpu_to_be16((50 + hwmgr->
+ thermal_controller.advanceFanControlParameters.usTMax) / 100);
+
+ fan_table.Slope1 = cpu_to_be16(slope1);
+ fan_table.Slope2 = cpu_to_be16(slope2);
+
+ fan_table.FdoMin = cpu_to_be16(fdo_min);
+
+ fan_table.HystDown = cpu_to_be16(hwmgr->
+ thermal_controller.advanceFanControlParameters.ucTHyst);
+
+ fan_table.HystUp = cpu_to_be16(1);
+
+ fan_table.HystSlope = cpu_to_be16(1);
+
+ fan_table.TempRespLim = cpu_to_be16(5);
+
+ reference_clock = smu7_get_xclk(hwmgr);
+
+ fan_table.RefreshPeriod = cpu_to_be32((hwmgr->
+ thermal_controller.advanceFanControlParameters.ulCycleDelay *
+ reference_clock) / 1600);
+
+ fan_table.FdoMax = cpu_to_be16((uint16_t)duty100);
+
+ fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD(
+ hwmgr->device, CGS_IND_REG__SMC,
+ CG_MULT_THERMAL_CTRL, TEMP_SEL);
+
+ res = smu7_copy_bytes_to_smc(hwmgr, smu_data->smu7_data.fan_table_start,
+ (uint8_t *)&fan_table, (uint32_t)sizeof(fan_table),
+ SMC_RAM_END);
+
+ if (!res && hwmgr->thermal_controller.
+ advanceFanControlParameters.ucMinimumPWMLimit)
+ res = smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetFanMinPwm,
+ hwmgr->thermal_controller.
+ advanceFanControlParameters.ucMinimumPWMLimit);
+
+ if (!res && hwmgr->thermal_controller.
+ advanceFanControlParameters.ulMinFanSCLKAcousticLimit)
+ res = smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetFanSclkTarget,
+ hwmgr->thermal_controller.
+ advanceFanControlParameters.ulMinFanSCLKAcousticLimit);
+
+ if (res)
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_MicrocodeFanControl);
+
+ return 0;
+}
+
+static int polaris10_update_uvd_smc_table(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
+ uint32_t mm_boot_level_offset, mm_boot_level_value;
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+ smu_data->smc_state_table.UvdBootLevel = 0;
+ if (table_info->mm_dep_table->count > 0)
+ smu_data->smc_state_table.UvdBootLevel =
+ (uint8_t) (table_info->mm_dep_table->count - 1);
+ mm_boot_level_offset = smu_data->smu7_data.dpm_table_start + offsetof(SMU74_Discrete_DpmTable,
+ UvdBootLevel);
+ mm_boot_level_offset /= 4;
+ mm_boot_level_offset *= 4;
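+	/* Round the byte offset down to a dword boundary; the read-modify-write
+	 * below keeps the low 24 bits and places UvdBootLevel in bits 24-31.
+	 */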
+ mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC, mm_boot_level_offset);
+ mm_boot_level_value &= 0x00FFFFFF;
+ mm_boot_level_value |= smu_data->smc_state_table.UvdBootLevel << 24;
+ cgs_write_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
+
+ if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_UVDDPM) ||
+ phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_StablePState))
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_UVDDPM_SetEnabledMask,
+ (uint32_t)(1 << smu_data->smc_state_table.UvdBootLevel));
+ return 0;
+}
+
+static int polaris10_update_vce_smc_table(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
+ uint32_t mm_boot_level_offset, mm_boot_level_value;
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_StablePState))
+ smu_data->smc_state_table.VceBootLevel =
+ (uint8_t) (table_info->mm_dep_table->count - 1);
+ else
+ smu_data->smc_state_table.VceBootLevel = 0;
+
+ mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
+ offsetof(SMU74_Discrete_DpmTable, VceBootLevel);
+ mm_boot_level_offset /= 4;
+ mm_boot_level_offset *= 4;
+ mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC, mm_boot_level_offset);
+ mm_boot_level_value &= 0xFF00FFFF;
+ mm_boot_level_value |= smu_data->smc_state_table.VceBootLevel << 16;
+ cgs_write_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState))
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_VCEDPM_SetEnabledMask,
+ (uint32_t)1 << smu_data->smc_state_table.VceBootLevel);
+ return 0;
+}
+
+static int polaris10_update_samu_smc_table(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
+ uint32_t mm_boot_level_offset, mm_boot_level_value;
+
+
+ smu_data->smc_state_table.SamuBootLevel = 0;
+ mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
+ offsetof(SMU74_Discrete_DpmTable, SamuBootLevel);
+
+ mm_boot_level_offset /= 4;
+ mm_boot_level_offset *= 4;
+ mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC, mm_boot_level_offset);
+ mm_boot_level_value &= 0xFFFFFF00;
+ mm_boot_level_value |= smu_data->smc_state_table.SamuBootLevel << 0;
+ cgs_write_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_StablePState))
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SAMUDPM_SetEnabledMask,
+ (uint32_t)(1 << smu_data->smc_state_table.SamuBootLevel));
+ return 0;
+}
+
+
+static int polaris10_update_bif_smc_table(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct phm_ppt_v1_pcie_table *pcie_table = table_info->pcie_table;
+ int max_entry, i;
+
+ max_entry = (SMU74_MAX_LEVELS_LINK < pcie_table->count) ?
+ SMU74_MAX_LEVELS_LINK :
+ pcie_table->count;
+ /* Setup BIF_SCLK levels */
+ for (i = 0; i < max_entry; i++)
+ smu_data->bif_sclk_table[i] = pcie_table->entries[i].pcie_sclk;
+ return 0;
+}
+
+static int polaris10_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type)
+{
+ switch (type) {
+ case SMU_UVD_TABLE:
+ polaris10_update_uvd_smc_table(hwmgr);
+ break;
+ case SMU_VCE_TABLE:
+ polaris10_update_vce_smc_table(hwmgr);
+ break;
+ case SMU_SAMU_TABLE:
+ polaris10_update_samu_smc_table(hwmgr);
+ break;
+ case SMU_BIF_TABLE:
+ polaris10_update_bif_smc_table(hwmgr);
+ default:
+ break;
+ }
+ return 0;
+}
+
+static int polaris10_update_sclk_threshold(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
+
+ int result = 0;
+ uint32_t low_sclk_interrupt_threshold = 0;
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_SclkThrottleLowNotification)
+ && (hwmgr->gfx_arbiter.sclk_threshold !=
+ data->low_sclk_interrupt_threshold)) {
+ data->low_sclk_interrupt_threshold =
+ hwmgr->gfx_arbiter.sclk_threshold;
+ low_sclk_interrupt_threshold =
+ data->low_sclk_interrupt_threshold;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold);
+
+ result = smu7_copy_bytes_to_smc(
+ hwmgr,
+ smu_data->smu7_data.dpm_table_start +
+ offsetof(SMU74_Discrete_DpmTable,
+ LowSclkInterruptThreshold),
+ (uint8_t *)&low_sclk_interrupt_threshold,
+ sizeof(uint32_t),
+ SMC_RAM_END);
+ }
+ PP_ASSERT_WITH_CODE((result == 0),
+ "Failed to update SCLK threshold!", return result);
+
+ result = polaris10_program_mem_timing_parameters(hwmgr);
+ PP_ASSERT_WITH_CODE((result == 0),
+ "Failed to program memory timing parameters!",
+ );
+
+ return result;
+}
+
+static uint32_t polaris10_get_offsetof(uint32_t type, uint32_t member)
+{
+ switch (type) {
+ case SMU_SoftRegisters:
+ switch (member) {
+ case HandshakeDisables:
+ return offsetof(SMU74_SoftRegisters, HandshakeDisables);
+ case VoltageChangeTimeout:
+ return offsetof(SMU74_SoftRegisters, VoltageChangeTimeout);
+ case AverageGraphicsActivity:
+ return offsetof(SMU74_SoftRegisters, AverageGraphicsActivity);
+ case PreVBlankGap:
+ return offsetof(SMU74_SoftRegisters, PreVBlankGap);
+ case VBlankTimeout:
+ return offsetof(SMU74_SoftRegisters, VBlankTimeout);
+ case UcodeLoadStatus:
+ return offsetof(SMU74_SoftRegisters, UcodeLoadStatus);
+ case DRAM_LOG_ADDR_H:
+ return offsetof(SMU74_SoftRegisters, DRAM_LOG_ADDR_H);
+ case DRAM_LOG_ADDR_L:
+ return offsetof(SMU74_SoftRegisters, DRAM_LOG_ADDR_L);
+ case DRAM_LOG_PHY_ADDR_H:
+ return offsetof(SMU74_SoftRegisters, DRAM_LOG_PHY_ADDR_H);
+ case DRAM_LOG_PHY_ADDR_L:
+ return offsetof(SMU74_SoftRegisters, DRAM_LOG_PHY_ADDR_L);
+ case DRAM_LOG_BUFF_SIZE:
+ return offsetof(SMU74_SoftRegisters, DRAM_LOG_BUFF_SIZE);
+ }
+ case SMU_Discrete_DpmTable:
+ switch (member) {
+ case UvdBootLevel:
+ return offsetof(SMU74_Discrete_DpmTable, UvdBootLevel);
+ case VceBootLevel:
+ return offsetof(SMU74_Discrete_DpmTable, VceBootLevel);
+ case SamuBootLevel:
+ return offsetof(SMU74_Discrete_DpmTable, SamuBootLevel);
+ case LowSclkInterruptThreshold:
+ return offsetof(SMU74_Discrete_DpmTable, LowSclkInterruptThreshold);
+ }
+ }
+ pr_warn("can't get the offset of type %x member %x\n", type, member);
+ return 0;
+}
+
+static uint32_t polaris10_get_mac_definition(uint32_t value)
+{
+ switch (value) {
+ case SMU_MAX_LEVELS_GRAPHICS:
+ return SMU74_MAX_LEVELS_GRAPHICS;
+ case SMU_MAX_LEVELS_MEMORY:
+ return SMU74_MAX_LEVELS_MEMORY;
+ case SMU_MAX_LEVELS_LINK:
+ return SMU74_MAX_LEVELS_LINK;
+ case SMU_MAX_ENTRIES_SMIO:
+ return SMU74_MAX_ENTRIES_SMIO;
+ case SMU_MAX_LEVELS_VDDC:
+ return SMU74_MAX_LEVELS_VDDC;
+ case SMU_MAX_LEVELS_VDDGFX:
+ return SMU74_MAX_LEVELS_VDDGFX;
+ case SMU_MAX_LEVELS_VDDCI:
+ return SMU74_MAX_LEVELS_VDDCI;
+ case SMU_MAX_LEVELS_MVDD:
+ return SMU74_MAX_LEVELS_MVDD;
+ case SMU_UVD_MCLK_HANDSHAKE_DISABLE:
+ return SMU7_UVD_MCLK_HANDSHAKE_DISABLE;
+ }
+
+ pr_warn("can't get the mac of %x\n", value);
+ return 0;
+}
+
+static int polaris10_process_firmware_header(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ uint32_t tmp;
+ int result;
+ bool error = false;
+
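+	/* Walk the SMU74 firmware header and cache the SMC RAM offsets of the
+	 * tables written later (DPM table, soft registers, MC register table,
+	 * fan table, ARB table) plus the SMC firmware version.
+	 */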
+ result = smu7_read_smc_sram_dword(hwmgr,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU74_Firmware_Header, DpmTable),
+ &tmp, SMC_RAM_END);
+
+ if (0 == result)
+ smu_data->smu7_data.dpm_table_start = tmp;
+
+ error |= (0 != result);
+
+ result = smu7_read_smc_sram_dword(hwmgr,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU74_Firmware_Header, SoftRegisters),
+ &tmp, SMC_RAM_END);
+
+ if (!result) {
+ data->soft_regs_start = tmp;
+ smu_data->smu7_data.soft_regs_start = tmp;
+ }
+
+ error |= (0 != result);
+
+ result = smu7_read_smc_sram_dword(hwmgr,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU74_Firmware_Header, mcRegisterTable),
+ &tmp, SMC_RAM_END);
+
+ if (!result)
+ smu_data->smu7_data.mc_reg_table_start = tmp;
+
+ result = smu7_read_smc_sram_dword(hwmgr,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU74_Firmware_Header, FanTable),
+ &tmp, SMC_RAM_END);
+
+ if (!result)
+ smu_data->smu7_data.fan_table_start = tmp;
+
+ error |= (0 != result);
+
+ result = smu7_read_smc_sram_dword(hwmgr,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU74_Firmware_Header, mcArbDramTimingTable),
+ &tmp, SMC_RAM_END);
+
+ if (!result)
+ smu_data->smu7_data.arb_table_start = tmp;
+
+ error |= (0 != result);
+
+ result = smu7_read_smc_sram_dword(hwmgr,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU74_Firmware_Header, Version),
+ &tmp, SMC_RAM_END);
+
+ if (!result)
+ hwmgr->microcode_version_info.SMC = tmp;
+
+ error |= (0 != result);
+
+ return error ? -1 : 0;
+}
+
+static bool polaris10_is_dpm_running(struct pp_hwmgr *hwmgr)
+{
+ return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device,
+ CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON))
+ ? true : false;
+}
+
+static int polaris10_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr,
+ struct amd_pp_profile *request)
+{
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)
+ (hwmgr->smu_backend);
+ struct SMU74_Discrete_GraphicsLevel *levels =
+ smu_data->smc_state_table.GraphicsLevel;
+ uint32_t array = smu_data->smu7_data.dpm_table_start +
+ offsetof(SMU74_Discrete_DpmTable, GraphicsLevel);
+ uint32_t array_size = sizeof(struct SMU74_Discrete_GraphicsLevel) *
+ SMU74_MAX_LEVELS_GRAPHICS;
+ uint32_t i;
+
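+	/* Apply the requested profile parameters to every populated graphics
+	 * DPM level, then push the whole GraphicsLevel array back to SMC RAM.
+	 */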
+ for (i = 0; i < smu_data->smc_state_table.GraphicsDpmLevelCount; i++) {
+ levels[i].ActivityLevel =
+ cpu_to_be16(request->activity_threshold);
+ levels[i].EnabledForActivity = 1;
+ levels[i].UpHyst = request->up_hyst;
+ levels[i].DownHyst = request->down_hyst;
+ }
+
+ return smu7_copy_bytes_to_smc(hwmgr, array, (uint8_t *)levels,
+ array_size, SMC_RAM_END);
+}
+
const struct pp_smumgr_func polaris10_smu_funcs = {
.smu_init = polaris10_smu_init,
.smu_fini = smu7_smu_fini,
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.c
index ce0a30388ea1..b98ade676d12 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.c
@@ -48,20 +48,20 @@
#define smnMP1_FIRMWARE_FLAGS 0x3010028
-bool rv_is_smc_ram_running(struct pp_smumgr *smumgr)
+bool rv_is_smc_ram_running(struct pp_hwmgr *hwmgr)
{
uint32_t mp1_fw_flags, reg;
reg = soc15_get_register_offset(NBIF_HWID, 0,
mmPCIE_INDEX2_BASE_IDX, mmPCIE_INDEX2);
- cgs_write_register(smumgr->device, reg,
+ cgs_write_register(hwmgr->device, reg,
(MP1_Public | (smnMP1_FIRMWARE_FLAGS & 0xffffffff)));
reg = soc15_get_register_offset(NBIF_HWID, 0,
mmPCIE_DATA2_BASE_IDX, mmPCIE_DATA2);
- mp1_fw_flags = cgs_read_register(smumgr->device, reg);
+ mp1_fw_flags = cgs_read_register(hwmgr->device, reg);
if (mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK)
return true;
@@ -69,97 +69,97 @@ bool rv_is_smc_ram_running(struct pp_smumgr *smumgr)
return false;
}
-static uint32_t rv_wait_for_response(struct pp_smumgr *smumgr)
+static uint32_t rv_wait_for_response(struct pp_hwmgr *hwmgr)
{
uint32_t reg;
- if (!rv_is_smc_ram_running(smumgr))
+ if (!rv_is_smc_ram_running(hwmgr))
return -EINVAL;
reg = soc15_get_register_offset(MP1_HWID, 0,
mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90);
- smum_wait_for_register_unequal(smumgr, reg,
+ phm_wait_for_register_unequal(hwmgr, reg,
0, MP1_C2PMSG_90__CONTENT_MASK);
- return cgs_read_register(smumgr->device, reg);
+ return cgs_read_register(hwmgr->device, reg);
}
-int rv_send_msg_to_smc_without_waiting(struct pp_smumgr *smumgr,
+int rv_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr,
uint16_t msg)
{
uint32_t reg;
- if (!rv_is_smc_ram_running(smumgr))
+ if (!rv_is_smc_ram_running(hwmgr))
return -EINVAL;
reg = soc15_get_register_offset(MP1_HWID, 0,
mmMP1_SMN_C2PMSG_66_BASE_IDX, mmMP1_SMN_C2PMSG_66);
- cgs_write_register(smumgr->device, reg, msg);
+ cgs_write_register(hwmgr->device, reg, msg);
return 0;
}
-int rv_read_arg_from_smc(struct pp_smumgr *smumgr, uint32_t *arg)
+int rv_read_arg_from_smc(struct pp_hwmgr *hwmgr, uint32_t *arg)
{
uint32_t reg;
reg = soc15_get_register_offset(MP1_HWID, 0,
mmMP1_SMN_C2PMSG_82_BASE_IDX, mmMP1_SMN_C2PMSG_82);
- *arg = cgs_read_register(smumgr->device, reg);
+ *arg = cgs_read_register(hwmgr->device, reg);
return 0;
}
-int rv_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg)
+int rv_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
{
uint32_t reg;
- rv_wait_for_response(smumgr);
+ rv_wait_for_response(hwmgr);
reg = soc15_get_register_offset(MP1_HWID, 0,
mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90);
- cgs_write_register(smumgr->device, reg, 0);
+ cgs_write_register(hwmgr->device, reg, 0);
- rv_send_msg_to_smc_without_waiting(smumgr, msg);
+ rv_send_msg_to_smc_without_waiting(hwmgr, msg);
- if (rv_wait_for_response(smumgr) == 0)
+ if (rv_wait_for_response(hwmgr) == 0)
printk("Failed to send Message %x.\n", msg);
return 0;
}
-int rv_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr,
+int rv_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
uint16_t msg, uint32_t parameter)
{
uint32_t reg;
- rv_wait_for_response(smumgr);
+ rv_wait_for_response(hwmgr);
reg = soc15_get_register_offset(MP1_HWID, 0,
mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90);
- cgs_write_register(smumgr->device, reg, 0);
+ cgs_write_register(hwmgr->device, reg, 0);
reg = soc15_get_register_offset(MP1_HWID, 0,
mmMP1_SMN_C2PMSG_82_BASE_IDX, mmMP1_SMN_C2PMSG_82);
- cgs_write_register(smumgr->device, reg, parameter);
+ cgs_write_register(hwmgr->device, reg, parameter);
- rv_send_msg_to_smc_without_waiting(smumgr, msg);
+ rv_send_msg_to_smc_without_waiting(hwmgr, msg);
- if (rv_wait_for_response(smumgr) == 0)
+ if (rv_wait_for_response(hwmgr) == 0)
printk("Failed to send Message %x.\n", msg);
return 0;
}
-int rv_copy_table_from_smc(struct pp_smumgr *smumgr,
+int rv_copy_table_from_smc(struct pp_hwmgr *hwmgr,
uint8_t *table, int16_t table_id)
{
struct rv_smumgr *priv =
- (struct rv_smumgr *)(smumgr->backend);
+ (struct rv_smumgr *)(hwmgr->smu_backend);
PP_ASSERT_WITH_CODE(table_id < MAX_SMU_TABLE,
"Invalid SMU Table ID!", return -EINVAL;);
@@ -167,16 +167,16 @@ int rv_copy_table_from_smc(struct pp_smumgr *smumgr,
"Invalid SMU Table version!", return -EINVAL;);
PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].size != 0,
"Invalid SMU Table Length!", return -EINVAL;);
- PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(smumgr,
+ PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrHigh,
priv->smu_tables.entry[table_id].table_addr_high) == 0,
"[CopyTableFromSMC] Attempt to Set Dram Addr High Failed!", return -EINVAL;);
- PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(smumgr,
+ PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrLow,
priv->smu_tables.entry[table_id].table_addr_low) == 0,
"[CopyTableFromSMC] Attempt to Set Dram Addr Low Failed!",
return -EINVAL;);
- PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(smumgr,
+ PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_TransferTableSmu2Dram,
priv->smu_tables.entry[table_id].table_id) == 0,
"[CopyTableFromSMC] Attempt to Transfer Table From SMU Failed!",
@@ -188,11 +188,11 @@ int rv_copy_table_from_smc(struct pp_smumgr *smumgr,
return 0;
}
-int rv_copy_table_to_smc(struct pp_smumgr *smumgr,
+int rv_copy_table_to_smc(struct pp_hwmgr *hwmgr,
uint8_t *table, int16_t table_id)
{
struct rv_smumgr *priv =
- (struct rv_smumgr *)(smumgr->backend);
+ (struct rv_smumgr *)(hwmgr->smu_backend);
PP_ASSERT_WITH_CODE(table_id < MAX_SMU_TABLE,
"Invalid SMU Table ID!", return -EINVAL;);
@@ -204,17 +204,17 @@ int rv_copy_table_to_smc(struct pp_smumgr *smumgr,
memcpy(priv->smu_tables.entry[table_id].table, table,
priv->smu_tables.entry[table_id].size);
- PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(smumgr,
+ PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrHigh,
priv->smu_tables.entry[table_id].table_addr_high) == 0,
"[CopyTableToSMC] Attempt to Set Dram Addr High Failed!",
return -EINVAL;);
- PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(smumgr,
+ PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrLow,
priv->smu_tables.entry[table_id].table_addr_low) == 0,
"[CopyTableToSMC] Attempt to Set Dram Addr Low Failed!",
return -EINVAL;);
- PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(smumgr,
+ PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_TransferTableDram2Smu,
priv->smu_tables.entry[table_id].table_id) == 0,
"[CopyTableToSMC] Attempt to Transfer Table To SMU Failed!",
@@ -223,15 +223,15 @@ int rv_copy_table_to_smc(struct pp_smumgr *smumgr,
return 0;
}
-static int rv_verify_smc_interface(struct pp_smumgr *smumgr)
+static int rv_verify_smc_interface(struct pp_hwmgr *hwmgr)
{
uint32_t smc_driver_if_version;
- PP_ASSERT_WITH_CODE(!rv_send_msg_to_smc(smumgr,
+ PP_ASSERT_WITH_CODE(!rv_send_msg_to_smc(hwmgr,
PPSMC_MSG_GetDriverIfVersion),
"Attempt to get SMC IF Version Number Failed!",
return -EINVAL);
- PP_ASSERT_WITH_CODE(!rv_read_arg_from_smc(smumgr,
+ PP_ASSERT_WITH_CODE(!rv_read_arg_from_smc(hwmgr,
&smc_driver_if_version),
"Attempt to read SMC IF Version Number Failed!",
return -EINVAL);
@@ -243,9 +243,9 @@ static int rv_verify_smc_interface(struct pp_smumgr *smumgr)
}
/* sdma is disabled by default in vbios, need to re-enable in driver */
-static int rv_smc_enable_sdma(struct pp_smumgr *smumgr)
+static int rv_smc_enable_sdma(struct pp_hwmgr *hwmgr)
{
- PP_ASSERT_WITH_CODE(!rv_send_msg_to_smc(smumgr,
+ PP_ASSERT_WITH_CODE(!rv_send_msg_to_smc(hwmgr,
PPSMC_MSG_PowerUpSdma),
"Attempt to power up sdma Failed!",
return -EINVAL);
@@ -253,9 +253,9 @@ static int rv_smc_enable_sdma(struct pp_smumgr *smumgr)
return 0;
}
-static int rv_smc_disable_sdma(struct pp_smumgr *smumgr)
+static int rv_smc_disable_sdma(struct pp_hwmgr *hwmgr)
{
- PP_ASSERT_WITH_CODE(!rv_send_msg_to_smc(smumgr,
+ PP_ASSERT_WITH_CODE(!rv_send_msg_to_smc(hwmgr,
PPSMC_MSG_PowerDownSdma),
"Attempt to power down sdma Failed!",
return -EINVAL);
@@ -264,9 +264,9 @@ static int rv_smc_disable_sdma(struct pp_smumgr *smumgr)
}
/* vcn is disabled by default in vbios, need to re-enable in driver */
-static int rv_smc_enable_vcn(struct pp_smumgr *smumgr)
+static int rv_smc_enable_vcn(struct pp_hwmgr *hwmgr)
{
- PP_ASSERT_WITH_CODE(!rv_send_msg_to_smc_with_parameter(smumgr,
+ PP_ASSERT_WITH_CODE(!rv_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_PowerUpVcn, 0),
"Attempt to power up vcn Failed!",
return -EINVAL);
@@ -274,9 +274,9 @@ static int rv_smc_enable_vcn(struct pp_smumgr *smumgr)
return 0;
}
-static int rv_smc_disable_vcn(struct pp_smumgr *smumgr)
+static int rv_smc_disable_vcn(struct pp_hwmgr *hwmgr)
{
- PP_ASSERT_WITH_CODE(!rv_send_msg_to_smc_with_parameter(smumgr,
+ PP_ASSERT_WITH_CODE(!rv_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_PowerDownVcn, 0),
"Attempt to power down vcn Failed!",
return -EINVAL);
@@ -284,38 +284,38 @@ static int rv_smc_disable_vcn(struct pp_smumgr *smumgr)
return 0;
}
-static int rv_smu_fini(struct pp_smumgr *smumgr)
+static int rv_smu_fini(struct pp_hwmgr *hwmgr)
{
struct rv_smumgr *priv =
- (struct rv_smumgr *)(smumgr->backend);
+ (struct rv_smumgr *)(hwmgr->smu_backend);
if (priv) {
- rv_smc_disable_sdma(smumgr);
- rv_smc_disable_vcn(smumgr);
- cgs_free_gpu_mem(smumgr->device,
+ rv_smc_disable_sdma(hwmgr);
+ rv_smc_disable_vcn(hwmgr);
+ cgs_free_gpu_mem(hwmgr->device,
priv->smu_tables.entry[WMTABLE].handle);
- cgs_free_gpu_mem(smumgr->device,
+ cgs_free_gpu_mem(hwmgr->device,
priv->smu_tables.entry[CLOCKTABLE].handle);
- kfree(smumgr->backend);
- smumgr->backend = NULL;
+ kfree(hwmgr->smu_backend);
+ hwmgr->smu_backend = NULL;
}
return 0;
}
-static int rv_start_smu(struct pp_smumgr *smumgr)
+static int rv_start_smu(struct pp_hwmgr *hwmgr)
{
- if (rv_verify_smc_interface(smumgr))
+ if (rv_verify_smc_interface(hwmgr))
return -EINVAL;
- if (rv_smc_enable_sdma(smumgr))
+ if (rv_smc_enable_sdma(hwmgr))
return -EINVAL;
- if (rv_smc_enable_vcn(smumgr))
+ if (rv_smc_enable_vcn(hwmgr))
return -EINVAL;
return 0;
}
-static int rv_smu_init(struct pp_smumgr *smumgr)
+static int rv_smu_init(struct pp_hwmgr *hwmgr)
{
struct rv_smumgr *priv;
uint64_t mc_addr;
@@ -327,10 +327,10 @@ static int rv_smu_init(struct pp_smumgr *smumgr)
if (!priv)
return -ENOMEM;
- smumgr->backend = priv;
+ hwmgr->smu_backend = priv;
/* allocate space for watermarks table */
- smu_allocate_memory(smumgr->device,
+ smu_allocate_memory(hwmgr->device,
sizeof(Watermarks_t),
CGS_GPU_MEM_TYPE__GART_CACHEABLE,
PAGE_SIZE,
@@ -340,8 +340,8 @@ static int rv_smu_init(struct pp_smumgr *smumgr)
PP_ASSERT_WITH_CODE(kaddr,
"[rv_smu_init] Out of memory for wmtable.",
- kfree(smumgr->backend);
- smumgr->backend = NULL;
+ kfree(hwmgr->smu_backend);
+ hwmgr->smu_backend = NULL;
return -EINVAL);
priv->smu_tables.entry[WMTABLE].version = 0x01;
@@ -355,7 +355,7 @@ static int rv_smu_init(struct pp_smumgr *smumgr)
priv->smu_tables.entry[WMTABLE].handle = handle;
/* allocate space for watermarks table */
- smu_allocate_memory(smumgr->device,
+ smu_allocate_memory(hwmgr->device,
sizeof(DpmClocks_t),
CGS_GPU_MEM_TYPE__GART_CACHEABLE,
PAGE_SIZE,
@@ -365,10 +365,10 @@ static int rv_smu_init(struct pp_smumgr *smumgr)
PP_ASSERT_WITH_CODE(kaddr,
"[rv_smu_init] Out of memory for CLOCKTABLE.",
- cgs_free_gpu_mem(smumgr->device,
+ cgs_free_gpu_mem(hwmgr->device,
(cgs_handle_t)priv->smu_tables.entry[WMTABLE].handle);
- kfree(smumgr->backend);
- smumgr->backend = NULL;
+ kfree(hwmgr->smu_backend);
+ hwmgr->smu_backend = NULL;
return -EINVAL);
priv->smu_tables.entry[CLOCKTABLE].version = 0x01;
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.h
index 262c8ded87c0..58888400f1b8 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.h
@@ -51,11 +51,11 @@ struct rv_smumgr {
struct smu_table_array smu_tables;
};
-int rv_read_arg_from_smc(struct pp_smumgr *smumgr, uint32_t *arg);
-bool rv_is_smc_ram_running(struct pp_smumgr *smumgr);
-int rv_copy_table_from_smc(struct pp_smumgr *smumgr,
+int rv_read_arg_from_smc(struct pp_hwmgr *hwmgr, uint32_t *arg);
+bool rv_is_smc_ram_running(struct pp_hwmgr *hwmgr);
+int rv_copy_table_from_smc(struct pp_hwmgr *hwmgr,
uint8_t *table, int16_t table_id);
-int rv_copy_table_to_smc(struct pp_smumgr *smumgr,
+int rv_copy_table_to_smc(struct pp_hwmgr *hwmgr,
uint8_t *table, int16_t table_id);
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
index c49a6f22002f..7f5359a97ef2 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
@@ -25,27 +25,28 @@
#include "pp_debug.h"
#include "smumgr.h"
#include "smu_ucode_xfer_vi.h"
-#include "smu/smu_7_1_3_d.h"
-#include "smu/smu_7_1_3_sh_mask.h"
#include "ppatomctrl.h"
#include "cgs_common.h"
#include "smu7_ppsmc.h"
#include "smu7_smumgr.h"
+#include "smu7_common.h"
+
+#include "polaris10_pwrvirus.h"
#define SMU7_SMC_SIZE 0x20000
-static int smu7_set_smc_sram_address(struct pp_smumgr *smumgr, uint32_t smc_addr, uint32_t limit)
+static int smu7_set_smc_sram_address(struct pp_hwmgr *hwmgr, uint32_t smc_addr, uint32_t limit)
{
PP_ASSERT_WITH_CODE((0 == (3 & smc_addr)), "SMC address must be 4 byte aligned.", return -EINVAL);
PP_ASSERT_WITH_CODE((limit > (smc_addr + 3)), "SMC addr is beyond the SMC RAM area.", return -EINVAL);
- cgs_write_register(smumgr->device, mmSMC_IND_INDEX_11, smc_addr);
- SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_11, 0); /* on ci, SMC_IND_ACCESS_CNTL is different */
+ cgs_write_register(hwmgr->device, mmSMC_IND_INDEX_11, smc_addr);
+ PHM_WRITE_FIELD(hwmgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_11, 0); /* on ci, SMC_IND_ACCESS_CNTL is different */
return 0;
}
-int smu7_copy_bytes_from_smc(struct pp_smumgr *smumgr, uint32_t smc_start_address, uint32_t *dest, uint32_t byte_count, uint32_t limit)
+int smu7_copy_bytes_from_smc(struct pp_hwmgr *hwmgr, uint32_t smc_start_address, uint32_t *dest, uint32_t byte_count, uint32_t limit)
{
uint32_t data;
uint32_t addr;
@@ -59,7 +60,7 @@ int smu7_copy_bytes_from_smc(struct pp_smumgr *smumgr, uint32_t smc_start_addres
addr = smc_start_address;
while (byte_count >= 4) {
- smu7_read_smc_sram_dword(smumgr, addr, &data, limit);
+ smu7_read_smc_sram_dword(hwmgr, addr, &data, limit);
*dest = PP_SMC_TO_HOST_UL(data);
@@ -69,7 +70,7 @@ int smu7_copy_bytes_from_smc(struct pp_smumgr *smumgr, uint32_t smc_start_addres
}
if (byte_count) {
- smu7_read_smc_sram_dword(smumgr, addr, &data, limit);
+ smu7_read_smc_sram_dword(hwmgr, addr, &data, limit);
*pdata = PP_SMC_TO_HOST_UL(data);
/* Cast dest into byte type in dest_byte. This way, we don't overflow if the allocated memory is not 4-byte aligned. */
dest_byte = (uint8_t *)dest;
@@ -81,7 +82,7 @@ int smu7_copy_bytes_from_smc(struct pp_smumgr *smumgr, uint32_t smc_start_addres
}
-int smu7_copy_bytes_to_smc(struct pp_smumgr *smumgr, uint32_t smc_start_address,
+int smu7_copy_bytes_to_smc(struct pp_hwmgr *hwmgr, uint32_t smc_start_address,
const uint8_t *src, uint32_t byte_count, uint32_t limit)
{
int result;
@@ -99,12 +100,12 @@ int smu7_copy_bytes_to_smc(struct pp_smumgr *smumgr, uint32_t smc_start_address,
/* Bytes are written into the SMC address space with the MSB first. */
data = src[0] * 0x1000000 + src[1] * 0x10000 + src[2] * 0x100 + src[3];
- result = smu7_set_smc_sram_address(smumgr, addr, limit);
+ result = smu7_set_smc_sram_address(hwmgr, addr, limit);
if (0 != result)
return result;
- cgs_write_register(smumgr->device, mmSMC_IND_DATA_11, data);
+ cgs_write_register(hwmgr->device, mmSMC_IND_DATA_11, data);
src += 4;
byte_count -= 4;
@@ -115,13 +116,13 @@ int smu7_copy_bytes_to_smc(struct pp_smumgr *smumgr, uint32_t smc_start_address,
data = 0;
- result = smu7_set_smc_sram_address(smumgr, addr, limit);
+ result = smu7_set_smc_sram_address(hwmgr, addr, limit);
if (0 != result)
return result;
- original_data = cgs_read_register(smumgr->device, mmSMC_IND_DATA_11);
+ original_data = cgs_read_register(hwmgr->device, mmSMC_IND_DATA_11);
extra_shift = 8 * (4 - byte_count);
@@ -135,53 +136,53 @@ int smu7_copy_bytes_to_smc(struct pp_smumgr *smumgr, uint32_t smc_start_address,
data |= (original_data & ~((~0UL) << extra_shift));
- result = smu7_set_smc_sram_address(smumgr, addr, limit);
+ result = smu7_set_smc_sram_address(hwmgr, addr, limit);
if (0 != result)
return result;
- cgs_write_register(smumgr->device, mmSMC_IND_DATA_11, data);
+ cgs_write_register(hwmgr->device, mmSMC_IND_DATA_11, data);
}
return 0;
}
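A minimal standalone sketch (not part of the patch): the "MSB first" packing that smu7_copy_bytes_to_smc() builds with multiplications is simply a big-endian load of four bytes; pack_msb_first() is a hypothetical name.

#include <stdint.h>

/* Equivalent to the driver's src[0] * 0x1000000 + src[1] * 0x10000 + ...
 * expression, written with shifts so a standalone build avoids signed
 * integer overflow. */
static uint32_t pack_msb_first(const uint8_t src[4])
{
	return ((uint32_t)src[0] << 24) | ((uint32_t)src[1] << 16) |
	       ((uint32_t)src[2] << 8) | (uint32_t)src[3];
}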
-int smu7_program_jump_on_start(struct pp_smumgr *smumgr)
+int smu7_program_jump_on_start(struct pp_hwmgr *hwmgr)
{
static const unsigned char data[4] = { 0xE0, 0x00, 0x80, 0x40 };
- smu7_copy_bytes_to_smc(smumgr, 0x0, data, 4, sizeof(data)+1);
+ smu7_copy_bytes_to_smc(hwmgr, 0x0, data, 4, sizeof(data)+1);
return 0;
}
-bool smu7_is_smc_ram_running(struct pp_smumgr *smumgr)
+bool smu7_is_smc_ram_running(struct pp_hwmgr *hwmgr)
{
- return ((0 == SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable))
- && (0x20100 <= cgs_read_ind_register(smumgr->device, CGS_IND_REG__SMC, ixSMC_PC_C)));
+ return ((0 == PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable))
+ && (0x20100 <= cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMC_PC_C)));
}
-int smu7_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg)
+int smu7_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
{
int ret;
- if (!smu7_is_smc_ram_running(smumgr))
+ if (!smu7_is_smc_ram_running(hwmgr))
return -EINVAL;
- SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
+ PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0);
- ret = SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP);
+ ret = PHM_READ_FIELD(hwmgr->device, SMC_RESP_0, SMC_RESP);
if (ret != 1)
pr_info("\n failed to send pre message %x ret is %d \n", msg, ret);
- cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, msg);
+ cgs_write_register(hwmgr->device, mmSMC_MESSAGE_0, msg);
- SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
+ PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0);
- ret = SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP);
+ ret = PHM_READ_FIELD(hwmgr->device, SMC_RESP_0, SMC_RESP);
if (ret != 1)
pr_info("\n failed to send message %x ret is %d \n", msg, ret);
@@ -189,53 +190,53 @@ int smu7_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg)
return 0;
}
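A hedged sketch, not part of the patch: smu7_send_msg_to_smc() only logs a non-1 response, so a caller that needs to verify delivery could re-read the response field itself. example_last_msg_succeeded() is a hypothetical helper that assumes the same headers as this file.

static bool example_last_msg_succeeded(struct pp_hwmgr *hwmgr)
{
	/* the SMC acknowledges a message by setting SMC_RESP to 1 */
	return PHM_READ_FIELD(hwmgr->device, SMC_RESP_0, SMC_RESP) == 1;
}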
-int smu7_send_msg_to_smc_without_waiting(struct pp_smumgr *smumgr, uint16_t msg)
+int smu7_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr, uint16_t msg)
{
- cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, msg);
+ cgs_write_register(hwmgr->device, mmSMC_MESSAGE_0, msg);
return 0;
}
-int smu7_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr, uint16_t msg, uint32_t parameter)
+int smu7_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr, uint16_t msg, uint32_t parameter)
{
- if (!smu7_is_smc_ram_running(smumgr)) {
+ if (!smu7_is_smc_ram_running(hwmgr)) {
return -EINVAL;
}
- SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
+ PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0);
- cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, parameter);
+ cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, parameter);
- return smu7_send_msg_to_smc(smumgr, msg);
+ return smu7_send_msg_to_smc(hwmgr, msg);
}
-int smu7_send_msg_to_smc_with_parameter_without_waiting(struct pp_smumgr *smumgr, uint16_t msg, uint32_t parameter)
+int smu7_send_msg_to_smc_with_parameter_without_waiting(struct pp_hwmgr *hwmgr, uint16_t msg, uint32_t parameter)
{
- cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, parameter);
+ cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, parameter);
- return smu7_send_msg_to_smc_without_waiting(smumgr, msg);
+ return smu7_send_msg_to_smc_without_waiting(hwmgr, msg);
}
-int smu7_send_msg_to_smc_offset(struct pp_smumgr *smumgr)
+int smu7_send_msg_to_smc_offset(struct pp_hwmgr *hwmgr)
{
- cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, 0x20000);
+ cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, 0x20000);
- cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, PPSMC_MSG_Test);
+ cgs_write_register(hwmgr->device, mmSMC_MESSAGE_0, PPSMC_MSG_Test);
- SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
+ PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0);
- if (1 != SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP))
+ if (1 != PHM_READ_FIELD(hwmgr->device, SMC_RESP_0, SMC_RESP))
pr_info("Failed to send Message.\n");
return 0;
}
-int smu7_wait_for_smc_inactive(struct pp_smumgr *smumgr)
+int smu7_wait_for_smc_inactive(struct pp_hwmgr *hwmgr)
{
- if (!smu7_is_smc_ram_running(smumgr))
+ if (!smu7_is_smc_ram_running(hwmgr))
return -EINVAL;
- SMUM_WAIT_VFPF_INDIRECT_FIELD(smumgr, SMC_IND, SMC_SYSCON_CLOCK_CNTL_0, cken, 0);
+ PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND, SMC_SYSCON_CLOCK_CNTL_0, cken, 0);
return 0;
}
@@ -289,29 +290,29 @@ enum cgs_ucode_id smu7_convert_fw_type_to_cgs(uint32_t fw_type)
}
-int smu7_read_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr, uint32_t *value, uint32_t limit)
+int smu7_read_smc_sram_dword(struct pp_hwmgr *hwmgr, uint32_t smc_addr, uint32_t *value, uint32_t limit)
{
int result;
- result = smu7_set_smc_sram_address(smumgr, smc_addr, limit);
+ result = smu7_set_smc_sram_address(hwmgr, smc_addr, limit);
if (result)
return result;
- *value = cgs_read_register(smumgr->device, mmSMC_IND_DATA_11);
+ *value = cgs_read_register(hwmgr->device, mmSMC_IND_DATA_11);
return 0;
}
-int smu7_write_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr, uint32_t value, uint32_t limit)
+int smu7_write_smc_sram_dword(struct pp_hwmgr *hwmgr, uint32_t smc_addr, uint32_t value, uint32_t limit)
{
int result;
- result = smu7_set_smc_sram_address(smumgr, smc_addr, limit);
+ result = smu7_set_smc_sram_address(hwmgr, smc_addr, limit);
if (result)
return result;
- cgs_write_register(smumgr->device, mmSMC_IND_DATA_11, value);
+ cgs_write_register(hwmgr->device, mmSMC_IND_DATA_11, value);
return 0;
}
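A hypothetical usage sketch (not in the patch) of the two SRAM accessors converted above; the address must be 4-byte aligned and below the limit, or smu7_set_smc_sram_address() rejects it.

static int example_bump_sram_dword(struct pp_hwmgr *hwmgr, uint32_t addr)
{
	uint32_t val;
	int ret;

	ret = smu7_read_smc_sram_dword(hwmgr, addr, &val, SMU7_SMC_SIZE);
	if (ret)
		return ret;
	/* write the incremented value back through the same indirect window */
	return smu7_write_smc_sram_dword(hwmgr, addr, val + 1, SMU7_SMC_SIZE);
}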
@@ -354,14 +355,14 @@ static uint32_t smu7_get_mask_for_firmware_type(uint32_t fw_type)
return result;
}
-static int smu7_populate_single_firmware_entry(struct pp_smumgr *smumgr,
+static int smu7_populate_single_firmware_entry(struct pp_hwmgr *hwmgr,
uint32_t fw_type,
struct SMU_Entry *entry)
{
int result = 0;
struct cgs_firmware_info info = {0};
- result = cgs_get_firmware_info(smumgr->device,
+ result = cgs_get_firmware_info(hwmgr->device,
smu7_convert_fw_type_to_cgs(fw_type),
&info);
@@ -374,7 +375,7 @@ static int smu7_populate_single_firmware_entry(struct pp_smumgr *smumgr,
entry->meta_data_addr_low = 0;
/* digest need be excluded out */
- if (cgs_is_virtualization_enabled(smumgr->device))
+ if (cgs_is_virtualization_enabled(hwmgr->device))
info.image_size -= 20;
entry->data_size_byte = info.image_size;
entry->num_register_entries = 0;
@@ -389,30 +390,30 @@ static int smu7_populate_single_firmware_entry(struct pp_smumgr *smumgr,
return 0;
}
-int smu7_request_smu_load_fw(struct pp_smumgr *smumgr)
+int smu7_request_smu_load_fw(struct pp_hwmgr *hwmgr)
{
- struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend);
+ struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
uint32_t fw_to_load;
int result = 0;
struct SMU_DRAMData_TOC *toc;
- if (!smumgr->reload_fw) {
+ if (!hwmgr->reload_fw) {
pr_info("skip reloading...\n");
return 0;
}
if (smu_data->soft_regs_start)
- cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC,
- smu_data->soft_regs_start + smum_get_offsetof(smumgr,
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ smu_data->soft_regs_start + smum_get_offsetof(hwmgr,
SMU_SoftRegisters, UcodeLoadStatus),
0x0);
- if (smumgr->chip_id > CHIP_TOPAZ) { /* add support for Topaz */
- if (!cgs_is_virtualization_enabled(smumgr->device)) {
- smu7_send_msg_to_smc_with_parameter(smumgr,
+ if (hwmgr->chip_id > CHIP_TOPAZ) { /* add support for Topaz */
+ if (!cgs_is_virtualization_enabled(hwmgr->device)) {
+ smu7_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SMU_DRAM_ADDR_HI,
smu_data->smu_buffer.mc_addr_high);
- smu7_send_msg_to_smc_with_parameter(smumgr,
+ smu7_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SMU_DRAM_ADDR_LO,
smu_data->smu_buffer.mc_addr_low);
}
@@ -439,122 +440,162 @@ int smu7_request_smu_load_fw(struct pp_smumgr *smumgr)
toc->num_entries = 0;
toc->structure_version = 1;
- PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr,
+ PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
UCODE_ID_RLC_G, &toc->entry[toc->num_entries++]),
"Failed to Get Firmware Entry.", return -EINVAL);
- PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr,
+ PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
UCODE_ID_CP_CE, &toc->entry[toc->num_entries++]),
"Failed to Get Firmware Entry.", return -EINVAL);
- PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr,
+ PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
UCODE_ID_CP_PFP, &toc->entry[toc->num_entries++]),
"Failed to Get Firmware Entry.", return -EINVAL);
- PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr,
+ PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
UCODE_ID_CP_ME, &toc->entry[toc->num_entries++]),
"Failed to Get Firmware Entry.", return -EINVAL);
- PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr,
+ PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
UCODE_ID_CP_MEC, &toc->entry[toc->num_entries++]),
"Failed to Get Firmware Entry.", return -EINVAL);
- PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr,
+ PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
UCODE_ID_CP_MEC_JT1, &toc->entry[toc->num_entries++]),
"Failed to Get Firmware Entry.", return -EINVAL);
- PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr,
+ PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
UCODE_ID_CP_MEC_JT2, &toc->entry[toc->num_entries++]),
"Failed to Get Firmware Entry.", return -EINVAL);
- PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr,
+ PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
UCODE_ID_SDMA0, &toc->entry[toc->num_entries++]),
"Failed to Get Firmware Entry.", return -EINVAL);
- PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr,
+ PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
UCODE_ID_SDMA1, &toc->entry[toc->num_entries++]),
"Failed to Get Firmware Entry.", return -EINVAL);
- if (cgs_is_virtualization_enabled(smumgr->device))
- PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr,
+ if (cgs_is_virtualization_enabled(hwmgr->device))
+ PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
UCODE_ID_MEC_STORAGE, &toc->entry[toc->num_entries++]),
"Failed to Get Firmware Entry.", return -EINVAL);
- smu7_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_DRV_DRAM_ADDR_HI, smu_data->header_buffer.mc_addr_high);
- smu7_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_DRV_DRAM_ADDR_LO, smu_data->header_buffer.mc_addr_low);
+ smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DRV_DRAM_ADDR_HI, smu_data->header_buffer.mc_addr_high);
+ smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DRV_DRAM_ADDR_LO, smu_data->header_buffer.mc_addr_low);
- if (smu7_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_LoadUcodes, fw_to_load))
+ if (smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_LoadUcodes, fw_to_load))
pr_err("Fail to Request SMU Load uCode");
return result;
}
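A hypothetical helper, not in the patch, illustrating the HI/LO split used above when a 64-bit DRAM address has to be handed to the SMC one 32-bit message argument at a time.

static int example_set_drv_dram_addr(struct pp_hwmgr *hwmgr, uint64_t mc_addr)
{
	smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DRV_DRAM_ADDR_HI,
					    upper_32_bits(mc_addr));
	return smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DRV_DRAM_ADDR_LO,
						   lower_32_bits(mc_addr));
}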
/* Check if the FW has been loaded; the SMU will not return until loading has finished. */
-int smu7_check_fw_load_finish(struct pp_smumgr *smumgr, uint32_t fw_type)
+int smu7_check_fw_load_finish(struct pp_hwmgr *hwmgr, uint32_t fw_type)
{
- struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend);
+ struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
uint32_t fw_mask = smu7_get_mask_for_firmware_type(fw_type);
uint32_t ret;
- ret = smum_wait_on_indirect_register(smumgr, mmSMC_IND_INDEX_11,
- smu_data->soft_regs_start + smum_get_offsetof(smumgr,
+ ret = phm_wait_on_indirect_register(hwmgr, mmSMC_IND_INDEX_11,
+ smu_data->soft_regs_start + smum_get_offsetof(hwmgr,
SMU_SoftRegisters, UcodeLoadStatus),
fw_mask, fw_mask);
-
return ret;
}
-int smu7_reload_firmware(struct pp_smumgr *smumgr)
+int smu7_reload_firmware(struct pp_hwmgr *hwmgr)
{
- return smumgr->smumgr_funcs->start_smu(smumgr);
+ return hwmgr->smumgr_funcs->start_smu(hwmgr);
}
-static int smu7_upload_smc_firmware_data(struct pp_smumgr *smumgr, uint32_t length, uint32_t *src, uint32_t limit)
+static int smu7_upload_smc_firmware_data(struct pp_hwmgr *hwmgr, uint32_t length, uint32_t *src, uint32_t limit)
{
uint32_t byte_count = length;
PP_ASSERT_WITH_CODE((limit >= byte_count), "SMC address is beyond the SMC RAM area.", return -EINVAL);
- cgs_write_register(smumgr->device, mmSMC_IND_INDEX_11, 0x20000);
- SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_11, 1);
+ cgs_write_register(hwmgr->device, mmSMC_IND_INDEX_11, 0x20000);
+ PHM_WRITE_FIELD(hwmgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_11, 1);
for (; byte_count >= 4; byte_count -= 4)
- cgs_write_register(smumgr->device, mmSMC_IND_DATA_11, *src++);
+ cgs_write_register(hwmgr->device, mmSMC_IND_DATA_11, *src++);
- SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_11, 0);
+ PHM_WRITE_FIELD(hwmgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_11, 0);
- PP_ASSERT_WITH_CODE((0 == byte_count), "SMC size must be dividable by 4.", return -EINVAL);
+ PP_ASSERT_WITH_CODE((0 == byte_count), "SMC size must be divisible by 4.", return -EINVAL);
return 0;
}
-int smu7_upload_smu_firmware_image(struct pp_smumgr *smumgr)
+int smu7_upload_smu_firmware_image(struct pp_hwmgr *hwmgr)
{
int result = 0;
- struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend);
+ struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
struct cgs_firmware_info info = {0};
if (smu_data->security_hard_key == 1)
- cgs_get_firmware_info(smumgr->device,
+ cgs_get_firmware_info(hwmgr->device,
smu7_convert_fw_type_to_cgs(UCODE_ID_SMU), &info);
else
- cgs_get_firmware_info(smumgr->device,
+ cgs_get_firmware_info(hwmgr->device,
smu7_convert_fw_type_to_cgs(UCODE_ID_SMU_SK), &info);
- smumgr->is_kicker = info.is_kicker;
+ hwmgr->is_kicker = info.is_kicker;
- result = smu7_upload_smc_firmware_data(smumgr, info.image_size, (uint32_t *)info.kptr, SMU7_SMC_SIZE);
+ result = smu7_upload_smc_firmware_data(hwmgr, info.image_size, (uint32_t *)info.kptr, SMU7_SMC_SIZE);
return result;
}
-int smu7_init(struct pp_smumgr *smumgr)
+static void execute_pwr_table(struct pp_hwmgr *hwmgr, const PWR_Command_Table *pvirus, int size)
+{
+ int i;
+ uint32_t reg, data;
+
+ for (i = 0; i < size; i++) {
+ reg = pvirus->reg;
+ data = pvirus->data;
+ if (reg != 0xffffffff)
+ cgs_write_register(hwmgr->device, reg, data);
+ else
+ break;
+ pvirus++;
+ }
+}
+
+static void execute_pwr_dfy_table(struct pp_hwmgr *hwmgr, const PWR_DFY_Section *section)
+{
+ int i;
+
+ cgs_write_register(hwmgr->device, mmCP_DFY_CNTL, section->dfy_cntl);
+ cgs_write_register(hwmgr->device, mmCP_DFY_ADDR_HI, section->dfy_addr_hi);
+ cgs_write_register(hwmgr->device, mmCP_DFY_ADDR_LO, section->dfy_addr_lo);
+ for (i = 0; i < section->dfy_size; i++)
+ cgs_write_register(hwmgr->device, mmCP_DFY_DATA_0, section->dfy_data[i]);
+}
+
+int smu7_setup_pwr_virus(struct pp_hwmgr *hwmgr)
+{
+ execute_pwr_table(hwmgr, pwr_virus_table_pre, ARRAY_SIZE(pwr_virus_table_pre));
+ execute_pwr_dfy_table(hwmgr, &pwr_virus_section1);
+ execute_pwr_dfy_table(hwmgr, &pwr_virus_section2);
+ execute_pwr_dfy_table(hwmgr, &pwr_virus_section3);
+ execute_pwr_dfy_table(hwmgr, &pwr_virus_section4);
+ execute_pwr_dfy_table(hwmgr, &pwr_virus_section5);
+ execute_pwr_dfy_table(hwmgr, &pwr_virus_section6);
+ execute_pwr_table(hwmgr, pwr_virus_table_post, ARRAY_SIZE(pwr_virus_table_post));
+
+ return 0;
+}
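An illustrative sketch of the table contract execute_pwr_table() relies on; the real tables come from polaris10_pwrvirus.h, and everything here beyond the reg/data fields and the 0xffffffff terminator is an assumption.

static const PWR_Command_Table example_pwr_table[] = {
	{ .reg = 0x00000010, .data = 0x1234 },	/* hypothetical register write */
	{ .reg = 0xffffffff, .data = 0 },	/* terminator: execute_pwr_table() stops here */
};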
+
+int smu7_init(struct pp_hwmgr *hwmgr)
{
struct smu7_smumgr *smu_data;
uint8_t *internal_buf;
uint64_t mc_addr = 0;
/* Allocate memory for backend private data */
- smu_data = (struct smu7_smumgr *)(smumgr->backend);
+ smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
smu_data->header_buffer.data_size =
((sizeof(struct SMU_DRAMData_TOC) / 4096) + 1) * 4096;
/* Allocate FW image data structure and header buffer and
* send the header buffer address to SMU */
- smu_allocate_memory(smumgr->device,
+ smu_allocate_memory(hwmgr->device,
smu_data->header_buffer.data_size,
CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB,
PAGE_SIZE,
@@ -568,16 +609,16 @@ int smu7_init(struct pp_smumgr *smumgr)
PP_ASSERT_WITH_CODE((NULL != smu_data->header),
"Out of memory.",
- kfree(smumgr->backend);
- cgs_free_gpu_mem(smumgr->device,
+ kfree(hwmgr->smu_backend);
+ cgs_free_gpu_mem(hwmgr->device,
(cgs_handle_t)smu_data->header_buffer.handle);
return -EINVAL);
- if (cgs_is_virtualization_enabled(smumgr->device))
+ if (cgs_is_virtualization_enabled(hwmgr->device))
return 0;
smu_data->smu_buffer.data_size = 200*4096;
- smu_allocate_memory(smumgr->device,
+ smu_allocate_memory(hwmgr->device,
smu_data->smu_buffer.data_size,
CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB,
PAGE_SIZE,
@@ -591,12 +632,12 @@ int smu7_init(struct pp_smumgr *smumgr)
PP_ASSERT_WITH_CODE((NULL != internal_buf),
"Out of memory.",
- kfree(smumgr->backend);
- cgs_free_gpu_mem(smumgr->device,
+ kfree(hwmgr->smu_backend);
+ cgs_free_gpu_mem(hwmgr->device,
(cgs_handle_t)smu_data->smu_buffer.handle);
return -EINVAL);
- if (smum_is_hw_avfs_present(smumgr))
+ if (smum_is_hw_avfs_present(hwmgr))
smu_data->avfs.avfs_btc_status = AVFS_BTC_BOOT;
else
smu_data->avfs.avfs_btc_status = AVFS_BTC_NOTSUPPORTED;
@@ -605,12 +646,10 @@ int smu7_init(struct pp_smumgr *smumgr)
}
-int smu7_smu_fini(struct pp_smumgr *smumgr)
+int smu7_smu_fini(struct pp_hwmgr *hwmgr)
{
- if (smumgr->backend) {
- kfree(smumgr->backend);
- smumgr->backend = NULL;
- }
- cgs_rel_firmware(smumgr->device, CGS_UCODE_ID_SMU);
+ kfree(hwmgr->smu_backend);
+ hwmgr->smu_backend = NULL;
+ cgs_rel_firmware(hwmgr->device, CGS_UCODE_ID_SMU);
return 0;
}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h
index ee5e32d2921e..c87263bc0caa 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h
@@ -60,32 +60,34 @@ struct smu7_smumgr {
};
-int smu7_copy_bytes_from_smc(struct pp_smumgr *smumgr, uint32_t smc_start_address,
+int smu7_copy_bytes_from_smc(struct pp_hwmgr *hwmgr, uint32_t smc_start_address,
uint32_t *dest, uint32_t byte_count, uint32_t limit);
-int smu7_copy_bytes_to_smc(struct pp_smumgr *smumgr, uint32_t smc_start_address,
+int smu7_copy_bytes_to_smc(struct pp_hwmgr *hwmgr, uint32_t smc_start_address,
const uint8_t *src, uint32_t byte_count, uint32_t limit);
-int smu7_program_jump_on_start(struct pp_smumgr *smumgr);
-bool smu7_is_smc_ram_running(struct pp_smumgr *smumgr);
-int smu7_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg);
-int smu7_send_msg_to_smc_without_waiting(struct pp_smumgr *smumgr, uint16_t msg);
-int smu7_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr, uint16_t msg,
+int smu7_program_jump_on_start(struct pp_hwmgr *hwmgr);
+bool smu7_is_smc_ram_running(struct pp_hwmgr *hwmgr);
+int smu7_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg);
+int smu7_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr, uint16_t msg);
+int smu7_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr, uint16_t msg,
uint32_t parameter);
-int smu7_send_msg_to_smc_with_parameter_without_waiting(struct pp_smumgr *smumgr,
+int smu7_send_msg_to_smc_with_parameter_without_waiting(struct pp_hwmgr *hwmgr,
uint16_t msg, uint32_t parameter);
-int smu7_send_msg_to_smc_offset(struct pp_smumgr *smumgr);
-int smu7_wait_for_smc_inactive(struct pp_smumgr *smumgr);
+int smu7_send_msg_to_smc_offset(struct pp_hwmgr *hwmgr);
+int smu7_wait_for_smc_inactive(struct pp_hwmgr *hwmgr);
enum cgs_ucode_id smu7_convert_fw_type_to_cgs(uint32_t fw_type);
-int smu7_read_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr,
+int smu7_read_smc_sram_dword(struct pp_hwmgr *hwmgr, uint32_t smc_addr,
uint32_t *value, uint32_t limit);
-int smu7_write_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr,
+int smu7_write_smc_sram_dword(struct pp_hwmgr *hwmgr, uint32_t smc_addr,
uint32_t value, uint32_t limit);
-int smu7_request_smu_load_fw(struct pp_smumgr *smumgr);
-int smu7_check_fw_load_finish(struct pp_smumgr *smumgr, uint32_t fw_type);
-int smu7_reload_firmware(struct pp_smumgr *smumgr);
-int smu7_upload_smu_firmware_image(struct pp_smumgr *smumgr);
-int smu7_init(struct pp_smumgr *smumgr);
-int smu7_smu_fini(struct pp_smumgr *smumgr);
+int smu7_request_smu_load_fw(struct pp_hwmgr *hwmgr);
+int smu7_check_fw_load_finish(struct pp_hwmgr *hwmgr, uint32_t fw_type);
+int smu7_reload_firmware(struct pp_hwmgr *hwmgr);
+int smu7_upload_smu_firmware_image(struct pp_hwmgr *hwmgr);
+int smu7_init(struct pp_hwmgr *hwmgr);
+int smu7_smu_fini(struct pp_hwmgr *hwmgr);
-#endif
\ No newline at end of file
+int smu7_setup_pwr_virus(struct pp_hwmgr *hwmgr);
+
+#endif
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
index 3bdf6478de7f..867388456530 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
@@ -27,7 +27,6 @@
#include <linux/slab.h>
#include <linux/types.h>
#include <drm/amdgpu_drm.h>
-#include "pp_instance.h"
#include "smumgr.h"
#include "cgs_common.h"
@@ -46,88 +45,18 @@ MODULE_FIRMWARE("amdgpu/polaris12_smc.bin");
MODULE_FIRMWARE("amdgpu/vega10_smc.bin");
MODULE_FIRMWARE("amdgpu/vega10_acg_smc.bin");
-int smum_early_init(struct pp_instance *handle)
+int smum_thermal_avfs_enable(struct pp_hwmgr *hwmgr)
{
- struct pp_smumgr *smumgr;
-
- if (handle == NULL)
- return -EINVAL;
-
- smumgr = kzalloc(sizeof(struct pp_smumgr), GFP_KERNEL);
- if (smumgr == NULL)
- return -ENOMEM;
-
- smumgr->device = handle->device;
- smumgr->chip_family = handle->chip_family;
- smumgr->chip_id = handle->chip_id;
- smumgr->usec_timeout = AMD_MAX_USEC_TIMEOUT;
- smumgr->reload_fw = 1;
- handle->smu_mgr = smumgr;
-
- switch (smumgr->chip_family) {
- case AMDGPU_FAMILY_CZ:
- smumgr->smumgr_funcs = &cz_smu_funcs;
- break;
- case AMDGPU_FAMILY_VI:
- switch (smumgr->chip_id) {
- case CHIP_TOPAZ:
- smumgr->smumgr_funcs = &iceland_smu_funcs;
- break;
- case CHIP_TONGA:
- smumgr->smumgr_funcs = &tonga_smu_funcs;
- break;
- case CHIP_FIJI:
- smumgr->smumgr_funcs = &fiji_smu_funcs;
- break;
- case CHIP_POLARIS11:
- case CHIP_POLARIS10:
- case CHIP_POLARIS12:
- smumgr->smumgr_funcs = &polaris10_smu_funcs;
- break;
- default:
- return -EINVAL;
- }
- break;
- case AMDGPU_FAMILY_AI:
- switch (smumgr->chip_id) {
- case CHIP_VEGA10:
- smumgr->smumgr_funcs = &vega10_smu_funcs;
- break;
- default:
- return -EINVAL;
- }
- break;
- case AMDGPU_FAMILY_RV:
- switch (smumgr->chip_id) {
- case CHIP_RAVEN:
- smumgr->smumgr_funcs = &rv_smu_funcs;
- break;
- default:
- return -EINVAL;
- }
- break;
- default:
- kfree(smumgr);
- return -EINVAL;
- }
-
- return 0;
-}
-
-int smum_thermal_avfs_enable(struct pp_hwmgr *hwmgr,
- void *input, void *output, void *storage, int result)
-{
- if (NULL != hwmgr->smumgr->smumgr_funcs->thermal_avfs_enable)
- return hwmgr->smumgr->smumgr_funcs->thermal_avfs_enable(hwmgr);
+ if (NULL != hwmgr->smumgr_funcs->thermal_avfs_enable)
+ return hwmgr->smumgr_funcs->thermal_avfs_enable(hwmgr);
return 0;
}
-int smum_thermal_setup_fan_table(struct pp_hwmgr *hwmgr,
- void *input, void *output, void *storage, int result)
+int smum_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
{
- if (NULL != hwmgr->smumgr->smumgr_funcs->thermal_setup_fan_table)
- return hwmgr->smumgr->smumgr_funcs->thermal_setup_fan_table(hwmgr);
+ if (NULL != hwmgr->smumgr_funcs->thermal_setup_fan_table)
+ return hwmgr->smumgr_funcs->thermal_setup_fan_table(hwmgr);
return 0;
}
@@ -135,8 +64,8 @@ int smum_thermal_setup_fan_table(struct pp_hwmgr *hwmgr,
int smum_update_sclk_threshold(struct pp_hwmgr *hwmgr)
{
- if (NULL != hwmgr->smumgr->smumgr_funcs->update_sclk_threshold)
- return hwmgr->smumgr->smumgr_funcs->update_sclk_threshold(hwmgr);
+ if (NULL != hwmgr->smumgr_funcs->update_sclk_threshold)
+ return hwmgr->smumgr_funcs->update_sclk_threshold(hwmgr);
return 0;
}
@@ -144,163 +73,75 @@ int smum_update_sclk_threshold(struct pp_hwmgr *hwmgr)
int smum_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type)
{
- if (NULL != hwmgr->smumgr->smumgr_funcs->update_smc_table)
- return hwmgr->smumgr->smumgr_funcs->update_smc_table(hwmgr, type);
+ if (NULL != hwmgr->smumgr_funcs->update_smc_table)
+ return hwmgr->smumgr_funcs->update_smc_table(hwmgr, type);
return 0;
}
-uint32_t smum_get_offsetof(struct pp_smumgr *smumgr, uint32_t type, uint32_t member)
+uint32_t smum_get_offsetof(struct pp_hwmgr *hwmgr, uint32_t type, uint32_t member)
{
- if (NULL != smumgr->smumgr_funcs->get_offsetof)
- return smumgr->smumgr_funcs->get_offsetof(type, member);
+ if (NULL != hwmgr->smumgr_funcs->get_offsetof)
+ return hwmgr->smumgr_funcs->get_offsetof(type, member);
return 0;
}
int smum_process_firmware_header(struct pp_hwmgr *hwmgr)
{
- if (NULL != hwmgr->smumgr->smumgr_funcs->process_firmware_header)
- return hwmgr->smumgr->smumgr_funcs->process_firmware_header(hwmgr);
+ if (NULL != hwmgr->smumgr_funcs->process_firmware_header)
+ return hwmgr->smumgr_funcs->process_firmware_header(hwmgr);
return 0;
}
-int smum_get_argument(struct pp_smumgr *smumgr)
+int smum_get_argument(struct pp_hwmgr *hwmgr)
{
- if (NULL != smumgr->smumgr_funcs->get_argument)
- return smumgr->smumgr_funcs->get_argument(smumgr);
+ if (NULL != hwmgr->smumgr_funcs->get_argument)
+ return hwmgr->smumgr_funcs->get_argument(hwmgr);
return 0;
}
-uint32_t smum_get_mac_definition(struct pp_smumgr *smumgr, uint32_t value)
+uint32_t smum_get_mac_definition(struct pp_hwmgr *hwmgr, uint32_t value)
{
- if (NULL != smumgr->smumgr_funcs->get_mac_definition)
- return smumgr->smumgr_funcs->get_mac_definition(value);
+ if (NULL != hwmgr->smumgr_funcs->get_mac_definition)
+ return hwmgr->smumgr_funcs->get_mac_definition(value);
return 0;
}
-int smum_download_powerplay_table(struct pp_smumgr *smumgr,
- void **table)
+int smum_download_powerplay_table(struct pp_hwmgr *hwmgr, void **table)
{
- if (NULL != smumgr->smumgr_funcs->download_pptable_settings)
- return smumgr->smumgr_funcs->download_pptable_settings(smumgr,
+ if (NULL != hwmgr->smumgr_funcs->download_pptable_settings)
+ return hwmgr->smumgr_funcs->download_pptable_settings(hwmgr,
table);
return 0;
}
-int smum_upload_powerplay_table(struct pp_smumgr *smumgr)
+int smum_upload_powerplay_table(struct pp_hwmgr *hwmgr)
{
- if (NULL != smumgr->smumgr_funcs->upload_pptable_settings)
- return smumgr->smumgr_funcs->upload_pptable_settings(smumgr);
+ if (NULL != hwmgr->smumgr_funcs->upload_pptable_settings)
+ return hwmgr->smumgr_funcs->upload_pptable_settings(hwmgr);
return 0;
}
-int smum_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg)
+int smum_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
{
- if (smumgr == NULL || smumgr->smumgr_funcs->send_msg_to_smc == NULL)
+ if (hwmgr == NULL || hwmgr->smumgr_funcs->send_msg_to_smc == NULL)
return -EINVAL;
- return smumgr->smumgr_funcs->send_msg_to_smc(smumgr, msg);
+ return hwmgr->smumgr_funcs->send_msg_to_smc(hwmgr, msg);
}
-int smum_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr,
+int smum_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
uint16_t msg, uint32_t parameter)
{
- if (smumgr == NULL ||
- smumgr->smumgr_funcs->send_msg_to_smc_with_parameter == NULL)
- return -EINVAL;
- return smumgr->smumgr_funcs->send_msg_to_smc_with_parameter(
- smumgr, msg, parameter);
-}
-
-/*
- * Returns once the part of the register indicated by the mask has
- * reached the given value.
- */
-int smum_wait_on_register(struct pp_smumgr *smumgr,
- uint32_t index,
- uint32_t value, uint32_t mask)
-{
- uint32_t i;
- uint32_t cur_value;
-
- if (smumgr == NULL || smumgr->device == NULL)
- return -EINVAL;
-
- for (i = 0; i < smumgr->usec_timeout; i++) {
- cur_value = cgs_read_register(smumgr->device, index);
- if ((cur_value & mask) == (value & mask))
- break;
- udelay(1);
- }
-
- /* timeout means wrong logic*/
- if (i == smumgr->usec_timeout)
- return -1;
-
- return 0;
-}
-
-int smum_wait_for_register_unequal(struct pp_smumgr *smumgr,
- uint32_t index,
- uint32_t value, uint32_t mask)
-{
- uint32_t i;
- uint32_t cur_value;
-
- if (smumgr == NULL)
+ if (hwmgr == NULL ||
+ hwmgr->smumgr_funcs->send_msg_to_smc_with_parameter == NULL)
return -EINVAL;
-
- for (i = 0; i < smumgr->usec_timeout; i++) {
- cur_value = cgs_read_register(smumgr->device,
- index);
- if ((cur_value & mask) != (value & mask))
- break;
- udelay(1);
- }
-
- /* timeout means wrong logic */
- if (i == smumgr->usec_timeout)
- return -1;
-
- return 0;
-}
-
-
-/*
- * Returns once the part of the register indicated by the mask
- * has reached the given value.The indirect space is described by
- * giving the memory-mapped index of the indirect index register.
- */
-int smum_wait_on_indirect_register(struct pp_smumgr *smumgr,
- uint32_t indirect_port,
- uint32_t index,
- uint32_t value,
- uint32_t mask)
-{
- if (smumgr == NULL || smumgr->device == NULL)
- return -EINVAL;
-
- cgs_write_register(smumgr->device, indirect_port, index);
- return smum_wait_on_register(smumgr, indirect_port + 1,
- mask, value);
-}
-
-void smum_wait_for_indirect_register_unequal(
- struct pp_smumgr *smumgr,
- uint32_t indirect_port,
- uint32_t index,
- uint32_t value,
- uint32_t mask)
-{
- if (smumgr == NULL || smumgr->device == NULL)
- return;
- cgs_write_register(smumgr->device, indirect_port, index);
- smum_wait_for_register_unequal(smumgr, indirect_port + 1,
- value, mask);
+ return hwmgr->smumgr_funcs->send_msg_to_smc_with_parameter(
+ hwmgr, msg, parameter);
}
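A hypothetical wrapper (not part of the patch) showing the dispatch pattern the conversions above settle on: each smum_*() helper reaches the backend through hwmgr->smumgr_funcs and treats a missing callback as a no-op success.

static int example_smum_wrapper(struct pp_hwmgr *hwmgr)
{
	if (NULL != hwmgr->smumgr_funcs->update_sclk_threshold)
		return hwmgr->smumgr_funcs->update_sclk_threshold(hwmgr);

	return 0;
}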
int smu_allocate_memory(void *device, uint32_t size,
@@ -316,7 +157,7 @@ int smu_allocate_memory(void *device, uint32_t size,
return -EINVAL;
ret = cgs_alloc_gpu_mem(device, type, size, byte_align,
- 0, 0, (cgs_handle_t *)handle);
+ (cgs_handle_t *)handle);
if (ret)
return -ENOMEM;
@@ -356,24 +197,24 @@ int smu_free_memory(void *device, void *handle)
int smum_init_smc_table(struct pp_hwmgr *hwmgr)
{
- if (NULL != hwmgr->smumgr->smumgr_funcs->init_smc_table)
- return hwmgr->smumgr->smumgr_funcs->init_smc_table(hwmgr);
+ if (NULL != hwmgr->smumgr_funcs->init_smc_table)
+ return hwmgr->smumgr_funcs->init_smc_table(hwmgr);
return 0;
}
int smum_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
{
- if (NULL != hwmgr->smumgr->smumgr_funcs->populate_all_graphic_levels)
- return hwmgr->smumgr->smumgr_funcs->populate_all_graphic_levels(hwmgr);
+ if (NULL != hwmgr->smumgr_funcs->populate_all_graphic_levels)
+ return hwmgr->smumgr_funcs->populate_all_graphic_levels(hwmgr);
return 0;
}
int smum_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
{
- if (NULL != hwmgr->smumgr->smumgr_funcs->populate_all_memory_levels)
- return hwmgr->smumgr->smumgr_funcs->populate_all_memory_levels(hwmgr);
+ if (NULL != hwmgr->smumgr_funcs->populate_all_memory_levels)
+ return hwmgr->smumgr_funcs->populate_all_memory_levels(hwmgr);
return 0;
}
@@ -381,16 +222,16 @@ int smum_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
/*this interface is needed by island ci/vi */
int smum_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
{
- if (NULL != hwmgr->smumgr->smumgr_funcs->initialize_mc_reg_table)
- return hwmgr->smumgr->smumgr_funcs->initialize_mc_reg_table(hwmgr);
+ if (NULL != hwmgr->smumgr_funcs->initialize_mc_reg_table)
+ return hwmgr->smumgr_funcs->initialize_mc_reg_table(hwmgr);
return 0;
}
bool smum_is_dpm_running(struct pp_hwmgr *hwmgr)
{
- if (NULL != hwmgr->smumgr->smumgr_funcs->is_dpm_running)
- return hwmgr->smumgr->smumgr_funcs->is_dpm_running(hwmgr);
+ if (NULL != hwmgr->smumgr_funcs->is_dpm_running)
+ return hwmgr->smumgr_funcs->is_dpm_running(hwmgr);
return true;
}
@@ -398,17 +239,17 @@ bool smum_is_dpm_running(struct pp_hwmgr *hwmgr)
int smum_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr,
struct amd_pp_profile *request)
{
- if (hwmgr->smumgr->smumgr_funcs->populate_requested_graphic_levels)
- return hwmgr->smumgr->smumgr_funcs->populate_requested_graphic_levels(
+ if (hwmgr->smumgr_funcs->populate_requested_graphic_levels)
+ return hwmgr->smumgr_funcs->populate_requested_graphic_levels(
hwmgr, request);
return 0;
}
-bool smum_is_hw_avfs_present(struct pp_smumgr *smumgr)
+bool smum_is_hw_avfs_present(struct pp_hwmgr *hwmgr)
{
- if (smumgr->smumgr_funcs->is_hw_avfs_present)
- return smumgr->smumgr_funcs->is_hw_avfs_present(smumgr);
+ if (hwmgr->smumgr_funcs->is_hw_avfs_present)
+ return hwmgr->smumgr_funcs->is_hw_avfs_present(hwmgr);
return false;
}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.c
deleted file mode 100644
index 65d3a4893958..000000000000
--- a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.c
+++ /dev/null
@@ -1,3275 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include "pp_debug.h"
-#include "tonga_smc.h"
-#include "smu7_dyn_defaults.h"
-
-#include "smu7_hwmgr.h"
-#include "hardwaremanager.h"
-#include "ppatomctrl.h"
-#include "cgs_common.h"
-#include "atombios.h"
-#include "tonga_smumgr.h"
-#include "pppcielanes.h"
-#include "pp_endian.h"
-#include "smu7_ppsmc.h"
-
-#include "smu72_discrete.h"
-
-#include "smu/smu_7_1_2_d.h"
-#include "smu/smu_7_1_2_sh_mask.h"
-
-#include "gmc/gmc_8_1_d.h"
-#include "gmc/gmc_8_1_sh_mask.h"
-
-#include "bif/bif_5_0_d.h"
-#include "bif/bif_5_0_sh_mask.h"
-
-#include "dce/dce_10_0_d.h"
-#include "dce/dce_10_0_sh_mask.h"
-
-
-#define VOLTAGE_SCALE 4
-#define POWERTUNE_DEFAULT_SET_MAX 1
-#define VOLTAGE_VID_OFFSET_SCALE1 625
-#define VOLTAGE_VID_OFFSET_SCALE2 100
-#define MC_CG_ARB_FREQ_F1 0x0b
-#define VDDC_VDDCI_DELTA 200
-
-
-static const struct tonga_pt_defaults tonga_power_tune_data_set_array[POWERTUNE_DEFAULT_SET_MAX] = {
-/* sviLoadLIneEn, SviLoadLineVddC, TDC_VDDC_ThrottleReleaseLimitPerc, TDC_MAWt,
- * TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac, BAPM_TEMP_GRADIENT
- */
- {1, 0xF, 0xFD, 0x19,
- 5, 45, 0, 0xB0000,
- {0x79, 0x253, 0x25D, 0xAE, 0x72, 0x80, 0x83, 0x86, 0x6F, 0xC8,
- 0xC9, 0xC9, 0x2F, 0x4D, 0x61},
- {0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203,
- 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4}
- },
-};
-
-/* [Fmin, Fmax, LDO_REFSEL, USE_FOR_LOW_FREQ] */
-static const uint16_t tonga_clock_stretcher_lookup_table[2][4] = {
- {600, 1050, 3, 0},
- {600, 1050, 6, 1}
-};
-
-/* [FF, SS] type, [] 4 voltage ranges,
- * and [Floor Freq, Boundary Freq, VID min , VID max]
- */
-static const uint32_t tonga_clock_stretcher_ddt_table[2][4][4] = {
- { {265, 529, 120, 128}, {325, 650, 96, 119}, {430, 860, 32, 95}, {0, 0, 0, 31} },
- { {275, 550, 104, 112}, {319, 638, 96, 103}, {360, 720, 64, 95}, {384, 768, 32, 63} }
-};
-
-/* [Use_For_Low_freq] value, [0%, 5%, 10%, 7.14%, 14.28%, 20%] */
-static const uint8_t tonga_clock_stretch_amount_conversion[2][6] = {
- {0, 1, 3, 2, 4, 5},
- {0, 2, 4, 5, 6, 5}
-};
-
-/* PPGen has the gain setting generated in x * 100 unit
- * This function is to convert the unit to x * 4096(0x1000) unit.
- * This is the unit expected by SMC firmware
- */
-
-
-static int tonga_get_dependecy_volt_by_clk(struct pp_hwmgr *hwmgr,
- phm_ppt_v1_clock_voltage_dependency_table *allowed_clock_voltage_table,
- uint32_t clock, SMU_VoltageLevel *voltage, uint32_t *mvdd)
-{
- uint32_t i = 0;
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *pptable_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
- /* clock - voltage dependency table is empty table */
- if (allowed_clock_voltage_table->count == 0)
- return -EINVAL;
-
- for (i = 0; i < allowed_clock_voltage_table->count; i++) {
- /* find first sclk bigger than request */
- if (allowed_clock_voltage_table->entries[i].clk >= clock) {
- voltage->VddGfx = phm_get_voltage_index(
- pptable_info->vddgfx_lookup_table,
- allowed_clock_voltage_table->entries[i].vddgfx);
- voltage->Vddc = phm_get_voltage_index(
- pptable_info->vddc_lookup_table,
- allowed_clock_voltage_table->entries[i].vddc);
-
- if (allowed_clock_voltage_table->entries[i].vddci)
- voltage->Vddci =
- phm_get_voltage_id(&data->vddci_voltage_table, allowed_clock_voltage_table->entries[i].vddci);
- else
- voltage->Vddci =
- phm_get_voltage_id(&data->vddci_voltage_table,
- allowed_clock_voltage_table->entries[i].vddc - VDDC_VDDCI_DELTA);
-
-
- if (allowed_clock_voltage_table->entries[i].mvdd)
- *mvdd = (uint32_t) allowed_clock_voltage_table->entries[i].mvdd;
-
- voltage->Phases = 1;
- return 0;
- }
- }
-
- /* sclk is bigger than max sclk in the dependence table */
- voltage->VddGfx = phm_get_voltage_index(pptable_info->vddgfx_lookup_table,
- allowed_clock_voltage_table->entries[i-1].vddgfx);
- voltage->Vddc = phm_get_voltage_index(pptable_info->vddc_lookup_table,
- allowed_clock_voltage_table->entries[i-1].vddc);
-
- if (allowed_clock_voltage_table->entries[i-1].vddci)
- voltage->Vddci = phm_get_voltage_id(&data->vddci_voltage_table,
- allowed_clock_voltage_table->entries[i-1].vddci);
-
- if (allowed_clock_voltage_table->entries[i-1].mvdd)
- *mvdd = (uint32_t) allowed_clock_voltage_table->entries[i-1].mvdd;
-
- return 0;
-}
-
-
-/**
- * Vddc table preparation for SMC.
- *
- * @param hwmgr the address of the hardware manager
- * @param table the SMC DPM table structure to be populated
- * @return always 0
- */
-static int tonga_populate_smc_vddc_table(struct pp_hwmgr *hwmgr,
- SMU72_Discrete_DpmTable *table)
-{
- unsigned int count;
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-
- if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
- table->VddcLevelCount = data->vddc_voltage_table.count;
- for (count = 0; count < table->VddcLevelCount; count++) {
- table->VddcTable[count] =
- PP_HOST_TO_SMC_US(data->vddc_voltage_table.entries[count].value * VOLTAGE_SCALE);
- }
- CONVERT_FROM_HOST_TO_SMC_UL(table->VddcLevelCount);
- }
- return 0;
-}
-
-/**
- * VddGfx table preparation for SMC.
- *
- * @param hwmgr the address of the hardware manager
- * @param table the SMC DPM table structure to be populated
- * @return always 0
- */
-static int tonga_populate_smc_vdd_gfx_table(struct pp_hwmgr *hwmgr,
- SMU72_Discrete_DpmTable *table)
-{
- unsigned int count;
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-
- if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_gfx_control) {
- table->VddGfxLevelCount = data->vddgfx_voltage_table.count;
- for (count = 0; count < data->vddgfx_voltage_table.count; count++) {
- table->VddGfxTable[count] =
- PP_HOST_TO_SMC_US(data->vddgfx_voltage_table.entries[count].value * VOLTAGE_SCALE);
- }
- CONVERT_FROM_HOST_TO_SMC_UL(table->VddGfxLevelCount);
- }
- return 0;
-}
-
-/**
- * Vddci table preparation for SMC.
- *
- * @param *hwmgr The address of the hardware manager.
- * @param *table The SMC DPM table structure to be populated.
- * @return 0
- */
-static int tonga_populate_smc_vdd_ci_table(struct pp_hwmgr *hwmgr,
- SMU72_Discrete_DpmTable *table)
-{
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- uint32_t count;
-
- table->VddciLevelCount = data->vddci_voltage_table.count;
- for (count = 0; count < table->VddciLevelCount; count++) {
- if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
- table->VddciTable[count] =
- PP_HOST_TO_SMC_US(data->vddci_voltage_table.entries[count].value * VOLTAGE_SCALE);
- } else if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
- table->SmioTable1.Pattern[count].Voltage =
- PP_HOST_TO_SMC_US(data->vddci_voltage_table.entries[count].value * VOLTAGE_SCALE);
- /* Index into DpmTable.Smio. Drive bits from Smio entry to get this voltage level. */
- table->SmioTable1.Pattern[count].Smio =
- (uint8_t) count;
- table->Smio[count] |=
- data->vddci_voltage_table.entries[count].smio_low;
- table->VddciTable[count] =
- PP_HOST_TO_SMC_US(data->vddci_voltage_table.entries[count].value * VOLTAGE_SCALE);
- }
- }
-
- table->SmioMask1 = data->vddci_voltage_table.mask_low;
- CONVERT_FROM_HOST_TO_SMC_UL(table->VddciLevelCount);
-
- return 0;
-}
-
-/**
- * Mvdd table preparation for SMC.
- *
- * @param *hwmgr The address of the hardware manager.
- * @param *table The SMC DPM table structure to be populated.
- * @return 0
- */
-static int tonga_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
- SMU72_Discrete_DpmTable *table)
-{
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- uint32_t count;
-
- if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
- table->MvddLevelCount = data->mvdd_voltage_table.count;
- for (count = 0; count < table->MvddLevelCount; count++) {
- table->SmioTable2.Pattern[count].Voltage =
- PP_HOST_TO_SMC_US(data->mvdd_voltage_table.entries[count].value * VOLTAGE_SCALE);
- /* Index into DpmTable.Smio. Drive bits from Smio entry to get this voltage level.*/
- table->SmioTable2.Pattern[count].Smio =
- (uint8_t) count;
- table->Smio[count] |=
- data->mvdd_voltage_table.entries[count].smio_low;
- }
- table->SmioMask2 = data->mvdd_voltage_table.mask_low;
-
- CONVERT_FROM_HOST_TO_SMC_UL(table->MvddLevelCount);
- }
-
- return 0;
-}
-
-/**
- * Preparation of vddc and vddgfx CAC tables for SMC.
- *
- * @param hwmgr the address of the hardware manager
- * @param table the SMC DPM table structure to be populated
- * @return always 0
- */
-static int tonga_populate_cac_tables(struct pp_hwmgr *hwmgr,
- SMU72_Discrete_DpmTable *table)
-{
- uint32_t count;
- uint8_t index = 0;
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *pptable_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct phm_ppt_v1_voltage_lookup_table *vddgfx_lookup_table =
- pptable_info->vddgfx_lookup_table;
- struct phm_ppt_v1_voltage_lookup_table *vddc_lookup_table =
- pptable_info->vddc_lookup_table;
-
- /* table is already swapped, so in order to use the value from it
- * we need to swap it back.
- */
- uint32_t vddc_level_count = PP_SMC_TO_HOST_UL(table->VddcLevelCount);
- uint32_t vddgfx_level_count = PP_SMC_TO_HOST_UL(table->VddGfxLevelCount);
-
- for (count = 0; count < vddc_level_count; count++) {
- /* We are populating vddc CAC data to BapmVddc table in split and merged mode */
- index = phm_get_voltage_index(vddc_lookup_table,
- data->vddc_voltage_table.entries[count].value);
- table->BapmVddcVidLoSidd[count] =
- convert_to_vid(vddc_lookup_table->entries[index].us_cac_low);
- table->BapmVddcVidHiSidd[count] =
- convert_to_vid(vddc_lookup_table->entries[index].us_cac_mid);
- table->BapmVddcVidHiSidd2[count] =
- convert_to_vid(vddc_lookup_table->entries[index].us_cac_high);
- }
-
- if ((data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2)) {
- /* We are populating vddgfx CAC data to BapmVddgfx table in split mode */
- for (count = 0; count < vddgfx_level_count; count++) {
- index = phm_get_voltage_index(vddgfx_lookup_table,
- convert_to_vid(vddgfx_lookup_table->entries[index].us_cac_mid));
- table->BapmVddGfxVidHiSidd2[count] =
- convert_to_vid(vddgfx_lookup_table->entries[index].us_cac_high);
- }
- } else {
- for (count = 0; count < vddc_level_count; count++) {
- index = phm_get_voltage_index(vddc_lookup_table,
- data->vddc_voltage_table.entries[count].value);
- table->BapmVddGfxVidLoSidd[count] =
- convert_to_vid(vddc_lookup_table->entries[index].us_cac_low);
- table->BapmVddGfxVidHiSidd[count] =
- convert_to_vid(vddc_lookup_table->entries[index].us_cac_mid);
- table->BapmVddGfxVidHiSidd2[count] =
- convert_to_vid(vddc_lookup_table->entries[index].us_cac_high);
- }
- }
-
- return 0;
-}
-
-/**
- * Preparation of voltage tables for SMC.
- *
- * @param hwmgr the address of the hardware manager
- * @param table the SMC DPM table structure to be populated
- * @return always 0
- */
-
-static int tonga_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr,
- SMU72_Discrete_DpmTable *table)
-{
- int result;
-
- result = tonga_populate_smc_vddc_table(hwmgr, table);
- PP_ASSERT_WITH_CODE(!result,
- "can not populate VDDC voltage table to SMC",
- return -EINVAL);
-
- result = tonga_populate_smc_vdd_ci_table(hwmgr, table);
- PP_ASSERT_WITH_CODE(!result,
- "can not populate VDDCI voltage table to SMC",
- return -EINVAL);
-
- result = tonga_populate_smc_vdd_gfx_table(hwmgr, table);
- PP_ASSERT_WITH_CODE(!result,
- "can not populate VDDGFX voltage table to SMC",
- return -EINVAL);
-
- result = tonga_populate_smc_mvdd_table(hwmgr, table);
- PP_ASSERT_WITH_CODE(!result,
- "can not populate MVDD voltage table to SMC",
- return -EINVAL);
-
- result = tonga_populate_cac_tables(hwmgr, table);
- PP_ASSERT_WITH_CODE(!result,
- "can not populate CAC voltage tables to SMC",
- return -EINVAL);
-
- return 0;
-}
-
-static int tonga_populate_ulv_level(struct pp_hwmgr *hwmgr,
- struct SMU72_Discrete_Ulv *state)
-{
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
- state->CcPwrDynRm = 0;
- state->CcPwrDynRm1 = 0;
-
- state->VddcOffset = (uint16_t) table_info->us_ulv_voltage_offset;
- state->VddcOffsetVid = (uint8_t)(table_info->us_ulv_voltage_offset *
- VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
-
- state->VddcPhase = 1;
-
- CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm);
- CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm1);
- CONVERT_FROM_HOST_TO_SMC_US(state->VddcOffset);
-
- return 0;
-}
-
-static int tonga_populate_ulv_state(struct pp_hwmgr *hwmgr,
- struct SMU72_Discrete_DpmTable *table)
-{
- return tonga_populate_ulv_level(hwmgr, &table->Ulv);
-}
-
-static int tonga_populate_smc_link_level(struct pp_hwmgr *hwmgr, SMU72_Discrete_DpmTable *table)
-{
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct smu7_dpm_table *dpm_table = &data->dpm_table;
- struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smumgr->backend);
- uint32_t i;
-
- /* Index (dpm_table->pcie_speed_table.count) is reserved for PCIE boot level. */
- for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) {
- table->LinkLevel[i].PcieGenSpeed =
- (uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value;
- table->LinkLevel[i].PcieLaneCount =
- (uint8_t)encode_pcie_lane_width(dpm_table->pcie_speed_table.dpm_levels[i].param1);
- table->LinkLevel[i].EnabledForActivity =
- 1;
- table->LinkLevel[i].SPC =
- (uint8_t)(data->pcie_spc_cap & 0xff);
- table->LinkLevel[i].DownThreshold =
- PP_HOST_TO_SMC_UL(5);
- table->LinkLevel[i].UpThreshold =
- PP_HOST_TO_SMC_UL(30);
- }
-
- smu_data->smc_state_table.LinkLevelCount =
- (uint8_t)dpm_table->pcie_speed_table.count;
- data->dpm_level_enable_mask.pcie_dpm_enable_mask =
- phm_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
-
- return 0;
-}
-
-/**
- * Calculates the SCLK dividers using the provided engine clock
- *
- * @param hwmgr the address of the hardware manager
- * @param engine_clock the engine clock to use to populate the structure
- * @param sclk the SMC SCLK structure to be populated
- */
-static int tonga_calculate_sclk_params(struct pp_hwmgr *hwmgr,
- uint32_t engine_clock, SMU72_Discrete_GraphicsLevel *sclk)
-{
- const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- pp_atomctrl_clock_dividers_vi dividers;
- uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL;
- uint32_t spll_func_cntl_3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
- uint32_t spll_func_cntl_4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
- uint32_t cg_spll_spread_spectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
- uint32_t cg_spll_spread_spectrum_2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
- uint32_t reference_clock;
- uint32_t reference_divider;
- uint32_t fbdiv;
- int result;
-
- /* get the engine clock dividers for this clock value*/
- result = atomctrl_get_engine_pll_dividers_vi(hwmgr, engine_clock, &dividers);
-
- PP_ASSERT_WITH_CODE(result == 0,
- "Error retrieving Engine Clock dividers from VBIOS.", return result);
-
- /* To get FBDIV we need to multiply this by 16384 and divide it by Fref.*/
- reference_clock = atomctrl_get_reference_clock(hwmgr);
-
- reference_divider = 1 + dividers.uc_pll_ref_div;
-
- /* low 14 bits is fraction and high 12 bits is divider*/
- fbdiv = dividers.ul_fb_div.ul_fb_divider & 0x3FFFFFF;
-
- /* SPLL_FUNC_CNTL setup*/
- spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
- CG_SPLL_FUNC_CNTL, SPLL_REF_DIV, dividers.uc_pll_ref_div);
- spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
- CG_SPLL_FUNC_CNTL, SPLL_PDIV_A, dividers.uc_pll_post_div);
-
- /* SPLL_FUNC_CNTL_3 setup*/
- spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3,
- CG_SPLL_FUNC_CNTL_3, SPLL_FB_DIV, fbdiv);
-
- /* set to use fractional accumulation*/
- spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3,
- CG_SPLL_FUNC_CNTL_3, SPLL_DITHEN, 1);
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_EngineSpreadSpectrumSupport)) {
- pp_atomctrl_internal_ss_info ss_info;
-
- uint32_t vcoFreq = engine_clock * dividers.uc_pll_post_div;
- if (0 == atomctrl_get_engine_clock_spread_spectrum(hwmgr, vcoFreq, &ss_info)) {
- /*
- * ss_info.speed_spectrum_percentage -- in unit of 0.01%
- * ss_info.speed_spectrum_rate -- in unit of khz
- */
- /* clks = reference_clock * 10 / (REFDIV + 1) / speed_spectrum_rate / 2 */
- uint32_t clkS = reference_clock * 5 / (reference_divider * ss_info.speed_spectrum_rate);
-
- /* clkv = 2 * D * fbdiv / NS */
- uint32_t clkV = 4 * ss_info.speed_spectrum_percentage * fbdiv / (clkS * 10000);
-
- cg_spll_spread_spectrum =
- PHM_SET_FIELD(cg_spll_spread_spectrum, CG_SPLL_SPREAD_SPECTRUM, CLKS, clkS);
- cg_spll_spread_spectrum =
- PHM_SET_FIELD(cg_spll_spread_spectrum, CG_SPLL_SPREAD_SPECTRUM, SSEN, 1);
- cg_spll_spread_spectrum_2 =
- PHM_SET_FIELD(cg_spll_spread_spectrum_2, CG_SPLL_SPREAD_SPECTRUM_2, CLKV, clkV);
- }
- }
-
- sclk->SclkFrequency = engine_clock;
- sclk->CgSpllFuncCntl3 = spll_func_cntl_3;
- sclk->CgSpllFuncCntl4 = spll_func_cntl_4;
- sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum;
- sclk->SpllSpreadSpectrum2 = cg_spll_spread_spectrum_2;
- sclk->SclkDid = (uint8_t)dividers.pll_post_divider;
-
- return 0;
-}
-
-/**
- * Populates single SMC SCLK structure using the provided engine clock
- *
- * @param hwmgr the address of the hardware manager
- * @param engine_clock the engine clock to use to populate the structure
- * @param sclk the SMC SCLK structure to be populated
- */
-static int tonga_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
- uint32_t engine_clock,
- uint16_t sclk_activity_level_threshold,
- SMU72_Discrete_GraphicsLevel *graphic_level)
-{
- int result;
- uint32_t mvdd;
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *pptable_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
- result = tonga_calculate_sclk_params(hwmgr, engine_clock, graphic_level);
-
- /* populate graphics levels*/
- result = tonga_get_dependecy_volt_by_clk(hwmgr,
- pptable_info->vdd_dep_on_sclk, engine_clock,
- &graphic_level->MinVoltage, &mvdd);
- PP_ASSERT_WITH_CODE((!result),
- "can not find VDDC voltage value for VDDC "
- "engine clock dependency table", return result);
-
- /* SCLK frequency in units of 10KHz*/
- graphic_level->SclkFrequency = engine_clock;
- /* Indicates maximum activity level for this performance level. 50% for now*/
- graphic_level->ActivityLevel = sclk_activity_level_threshold;
-
- graphic_level->CcPwrDynRm = 0;
- graphic_level->CcPwrDynRm1 = 0;
- /* this level can be used if activity is high enough.*/
- graphic_level->EnabledForActivity = 0;
- /* this level can be used for throttling.*/
- graphic_level->EnabledForThrottle = 1;
- graphic_level->UpHyst = 0;
- graphic_level->DownHyst = 0;
- graphic_level->VoltageDownHyst = 0;
- graphic_level->PowerThrottle = 0;
-
- data->display_timing.min_clock_in_sr =
- hwmgr->display_config.min_core_set_clock_in_sr;
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_SclkDeepSleep))
- graphic_level->DeepSleepDivId =
- smu7_get_sleep_divider_id_from_clock(engine_clock,
- data->display_timing.min_clock_in_sr);
-
- /* Default to slow; the highest DPM level will be set to PPSMC_DISPLAY_WATERMARK_HIGH later.*/
- graphic_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
-
- if (!result) {
- /* CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->MinVoltage);*/
- /* CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->MinVddcPhases);*/
- CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SclkFrequency);
- CONVERT_FROM_HOST_TO_SMC_US(graphic_level->ActivityLevel);
- CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CgSpllFuncCntl3);
- CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CgSpllFuncCntl4);
- CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SpllSpreadSpectrum);
- CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SpllSpreadSpectrum2);
- CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CcPwrDynRm);
- CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CcPwrDynRm1);
- }
-
- return result;
-}
-
-/**
- * Populates all SMC SCLK level structures based on the trimmed allowed DPM engine clock states
- *
- * @param hwmgr the address of the hardware manager
- */
-int tonga_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
-{
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smumgr->backend);
- struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct smu7_dpm_table *dpm_table = &data->dpm_table;
- struct phm_ppt_v1_pcie_table *pcie_table = pptable_info->pcie_table;
- uint8_t pcie_entry_count = (uint8_t) data->dpm_table.pcie_speed_table.count;
- uint32_t level_array_address = smu_data->smu7_data.dpm_table_start +
- offsetof(SMU72_Discrete_DpmTable, GraphicsLevel);
-
- uint32_t level_array_size = sizeof(SMU72_Discrete_GraphicsLevel) *
- SMU72_MAX_LEVELS_GRAPHICS;
-
- SMU72_Discrete_GraphicsLevel *levels = smu_data->smc_state_table.GraphicsLevel;
-
- uint32_t i, max_entry;
- uint8_t highest_pcie_level_enabled = 0;
- uint8_t lowest_pcie_level_enabled = 0, mid_pcie_level_enabled = 0;
- uint8_t count = 0;
- int result = 0;
-
- memset(levels, 0x00, level_array_size);
-
- for (i = 0; i < dpm_table->sclk_table.count; i++) {
- result = tonga_populate_single_graphic_level(hwmgr,
- dpm_table->sclk_table.dpm_levels[i].value,
- (uint16_t)smu_data->activity_target[i],
- &(smu_data->smc_state_table.GraphicsLevel[i]));
- if (result != 0)
- return result;
-
- /* Making sure only DPM level 0-1 have Deep Sleep Div ID populated. */
- if (i > 1)
- smu_data->smc_state_table.GraphicsLevel[i].DeepSleepDivId = 0;
- }
-
- /* Only enable level 0 for now. */
- smu_data->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1;
-
- /* set highest level watermark to high */
- if (dpm_table->sclk_table.count > 1)
- smu_data->smc_state_table.GraphicsLevel[dpm_table->sclk_table.count-1].DisplayWatermark =
- PPSMC_DISPLAY_WATERMARK_HIGH;
-
- smu_data->smc_state_table.GraphicsDpmLevelCount =
- (uint8_t)dpm_table->sclk_table.count;
- data->dpm_level_enable_mask.sclk_dpm_enable_mask =
- phm_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
-
- if (pcie_table != NULL) {
- PP_ASSERT_WITH_CODE((pcie_entry_count >= 1),
- "There must be 1 or more PCIE levels defined in PPTable.",
- return -EINVAL);
- max_entry = pcie_entry_count - 1; /* for indexing, we need to decrement by 1.*/
- for (i = 0; i < dpm_table->sclk_table.count; i++) {
- smu_data->smc_state_table.GraphicsLevel[i].pcieDpmLevel =
- (uint8_t) ((i < max_entry) ? i : max_entry);
- }
- } else {
- if (0 == data->dpm_level_enable_mask.pcie_dpm_enable_mask)
- pr_err("Pcie Dpm Enablemask is 0 !");
-
- while (data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
- ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
- (1<<(highest_pcie_level_enabled+1))) != 0)) {
- highest_pcie_level_enabled++;
- }
-
- while (data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
- ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
- (1<<lowest_pcie_level_enabled)) == 0)) {
- lowest_pcie_level_enabled++;
- }
-
- while ((count < highest_pcie_level_enabled) &&
- ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
- (1<<(lowest_pcie_level_enabled+1+count))) == 0)) {
- count++;
- }
- mid_pcie_level_enabled = (lowest_pcie_level_enabled+1+count) < highest_pcie_level_enabled ?
- (lowest_pcie_level_enabled+1+count) : highest_pcie_level_enabled;
-
-
- /* set pcieDpmLevel to highest_pcie_level_enabled*/
- for (i = 2; i < dpm_table->sclk_table.count; i++)
- smu_data->smc_state_table.GraphicsLevel[i].pcieDpmLevel = highest_pcie_level_enabled;
-
- /* set pcieDpmLevel to lowest_pcie_level_enabled*/
- smu_data->smc_state_table.GraphicsLevel[0].pcieDpmLevel = lowest_pcie_level_enabled;
-
- /* set pcieDpmLevel to mid_pcie_level_enabled*/
- smu_data->smc_state_table.GraphicsLevel[1].pcieDpmLevel = mid_pcie_level_enabled;
- }
- /* level count will send to smc once at init smc table and never change*/
- result = smu7_copy_bytes_to_smc(hwmgr->smumgr, level_array_address,
- (uint8_t *)levels, (uint32_t)level_array_size,
- SMC_RAM_END);
-
- return result;
-}
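The highest/lowest/mid PCIe level selection above walks an enable bitmask with three while loops. A compact, stand-alone version of that bit-walk is sketched below, assuming a non-zero mask; the helper name is illustrative only.

#include <stdint.h>
#include <stdio.h>

/* Derive the highest, lowest and "mid" enabled PCIe DPM levels from an
 * enable mask, mirroring the loops in the function above.
 */
static void pcie_levels_from_mask(uint32_t mask, uint8_t *highest,
				  uint8_t *lowest, uint8_t *mid)
{
	uint8_t hi = 0, lo = 0, count = 0;

	while (mask && (mask & (1u << (hi + 1))))
		hi++;

	while (mask && !(mask & (1u << lo)))
		lo++;

	while ((count < hi) && !(mask & (1u << (lo + 1 + count))))
		count++;

	*highest = hi;
	*lowest = lo;
	*mid = (lo + 1 + count) < hi ? (lo + 1 + count) : hi;
}

int main(void)
{
	uint8_t hi, lo, mid;

	pcie_levels_from_mask(0x07, &hi, &lo, &mid);	/* levels 0..2 enabled */
	printf("highest=%u lowest=%u mid=%u\n",
	       (unsigned)hi, (unsigned)lo, (unsigned)mid);
	return 0;
}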
-
-/**
- * Populates the SMC MCLK structure using the provided memory clock
- *
- * @param hwmgr the address of the hardware manager
- * @param memory_clock the memory clock to use to populate the structure
- * @param mclk the SMC MCLK structure to be populated
- * @param strobe_mode whether strobe mode is enabled for this memory clock
- * @param dllStateOn whether the memory DLL stays powered on at this level
- */
-static int tonga_calculate_mclk_params(
- struct pp_hwmgr *hwmgr,
- uint32_t memory_clock,
- SMU72_Discrete_MemoryLevel *mclk,
- bool strobe_mode,
- bool dllStateOn
- )
-{
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-
- uint32_t dll_cntl = data->clock_registers.vDLL_CNTL;
- uint32_t mclk_pwrmgt_cntl = data->clock_registers.vMCLK_PWRMGT_CNTL;
- uint32_t mpll_ad_func_cntl = data->clock_registers.vMPLL_AD_FUNC_CNTL;
- uint32_t mpll_dq_func_cntl = data->clock_registers.vMPLL_DQ_FUNC_CNTL;
- uint32_t mpll_func_cntl = data->clock_registers.vMPLL_FUNC_CNTL;
- uint32_t mpll_func_cntl_1 = data->clock_registers.vMPLL_FUNC_CNTL_1;
- uint32_t mpll_func_cntl_2 = data->clock_registers.vMPLL_FUNC_CNTL_2;
- uint32_t mpll_ss1 = data->clock_registers.vMPLL_SS1;
- uint32_t mpll_ss2 = data->clock_registers.vMPLL_SS2;
-
- pp_atomctrl_memory_clock_param mpll_param;
- int result;
-
- result = atomctrl_get_memory_pll_dividers_si(hwmgr,
- memory_clock, &mpll_param, strobe_mode);
- PP_ASSERT_WITH_CODE(
- !result,
- "Error retrieving Memory Clock Parameters from VBIOS.",
- return result);
-
- /* MPLL_FUNC_CNTL setup*/
- mpll_func_cntl = PHM_SET_FIELD(mpll_func_cntl, MPLL_FUNC_CNTL, BWCTRL,
- mpll_param.bw_ctrl);
-
- /* MPLL_FUNC_CNTL_1 setup*/
- mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1,
- MPLL_FUNC_CNTL_1, CLKF,
- mpll_param.mpll_fb_divider.cl_kf);
- mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1,
- MPLL_FUNC_CNTL_1, CLKFRAC,
- mpll_param.mpll_fb_divider.clk_frac);
- mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1,
- MPLL_FUNC_CNTL_1, VCO_MODE,
- mpll_param.vco_mode);
-
- /* MPLL_AD_FUNC_CNTL setup*/
- mpll_ad_func_cntl = PHM_SET_FIELD(mpll_ad_func_cntl,
- MPLL_AD_FUNC_CNTL, YCLK_POST_DIV,
- mpll_param.mpll_post_divider);
-
- if (data->is_memory_gddr5) {
- /* MPLL_DQ_FUNC_CNTL setup*/
- mpll_dq_func_cntl = PHM_SET_FIELD(mpll_dq_func_cntl,
- MPLL_DQ_FUNC_CNTL, YCLK_SEL,
- mpll_param.yclk_sel);
- mpll_dq_func_cntl = PHM_SET_FIELD(mpll_dq_func_cntl,
- MPLL_DQ_FUNC_CNTL, YCLK_POST_DIV,
- mpll_param.mpll_post_divider);
- }
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_MemorySpreadSpectrumSupport)) {
- /*
- ************************************
- Fref = Reference Frequency
- NF = Feedback divider ratio
- NR = Reference divider ratio
- Fnom = Nominal VCO output frequency = Fref * NF / NR
- Fs = Spreading Rate
- D = Percentage down-spread / 2
- Fint = Reference input frequency to PFD = Fref / NR
- NS = Spreading rate divider ratio = int(Fint / (2 * Fs))
- CLKS = NS - 1 = ISS_STEP_NUM[11:0]
- NV = D * Fs / Fnom * 4 * ((Fnom/Fref * NR) ^ 2)
- CLKV = 65536 * NV = ISS_STEP_SIZE[25:0]
- *************************************
- */
- pp_atomctrl_internal_ss_info ss_info;
- uint32_t freq_nom;
- uint32_t tmp;
- uint32_t reference_clock = atomctrl_get_mpll_reference_clock(hwmgr);
-
- /* for GDDR5 for all modes and DDR3 */
- if (1 == mpll_param.qdr)
- freq_nom = memory_clock * 4 * (1 << mpll_param.mpll_post_divider);
- else
- freq_nom = memory_clock * 2 * (1 << mpll_param.mpll_post_divider);
-
- /* tmp = (freq_nom / reference_clock * reference_divider) ^ 2 Note: S.I. reference_divider = 1*/
- tmp = (freq_nom / reference_clock);
- tmp = tmp * tmp;
-
- if (0 == atomctrl_get_memory_clock_spread_spectrum(hwmgr, freq_nom, &ss_info)) {
- /* ss_info.speed_spectrum_percentage -- in unit of 0.01% */
- /* ss_info.speed_spectrum_rate -- in unit of khz */
- /* CLKS = reference_clock / (2 * speed_spectrum_rate * reference_divider) * 10 */
- /* = reference_clock * 5 / speed_spectrum_rate */
- uint32_t clks = reference_clock * 5 / ss_info.speed_spectrum_rate;
-
- /* CLKV = 65536 * speed_spectrum_percentage / 2 * spreadSpectrumRate / freq_nom * 4 / 100000 * ((freq_nom / reference_clock) ^ 2) */
- /* = 131 * speed_spectrum_percentage * speed_spectrum_rate / 100 * ((freq_nom / reference_clock) ^ 2) / freq_nom */
- uint32_t clkv =
- (uint32_t)((((131 * ss_info.speed_spectrum_percentage *
- ss_info.speed_spectrum_rate) / 100) * tmp) / freq_nom);
-
- mpll_ss1 = PHM_SET_FIELD(mpll_ss1, MPLL_SS1, CLKV, clkv);
- mpll_ss2 = PHM_SET_FIELD(mpll_ss2, MPLL_SS2, CLKS, clks);
- }
- }
-
- /* MCLK_PWRMGT_CNTL setup */
- mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
- MCLK_PWRMGT_CNTL, DLL_SPEED, mpll_param.dll_speed);
- mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
- MCLK_PWRMGT_CNTL, MRDCK0_PDNB, dllStateOn);
- mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
- MCLK_PWRMGT_CNTL, MRDCK1_PDNB, dllStateOn);
-
- /* Save the result data to output memory level structure */
- mclk->MclkFrequency = memory_clock;
- mclk->MpllFuncCntl = mpll_func_cntl;
- mclk->MpllFuncCntl_1 = mpll_func_cntl_1;
- mclk->MpllFuncCntl_2 = mpll_func_cntl_2;
- mclk->MpllAdFuncCntl = mpll_ad_func_cntl;
- mclk->MpllDqFuncCntl = mpll_dq_func_cntl;
- mclk->MclkPwrmgtCntl = mclk_pwrmgt_cntl;
- mclk->DllCntl = dll_cntl;
- mclk->MpllSs1 = mpll_ss1;
- mclk->MpllSs2 = mpll_ss2;
-
- return 0;
-}
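The MPLL spread-spectrum block above encodes CLKS and CLKV from the nominal VCO frequency (the Fref/NF/NR relations in the comment). Below is a minimal numeric sketch of the same two formulas; all values are made up for illustration and the integer order of operations follows the code above.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Illustrative values only: 27 MHz MPLL reference (10 kHz units),
	 * a 100 MHz memory clock in QDR mode with post divider 1,
	 * 0.30% down-spread (30 in 0.01% units) at a 300 kHz rate.
	 */
	uint32_t reference_clock = 2700;
	uint32_t memory_clock = 10000;
	uint32_t post_divider = 1;
	uint32_t percentage = 30, rate = 300;

	/* freq_nom = memory_clock * 4 * 2^post_divider (the QDR case above) */
	uint32_t freq_nom = memory_clock * 4 * (1u << post_divider);
	uint32_t tmp = freq_nom / reference_clock;

	tmp = tmp * tmp;

	/* CLKS = reference_clock * 5 / speed_spectrum_rate */
	uint32_t clks = reference_clock * 5 / rate;
	/* CLKV = 131 * percentage * rate / 100 * tmp / freq_nom */
	uint32_t clkv = (((131 * percentage * rate) / 100) * tmp) / freq_nom;

	printf("freq_nom=%u CLKS=%u CLKV=%u\n",
	       (unsigned)freq_nom, (unsigned)clks, (unsigned)clkv);
	return 0;
}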
-
-static uint8_t tonga_get_mclk_frequency_ratio(uint32_t memory_clock,
- bool strobe_mode)
-{
- uint8_t mc_para_index;
-
- if (strobe_mode) {
- if (memory_clock < 12500)
- mc_para_index = 0x00;
- else if (memory_clock > 47500)
- mc_para_index = 0x0f;
- else
- mc_para_index = (uint8_t)((memory_clock - 10000) / 2500);
- } else {
- if (memory_clock < 65000)
- mc_para_index = 0x00;
- else if (memory_clock > 135000)
- mc_para_index = 0x0f;
- else
- mc_para_index = (uint8_t)((memory_clock - 60000) / 5000);
- }
-
- return mc_para_index;
-}
-
-static uint8_t tonga_get_ddr3_mclk_frequency_ratio(uint32_t memory_clock)
-{
- uint8_t mc_para_index;
-
- if (memory_clock < 10000)
- mc_para_index = 0;
- else if (memory_clock >= 80000)
- mc_para_index = 0x0f;
- else
- mc_para_index = (uint8_t)((memory_clock - 10000) / 5000 + 1);
-
- return mc_para_index;
-}
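The two ratio helpers above map a memory clock (in 10 kHz units) onto a 4-bit MC parameter index. The stand-alone sketch below reproduces the non-strobe GDDR5 mapping and prints a few sample points; it is illustrative only and not the driver's own helper.

#include <stdint.h>
#include <stdio.h>

/* Non-strobe GDDR5 mapping from the function above:
 * clamp to [0x0, 0xf], otherwise one step per 50 MHz above 600 MHz.
 */
static uint8_t gddr5_mc_para_index(uint32_t memory_clock)
{
	if (memory_clock < 65000)
		return 0x00;
	if (memory_clock > 135000)
		return 0x0f;
	return (uint8_t)((memory_clock - 60000) / 5000);
}

int main(void)
{
	uint32_t samples[] = { 60000, 65000, 90000, 135000, 150000 };

	for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("mclk=%u -> index=0x%02x\n", (unsigned)samples[i],
		       (unsigned)gddr5_mc_para_index(samples[i]));
	return 0;
}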
-
-
-static int tonga_populate_single_memory_level(
- struct pp_hwmgr *hwmgr,
- uint32_t memory_clock,
- SMU72_Discrete_MemoryLevel *memory_level
- )
-{
- uint32_t mvdd = 0;
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *pptable_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- int result = 0;
- bool dll_state_on;
- struct cgs_display_info info = {0};
- uint32_t mclk_edc_wr_enable_threshold = 40000;
- uint32_t mclk_stutter_mode_threshold = 30000;
- uint32_t mclk_edc_enable_threshold = 40000;
- uint32_t mclk_strobe_mode_threshold = 40000;
-
- if (NULL != pptable_info->vdd_dep_on_mclk) {
- result = tonga_get_dependecy_volt_by_clk(hwmgr,
- pptable_info->vdd_dep_on_mclk,
- memory_clock,
- &memory_level->MinVoltage, &mvdd);
- PP_ASSERT_WITH_CODE(
- !result,
- "can not find MinVddc voltage value from memory VDDC "
- "voltage dependency table",
- return result);
- }
-
- if (data->mvdd_control == SMU7_VOLTAGE_CONTROL_NONE)
- memory_level->MinMvdd = data->vbios_boot_state.mvdd_bootup_value;
- else
- memory_level->MinMvdd = mvdd;
-
- memory_level->EnabledForThrottle = 1;
- memory_level->EnabledForActivity = 0;
- memory_level->UpHyst = 0;
- memory_level->DownHyst = 100;
- memory_level->VoltageDownHyst = 0;
-
- /* Indicates maximum activity level for this performance level.*/
- memory_level->ActivityLevel = (uint16_t)data->mclk_activity_target;
- memory_level->StutterEnable = 0;
- memory_level->StrobeEnable = 0;
- memory_level->EdcReadEnable = 0;
- memory_level->EdcWriteEnable = 0;
- memory_level->RttEnable = 0;
-
- /* default set to low watermark. Highest level will be set to high later.*/
- memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
-
- cgs_get_active_displays_info(hwmgr->device, &info);
- data->display_timing.num_existing_displays = info.display_count;
-
- if ((mclk_stutter_mode_threshold != 0) &&
- (memory_clock <= mclk_stutter_mode_threshold) &&
- (!data->is_uvd_enabled)
- && (PHM_READ_FIELD(hwmgr->device, DPG_PIPE_STUTTER_CONTROL, STUTTER_ENABLE) & 0x1)
- && (data->display_timing.num_existing_displays <= 2)
- && (data->display_timing.num_existing_displays != 0))
- memory_level->StutterEnable = 1;
-
- /* decide strobe mode*/
- memory_level->StrobeEnable = (mclk_strobe_mode_threshold != 0) &&
- (memory_clock <= mclk_strobe_mode_threshold);
-
- /* decide EDC mode and memory clock ratio*/
- if (data->is_memory_gddr5) {
- memory_level->StrobeRatio = tonga_get_mclk_frequency_ratio(memory_clock,
- memory_level->StrobeEnable);
-
- if ((mclk_edc_enable_threshold != 0) &&
- (memory_clock > mclk_edc_enable_threshold)) {
- memory_level->EdcReadEnable = 1;
- }
-
- if ((mclk_edc_wr_enable_threshold != 0) &&
- (memory_clock > mclk_edc_wr_enable_threshold)) {
- memory_level->EdcWriteEnable = 1;
- }
-
- if (memory_level->StrobeEnable) {
- if (tonga_get_mclk_frequency_ratio(memory_clock, 1) >=
- ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC7) >> 16) & 0xf)) {
- dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 1 : 0;
- } else {
- dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC6) >> 1) & 0x1) ? 1 : 0;
- }
-
- } else {
- dll_state_on = data->dll_default_on;
- }
- } else {
- memory_level->StrobeRatio =
- tonga_get_ddr3_mclk_frequency_ratio(memory_clock);
- dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 1 : 0;
- }
-
- result = tonga_calculate_mclk_params(hwmgr,
- memory_clock, memory_level, memory_level->StrobeEnable, dll_state_on);
-
- if (!result) {
- CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MinMvdd);
- /* MCLK frequency in units of 10KHz*/
- CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkFrequency);
- /* Indicates maximum activity level for this performance level.*/
- CONVERT_FROM_HOST_TO_SMC_US(memory_level->ActivityLevel);
- CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl);
- CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_1);
- CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_2);
- CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllAdFuncCntl);
- CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllDqFuncCntl);
- CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkPwrmgtCntl);
- CONVERT_FROM_HOST_TO_SMC_UL(memory_level->DllCntl);
- CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs1);
- CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs2);
- }
-
- return result;
-}
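The memory-level population above is largely threshold checks (stutter, strobe, EDC read/write) against fixed limits in 10 kHz units. A condensed, self-contained version of just those decisions is sketched below; the struct is invented for the example, and the hardware stutter-control register check made in the real code is deliberately omitted.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct mem_level_flags {
	bool stutter, strobe, edc_read, edc_write;
};

/* Same fixed thresholds (10 kHz units) as the function above; the real
 * stutter decision additionally requires the display stutter-control bit.
 */
static struct mem_level_flags classify_mclk(uint32_t mclk, bool uvd_enabled,
					    uint32_t num_displays)
{
	const uint32_t stutter_thr = 30000, strobe_thr = 40000;
	const uint32_t edc_rd_thr = 40000, edc_wr_thr = 40000;
	struct mem_level_flags f = { false, false, false, false };

	f.stutter = (mclk <= stutter_thr) && !uvd_enabled &&
		    num_displays != 0 && num_displays <= 2;
	f.strobe = (mclk <= strobe_thr);
	f.edc_read = (mclk > edc_rd_thr);
	f.edc_write = (mclk > edc_wr_thr);
	return f;
}

int main(void)
{
	struct mem_level_flags f = classify_mclk(50000, false, 1);

	printf("stutter=%d strobe=%d edc_rd=%d edc_wr=%d\n",
	       (int)f.stutter, (int)f.strobe, (int)f.edc_read, (int)f.edc_write);
	return 0;
}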
-
-int tonga_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
-{
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct tonga_smumgr *smu_data =
- (struct tonga_smumgr *)(hwmgr->smumgr->backend);
- struct smu7_dpm_table *dpm_table = &data->dpm_table;
- int result;
-
- /* populate MCLK dpm table to SMU7 */
- uint32_t level_array_address =
- smu_data->smu7_data.dpm_table_start +
- offsetof(SMU72_Discrete_DpmTable, MemoryLevel);
- uint32_t level_array_size =
- sizeof(SMU72_Discrete_MemoryLevel) *
- SMU72_MAX_LEVELS_MEMORY;
- SMU72_Discrete_MemoryLevel *levels =
- smu_data->smc_state_table.MemoryLevel;
- uint32_t i;
-
- memset(levels, 0x00, level_array_size);
-
- for (i = 0; i < dpm_table->mclk_table.count; i++) {
- PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value),
- "can not populate memory level as memory clock is zero",
- return -EINVAL);
- result = tonga_populate_single_memory_level(
- hwmgr,
- dpm_table->mclk_table.dpm_levels[i].value,
- &(smu_data->smc_state_table.MemoryLevel[i]));
- if (result)
- return result;
- }
-
- /* Only enable level 0 for now.*/
- smu_data->smc_state_table.MemoryLevel[0].EnabledForActivity = 1;
-
- /*
- * In order to prevent MC activity in stutter mode from pushing DPM up,
- * the UVD change complements this by putting the MCLK in a higher state
- * by default, so that we are not affected by the up threshold or MCLK DPM latency.
- */
- smu_data->smc_state_table.MemoryLevel[0].ActivityLevel = 0x1F;
- CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.MemoryLevel[0].ActivityLevel);
-
- smu_data->smc_state_table.MemoryDpmLevelCount = (uint8_t)dpm_table->mclk_table.count;
- data->dpm_level_enable_mask.mclk_dpm_enable_mask = phm_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
- /* set highest level watermark to high*/
- smu_data->smc_state_table.MemoryLevel[dpm_table->mclk_table.count-1].DisplayWatermark = PPSMC_DISPLAY_WATERMARK_HIGH;
-
- /* level count will send to smc once at init smc table and never change*/
- result = smu7_copy_bytes_to_smc(hwmgr->smumgr,
- level_array_address, (uint8_t *)levels, (uint32_t)level_array_size,
- SMC_RAM_END);
-
- return result;
-}
-
-static int tonga_populate_mvdd_value(struct pp_hwmgr *hwmgr,
- uint32_t mclk, SMIO_Pattern *smio_pattern)
-{
- const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- uint32_t i = 0;
-
- if (SMU7_VOLTAGE_CONTROL_NONE != data->mvdd_control) {
- /* find mvdd value which clock is more than request */
- for (i = 0; i < table_info->vdd_dep_on_mclk->count; i++) {
- if (mclk <= table_info->vdd_dep_on_mclk->entries[i].clk) {
- /* Always round to higher voltage. */
- smio_pattern->Voltage =
- data->mvdd_voltage_table.entries[i].value;
- break;
- }
- }
-
- PP_ASSERT_WITH_CODE(i < table_info->vdd_dep_on_mclk->count,
- "MVDD Voltage is outside the supported range.",
- return -EINVAL);
- } else {
- return -EINVAL;
- }
-
- return 0;
-}
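tonga_populate_mvdd_value above picks the first MVDD dependency entry whose clock is at or above the requested MCLK, i.e. it rounds the voltage up. A generic version of that lookup is sketched below; the types are simplified stand-ins, not the driver's tables.

#include <stdint.h>
#include <stdio.h>

struct clk_volt { uint32_t clk; uint32_t voltage; };

/* Return the voltage of the first entry with clk >= mclk, or -1 if the
 * requested clock is above every entry (out of the supported range).
 */
static int lookup_mvdd(const struct clk_volt *tbl, uint32_t count,
		       uint32_t mclk, uint32_t *voltage)
{
	for (uint32_t i = 0; i < count; i++) {
		if (mclk <= tbl[i].clk) {
			*voltage = tbl[i].voltage;	/* round up */
			return 0;
		}
	}
	return -1;
}

int main(void)
{
	const struct clk_volt tbl[] = {
		{ 30000, 900 }, { 60000, 1000 }, { 80000, 1100 },
	};
	uint32_t v;

	if (!lookup_mvdd(tbl, 3, 65000, &v))
		printf("MVDD for 650 MHz: %u mV\n", (unsigned)v);
	return 0;
}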
-
-
-static int tonga_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
- SMU72_Discrete_DpmTable *table)
-{
- int result = 0;
- struct tonga_smumgr *smu_data =
- (struct tonga_smumgr *)(hwmgr->smumgr->backend);
- const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct pp_atomctrl_clock_dividers_vi dividers;
-
- SMIO_Pattern voltage_level;
- uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL;
- uint32_t spll_func_cntl_2 = data->clock_registers.vCG_SPLL_FUNC_CNTL_2;
- uint32_t dll_cntl = data->clock_registers.vDLL_CNTL;
- uint32_t mclk_pwrmgt_cntl = data->clock_registers.vMCLK_PWRMGT_CNTL;
-
- /* The ACPI state should not do DPM on DC (or ever).*/
- table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
-
- table->ACPILevel.MinVoltage =
- smu_data->smc_state_table.GraphicsLevel[0].MinVoltage;
-
- /* run the ACPI state at the VBIOS reference clock*/
- table->ACPILevel.SclkFrequency = atomctrl_get_reference_clock(hwmgr);
-
- /* get the engine clock dividers for this clock value*/
- result = atomctrl_get_engine_pll_dividers_vi(hwmgr,
- table->ACPILevel.SclkFrequency, &dividers);
-
- PP_ASSERT_WITH_CODE(result == 0,
- "Error retrieving Engine Clock dividers from VBIOS.",
- return result);
-
- /* divider ID for required SCLK*/
- table->ACPILevel.SclkDid = (uint8_t)dividers.pll_post_divider;
- table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
- table->ACPILevel.DeepSleepDivId = 0;
-
- spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
- SPLL_PWRON, 0);
- spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
- SPLL_RESET, 1);
- spll_func_cntl_2 = PHM_SET_FIELD(spll_func_cntl_2, CG_SPLL_FUNC_CNTL_2,
- SCLK_MUX_SEL, 4);
-
- table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
- table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
- table->ACPILevel.CgSpllFuncCntl3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
- table->ACPILevel.CgSpllFuncCntl4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
- table->ACPILevel.SpllSpreadSpectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
- table->ACPILevel.SpllSpreadSpectrum2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
- table->ACPILevel.CcPwrDynRm = 0;
- table->ACPILevel.CcPwrDynRm1 = 0;
-
-
- /* For various features to be enabled/disabled while this level is active.*/
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags);
- /* SCLK frequency in units of 10KHz*/
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkFrequency);
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl);
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl2);
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl3);
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl4);
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum);
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum2);
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm);
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1);
-
- /* table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;*/
- table->MemoryACPILevel.MinVoltage =
- smu_data->smc_state_table.MemoryLevel[0].MinVoltage;
-
- /* CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MinVoltage);*/
-
- if (0 == tonga_populate_mvdd_value(hwmgr, 0, &voltage_level))
- table->MemoryACPILevel.MinMvdd =
- PP_HOST_TO_SMC_UL(voltage_level.Voltage * VOLTAGE_SCALE);
- else
- table->MemoryACPILevel.MinMvdd = 0;
-
- /* Force reset on DLL*/
- mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
- MCLK_PWRMGT_CNTL, MRDCK0_RESET, 0x1);
- mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
- MCLK_PWRMGT_CNTL, MRDCK1_RESET, 0x1);
-
- /* Disable DLL in ACPIState*/
- mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
- MCLK_PWRMGT_CNTL, MRDCK0_PDNB, 0);
- mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
- MCLK_PWRMGT_CNTL, MRDCK1_PDNB, 0);
-
- /* Enable DLL bypass signal*/
- dll_cntl = PHM_SET_FIELD(dll_cntl,
- DLL_CNTL, MRDCK0_BYPASS, 0);
- dll_cntl = PHM_SET_FIELD(dll_cntl,
- DLL_CNTL, MRDCK1_BYPASS, 0);
-
- table->MemoryACPILevel.DllCntl =
- PP_HOST_TO_SMC_UL(dll_cntl);
- table->MemoryACPILevel.MclkPwrmgtCntl =
- PP_HOST_TO_SMC_UL(mclk_pwrmgt_cntl);
- table->MemoryACPILevel.MpllAdFuncCntl =
- PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_AD_FUNC_CNTL);
- table->MemoryACPILevel.MpllDqFuncCntl =
- PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_DQ_FUNC_CNTL);
- table->MemoryACPILevel.MpllFuncCntl =
- PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL);
- table->MemoryACPILevel.MpllFuncCntl_1 =
- PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_1);
- table->MemoryACPILevel.MpllFuncCntl_2 =
- PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_2);
- table->MemoryACPILevel.MpllSs1 =
- PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS1);
- table->MemoryACPILevel.MpllSs2 =
- PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS2);
-
- table->MemoryACPILevel.EnabledForThrottle = 0;
- table->MemoryACPILevel.EnabledForActivity = 0;
- table->MemoryACPILevel.UpHyst = 0;
- table->MemoryACPILevel.DownHyst = 100;
- table->MemoryACPILevel.VoltageDownHyst = 0;
- /* Indicates maximum activity level for this performance level.*/
- table->MemoryACPILevel.ActivityLevel =
- PP_HOST_TO_SMC_US((uint16_t)data->mclk_activity_target);
-
- table->MemoryACPILevel.StutterEnable = 0;
- table->MemoryACPILevel.StrobeEnable = 0;
- table->MemoryACPILevel.EdcReadEnable = 0;
- table->MemoryACPILevel.EdcWriteEnable = 0;
- table->MemoryACPILevel.RttEnable = 0;
-
- return result;
-}
-
-static int tonga_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
- SMU72_Discrete_DpmTable *table)
-{
- int result = 0;
-
- uint8_t count;
- pp_atomctrl_clock_dividers_vi dividers;
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *pptable_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
- pptable_info->mm_dep_table;
-
- table->UvdLevelCount = (uint8_t) (mm_table->count);
- table->UvdBootLevel = 0;
-
- for (count = 0; count < table->UvdLevelCount; count++) {
- table->UvdLevel[count].VclkFrequency = mm_table->entries[count].vclk;
- table->UvdLevel[count].DclkFrequency = mm_table->entries[count].dclk;
- table->UvdLevel[count].MinVoltage.Vddc =
- phm_get_voltage_index(pptable_info->vddc_lookup_table,
- mm_table->entries[count].vddc);
- table->UvdLevel[count].MinVoltage.VddGfx =
- (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) ?
- phm_get_voltage_index(pptable_info->vddgfx_lookup_table,
- mm_table->entries[count].vddgfx) : 0;
- table->UvdLevel[count].MinVoltage.Vddci =
- phm_get_voltage_id(&data->vddci_voltage_table,
- mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
- table->UvdLevel[count].MinVoltage.Phases = 1;
-
- /* retrieve divider value for VBIOS */
- result = atomctrl_get_dfs_pll_dividers_vi(
- hwmgr,
- table->UvdLevel[count].VclkFrequency,
- &dividers);
-
- PP_ASSERT_WITH_CODE((!result),
- "can not find divide id for Vclk clock",
- return result);
-
- table->UvdLevel[count].VclkDivider = (uint8_t)dividers.pll_post_divider;
-
- result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
- table->UvdLevel[count].DclkFrequency, &dividers);
- PP_ASSERT_WITH_CODE((!result),
- "can not find divide id for Dclk clock",
- return result);
-
- table->UvdLevel[count].DclkDivider =
- (uint8_t)dividers.pll_post_divider;
-
- CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency);
- CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency);
- }
-
- return result;
-
-}
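The UVD level loop above, and the VCE/ACP/SAMU loops that follow, all share one pattern: copy the clock from the MM dependency table, resolve the minimum voltages, ask a VBIOS helper for a post divider, then byte-swap the frequency for the SMC. The sketch below captures just that control flow with stubbed-out lookups; none of the helper names are the driver's real API.

#include <stdint.h>
#include <stdio.h>

struct mm_entry { uint32_t clk; uint32_t vddc; };
struct smc_mm_level { uint32_t frequency_be; uint8_t divider; uint8_t min_vddc_idx; };

/* Stand-ins for the voltage-table lookup and the VBIOS divider query. */
static uint8_t voltage_index_for(uint32_t vddc) { return (uint8_t)(vddc / 25); }
static uint8_t post_divider_for(uint32_t clk) { return (uint8_t)(clk ? 100000 / clk : 0); }
static uint32_t to_big_endian(uint32_t v) { return __builtin_bswap32(v); }

static void populate_mm_levels(const struct mm_entry *tbl, uint8_t count,
			       struct smc_mm_level *levels)
{
	for (uint8_t i = 0; i < count; i++) {
		levels[i].min_vddc_idx = voltage_index_for(tbl[i].vddc);
		levels[i].divider = post_divider_for(tbl[i].clk);
		levels[i].frequency_be = to_big_endian(tbl[i].clk);
	}
}

int main(void)
{
	const struct mm_entry tbl[] = { { 40000, 900 }, { 60000, 1000 } };
	struct smc_mm_level levels[2];

	populate_mm_levels(tbl, 2, levels);
	printf("level0 divider=%u freq(be)=0x%08x\n",
	       (unsigned)levels[0].divider, (unsigned)levels[0].frequency_be);
	return 0;
}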
-
-static int tonga_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
- SMU72_Discrete_DpmTable *table)
-{
- int result = 0;
-
- uint8_t count;
- pp_atomctrl_clock_dividers_vi dividers;
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *pptable_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
- pptable_info->mm_dep_table;
-
- table->VceLevelCount = (uint8_t) (mm_table->count);
- table->VceBootLevel = 0;
-
- for (count = 0; count < table->VceLevelCount; count++) {
- table->VceLevel[count].Frequency =
- mm_table->entries[count].eclk;
- table->VceLevel[count].MinVoltage.Vddc =
- phm_get_voltage_index(pptable_info->vddc_lookup_table,
- mm_table->entries[count].vddc);
- table->VceLevel[count].MinVoltage.VddGfx =
- (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) ?
- phm_get_voltage_index(pptable_info->vddgfx_lookup_table,
- mm_table->entries[count].vddgfx) : 0;
- table->VceLevel[count].MinVoltage.Vddci =
- phm_get_voltage_id(&data->vddci_voltage_table,
- mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
- table->VceLevel[count].MinVoltage.Phases = 1;
-
- /* retrieve divider value for VBIOS */
- result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
- table->VceLevel[count].Frequency, &dividers);
- PP_ASSERT_WITH_CODE((!result),
- "can not find divide id for VCE engine clock",
- return result);
-
- table->VceLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
-
- CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].Frequency);
- }
-
- return result;
-}
-
-static int tonga_populate_smc_acp_level(struct pp_hwmgr *hwmgr,
- SMU72_Discrete_DpmTable *table)
-{
- int result = 0;
- uint8_t count;
- pp_atomctrl_clock_dividers_vi dividers;
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *pptable_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
- pptable_info->mm_dep_table;
-
- table->AcpLevelCount = (uint8_t) (mm_table->count);
- table->AcpBootLevel = 0;
-
- for (count = 0; count < table->AcpLevelCount; count++) {
- table->AcpLevel[count].Frequency =
- pptable_info->mm_dep_table->entries[count].aclk;
- table->AcpLevel[count].MinVoltage.Vddc =
- phm_get_voltage_index(pptable_info->vddc_lookup_table,
- mm_table->entries[count].vddc);
- table->AcpLevel[count].MinVoltage.VddGfx =
- (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) ?
- phm_get_voltage_index(pptable_info->vddgfx_lookup_table,
- mm_table->entries[count].vddgfx) : 0;
- table->AcpLevel[count].MinVoltage.Vddci =
- phm_get_voltage_id(&data->vddci_voltage_table,
- mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
- table->AcpLevel[count].MinVoltage.Phases = 1;
-
- /* retrieve divider value for VBIOS */
- result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
- table->AcpLevel[count].Frequency, &dividers);
- PP_ASSERT_WITH_CODE((!result),
- "can not find divide id for engine clock", return result);
-
- table->AcpLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
-
- CONVERT_FROM_HOST_TO_SMC_UL(table->AcpLevel[count].Frequency);
- }
-
- return result;
-}
-
-static int tonga_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
- SMU72_Discrete_DpmTable *table)
-{
- int result = 0;
- uint8_t count;
- pp_atomctrl_clock_dividers_vi dividers;
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *pptable_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
- pptable_info->mm_dep_table;
-
- table->SamuBootLevel = 0;
- table->SamuLevelCount = (uint8_t) (mm_table->count);
-
- for (count = 0; count < table->SamuLevelCount; count++) {
- /* not sure whether we need evclk or not */
- table->SamuLevel[count].Frequency =
- pptable_info->mm_dep_table->entries[count].samclock;
- table->SamuLevel[count].MinVoltage.Vddc =
- phm_get_voltage_index(pptable_info->vddc_lookup_table,
- mm_table->entries[count].vddc);
- table->SamuLevel[count].MinVoltage.VddGfx =
- (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) ?
- phm_get_voltage_index(pptable_info->vddgfx_lookup_table,
- mm_table->entries[count].vddgfx) : 0;
- table->SamuLevel[count].MinVoltage.Vddci =
- phm_get_voltage_id(&data->vddci_voltage_table,
- mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
- table->SamuLevel[count].MinVoltage.Phases = 1;
-
- /* retrieve divider value for VBIOS */
- result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
- table->SamuLevel[count].Frequency, &dividers);
- PP_ASSERT_WITH_CODE((!result),
- "can not find divide id for samu clock", return result);
-
- table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
-
- CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency);
- }
-
- return result;
-}
-
-static int tonga_populate_memory_timing_parameters(
- struct pp_hwmgr *hwmgr,
- uint32_t engine_clock,
- uint32_t memory_clock,
- struct SMU72_Discrete_MCArbDramTimingTableEntry *arb_regs
- )
-{
- uint32_t dramTiming;
- uint32_t dramTiming2;
- uint32_t burstTime;
- int result;
-
- result = atomctrl_set_engine_dram_timings_rv770(hwmgr,
- engine_clock, memory_clock);
-
- PP_ASSERT_WITH_CODE(result == 0,
- "Error calling VBIOS to set DRAM_TIMING.", return result);
-
- dramTiming = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
- dramTiming2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
- burstTime = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);
-
- arb_regs->McArbDramTiming = PP_HOST_TO_SMC_UL(dramTiming);
- arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dramTiming2);
- arb_regs->McArbBurstTime = (uint8_t)burstTime;
-
- return 0;
-}
-
-/**
- * Set up parameters for the MC ARB.
- *
- * This function is to be called from the SetPowerState table.
- *
- * @param hwmgr the address of the powerplay hardware manager.
- * @return 0 on success, otherwise the error code from the SMC copy.
- */
-static int tonga_program_memory_timing_parameters(struct pp_hwmgr *hwmgr)
-{
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct tonga_smumgr *smu_data =
- (struct tonga_smumgr *)(hwmgr->smumgr->backend);
- int result = 0;
- SMU72_Discrete_MCArbDramTimingTable arb_regs;
- uint32_t i, j;
-
- memset(&arb_regs, 0x00, sizeof(SMU72_Discrete_MCArbDramTimingTable));
-
- for (i = 0; i < data->dpm_table.sclk_table.count; i++) {
- for (j = 0; j < data->dpm_table.mclk_table.count; j++) {
- result = tonga_populate_memory_timing_parameters
- (hwmgr, data->dpm_table.sclk_table.dpm_levels[i].value,
- data->dpm_table.mclk_table.dpm_levels[j].value,
- &arb_regs.entries[i][j]);
-
- if (result)
- break;
- }
- }
-
- if (!result) {
- result = smu7_copy_bytes_to_smc(
- hwmgr->smumgr,
- smu_data->smu7_data.arb_table_start,
- (uint8_t *)&arb_regs,
- sizeof(SMU72_Discrete_MCArbDramTimingTable),
- SMC_RAM_END
- );
- }
-
- return result;
-}
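The MC ARB table above is an SCLK-by-MCLK matrix: one DRAM-timing entry per (engine clock, memory clock) pair, then the whole table is uploaded to SMC RAM in one copy. A small sketch of that double loop is shown below with a stubbed per-entry function; sizes, values and names are illustrative only.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MAX_SCLK 8
#define MAX_MCLK 4

struct arb_entry { uint32_t timing, timing2; uint8_t burst; };

/* Stub: in the driver this asks the VBIOS to program the DRAM timing
 * registers and then reads the result back.
 */
static int fill_arb_entry(uint32_t sclk, uint32_t mclk, struct arb_entry *e)
{
	e->timing = sclk / 100;
	e->timing2 = mclk / 100;
	e->burst = 4;
	return 0;
}

int main(void)
{
	const uint32_t sclks[] = { 30000, 60000, 90000 };
	const uint32_t mclks[] = { 15000, 50000 };
	struct arb_entry table[MAX_SCLK][MAX_MCLK];

	memset(table, 0, sizeof(table));

	for (unsigned i = 0; i < 3; i++)
		for (unsigned j = 0; j < 2; j++)
			if (fill_arb_entry(sclks[i], mclks[j], &table[i][j]))
				return 1;

	printf("entry[1][1] timing=%u timing2=%u\n",
	       (unsigned)table[1][1].timing, (unsigned)table[1][1].timing2);
	return 0;
}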
-
-static int tonga_populate_smc_boot_level(struct pp_hwmgr *hwmgr,
- SMU72_Discrete_DpmTable *table)
-{
- int result = 0;
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct tonga_smumgr *smu_data =
- (struct tonga_smumgr *)(hwmgr->smumgr->backend);
- table->GraphicsBootLevel = 0;
- table->MemoryBootLevel = 0;
-
- /* find boot level from dpm table*/
- result = phm_find_boot_level(&(data->dpm_table.sclk_table),
- data->vbios_boot_state.sclk_bootup_value,
- (uint32_t *)&(smu_data->smc_state_table.GraphicsBootLevel));
-
- if (result != 0) {
- smu_data->smc_state_table.GraphicsBootLevel = 0;
- pr_err("[powerplay] VBIOS did not find boot engine "
- "clock value in dependency table. "
- "Using Graphics DPM level 0 !");
- result = 0;
- }
-
- result = phm_find_boot_level(&(data->dpm_table.mclk_table),
- data->vbios_boot_state.mclk_bootup_value,
- (uint32_t *)&(smu_data->smc_state_table.MemoryBootLevel));
-
- if (result != 0) {
- smu_data->smc_state_table.MemoryBootLevel = 0;
- pr_err("[powerplay] VBIOS did not find boot "
- "memory clock value in dependency table. "
- "Using Memory DPM level 0 !");
- result = 0;
- }
-
- table->BootVoltage.Vddc =
- phm_get_voltage_id(&(data->vddc_voltage_table),
- data->vbios_boot_state.vddc_bootup_value);
- table->BootVoltage.VddGfx =
- phm_get_voltage_id(&(data->vddgfx_voltage_table),
- data->vbios_boot_state.vddgfx_bootup_value);
- table->BootVoltage.Vddci =
- phm_get_voltage_id(&(data->vddci_voltage_table),
- data->vbios_boot_state.vddci_bootup_value);
- table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value;
-
- CONVERT_FROM_HOST_TO_SMC_US(table->BootMVdd);
-
- return result;
-}
-
-static int tonga_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
-{
- uint32_t ro, efuse, efuse2, clock_freq, volt_without_cks,
- volt_with_cks, value;
- uint16_t clock_freq_u16;
- struct tonga_smumgr *smu_data =
- (struct tonga_smumgr *)(hwmgr->smumgr->backend);
- uint8_t type, i, j, cks_setting, stretch_amount, stretch_amount2,
- volt_offset = 0;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
- table_info->vdd_dep_on_sclk;
- uint32_t hw_revision, dev_id;
- struct cgs_system_info sys_info = {0};
-
- stretch_amount = (uint8_t)table_info->cac_dtp_table->usClockStretchAmount;
-
- sys_info.size = sizeof(struct cgs_system_info);
-
- sys_info.info_id = CGS_SYSTEM_INFO_PCIE_REV;
- cgs_query_system_info(hwmgr->device, &sys_info);
- hw_revision = (uint32_t)sys_info.value;
-
- sys_info.info_id = CGS_SYSTEM_INFO_PCIE_DEV;
- cgs_query_system_info(hwmgr->device, &sys_info);
- dev_id = (uint32_t)sys_info.value;
-
- /* Read the SMU efuse to calculate RO and determine
- * whether the part is SS or FF. If RO >= 1660MHz, the part is FF.
- */
- efuse = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixSMU_EFUSE_0 + (146 * 4));
- efuse2 = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixSMU_EFUSE_0 + (148 * 4));
- efuse &= 0xFF000000;
- efuse = efuse >> 24;
- efuse2 &= 0xF;
-
- if (efuse2 == 1)
- ro = (2300 - 1350) * efuse / 255 + 1350;
- else
- ro = (2500 - 1000) * efuse / 255 + 1000;
-
- if (ro >= 1660)
- type = 0;
- else
- type = 1;
-
- /* Populate Stretch amount */
- smu_data->smc_state_table.ClockStretcherAmount = stretch_amount;
-
-
- /* Populate Sclk_CKS_masterEn0_7 and Sclk_voltageOffset */
- for (i = 0; i < sclk_table->count; i++) {
- smu_data->smc_state_table.Sclk_CKS_masterEn0_7 |=
- sclk_table->entries[i].cks_enable << i;
- if (ASICID_IS_TONGA_P(dev_id, hw_revision)) {
- volt_without_cks = (uint32_t)((7732 + 60 - ro - 20838 *
- (sclk_table->entries[i].clk/100) / 10000) * 1000 /
- (8730 - (5301 * (sclk_table->entries[i].clk/100) / 1000)));
- volt_with_cks = (uint32_t)((5250 + 51 - ro - 2404 *
- (sclk_table->entries[i].clk/100) / 100000) * 1000 /
- (6146 - (3193 * (sclk_table->entries[i].clk/100) / 1000)));
- } else {
- volt_without_cks = (uint32_t)((14041 *
- (sclk_table->entries[i].clk/100) / 10000 + 3571 + 75 - ro) * 1000 /
- (4026 - (13924 * (sclk_table->entries[i].clk/100) / 10000)));
- volt_with_cks = (uint32_t)((13946 *
- (sclk_table->entries[i].clk/100) / 10000 + 3320 + 45 - ro) * 1000 /
- (3664 - (11454 * (sclk_table->entries[i].clk/100) / 10000)));
- }
- if (volt_without_cks >= volt_with_cks)
- volt_offset = (uint8_t)(((volt_without_cks - volt_with_cks +
- sclk_table->entries[i].cks_voffset) * 100 / 625) + 1);
- smu_data->smc_state_table.Sclk_voltageOffset[i] = volt_offset;
- }
-
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
- STRETCH_ENABLE, 0x0);
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
- masterReset, 0x1);
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
- staticEnable, 0x1);
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
- masterReset, 0x0);
-
- /* Populate CKS Lookup Table */
- if (stretch_amount == 1 || stretch_amount == 2 || stretch_amount == 5)
- stretch_amount2 = 0;
- else if (stretch_amount == 3 || stretch_amount == 4)
- stretch_amount2 = 1;
- else {
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ClockStretcher);
- PP_ASSERT_WITH_CODE(false,
- "Stretch Amount in PPTable not supported\n",
- return -EINVAL);
- }
-
- value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixPWR_CKS_CNTL);
- value &= 0xFFC2FF87;
- smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].minFreq =
- tonga_clock_stretcher_lookup_table[stretch_amount2][0];
- smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].maxFreq =
- tonga_clock_stretcher_lookup_table[stretch_amount2][1];
- clock_freq_u16 = (uint16_t)(PP_SMC_TO_HOST_UL(smu_data->smc_state_table.
- GraphicsLevel[smu_data->smc_state_table.GraphicsDpmLevelCount - 1].
- SclkFrequency) / 100);
- if (tonga_clock_stretcher_lookup_table[stretch_amount2][0] <
- clock_freq_u16 &&
- tonga_clock_stretcher_lookup_table[stretch_amount2][1] >
- clock_freq_u16) {
- /* Program PWR_CKS_CNTL. CKS_USE_FOR_LOW_FREQ */
- value |= (tonga_clock_stretcher_lookup_table[stretch_amount2][3]) << 16;
- /* Program PWR_CKS_CNTL. CKS_LDO_REFSEL */
- value |= (tonga_clock_stretcher_lookup_table[stretch_amount2][2]) << 18;
- /* Program PWR_CKS_CNTL. CKS_STRETCH_AMOUNT */
- value |= (tonga_clock_stretch_amount_conversion
- [tonga_clock_stretcher_lookup_table[stretch_amount2][3]]
- [stretch_amount]) << 3;
- }
- CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.CKS_LOOKUPTable.
- CKS_LOOKUPTableEntry[0].minFreq);
- CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.CKS_LOOKUPTable.
- CKS_LOOKUPTableEntry[0].maxFreq);
- smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].setting =
- tonga_clock_stretcher_lookup_table[stretch_amount2][2] & 0x7F;
- smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].setting |=
- (tonga_clock_stretcher_lookup_table[stretch_amount2][3]) << 7;
-
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixPWR_CKS_CNTL, value);
-
- /* Populate DDT Lookup Table */
- for (i = 0; i < 4; i++) {
- /* Assign the minimum and maximum VID stored
- * in the last row of Clock Stretcher Voltage Table.
- */
- smu_data->smc_state_table.ClockStretcherDataTable.
- ClockStretcherDataTableEntry[i].minVID =
- (uint8_t) tonga_clock_stretcher_ddt_table[type][i][2];
- smu_data->smc_state_table.ClockStretcherDataTable.
- ClockStretcherDataTableEntry[i].maxVID =
- (uint8_t) tonga_clock_stretcher_ddt_table[type][i][3];
- /* Loop through each SCLK and check the frequency
- * to see if it lies within the frequency range for the clock stretcher.
- */
- for (j = 0; j < smu_data->smc_state_table.GraphicsDpmLevelCount; j++) {
- cks_setting = 0;
- clock_freq = PP_SMC_TO_HOST_UL(
- smu_data->smc_state_table.GraphicsLevel[j].SclkFrequency);
- /* Check the allowed frequency against the sclk level[j].
- * Sclk's endianness has already been converted,
- * and it is in 10 KHz units,
- * as opposed to the data table, which is in MHz units.
- */
- if (clock_freq >= tonga_clock_stretcher_ddt_table[type][i][0] * 100) {
- cks_setting |= 0x2;
- if (clock_freq < tonga_clock_stretcher_ddt_table[type][i][1] * 100)
- cks_setting |= 0x1;
- }
- smu_data->smc_state_table.ClockStretcherDataTable.
- ClockStretcherDataTableEntry[i].setting |= cks_setting << (j * 2);
- }
- CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.
- ClockStretcherDataTable.
- ClockStretcherDataTableEntry[i].setting);
- }
-
- value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixPWR_CKS_CNTL);
- value &= 0xFFFFFFFE;
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixPWR_CKS_CNTL, value);
-
- return 0;
-}
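The clock-stretcher setup above starts by decoding two efuse fields into a ring-oscillator (RO) value and classifying the part as SS or FF. That decode is a pair of linear maps; a stand-alone sketch is below, with the raw efuse words as made-up inputs.

#include <stdint.h>
#include <stdio.h>

/* Decode RO from the efuse words as in the function above:
 * efuse is the top byte of word 146, efuse2 the low nibble of word 148.
 */
static uint32_t decode_ro(uint32_t efuse_word146, uint32_t efuse_word148)
{
	uint32_t efuse = (efuse_word146 & 0xFF000000) >> 24;
	uint32_t efuse2 = efuse_word148 & 0xF;

	if (efuse2 == 1)
		return (2300 - 1350) * efuse / 255 + 1350;
	return (2500 - 1000) * efuse / 255 + 1000;
}

int main(void)
{
	uint32_t ro = decode_ro(0xB4000000, 0x00000001);	/* made-up words */

	/* RO >= 1660 means the part is classified as FF, otherwise SS. */
	printf("RO=%u -> %s part\n", (unsigned)ro, ro >= 1660 ? "FF" : "SS");
	return 0;
}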
-
-/**
- * Populates the SMC VRConfig field in DPM table.
- *
- * @param hwmgr the address of the hardware manager
- * @param table the SMC DPM table structure to be populated
- * @return always 0
- */
-static int tonga_populate_vr_config(struct pp_hwmgr *hwmgr,
- SMU72_Discrete_DpmTable *table)
-{
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- uint16_t config;
-
- if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_gfx_control) {
- /* Split mode */
- config = VR_SVI2_PLANE_1;
- table->VRConfig |= (config<<VRCONF_VDDGFX_SHIFT);
-
- if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
- config = VR_SVI2_PLANE_2;
- table->VRConfig |= config;
- } else {
- pr_err("VDDC and VDDGFX should "
- "both be on SVI2 control in split mode !\n");
- }
- } else {
- /* Merged mode */
- config = VR_MERGED_WITH_VDDC;
- table->VRConfig |= (config<<VRCONF_VDDGFX_SHIFT);
-
- /* Set Vddc Voltage Controller */
- if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
- config = VR_SVI2_PLANE_1;
- table->VRConfig |= config;
- } else {
- pr_err("VDDC should be on "
- "SVI2 control in merged mode !\n");
- }
- }
-
- /* Set Vddci Voltage Controller */
- if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
- config = VR_SVI2_PLANE_2; /* only in merged mode */
- table->VRConfig |= (config<<VRCONF_VDDCI_SHIFT);
- } else if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
- config = VR_SMIO_PATTERN_1;
- table->VRConfig |= (config<<VRCONF_VDDCI_SHIFT);
- }
-
- /* Set Mvdd Voltage Controller */
- if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
- config = VR_SMIO_PATTERN_2;
- table->VRConfig |= (config<<VRCONF_MVDD_SHIFT);
- }
-
- return 0;
-}
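VRConfig above is a packed field: one regulator selection per rail, each shifted into its own position. The sketch below shows the same packing with illustrative shift and plane constants standing in for the driver's VRCONF_*/VR_* macros (the real values live in the driver's headers).

#include <stdint.h>
#include <stdio.h>

/* Illustrative constants only; the driver defines the real VRCONF_*_SHIFT
 * and VR_* values elsewhere.
 */
#define CONF_VDDGFX_SHIFT 8
#define CONF_VDDCI_SHIFT  16
#define CONF_MVDD_SHIFT   24

#define PLANE_SVI2_1      0x1
#define SMIO_PATTERN_1    0x5
#define SMIO_PATTERN_2    0x6
#define MERGED_WITH_VDDC  0x0

int main(void)
{
	uint32_t vr_config = 0;

	/* Merged mode: VDDGFX follows VDDC, VDDC itself on SVI2 plane 1. */
	vr_config |= MERGED_WITH_VDDC << CONF_VDDGFX_SHIFT;
	vr_config |= PLANE_SVI2_1;

	/* VDDCI on a GPIO/SMIO pattern, MVDD on a second SMIO pattern. */
	vr_config |= SMIO_PATTERN_1 << CONF_VDDCI_SHIFT;
	vr_config |= SMIO_PATTERN_2 << CONF_MVDD_SHIFT;

	printf("VRConfig = 0x%08x\n", (unsigned)vr_config);
	return 0;
}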
-
-
-/**
- * Initialize the ARB DRAM timing table's index field.
- *
- * @param smumgr the address of the SMU manager.
- * @return 0 on success, otherwise a negative error code.
- */
-static int tonga_init_arb_table_index(struct pp_smumgr *smumgr)
-{
- struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(smumgr->backend);
- uint32_t tmp;
- int result;
-
- /*
- * This is a read-modify-write on the first byte of the ARB table.
- * The first byte in the SMU72_Discrete_MCArbDramTimingTable structure
- * is the field 'current'.
- * This solution is ugly, but we never write the whole table, only
- * individual fields in it.
- * In reality this field should not be in that structure
- * but in a soft register.
- */
- result = smu7_read_smc_sram_dword(smumgr,
- smu_data->smu7_data.arb_table_start, &tmp, SMC_RAM_END);
-
- if (result != 0)
- return result;
-
- tmp &= 0x00FFFFFF;
- tmp |= ((uint32_t)MC_CG_ARB_FREQ_F1) << 24;
-
- return smu7_write_smc_sram_dword(smumgr,
- smu_data->smu7_data.arb_table_start, tmp, SMC_RAM_END);
-}
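The init above is a dword read-modify-write that replaces only the top byte of the word at the start of the ARB table while preserving the low 24 bits. A stand-alone sketch of the mask-and-or is shown below; the arbiter-set value is illustrative.

#include <stdint.h>
#include <stdio.h>

/* Replace the top byte of an SMC SRAM dword with the new arbiter set id,
 * leaving the low 24 bits untouched, as the function above does.
 */
static uint32_t set_current_arb_set(uint32_t dword, uint8_t arb_set)
{
	dword &= 0x00FFFFFF;
	dword |= (uint32_t)arb_set << 24;
	return dword;
}

int main(void)
{
	uint32_t before = 0x00ABCDEF;	/* made-up SRAM contents */
	uint32_t after = set_current_arb_set(before, 0x0B);

	printf("0x%08x -> 0x%08x\n", (unsigned)before, (unsigned)after);
	return 0;
}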
-
-
-static int tonga_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr)
-{
- struct tonga_smumgr *smu_data =
- (struct tonga_smumgr *)(hwmgr->smumgr->backend);
- const struct tonga_pt_defaults *defaults = smu_data->power_tune_defaults;
- SMU72_Discrete_DpmTable *dpm_table = &(smu_data->smc_state_table);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct phm_cac_tdp_table *cac_dtp_table = table_info->cac_dtp_table;
- int i, j, k;
- const uint16_t *pdef1, *pdef2;
-
- dpm_table->DefaultTdp = PP_HOST_TO_SMC_US(
- (uint16_t)(cac_dtp_table->usTDP * 256));
- dpm_table->TargetTdp = PP_HOST_TO_SMC_US(
- (uint16_t)(cac_dtp_table->usConfigurableTDP * 256));
-
- PP_ASSERT_WITH_CODE(cac_dtp_table->usTargetOperatingTemp <= 255,
- "Target Operating Temp is out of Range !",
- );
-
- dpm_table->GpuTjMax = (uint8_t)(cac_dtp_table->usTargetOperatingTemp);
- dpm_table->GpuTjHyst = 8;
-
- dpm_table->DTEAmbientTempBase = defaults->dte_ambient_temp_base;
-
- dpm_table->BAPM_TEMP_GRADIENT =
- PP_HOST_TO_SMC_UL(defaults->bamp_temp_gradient);
- pdef1 = defaults->bapmti_r;
- pdef2 = defaults->bapmti_rc;
-
- for (i = 0; i < SMU72_DTE_ITERATIONS; i++) {
- for (j = 0; j < SMU72_DTE_SOURCES; j++) {
- for (k = 0; k < SMU72_DTE_SINKS; k++) {
- dpm_table->BAPMTI_R[i][j][k] =
- PP_HOST_TO_SMC_US(*pdef1);
- dpm_table->BAPMTI_RC[i][j][k] =
- PP_HOST_TO_SMC_US(*pdef2);
- pdef1++;
- pdef2++;
- }
- }
- }
-
- return 0;
-}
-
-static int tonga_populate_svi_load_line(struct pp_hwmgr *hwmgr)
-{
- struct tonga_smumgr *smu_data =
- (struct tonga_smumgr *)(hwmgr->smumgr->backend);
- const struct tonga_pt_defaults *defaults = smu_data->power_tune_defaults;
-
- smu_data->power_tune_table.SviLoadLineEn = defaults->svi_load_line_en;
- smu_data->power_tune_table.SviLoadLineVddC = defaults->svi_load_line_vddC;
- smu_data->power_tune_table.SviLoadLineTrimVddC = 3;
- smu_data->power_tune_table.SviLoadLineOffsetVddC = 0;
-
- return 0;
-}
-
-static int tonga_populate_tdc_limit(struct pp_hwmgr *hwmgr)
-{
- uint16_t tdc_limit;
- struct tonga_smumgr *smu_data =
- (struct tonga_smumgr *)(hwmgr->smumgr->backend);
- const struct tonga_pt_defaults *defaults = smu_data->power_tune_defaults;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
- /* The number of TDC fraction bits was changed from 8 to 7
- * for Fiji as requested by the SMC team.
- */
- tdc_limit = (uint16_t)(table_info->cac_dtp_table->usTDC * 256);
- smu_data->power_tune_table.TDC_VDDC_PkgLimit =
- CONVERT_FROM_HOST_TO_SMC_US(tdc_limit);
- smu_data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
- defaults->tdc_vddc_throttle_release_limit_perc;
- smu_data->power_tune_table.TDC_MAWt = defaults->tdc_mawt;
-
- return 0;
-}
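The TDC limit above is stored in a fixed-point encoding (the `* 256` here gives eight fraction bits) and then byte-swapped for the SMC. A tiny sketch of that conversion with a made-up TDC value:

#include <stdint.h>
#include <stdio.h>

/* Convert an integer TDC value to the 8.8 fixed-point encoding used above
 * and swap it to big-endian for the SMC. The value is illustrative.
 */
int main(void)
{
	uint16_t tdc = 120;				/* e.g. 120 A */
	uint16_t fixed = (uint16_t)(tdc * 256);		/* 8 fraction bits */
	uint16_t swapped = (uint16_t)((fixed >> 8) | (fixed << 8));

	printf("tdc=%u fixed=0x%04x big-endian=0x%04x\n",
	       (unsigned)tdc, (unsigned)fixed, (unsigned)swapped);
	return 0;
}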
-
-static int tonga_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
-{
- struct tonga_smumgr *smu_data =
- (struct tonga_smumgr *)(hwmgr->smumgr->backend);
- const struct tonga_pt_defaults *defaults = smu_data->power_tune_defaults;
- uint32_t temp;
-
- if (smu7_read_smc_sram_dword(hwmgr->smumgr,
- fuse_table_offset +
- offsetof(SMU72_Discrete_PmFuses, TdcWaterfallCtl),
- (uint32_t *)&temp, SMC_RAM_END))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to read PmFuses.DW8 "
- "(TdcWaterfallCtl) from SMC Failed !",
- return -EINVAL);
- else
- smu_data->power_tune_table.TdcWaterfallCtl = defaults->tdc_waterfall_ctl;
-
- return 0;
-}
-
-static int tonga_populate_temperature_scaler(struct pp_hwmgr *hwmgr)
-{
- int i;
- struct tonga_smumgr *smu_data =
- (struct tonga_smumgr *)(hwmgr->smumgr->backend);
-
- /* Currently not used. Set all to zero. */
- for (i = 0; i < 16; i++)
- smu_data->power_tune_table.LPMLTemperatureScaler[i] = 0;
-
- return 0;
-}
-
-static int tonga_populate_fuzzy_fan(struct pp_hwmgr *hwmgr)
-{
- struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smumgr->backend);
-
- if ((hwmgr->thermal_controller.advanceFanControlParameters.
- usFanOutputSensitivity & (1 << 15)) ||
- (hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity == 0))
- hwmgr->thermal_controller.advanceFanControlParameters.
- usFanOutputSensitivity = hwmgr->thermal_controller.
- advanceFanControlParameters.usDefaultFanOutputSensitivity;
-
- smu_data->power_tune_table.FuzzyFan_PwmSetDelta =
- PP_HOST_TO_SMC_US(hwmgr->thermal_controller.
- advanceFanControlParameters.usFanOutputSensitivity);
- return 0;
-}
-
-static int tonga_populate_gnb_lpml(struct pp_hwmgr *hwmgr)
-{
- int i;
- struct tonga_smumgr *smu_data =
- (struct tonga_smumgr *)(hwmgr->smumgr->backend);
-
- /* Currently not used. Set all to zero. */
- for (i = 0; i < 16; i++)
- smu_data->power_tune_table.GnbLPML[i] = 0;
-
- return 0;
-}
-
-static int tonga_min_max_vgnb_lpml_id_from_bapm_vddc(struct pp_hwmgr *hwmgr)
-{
- return 0;
-}
-
-static int tonga_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr)
-{
- struct tonga_smumgr *smu_data =
- (struct tonga_smumgr *)(hwmgr->smumgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- uint16_t hi_sidd = smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd;
- uint16_t lo_sidd = smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd;
- struct phm_cac_tdp_table *cac_table = table_info->cac_dtp_table;
-
- hi_sidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256);
- lo_sidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256);
-
- smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd =
- CONVERT_FROM_HOST_TO_SMC_US(hi_sidd);
- smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd =
- CONVERT_FROM_HOST_TO_SMC_US(lo_sidd);
-
- return 0;
-}
-
-static int tonga_populate_pm_fuses(struct pp_hwmgr *hwmgr)
-{
- struct tonga_smumgr *smu_data =
- (struct tonga_smumgr *)(hwmgr->smumgr->backend);
- uint32_t pm_fuse_table_offset;
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_PowerContainment)) {
- if (smu7_read_smc_sram_dword(hwmgr->smumgr,
- SMU72_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU72_Firmware_Header, PmFuseTable),
- &pm_fuse_table_offset, SMC_RAM_END))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to get pm_fuse_table_offset Failed !",
- return -EINVAL);
-
- /* DW6 */
- if (tonga_populate_svi_load_line(hwmgr))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate SviLoadLine Failed !",
- return -EINVAL);
- /* DW7 */
- if (tonga_populate_tdc_limit(hwmgr))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate TDCLimit Failed !",
- return -EINVAL);
- /* DW8 */
- if (tonga_populate_dw8(hwmgr, pm_fuse_table_offset))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate TdcWaterfallCtl Failed !",
- return -EINVAL);
-
- /* DW9-DW12 */
- if (tonga_populate_temperature_scaler(hwmgr) != 0)
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate LPMLTemperatureScaler Failed !",
- return -EINVAL);
-
- /* DW13-DW14 */
- if (tonga_populate_fuzzy_fan(hwmgr))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate Fuzzy Fan "
- "Control parameters Failed !",
- return -EINVAL);
-
- /* DW15-DW18 */
- if (tonga_populate_gnb_lpml(hwmgr))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate GnbLPML Failed !",
- return -EINVAL);
-
- /* DW19 */
- if (tonga_min_max_vgnb_lpml_id_from_bapm_vddc(hwmgr))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate GnbLPML "
- "Min and Max Vid Failed !",
- return -EINVAL);
-
- /* DW20 */
- if (tonga_populate_bapm_vddc_base_leakage_sidd(hwmgr))
- PP_ASSERT_WITH_CODE(
- false,
- "Attempt to populate BapmVddCBaseLeakage "
- "Hi and Lo Sidd Failed !",
- return -EINVAL);
-
- if (smu7_copy_bytes_to_smc(hwmgr->smumgr, pm_fuse_table_offset,
- (uint8_t *)&smu_data->power_tune_table,
- sizeof(struct SMU72_Discrete_PmFuses), SMC_RAM_END))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to download PmFuseTable Failed !",
- return -EINVAL);
- }
- return 0;
-}
-
-static int tonga_populate_mc_reg_address(struct pp_smumgr *smumgr,
- SMU72_Discrete_MCRegisters *mc_reg_table)
-{
- const struct tonga_smumgr *smu_data = (struct tonga_smumgr *)smumgr->backend;
-
- uint32_t i, j;
-
- for (i = 0, j = 0; j < smu_data->mc_reg_table.last; j++) {
- if (smu_data->mc_reg_table.validflag & 1<<j) {
- PP_ASSERT_WITH_CODE(
- i < SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE,
- "Index of mc_reg_table->address[] array "
- "out of boundary",
- return -EINVAL);
- mc_reg_table->address[i].s0 =
- PP_HOST_TO_SMC_US(smu_data->mc_reg_table.mc_reg_address[j].s0);
- mc_reg_table->address[i].s1 =
- PP_HOST_TO_SMC_US(smu_data->mc_reg_table.mc_reg_address[j].s1);
- i++;
- }
- }
-
- mc_reg_table->last = (uint8_t)i;
-
- return 0;
-}
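tonga_populate_mc_reg_address above compacts a sparse register list: only entries whose bit is set in `validflag` are copied, so index `i` runs over valid entries while `j` runs over the full table. The sketch below shows the same compaction over plain integers; the bound check mirrors the array-size assert.

#include <stdint.h>
#include <stdio.h>

#define DEST_SIZE 16

/* Copy only the entries whose bit is set in valid_mask, keeping them
 * densely packed at the front of dest. Returns the packed count, or -1
 * if dest would overflow.
 */
static int compact_by_mask(const uint16_t *src, uint32_t last,
			   uint32_t valid_mask, uint16_t *dest)
{
	uint32_t i = 0, j;

	for (j = 0; j < last; j++) {
		if (!(valid_mask & (1u << j)))
			continue;
		if (i >= DEST_SIZE)
			return -1;
		dest[i++] = src[j];
	}
	return (int)i;
}

int main(void)
{
	const uint16_t src[] = { 10, 20, 30, 40, 50 };
	uint16_t dest[DEST_SIZE];
	int n = compact_by_mask(src, 5, 0x15, dest);	/* bits 0, 2, 4 set */

	printf("packed %d entries: %u %u %u\n", n,
	       (unsigned)dest[0], (unsigned)dest[1], (unsigned)dest[2]);
	return 0;
}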
-
-/*convert register values from driver to SMC format */
-static void tonga_convert_mc_registers(
- const struct tonga_mc_reg_entry *entry,
- SMU72_Discrete_MCRegisterSet *data,
- uint32_t num_entries, uint32_t valid_flag)
-{
- uint32_t i, j;
-
- for (i = 0, j = 0; j < num_entries; j++) {
- if (valid_flag & 1<<j) {
- data->value[i] = PP_HOST_TO_SMC_UL(entry->mc_data[j]);
- i++;
- }
- }
-}
-
-static int tonga_convert_mc_reg_table_entry_to_smc(
- struct pp_smumgr *smumgr,
- const uint32_t memory_clock,
- SMU72_Discrete_MCRegisterSet *mc_reg_table_data
- )
-{
- struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(smumgr->backend);
- uint32_t i = 0;
-
- for (i = 0; i < smu_data->mc_reg_table.num_entries; i++) {
- if (memory_clock <=
- smu_data->mc_reg_table.mc_reg_table_entry[i].mclk_max) {
- break;
- }
- }
-
- if ((i == smu_data->mc_reg_table.num_entries) && (i > 0))
- --i;
-
- tonga_convert_mc_registers(&smu_data->mc_reg_table.mc_reg_table_entry[i],
- mc_reg_table_data, smu_data->mc_reg_table.last,
- smu_data->mc_reg_table.validflag);
-
- return 0;
-}
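The entry-to-SMC conversion above selects which MC register set to use by finding the first table entry whose `mclk_max` covers the requested memory clock, falling back to the last entry. That lookup is sketched below in isolation; the types are simplified.

#include <stdint.h>
#include <stdio.h>

struct mc_reg_entry { uint32_t mclk_max; };

/* First entry whose mclk_max >= memory_clock, else the last entry. */
static uint32_t pick_mc_reg_entry(const struct mc_reg_entry *tbl,
				  uint32_t num_entries, uint32_t memory_clock)
{
	uint32_t i;

	for (i = 0; i < num_entries; i++)
		if (memory_clock <= tbl[i].mclk_max)
			break;

	if (i == num_entries && i > 0)
		i--;
	return i;
}

int main(void)
{
	const struct mc_reg_entry tbl[] = { { 30000 }, { 60000 }, { 80000 } };

	printf("entry for 650 MHz: %u\n",
	       (unsigned)pick_mc_reg_entry(tbl, 3, 65000));
	printf("entry for 900 MHz: %u\n",
	       (unsigned)pick_mc_reg_entry(tbl, 3, 90000));
	return 0;
}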
-
-static int tonga_convert_mc_reg_table_to_smc(struct pp_hwmgr *hwmgr,
- SMU72_Discrete_MCRegisters *mc_regs)
-{
- int result = 0;
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- int res;
- uint32_t i;
-
- for (i = 0; i < data->dpm_table.mclk_table.count; i++) {
- res = tonga_convert_mc_reg_table_entry_to_smc(
- hwmgr->smumgr,
- data->dpm_table.mclk_table.dpm_levels[i].value,
- &mc_regs->data[i]
- );
-
- if (0 != res)
- result = res;
- }
-
- return result;
-}
-
-static int tonga_update_and_upload_mc_reg_table(struct pp_hwmgr *hwmgr)
-{
- struct pp_smumgr *smumgr = hwmgr->smumgr;
- struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(smumgr->backend);
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- uint32_t address;
- int32_t result;
-
- if (0 == (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK))
- return 0;
-
-
- memset(&smu_data->mc_regs, 0, sizeof(SMU72_Discrete_MCRegisters));
-
- result = tonga_convert_mc_reg_table_to_smc(hwmgr, &(smu_data->mc_regs));
-
- if (result != 0)
- return result;
-
-
- address = smu_data->smu7_data.mc_reg_table_start +
- (uint32_t)offsetof(SMU72_Discrete_MCRegisters, data[0]);
-
- return smu7_copy_bytes_to_smc(
- hwmgr->smumgr, address,
- (uint8_t *)&smu_data->mc_regs.data[0],
- sizeof(SMU72_Discrete_MCRegisterSet) *
- data->dpm_table.mclk_table.count,
- SMC_RAM_END);
-}
-
-static int tonga_populate_initial_mc_reg_table(struct pp_hwmgr *hwmgr)
-{
- int result;
- struct pp_smumgr *smumgr = hwmgr->smumgr;
- struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(smumgr->backend);
-
- memset(&smu_data->mc_regs, 0x00, sizeof(SMU72_Discrete_MCRegisters));
- result = tonga_populate_mc_reg_address(smumgr, &(smu_data->mc_regs));
- PP_ASSERT_WITH_CODE(!result,
- "Failed to initialize MCRegTable for the MC register addresses !",
- return result;);
-
- result = tonga_convert_mc_reg_table_to_smc(hwmgr, &smu_data->mc_regs);
- PP_ASSERT_WITH_CODE(!result,
- "Failed to initialize MCRegTable for driver state !",
- return result;);
-
- return smu7_copy_bytes_to_smc(smumgr, smu_data->smu7_data.mc_reg_table_start,
- (uint8_t *)&smu_data->mc_regs, sizeof(SMU72_Discrete_MCRegisters), SMC_RAM_END);
-}
-
-static void tonga_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
-{
- struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smumgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
- if (table_info &&
- table_info->cac_dtp_table->usPowerTuneDataSetID <= POWERTUNE_DEFAULT_SET_MAX &&
- table_info->cac_dtp_table->usPowerTuneDataSetID)
- smu_data->power_tune_defaults =
- &tonga_power_tune_data_set_array
- [table_info->cac_dtp_table->usPowerTuneDataSetID - 1];
- else
- smu_data->power_tune_defaults = &tonga_power_tune_data_set_array[0];
-}
-
-static void tonga_save_default_power_profile(struct pp_hwmgr *hwmgr)
-{
- struct tonga_smumgr *data = (struct tonga_smumgr *)(hwmgr->smumgr->backend);
- struct SMU72_Discrete_GraphicsLevel *levels =
- data->smc_state_table.GraphicsLevel;
- unsigned min_level = 1;
-
- hwmgr->default_gfx_power_profile.activity_threshold =
- be16_to_cpu(levels[0].ActivityLevel);
- hwmgr->default_gfx_power_profile.up_hyst = levels[0].UpHyst;
- hwmgr->default_gfx_power_profile.down_hyst = levels[0].DownHyst;
- hwmgr->default_gfx_power_profile.type = AMD_PP_GFX_PROFILE;
-
- hwmgr->default_compute_power_profile = hwmgr->default_gfx_power_profile;
- hwmgr->default_compute_power_profile.type = AMD_PP_COMPUTE_PROFILE;
-
- /* Workaround compute SDMA instability: disable lowest SCLK
- * DPM level. Optimize compute power profile: Use only highest
- * 2 power levels (if more than 2 are available), Hysteresis:
- * 0ms up, 5ms down
- */
- if (data->smc_state_table.GraphicsDpmLevelCount > 2)
- min_level = data->smc_state_table.GraphicsDpmLevelCount - 2;
- else if (data->smc_state_table.GraphicsDpmLevelCount == 2)
- min_level = 1;
- else
- min_level = 0;
- hwmgr->default_compute_power_profile.min_sclk =
- be32_to_cpu(levels[min_level].SclkFrequency);
- hwmgr->default_compute_power_profile.up_hyst = 0;
- hwmgr->default_compute_power_profile.down_hyst = 5;
-
- hwmgr->gfx_power_profile = hwmgr->default_gfx_power_profile;
- hwmgr->compute_power_profile = hwmgr->default_compute_power_profile;
-}
-
-/**
- * Initializes the SMC table and uploads it to SMC memory.
- *
- * @param hwmgr the address of the powerplay hardware manager.
- * @return 0 on success, a negative error code otherwise.
- */
-int tonga_init_smc_table(struct pp_hwmgr *hwmgr)
-{
- int result;
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct tonga_smumgr *smu_data =
- (struct tonga_smumgr *)(hwmgr->smumgr->backend);
- SMU72_Discrete_DpmTable *table = &(smu_data->smc_state_table);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
- uint8_t i;
- pp_atomctrl_gpio_pin_assignment gpio_pin_assignment;
-
-
- memset(&(smu_data->smc_state_table), 0x00, sizeof(smu_data->smc_state_table));
-
- tonga_initialize_power_tune_defaults(hwmgr);
-
- if (SMU7_VOLTAGE_CONTROL_NONE != data->voltage_control)
- tonga_populate_smc_voltage_tables(hwmgr, table);
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_AutomaticDCTransition))
- table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
-
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_StepVddc))
- table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
-
- if (data->is_memory_gddr5)
- table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
-
- i = PHM_READ_FIELD(hwmgr->device, CC_MC_MAX_CHANNEL, NOOFCHAN);
-
- if (i == 1 || i == 0)
- table->SystemFlags |= 0x40;
-
- if (data->ulv_supported && table_info->us_ulv_voltage_offset) {
- result = tonga_populate_ulv_state(hwmgr, table);
- PP_ASSERT_WITH_CODE(!result,
- "Failed to initialize ULV state !",
- return result;);
-
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_ULV_PARAMETER, 0x40035);
- }
-
- result = tonga_populate_smc_link_level(hwmgr, table);
- PP_ASSERT_WITH_CODE(!result,
- "Failed to initialize Link Level !", return result);
-
- result = tonga_populate_all_graphic_levels(hwmgr);
- PP_ASSERT_WITH_CODE(!result,
- "Failed to initialize Graphics Level !", return result);
-
- result = tonga_populate_all_memory_levels(hwmgr);
- PP_ASSERT_WITH_CODE(!result,
- "Failed to initialize Memory Level !", return result);
-
- result = tonga_populate_smc_acpi_level(hwmgr, table);
- PP_ASSERT_WITH_CODE(!result,
- "Failed to initialize ACPI Level !", return result);
-
- result = tonga_populate_smc_vce_level(hwmgr, table);
- PP_ASSERT_WITH_CODE(!result,
- "Failed to initialize VCE Level !", return result);
-
- result = tonga_populate_smc_acp_level(hwmgr, table);
- PP_ASSERT_WITH_CODE(!result,
- "Failed to initialize ACP Level !", return result);
-
- result = tonga_populate_smc_samu_level(hwmgr, table);
- PP_ASSERT_WITH_CODE(!result,
- "Failed to initialize SAMU Level !", return result);
-
- /* Since only the initial state is completely set up at this
- * point (the other states are just copies of the boot state) we only
- * need to populate the ARB settings for the initial state.
- */
- result = tonga_program_memory_timing_parameters(hwmgr);
- PP_ASSERT_WITH_CODE(!result,
- "Failed to Write ARB settings for the initial state.",
- return result;);
-
- result = tonga_populate_smc_uvd_level(hwmgr, table);
- PP_ASSERT_WITH_CODE(!result,
- "Failed to initialize UVD Level !", return result);
-
- result = tonga_populate_smc_boot_level(hwmgr, table);
- PP_ASSERT_WITH_CODE(!result,
- "Failed to initialize Boot Level !", return result);
-
- tonga_populate_bapm_parameters_in_dpm_table(hwmgr);
- PP_ASSERT_WITH_CODE(!result,
- "Failed to populate BAPM Parameters !", return result);
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ClockStretcher)) {
- result = tonga_populate_clock_stretcher_data_table(hwmgr);
- PP_ASSERT_WITH_CODE(!result,
- "Failed to populate Clock Stretcher Data Table !",
- return result;);
- }
- table->GraphicsVoltageChangeEnable = 1;
- table->GraphicsThermThrottleEnable = 1;
- table->GraphicsInterval = 1;
- table->VoltageInterval = 1;
- table->ThermalInterval = 1;
- table->TemperatureLimitHigh =
- table_info->cac_dtp_table->usTargetOperatingTemp *
- SMU7_Q88_FORMAT_CONVERSION_UNIT;
- table->TemperatureLimitLow =
- (table_info->cac_dtp_table->usTargetOperatingTemp - 1) *
- SMU7_Q88_FORMAT_CONVERSION_UNIT;
- table->MemoryVoltageChangeEnable = 1;
- table->MemoryInterval = 1;
- table->VoltageResponseTime = 0;
- table->PhaseResponseTime = 0;
- table->MemoryThermThrottleEnable = 1;
-
-	/*
-	 * CAIL reads the current link status and reports it as a cap (we cannot
-	 * change this due to previous issues). The SMC drops the link status to
-	 * the lowest level after PowerPlay enables DPM. After PnP or toggling CF,
-	 * the driver gets reloaded and CAIL again reads the current link status,
-	 * which the SMC had set to low, and reports it as the cap to PowerPlay.
-	 * To avoid this, set PCIeBootLinkLevel to the highest DPM level.
-	 */
- PP_ASSERT_WITH_CODE((1 <= data->dpm_table.pcie_speed_table.count),
- "There must be 1 or more PCIE levels defined in PPTable.",
- return -EINVAL);
-
- table->PCIeBootLinkLevel = (uint8_t) (data->dpm_table.pcie_speed_table.count);
-
- table->PCIeGenInterval = 1;
-
- result = tonga_populate_vr_config(hwmgr, table);
- PP_ASSERT_WITH_CODE(!result,
- "Failed to populate VRConfig setting !", return result);
-
- table->ThermGpio = 17;
- table->SclkStepSize = 0x4000;
-
- if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_VRHOT_GPIO_PINID,
- &gpio_pin_assignment)) {
- table->VRHotGpio = gpio_pin_assignment.uc_gpio_pin_bit_shift;
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_RegulatorHot);
- } else {
- table->VRHotGpio = SMU7_UNUSED_GPIO_PIN;
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_RegulatorHot);
- }
-
- if (atomctrl_get_pp_assign_pin(hwmgr, PP_AC_DC_SWITCH_GPIO_PINID,
- &gpio_pin_assignment)) {
- table->AcDcGpio = gpio_pin_assignment.uc_gpio_pin_bit_shift;
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_AutomaticDCTransition);
- } else {
- table->AcDcGpio = SMU7_UNUSED_GPIO_PIN;
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_AutomaticDCTransition);
- }
-
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_Falcon_QuickTransition);
-
- if (0) {
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_AutomaticDCTransition);
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_Falcon_QuickTransition);
- }
-
- if (atomctrl_get_pp_assign_pin(hwmgr,
- THERMAL_INT_OUTPUT_GPIO_PINID, &gpio_pin_assignment)) {
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ThermalOutGPIO);
-
- table->ThermOutGpio = gpio_pin_assignment.uc_gpio_pin_bit_shift;
-
- table->ThermOutPolarity =
- (0 == (cgs_read_register(hwmgr->device, mmGPIOPAD_A) &
- (1 << gpio_pin_assignment.uc_gpio_pin_bit_shift))) ? 1 : 0;
-
- table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_ONLY;
-
- /* if required, combine VRHot/PCC with thermal out GPIO*/
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_RegulatorHot) &&
- phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_CombinePCCWithThermalSignal)){
- table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_VRHOT;
- }
- } else {
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ThermalOutGPIO);
-
- table->ThermOutGpio = 17;
- table->ThermOutPolarity = 1;
- table->ThermOutMode = SMU7_THERM_OUT_MODE_DISABLE;
- }
-
- for (i = 0; i < SMU72_MAX_ENTRIES_SMIO; i++)
- table->Smio[i] = PP_HOST_TO_SMC_UL(table->Smio[i]);
-
- CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags);
- CONVERT_FROM_HOST_TO_SMC_UL(table->VRConfig);
- CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask1);
- CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask2);
- CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize);
- CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh);
- CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow);
- CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime);
- CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime);
-
-	/* Upload all dpm data to SMC memory (dpm level, dpm level count, etc.) */
- result = smu7_copy_bytes_to_smc(
- hwmgr->smumgr,
- smu_data->smu7_data.dpm_table_start + offsetof(SMU72_Discrete_DpmTable, SystemFlags),
- (uint8_t *)&(table->SystemFlags),
- sizeof(SMU72_Discrete_DpmTable) - 3 * sizeof(SMU72_PIDController),
- SMC_RAM_END);
-
- PP_ASSERT_WITH_CODE(!result,
- "Failed to upload dpm data to SMC memory !", return result;);
-
- result = tonga_init_arb_table_index(hwmgr->smumgr);
- PP_ASSERT_WITH_CODE(!result,
- "Failed to upload arb data to SMC memory !", return result);
-
- tonga_populate_pm_fuses(hwmgr);
- PP_ASSERT_WITH_CODE((!result),
- "Failed to populate initialize pm fuses !", return result);
-
- result = tonga_populate_initial_mc_reg_table(hwmgr);
- PP_ASSERT_WITH_CODE((!result),
- "Failed to populate initialize MC Reg table !", return result);
-
- tonga_save_default_power_profile(hwmgr);
-
- return 0;
-}
-
-/**
-* Set up the fan table to control the fan using the SMC.
-* @param hwmgr the address of the powerplay hardware manager.
-* @return always 0
-*/
-int tonga_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
-{
- struct tonga_smumgr *smu_data =
- (struct tonga_smumgr *)(hwmgr->smumgr->backend);
- SMU72_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
- uint32_t duty100;
- uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2;
- uint16_t fdo_min, slope1, slope2;
- uint32_t reference_clock;
- int res;
- uint64_t tmp64;
-
- if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_MicrocodeFanControl))
- return 0;
-
- if (hwmgr->thermal_controller.fanInfo.bNoFan) {
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_MicrocodeFanControl);
- return 0;
- }
-
- if (0 == smu_data->smu7_data.fan_table_start) {
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_MicrocodeFanControl);
- return 0;
- }
-
- duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
- CGS_IND_REG__SMC,
- CG_FDO_CTRL1, FMAX_DUTY100);
-
- if (0 == duty100) {
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_MicrocodeFanControl);
- return 0;
- }
-
- tmp64 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin * duty100;
- do_div(tmp64, 10000);
- fdo_min = (uint16_t)tmp64;
-
- t_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usTMed -
- hwmgr->thermal_controller.advanceFanControlParameters.usTMin;
- t_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usTHigh -
- hwmgr->thermal_controller.advanceFanControlParameters.usTMed;
-
- pwm_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed -
- hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin;
- pwm_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh -
- hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed;
-
- slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
- slope2 = (uint16_t)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
-
- fan_table.TempMin = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMin) / 100);
- fan_table.TempMed = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMed) / 100);
- fan_table.TempMax = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMax) / 100);
-
- fan_table.Slope1 = cpu_to_be16(slope1);
- fan_table.Slope2 = cpu_to_be16(slope2);
-
- fan_table.FdoMin = cpu_to_be16(fdo_min);
-
- fan_table.HystDown = cpu_to_be16(hwmgr->thermal_controller.advanceFanControlParameters.ucTHyst);
-
- fan_table.HystUp = cpu_to_be16(1);
-
- fan_table.HystSlope = cpu_to_be16(1);
-
- fan_table.TempRespLim = cpu_to_be16(5);
-
- reference_clock = smu7_get_xclk(hwmgr);
-
- fan_table.RefreshPeriod = cpu_to_be32((hwmgr->thermal_controller.advanceFanControlParameters.ulCycleDelay * reference_clock) / 1600);
-
- fan_table.FdoMax = cpu_to_be16((uint16_t)duty100);
-
- fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_MULT_THERMAL_CTRL, TEMP_SEL);
-
- fan_table.FanControl_GL_Flag = 1;
-
- res = smu7_copy_bytes_to_smc(hwmgr->smumgr,
- smu_data->smu7_data.fan_table_start,
- (uint8_t *)&fan_table,
- (uint32_t)sizeof(fan_table),
- SMC_RAM_END);
-
- return 0;
-}
-
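For reference, a minimal standalone sketch of the fixed-point fan-curve arithmetic used by tonga_thermal_setup_fan_table() above: the two PWM-versus-temperature slopes and the minimum duty value are derived from the controller's (temperature, PWM) control points. The numbers below are illustrative placeholders, not values from any real board.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Illustrative fan-curve parameters (PWM in 0.01%, temperature in 0.01 degC). */
	uint32_t duty100 = 255;		/* FMAX_DUTY100 as read from CG_FDO_CTRL1 */
	uint32_t t_min = 3000, t_med = 6000, t_high = 9000;
	uint32_t pwm_min = 3000, pwm_med = 5000, pwm_high = 10000;

	/* Scale the minimum PWM (0.01% units) into the controller's duty range. */
	uint16_t fdo_min = (uint16_t)((uint64_t)pwm_min * duty100 / 10000);

	/* Slopes in the SMC's fixed-point format, rounded to nearest. */
	uint16_t slope1 = (uint16_t)((50 + 16 * duty100 * (pwm_med - pwm_min) /
				      (t_med - t_min)) / 100);
	uint16_t slope2 = (uint16_t)((50 + 16 * duty100 * (pwm_high - pwm_med) /
				      (t_high - t_med)) / 100);

	printf("fdo_min=%u slope1=%u slope2=%u\n",
	       (unsigned)fdo_min, (unsigned)slope1, (unsigned)slope2);
	return 0;
}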
-
-static int tonga_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
-{
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-
- if (data->need_update_smu7_dpm_table &
- (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_OD_UPDATE_MCLK))
- return tonga_program_memory_timing_parameters(hwmgr);
-
- return 0;
-}
-
-int tonga_update_sclk_threshold(struct pp_hwmgr *hwmgr)
-{
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct tonga_smumgr *smu_data =
- (struct tonga_smumgr *)(hwmgr->smumgr->backend);
-
- int result = 0;
- uint32_t low_sclk_interrupt_threshold = 0;
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_SclkThrottleLowNotification)
- && (hwmgr->gfx_arbiter.sclk_threshold !=
- data->low_sclk_interrupt_threshold)) {
- data->low_sclk_interrupt_threshold =
- hwmgr->gfx_arbiter.sclk_threshold;
- low_sclk_interrupt_threshold =
- data->low_sclk_interrupt_threshold;
-
- CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold);
-
- result = smu7_copy_bytes_to_smc(
- hwmgr->smumgr,
- smu_data->smu7_data.dpm_table_start +
- offsetof(SMU72_Discrete_DpmTable,
- LowSclkInterruptThreshold),
- (uint8_t *)&low_sclk_interrupt_threshold,
- sizeof(uint32_t),
- SMC_RAM_END);
- }
-
- result = tonga_update_and_upload_mc_reg_table(hwmgr);
-
- PP_ASSERT_WITH_CODE((!result),
- "Failed to upload MC reg table !",
- return result);
-
- result = tonga_program_mem_timing_parameters(hwmgr);
- PP_ASSERT_WITH_CODE((result == 0),
- "Failed to program memory timing parameters !",
- );
-
- return result;
-}
-
-uint32_t tonga_get_offsetof(uint32_t type, uint32_t member)
-{
- switch (type) {
- case SMU_SoftRegisters:
- switch (member) {
- case HandshakeDisables:
- return offsetof(SMU72_SoftRegisters, HandshakeDisables);
- case VoltageChangeTimeout:
- return offsetof(SMU72_SoftRegisters, VoltageChangeTimeout);
- case AverageGraphicsActivity:
- return offsetof(SMU72_SoftRegisters, AverageGraphicsActivity);
- case PreVBlankGap:
- return offsetof(SMU72_SoftRegisters, PreVBlankGap);
- case VBlankTimeout:
- return offsetof(SMU72_SoftRegisters, VBlankTimeout);
- case UcodeLoadStatus:
- return offsetof(SMU72_SoftRegisters, UcodeLoadStatus);
- }
- case SMU_Discrete_DpmTable:
- switch (member) {
- case UvdBootLevel:
- return offsetof(SMU72_Discrete_DpmTable, UvdBootLevel);
- case VceBootLevel:
- return offsetof(SMU72_Discrete_DpmTable, VceBootLevel);
- case SamuBootLevel:
- return offsetof(SMU72_Discrete_DpmTable, SamuBootLevel);
- case LowSclkInterruptThreshold:
- return offsetof(SMU72_Discrete_DpmTable, LowSclkInterruptThreshold);
- }
- }
- pr_warn("can't get the offset of type %x member %x\n", type, member);
- return 0;
-}
-
-uint32_t tonga_get_mac_definition(uint32_t value)
-{
- switch (value) {
- case SMU_MAX_LEVELS_GRAPHICS:
- return SMU72_MAX_LEVELS_GRAPHICS;
- case SMU_MAX_LEVELS_MEMORY:
- return SMU72_MAX_LEVELS_MEMORY;
- case SMU_MAX_LEVELS_LINK:
- return SMU72_MAX_LEVELS_LINK;
- case SMU_MAX_ENTRIES_SMIO:
- return SMU72_MAX_ENTRIES_SMIO;
- case SMU_MAX_LEVELS_VDDC:
- return SMU72_MAX_LEVELS_VDDC;
- case SMU_MAX_LEVELS_VDDGFX:
- return SMU72_MAX_LEVELS_VDDGFX;
- case SMU_MAX_LEVELS_VDDCI:
- return SMU72_MAX_LEVELS_VDDCI;
- case SMU_MAX_LEVELS_MVDD:
- return SMU72_MAX_LEVELS_MVDD;
- }
- pr_warn("can't get the mac value %x\n", value);
-
- return 0;
-}
-
-
-static int tonga_update_uvd_smc_table(struct pp_hwmgr *hwmgr)
-{
- struct tonga_smumgr *smu_data =
- (struct tonga_smumgr *)(hwmgr->smumgr->backend);
- uint32_t mm_boot_level_offset, mm_boot_level_value;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
- smu_data->smc_state_table.UvdBootLevel = 0;
- if (table_info->mm_dep_table->count > 0)
- smu_data->smc_state_table.UvdBootLevel =
- (uint8_t) (table_info->mm_dep_table->count - 1);
- mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
- offsetof(SMU72_Discrete_DpmTable, UvdBootLevel);
- mm_boot_level_offset /= 4;
- mm_boot_level_offset *= 4;
- mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, mm_boot_level_offset);
- mm_boot_level_value &= 0x00FFFFFF;
- mm_boot_level_value |= smu_data->smc_state_table.UvdBootLevel << 24;
- cgs_write_ind_register(hwmgr->device,
- CGS_IND_REG__SMC,
- mm_boot_level_offset, mm_boot_level_value);
-
- if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_UVDDPM) ||
- phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_StablePState))
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_UVDDPM_SetEnabledMask,
- (uint32_t)(1 << smu_data->smc_state_table.UvdBootLevel));
- return 0;
-}
-
-static int tonga_update_vce_smc_table(struct pp_hwmgr *hwmgr)
-{
- struct tonga_smumgr *smu_data =
- (struct tonga_smumgr *)(hwmgr->smumgr->backend);
- uint32_t mm_boot_level_offset, mm_boot_level_value;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
-
- smu_data->smc_state_table.VceBootLevel =
- (uint8_t) (table_info->mm_dep_table->count - 1);
-
- mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
- offsetof(SMU72_Discrete_DpmTable, VceBootLevel);
- mm_boot_level_offset /= 4;
- mm_boot_level_offset *= 4;
- mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, mm_boot_level_offset);
- mm_boot_level_value &= 0xFF00FFFF;
- mm_boot_level_value |= smu_data->smc_state_table.VceBootLevel << 16;
- cgs_write_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_StablePState))
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_VCEDPM_SetEnabledMask,
- (uint32_t)1 << smu_data->smc_state_table.VceBootLevel);
- return 0;
-}
-
-static int tonga_update_samu_smc_table(struct pp_hwmgr *hwmgr)
-{
- struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smumgr->backend);
- uint32_t mm_boot_level_offset, mm_boot_level_value;
-
- smu_data->smc_state_table.SamuBootLevel = 0;
- mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
- offsetof(SMU72_Discrete_DpmTable, SamuBootLevel);
-
- mm_boot_level_offset /= 4;
- mm_boot_level_offset *= 4;
- mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, mm_boot_level_offset);
- mm_boot_level_value &= 0xFFFFFF00;
- mm_boot_level_value |= smu_data->smc_state_table.SamuBootLevel << 0;
- cgs_write_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_StablePState))
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_SAMUDPM_SetEnabledMask,
- (uint32_t)(1 << smu_data->smc_state_table.SamuBootLevel));
- return 0;
-}
-
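The three update helpers above share one pattern: round the field's byte offset down to a dword boundary, read that dword through the indirect SMC interface, replace the single byte holding the boot level, and write the dword back. A minimal standalone sketch of that read-modify-write step follows; the offsets and the value standing in for the SRAM read are hypothetical.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t dpm_table_start = 0x20000;		/* hypothetical SMC SRAM base */
	uint32_t field_offset = dpm_table_start + 3;	/* byte offset of the field   */
	uint8_t boot_level = 2;

	/* Round down to the containing dword; the SMC interface is 32-bit wide. */
	uint32_t dword_offset = field_offset / 4 * 4;
	uint32_t shift = (field_offset - dword_offset) * 8;

	uint32_t value = 0x11223344;			/* stands in for the SRAM read */

	/* Clear the target byte, then OR in the new boot level. */
	value &= ~(0xffu << shift);
	value |= (uint32_t)boot_level << shift;

	printf("dword offset=0x%x new value=0x%08x\n", dword_offset, value);
	return 0;
}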
-int tonga_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type)
-{
- switch (type) {
- case SMU_UVD_TABLE:
- tonga_update_uvd_smc_table(hwmgr);
- break;
- case SMU_VCE_TABLE:
- tonga_update_vce_smc_table(hwmgr);
- break;
- case SMU_SAMU_TABLE:
- tonga_update_samu_smc_table(hwmgr);
- break;
- default:
- break;
- }
- return 0;
-}
-
-
-/**
- * Get the location of various tables inside the FW image.
- *
- * @param hwmgr the address of the powerplay hardware manager.
- * @return 0 on success, 1 if any of the header fields could not be read.
- */
-int tonga_process_firmware_header(struct pp_hwmgr *hwmgr)
-{
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smumgr->backend);
-
- uint32_t tmp;
- int result;
- bool error = false;
-
- result = smu7_read_smc_sram_dword(hwmgr->smumgr,
- SMU72_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU72_Firmware_Header, DpmTable),
- &tmp, SMC_RAM_END);
-
- if (!result)
- smu_data->smu7_data.dpm_table_start = tmp;
-
- error |= (result != 0);
-
- result = smu7_read_smc_sram_dword(hwmgr->smumgr,
- SMU72_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU72_Firmware_Header, SoftRegisters),
- &tmp, SMC_RAM_END);
-
- if (!result) {
- data->soft_regs_start = tmp;
- smu_data->smu7_data.soft_regs_start = tmp;
- }
-
- error |= (result != 0);
-
-
- result = smu7_read_smc_sram_dword(hwmgr->smumgr,
- SMU72_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU72_Firmware_Header, mcRegisterTable),
- &tmp, SMC_RAM_END);
-
- if (!result)
- smu_data->smu7_data.mc_reg_table_start = tmp;
-
- result = smu7_read_smc_sram_dword(hwmgr->smumgr,
- SMU72_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU72_Firmware_Header, FanTable),
- &tmp, SMC_RAM_END);
-
- if (!result)
- smu_data->smu7_data.fan_table_start = tmp;
-
- error |= (result != 0);
-
- result = smu7_read_smc_sram_dword(hwmgr->smumgr,
- SMU72_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU72_Firmware_Header, mcArbDramTimingTable),
- &tmp, SMC_RAM_END);
-
- if (!result)
- smu_data->smu7_data.arb_table_start = tmp;
-
- error |= (result != 0);
-
- result = smu7_read_smc_sram_dword(hwmgr->smumgr,
- SMU72_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU72_Firmware_Header, Version),
- &tmp, SMC_RAM_END);
-
- if (!result)
- hwmgr->microcode_version_info.SMC = tmp;
-
- error |= (result != 0);
-
- return error ? 1 : 0;
-}
-
-/*---------------------------MC----------------------------*/
-
-static uint8_t tonga_get_memory_modile_index(struct pp_hwmgr *hwmgr)
-{
- return (uint8_t) (0xFF & (cgs_read_register(hwmgr->device, mmBIOS_SCRATCH_4) >> 16));
-}
-
-static bool tonga_check_s0_mc_reg_index(uint16_t in_reg, uint16_t *out_reg)
-{
- bool result = true;
-
- switch (in_reg) {
- case mmMC_SEQ_RAS_TIMING:
- *out_reg = mmMC_SEQ_RAS_TIMING_LP;
- break;
-
- case mmMC_SEQ_DLL_STBY:
- *out_reg = mmMC_SEQ_DLL_STBY_LP;
- break;
-
- case mmMC_SEQ_G5PDX_CMD0:
- *out_reg = mmMC_SEQ_G5PDX_CMD0_LP;
- break;
-
- case mmMC_SEQ_G5PDX_CMD1:
- *out_reg = mmMC_SEQ_G5PDX_CMD1_LP;
- break;
-
- case mmMC_SEQ_G5PDX_CTRL:
- *out_reg = mmMC_SEQ_G5PDX_CTRL_LP;
- break;
-
- case mmMC_SEQ_CAS_TIMING:
- *out_reg = mmMC_SEQ_CAS_TIMING_LP;
- break;
-
- case mmMC_SEQ_MISC_TIMING:
- *out_reg = mmMC_SEQ_MISC_TIMING_LP;
- break;
-
- case mmMC_SEQ_MISC_TIMING2:
- *out_reg = mmMC_SEQ_MISC_TIMING2_LP;
- break;
-
- case mmMC_SEQ_PMG_DVS_CMD:
- *out_reg = mmMC_SEQ_PMG_DVS_CMD_LP;
- break;
-
- case mmMC_SEQ_PMG_DVS_CTL:
- *out_reg = mmMC_SEQ_PMG_DVS_CTL_LP;
- break;
-
- case mmMC_SEQ_RD_CTL_D0:
- *out_reg = mmMC_SEQ_RD_CTL_D0_LP;
- break;
-
- case mmMC_SEQ_RD_CTL_D1:
- *out_reg = mmMC_SEQ_RD_CTL_D1_LP;
- break;
-
- case mmMC_SEQ_WR_CTL_D0:
- *out_reg = mmMC_SEQ_WR_CTL_D0_LP;
- break;
-
- case mmMC_SEQ_WR_CTL_D1:
- *out_reg = mmMC_SEQ_WR_CTL_D1_LP;
- break;
-
- case mmMC_PMG_CMD_EMRS:
- *out_reg = mmMC_SEQ_PMG_CMD_EMRS_LP;
- break;
-
- case mmMC_PMG_CMD_MRS:
- *out_reg = mmMC_SEQ_PMG_CMD_MRS_LP;
- break;
-
- case mmMC_PMG_CMD_MRS1:
- *out_reg = mmMC_SEQ_PMG_CMD_MRS1_LP;
- break;
-
- case mmMC_SEQ_PMG_TIMING:
- *out_reg = mmMC_SEQ_PMG_TIMING_LP;
- break;
-
- case mmMC_PMG_CMD_MRS2:
- *out_reg = mmMC_SEQ_PMG_CMD_MRS2_LP;
- break;
-
- case mmMC_SEQ_WR_CTL_2:
- *out_reg = mmMC_SEQ_WR_CTL_2_LP;
- break;
-
- default:
- result = false;
- break;
- }
-
- return result;
-}
-
-static int tonga_set_s0_mc_reg_index(struct tonga_mc_reg_table *table)
-{
- uint32_t i;
- uint16_t address;
-
- for (i = 0; i < table->last; i++) {
- table->mc_reg_address[i].s0 =
- tonga_check_s0_mc_reg_index(table->mc_reg_address[i].s1,
- &address) ?
- address :
- table->mc_reg_address[i].s1;
- }
- return 0;
-}
-
-static int tonga_copy_vbios_smc_reg_table(const pp_atomctrl_mc_reg_table *table,
- struct tonga_mc_reg_table *ni_table)
-{
- uint8_t i, j;
-
- PP_ASSERT_WITH_CODE((table->last <= SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
- "Invalid VramInfo table.", return -EINVAL);
- PP_ASSERT_WITH_CODE((table->num_entries <= MAX_AC_TIMING_ENTRIES),
- "Invalid VramInfo table.", return -EINVAL);
-
- for (i = 0; i < table->last; i++)
- ni_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
-
- ni_table->last = table->last;
-
- for (i = 0; i < table->num_entries; i++) {
- ni_table->mc_reg_table_entry[i].mclk_max =
- table->mc_reg_table_entry[i].mclk_max;
- for (j = 0; j < table->last; j++) {
- ni_table->mc_reg_table_entry[i].mc_data[j] =
- table->mc_reg_table_entry[i].mc_data[j];
- }
- }
-
- ni_table->num_entries = table->num_entries;
-
- return 0;
-}
-
-/**
- * VBIOS omits some information to reduce size; we need to recover it here.
- * 1. When we see mmMC_SEQ_MISC1, bits [31:16] (EMRS1) need to be written to
- *    mmMC_PMG_CMD_EMRS/_LP[15:0], and bits [15:0] (MRS) need to be written to
- *    mmMC_PMG_CMD_MRS/_LP[15:0].
- * 2. When we see mmMC_SEQ_RESERVE_M, bits [15:0] (EMRS2) need to be written to
- *    mmMC_PMG_CMD_MRS1/_LP[15:0].
- * 3. These values need to be set for each clock range.
- * @param hwmgr the address of the powerplay hardware manager.
- * @param table the address of MCRegTable
- * @return 0 on success, -EINVAL if the table overflows.
- */
-static int tonga_set_mc_special_registers(struct pp_hwmgr *hwmgr,
- struct tonga_mc_reg_table *table)
-{
- uint8_t i, j, k;
- uint32_t temp_reg;
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-
- for (i = 0, j = table->last; i < table->last; i++) {
- PP_ASSERT_WITH_CODE((j < SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
- "Invalid VramInfo table.", return -EINVAL);
-
- switch (table->mc_reg_address[i].s1) {
-
- case mmMC_SEQ_MISC1:
- temp_reg = cgs_read_register(hwmgr->device,
- mmMC_PMG_CMD_EMRS);
- table->mc_reg_address[j].s1 = mmMC_PMG_CMD_EMRS;
- table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_EMRS_LP;
- for (k = 0; k < table->num_entries; k++) {
- table->mc_reg_table_entry[k].mc_data[j] =
- ((temp_reg & 0xffff0000)) |
- ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
- }
- j++;
- PP_ASSERT_WITH_CODE((j < SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
- "Invalid VramInfo table.", return -EINVAL);
-
- temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS);
- table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS;
- table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS_LP;
- for (k = 0; k < table->num_entries; k++) {
- table->mc_reg_table_entry[k].mc_data[j] =
- (temp_reg & 0xffff0000) |
- (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
-
- if (!data->is_memory_gddr5)
- table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
- }
- j++;
- PP_ASSERT_WITH_CODE((j <= SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
- "Invalid VramInfo table.", return -EINVAL);
-
- if (!data->is_memory_gddr5) {
- table->mc_reg_address[j].s1 = mmMC_PMG_AUTO_CMD;
- table->mc_reg_address[j].s0 = mmMC_PMG_AUTO_CMD;
- for (k = 0; k < table->num_entries; k++)
- table->mc_reg_table_entry[k].mc_data[j] =
- (table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
- j++;
- PP_ASSERT_WITH_CODE((j <= SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
- "Invalid VramInfo table.", return -EINVAL);
- }
-
- break;
-
- case mmMC_SEQ_RESERVE_M:
- temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1);
- table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS1;
- table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS1_LP;
- for (k = 0; k < table->num_entries; k++) {
- table->mc_reg_table_entry[k].mc_data[j] =
- (temp_reg & 0xffff0000) |
- (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
- }
- j++;
- PP_ASSERT_WITH_CODE((j <= SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
- "Invalid VramInfo table.", return -EINVAL);
- break;
-
- default:
- break;
- }
-
- }
-
- table->last = j;
-
- return 0;
-}
-
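As a standalone illustration of the recovery rule documented above (a sketch, not the driver code): bits [31:16] of MC_SEQ_MISC1 are merged into the EMRS command value and bits [15:0] into the MRS command value, while the upper half of each current command register is preserved. The register snapshots below are hypothetical.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical register snapshots, for illustration only. */
	uint32_t mc_seq_misc1    = 0xABCD1234;	/* [31:16]=EMRS1, [15:0]=MRS */
	uint32_t mc_pmg_cmd_emrs = 0x00110000;
	uint32_t mc_pmg_cmd_mrs  = 0x00220000;

	/* Keep the upper half of the command register, take EMRS1 from MISC1[31:16]. */
	uint32_t emrs = (mc_pmg_cmd_emrs & 0xffff0000) |
			((mc_seq_misc1 & 0xffff0000) >> 16);

	/* Keep the upper half of the command register, take MRS from MISC1[15:0]. */
	uint32_t mrs  = (mc_pmg_cmd_mrs & 0xffff0000) |
			(mc_seq_misc1 & 0x0000ffff);

	printf("EMRS=0x%08x MRS=0x%08x\n", emrs, mrs);
	return 0;
}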
-static int tonga_set_valid_flag(struct tonga_mc_reg_table *table)
-{
- uint8_t i, j;
-
- for (i = 0; i < table->last; i++) {
- for (j = 1; j < table->num_entries; j++) {
- if (table->mc_reg_table_entry[j-1].mc_data[i] !=
- table->mc_reg_table_entry[j].mc_data[i]) {
- table->validflag |= (1<<i);
- break;
- }
- }
- }
-
- return 0;
-}
-
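Tying this back to tonga_convert_mc_registers() near the top of the deleted file: validflag marks each register column whose value differs between at least two clock-range entries, and only the marked columns are later packed into the per-level SMC register set. A minimal standalone sketch of that mark-and-pack pair, with small illustrative arrays:

#include <stdint.h>
#include <stdio.h>

#define NUM_REGS	4
#define NUM_ENTRIES	3

int main(void)
{
	/* mc_data[entry][register], illustrative values only. */
	uint32_t mc_data[NUM_ENTRIES][NUM_REGS] = {
		{ 0x10, 0x20, 0x30, 0x40 },
		{ 0x10, 0x21, 0x30, 0x41 },
		{ 0x10, 0x22, 0x30, 0x42 },
	};
	uint32_t validflag = 0;
	uint32_t packed[NUM_REGS];
	unsigned i, j, n = 0;

	/* Mark the columns whose value changes across entries. */
	for (i = 0; i < NUM_REGS; i++)
		for (j = 1; j < NUM_ENTRIES; j++)
			if (mc_data[j - 1][i] != mc_data[j][i]) {
				validflag |= 1u << i;
				break;
			}

	/* Pack only the marked columns of one entry, as the SMC expects. */
	for (j = 0; j < NUM_REGS; j++)
		if (validflag & (1u << j))
			packed[n++] = mc_data[0][j];

	printf("validflag=0x%x, packed %u of %u registers\n",
	       validflag, n, NUM_REGS);
	return 0;
}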
-int tonga_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
-{
- int result;
- struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smumgr->backend);
- pp_atomctrl_mc_reg_table *table;
- struct tonga_mc_reg_table *ni_table = &smu_data->mc_reg_table;
- uint8_t module_index = tonga_get_memory_modile_index(hwmgr);
-
- table = kzalloc(sizeof(pp_atomctrl_mc_reg_table), GFP_KERNEL);
-
- if (table == NULL)
- return -ENOMEM;
-
- /* Program additional LP registers that are no longer programmed by VBIOS */
- cgs_write_register(hwmgr->device, mmMC_SEQ_RAS_TIMING_LP,
- cgs_read_register(hwmgr->device, mmMC_SEQ_RAS_TIMING));
- cgs_write_register(hwmgr->device, mmMC_SEQ_CAS_TIMING_LP,
- cgs_read_register(hwmgr->device, mmMC_SEQ_CAS_TIMING));
- cgs_write_register(hwmgr->device, mmMC_SEQ_DLL_STBY_LP,
- cgs_read_register(hwmgr->device, mmMC_SEQ_DLL_STBY));
- cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0_LP,
- cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0));
- cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1_LP,
- cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1));
- cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL_LP,
- cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL));
- cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD_LP,
- cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD));
- cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL_LP,
- cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL));
- cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING_LP,
- cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING));
- cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2_LP,
- cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2));
- cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_EMRS_LP,
- cgs_read_register(hwmgr->device, mmMC_PMG_CMD_EMRS));
- cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS_LP,
- cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS));
- cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS1_LP,
- cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1));
- cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0_LP,
- cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0));
- cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1_LP,
- cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1));
- cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0_LP,
- cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0));
- cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1_LP,
- cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1));
- cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_TIMING_LP,
- cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_TIMING));
- cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS2_LP,
- cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS2));
- cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_2_LP,
- cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_2));
-
- memset(table, 0x00, sizeof(pp_atomctrl_mc_reg_table));
-
- result = atomctrl_initialize_mc_reg_table(hwmgr, module_index, table);
-
- if (!result)
- result = tonga_copy_vbios_smc_reg_table(table, ni_table);
-
- if (!result) {
- tonga_set_s0_mc_reg_index(ni_table);
- result = tonga_set_mc_special_registers(hwmgr, ni_table);
- }
-
- if (!result)
- tonga_set_valid_flag(ni_table);
-
- kfree(table);
-
- return result;
-}
-
-bool tonga_is_dpm_running(struct pp_hwmgr *hwmgr)
-{
- return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device,
- CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON))
- ? true : false;
-}
-
-int tonga_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr,
- struct amd_pp_profile *request)
-{
- struct tonga_smumgr *smu_data = (struct tonga_smumgr *)
- (hwmgr->smumgr->backend);
- struct SMU72_Discrete_GraphicsLevel *levels =
- smu_data->smc_state_table.GraphicsLevel;
- uint32_t array = smu_data->smu7_data.dpm_table_start +
- offsetof(SMU72_Discrete_DpmTable, GraphicsLevel);
- uint32_t array_size = sizeof(struct SMU72_Discrete_GraphicsLevel) *
- SMU72_MAX_LEVELS_GRAPHICS;
- uint32_t i;
-
- for (i = 0; i < smu_data->smc_state_table.GraphicsDpmLevelCount; i++) {
- levels[i].ActivityLevel =
- cpu_to_be16(request->activity_threshold);
- levels[i].EnabledForActivity = 1;
- levels[i].UpHyst = request->up_hyst;
- levels[i].DownHyst = request->down_hyst;
- }
-
- return smu7_copy_bytes_to_smc(hwmgr->smumgr, array, (uint8_t *)levels,
- array_size, SMC_RAM_END);
-}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.h b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.h
deleted file mode 100644
index 962860f13f24..000000000000
--- a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.h
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#ifndef _TONGA_SMC_H
-#define _TONGA_SMC_H
-
-#include "smumgr.h"
-#include "smu72.h"
-
-
-#define ASICID_IS_TONGA_P(wDID, bRID) \
- (((wDID == 0x6930) && ((bRID == 0xF0) || (bRID == 0xF1) || (bRID == 0xFF))) \
- || ((wDID == 0x6920) && ((bRID == 0) || (bRID == 1))))
-
-
-struct tonga_pt_defaults {
- uint8_t svi_load_line_en;
- uint8_t svi_load_line_vddC;
- uint8_t tdc_vddc_throttle_release_limit_perc;
- uint8_t tdc_mawt;
- uint8_t tdc_waterfall_ctl;
- uint8_t dte_ambient_temp_base;
- uint32_t display_cac;
- uint32_t bamp_temp_gradient;
- uint16_t bapmti_r[SMU72_DTE_ITERATIONS * SMU72_DTE_SOURCES * SMU72_DTE_SINKS];
- uint16_t bapmti_rc[SMU72_DTE_ITERATIONS * SMU72_DTE_SOURCES * SMU72_DTE_SINKS];
-};
-
-int tonga_populate_all_graphic_levels(struct pp_hwmgr *hwmgr);
-int tonga_populate_all_memory_levels(struct pp_hwmgr *hwmgr);
-int tonga_init_smc_table(struct pp_hwmgr *hwmgr);
-int tonga_thermal_setup_fan_table(struct pp_hwmgr *hwmgr);
-int tonga_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type);
-int tonga_update_sclk_threshold(struct pp_hwmgr *hwmgr);
-uint32_t tonga_get_offsetof(uint32_t type, uint32_t member);
-uint32_t tonga_get_mac_definition(uint32_t value);
-int tonga_process_firmware_header(struct pp_hwmgr *hwmgr);
-int tonga_initialize_mc_reg_table(struct pp_hwmgr *hwmgr);
-bool tonga_is_dpm_running(struct pp_hwmgr *hwmgr);
-int tonga_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr,
- struct amd_pp_profile *request);
-#endif
-
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
index c35f4c35c9ca..0a8e48bff219 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
@@ -33,141 +33,193 @@
#include "smu/smu_7_1_2_d.h"
#include "smu/smu_7_1_2_sh_mask.h"
#include "cgs_common.h"
-#include "tonga_smc.h"
#include "smu7_smumgr.h"
+#include "smu7_dyn_defaults.h"
-static int tonga_start_in_protection_mode(struct pp_smumgr *smumgr)
+#include "smu7_hwmgr.h"
+#include "hardwaremanager.h"
+#include "ppatomctrl.h"
+
+#include "atombios.h"
+
+#include "pppcielanes.h"
+#include "pp_endian.h"
+
+#include "gmc/gmc_8_1_d.h"
+#include "gmc/gmc_8_1_sh_mask.h"
+
+#include "bif/bif_5_0_d.h"
+#include "bif/bif_5_0_sh_mask.h"
+
+#include "dce/dce_10_0_d.h"
+#include "dce/dce_10_0_sh_mask.h"
+
+
+#define VOLTAGE_SCALE 4
+#define POWERTUNE_DEFAULT_SET_MAX 1
+#define VOLTAGE_VID_OFFSET_SCALE1 625
+#define VOLTAGE_VID_OFFSET_SCALE2 100
+#define MC_CG_ARB_FREQ_F1 0x0b
+#define VDDC_VDDCI_DELTA 200
+
+
+static const struct tonga_pt_defaults tonga_power_tune_data_set_array[POWERTUNE_DEFAULT_SET_MAX] = {
+/* sviLoadLineEn, SviLoadLineVddC, TDC_VDDC_ThrottleReleaseLimitPerc, TDC_MAWt,
+ * TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac, BAPM_TEMP_GRADIENT
+ */
+ {1, 0xF, 0xFD, 0x19,
+ 5, 45, 0, 0xB0000,
+ {0x79, 0x253, 0x25D, 0xAE, 0x72, 0x80, 0x83, 0x86, 0x6F, 0xC8,
+ 0xC9, 0xC9, 0x2F, 0x4D, 0x61},
+ {0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203,
+ 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4}
+ },
+};
+
+/* [Fmin, Fmax, LDO_REFSEL, USE_FOR_LOW_FREQ] */
+static const uint16_t tonga_clock_stretcher_lookup_table[2][4] = {
+ {600, 1050, 3, 0},
+ {600, 1050, 6, 1}
+};
+
+/* [FF, SS] type, [] 4 voltage ranges,
+ * and [Floor Freq, Boundary Freq, VID min , VID max]
+ */
+static const uint32_t tonga_clock_stretcher_ddt_table[2][4][4] = {
+ { {265, 529, 120, 128}, {325, 650, 96, 119}, {430, 860, 32, 95}, {0, 0, 0, 31} },
+ { {275, 550, 104, 112}, {319, 638, 96, 103}, {360, 720, 64, 95}, {384, 768, 32, 63} }
+};
+
+/* [Use_For_Low_freq] value, [0%, 5%, 10%, 7.14%, 14.28%, 20%] */
+static const uint8_t tonga_clock_stretch_amount_conversion[2][6] = {
+ {0, 1, 3, 2, 4, 5},
+ {0, 2, 4, 5, 6, 5}
+};
+
+static int tonga_start_in_protection_mode(struct pp_hwmgr *hwmgr)
{
int result;
/* Assert reset */
- SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
SMC_SYSCON_RESET_CNTL, rst_reg, 1);
- result = smu7_upload_smu_firmware_image(smumgr);
+ result = smu7_upload_smu_firmware_image(hwmgr);
if (result)
return result;
/* Clear status */
- cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC,
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
ixSMU_STATUS, 0);
/* Enable clock */
- SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0);
/* De-assert reset */
- SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
SMC_SYSCON_RESET_CNTL, rst_reg, 0);
/* Set SMU Auto Start */
- SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
SMU_INPUT_DATA, AUTO_START, 1);
/* Clear firmware interrupt enable flag */
- cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC,
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
ixFIRMWARE_FLAGS, 0);
- SMUM_WAIT_VFPF_INDIRECT_FIELD(smumgr, SMC_IND,
+ PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND,
RCU_UC_EVENTS, INTERRUPTS_ENABLED, 1);
/**
* Call Test SMU message with 0x20000 offset to trigger SMU start
*/
- smu7_send_msg_to_smc_offset(smumgr);
+ smu7_send_msg_to_smc_offset(hwmgr);
/* Wait for done bit to be set */
- SMUM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(smumgr, SMC_IND,
+ PHM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(hwmgr, SMC_IND,
SMU_STATUS, SMU_DONE, 0);
/* Check pass/failed indicator */
- if (1 != SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device,
+ if (1 != PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
CGS_IND_REG__SMC, SMU_STATUS, SMU_PASS)) {
pr_err("SMU Firmware start failed\n");
return -EINVAL;
}
/* Wait for firmware to initialize */
- SMUM_WAIT_VFPF_INDIRECT_FIELD(smumgr, SMC_IND,
+ PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND,
FIRMWARE_FLAGS, INTERRUPTS_ENABLED, 1);
return 0;
}
-
-static int tonga_start_in_non_protection_mode(struct pp_smumgr *smumgr)
+static int tonga_start_in_non_protection_mode(struct pp_hwmgr *hwmgr)
{
int result = 0;
/* wait for smc boot up */
- SMUM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(smumgr, SMC_IND,
+ PHM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(hwmgr, SMC_IND,
RCU_UC_EVENTS, boot_seq_done, 0);
/*Clear firmware interrupt enable flag*/
- cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC,
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
ixFIRMWARE_FLAGS, 0);
- SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
SMC_SYSCON_RESET_CNTL, rst_reg, 1);
- result = smu7_upload_smu_firmware_image(smumgr);
+ result = smu7_upload_smu_firmware_image(hwmgr);
if (result != 0)
return result;
/* Set smc instruct start point at 0x0 */
- smu7_program_jump_on_start(smumgr);
+ smu7_program_jump_on_start(hwmgr);
- SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0);
/*De-assert reset*/
- SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
SMC_SYSCON_RESET_CNTL, rst_reg, 0);
/* Wait for firmware to initialize */
- SMUM_WAIT_VFPF_INDIRECT_FIELD(smumgr, SMC_IND,
+ PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND,
FIRMWARE_FLAGS, INTERRUPTS_ENABLED, 1);
return result;
}
-static int tonga_start_smu(struct pp_smumgr *smumgr)
+static int tonga_start_smu(struct pp_hwmgr *hwmgr)
{
int result;
/* Only start SMC if SMC RAM is not running */
- if (!(smu7_is_smc_ram_running(smumgr) ||
- cgs_is_virtualization_enabled(smumgr->device))) {
+ if (!(smu7_is_smc_ram_running(hwmgr) ||
+ cgs_is_virtualization_enabled(hwmgr->device))) {
/*Check if SMU is running in protected mode*/
- if (0 == SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ if (0 == PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
SMU_FIRMWARE, SMU_MODE)) {
- result = tonga_start_in_non_protection_mode(smumgr);
+ result = tonga_start_in_non_protection_mode(hwmgr);
if (result)
return result;
} else {
- result = tonga_start_in_protection_mode(smumgr);
+ result = tonga_start_in_protection_mode(hwmgr);
if (result)
return result;
}
}
- result = smu7_request_smu_load_fw(smumgr);
+ result = smu7_request_smu_load_fw(hwmgr);
return result;
}
-/**
- * Write a 32bit value to the SMC SRAM space.
- * ALL PARAMETERS ARE IN HOST BYTE ORDER.
- * @param smumgr the address of the powerplay hardware manager.
- * @param smcAddress the address in the SMC RAM to access.
- * @param value to write to the SMC SRAM.
- */
-static int tonga_smu_init(struct pp_smumgr *smumgr)
+static int tonga_smu_init(struct pp_hwmgr *hwmgr)
{
struct tonga_smumgr *tonga_priv = NULL;
int i;
@@ -176,9 +228,9 @@ static int tonga_smu_init(struct pp_smumgr *smumgr)
if (tonga_priv == NULL)
return -ENOMEM;
- smumgr->backend = tonga_priv;
+ hwmgr->smu_backend = tonga_priv;
- if (smu7_init(smumgr))
+ if (smu7_init(hwmgr))
return -EINVAL;
for (i = 0; i < SMU72_MAX_LEVELS_GRAPHICS; i++)
@@ -187,6 +239,3053 @@ static int tonga_smu_init(struct pp_smumgr *smumgr)
return 0;
}
+
+static int tonga_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr,
+ phm_ppt_v1_clock_voltage_dependency_table *allowed_clock_voltage_table,
+ uint32_t clock, SMU_VoltageLevel *voltage, uint32_t *mvdd)
+{
+ uint32_t i = 0;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *pptable_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+ /* clock - voltage dependency table is empty table */
+ if (allowed_clock_voltage_table->count == 0)
+ return -EINVAL;
+
+ for (i = 0; i < allowed_clock_voltage_table->count; i++) {
+ /* find first sclk bigger than request */
+ if (allowed_clock_voltage_table->entries[i].clk >= clock) {
+ voltage->VddGfx = phm_get_voltage_index(
+ pptable_info->vddgfx_lookup_table,
+ allowed_clock_voltage_table->entries[i].vddgfx);
+ voltage->Vddc = phm_get_voltage_index(
+ pptable_info->vddc_lookup_table,
+ allowed_clock_voltage_table->entries[i].vddc);
+
+ if (allowed_clock_voltage_table->entries[i].vddci)
+ voltage->Vddci =
+ phm_get_voltage_id(&data->vddci_voltage_table, allowed_clock_voltage_table->entries[i].vddci);
+ else
+ voltage->Vddci =
+ phm_get_voltage_id(&data->vddci_voltage_table,
+ allowed_clock_voltage_table->entries[i].vddc - VDDC_VDDCI_DELTA);
+
+
+ if (allowed_clock_voltage_table->entries[i].mvdd)
+ *mvdd = (uint32_t) allowed_clock_voltage_table->entries[i].mvdd;
+
+ voltage->Phases = 1;
+ return 0;
+ }
+ }
+
+ /* sclk is bigger than max sclk in the dependence table */
+ voltage->VddGfx = phm_get_voltage_index(pptable_info->vddgfx_lookup_table,
+ allowed_clock_voltage_table->entries[i-1].vddgfx);
+ voltage->Vddc = phm_get_voltage_index(pptable_info->vddc_lookup_table,
+ allowed_clock_voltage_table->entries[i-1].vddc);
+
+ if (allowed_clock_voltage_table->entries[i-1].vddci)
+ voltage->Vddci = phm_get_voltage_id(&data->vddci_voltage_table,
+ allowed_clock_voltage_table->entries[i-1].vddci);
+
+ if (allowed_clock_voltage_table->entries[i-1].mvdd)
+ *mvdd = (uint32_t) allowed_clock_voltage_table->entries[i-1].mvdd;
+
+ return 0;
+}
+
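The lookup above selects the first dependency entry whose clock is at least the requested clock and clamps to the highest entry when the request exceeds the table. A compact standalone sketch of just that selection rule, using an illustrative (clock, voltage) array in place of the pptable dependency table:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Illustrative (clock, voltage) pairs, clock in 10 kHz units. */
	const uint32_t clk[]  = { 30000, 60000, 90000 };
	const uint32_t vddc[] = {   900,  1000,  1150 };
	const unsigned count = 3;
	uint32_t request = 75000;
	unsigned i;

	/* Find the first level that can satisfy the requested clock. */
	for (i = 0; i < count; i++)
		if (clk[i] >= request)
			break;

	/* Past the end: clamp to the highest level, as the driver does. */
	if (i == count)
		i = count - 1;

	printf("request=%u -> level %u (vddc=%u)\n", request, i, vddc[i]);
	return 0;
}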
+static int tonga_populate_smc_vddc_table(struct pp_hwmgr *hwmgr,
+ SMU72_Discrete_DpmTable *table)
+{
+ unsigned int count;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
+ table->VddcLevelCount = data->vddc_voltage_table.count;
+ for (count = 0; count < table->VddcLevelCount; count++) {
+ table->VddcTable[count] =
+ PP_HOST_TO_SMC_US(data->vddc_voltage_table.entries[count].value * VOLTAGE_SCALE);
+ }
+ CONVERT_FROM_HOST_TO_SMC_UL(table->VddcLevelCount);
+ }
+ return 0;
+}
+
+static int tonga_populate_smc_vdd_gfx_table(struct pp_hwmgr *hwmgr,
+ SMU72_Discrete_DpmTable *table)
+{
+ unsigned int count;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_gfx_control) {
+ table->VddGfxLevelCount = data->vddgfx_voltage_table.count;
+ for (count = 0; count < data->vddgfx_voltage_table.count; count++) {
+ table->VddGfxTable[count] =
+ PP_HOST_TO_SMC_US(data->vddgfx_voltage_table.entries[count].value * VOLTAGE_SCALE);
+ }
+ CONVERT_FROM_HOST_TO_SMC_UL(table->VddGfxLevelCount);
+ }
+ return 0;
+}
+
+static int tonga_populate_smc_vdd_ci_table(struct pp_hwmgr *hwmgr,
+ SMU72_Discrete_DpmTable *table)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ uint32_t count;
+
+ table->VddciLevelCount = data->vddci_voltage_table.count;
+ for (count = 0; count < table->VddciLevelCount; count++) {
+ if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
+ table->VddciTable[count] =
+ PP_HOST_TO_SMC_US(data->vddci_voltage_table.entries[count].value * VOLTAGE_SCALE);
+ } else if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
+ table->SmioTable1.Pattern[count].Voltage =
+ PP_HOST_TO_SMC_US(data->vddci_voltage_table.entries[count].value * VOLTAGE_SCALE);
+ /* Index into DpmTable.Smio. Drive bits from Smio entry to get this voltage level. */
+ table->SmioTable1.Pattern[count].Smio =
+ (uint8_t) count;
+ table->Smio[count] |=
+ data->vddci_voltage_table.entries[count].smio_low;
+ table->VddciTable[count] =
+ PP_HOST_TO_SMC_US(data->vddci_voltage_table.entries[count].value * VOLTAGE_SCALE);
+ }
+ }
+
+ table->SmioMask1 = data->vddci_voltage_table.mask_low;
+ CONVERT_FROM_HOST_TO_SMC_UL(table->VddciLevelCount);
+
+ return 0;
+}
+
+static int tonga_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
+ SMU72_Discrete_DpmTable *table)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ uint32_t count;
+
+ if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
+ table->MvddLevelCount = data->mvdd_voltage_table.count;
+ for (count = 0; count < table->MvddLevelCount; count++) {
+ table->SmioTable2.Pattern[count].Voltage =
+ PP_HOST_TO_SMC_US(data->mvdd_voltage_table.entries[count].value * VOLTAGE_SCALE);
+ /* Index into DpmTable.Smio. Drive bits from Smio entry to get this voltage level.*/
+ table->SmioTable2.Pattern[count].Smio =
+ (uint8_t) count;
+ table->Smio[count] |=
+ data->mvdd_voltage_table.entries[count].smio_low;
+ }
+ table->SmioMask2 = data->mvdd_voltage_table.mask_low;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->MvddLevelCount);
+ }
+
+ return 0;
+}
+
+static int tonga_populate_cac_tables(struct pp_hwmgr *hwmgr,
+ SMU72_Discrete_DpmTable *table)
+{
+ uint32_t count;
+ uint8_t index = 0;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *pptable_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct phm_ppt_v1_voltage_lookup_table *vddgfx_lookup_table =
+ pptable_info->vddgfx_lookup_table;
+ struct phm_ppt_v1_voltage_lookup_table *vddc_lookup_table =
+ pptable_info->vddc_lookup_table;
+
+ /* table is already swapped, so in order to use the value from it
+ * we need to swap it back.
+ */
+ uint32_t vddc_level_count = PP_SMC_TO_HOST_UL(table->VddcLevelCount);
+ uint32_t vddgfx_level_count = PP_SMC_TO_HOST_UL(table->VddGfxLevelCount);
+
+ for (count = 0; count < vddc_level_count; count++) {
+ /* We are populating vddc CAC data to BapmVddc table in split and merged mode */
+ index = phm_get_voltage_index(vddc_lookup_table,
+ data->vddc_voltage_table.entries[count].value);
+ table->BapmVddcVidLoSidd[count] =
+ convert_to_vid(vddc_lookup_table->entries[index].us_cac_low);
+ table->BapmVddcVidHiSidd[count] =
+ convert_to_vid(vddc_lookup_table->entries[index].us_cac_mid);
+ table->BapmVddcVidHiSidd2[count] =
+ convert_to_vid(vddc_lookup_table->entries[index].us_cac_high);
+ }
+
+ if ((data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2)) {
+ /* We are populating vddgfx CAC data to BapmVddgfx table in split mode */
+ for (count = 0; count < vddgfx_level_count; count++) {
+ index = phm_get_voltage_index(vddgfx_lookup_table,
+ convert_to_vid(vddgfx_lookup_table->entries[index].us_cac_mid));
+ table->BapmVddGfxVidHiSidd2[count] =
+ convert_to_vid(vddgfx_lookup_table->entries[index].us_cac_high);
+ }
+ } else {
+ for (count = 0; count < vddc_level_count; count++) {
+ index = phm_get_voltage_index(vddc_lookup_table,
+ data->vddc_voltage_table.entries[count].value);
+ table->BapmVddGfxVidLoSidd[count] =
+ convert_to_vid(vddc_lookup_table->entries[index].us_cac_low);
+ table->BapmVddGfxVidHiSidd[count] =
+ convert_to_vid(vddc_lookup_table->entries[index].us_cac_mid);
+ table->BapmVddGfxVidHiSidd2[count] =
+ convert_to_vid(vddc_lookup_table->entries[index].us_cac_high);
+ }
+ }
+
+ return 0;
+}
+
+static int tonga_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr,
+ SMU72_Discrete_DpmTable *table)
+{
+ int result;
+
+ result = tonga_populate_smc_vddc_table(hwmgr, table);
+ PP_ASSERT_WITH_CODE(!result,
+ "can not populate VDDC voltage table to SMC",
+ return -EINVAL);
+
+ result = tonga_populate_smc_vdd_ci_table(hwmgr, table);
+ PP_ASSERT_WITH_CODE(!result,
+ "can not populate VDDCI voltage table to SMC",
+ return -EINVAL);
+
+ result = tonga_populate_smc_vdd_gfx_table(hwmgr, table);
+ PP_ASSERT_WITH_CODE(!result,
+ "can not populate VDDGFX voltage table to SMC",
+ return -EINVAL);
+
+ result = tonga_populate_smc_mvdd_table(hwmgr, table);
+ PP_ASSERT_WITH_CODE(!result,
+ "can not populate MVDD voltage table to SMC",
+ return -EINVAL);
+
+ result = tonga_populate_cac_tables(hwmgr, table);
+ PP_ASSERT_WITH_CODE(!result,
+ "can not populate CAC voltage tables to SMC",
+ return -EINVAL);
+
+ return 0;
+}
+
+static int tonga_populate_ulv_level(struct pp_hwmgr *hwmgr,
+ struct SMU72_Discrete_Ulv *state)
+{
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+ state->CcPwrDynRm = 0;
+ state->CcPwrDynRm1 = 0;
+
+ state->VddcOffset = (uint16_t) table_info->us_ulv_voltage_offset;
+ state->VddcOffsetVid = (uint8_t)(table_info->us_ulv_voltage_offset *
+ VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
+
+ state->VddcPhase = 1;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm);
+ CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm1);
+ CONVERT_FROM_HOST_TO_SMC_US(state->VddcOffset);
+
+ return 0;
+}
+
+static int tonga_populate_ulv_state(struct pp_hwmgr *hwmgr,
+ struct SMU72_Discrete_DpmTable *table)
+{
+ return tonga_populate_ulv_level(hwmgr, &table->Ulv);
+}
+
+static int tonga_populate_smc_link_level(struct pp_hwmgr *hwmgr, SMU72_Discrete_DpmTable *table)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct smu7_dpm_table *dpm_table = &data->dpm_table;
+ struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend);
+ uint32_t i;
+
+ /* Index (dpm_table->pcie_speed_table.count) is reserved for PCIE boot level. */
+ for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) {
+ table->LinkLevel[i].PcieGenSpeed =
+ (uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value;
+ table->LinkLevel[i].PcieLaneCount =
+ (uint8_t)encode_pcie_lane_width(dpm_table->pcie_speed_table.dpm_levels[i].param1);
+ table->LinkLevel[i].EnabledForActivity =
+ 1;
+ table->LinkLevel[i].SPC =
+ (uint8_t)(data->pcie_spc_cap & 0xff);
+ table->LinkLevel[i].DownThreshold =
+ PP_HOST_TO_SMC_UL(5);
+ table->LinkLevel[i].UpThreshold =
+ PP_HOST_TO_SMC_UL(30);
+ }
+
+ smu_data->smc_state_table.LinkLevelCount =
+ (uint8_t)dpm_table->pcie_speed_table.count;
+ data->dpm_level_enable_mask.pcie_dpm_enable_mask =
+ phm_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
+
+ return 0;
+}
+
+static int tonga_calculate_sclk_params(struct pp_hwmgr *hwmgr,
+ uint32_t engine_clock, SMU72_Discrete_GraphicsLevel *sclk)
+{
+ const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ pp_atomctrl_clock_dividers_vi dividers;
+ uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL;
+ uint32_t spll_func_cntl_3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
+ uint32_t spll_func_cntl_4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
+ uint32_t cg_spll_spread_spectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
+ uint32_t cg_spll_spread_spectrum_2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
+ uint32_t reference_clock;
+ uint32_t reference_divider;
+ uint32_t fbdiv;
+ int result;
+
+ /* get the engine clock dividers for this clock value*/
+ result = atomctrl_get_engine_pll_dividers_vi(hwmgr, engine_clock, &dividers);
+
+ PP_ASSERT_WITH_CODE(result == 0,
+ "Error retrieving Engine Clock dividers from VBIOS.", return result);
+
+ /* To get FBDIV we need to multiply this by 16384 and divide it by Fref.*/
+ reference_clock = atomctrl_get_reference_clock(hwmgr);
+
+ reference_divider = 1 + dividers.uc_pll_ref_div;
+
+	/* The low 14 bits are the fraction and the high 12 bits are the divider. */
+ fbdiv = dividers.ul_fb_div.ul_fb_divider & 0x3FFFFFF;
+
+ /* SPLL_FUNC_CNTL setup*/
+ spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
+ CG_SPLL_FUNC_CNTL, SPLL_REF_DIV, dividers.uc_pll_ref_div);
+ spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
+ CG_SPLL_FUNC_CNTL, SPLL_PDIV_A, dividers.uc_pll_post_div);
+
+ /* SPLL_FUNC_CNTL_3 setup*/
+ spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3,
+ CG_SPLL_FUNC_CNTL_3, SPLL_FB_DIV, fbdiv);
+
+ /* set to use fractional accumulation*/
+ spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3,
+ CG_SPLL_FUNC_CNTL_3, SPLL_DITHEN, 1);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_EngineSpreadSpectrumSupport)) {
+ pp_atomctrl_internal_ss_info ss_info;
+
+ uint32_t vcoFreq = engine_clock * dividers.uc_pll_post_div;
+ if (0 == atomctrl_get_engine_clock_spread_spectrum(hwmgr, vcoFreq, &ss_info)) {
+ /*
+ * ss_info.speed_spectrum_percentage -- in unit of 0.01%
+ * ss_info.speed_spectrum_rate -- in unit of khz
+ */
+ /* clks = reference_clock * 10 / (REFDIV + 1) / speed_spectrum_rate / 2 */
+ uint32_t clkS = reference_clock * 5 / (reference_divider * ss_info.speed_spectrum_rate);
+
+ /* clkv = 2 * D * fbdiv / NS */
+ uint32_t clkV = 4 * ss_info.speed_spectrum_percentage * fbdiv / (clkS * 10000);
+
+ cg_spll_spread_spectrum =
+ PHM_SET_FIELD(cg_spll_spread_spectrum, CG_SPLL_SPREAD_SPECTRUM, CLKS, clkS);
+ cg_spll_spread_spectrum =
+ PHM_SET_FIELD(cg_spll_spread_spectrum, CG_SPLL_SPREAD_SPECTRUM, SSEN, 1);
+ cg_spll_spread_spectrum_2 =
+ PHM_SET_FIELD(cg_spll_spread_spectrum_2, CG_SPLL_SPREAD_SPECTRUM_2, CLKV, clkV);
+ }
+ }
+
+ sclk->SclkFrequency = engine_clock;
+ sclk->CgSpllFuncCntl3 = spll_func_cntl_3;
+ sclk->CgSpllFuncCntl4 = spll_func_cntl_4;
+ sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum;
+ sclk->SpllSpreadSpectrum2 = cg_spll_spread_spectrum_2;
+ sclk->SclkDid = (uint8_t)dividers.pll_post_divider;
+
+ return 0;
+}
+
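+/*
+ * Fill one SMU72 graphics DPM level: SPLL parameters for the engine clock,
+ * the minimum voltage from the SCLK dependency table, activity threshold,
+ * hysteresis and deep sleep divider, then convert the fields to SMC byte
+ * order.
+ */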
+static int tonga_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
+ uint32_t engine_clock,
+ uint16_t sclk_activity_level_threshold,
+ SMU72_Discrete_GraphicsLevel *graphic_level)
+{
+ int result;
+ uint32_t mvdd;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *pptable_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+ result = tonga_calculate_sclk_params(hwmgr, engine_clock, graphic_level);
+
+ /* populate graphics levels*/
+ result = tonga_get_dependency_volt_by_clk(hwmgr,
+ pptable_info->vdd_dep_on_sclk, engine_clock,
+ &graphic_level->MinVoltage, &mvdd);
+ PP_ASSERT_WITH_CODE((!result),
+ "can not find VDDC voltage value for VDDC "
+ "engine clock dependency table", return result);
+
+ /* SCLK frequency in units of 10KHz*/
+ graphic_level->SclkFrequency = engine_clock;
+ /* Indicates maximum activity level for this performance level. 50% for now*/
+ graphic_level->ActivityLevel = sclk_activity_level_threshold;
+
+ graphic_level->CcPwrDynRm = 0;
+ graphic_level->CcPwrDynRm1 = 0;
+ /* this level can be used if activity is high enough.*/
+ graphic_level->EnabledForActivity = 0;
+ /* this level can be used for throttling.*/
+ graphic_level->EnabledForThrottle = 1;
+ graphic_level->UpHyst = 0;
+ graphic_level->DownHyst = 0;
+ graphic_level->VoltageDownHyst = 0;
+ graphic_level->PowerThrottle = 0;
+
+ data->display_timing.min_clock_in_sr =
+ hwmgr->display_config.min_core_set_clock_in_sr;
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_SclkDeepSleep))
+ graphic_level->DeepSleepDivId =
+ smu7_get_sleep_divider_id_from_clock(engine_clock,
+ data->display_timing.min_clock_in_sr);
+
+	/* Default to slow; the highest DPM level will be set to PPSMC_DISPLAY_WATERMARK_HIGH later. */
+ graphic_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
+
+ if (!result) {
+ /* CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->MinVoltage);*/
+ /* CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->MinVddcPhases);*/
+ CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SclkFrequency);
+ CONVERT_FROM_HOST_TO_SMC_US(graphic_level->ActivityLevel);
+ CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CgSpllFuncCntl3);
+ CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CgSpllFuncCntl4);
+ CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SpllSpreadSpectrum);
+ CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SpllSpreadSpectrum2);
+ CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CcPwrDynRm);
+ CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CcPwrDynRm1);
+ }
+
+ return result;
+}
+
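+/*
+ * Build every graphics DPM level from the SCLK DPM table, assign a PCIe DPM
+ * level to each entry (from the PPTable PCIe table when present, otherwise
+ * derived from the PCIe DPM enable mask) and upload the level array to SMC
+ * RAM.
+ */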
+static int tonga_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend);
+ struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct smu7_dpm_table *dpm_table = &data->dpm_table;
+ struct phm_ppt_v1_pcie_table *pcie_table = pptable_info->pcie_table;
+ uint8_t pcie_entry_count = (uint8_t) data->dpm_table.pcie_speed_table.count;
+ uint32_t level_array_address = smu_data->smu7_data.dpm_table_start +
+ offsetof(SMU72_Discrete_DpmTable, GraphicsLevel);
+
+ uint32_t level_array_size = sizeof(SMU72_Discrete_GraphicsLevel) *
+ SMU72_MAX_LEVELS_GRAPHICS;
+
+ SMU72_Discrete_GraphicsLevel *levels = smu_data->smc_state_table.GraphicsLevel;
+
+ uint32_t i, max_entry;
+ uint8_t highest_pcie_level_enabled = 0;
+ uint8_t lowest_pcie_level_enabled = 0, mid_pcie_level_enabled = 0;
+ uint8_t count = 0;
+ int result = 0;
+
+ memset(levels, 0x00, level_array_size);
+
+ for (i = 0; i < dpm_table->sclk_table.count; i++) {
+ result = tonga_populate_single_graphic_level(hwmgr,
+ dpm_table->sclk_table.dpm_levels[i].value,
+ (uint16_t)smu_data->activity_target[i],
+ &(smu_data->smc_state_table.GraphicsLevel[i]));
+ if (result != 0)
+ return result;
+
+ /* Making sure only DPM level 0-1 have Deep Sleep Div ID populated. */
+ if (i > 1)
+ smu_data->smc_state_table.GraphicsLevel[i].DeepSleepDivId = 0;
+ }
+
+ /* Only enable level 0 for now. */
+ smu_data->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1;
+
+ /* set highest level watermark to high */
+ if (dpm_table->sclk_table.count > 1)
+ smu_data->smc_state_table.GraphicsLevel[dpm_table->sclk_table.count-1].DisplayWatermark =
+ PPSMC_DISPLAY_WATERMARK_HIGH;
+
+ smu_data->smc_state_table.GraphicsDpmLevelCount =
+ (uint8_t)dpm_table->sclk_table.count;
+ data->dpm_level_enable_mask.sclk_dpm_enable_mask =
+ phm_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
+
+ if (pcie_table != NULL) {
+ PP_ASSERT_WITH_CODE((pcie_entry_count >= 1),
+ "There must be 1 or more PCIE levels defined in PPTable.",
+ return -EINVAL);
+ max_entry = pcie_entry_count - 1; /* for indexing, we need to decrement by 1.*/
+ for (i = 0; i < dpm_table->sclk_table.count; i++) {
+ smu_data->smc_state_table.GraphicsLevel[i].pcieDpmLevel =
+ (uint8_t) ((i < max_entry) ? i : max_entry);
+ }
+ } else {
+ if (0 == data->dpm_level_enable_mask.pcie_dpm_enable_mask)
+ pr_err("Pcie Dpm Enablemask is 0 !");
+
+ while (data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
+ ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
+ (1<<(highest_pcie_level_enabled+1))) != 0)) {
+ highest_pcie_level_enabled++;
+ }
+
+ while (data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
+ ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
+ (1<<lowest_pcie_level_enabled)) == 0)) {
+ lowest_pcie_level_enabled++;
+ }
+
+ while ((count < highest_pcie_level_enabled) &&
+ ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
+ (1<<(lowest_pcie_level_enabled+1+count))) == 0)) {
+ count++;
+ }
+ mid_pcie_level_enabled = (lowest_pcie_level_enabled+1+count) < highest_pcie_level_enabled ?
+ (lowest_pcie_level_enabled+1+count) : highest_pcie_level_enabled;
+
+
+ /* set pcieDpmLevel to highest_pcie_level_enabled*/
+ for (i = 2; i < dpm_table->sclk_table.count; i++)
+ smu_data->smc_state_table.GraphicsLevel[i].pcieDpmLevel = highest_pcie_level_enabled;
+
+ /* set pcieDpmLevel to lowest_pcie_level_enabled*/
+ smu_data->smc_state_table.GraphicsLevel[0].pcieDpmLevel = lowest_pcie_level_enabled;
+
+ /* set pcieDpmLevel to mid_pcie_level_enabled*/
+ smu_data->smc_state_table.GraphicsLevel[1].pcieDpmLevel = mid_pcie_level_enabled;
+ }
+	/* The level count is sent to the SMC once at SMC table init and never changes. */
+ result = smu7_copy_bytes_to_smc(hwmgr, level_array_address,
+ (uint8_t *)levels, (uint32_t)level_array_size,
+ SMC_RAM_END);
+
+ return result;
+}
+
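+/*
+ * Translate a requested memory clock into MPLL register values: query the
+ * memory PLL dividers from the VBIOS, program the MPLL function control
+ * registers, optionally apply memory spread spectrum, and set the DLL
+ * speed/power-down bits before saving the result into the memory level.
+ */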
+static int tonga_calculate_mclk_params(
+ struct pp_hwmgr *hwmgr,
+ uint32_t memory_clock,
+ SMU72_Discrete_MemoryLevel *mclk,
+ bool strobe_mode,
+ bool dllStateOn
+ )
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ uint32_t dll_cntl = data->clock_registers.vDLL_CNTL;
+ uint32_t mclk_pwrmgt_cntl = data->clock_registers.vMCLK_PWRMGT_CNTL;
+ uint32_t mpll_ad_func_cntl = data->clock_registers.vMPLL_AD_FUNC_CNTL;
+ uint32_t mpll_dq_func_cntl = data->clock_registers.vMPLL_DQ_FUNC_CNTL;
+ uint32_t mpll_func_cntl = data->clock_registers.vMPLL_FUNC_CNTL;
+ uint32_t mpll_func_cntl_1 = data->clock_registers.vMPLL_FUNC_CNTL_1;
+ uint32_t mpll_func_cntl_2 = data->clock_registers.vMPLL_FUNC_CNTL_2;
+ uint32_t mpll_ss1 = data->clock_registers.vMPLL_SS1;
+ uint32_t mpll_ss2 = data->clock_registers.vMPLL_SS2;
+
+ pp_atomctrl_memory_clock_param mpll_param;
+ int result;
+
+ result = atomctrl_get_memory_pll_dividers_si(hwmgr,
+ memory_clock, &mpll_param, strobe_mode);
+ PP_ASSERT_WITH_CODE(
+ !result,
+ "Error retrieving Memory Clock Parameters from VBIOS.",
+ return result);
+
+ /* MPLL_FUNC_CNTL setup*/
+ mpll_func_cntl = PHM_SET_FIELD(mpll_func_cntl, MPLL_FUNC_CNTL, BWCTRL,
+ mpll_param.bw_ctrl);
+
+ /* MPLL_FUNC_CNTL_1 setup*/
+ mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1,
+ MPLL_FUNC_CNTL_1, CLKF,
+ mpll_param.mpll_fb_divider.cl_kf);
+ mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1,
+ MPLL_FUNC_CNTL_1, CLKFRAC,
+ mpll_param.mpll_fb_divider.clk_frac);
+ mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1,
+ MPLL_FUNC_CNTL_1, VCO_MODE,
+ mpll_param.vco_mode);
+
+ /* MPLL_AD_FUNC_CNTL setup*/
+ mpll_ad_func_cntl = PHM_SET_FIELD(mpll_ad_func_cntl,
+ MPLL_AD_FUNC_CNTL, YCLK_POST_DIV,
+ mpll_param.mpll_post_divider);
+
+ if (data->is_memory_gddr5) {
+ /* MPLL_DQ_FUNC_CNTL setup*/
+ mpll_dq_func_cntl = PHM_SET_FIELD(mpll_dq_func_cntl,
+ MPLL_DQ_FUNC_CNTL, YCLK_SEL,
+ mpll_param.yclk_sel);
+ mpll_dq_func_cntl = PHM_SET_FIELD(mpll_dq_func_cntl,
+ MPLL_DQ_FUNC_CNTL, YCLK_POST_DIV,
+ mpll_param.mpll_post_divider);
+ }
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_MemorySpreadSpectrumSupport)) {
+ /*
+ ************************************
+ Fref = Reference Frequency
+ NF = Feedback divider ratio
+ NR = Reference divider ratio
+ Fnom = Nominal VCO output frequency = Fref * NF / NR
+ Fs = Spreading Rate
+ D = Percentage down-spread / 2
+ Fint = Reference input frequency to PFD = Fref / NR
+ NS = Spreading rate divider ratio = int(Fint / (2 * Fs))
+ CLKS = NS - 1 = ISS_STEP_NUM[11:0]
+ NV = D * Fs / Fnom * 4 * ((Fnom/Fref * NR) ^ 2)
+ CLKV = 65536 * NV = ISS_STEP_SIZE[25:0]
+ *************************************
+ */
+ pp_atomctrl_internal_ss_info ss_info;
+ uint32_t freq_nom;
+ uint32_t tmp;
+ uint32_t reference_clock = atomctrl_get_mpll_reference_clock(hwmgr);
+
+ /* for GDDR5 for all modes and DDR3 */
+ if (1 == mpll_param.qdr)
+ freq_nom = memory_clock * 4 * (1 << mpll_param.mpll_post_divider);
+ else
+ freq_nom = memory_clock * 2 * (1 << mpll_param.mpll_post_divider);
+
+ /* tmp = (freq_nom / reference_clock * reference_divider) ^ 2 Note: S.I. reference_divider = 1*/
+ tmp = (freq_nom / reference_clock);
+ tmp = tmp * tmp;
+
+ if (0 == atomctrl_get_memory_clock_spread_spectrum(hwmgr, freq_nom, &ss_info)) {
+ /* ss_info.speed_spectrum_percentage -- in unit of 0.01% */
+ /* ss.Info.speed_spectrum_rate -- in unit of khz */
+ /* CLKS = reference_clock / (2 * speed_spectrum_rate * reference_divider) * 10 */
+ /* = reference_clock * 5 / speed_spectrum_rate */
+ uint32_t clks = reference_clock * 5 / ss_info.speed_spectrum_rate;
+
+ /* CLKV = 65536 * speed_spectrum_percentage / 2 * spreadSpecrumRate / freq_nom * 4 / 100000 * ((freq_nom / reference_clock) ^ 2) */
+ /* = 131 * speed_spectrum_percentage * speed_spectrum_rate / 100 * ((freq_nom / reference_clock) ^ 2) / freq_nom */
+ uint32_t clkv =
+ (uint32_t)((((131 * ss_info.speed_spectrum_percentage *
+ ss_info.speed_spectrum_rate) / 100) * tmp) / freq_nom);
+
+ mpll_ss1 = PHM_SET_FIELD(mpll_ss1, MPLL_SS1, CLKV, clkv);
+ mpll_ss2 = PHM_SET_FIELD(mpll_ss2, MPLL_SS2, CLKS, clks);
+ }
+ }
+
+ /* MCLK_PWRMGT_CNTL setup */
+ mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
+ MCLK_PWRMGT_CNTL, DLL_SPEED, mpll_param.dll_speed);
+ mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
+ MCLK_PWRMGT_CNTL, MRDCK0_PDNB, dllStateOn);
+ mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
+ MCLK_PWRMGT_CNTL, MRDCK1_PDNB, dllStateOn);
+
+	/* Save the result data to the output memory level structure. */
+ mclk->MclkFrequency = memory_clock;
+ mclk->MpllFuncCntl = mpll_func_cntl;
+ mclk->MpllFuncCntl_1 = mpll_func_cntl_1;
+ mclk->MpllFuncCntl_2 = mpll_func_cntl_2;
+ mclk->MpllAdFuncCntl = mpll_ad_func_cntl;
+ mclk->MpllDqFuncCntl = mpll_dq_func_cntl;
+ mclk->MclkPwrmgtCntl = mclk_pwrmgt_cntl;
+ mclk->DllCntl = dll_cntl;
+ mclk->MpllSs1 = mpll_ss1;
+ mclk->MpllSs2 = mpll_ss2;
+
+ return 0;
+}
+
+static uint8_t tonga_get_mclk_frequency_ratio(uint32_t memory_clock,
+ bool strobe_mode)
+{
+ uint8_t mc_para_index;
+
+ if (strobe_mode) {
+ if (memory_clock < 12500)
+ mc_para_index = 0x00;
+ else if (memory_clock > 47500)
+ mc_para_index = 0x0f;
+ else
+ mc_para_index = (uint8_t)((memory_clock - 10000) / 2500);
+ } else {
+ if (memory_clock < 65000)
+ mc_para_index = 0x00;
+ else if (memory_clock > 135000)
+ mc_para_index = 0x0f;
+ else
+ mc_para_index = (uint8_t)((memory_clock - 60000) / 5000);
+ }
+
+ return mc_para_index;
+}
+
+static uint8_t tonga_get_ddr3_mclk_frequency_ratio(uint32_t memory_clock)
+{
+ uint8_t mc_para_index;
+
+ if (memory_clock < 10000)
+ mc_para_index = 0;
+ else if (memory_clock >= 80000)
+ mc_para_index = 0x0f;
+ else
+ mc_para_index = (uint8_t)((memory_clock - 10000) / 5000 + 1);
+
+ return mc_para_index;
+}
+
+
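+/*
+ * Fill one SMU72 memory DPM level: minimum voltage and MVDD from the MCLK
+ * dependency table, stutter/strobe/EDC decisions based on the clock
+ * thresholds and memory type, DLL state, then the MPLL parameters and the
+ * conversion to SMC byte order.
+ */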
+static int tonga_populate_single_memory_level(
+ struct pp_hwmgr *hwmgr,
+ uint32_t memory_clock,
+ SMU72_Discrete_MemoryLevel *memory_level
+ )
+{
+ uint32_t mvdd = 0;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *pptable_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ int result = 0;
+ bool dll_state_on;
+ struct cgs_display_info info = {0};
+ uint32_t mclk_edc_wr_enable_threshold = 40000;
+ uint32_t mclk_stutter_mode_threshold = 30000;
+ uint32_t mclk_edc_enable_threshold = 40000;
+ uint32_t mclk_strobe_mode_threshold = 40000;
+
+ if (NULL != pptable_info->vdd_dep_on_mclk) {
+ result = tonga_get_dependency_volt_by_clk(hwmgr,
+ pptable_info->vdd_dep_on_mclk,
+ memory_clock,
+ &memory_level->MinVoltage, &mvdd);
+ PP_ASSERT_WITH_CODE(
+ !result,
+ "can not find MinVddc voltage value from memory VDDC "
+ "voltage dependency table",
+ return result);
+ }
+
+ if (data->mvdd_control == SMU7_VOLTAGE_CONTROL_NONE)
+ memory_level->MinMvdd = data->vbios_boot_state.mvdd_bootup_value;
+ else
+ memory_level->MinMvdd = mvdd;
+
+ memory_level->EnabledForThrottle = 1;
+ memory_level->EnabledForActivity = 0;
+ memory_level->UpHyst = 0;
+ memory_level->DownHyst = 100;
+ memory_level->VoltageDownHyst = 0;
+
+ /* Indicates maximum activity level for this performance level.*/
+ memory_level->ActivityLevel = (uint16_t)data->mclk_activity_target;
+ memory_level->StutterEnable = 0;
+ memory_level->StrobeEnable = 0;
+ memory_level->EdcReadEnable = 0;
+ memory_level->EdcWriteEnable = 0;
+ memory_level->RttEnable = 0;
+
+ /* default set to low watermark. Highest level will be set to high later.*/
+ memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
+
+ cgs_get_active_displays_info(hwmgr->device, &info);
+ data->display_timing.num_existing_displays = info.display_count;
+
+ if ((mclk_stutter_mode_threshold != 0) &&
+ (memory_clock <= mclk_stutter_mode_threshold) &&
+ (!data->is_uvd_enabled)
+ && (PHM_READ_FIELD(hwmgr->device, DPG_PIPE_STUTTER_CONTROL, STUTTER_ENABLE) & 0x1)
+ && (data->display_timing.num_existing_displays <= 2)
+ && (data->display_timing.num_existing_displays != 0))
+ memory_level->StutterEnable = 1;
+
+ /* decide strobe mode*/
+ memory_level->StrobeEnable = (mclk_strobe_mode_threshold != 0) &&
+ (memory_clock <= mclk_strobe_mode_threshold);
+
+ /* decide EDC mode and memory clock ratio*/
+ if (data->is_memory_gddr5) {
+ memory_level->StrobeRatio = tonga_get_mclk_frequency_ratio(memory_clock,
+ memory_level->StrobeEnable);
+
+ if ((mclk_edc_enable_threshold != 0) &&
+ (memory_clock > mclk_edc_enable_threshold)) {
+ memory_level->EdcReadEnable = 1;
+ }
+
+ if ((mclk_edc_wr_enable_threshold != 0) &&
+ (memory_clock > mclk_edc_wr_enable_threshold)) {
+ memory_level->EdcWriteEnable = 1;
+ }
+
+ if (memory_level->StrobeEnable) {
+ if (tonga_get_mclk_frequency_ratio(memory_clock, 1) >=
+ ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC7) >> 16) & 0xf)) {
+ dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 1 : 0;
+ } else {
+ dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC6) >> 1) & 0x1) ? 1 : 0;
+ }
+
+ } else {
+ dll_state_on = data->dll_default_on;
+ }
+ } else {
+ memory_level->StrobeRatio =
+ tonga_get_ddr3_mclk_frequency_ratio(memory_clock);
+ dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 1 : 0;
+ }
+
+ result = tonga_calculate_mclk_params(hwmgr,
+ memory_clock, memory_level, memory_level->StrobeEnable, dll_state_on);
+
+ if (!result) {
+ CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MinMvdd);
+ /* MCLK frequency in units of 10KHz*/
+ CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkFrequency);
+ /* Indicates maximum activity level for this performance level.*/
+ CONVERT_FROM_HOST_TO_SMC_US(memory_level->ActivityLevel);
+ CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl);
+ CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_1);
+ CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_2);
+ CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllAdFuncCntl);
+ CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllDqFuncCntl);
+ CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkPwrmgtCntl);
+ CONVERT_FROM_HOST_TO_SMC_UL(memory_level->DllCntl);
+ CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs1);
+ CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs2);
+ }
+
+ return result;
+}
+
+int tonga_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct tonga_smumgr *smu_data =
+ (struct tonga_smumgr *)(hwmgr->smu_backend);
+ struct smu7_dpm_table *dpm_table = &data->dpm_table;
+ int result;
+
+ /* populate MCLK dpm table to SMU7 */
+ uint32_t level_array_address =
+ smu_data->smu7_data.dpm_table_start +
+ offsetof(SMU72_Discrete_DpmTable, MemoryLevel);
+ uint32_t level_array_size =
+ sizeof(SMU72_Discrete_MemoryLevel) *
+ SMU72_MAX_LEVELS_MEMORY;
+ SMU72_Discrete_MemoryLevel *levels =
+ smu_data->smc_state_table.MemoryLevel;
+ uint32_t i;
+
+ memset(levels, 0x00, level_array_size);
+
+ for (i = 0; i < dpm_table->mclk_table.count; i++) {
+ PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value),
+ "can not populate memory level as memory clock is zero",
+ return -EINVAL);
+ result = tonga_populate_single_memory_level(
+ hwmgr,
+ dpm_table->mclk_table.dpm_levels[i].value,
+ &(smu_data->smc_state_table.MemoryLevel[i]));
+ if (result)
+ return result;
+ }
+
+ /* Only enable level 0 for now.*/
+ smu_data->smc_state_table.MemoryLevel[0].EnabledForActivity = 1;
+
+	/*
+	 * Keep MC activity in stutter mode from pushing DPM up. The UVD change
+	 * complements this by putting the MCLK in a higher state by default,
+	 * so that we are not affected by the up threshold or MCLK DPM latency.
+	 */
+ smu_data->smc_state_table.MemoryLevel[0].ActivityLevel = 0x1F;
+ CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.MemoryLevel[0].ActivityLevel);
+
+ smu_data->smc_state_table.MemoryDpmLevelCount = (uint8_t)dpm_table->mclk_table.count;
+ data->dpm_level_enable_mask.mclk_dpm_enable_mask = phm_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
+ /* set highest level watermark to high*/
+ smu_data->smc_state_table.MemoryLevel[dpm_table->mclk_table.count-1].DisplayWatermark = PPSMC_DISPLAY_WATERMARK_HIGH;
+
+	/* The level count is sent to the SMC once at SMC table init and never changes. */
+ result = smu7_copy_bytes_to_smc(hwmgr,
+ level_array_address, (uint8_t *)levels, (uint32_t)level_array_size,
+ SMC_RAM_END);
+
+ return result;
+}
+
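+/*
+ * Pick the MVDD SMIO voltage for a memory clock: walk the MCLK dependency
+ * table and use the first entry whose clock is at least the requested one.
+ */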
+static int tonga_populate_mvdd_value(struct pp_hwmgr *hwmgr,
+ uint32_t mclk, SMIO_Pattern *smio_pattern)
+{
+ const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ uint32_t i = 0;
+
+ if (SMU7_VOLTAGE_CONTROL_NONE != data->mvdd_control) {
+		/* Find the first MVDD entry whose clock is at least the requested clock. */
+ for (i = 0; i < table_info->vdd_dep_on_mclk->count; i++) {
+ if (mclk <= table_info->vdd_dep_on_mclk->entries[i].clk) {
+ /* Always round to higher voltage. */
+ smio_pattern->Voltage =
+ data->mvdd_voltage_table.entries[i].value;
+ break;
+ }
+ }
+
+ PP_ASSERT_WITH_CODE(i < table_info->vdd_dep_on_mclk->count,
+ "MVDD Voltage is outside the supported range.",
+ return -EINVAL);
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+
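+/*
+ * Program the ACPI (lowest power) level: set the ACPI SCLK to the reference
+ * clock with the SPLL powered down and held in reset, disable and reset the
+ * memory DLLs, and fall back to the boot-up MVDD value.
+ */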
+static int tonga_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
+ SMU72_Discrete_DpmTable *table)
+{
+ int result = 0;
+ struct tonga_smumgr *smu_data =
+ (struct tonga_smumgr *)(hwmgr->smu_backend);
+ const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct pp_atomctrl_clock_dividers_vi dividers;
+
+ SMIO_Pattern voltage_level;
+ uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL;
+ uint32_t spll_func_cntl_2 = data->clock_registers.vCG_SPLL_FUNC_CNTL_2;
+ uint32_t dll_cntl = data->clock_registers.vDLL_CNTL;
+ uint32_t mclk_pwrmgt_cntl = data->clock_registers.vMCLK_PWRMGT_CNTL;
+
+ /* The ACPI state should not do DPM on DC (or ever).*/
+ table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
+
+ table->ACPILevel.MinVoltage =
+ smu_data->smc_state_table.GraphicsLevel[0].MinVoltage;
+
+	/* Use the reference clock as the ACPI SCLK. */
+ table->ACPILevel.SclkFrequency = atomctrl_get_reference_clock(hwmgr);
+
+ /* get the engine clock dividers for this clock value*/
+ result = atomctrl_get_engine_pll_dividers_vi(hwmgr,
+ table->ACPILevel.SclkFrequency, &dividers);
+
+ PP_ASSERT_WITH_CODE(result == 0,
+ "Error retrieving Engine Clock dividers from VBIOS.",
+ return result);
+
+ /* divider ID for required SCLK*/
+ table->ACPILevel.SclkDid = (uint8_t)dividers.pll_post_divider;
+ table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
+ table->ACPILevel.DeepSleepDivId = 0;
+
+ spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
+ SPLL_PWRON, 0);
+ spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
+ SPLL_RESET, 1);
+ spll_func_cntl_2 = PHM_SET_FIELD(spll_func_cntl_2, CG_SPLL_FUNC_CNTL_2,
+ SCLK_MUX_SEL, 4);
+
+ table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
+ table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
+ table->ACPILevel.CgSpllFuncCntl3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
+ table->ACPILevel.CgSpllFuncCntl4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
+ table->ACPILevel.SpllSpreadSpectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
+ table->ACPILevel.SpllSpreadSpectrum2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
+ table->ACPILevel.CcPwrDynRm = 0;
+ table->ACPILevel.CcPwrDynRm1 = 0;
+
+
+ /* For various features to be enabled/disabled while this level is active.*/
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags);
+ /* SCLK frequency in units of 10KHz*/
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkFrequency);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl2);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl3);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl4);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum2);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1);
+
+ /* table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;*/
+ table->MemoryACPILevel.MinVoltage =
+ smu_data->smc_state_table.MemoryLevel[0].MinVoltage;
+
+ /* CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MinVoltage);*/
+
+ if (0 == tonga_populate_mvdd_value(hwmgr, 0, &voltage_level))
+ table->MemoryACPILevel.MinMvdd =
+ PP_HOST_TO_SMC_UL(voltage_level.Voltage * VOLTAGE_SCALE);
+ else
+ table->MemoryACPILevel.MinMvdd = 0;
+
+ /* Force reset on DLL*/
+ mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
+ MCLK_PWRMGT_CNTL, MRDCK0_RESET, 0x1);
+ mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
+ MCLK_PWRMGT_CNTL, MRDCK1_RESET, 0x1);
+
+ /* Disable DLL in ACPIState*/
+ mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
+ MCLK_PWRMGT_CNTL, MRDCK0_PDNB, 0);
+ mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
+ MCLK_PWRMGT_CNTL, MRDCK1_PDNB, 0);
+
+ /* Enable DLL bypass signal*/
+ dll_cntl = PHM_SET_FIELD(dll_cntl,
+ DLL_CNTL, MRDCK0_BYPASS, 0);
+ dll_cntl = PHM_SET_FIELD(dll_cntl,
+ DLL_CNTL, MRDCK1_BYPASS, 0);
+
+ table->MemoryACPILevel.DllCntl =
+ PP_HOST_TO_SMC_UL(dll_cntl);
+ table->MemoryACPILevel.MclkPwrmgtCntl =
+ PP_HOST_TO_SMC_UL(mclk_pwrmgt_cntl);
+ table->MemoryACPILevel.MpllAdFuncCntl =
+ PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_AD_FUNC_CNTL);
+ table->MemoryACPILevel.MpllDqFuncCntl =
+ PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_DQ_FUNC_CNTL);
+ table->MemoryACPILevel.MpllFuncCntl =
+ PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL);
+ table->MemoryACPILevel.MpllFuncCntl_1 =
+ PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_1);
+ table->MemoryACPILevel.MpllFuncCntl_2 =
+ PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_2);
+ table->MemoryACPILevel.MpllSs1 =
+ PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS1);
+ table->MemoryACPILevel.MpllSs2 =
+ PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS2);
+
+ table->MemoryACPILevel.EnabledForThrottle = 0;
+ table->MemoryACPILevel.EnabledForActivity = 0;
+ table->MemoryACPILevel.UpHyst = 0;
+ table->MemoryACPILevel.DownHyst = 100;
+ table->MemoryACPILevel.VoltageDownHyst = 0;
+ /* Indicates maximum activity level for this performance level.*/
+ table->MemoryACPILevel.ActivityLevel =
+ PP_HOST_TO_SMC_US((uint16_t)data->mclk_activity_target);
+
+ table->MemoryACPILevel.StutterEnable = 0;
+ table->MemoryACPILevel.StrobeEnable = 0;
+ table->MemoryACPILevel.EdcReadEnable = 0;
+ table->MemoryACPILevel.EdcWriteEnable = 0;
+ table->MemoryACPILevel.RttEnable = 0;
+
+ return result;
+}
+
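+/*
+ * The UVD/VCE/ACP/SAMU level populators below follow the same pattern:
+ * clocks come from the multimedia dependency table, the minimum voltages
+ * are looked up per entry, and the dividers are retrieved from the VBIOS.
+ */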
+static int tonga_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
+ SMU72_Discrete_DpmTable *table)
+{
+ int result = 0;
+
+ uint8_t count;
+ pp_atomctrl_clock_dividers_vi dividers;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *pptable_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
+ pptable_info->mm_dep_table;
+
+ table->UvdLevelCount = (uint8_t) (mm_table->count);
+ table->UvdBootLevel = 0;
+
+ for (count = 0; count < table->UvdLevelCount; count++) {
+ table->UvdLevel[count].VclkFrequency = mm_table->entries[count].vclk;
+ table->UvdLevel[count].DclkFrequency = mm_table->entries[count].dclk;
+ table->UvdLevel[count].MinVoltage.Vddc =
+ phm_get_voltage_index(pptable_info->vddc_lookup_table,
+ mm_table->entries[count].vddc);
+ table->UvdLevel[count].MinVoltage.VddGfx =
+ (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) ?
+ phm_get_voltage_index(pptable_info->vddgfx_lookup_table,
+ mm_table->entries[count].vddgfx) : 0;
+ table->UvdLevel[count].MinVoltage.Vddci =
+ phm_get_voltage_id(&data->vddci_voltage_table,
+ mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
+ table->UvdLevel[count].MinVoltage.Phases = 1;
+
+		/* Retrieve the divider value from the VBIOS. */
+ result = atomctrl_get_dfs_pll_dividers_vi(
+ hwmgr,
+ table->UvdLevel[count].VclkFrequency,
+ &dividers);
+
+ PP_ASSERT_WITH_CODE((!result),
+ "can not find divide id for Vclk clock",
+ return result);
+
+ table->UvdLevel[count].VclkDivider = (uint8_t)dividers.pll_post_divider;
+
+ result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+ table->UvdLevel[count].DclkFrequency, &dividers);
+ PP_ASSERT_WITH_CODE((!result),
+ "can not find divide id for Dclk clock",
+ return result);
+
+ table->UvdLevel[count].DclkDivider =
+ (uint8_t)dividers.pll_post_divider;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency);
+ }
+
+ return result;
+
+}
+
+static int tonga_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
+ SMU72_Discrete_DpmTable *table)
+{
+ int result = 0;
+
+ uint8_t count;
+ pp_atomctrl_clock_dividers_vi dividers;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *pptable_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
+ pptable_info->mm_dep_table;
+
+ table->VceLevelCount = (uint8_t) (mm_table->count);
+ table->VceBootLevel = 0;
+
+ for (count = 0; count < table->VceLevelCount; count++) {
+ table->VceLevel[count].Frequency =
+ mm_table->entries[count].eclk;
+ table->VceLevel[count].MinVoltage.Vddc =
+ phm_get_voltage_index(pptable_info->vddc_lookup_table,
+ mm_table->entries[count].vddc);
+ table->VceLevel[count].MinVoltage.VddGfx =
+ (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) ?
+ phm_get_voltage_index(pptable_info->vddgfx_lookup_table,
+ mm_table->entries[count].vddgfx) : 0;
+ table->VceLevel[count].MinVoltage.Vddci =
+ phm_get_voltage_id(&data->vddci_voltage_table,
+ mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
+ table->VceLevel[count].MinVoltage.Phases = 1;
+
+		/* Retrieve the divider value from the VBIOS. */
+ result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+ table->VceLevel[count].Frequency, &dividers);
+ PP_ASSERT_WITH_CODE((!result),
+ "can not find divide id for VCE engine clock",
+ return result);
+
+ table->VceLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].Frequency);
+ }
+
+ return result;
+}
+
+static int tonga_populate_smc_acp_level(struct pp_hwmgr *hwmgr,
+ SMU72_Discrete_DpmTable *table)
+{
+ int result = 0;
+ uint8_t count;
+ pp_atomctrl_clock_dividers_vi dividers;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *pptable_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
+ pptable_info->mm_dep_table;
+
+ table->AcpLevelCount = (uint8_t) (mm_table->count);
+ table->AcpBootLevel = 0;
+
+ for (count = 0; count < table->AcpLevelCount; count++) {
+ table->AcpLevel[count].Frequency =
+ pptable_info->mm_dep_table->entries[count].aclk;
+ table->AcpLevel[count].MinVoltage.Vddc =
+ phm_get_voltage_index(pptable_info->vddc_lookup_table,
+ mm_table->entries[count].vddc);
+ table->AcpLevel[count].MinVoltage.VddGfx =
+ (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) ?
+ phm_get_voltage_index(pptable_info->vddgfx_lookup_table,
+ mm_table->entries[count].vddgfx) : 0;
+ table->AcpLevel[count].MinVoltage.Vddci =
+ phm_get_voltage_id(&data->vddci_voltage_table,
+ mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
+ table->AcpLevel[count].MinVoltage.Phases = 1;
+
+		/* Retrieve the divider value from the VBIOS. */
+ result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+ table->AcpLevel[count].Frequency, &dividers);
+ PP_ASSERT_WITH_CODE((!result),
+ "can not find divide id for engine clock", return result);
+
+ table->AcpLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->AcpLevel[count].Frequency);
+ }
+
+ return result;
+}
+
+static int tonga_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
+ SMU72_Discrete_DpmTable *table)
+{
+ int result = 0;
+ uint8_t count;
+ pp_atomctrl_clock_dividers_vi dividers;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *pptable_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
+ pptable_info->mm_dep_table;
+
+ table->SamuBootLevel = 0;
+ table->SamuLevelCount = (uint8_t) (mm_table->count);
+
+ for (count = 0; count < table->SamuLevelCount; count++) {
+ /* not sure whether we need evclk or not */
+ table->SamuLevel[count].Frequency =
+ pptable_info->mm_dep_table->entries[count].samclock;
+ table->SamuLevel[count].MinVoltage.Vddc =
+ phm_get_voltage_index(pptable_info->vddc_lookup_table,
+ mm_table->entries[count].vddc);
+ table->SamuLevel[count].MinVoltage.VddGfx =
+ (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) ?
+ phm_get_voltage_index(pptable_info->vddgfx_lookup_table,
+ mm_table->entries[count].vddgfx) : 0;
+ table->SamuLevel[count].MinVoltage.Vddci =
+ phm_get_voltage_id(&data->vddci_voltage_table,
+ mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
+ table->SamuLevel[count].MinVoltage.Phases = 1;
+
+		/* Retrieve the divider value from the VBIOS. */
+ result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+ table->SamuLevel[count].Frequency, &dividers);
+ PP_ASSERT_WITH_CODE((!result),
+ "can not find divide id for samu clock", return result);
+
+ table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency);
+ }
+
+ return result;
+}
+
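+/*
+ * Ask the VBIOS to program the DRAM timing for the given SCLK/MCLK pair,
+ * then read back MC_ARB_DRAM_TIMING/TIMING2 and the burst time into the
+ * MC arbiter table entry.
+ */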
+static int tonga_populate_memory_timing_parameters(
+ struct pp_hwmgr *hwmgr,
+ uint32_t engine_clock,
+ uint32_t memory_clock,
+ struct SMU72_Discrete_MCArbDramTimingTableEntry *arb_regs
+ )
+{
+ uint32_t dramTiming;
+ uint32_t dramTiming2;
+ uint32_t burstTime;
+ int result;
+
+ result = atomctrl_set_engine_dram_timings_rv770(hwmgr,
+ engine_clock, memory_clock);
+
+ PP_ASSERT_WITH_CODE(result == 0,
+ "Error calling VBIOS to set DRAM_TIMING.", return result);
+
+ dramTiming = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
+ dramTiming2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
+ burstTime = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);
+
+ arb_regs->McArbDramTiming = PP_HOST_TO_SMC_UL(dramTiming);
+ arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dramTiming2);
+ arb_regs->McArbBurstTime = (uint8_t)burstTime;
+
+ return 0;
+}
+
+static int tonga_program_memory_timing_parameters(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct tonga_smumgr *smu_data =
+ (struct tonga_smumgr *)(hwmgr->smu_backend);
+ int result = 0;
+ SMU72_Discrete_MCArbDramTimingTable arb_regs;
+ uint32_t i, j;
+
+ memset(&arb_regs, 0x00, sizeof(SMU72_Discrete_MCArbDramTimingTable));
+
+ for (i = 0; i < data->dpm_table.sclk_table.count; i++) {
+ for (j = 0; j < data->dpm_table.mclk_table.count; j++) {
+ result = tonga_populate_memory_timing_parameters
+ (hwmgr, data->dpm_table.sclk_table.dpm_levels[i].value,
+ data->dpm_table.mclk_table.dpm_levels[j].value,
+ &arb_regs.entries[i][j]);
+
+ if (result)
+ break;
+ }
+ }
+
+ if (!result) {
+ result = smu7_copy_bytes_to_smc(
+ hwmgr,
+ smu_data->smu7_data.arb_table_start,
+ (uint8_t *)&arb_regs,
+ sizeof(SMU72_Discrete_MCArbDramTimingTable),
+ SMC_RAM_END
+ );
+ }
+
+ return result;
+}
+
+static int tonga_populate_smc_boot_level(struct pp_hwmgr *hwmgr,
+ SMU72_Discrete_DpmTable *table)
+{
+ int result = 0;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct tonga_smumgr *smu_data =
+ (struct tonga_smumgr *)(hwmgr->smu_backend);
+ table->GraphicsBootLevel = 0;
+ table->MemoryBootLevel = 0;
+
+ /* find boot level from dpm table*/
+ result = phm_find_boot_level(&(data->dpm_table.sclk_table),
+ data->vbios_boot_state.sclk_bootup_value,
+ (uint32_t *)&(smu_data->smc_state_table.GraphicsBootLevel));
+
+ if (result != 0) {
+ smu_data->smc_state_table.GraphicsBootLevel = 0;
+ pr_err("[powerplay] VBIOS did not find boot engine "
+ "clock value in dependency table. "
+ "Using Graphics DPM level 0 !");
+ result = 0;
+ }
+
+ result = phm_find_boot_level(&(data->dpm_table.mclk_table),
+ data->vbios_boot_state.mclk_bootup_value,
+ (uint32_t *)&(smu_data->smc_state_table.MemoryBootLevel));
+
+ if (result != 0) {
+ smu_data->smc_state_table.MemoryBootLevel = 0;
+ pr_err("[powerplay] VBIOS did not find boot "
+				"memory clock value in dependency table. "
+ "Using Memory DPM level 0 !");
+ result = 0;
+ }
+
+ table->BootVoltage.Vddc =
+ phm_get_voltage_id(&(data->vddc_voltage_table),
+ data->vbios_boot_state.vddc_bootup_value);
+ table->BootVoltage.VddGfx =
+ phm_get_voltage_id(&(data->vddgfx_voltage_table),
+ data->vbios_boot_state.vddgfx_bootup_value);
+ table->BootVoltage.Vddci =
+ phm_get_voltage_id(&(data->vddci_voltage_table),
+ data->vbios_boot_state.vddci_bootup_value);
+ table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value;
+
+ CONVERT_FROM_HOST_TO_SMC_US(table->BootMVdd);
+
+ return result;
+}
+
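+/*
+ * Set up the clock stretcher: read the efuses to estimate RO and classify
+ * the part as SS or FF, compute a per-SCLK CKS voltage offset, program the
+ * PWR_CKS_ENABLE/PWR_CKS_CNTL registers and fill the CKS lookup and clock
+ * stretcher data tables.
+ */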
+static int tonga_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
+{
+ uint32_t ro, efuse, efuse2, clock_freq, volt_without_cks,
+ volt_with_cks, value;
+ uint16_t clock_freq_u16;
+ struct tonga_smumgr *smu_data =
+ (struct tonga_smumgr *)(hwmgr->smu_backend);
+ uint8_t type, i, j, cks_setting, stretch_amount, stretch_amount2,
+ volt_offset = 0;
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
+ table_info->vdd_dep_on_sclk;
+ uint32_t hw_revision, dev_id;
+ struct cgs_system_info sys_info = {0};
+
+ stretch_amount = (uint8_t)table_info->cac_dtp_table->usClockStretchAmount;
+
+ sys_info.size = sizeof(struct cgs_system_info);
+
+ sys_info.info_id = CGS_SYSTEM_INFO_PCIE_REV;
+ cgs_query_system_info(hwmgr->device, &sys_info);
+ hw_revision = (uint32_t)sys_info.value;
+
+ sys_info.info_id = CGS_SYSTEM_INFO_PCIE_DEV;
+ cgs_query_system_info(hwmgr->device, &sys_info);
+ dev_id = (uint32_t)sys_info.value;
+
+	/* Read SMU_EFUSE to calculate RO and determine
+	 * whether the part is SS or FF. If RO >= 1660MHz, the part is FF.
+	 */
+ efuse = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixSMU_EFUSE_0 + (146 * 4));
+ efuse2 = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixSMU_EFUSE_0 + (148 * 4));
+ efuse &= 0xFF000000;
+ efuse = efuse >> 24;
+ efuse2 &= 0xF;
+
+ if (efuse2 == 1)
+ ro = (2300 - 1350) * efuse / 255 + 1350;
+ else
+ ro = (2500 - 1000) * efuse / 255 + 1000;
+
+ if (ro >= 1660)
+ type = 0;
+ else
+ type = 1;
+
+ /* Populate Stretch amount */
+ smu_data->smc_state_table.ClockStretcherAmount = stretch_amount;
+
+
+ /* Populate Sclk_CKS_masterEn0_7 and Sclk_voltageOffset */
+ for (i = 0; i < sclk_table->count; i++) {
+ smu_data->smc_state_table.Sclk_CKS_masterEn0_7 |=
+ sclk_table->entries[i].cks_enable << i;
+ if (ASICID_IS_TONGA_P(dev_id, hw_revision)) {
+ volt_without_cks = (uint32_t)((7732 + 60 - ro - 20838 *
+ (sclk_table->entries[i].clk/100) / 10000) * 1000 /
+ (8730 - (5301 * (sclk_table->entries[i].clk/100) / 1000)));
+ volt_with_cks = (uint32_t)((5250 + 51 - ro - 2404 *
+ (sclk_table->entries[i].clk/100) / 100000) * 1000 /
+ (6146 - (3193 * (sclk_table->entries[i].clk/100) / 1000)));
+ } else {
+ volt_without_cks = (uint32_t)((14041 *
+ (sclk_table->entries[i].clk/100) / 10000 + 3571 + 75 - ro) * 1000 /
+ (4026 - (13924 * (sclk_table->entries[i].clk/100) / 10000)));
+ volt_with_cks = (uint32_t)((13946 *
+ (sclk_table->entries[i].clk/100) / 10000 + 3320 + 45 - ro) * 1000 /
+ (3664 - (11454 * (sclk_table->entries[i].clk/100) / 10000)));
+ }
+ if (volt_without_cks >= volt_with_cks)
+ volt_offset = (uint8_t)(((volt_without_cks - volt_with_cks +
+ sclk_table->entries[i].cks_voffset) * 100 / 625) + 1);
+ smu_data->smc_state_table.Sclk_voltageOffset[i] = volt_offset;
+ }
+
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
+ STRETCH_ENABLE, 0x0);
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
+ masterReset, 0x1);
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
+ staticEnable, 0x1);
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
+ masterReset, 0x0);
+
+ /* Populate CKS Lookup Table */
+ if (stretch_amount == 1 || stretch_amount == 2 || stretch_amount == 5)
+ stretch_amount2 = 0;
+ else if (stretch_amount == 3 || stretch_amount == 4)
+ stretch_amount2 = 1;
+ else {
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_ClockStretcher);
+ PP_ASSERT_WITH_CODE(false,
+ "Stretch Amount in PPTable not supported\n",
+ return -EINVAL);
+ }
+
+ value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixPWR_CKS_CNTL);
+ value &= 0xFFC2FF87;
+ smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].minFreq =
+ tonga_clock_stretcher_lookup_table[stretch_amount2][0];
+ smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].maxFreq =
+ tonga_clock_stretcher_lookup_table[stretch_amount2][1];
+ clock_freq_u16 = (uint16_t)(PP_SMC_TO_HOST_UL(smu_data->smc_state_table.
+ GraphicsLevel[smu_data->smc_state_table.GraphicsDpmLevelCount - 1].
+ SclkFrequency) / 100);
+ if (tonga_clock_stretcher_lookup_table[stretch_amount2][0] <
+ clock_freq_u16 &&
+ tonga_clock_stretcher_lookup_table[stretch_amount2][1] >
+ clock_freq_u16) {
+ /* Program PWR_CKS_CNTL. CKS_USE_FOR_LOW_FREQ */
+ value |= (tonga_clock_stretcher_lookup_table[stretch_amount2][3]) << 16;
+ /* Program PWR_CKS_CNTL. CKS_LDO_REFSEL */
+ value |= (tonga_clock_stretcher_lookup_table[stretch_amount2][2]) << 18;
+ /* Program PWR_CKS_CNTL. CKS_STRETCH_AMOUNT */
+ value |= (tonga_clock_stretch_amount_conversion
+ [tonga_clock_stretcher_lookup_table[stretch_amount2][3]]
+ [stretch_amount]) << 3;
+ }
+ CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.CKS_LOOKUPTable.
+ CKS_LOOKUPTableEntry[0].minFreq);
+ CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.CKS_LOOKUPTable.
+ CKS_LOOKUPTableEntry[0].maxFreq);
+ smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].setting =
+ tonga_clock_stretcher_lookup_table[stretch_amount2][2] & 0x7F;
+ smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].setting |=
+ (tonga_clock_stretcher_lookup_table[stretch_amount2][3]) << 7;
+
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixPWR_CKS_CNTL, value);
+
+ /* Populate DDT Lookup Table */
+ for (i = 0; i < 4; i++) {
+ /* Assign the minimum and maximum VID stored
+ * in the last row of Clock Stretcher Voltage Table.
+ */
+ smu_data->smc_state_table.ClockStretcherDataTable.
+ ClockStretcherDataTableEntry[i].minVID =
+ (uint8_t) tonga_clock_stretcher_ddt_table[type][i][2];
+ smu_data->smc_state_table.ClockStretcherDataTable.
+ ClockStretcherDataTableEntry[i].maxVID =
+ (uint8_t) tonga_clock_stretcher_ddt_table[type][i][3];
+		/* Loop through each SCLK and check whether its frequency
+		 * lies within the clock stretcher frequency range.
+		 */
+ for (j = 0; j < smu_data->smc_state_table.GraphicsDpmLevelCount; j++) {
+ cks_setting = 0;
+ clock_freq = PP_SMC_TO_HOST_UL(
+ smu_data->smc_state_table.GraphicsLevel[j].SclkFrequency);
+			/* Check the allowed frequency against sclk level[j].
+			 * The SCLK's endianness has already been converted,
+			 * and it is in 10 kHz units,
+			 * as opposed to the data table, which is in MHz units.
+			 */
+ if (clock_freq >= tonga_clock_stretcher_ddt_table[type][i][0] * 100) {
+ cks_setting |= 0x2;
+ if (clock_freq < tonga_clock_stretcher_ddt_table[type][i][1] * 100)
+ cks_setting |= 0x1;
+ }
+ smu_data->smc_state_table.ClockStretcherDataTable.
+ ClockStretcherDataTableEntry[i].setting |= cks_setting << (j * 2);
+ }
+ CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.
+ ClockStretcherDataTable.
+ ClockStretcherDataTableEntry[i].setting);
+ }
+
+ value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixPWR_CKS_CNTL);
+ value &= 0xFFFFFFFE;
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixPWR_CKS_CNTL, value);
+
+ return 0;
+}
+
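+/*
+ * Select the voltage regulator configuration: VDDGFX/VDDC on separate SVI2
+ * planes in split mode or merged on a single plane, VDDCI on SVI2 or an
+ * SMIO pattern, and MVDD on an SMIO pattern.
+ */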
+static int tonga_populate_vr_config(struct pp_hwmgr *hwmgr,
+ SMU72_Discrete_DpmTable *table)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ uint16_t config;
+
+ if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_gfx_control) {
+		/* Split mode */
+ config = VR_SVI2_PLANE_1;
+ table->VRConfig |= (config<<VRCONF_VDDGFX_SHIFT);
+
+ if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
+ config = VR_SVI2_PLANE_2;
+ table->VRConfig |= config;
+ } else {
+			pr_err("VDDC and VDDGFX should "
+				"both be on SVI2 control in split mode!\n");
+ }
+ } else {
+ /* Merged mode */
+ config = VR_MERGED_WITH_VDDC;
+ table->VRConfig |= (config<<VRCONF_VDDGFX_SHIFT);
+
+ /* Set Vddc Voltage Controller */
+ if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
+ config = VR_SVI2_PLANE_1;
+ table->VRConfig |= config;
+ } else {
+ pr_err("VDDC should be on "
+ "SVI2 control in merged mode !\n");
+ }
+ }
+
+ /* Set Vddci Voltage Controller */
+ if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
+ config = VR_SVI2_PLANE_2; /* only in merged mode */
+ table->VRConfig |= (config<<VRCONF_VDDCI_SHIFT);
+ } else if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
+ config = VR_SMIO_PATTERN_1;
+ table->VRConfig |= (config<<VRCONF_VDDCI_SHIFT);
+ }
+
+ /* Set Mvdd Voltage Controller */
+ if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
+ config = VR_SMIO_PATTERN_2;
+ table->VRConfig |= (config<<VRCONF_MVDD_SHIFT);
+ }
+
+ return 0;
+}
+
+static int tonga_init_arb_table_index(struct pp_hwmgr *hwmgr)
+{
+ struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend);
+ uint32_t tmp;
+ int result;
+
+	/*
+	 * This is a read-modify-write on the first byte of the ARB table.
+	 * The first byte in the SMU72_Discrete_MCArbDramTimingTable structure
+	 * is the field 'current'.
+	 * This solution is ugly, but we never write the whole table, only
+	 * individual fields in it.
+	 * In reality this field should not be in that structure,
+	 * but in a soft register.
+	 */
+ result = smu7_read_smc_sram_dword(hwmgr,
+ smu_data->smu7_data.arb_table_start, &tmp, SMC_RAM_END);
+
+ if (result != 0)
+ return result;
+
+ tmp &= 0x00FFFFFF;
+ tmp |= ((uint32_t)MC_CG_ARB_FREQ_F1) << 24;
+
+ return smu7_write_smc_sram_dword(hwmgr,
+ smu_data->smu7_data.arb_table_start, tmp, SMC_RAM_END);
+}
+
+
+static int tonga_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr)
+{
+ struct tonga_smumgr *smu_data =
+ (struct tonga_smumgr *)(hwmgr->smu_backend);
+ const struct tonga_pt_defaults *defaults = smu_data->power_tune_defaults;
+ SMU72_Discrete_DpmTable *dpm_table = &(smu_data->smc_state_table);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct phm_cac_tdp_table *cac_dtp_table = table_info->cac_dtp_table;
+ int i, j, k;
+ const uint16_t *pdef1, *pdef2;
+
+ dpm_table->DefaultTdp = PP_HOST_TO_SMC_US(
+ (uint16_t)(cac_dtp_table->usTDP * 256));
+ dpm_table->TargetTdp = PP_HOST_TO_SMC_US(
+ (uint16_t)(cac_dtp_table->usConfigurableTDP * 256));
+
+ PP_ASSERT_WITH_CODE(cac_dtp_table->usTargetOperatingTemp <= 255,
+ "Target Operating Temp is out of Range !",
+ );
+
+ dpm_table->GpuTjMax = (uint8_t)(cac_dtp_table->usTargetOperatingTemp);
+ dpm_table->GpuTjHyst = 8;
+
+ dpm_table->DTEAmbientTempBase = defaults->dte_ambient_temp_base;
+
+ dpm_table->BAPM_TEMP_GRADIENT =
+ PP_HOST_TO_SMC_UL(defaults->bapm_temp_gradient);
+ pdef1 = defaults->bapmti_r;
+ pdef2 = defaults->bapmti_rc;
+
+ for (i = 0; i < SMU72_DTE_ITERATIONS; i++) {
+ for (j = 0; j < SMU72_DTE_SOURCES; j++) {
+ for (k = 0; k < SMU72_DTE_SINKS; k++) {
+ dpm_table->BAPMTI_R[i][j][k] =
+ PP_HOST_TO_SMC_US(*pdef1);
+ dpm_table->BAPMTI_RC[i][j][k] =
+ PP_HOST_TO_SMC_US(*pdef2);
+ pdef1++;
+ pdef2++;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int tonga_populate_svi_load_line(struct pp_hwmgr *hwmgr)
+{
+ struct tonga_smumgr *smu_data =
+ (struct tonga_smumgr *)(hwmgr->smu_backend);
+ const struct tonga_pt_defaults *defaults = smu_data->power_tune_defaults;
+
+ smu_data->power_tune_table.SviLoadLineEn = defaults->svi_load_line_en;
+ smu_data->power_tune_table.SviLoadLineVddC = defaults->svi_load_line_vddC;
+ smu_data->power_tune_table.SviLoadLineTrimVddC = 3;
+ smu_data->power_tune_table.SviLoadLineOffsetVddC = 0;
+
+ return 0;
+}
+
+static int tonga_populate_tdc_limit(struct pp_hwmgr *hwmgr)
+{
+ uint16_t tdc_limit;
+ struct tonga_smumgr *smu_data =
+ (struct tonga_smumgr *)(hwmgr->smu_backend);
+ const struct tonga_pt_defaults *defaults = smu_data->power_tune_defaults;
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+	/* The number of TDC fraction bits was changed from 8 to 7
+	 * for Fiji as requested by the SMC team.
+	 */
+ tdc_limit = (uint16_t)(table_info->cac_dtp_table->usTDC * 256);
+ smu_data->power_tune_table.TDC_VDDC_PkgLimit =
+ CONVERT_FROM_HOST_TO_SMC_US(tdc_limit);
+ smu_data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
+ defaults->tdc_vddc_throttle_release_limit_perc;
+ smu_data->power_tune_table.TDC_MAWt = defaults->tdc_mawt;
+
+ return 0;
+}
+
+static int tonga_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
+{
+ struct tonga_smumgr *smu_data =
+ (struct tonga_smumgr *)(hwmgr->smu_backend);
+ const struct tonga_pt_defaults *defaults = smu_data->power_tune_defaults;
+ uint32_t temp;
+
+ if (smu7_read_smc_sram_dword(hwmgr,
+ fuse_table_offset +
+ offsetof(SMU72_Discrete_PmFuses, TdcWaterfallCtl),
+ (uint32_t *)&temp, SMC_RAM_END))
+ PP_ASSERT_WITH_CODE(false,
+				"Attempt to read PmFuses.DW8 "
+				"(TdcWaterfallCtl) from SMC Failed !",
+ return -EINVAL);
+ else
+ smu_data->power_tune_table.TdcWaterfallCtl = defaults->tdc_waterfall_ctl;
+
+ return 0;
+}
+
+static int tonga_populate_temperature_scaler(struct pp_hwmgr *hwmgr)
+{
+ int i;
+ struct tonga_smumgr *smu_data =
+ (struct tonga_smumgr *)(hwmgr->smu_backend);
+
+ /* Currently not used. Set all to zero. */
+ for (i = 0; i < 16; i++)
+ smu_data->power_tune_table.LPMLTemperatureScaler[i] = 0;
+
+ return 0;
+}
+
+static int tonga_populate_fuzzy_fan(struct pp_hwmgr *hwmgr)
+{
+ struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend);
+
+ if ((hwmgr->thermal_controller.advanceFanControlParameters.
+ usFanOutputSensitivity & (1 << 15)) ||
+ (hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity == 0))
+ hwmgr->thermal_controller.advanceFanControlParameters.
+ usFanOutputSensitivity = hwmgr->thermal_controller.
+ advanceFanControlParameters.usDefaultFanOutputSensitivity;
+
+ smu_data->power_tune_table.FuzzyFan_PwmSetDelta =
+ PP_HOST_TO_SMC_US(hwmgr->thermal_controller.
+ advanceFanControlParameters.usFanOutputSensitivity);
+ return 0;
+}
+
+static int tonga_populate_gnb_lpml(struct pp_hwmgr *hwmgr)
+{
+ int i;
+ struct tonga_smumgr *smu_data =
+ (struct tonga_smumgr *)(hwmgr->smu_backend);
+
+ /* Currently not used. Set all to zero. */
+ for (i = 0; i < 16; i++)
+ smu_data->power_tune_table.GnbLPML[i] = 0;
+
+ return 0;
+}
+
+static int tonga_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr)
+{
+ struct tonga_smumgr *smu_data =
+ (struct tonga_smumgr *)(hwmgr->smu_backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ uint16_t hi_sidd = smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd;
+ uint16_t lo_sidd = smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd;
+ struct phm_cac_tdp_table *cac_table = table_info->cac_dtp_table;
+
+ hi_sidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256);
+ lo_sidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256);
+
+ smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd =
+ CONVERT_FROM_HOST_TO_SMC_US(hi_sidd);
+ smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd =
+ CONVERT_FROM_HOST_TO_SMC_US(lo_sidd);
+
+ return 0;
+}
+
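+/*
+ * When power containment is enabled, locate the PmFuses table in SMC RAM
+ * and populate/download the power tune fuses: SVI load line, TDC limit,
+ * TdcWaterfallCtl, temperature scaler, fuzzy fan, GnbLPML and the BAPM
+ * VDDC base leakage Sidd values.
+ */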
+static int tonga_populate_pm_fuses(struct pp_hwmgr *hwmgr)
+{
+ struct tonga_smumgr *smu_data =
+ (struct tonga_smumgr *)(hwmgr->smu_backend);
+ uint32_t pm_fuse_table_offset;
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_PowerContainment)) {
+ if (smu7_read_smc_sram_dword(hwmgr,
+ SMU72_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU72_Firmware_Header, PmFuseTable),
+ &pm_fuse_table_offset, SMC_RAM_END))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to get pm_fuse_table_offset Failed !",
+ return -EINVAL);
+
+ /* DW6 */
+ if (tonga_populate_svi_load_line(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate SviLoadLine Failed !",
+ return -EINVAL);
+ /* DW7 */
+ if (tonga_populate_tdc_limit(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate TDCLimit Failed !",
+ return -EINVAL);
+ /* DW8 */
+ if (tonga_populate_dw8(hwmgr, pm_fuse_table_offset))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate TdcWaterfallCtl Failed !",
+ return -EINVAL);
+
+ /* DW9-DW12 */
+ if (tonga_populate_temperature_scaler(hwmgr) != 0)
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate LPMLTemperatureScaler Failed !",
+ return -EINVAL);
+
+ /* DW13-DW14 */
+ if (tonga_populate_fuzzy_fan(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate Fuzzy Fan "
+ "Control parameters Failed !",
+ return -EINVAL);
+
+ /* DW15-DW18 */
+ if (tonga_populate_gnb_lpml(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate GnbLPML Failed !",
+ return -EINVAL);
+
+ /* DW20 */
+ if (tonga_populate_bapm_vddc_base_leakage_sidd(hwmgr))
+ PP_ASSERT_WITH_CODE(
+ false,
+ "Attempt to populate BapmVddCBaseLeakage "
+ "Hi and Lo Sidd Failed !",
+ return -EINVAL);
+
+ if (smu7_copy_bytes_to_smc(hwmgr, pm_fuse_table_offset,
+ (uint8_t *)&smu_data->power_tune_table,
+ sizeof(struct SMU72_Discrete_PmFuses), SMC_RAM_END))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to download PmFuseTable Failed !",
+ return -EINVAL);
+ }
+ return 0;
+}
+
+static int tonga_populate_mc_reg_address(struct pp_hwmgr *hwmgr,
+ SMU72_Discrete_MCRegisters *mc_reg_table)
+{
+ const struct tonga_smumgr *smu_data = (struct tonga_smumgr *)hwmgr->smu_backend;
+
+ uint32_t i, j;
+
+ for (i = 0, j = 0; j < smu_data->mc_reg_table.last; j++) {
+ if (smu_data->mc_reg_table.validflag & 1<<j) {
+ PP_ASSERT_WITH_CODE(
+ i < SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE,
+ "Index of mc_reg_table->address[] array "
+ "out of boundary",
+ return -EINVAL);
+ mc_reg_table->address[i].s0 =
+ PP_HOST_TO_SMC_US(smu_data->mc_reg_table.mc_reg_address[j].s0);
+ mc_reg_table->address[i].s1 =
+ PP_HOST_TO_SMC_US(smu_data->mc_reg_table.mc_reg_address[j].s1);
+ i++;
+ }
+ }
+
+ mc_reg_table->last = (uint8_t)i;
+
+ return 0;
+}
+
+/* Convert register values from driver to SMC format. */
+static void tonga_convert_mc_registers(
+ const struct tonga_mc_reg_entry *entry,
+ SMU72_Discrete_MCRegisterSet *data,
+ uint32_t num_entries, uint32_t valid_flag)
+{
+ uint32_t i, j;
+
+ for (i = 0, j = 0; j < num_entries; j++) {
+ if (valid_flag & 1<<j) {
+ data->value[i] = PP_HOST_TO_SMC_UL(entry->mc_data[j]);
+ i++;
+ }
+ }
+}
+
+static int tonga_convert_mc_reg_table_entry_to_smc(
+ struct pp_hwmgr *hwmgr,
+ const uint32_t memory_clock,
+ SMU72_Discrete_MCRegisterSet *mc_reg_table_data
+ )
+{
+ struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend);
+ uint32_t i = 0;
+
+ for (i = 0; i < smu_data->mc_reg_table.num_entries; i++) {
+ if (memory_clock <=
+ smu_data->mc_reg_table.mc_reg_table_entry[i].mclk_max) {
+ break;
+ }
+ }
+
+ if ((i == smu_data->mc_reg_table.num_entries) && (i > 0))
+ --i;
+
+ tonga_convert_mc_registers(&smu_data->mc_reg_table.mc_reg_table_entry[i],
+ mc_reg_table_data, smu_data->mc_reg_table.last,
+ smu_data->mc_reg_table.validflag);
+
+ return 0;
+}
+
+static int tonga_convert_mc_reg_table_to_smc(struct pp_hwmgr *hwmgr,
+ SMU72_Discrete_MCRegisters *mc_regs)
+{
+ int result = 0;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ int res;
+ uint32_t i;
+
+ for (i = 0; i < data->dpm_table.mclk_table.count; i++) {
+ res = tonga_convert_mc_reg_table_entry_to_smc(
+ hwmgr,
+ data->dpm_table.mclk_table.dpm_levels[i].value,
+ &mc_regs->data[i]
+ );
+
+ if (0 != res)
+ result = res;
+ }
+
+ return result;
+}
+
+static int tonga_update_and_upload_mc_reg_table(struct pp_hwmgr *hwmgr)
+{
+ struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend);
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ uint32_t address;
+ int32_t result;
+
+ if (0 == (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK))
+ return 0;
+
+
+ memset(&smu_data->mc_regs, 0, sizeof(SMU72_Discrete_MCRegisters));
+
+ result = tonga_convert_mc_reg_table_to_smc(hwmgr, &(smu_data->mc_regs));
+
+ if (result != 0)
+ return result;
+
+
+ address = smu_data->smu7_data.mc_reg_table_start +
+ (uint32_t)offsetof(SMU72_Discrete_MCRegisters, data[0]);
+
+ return smu7_copy_bytes_to_smc(
+ hwmgr, address,
+ (uint8_t *)&smu_data->mc_regs.data[0],
+ sizeof(SMU72_Discrete_MCRegisterSet) *
+ data->dpm_table.mclk_table.count,
+ SMC_RAM_END);
+}
+
+static int tonga_populate_initial_mc_reg_table(struct pp_hwmgr *hwmgr)
+{
+ int result;
+ struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend);
+
+ memset(&smu_data->mc_regs, 0x00, sizeof(SMU72_Discrete_MCRegisters));
+ result = tonga_populate_mc_reg_address(hwmgr, &(smu_data->mc_regs));
+ PP_ASSERT_WITH_CODE(!result,
+ "Failed to initialize MCRegTable for the MC register addresses !",
+ return result;);
+
+ result = tonga_convert_mc_reg_table_to_smc(hwmgr, &smu_data->mc_regs);
+ PP_ASSERT_WITH_CODE(!result,
+ "Failed to initialize MCRegTable for driver state !",
+ return result;);
+
+ return smu7_copy_bytes_to_smc(hwmgr, smu_data->smu7_data.mc_reg_table_start,
+ (uint8_t *)&smu_data->mc_regs, sizeof(SMU72_Discrete_MCRegisters), SMC_RAM_END);
+}
+
+static void tonga_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
+{
+ struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+ if (table_info &&
+ table_info->cac_dtp_table->usPowerTuneDataSetID <= POWERTUNE_DEFAULT_SET_MAX &&
+ table_info->cac_dtp_table->usPowerTuneDataSetID)
+ smu_data->power_tune_defaults =
+ &tonga_power_tune_data_set_array
+ [table_info->cac_dtp_table->usPowerTuneDataSetID - 1];
+ else
+ smu_data->power_tune_defaults = &tonga_power_tune_data_set_array[0];
+}
+
+static void tonga_save_default_power_profile(struct pp_hwmgr *hwmgr)
+{
+ struct tonga_smumgr *data = (struct tonga_smumgr *)(hwmgr->smu_backend);
+ struct SMU72_Discrete_GraphicsLevel *levels =
+ data->smc_state_table.GraphicsLevel;
+ unsigned min_level = 1;
+
+ hwmgr->default_gfx_power_profile.activity_threshold =
+ be16_to_cpu(levels[0].ActivityLevel);
+ hwmgr->default_gfx_power_profile.up_hyst = levels[0].UpHyst;
+ hwmgr->default_gfx_power_profile.down_hyst = levels[0].DownHyst;
+ hwmgr->default_gfx_power_profile.type = AMD_PP_GFX_PROFILE;
+
+ hwmgr->default_compute_power_profile = hwmgr->default_gfx_power_profile;
+ hwmgr->default_compute_power_profile.type = AMD_PP_COMPUTE_PROFILE;
+
+	/* Work around compute SDMA instability: disable the lowest SCLK
+	 * DPM level. Optimize the compute power profile: use only the
+	 * highest two power levels (if more than two are available);
+	 * hysteresis: 0 ms up, 5 ms down.
+	 */
+ if (data->smc_state_table.GraphicsDpmLevelCount > 2)
+ min_level = data->smc_state_table.GraphicsDpmLevelCount - 2;
+ else if (data->smc_state_table.GraphicsDpmLevelCount == 2)
+ min_level = 1;
+ else
+ min_level = 0;
+ hwmgr->default_compute_power_profile.min_sclk =
+ be32_to_cpu(levels[min_level].SclkFrequency);
+ hwmgr->default_compute_power_profile.up_hyst = 0;
+ hwmgr->default_compute_power_profile.down_hyst = 5;
+
+ hwmgr->gfx_power_profile = hwmgr->default_gfx_power_profile;
+ hwmgr->compute_power_profile = hwmgr->default_compute_power_profile;
+}
+
+static int tonga_init_smc_table(struct pp_hwmgr *hwmgr)
+{
+ int result;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct tonga_smumgr *smu_data =
+ (struct tonga_smumgr *)(hwmgr->smu_backend);
+ SMU72_Discrete_DpmTable *table = &(smu_data->smc_state_table);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+ uint8_t i;
+ pp_atomctrl_gpio_pin_assignment gpio_pin_assignment;
+
+
+ memset(&(smu_data->smc_state_table), 0x00, sizeof(smu_data->smc_state_table));
+
+ tonga_initialize_power_tune_defaults(hwmgr);
+
+ if (SMU7_VOLTAGE_CONTROL_NONE != data->voltage_control)
+ tonga_populate_smc_voltage_tables(hwmgr, table);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_AutomaticDCTransition))
+ table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
+
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_StepVddc))
+ table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
+
+ if (data->is_memory_gddr5)
+ table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
+
+ i = PHM_READ_FIELD(hwmgr->device, CC_MC_MAX_CHANNEL, NOOFCHAN);
+
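+	/* 0x40 is presumably PPSMC_SYSTEMFLAG_12CHANNEL, matching the
+	 * other SMU7 SMC tables.
+	 */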
+ if (i == 1 || i == 0)
+ table->SystemFlags |= 0x40;
+
+ if (data->ulv_supported && table_info->us_ulv_voltage_offset) {
+ result = tonga_populate_ulv_state(hwmgr, table);
+ PP_ASSERT_WITH_CODE(!result,
+ "Failed to initialize ULV state !",
+ return result;);
+
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixCG_ULV_PARAMETER, 0x40035);
+ }
+
+ result = tonga_populate_smc_link_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(!result,
+ "Failed to initialize Link Level !", return result);
+
+ result = tonga_populate_all_graphic_levels(hwmgr);
+ PP_ASSERT_WITH_CODE(!result,
+ "Failed to initialize Graphics Level !", return result);
+
+ result = tonga_populate_all_memory_levels(hwmgr);
+ PP_ASSERT_WITH_CODE(!result,
+ "Failed to initialize Memory Level !", return result);
+
+ result = tonga_populate_smc_acpi_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(!result,
+ "Failed to initialize ACPI Level !", return result);
+
+ result = tonga_populate_smc_vce_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(!result,
+ "Failed to initialize VCE Level !", return result);
+
+ result = tonga_populate_smc_acp_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(!result,
+ "Failed to initialize ACP Level !", return result);
+
+ result = tonga_populate_smc_samu_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(!result,
+ "Failed to initialize SAMU Level !", return result);
+
+ /* Since only the initial state is completely set up at this
+ * point (the other states are just copies of the boot state) we only
+ * need to populate the ARB settings for the initial state.
+ */
+ result = tonga_program_memory_timing_parameters(hwmgr);
+ PP_ASSERT_WITH_CODE(!result,
+ "Failed to Write ARB settings for the initial state.",
+ return result;);
+
+ result = tonga_populate_smc_uvd_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(!result,
+ "Failed to initialize UVD Level !", return result);
+
+ result = tonga_populate_smc_boot_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(!result,
+ "Failed to initialize Boot Level !", return result);
+
+	result = tonga_populate_bapm_parameters_in_dpm_table(hwmgr);
+ PP_ASSERT_WITH_CODE(!result,
+ "Failed to populate BAPM Parameters !", return result);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_ClockStretcher)) {
+ result = tonga_populate_clock_stretcher_data_table(hwmgr);
+ PP_ASSERT_WITH_CODE(!result,
+ "Failed to populate Clock Stretcher Data Table !",
+ return result;);
+ }
+ table->GraphicsVoltageChangeEnable = 1;
+ table->GraphicsThermThrottleEnable = 1;
+ table->GraphicsInterval = 1;
+ table->VoltageInterval = 1;
+ table->ThermalInterval = 1;
+ table->TemperatureLimitHigh =
+ table_info->cac_dtp_table->usTargetOperatingTemp *
+ SMU7_Q88_FORMAT_CONVERSION_UNIT;
+ table->TemperatureLimitLow =
+ (table_info->cac_dtp_table->usTargetOperatingTemp - 1) *
+ SMU7_Q88_FORMAT_CONVERSION_UNIT;
+ table->MemoryVoltageChangeEnable = 1;
+ table->MemoryInterval = 1;
+ table->VoltageResponseTime = 0;
+ table->PhaseResponseTime = 0;
+ table->MemoryThermThrottleEnable = 1;
+
+	/*
+	 * Cail reads the current link status and reports it as the cap
+	 * (we cannot change this due to some previous issues we had).
+	 * The SMC drops the link status to the lowest level after PowerPlay
+	 * enables DPM. After PnP or toggling CF the driver is reloaded, and
+	 * this time Cail reads the link status that the SMC left at low and
+	 * again reports it as the cap to PowerPlay.
+	 * To avoid this, set PCIeBootLinkLevel to the highest DPM level.
+	 */
+ PP_ASSERT_WITH_CODE((1 <= data->dpm_table.pcie_speed_table.count),
+ "There must be 1 or more PCIE levels defined in PPTable.",
+ return -EINVAL);
+
+ table->PCIeBootLinkLevel = (uint8_t) (data->dpm_table.pcie_speed_table.count);
+
+ table->PCIeGenInterval = 1;
+
+ result = tonga_populate_vr_config(hwmgr, table);
+ PP_ASSERT_WITH_CODE(!result,
+ "Failed to populate VRConfig setting !", return result);
+
+ table->ThermGpio = 17;
+ table->SclkStepSize = 0x4000;
+
+ if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_VRHOT_GPIO_PINID,
+ &gpio_pin_assignment)) {
+ table->VRHotGpio = gpio_pin_assignment.uc_gpio_pin_bit_shift;
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_RegulatorHot);
+ } else {
+ table->VRHotGpio = SMU7_UNUSED_GPIO_PIN;
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_RegulatorHot);
+ }
+
+ if (atomctrl_get_pp_assign_pin(hwmgr, PP_AC_DC_SWITCH_GPIO_PINID,
+ &gpio_pin_assignment)) {
+ table->AcDcGpio = gpio_pin_assignment.uc_gpio_pin_bit_shift;
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_AutomaticDCTransition);
+ } else {
+ table->AcDcGpio = SMU7_UNUSED_GPIO_PIN;
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_AutomaticDCTransition);
+ }
+
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_Falcon_QuickTransition);
+
+ if (0) {
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_AutomaticDCTransition);
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_Falcon_QuickTransition);
+ }
+
+ if (atomctrl_get_pp_assign_pin(hwmgr,
+ THERMAL_INT_OUTPUT_GPIO_PINID, &gpio_pin_assignment)) {
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_ThermalOutGPIO);
+
+ table->ThermOutGpio = gpio_pin_assignment.uc_gpio_pin_bit_shift;
+
+ table->ThermOutPolarity =
+ (0 == (cgs_read_register(hwmgr->device, mmGPIOPAD_A) &
+ (1 << gpio_pin_assignment.uc_gpio_pin_bit_shift))) ? 1 : 0;
+
+ table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_ONLY;
+
+ /* if required, combine VRHot/PCC with thermal out GPIO*/
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_RegulatorHot) &&
+ phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_CombinePCCWithThermalSignal)){
+ table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_VRHOT;
+ }
+ } else {
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_ThermalOutGPIO);
+
+ table->ThermOutGpio = 17;
+ table->ThermOutPolarity = 1;
+ table->ThermOutMode = SMU7_THERM_OUT_MODE_DISABLE;
+ }
+
+ for (i = 0; i < SMU72_MAX_ENTRIES_SMIO; i++)
+ table->Smio[i] = PP_HOST_TO_SMC_UL(table->Smio[i]);
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->VRConfig);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask1);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask2);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize);
+ CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh);
+ CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow);
+ CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime);
+ CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime);
+
+	/* Upload all DPM data to SMC memory (DPM levels, level counts, etc.). */
+ result = smu7_copy_bytes_to_smc(
+ hwmgr,
+ smu_data->smu7_data.dpm_table_start + offsetof(SMU72_Discrete_DpmTable, SystemFlags),
+ (uint8_t *)&(table->SystemFlags),
+ sizeof(SMU72_Discrete_DpmTable) - 3 * sizeof(SMU72_PIDController),
+ SMC_RAM_END);
+
+ PP_ASSERT_WITH_CODE(!result,
+ "Failed to upload dpm data to SMC memory !", return result;);
+
+ result = tonga_init_arb_table_index(hwmgr);
+ PP_ASSERT_WITH_CODE(!result,
+ "Failed to upload arb data to SMC memory !", return result);
+
+	result = tonga_populate_pm_fuses(hwmgr);
+	PP_ASSERT_WITH_CODE((!result),
+		"Failed to populate PM fuses !", return result);
+
+ result = tonga_populate_initial_mc_reg_table(hwmgr);
+ PP_ASSERT_WITH_CODE((!result),
+		"Failed to populate the initial MC Reg table !", return result);
+
+ tonga_save_default_power_profile(hwmgr);
+
+ return 0;
+}
+
+static int tonga_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
+{
+ struct tonga_smumgr *smu_data =
+ (struct tonga_smumgr *)(hwmgr->smu_backend);
+ SMU72_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
+ uint32_t duty100;
+ uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2;
+ uint16_t fdo_min, slope1, slope2;
+ uint32_t reference_clock;
+ int res;
+ uint64_t tmp64;
+
+ if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_MicrocodeFanControl))
+ return 0;
+
+ if (hwmgr->thermal_controller.fanInfo.bNoFan) {
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_MicrocodeFanControl);
+ return 0;
+ }
+
+ if (0 == smu_data->smu7_data.fan_table_start) {
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_MicrocodeFanControl);
+ return 0;
+ }
+
+ duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
+ CGS_IND_REG__SMC,
+ CG_FDO_CTRL1, FMAX_DUTY100);
+
+ if (0 == duty100) {
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_MicrocodeFanControl);
+ return 0;
+ }
+
+ tmp64 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin * duty100;
+ do_div(tmp64, 10000);
+ fdo_min = (uint16_t)tmp64;
+
+ t_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usTMed -
+ hwmgr->thermal_controller.advanceFanControlParameters.usTMin;
+ t_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usTHigh -
+ hwmgr->thermal_controller.advanceFanControlParameters.usTMed;
+
+ pwm_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed -
+ hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin;
+ pwm_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh -
+ hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed;
+
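+	/* Fan curve slopes between the Min/Med and Med/High points (rounded). */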
+ slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
+ slope2 = (uint16_t)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
+
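+	/* Temperatures are given in 0.01 degC units; round to whole degrees. */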
+ fan_table.TempMin = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMin) / 100);
+ fan_table.TempMed = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMed) / 100);
+ fan_table.TempMax = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMax) / 100);
+
+ fan_table.Slope1 = cpu_to_be16(slope1);
+ fan_table.Slope2 = cpu_to_be16(slope2);
+
+ fan_table.FdoMin = cpu_to_be16(fdo_min);
+
+ fan_table.HystDown = cpu_to_be16(hwmgr->thermal_controller.advanceFanControlParameters.ucTHyst);
+
+ fan_table.HystUp = cpu_to_be16(1);
+
+ fan_table.HystSlope = cpu_to_be16(1);
+
+ fan_table.TempRespLim = cpu_to_be16(5);
+
+ reference_clock = smu7_get_xclk(hwmgr);
+
+ fan_table.RefreshPeriod = cpu_to_be32((hwmgr->thermal_controller.advanceFanControlParameters.ulCycleDelay * reference_clock) / 1600);
+
+ fan_table.FdoMax = cpu_to_be16((uint16_t)duty100);
+
+ fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_MULT_THERMAL_CTRL, TEMP_SEL);
+
+ fan_table.FanControl_GL_Flag = 1;
+
+ res = smu7_copy_bytes_to_smc(hwmgr,
+ smu_data->smu7_data.fan_table_start,
+ (uint8_t *)&fan_table,
+ (uint32_t)sizeof(fan_table),
+ SMC_RAM_END);
+
+	return res;
+}
+
+
+static int tonga_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ if (data->need_update_smu7_dpm_table &
+			(DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_OD_UPDATE_MCLK))
+ return tonga_program_memory_timing_parameters(hwmgr);
+
+ return 0;
+}
+
+static int tonga_update_sclk_threshold(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct tonga_smumgr *smu_data =
+ (struct tonga_smumgr *)(hwmgr->smu_backend);
+
+ int result = 0;
+ uint32_t low_sclk_interrupt_threshold = 0;
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_SclkThrottleLowNotification)
+ && (hwmgr->gfx_arbiter.sclk_threshold !=
+ data->low_sclk_interrupt_threshold)) {
+ data->low_sclk_interrupt_threshold =
+ hwmgr->gfx_arbiter.sclk_threshold;
+ low_sclk_interrupt_threshold =
+ data->low_sclk_interrupt_threshold;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold);
+
+ result = smu7_copy_bytes_to_smc(
+ hwmgr,
+ smu_data->smu7_data.dpm_table_start +
+ offsetof(SMU72_Discrete_DpmTable,
+ LowSclkInterruptThreshold),
+ (uint8_t *)&low_sclk_interrupt_threshold,
+ sizeof(uint32_t),
+ SMC_RAM_END);
+ }
+
+ result = tonga_update_and_upload_mc_reg_table(hwmgr);
+
+ PP_ASSERT_WITH_CODE((!result),
+ "Failed to upload MC reg table !",
+ return result);
+
+ result = tonga_program_mem_timing_parameters(hwmgr);
+ PP_ASSERT_WITH_CODE((result == 0),
+ "Failed to program memory timing parameters !",
+ );
+
+ return result;
+}
+
+static uint32_t tonga_get_offsetof(uint32_t type, uint32_t member)
+{
+ switch (type) {
+ case SMU_SoftRegisters:
+ switch (member) {
+ case HandshakeDisables:
+ return offsetof(SMU72_SoftRegisters, HandshakeDisables);
+ case VoltageChangeTimeout:
+ return offsetof(SMU72_SoftRegisters, VoltageChangeTimeout);
+ case AverageGraphicsActivity:
+ return offsetof(SMU72_SoftRegisters, AverageGraphicsActivity);
+ case PreVBlankGap:
+ return offsetof(SMU72_SoftRegisters, PreVBlankGap);
+ case VBlankTimeout:
+ return offsetof(SMU72_SoftRegisters, VBlankTimeout);
+ case UcodeLoadStatus:
+ return offsetof(SMU72_SoftRegisters, UcodeLoadStatus);
+ case DRAM_LOG_ADDR_H:
+ return offsetof(SMU72_SoftRegisters, DRAM_LOG_ADDR_H);
+ case DRAM_LOG_ADDR_L:
+ return offsetof(SMU72_SoftRegisters, DRAM_LOG_ADDR_L);
+ case DRAM_LOG_PHY_ADDR_H:
+ return offsetof(SMU72_SoftRegisters, DRAM_LOG_PHY_ADDR_H);
+ case DRAM_LOG_PHY_ADDR_L:
+ return offsetof(SMU72_SoftRegisters, DRAM_LOG_PHY_ADDR_L);
+ case DRAM_LOG_BUFF_SIZE:
+ return offsetof(SMU72_SoftRegisters, DRAM_LOG_BUFF_SIZE);
+		}
+		break;
+ case SMU_Discrete_DpmTable:
+ switch (member) {
+ case UvdBootLevel:
+ return offsetof(SMU72_Discrete_DpmTable, UvdBootLevel);
+ case VceBootLevel:
+ return offsetof(SMU72_Discrete_DpmTable, VceBootLevel);
+ case SamuBootLevel:
+ return offsetof(SMU72_Discrete_DpmTable, SamuBootLevel);
+ case LowSclkInterruptThreshold:
+ return offsetof(SMU72_Discrete_DpmTable, LowSclkInterruptThreshold);
+ }
+ }
+ pr_warn("can't get the offset of type %x member %x\n", type, member);
+ return 0;
+}
+
+static uint32_t tonga_get_mac_definition(uint32_t value)
+{
+ switch (value) {
+ case SMU_MAX_LEVELS_GRAPHICS:
+ return SMU72_MAX_LEVELS_GRAPHICS;
+ case SMU_MAX_LEVELS_MEMORY:
+ return SMU72_MAX_LEVELS_MEMORY;
+ case SMU_MAX_LEVELS_LINK:
+ return SMU72_MAX_LEVELS_LINK;
+ case SMU_MAX_ENTRIES_SMIO:
+ return SMU72_MAX_ENTRIES_SMIO;
+ case SMU_MAX_LEVELS_VDDC:
+ return SMU72_MAX_LEVELS_VDDC;
+ case SMU_MAX_LEVELS_VDDGFX:
+ return SMU72_MAX_LEVELS_VDDGFX;
+ case SMU_MAX_LEVELS_VDDCI:
+ return SMU72_MAX_LEVELS_VDDCI;
+ case SMU_MAX_LEVELS_MVDD:
+ return SMU72_MAX_LEVELS_MVDD;
+ }
+ pr_warn("can't get the mac value %x\n", value);
+
+ return 0;
+}
+
+static int tonga_update_uvd_smc_table(struct pp_hwmgr *hwmgr)
+{
+ struct tonga_smumgr *smu_data =
+ (struct tonga_smumgr *)(hwmgr->smu_backend);
+ uint32_t mm_boot_level_offset, mm_boot_level_value;
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+ smu_data->smc_state_table.UvdBootLevel = 0;
+ if (table_info->mm_dep_table->count > 0)
+ smu_data->smc_state_table.UvdBootLevel =
+ (uint8_t) (table_info->mm_dep_table->count - 1);
+ mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
+ offsetof(SMU72_Discrete_DpmTable, UvdBootLevel);
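+	/* Align the byte offset down to a dword boundary for the indirect access. */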
+ mm_boot_level_offset /= 4;
+ mm_boot_level_offset *= 4;
+ mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC, mm_boot_level_offset);
+ mm_boot_level_value &= 0x00FFFFFF;
+ mm_boot_level_value |= smu_data->smc_state_table.UvdBootLevel << 24;
+ cgs_write_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC,
+ mm_boot_level_offset, mm_boot_level_value);
+
+ if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_UVDDPM) ||
+ phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_StablePState))
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_UVDDPM_SetEnabledMask,
+ (uint32_t)(1 << smu_data->smc_state_table.UvdBootLevel));
+ return 0;
+}
+
+static int tonga_update_vce_smc_table(struct pp_hwmgr *hwmgr)
+{
+ struct tonga_smumgr *smu_data =
+ (struct tonga_smumgr *)(hwmgr->smu_backend);
+ uint32_t mm_boot_level_offset, mm_boot_level_value;
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+
+ smu_data->smc_state_table.VceBootLevel =
+ (uint8_t) (table_info->mm_dep_table->count - 1);
+
+ mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
+ offsetof(SMU72_Discrete_DpmTable, VceBootLevel);
+ mm_boot_level_offset /= 4;
+ mm_boot_level_offset *= 4;
+ mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC, mm_boot_level_offset);
+ mm_boot_level_value &= 0xFF00FFFF;
+ mm_boot_level_value |= smu_data->smc_state_table.VceBootLevel << 16;
+ cgs_write_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_StablePState))
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_VCEDPM_SetEnabledMask,
+ (uint32_t)1 << smu_data->smc_state_table.VceBootLevel);
+ return 0;
+}
+
+static int tonga_update_samu_smc_table(struct pp_hwmgr *hwmgr)
+{
+ struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend);
+ uint32_t mm_boot_level_offset, mm_boot_level_value;
+
+ smu_data->smc_state_table.SamuBootLevel = 0;
+ mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
+ offsetof(SMU72_Discrete_DpmTable, SamuBootLevel);
+
+ mm_boot_level_offset /= 4;
+ mm_boot_level_offset *= 4;
+ mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC, mm_boot_level_offset);
+ mm_boot_level_value &= 0xFFFFFF00;
+ mm_boot_level_value |= smu_data->smc_state_table.SamuBootLevel << 0;
+ cgs_write_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_StablePState))
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SAMUDPM_SetEnabledMask,
+ (uint32_t)(1 << smu_data->smc_state_table.SamuBootLevel));
+ return 0;
+}
+
+static int tonga_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type)
+{
+ switch (type) {
+ case SMU_UVD_TABLE:
+ tonga_update_uvd_smc_table(hwmgr);
+ break;
+ case SMU_VCE_TABLE:
+ tonga_update_vce_smc_table(hwmgr);
+ break;
+ case SMU_SAMU_TABLE:
+ tonga_update_samu_smc_table(hwmgr);
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static int tonga_process_firmware_header(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend);
+
+ uint32_t tmp;
+ int result;
+ bool error = false;
+
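+	/* Cache the table offsets published in the SMC firmware header. */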
+ result = smu7_read_smc_sram_dword(hwmgr,
+ SMU72_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU72_Firmware_Header, DpmTable),
+ &tmp, SMC_RAM_END);
+
+ if (!result)
+ smu_data->smu7_data.dpm_table_start = tmp;
+
+ error |= (result != 0);
+
+ result = smu7_read_smc_sram_dword(hwmgr,
+ SMU72_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU72_Firmware_Header, SoftRegisters),
+ &tmp, SMC_RAM_END);
+
+ if (!result) {
+ data->soft_regs_start = tmp;
+ smu_data->smu7_data.soft_regs_start = tmp;
+ }
+
+ error |= (result != 0);
+
+
+ result = smu7_read_smc_sram_dword(hwmgr,
+ SMU72_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU72_Firmware_Header, mcRegisterTable),
+ &tmp, SMC_RAM_END);
+
+ if (!result)
+ smu_data->smu7_data.mc_reg_table_start = tmp;
+
+	error |= (result != 0);
+
+ result = smu7_read_smc_sram_dword(hwmgr,
+ SMU72_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU72_Firmware_Header, FanTable),
+ &tmp, SMC_RAM_END);
+
+ if (!result)
+ smu_data->smu7_data.fan_table_start = tmp;
+
+ error |= (result != 0);
+
+ result = smu7_read_smc_sram_dword(hwmgr,
+ SMU72_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU72_Firmware_Header, mcArbDramTimingTable),
+ &tmp, SMC_RAM_END);
+
+ if (!result)
+ smu_data->smu7_data.arb_table_start = tmp;
+
+ error |= (result != 0);
+
+ result = smu7_read_smc_sram_dword(hwmgr,
+ SMU72_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU72_Firmware_Header, Version),
+ &tmp, SMC_RAM_END);
+
+ if (!result)
+ hwmgr->microcode_version_info.SMC = tmp;
+
+ error |= (result != 0);
+
+ return error ? 1 : 0;
+}
+
+/*---------------------------MC----------------------------*/
+
+static uint8_t tonga_get_memory_module_index(struct pp_hwmgr *hwmgr)
+{
+ return (uint8_t) (0xFF & (cgs_read_register(hwmgr->device, mmBIOS_SCRATCH_4) >> 16));
+}
+
+static bool tonga_check_s0_mc_reg_index(uint16_t in_reg, uint16_t *out_reg)
+{
+ bool result = true;
+
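+	/* Map each MC sequencer register to its low-power (_LP) shadow, if any. */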
+ switch (in_reg) {
+ case mmMC_SEQ_RAS_TIMING:
+ *out_reg = mmMC_SEQ_RAS_TIMING_LP;
+ break;
+
+ case mmMC_SEQ_DLL_STBY:
+ *out_reg = mmMC_SEQ_DLL_STBY_LP;
+ break;
+
+ case mmMC_SEQ_G5PDX_CMD0:
+ *out_reg = mmMC_SEQ_G5PDX_CMD0_LP;
+ break;
+
+ case mmMC_SEQ_G5PDX_CMD1:
+ *out_reg = mmMC_SEQ_G5PDX_CMD1_LP;
+ break;
+
+ case mmMC_SEQ_G5PDX_CTRL:
+ *out_reg = mmMC_SEQ_G5PDX_CTRL_LP;
+ break;
+
+ case mmMC_SEQ_CAS_TIMING:
+ *out_reg = mmMC_SEQ_CAS_TIMING_LP;
+ break;
+
+ case mmMC_SEQ_MISC_TIMING:
+ *out_reg = mmMC_SEQ_MISC_TIMING_LP;
+ break;
+
+ case mmMC_SEQ_MISC_TIMING2:
+ *out_reg = mmMC_SEQ_MISC_TIMING2_LP;
+ break;
+
+ case mmMC_SEQ_PMG_DVS_CMD:
+ *out_reg = mmMC_SEQ_PMG_DVS_CMD_LP;
+ break;
+
+ case mmMC_SEQ_PMG_DVS_CTL:
+ *out_reg = mmMC_SEQ_PMG_DVS_CTL_LP;
+ break;
+
+ case mmMC_SEQ_RD_CTL_D0:
+ *out_reg = mmMC_SEQ_RD_CTL_D0_LP;
+ break;
+
+ case mmMC_SEQ_RD_CTL_D1:
+ *out_reg = mmMC_SEQ_RD_CTL_D1_LP;
+ break;
+
+ case mmMC_SEQ_WR_CTL_D0:
+ *out_reg = mmMC_SEQ_WR_CTL_D0_LP;
+ break;
+
+ case mmMC_SEQ_WR_CTL_D1:
+ *out_reg = mmMC_SEQ_WR_CTL_D1_LP;
+ break;
+
+ case mmMC_PMG_CMD_EMRS:
+ *out_reg = mmMC_SEQ_PMG_CMD_EMRS_LP;
+ break;
+
+ case mmMC_PMG_CMD_MRS:
+ *out_reg = mmMC_SEQ_PMG_CMD_MRS_LP;
+ break;
+
+ case mmMC_PMG_CMD_MRS1:
+ *out_reg = mmMC_SEQ_PMG_CMD_MRS1_LP;
+ break;
+
+ case mmMC_SEQ_PMG_TIMING:
+ *out_reg = mmMC_SEQ_PMG_TIMING_LP;
+ break;
+
+ case mmMC_PMG_CMD_MRS2:
+ *out_reg = mmMC_SEQ_PMG_CMD_MRS2_LP;
+ break;
+
+ case mmMC_SEQ_WR_CTL_2:
+ *out_reg = mmMC_SEQ_WR_CTL_2_LP;
+ break;
+
+ default:
+ result = false;
+ break;
+ }
+
+ return result;
+}
+
+static int tonga_set_s0_mc_reg_index(struct tonga_mc_reg_table *table)
+{
+ uint32_t i;
+ uint16_t address;
+
+ for (i = 0; i < table->last; i++) {
+ table->mc_reg_address[i].s0 =
+ tonga_check_s0_mc_reg_index(table->mc_reg_address[i].s1,
+ &address) ?
+ address :
+ table->mc_reg_address[i].s1;
+ }
+ return 0;
+}
+
+static int tonga_copy_vbios_smc_reg_table(const pp_atomctrl_mc_reg_table *table,
+ struct tonga_mc_reg_table *ni_table)
+{
+ uint8_t i, j;
+
+ PP_ASSERT_WITH_CODE((table->last <= SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+ "Invalid VramInfo table.", return -EINVAL);
+ PP_ASSERT_WITH_CODE((table->num_entries <= MAX_AC_TIMING_ENTRIES),
+ "Invalid VramInfo table.", return -EINVAL);
+
+ for (i = 0; i < table->last; i++)
+ ni_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
+
+ ni_table->last = table->last;
+
+ for (i = 0; i < table->num_entries; i++) {
+ ni_table->mc_reg_table_entry[i].mclk_max =
+ table->mc_reg_table_entry[i].mclk_max;
+ for (j = 0; j < table->last; j++) {
+ ni_table->mc_reg_table_entry[i].mc_data[j] =
+ table->mc_reg_table_entry[i].mc_data[j];
+ }
+ }
+
+ ni_table->num_entries = table->num_entries;
+
+ return 0;
+}
+
+static int tonga_set_mc_special_registers(struct pp_hwmgr *hwmgr,
+ struct tonga_mc_reg_table *table)
+{
+ uint8_t i, j, k;
+ uint32_t temp_reg;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
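+	/* For MC_SEQ_MISC1 and MC_SEQ_RESERVE_M, append derived EMRS/MRS/MRS1
+	 * (and, for non-GDDR5, MC_PMG_AUTO_CMD) entries after the copied registers.
+	 */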
+ for (i = 0, j = table->last; i < table->last; i++) {
+ PP_ASSERT_WITH_CODE((j < SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+ "Invalid VramInfo table.", return -EINVAL);
+
+ switch (table->mc_reg_address[i].s1) {
+
+ case mmMC_SEQ_MISC1:
+ temp_reg = cgs_read_register(hwmgr->device,
+ mmMC_PMG_CMD_EMRS);
+ table->mc_reg_address[j].s1 = mmMC_PMG_CMD_EMRS;
+ table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_EMRS_LP;
+ for (k = 0; k < table->num_entries; k++) {
+ table->mc_reg_table_entry[k].mc_data[j] =
+ ((temp_reg & 0xffff0000)) |
+ ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
+ }
+ j++;
+ PP_ASSERT_WITH_CODE((j < SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+ "Invalid VramInfo table.", return -EINVAL);
+
+ temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS);
+ table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS;
+ table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS_LP;
+ for (k = 0; k < table->num_entries; k++) {
+ table->mc_reg_table_entry[k].mc_data[j] =
+ (temp_reg & 0xffff0000) |
+ (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
+
+ if (!data->is_memory_gddr5)
+ table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
+ }
+ j++;
+ PP_ASSERT_WITH_CODE((j <= SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+ "Invalid VramInfo table.", return -EINVAL);
+
+ if (!data->is_memory_gddr5) {
+ table->mc_reg_address[j].s1 = mmMC_PMG_AUTO_CMD;
+ table->mc_reg_address[j].s0 = mmMC_PMG_AUTO_CMD;
+ for (k = 0; k < table->num_entries; k++)
+ table->mc_reg_table_entry[k].mc_data[j] =
+ (table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
+ j++;
+ PP_ASSERT_WITH_CODE((j <= SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+ "Invalid VramInfo table.", return -EINVAL);
+ }
+
+ break;
+
+ case mmMC_SEQ_RESERVE_M:
+ temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1);
+ table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS1;
+ table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS1_LP;
+ for (k = 0; k < table->num_entries; k++) {
+ table->mc_reg_table_entry[k].mc_data[j] =
+ (temp_reg & 0xffff0000) |
+ (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
+ }
+ j++;
+ PP_ASSERT_WITH_CODE((j <= SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+ "Invalid VramInfo table.", return -EINVAL);
+ break;
+
+ default:
+ break;
+ }
+
+ }
+
+ table->last = j;
+
+ return 0;
+}
+
+static int tonga_set_valid_flag(struct tonga_mc_reg_table *table)
+{
+ uint8_t i, j;
+
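+	/* Mark a register column valid only if its value differs between entries. */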
+ for (i = 0; i < table->last; i++) {
+ for (j = 1; j < table->num_entries; j++) {
+ if (table->mc_reg_table_entry[j-1].mc_data[i] !=
+ table->mc_reg_table_entry[j].mc_data[i]) {
+ table->validflag |= (1<<i);
+ break;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int tonga_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
+{
+ int result;
+ struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend);
+ pp_atomctrl_mc_reg_table *table;
+ struct tonga_mc_reg_table *ni_table = &smu_data->mc_reg_table;
+	uint8_t module_index = tonga_get_memory_module_index(hwmgr);
+
+ table = kzalloc(sizeof(pp_atomctrl_mc_reg_table), GFP_KERNEL);
+
+ if (table == NULL)
+ return -ENOMEM;
+
+ /* Program additional LP registers that are no longer programmed by VBIOS */
+ cgs_write_register(hwmgr->device, mmMC_SEQ_RAS_TIMING_LP,
+ cgs_read_register(hwmgr->device, mmMC_SEQ_RAS_TIMING));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_CAS_TIMING_LP,
+ cgs_read_register(hwmgr->device, mmMC_SEQ_CAS_TIMING));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_DLL_STBY_LP,
+ cgs_read_register(hwmgr->device, mmMC_SEQ_DLL_STBY));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0_LP,
+ cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1_LP,
+ cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL_LP,
+ cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD_LP,
+ cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL_LP,
+ cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING_LP,
+ cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2_LP,
+ cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_EMRS_LP,
+ cgs_read_register(hwmgr->device, mmMC_PMG_CMD_EMRS));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS_LP,
+ cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS1_LP,
+ cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0_LP,
+ cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1_LP,
+ cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0_LP,
+ cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1_LP,
+ cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_TIMING_LP,
+ cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_TIMING));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS2_LP,
+ cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS2));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_2_LP,
+ cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_2));
+
+ memset(table, 0x00, sizeof(pp_atomctrl_mc_reg_table));
+
+ result = atomctrl_initialize_mc_reg_table(hwmgr, module_index, table);
+
+ if (!result)
+ result = tonga_copy_vbios_smc_reg_table(table, ni_table);
+
+ if (!result) {
+ tonga_set_s0_mc_reg_index(ni_table);
+ result = tonga_set_mc_special_registers(hwmgr, ni_table);
+ }
+
+ if (!result)
+ tonga_set_valid_flag(ni_table);
+
+ kfree(table);
+
+ return result;
+}
+
+static bool tonga_is_dpm_running(struct pp_hwmgr *hwmgr)
+{
+	return PHM_READ_INDIRECT_FIELD(hwmgr->device,
+			CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON) == 1;
+}
+
+static int tonga_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr,
+ struct amd_pp_profile *request)
+{
+ struct tonga_smumgr *smu_data = (struct tonga_smumgr *)
+ (hwmgr->smu_backend);
+ struct SMU72_Discrete_GraphicsLevel *levels =
+ smu_data->smc_state_table.GraphicsLevel;
+ uint32_t array = smu_data->smu7_data.dpm_table_start +
+ offsetof(SMU72_Discrete_DpmTable, GraphicsLevel);
+ uint32_t array_size = sizeof(struct SMU72_Discrete_GraphicsLevel) *
+ SMU72_MAX_LEVELS_GRAPHICS;
+ uint32_t i;
+
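+	/* Apply the requested profile to every graphics level, then re-upload
+	 * the whole GraphicsLevel array.
+	 */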
+ for (i = 0; i < smu_data->smc_state_table.GraphicsDpmLevelCount; i++) {
+ levels[i].ActivityLevel =
+ cpu_to_be16(request->activity_threshold);
+ levels[i].EnabledForActivity = 1;
+ levels[i].UpHyst = request->up_hyst;
+ levels[i].DownHyst = request->down_hyst;
+ }
+
+ return smu7_copy_bytes_to_smc(hwmgr, array, (uint8_t *)levels,
+ array_size, SMC_RAM_END);
+}
+
const struct pp_smumgr_func tonga_smu_funcs = {
.smu_init = &tonga_smu_init,
.smu_fini = &smu7_smu_fini,
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.h
index 8c4f761d5bc8..5d70a00348e2 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.h
@@ -25,8 +25,26 @@
#define _TONGA_SMUMGR_H_
#include "smu72_discrete.h"
-
#include "smu7_smumgr.h"
+#include "smu72.h"
+
+
+#define ASICID_IS_TONGA_P(wDID, bRID) \
+ (((wDID == 0x6930) && ((bRID == 0xF0) || (bRID == 0xF1) || (bRID == 0xFF))) \
+ || ((wDID == 0x6920) && ((bRID == 0) || (bRID == 1))))
+
+struct tonga_pt_defaults {
+ uint8_t svi_load_line_en;
+ uint8_t svi_load_line_vddC;
+ uint8_t tdc_vddc_throttle_release_limit_perc;
+ uint8_t tdc_mawt;
+ uint8_t tdc_waterfall_ctl;
+ uint8_t dte_ambient_temp_base;
+ uint32_t display_cac;
+ uint32_t bapm_temp_gradient;
+ uint16_t bapmti_r[SMU72_DTE_ITERATIONS * SMU72_DTE_SOURCES * SMU72_DTE_SINKS];
+ uint16_t bapmti_rc[SMU72_DTE_ITERATIONS * SMU72_DTE_SOURCES * SMU72_DTE_SINKS];
+};
struct tonga_mc_reg_entry {
uint32_t mclk_max;
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
index 408514c965a0..2f979fb86824 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
@@ -53,20 +53,20 @@
#define smnMP0_FW_INTF 0x3010104
#define smnMP1_PUB_CTRL 0x3010b14
-static bool vega10_is_smc_ram_running(struct pp_smumgr *smumgr)
+static bool vega10_is_smc_ram_running(struct pp_hwmgr *hwmgr)
{
uint32_t mp1_fw_flags, reg;
reg = soc15_get_register_offset(NBIF_HWID, 0,
mmPCIE_INDEX2_BASE_IDX, mmPCIE_INDEX2);
- cgs_write_register(smumgr->device, reg,
+ cgs_write_register(hwmgr->device, reg,
(MP1_Public | (smnMP1_FIRMWARE_FLAGS & 0xffffffff)));
reg = soc15_get_register_offset(NBIF_HWID, 0,
mmPCIE_DATA2_BASE_IDX, mmPCIE_DATA2);
- mp1_fw_flags = cgs_read_register(smumgr->device, reg);
+ mp1_fw_flags = cgs_read_register(hwmgr->device, reg);
if (mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK)
return true;
@@ -80,20 +80,20 @@ static bool vega10_is_smc_ram_running(struct pp_smumgr *smumgr)
- * @param smumgr the address of the powerplay hardware manager.
+ * @param hwmgr the address of the powerplay hardware manager.
* @return TRUE SMC has responded, FALSE otherwise.
*/
-static uint32_t vega10_wait_for_response(struct pp_smumgr *smumgr)
+static uint32_t vega10_wait_for_response(struct pp_hwmgr *hwmgr)
{
uint32_t reg;
- if (!vega10_is_smc_ram_running(smumgr))
+ if (!vega10_is_smc_ram_running(hwmgr))
return -EINVAL;
reg = soc15_get_register_offset(MP1_HWID, 0,
mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90);
- smum_wait_for_register_unequal(smumgr, reg,
+ phm_wait_for_register_unequal(hwmgr, reg,
0, MP1_C2PMSG_90__CONTENT_MASK);
- return cgs_read_register(smumgr->device, reg);
+ return cgs_read_register(hwmgr->device, reg);
}
/*
@@ -102,43 +102,43 @@ static uint32_t vega10_wait_for_response(struct pp_smumgr *smumgr)
* @param msg the message to send.
* @return Always return 0.
*/
-int vega10_send_msg_to_smc_without_waiting(struct pp_smumgr *smumgr,
+int vega10_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr,
uint16_t msg)
{
uint32_t reg;
- if (!vega10_is_smc_ram_running(smumgr))
+ if (!vega10_is_smc_ram_running(hwmgr))
return -EINVAL;
reg = soc15_get_register_offset(MP1_HWID, 0,
mmMP1_SMN_C2PMSG_66_BASE_IDX, mmMP1_SMN_C2PMSG_66);
- cgs_write_register(smumgr->device, reg, msg);
+ cgs_write_register(hwmgr->device, reg, msg);
return 0;
}
/*
* Send a message to the SMC, and wait for its response.
- * @param smumgr the address of the powerplay hardware manager.
+ * @param hwmgr the address of the powerplay hardware manager.
* @param msg the message to send.
* @return Always return 0.
*/
-int vega10_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg)
+int vega10_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
{
uint32_t reg;
- if (!vega10_is_smc_ram_running(smumgr))
+ if (!vega10_is_smc_ram_running(hwmgr))
return -EINVAL;
- vega10_wait_for_response(smumgr);
+ vega10_wait_for_response(hwmgr);
reg = soc15_get_register_offset(MP1_HWID, 0,
mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90);
- cgs_write_register(smumgr->device, reg, 0);
+ cgs_write_register(hwmgr->device, reg, 0);
- vega10_send_msg_to_smc_without_waiting(smumgr, msg);
+ vega10_send_msg_to_smc_without_waiting(hwmgr, msg);
- if (vega10_wait_for_response(smumgr) != 1)
+ if (vega10_wait_for_response(hwmgr) != 1)
pr_err("Failed to send message: 0x%x\n", msg);
return 0;
@@ -146,32 +146,32 @@ int vega10_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg)
/*
* Send a message to the SMC with parameter
- * @param smumgr: the address of the powerplay hardware manager.
+ * @param hwmgr: the address of the powerplay hardware manager.
* @param msg: the message to send.
* @param parameter: the parameter to send
* @return Always return 0.
*/
-int vega10_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr,
+int vega10_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
uint16_t msg, uint32_t parameter)
{
uint32_t reg;
- if (!vega10_is_smc_ram_running(smumgr))
+ if (!vega10_is_smc_ram_running(hwmgr))
return -EINVAL;
- vega10_wait_for_response(smumgr);
+ vega10_wait_for_response(hwmgr);
reg = soc15_get_register_offset(MP1_HWID, 0,
mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90);
- cgs_write_register(smumgr->device, reg, 0);
+ cgs_write_register(hwmgr->device, reg, 0);
reg = soc15_get_register_offset(MP1_HWID, 0,
mmMP1_SMN_C2PMSG_82_BASE_IDX, mmMP1_SMN_C2PMSG_82);
- cgs_write_register(smumgr->device, reg, parameter);
+ cgs_write_register(hwmgr->device, reg, parameter);
- vega10_send_msg_to_smc_without_waiting(smumgr, msg);
+ vega10_send_msg_to_smc_without_waiting(hwmgr, msg);
- if (vega10_wait_for_response(smumgr) != 1)
+ if (vega10_wait_for_response(hwmgr) != 1)
pr_err("Failed to send message: 0x%x\n", msg);
return 0;
@@ -180,51 +180,51 @@ int vega10_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr,
/*
* Send a message to the SMC with parameter, do not wait for response
- * @param smumgr: the address of the powerplay hardware manager.
+ * @param hwmgr: the address of the powerplay hardware manager.
* @param msg: the message to send.
* @param parameter: the parameter to send
* @return The response that came from the SMC.
*/
int vega10_send_msg_to_smc_with_parameter_without_waiting(
- struct pp_smumgr *smumgr, uint16_t msg, uint32_t parameter)
+ struct pp_hwmgr *hwmgr, uint16_t msg, uint32_t parameter)
{
uint32_t reg;
reg = soc15_get_register_offset(MP1_HWID, 0,
mmMP1_SMN_C2PMSG_82_BASE_IDX, mmMP1_SMN_C2PMSG_82);
- cgs_write_register(smumgr->device, reg, parameter);
+ cgs_write_register(hwmgr->device, reg, parameter);
- return vega10_send_msg_to_smc_without_waiting(smumgr, msg);
+ return vega10_send_msg_to_smc_without_waiting(hwmgr, msg);
}
/*
* Retrieve an argument from SMC.
- * @param smumgr the address of the powerplay hardware manager.
+ * @param hwmgr the address of the powerplay hardware manager.
* @param arg pointer to store the argument from SMC.
* @return Always return 0.
*/
-int vega10_read_arg_from_smc(struct pp_smumgr *smumgr, uint32_t *arg)
+int vega10_read_arg_from_smc(struct pp_hwmgr *hwmgr, uint32_t *arg)
{
uint32_t reg;
reg = soc15_get_register_offset(MP1_HWID, 0,
mmMP1_SMN_C2PMSG_82_BASE_IDX, mmMP1_SMN_C2PMSG_82);
- *arg = cgs_read_register(smumgr->device, reg);
+ *arg = cgs_read_register(hwmgr->device, reg);
return 0;
}
/*
* Copy table from SMC into driver FB
- * @param smumgr the address of the SMC manager
+ * @param hwmgr the address of the HW manager
* @param table_id the driver's table ID to copy from
*/
-int vega10_copy_table_from_smc(struct pp_smumgr *smumgr,
+int vega10_copy_table_from_smc(struct pp_hwmgr *hwmgr,
uint8_t *table, int16_t table_id)
{
struct vega10_smumgr *priv =
- (struct vega10_smumgr *)(smumgr->backend);
+ (struct vega10_smumgr *)(hwmgr->smu_backend);
PP_ASSERT_WITH_CODE(table_id < MAX_SMU_TABLE,
"Invalid SMU Table ID!", return -EINVAL);
@@ -232,16 +232,16 @@ int vega10_copy_table_from_smc(struct pp_smumgr *smumgr,
"Invalid SMU Table version!", return -EINVAL);
PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].size != 0,
"Invalid SMU Table Length!", return -EINVAL);
- PP_ASSERT_WITH_CODE(vega10_send_msg_to_smc_with_parameter(smumgr,
+ PP_ASSERT_WITH_CODE(vega10_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrHigh,
priv->smu_tables.entry[table_id].table_addr_high) == 0,
"[CopyTableFromSMC] Attempt to Set Dram Addr High Failed!", return -EINVAL);
- PP_ASSERT_WITH_CODE(vega10_send_msg_to_smc_with_parameter(smumgr,
+ PP_ASSERT_WITH_CODE(vega10_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrLow,
priv->smu_tables.entry[table_id].table_addr_low) == 0,
"[CopyTableFromSMC] Attempt to Set Dram Addr Low Failed!",
return -EINVAL);
- PP_ASSERT_WITH_CODE(vega10_send_msg_to_smc_with_parameter(smumgr,
+ PP_ASSERT_WITH_CODE(vega10_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_TransferTableSmu2Dram,
priv->smu_tables.entry[table_id].table_id) == 0,
"[CopyTableFromSMC] Attempt to Transfer Table From SMU Failed!",
@@ -255,14 +255,14 @@ int vega10_copy_table_from_smc(struct pp_smumgr *smumgr,
/*
* Copy table from Driver FB into SMC
- * @param smumgr the address of the SMC manager
+ * @param hwmgr the address of the HW manager
* @param table_id the table to copy from
*/
-int vega10_copy_table_to_smc(struct pp_smumgr *smumgr,
+int vega10_copy_table_to_smc(struct pp_hwmgr *hwmgr,
uint8_t *table, int16_t table_id)
{
struct vega10_smumgr *priv =
- (struct vega10_smumgr *)(smumgr->backend);
+ (struct vega10_smumgr *)(hwmgr->smu_backend);
PP_ASSERT_WITH_CODE(table_id < MAX_SMU_TABLE,
"Invalid SMU Table ID!", return -EINVAL);
@@ -274,17 +274,17 @@ int vega10_copy_table_to_smc(struct pp_smumgr *smumgr,
memcpy(priv->smu_tables.entry[table_id].table, table,
priv->smu_tables.entry[table_id].size);
- PP_ASSERT_WITH_CODE(vega10_send_msg_to_smc_with_parameter(smumgr,
+ PP_ASSERT_WITH_CODE(vega10_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrHigh,
priv->smu_tables.entry[table_id].table_addr_high) == 0,
"[CopyTableToSMC] Attempt to Set Dram Addr High Failed!",
return -EINVAL;);
- PP_ASSERT_WITH_CODE(vega10_send_msg_to_smc_with_parameter(smumgr,
+ PP_ASSERT_WITH_CODE(vega10_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrLow,
priv->smu_tables.entry[table_id].table_addr_low) == 0,
"[CopyTableToSMC] Attempt to Set Dram Addr Low Failed!",
return -EINVAL);
- PP_ASSERT_WITH_CODE(vega10_send_msg_to_smc_with_parameter(smumgr,
+ PP_ASSERT_WITH_CODE(vega10_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_TransferTableDram2Smu,
priv->smu_tables.entry[table_id].table_id) == 0,
"[CopyTableToSMC] Attempt to Transfer Table To SMU Failed!",
@@ -293,87 +293,87 @@ int vega10_copy_table_to_smc(struct pp_smumgr *smumgr,
return 0;
}
-int vega10_save_vft_table(struct pp_smumgr *smumgr, uint8_t *avfs_table)
+int vega10_save_vft_table(struct pp_hwmgr *hwmgr, uint8_t *avfs_table)
{
PP_ASSERT_WITH_CODE(avfs_table,
"No access to SMC AVFS Table",
return -EINVAL);
- return vega10_copy_table_from_smc(smumgr, avfs_table, AVFSTABLE);
+ return vega10_copy_table_from_smc(hwmgr, avfs_table, AVFSTABLE);
}
-int vega10_restore_vft_table(struct pp_smumgr *smumgr, uint8_t *avfs_table)
+int vega10_restore_vft_table(struct pp_hwmgr *hwmgr, uint8_t *avfs_table)
{
PP_ASSERT_WITH_CODE(avfs_table,
"No access to SMC AVFS Table",
return -EINVAL);
- return vega10_copy_table_to_smc(smumgr, avfs_table, AVFSTABLE);
+ return vega10_copy_table_to_smc(hwmgr, avfs_table, AVFSTABLE);
}
-int vega10_enable_smc_features(struct pp_smumgr *smumgr,
+int vega10_enable_smc_features(struct pp_hwmgr *hwmgr,
bool enable, uint32_t feature_mask)
{
int msg = enable ? PPSMC_MSG_EnableSmuFeatures :
PPSMC_MSG_DisableSmuFeatures;
- return vega10_send_msg_to_smc_with_parameter(smumgr,
+ return vega10_send_msg_to_smc_with_parameter(hwmgr,
msg, feature_mask);
}
-int vega10_get_smc_features(struct pp_smumgr *smumgr,
+int vega10_get_smc_features(struct pp_hwmgr *hwmgr,
uint32_t *features_enabled)
{
if (features_enabled == NULL)
return -EINVAL;
- if (!vega10_send_msg_to_smc(smumgr,
+ if (!vega10_send_msg_to_smc(hwmgr,
PPSMC_MSG_GetEnabledSmuFeatures)) {
- vega10_read_arg_from_smc(smumgr, features_enabled);
+ vega10_read_arg_from_smc(hwmgr, features_enabled);
return 0;
}
return -EINVAL;
}
-int vega10_set_tools_address(struct pp_smumgr *smumgr)
+int vega10_set_tools_address(struct pp_hwmgr *hwmgr)
{
struct vega10_smumgr *priv =
- (struct vega10_smumgr *)(smumgr->backend);
+ (struct vega10_smumgr *)(hwmgr->smu_backend);
if (priv->smu_tables.entry[TOOLSTABLE].table_addr_high ||
priv->smu_tables.entry[TOOLSTABLE].table_addr_low) {
- if (!vega10_send_msg_to_smc_with_parameter(smumgr,
+ if (!vega10_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetToolsDramAddrHigh,
priv->smu_tables.entry[TOOLSTABLE].table_addr_high))
- vega10_send_msg_to_smc_with_parameter(smumgr,
+ vega10_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetToolsDramAddrLow,
priv->smu_tables.entry[TOOLSTABLE].table_addr_low);
}
return 0;
}
-static int vega10_verify_smc_interface(struct pp_smumgr *smumgr)
+static int vega10_verify_smc_interface(struct pp_hwmgr *hwmgr)
{
uint32_t smc_driver_if_version;
struct cgs_system_info sys_info = {0};
uint32_t dev_id;
uint32_t rev_id;
- PP_ASSERT_WITH_CODE(!vega10_send_msg_to_smc(smumgr,
+ PP_ASSERT_WITH_CODE(!vega10_send_msg_to_smc(hwmgr,
PPSMC_MSG_GetDriverIfVersion),
"Attempt to get SMC IF Version Number Failed!",
return -EINVAL);
- vega10_read_arg_from_smc(smumgr, &smc_driver_if_version);
+ vega10_read_arg_from_smc(hwmgr, &smc_driver_if_version);
sys_info.size = sizeof(struct cgs_system_info);
sys_info.info_id = CGS_SYSTEM_INFO_PCIE_DEV;
- cgs_query_system_info(smumgr->device, &sys_info);
+ cgs_query_system_info(hwmgr->device, &sys_info);
dev_id = (uint32_t)sys_info.value;
sys_info.size = sizeof(struct cgs_system_info);
sys_info.info_id = CGS_SYSTEM_INFO_PCIE_REV;
- cgs_query_system_info(smumgr->device, &sys_info);
+ cgs_query_system_info(hwmgr->device, &sys_info);
rev_id = (uint32_t)sys_info.value;
if (!((dev_id == 0x687f) &&
@@ -392,7 +392,7 @@ static int vega10_verify_smc_interface(struct pp_smumgr *smumgr)
return 0;
}
-static int vega10_smu_init(struct pp_smumgr *smumgr)
+static int vega10_smu_init(struct pp_hwmgr *hwmgr)
{
struct vega10_smumgr *priv;
uint64_t mc_addr;
@@ -401,7 +401,7 @@ static int vega10_smu_init(struct pp_smumgr *smumgr)
int ret;
struct cgs_firmware_info info = {0};
- ret = cgs_get_firmware_info(smumgr->device,
+ ret = cgs_get_firmware_info(hwmgr->device,
smu7_convert_fw_type_to_cgs(UCODE_ID_SMU),
&info);
if (ret || !info.kptr)
@@ -412,10 +412,10 @@ static int vega10_smu_init(struct pp_smumgr *smumgr)
if (!priv)
return -ENOMEM;
- smumgr->backend = priv;
+ hwmgr->smu_backend = priv;
/* allocate space for pptable */
- smu_allocate_memory(smumgr->device,
+ smu_allocate_memory(hwmgr->device,
sizeof(PPTable_t),
CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB,
PAGE_SIZE,
@@ -425,8 +425,8 @@ static int vega10_smu_init(struct pp_smumgr *smumgr)
PP_ASSERT_WITH_CODE(kaddr,
"[vega10_smu_init] Out of memory for pptable.",
- kfree(smumgr->backend);
- cgs_free_gpu_mem(smumgr->device,
+ kfree(hwmgr->smu_backend);
+ cgs_free_gpu_mem(hwmgr->device,
(cgs_handle_t)handle);
return -EINVAL);
@@ -441,7 +441,7 @@ static int vega10_smu_init(struct pp_smumgr *smumgr)
priv->smu_tables.entry[PPTABLE].handle = handle;
/* allocate space for watermarks table */
- smu_allocate_memory(smumgr->device,
+ smu_allocate_memory(hwmgr->device,
sizeof(Watermarks_t),
CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB,
PAGE_SIZE,
@@ -451,10 +451,10 @@ static int vega10_smu_init(struct pp_smumgr *smumgr)
PP_ASSERT_WITH_CODE(kaddr,
"[vega10_smu_init] Out of memory for wmtable.",
- kfree(smumgr->backend);
- cgs_free_gpu_mem(smumgr->device,
+ kfree(hwmgr->smu_backend);
+ cgs_free_gpu_mem(hwmgr->device,
(cgs_handle_t)priv->smu_tables.entry[PPTABLE].handle);
- cgs_free_gpu_mem(smumgr->device,
+ cgs_free_gpu_mem(hwmgr->device,
(cgs_handle_t)handle);
return -EINVAL);
@@ -469,7 +469,7 @@ static int vega10_smu_init(struct pp_smumgr *smumgr)
priv->smu_tables.entry[WMTABLE].handle = handle;
/* allocate space for AVFS table */
- smu_allocate_memory(smumgr->device,
+ smu_allocate_memory(hwmgr->device,
sizeof(AvfsTable_t),
CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB,
PAGE_SIZE,
@@ -479,12 +479,12 @@ static int vega10_smu_init(struct pp_smumgr *smumgr)
PP_ASSERT_WITH_CODE(kaddr,
"[vega10_smu_init] Out of memory for avfs table.",
- kfree(smumgr->backend);
- cgs_free_gpu_mem(smumgr->device,
+ kfree(hwmgr->smu_backend);
+ cgs_free_gpu_mem(hwmgr->device,
(cgs_handle_t)priv->smu_tables.entry[PPTABLE].handle);
- cgs_free_gpu_mem(smumgr->device,
+ cgs_free_gpu_mem(hwmgr->device,
(cgs_handle_t)priv->smu_tables.entry[WMTABLE].handle);
- cgs_free_gpu_mem(smumgr->device,
+ cgs_free_gpu_mem(hwmgr->device,
(cgs_handle_t)handle);
return -EINVAL);
@@ -500,7 +500,7 @@ static int vega10_smu_init(struct pp_smumgr *smumgr)
tools_size = 0x19000;
if (tools_size) {
- smu_allocate_memory(smumgr->device,
+ smu_allocate_memory(hwmgr->device,
tools_size,
CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB,
PAGE_SIZE,
@@ -522,7 +522,7 @@ static int vega10_smu_init(struct pp_smumgr *smumgr)
}
/* allocate space for AVFS Fuse table */
- smu_allocate_memory(smumgr->device,
+ smu_allocate_memory(hwmgr->device,
sizeof(AvfsFuseOverride_t),
CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB,
PAGE_SIZE,
@@ -532,16 +532,16 @@ static int vega10_smu_init(struct pp_smumgr *smumgr)
PP_ASSERT_WITH_CODE(kaddr,
"[vega10_smu_init] Out of memory for avfs fuse table.",
- kfree(smumgr->backend);
- cgs_free_gpu_mem(smumgr->device,
+ kfree(hwmgr->smu_backend);
+ cgs_free_gpu_mem(hwmgr->device,
(cgs_handle_t)priv->smu_tables.entry[PPTABLE].handle);
- cgs_free_gpu_mem(smumgr->device,
+ cgs_free_gpu_mem(hwmgr->device,
(cgs_handle_t)priv->smu_tables.entry[WMTABLE].handle);
- cgs_free_gpu_mem(smumgr->device,
+ cgs_free_gpu_mem(hwmgr->device,
(cgs_handle_t)priv->smu_tables.entry[AVFSTABLE].handle);
- cgs_free_gpu_mem(smumgr->device,
+ cgs_free_gpu_mem(hwmgr->device,
(cgs_handle_t)priv->smu_tables.entry[TOOLSTABLE].handle);
- cgs_free_gpu_mem(smumgr->device,
+ cgs_free_gpu_mem(hwmgr->device,
(cgs_handle_t)handle);
return -EINVAL);
@@ -558,36 +558,36 @@ static int vega10_smu_init(struct pp_smumgr *smumgr)
return 0;
}
-static int vega10_smu_fini(struct pp_smumgr *smumgr)
+static int vega10_smu_fini(struct pp_hwmgr *hwmgr)
{
struct vega10_smumgr *priv =
- (struct vega10_smumgr *)(smumgr->backend);
+ (struct vega10_smumgr *)(hwmgr->smu_backend);
if (priv) {
- cgs_free_gpu_mem(smumgr->device,
+ cgs_free_gpu_mem(hwmgr->device,
(cgs_handle_t)priv->smu_tables.entry[PPTABLE].handle);
- cgs_free_gpu_mem(smumgr->device,
+ cgs_free_gpu_mem(hwmgr->device,
(cgs_handle_t)priv->smu_tables.entry[WMTABLE].handle);
- cgs_free_gpu_mem(smumgr->device,
+ cgs_free_gpu_mem(hwmgr->device,
(cgs_handle_t)priv->smu_tables.entry[AVFSTABLE].handle);
if (priv->smu_tables.entry[TOOLSTABLE].table)
- cgs_free_gpu_mem(smumgr->device,
+ cgs_free_gpu_mem(hwmgr->device,
(cgs_handle_t)priv->smu_tables.entry[TOOLSTABLE].handle);
- cgs_free_gpu_mem(smumgr->device,
+ cgs_free_gpu_mem(hwmgr->device,
(cgs_handle_t)priv->smu_tables.entry[AVFSFUSETABLE].handle);
- kfree(smumgr->backend);
- smumgr->backend = NULL;
+ kfree(hwmgr->smu_backend);
+ hwmgr->smu_backend = NULL;
}
return 0;
}
-static int vega10_start_smu(struct pp_smumgr *smumgr)
+static int vega10_start_smu(struct pp_hwmgr *hwmgr)
{
- PP_ASSERT_WITH_CODE(!vega10_verify_smc_interface(smumgr),
+ PP_ASSERT_WITH_CODE(!vega10_verify_smc_interface(hwmgr),
"Failed to verify SMC interface!",
return -EINVAL);
- vega10_set_tools_address(smumgr);
+ vega10_set_tools_address(hwmgr);
return 0;
}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.h
index 821425c1e4e0..0695455b21b2 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.h
@@ -52,19 +52,19 @@ struct vega10_smumgr {
struct smu_table_array smu_tables;
};
-int vega10_read_arg_from_smc(struct pp_smumgr *smumgr, uint32_t *arg);
-int vega10_copy_table_from_smc(struct pp_smumgr *smumgr,
+int vega10_read_arg_from_smc(struct pp_hwmgr *hwmgr, uint32_t *arg);
+int vega10_copy_table_from_smc(struct pp_hwmgr *hwmgr,
uint8_t *table, int16_t table_id);
-int vega10_copy_table_to_smc(struct pp_smumgr *smumgr,
+int vega10_copy_table_to_smc(struct pp_hwmgr *hwmgr,
uint8_t *table, int16_t table_id);
-int vega10_enable_smc_features(struct pp_smumgr *smumgr,
+int vega10_enable_smc_features(struct pp_hwmgr *hwmgr,
bool enable, uint32_t feature_mask);
-int vega10_get_smc_features(struct pp_smumgr *smumgr,
+int vega10_get_smc_features(struct pp_hwmgr *hwmgr,
uint32_t *features_enabled);
-int vega10_save_vft_table(struct pp_smumgr *smumgr, uint8_t *avfs_table);
-int vega10_restore_vft_table(struct pp_smumgr *smumgr, uint8_t *avfs_table);
+int vega10_save_vft_table(struct pp_hwmgr *hwmgr, uint8_t *avfs_table);
+int vega10_restore_vft_table(struct pp_hwmgr *hwmgr, uint8_t *avfs_table);
-int vega10_set_tools_address(struct pp_smumgr *smumgr);
+int vega10_set_tools_address(struct pp_hwmgr *hwmgr);
#endif