Diffstat (limited to 'drivers/gpu/drm/amd/pm/swsmu/smu13')
-rw-r--r-- | drivers/gpu/drm/amd/pm/swsmu/smu13/Makefile | 2
-rw-r--r-- | drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c | 123
-rw-r--r-- | drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c | 285
-rw-r--r-- | drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c | 335
-rw-r--r-- | drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c | 537
-rw-r--r-- | drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c | 54
-rw-r--r-- | drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c | 64
-rw-r--r-- | drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c | 1178
-rw-r--r-- | drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h | 57
-rw-r--r-- | drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c | 277
-rw-r--r-- | drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c | 78
11 files changed, 2216 insertions, 774 deletions
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/Makefile b/drivers/gpu/drm/amd/pm/swsmu/smu13/Makefile index 7f3493b6c53c..51f1fa9789ab 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/Makefile +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/Makefile @@ -24,7 +24,7 @@ # It provides the smu management services for the driver. SMU13_MGR = smu_v13_0.o aldebaran_ppt.o yellow_carp_ppt.o smu_v13_0_0_ppt.o smu_v13_0_4_ppt.o \ - smu_v13_0_5_ppt.o smu_v13_0_7_ppt.o smu_v13_0_6_ppt.o + smu_v13_0_5_ppt.o smu_v13_0_7_ppt.o smu_v13_0_6_ppt.o smu_v13_0_12_ppt.o AMD_SWSMU_SMU13MGR = $(addprefix $(AMD_SWSMU_PATH)/smu13/,$(SMU13_MGR)) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c index f41ac6465f2a..6de653d2ed62 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c @@ -266,9 +266,31 @@ static int aldebaran_tables_init(struct smu_context *smu) return 0; } +static int aldebaran_select_plpd_policy(struct smu_context *smu, int level) +{ + struct amdgpu_device *adev = smu->adev; + + /* The message only works on master die and NACK will be sent + * back for other dies, only send it on master die. + */ + if (adev->smuio.funcs->get_socket_id(adev) || + adev->smuio.funcs->get_die_id(adev)) + return 0; + + if (level == XGMI_PLPD_DEFAULT) + return smu_cmn_send_smc_msg_with_param( + smu, SMU_MSG_GmiPwrDnControl, 0, NULL); + else if (level == XGMI_PLPD_DISALLOW) + return smu_cmn_send_smc_msg_with_param( + smu, SMU_MSG_GmiPwrDnControl, 1, NULL); + else + return -EINVAL; +} + static int aldebaran_allocate_dpm_context(struct smu_context *smu) { struct smu_dpm_context *smu_dpm = &smu->smu_dpm; + struct smu_dpm_policy *policy; smu_dpm->dpm_context = kzalloc(sizeof(struct smu_13_0_dpm_context), GFP_KERNEL); @@ -276,6 +298,20 @@ static int aldebaran_allocate_dpm_context(struct smu_context *smu) return -ENOMEM; smu_dpm->dpm_context_size = sizeof(struct smu_13_0_dpm_context); + smu_dpm->dpm_policies = + kzalloc(sizeof(struct smu_dpm_policy_ctxt), GFP_KERNEL); + + if (!smu_dpm->dpm_policies) + return -ENOMEM; + + policy = &(smu_dpm->dpm_policies->policies[0]); + policy->policy_type = PP_PM_POLICY_XGMI_PLPD; + policy->level_mask = BIT(XGMI_PLPD_DISALLOW) | BIT(XGMI_PLPD_DEFAULT); + policy->current_level = XGMI_PLPD_DEFAULT; + policy->set_policy = aldebaran_select_plpd_policy; + smu_cmn_generic_plpd_policy_desc(policy); + smu_dpm->dpm_policies->policy_mask |= BIT(PP_PM_POLICY_XGMI_PLPD); + return 0; } @@ -759,8 +795,11 @@ static int aldebaran_emit_clk_levels(struct smu_context *smu, switch (type) { case SMU_OD_SCLK: - *offset += sysfs_emit_at(buf, *offset, "%s:\n", "GFXCLK"); - fallthrough; + *offset += sysfs_emit_at(buf, *offset, "%s:\n", "OD_SCLK"); + *offset += sysfs_emit_at(buf, *offset, "0: %uMhz\n1: %uMhz\n", + pstate_table->gfxclk_pstate.curr.min, + pstate_table->gfxclk_pstate.curr.max); + return 0; case SMU_SCLK: ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_GFXCLK, &cur_value); if (ret) { @@ -788,8 +827,11 @@ static int aldebaran_emit_clk_levels(struct smu_context *smu, break; case SMU_OD_MCLK: - *offset += sysfs_emit_at(buf, *offset, "%s:\n", "MCLK"); - fallthrough; + *offset += sysfs_emit_at(buf, *offset, "%s:\n", "OD_MCLK"); + *offset += sysfs_emit_at(buf, *offset, "0: %uMhz\n1: %uMhz\n", + pstate_table->uclk_pstate.curr.min, + pstate_table->uclk_pstate.curr.max); + return 0; case SMU_MCLK: ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_UCLK, &cur_value); if (ret) { @@ 
-850,7 +892,6 @@ static int aldebaran_emit_clk_levels(struct smu_context *smu, } switch (type) { - case SMU_OD_SCLK: case SMU_SCLK: for (i = 0; i < display_levels; i++) { clock_mhz = freq_values[i]; @@ -863,7 +904,6 @@ static int aldebaran_emit_clk_levels(struct smu_context *smu, } break; - case SMU_OD_MCLK: case SMU_MCLK: case SMU_SOCCLK: case SMU_FCLK: @@ -1230,6 +1270,7 @@ static int aldebaran_set_performance_level(struct smu_context *smu, struct smu_13_0_dpm_table *gfx_table = &dpm_context->dpm_tables.gfx_table; struct smu_umd_pstate_table *pstate_table = &smu->pstate_table; + int r; /* Disable determinism if switching to another mode */ if ((smu_dpm->dpm_level == AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) && @@ -1242,7 +1283,11 @@ static int aldebaran_set_performance_level(struct smu_context *smu, case AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM: return 0; - + case AMD_DPM_FORCED_LEVEL_AUTO: + r = smu_v13_0_set_performance_level(smu, level); + if (!r) + smu_v13_0_reset_custom_level(smu); + return r; case AMD_DPM_FORCED_LEVEL_HIGH: case AMD_DPM_FORCED_LEVEL_LOW: case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD: @@ -1257,9 +1302,10 @@ static int aldebaran_set_performance_level(struct smu_context *smu, } static int aldebaran_set_soft_freq_limited_range(struct smu_context *smu, - enum smu_clk_type clk_type, - uint32_t min, - uint32_t max) + enum smu_clk_type clk_type, + uint32_t min, + uint32_t max, + bool automatic) { struct smu_dpm_context *smu_dpm = &(smu->smu_dpm); struct smu_13_0_dpm_context *dpm_context = smu_dpm->dpm_context; @@ -1288,7 +1334,7 @@ static int aldebaran_set_soft_freq_limited_range(struct smu_context *smu, return 0; ret = smu_v13_0_set_soft_freq_limited_range(smu, SMU_GFXCLK, - min, max); + min, max, false); if (!ret) { pstate_table->gfxclk_pstate.curr.min = min; pstate_table->gfxclk_pstate.curr.max = max; @@ -1308,7 +1354,7 @@ static int aldebaran_set_soft_freq_limited_range(struct smu_context *smu, /* Restore default min/max clocks and enable determinism */ min_clk = dpm_context->dpm_tables.gfx_table.min; max_clk = dpm_context->dpm_tables.gfx_table.max; - ret = smu_v13_0_set_soft_freq_limited_range(smu, SMU_GFXCLK, min_clk, max_clk); + ret = smu_v13_0_set_soft_freq_limited_range(smu, SMU_GFXCLK, min_clk, max_clk, false); if (!ret) { usleep_range(500, 1000); ret = smu_cmn_send_smc_msg_with_param(smu, @@ -1382,7 +1428,11 @@ static int aldebaran_usr_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_ min_clk = dpm_context->dpm_tables.gfx_table.min; max_clk = dpm_context->dpm_tables.gfx_table.max; - return aldebaran_set_soft_freq_limited_range(smu, SMU_GFXCLK, min_clk, max_clk); + ret = aldebaran_set_soft_freq_limited_range( + smu, SMU_GFXCLK, min_clk, max_clk, false); + if (ret) + return ret; + smu_v13_0_reset_custom_level(smu); } break; case PP_OD_COMMIT_DPM_TABLE: @@ -1401,7 +1451,7 @@ static int aldebaran_usr_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_ min_clk = pstate_table->gfxclk_pstate.custom.min; max_clk = pstate_table->gfxclk_pstate.custom.max; - return aldebaran_set_soft_freq_limited_range(smu, SMU_GFXCLK, min_clk, max_clk); + return aldebaran_set_soft_freq_limited_range(smu, SMU_GFXCLK, min_clk, max_clk, false); } break; default: @@ -1581,11 +1631,11 @@ out: adev->unique_id = ((uint64_t)upper32 << 32) | lower32; } -static bool aldebaran_is_baco_supported(struct smu_context *smu) +static int aldebaran_get_bamaco_support(struct smu_context *smu) { /* aldebaran is not support baco */ - return false; + return 0; } static int aldebaran_set_df_cstate(struct 
smu_context *smu, @@ -1603,29 +1653,6 @@ static int aldebaran_set_df_cstate(struct smu_context *smu, return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DFCstateControl, state, NULL); } -static int aldebaran_select_xgmi_plpd_policy(struct smu_context *smu, - enum pp_xgmi_plpd_mode mode) -{ - struct amdgpu_device *adev = smu->adev; - - /* The message only works on master die and NACK will be sent - back for other dies, only send it on master die */ - if (adev->smuio.funcs->get_socket_id(adev) || - adev->smuio.funcs->get_die_id(adev)) - return 0; - - if (mode == XGMI_PLPD_DEFAULT) - return smu_cmn_send_smc_msg_with_param(smu, - SMU_MSG_GmiPwrDnControl, - 0, NULL); - else if (mode == XGMI_PLPD_DISALLOW) - return smu_cmn_send_smc_msg_with_param(smu, - SMU_MSG_GmiPwrDnControl, - 1, NULL); - else - return -EINVAL; -} - static const struct throttling_logging_label { uint32_t feature_mask; const char *label; @@ -1714,7 +1741,6 @@ static ssize_t aldebaran_get_gpu_metrics(struct smu_context *smu, gpu_metrics->average_gfx_activity = metrics.AverageGfxActivity; gpu_metrics->average_umc_activity = metrics.AverageUclkActivity; - gpu_metrics->average_mm_activity = 0; /* Valid power data is available only from primary die */ if (aldebaran_is_primary(smu)) { @@ -1846,7 +1872,6 @@ static int aldebaran_mode1_reset(struct smu_context *smu) u32 fatal_err, param; int ret = 0; struct amdgpu_device *adev = smu->adev; - struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); fatal_err = 0; param = SMU_RESET_MODE_1; @@ -1859,8 +1884,8 @@ static int aldebaran_mode1_reset(struct smu_context *smu) } else { /* fatal error triggered by ras, PMFW supports the flag from 68.44.0 */ - if ((smu->smc_fw_version >= 0x00442c00) && ras && - atomic_read(&ras->in_recovery)) + if ((smu->smc_fw_version >= 0x00442c00) && + amdgpu_ras_get_fed_status(adev)) fatal_err = 1; param |= (fatal_err << 16); @@ -1882,7 +1907,8 @@ static int aldebaran_mode2_reset(struct smu_context *smu) index = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG, SMU_MSG_GfxDeviceDriverReset); - + if (index < 0 ) + return -EINVAL; mutex_lock(&smu->message_lock); if (smu->smc_fw_version >= 0x00441400) { ret = smu_cmn_send_msg_without_waiting(smu, (uint16_t)index, SMU_RESET_MODE_2); @@ -1959,11 +1985,6 @@ static bool aldebaran_is_mode1_reset_supported(struct smu_context *smu) return true; } -static bool aldebaran_is_mode2_reset_supported(struct smu_context *smu) -{ - return true; -} - static int aldebaran_set_mp1_state(struct smu_context *smu, enum pp_mp1_state mp1_state) { @@ -2059,18 +2080,16 @@ static const struct pptable_funcs aldebaran_ppt_funcs = { .register_irq_handler = smu_v13_0_register_irq_handler, .set_azalia_d3_pme = smu_v13_0_set_azalia_d3_pme, .get_max_sustainable_clocks_by_dc = smu_v13_0_get_max_sustainable_clocks_by_dc, - .baco_is_support = aldebaran_is_baco_supported, + .get_bamaco_support = aldebaran_get_bamaco_support, .get_dpm_ultimate_freq = smu_v13_0_get_dpm_ultimate_freq, .set_soft_freq_limited_range = aldebaran_set_soft_freq_limited_range, .od_edit_dpm_table = aldebaran_usr_edit_dpm_table, .set_df_cstate = aldebaran_set_df_cstate, - .select_xgmi_plpd_policy = aldebaran_select_xgmi_plpd_policy, .log_thermal_throttling_event = aldebaran_log_thermal_throttling_event, .get_pp_feature_mask = smu_cmn_get_pp_feature_mask, .set_pp_feature_mask = smu_cmn_set_pp_feature_mask, .get_gpu_metrics = aldebaran_get_gpu_metrics, .mode1_reset_is_support = aldebaran_is_mode1_reset_supported, - .mode2_reset_is_support = aldebaran_is_mode2_reset_supported, 
.smu_handle_passthrough_sbr = aldebaran_smu_handle_passthrough_sbr, .mode1_reset = aldebaran_mode1_reset, .set_mp1_state = aldebaran_set_mp1_state, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c index 48170bb5112e..1c7235935d14 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c @@ -58,6 +58,7 @@ MODULE_FIRMWARE("amdgpu/aldebaran_smc.bin"); MODULE_FIRMWARE("amdgpu/smu_13_0_0.bin"); +MODULE_FIRMWARE("amdgpu/smu_13_0_0_kicker.bin"); MODULE_FIRMWARE("amdgpu/smu_13_0_7.bin"); MODULE_FIRMWARE("amdgpu/smu_13_0_10.bin"); @@ -79,8 +80,8 @@ MODULE_FIRMWARE("amdgpu/smu_13_0_10.bin"); #define PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK 0x00000070L #define PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT 0x4 #define smnPCIE_LC_SPEED_CNTL 0x11140290 -#define PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK 0xC000 -#define PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT 0xE +#define PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK 0xE0 +#define PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT 0x5 #define ENABLE_IMU_ARG_GFXOFF_ENABLE 1 @@ -92,7 +93,6 @@ const int pmfw_decoded_link_width[7] = {0, 1, 2, 4, 8, 12, 16}; int smu_v13_0_init_microcode(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; - char fw_name[30]; char ucode_prefix[30]; int err = 0; const struct smc_firmware_header_v1_0 *hdr; @@ -105,9 +105,12 @@ int smu_v13_0_init_microcode(struct smu_context *smu) amdgpu_ucode_ip_version_decode(adev, MP1_HWIP, ucode_prefix, sizeof(ucode_prefix)); - snprintf(fw_name, sizeof(fw_name), "amdgpu/%s.bin", ucode_prefix); - - err = amdgpu_ucode_request(adev, &adev->pm.fw, fw_name); + if (amdgpu_is_kicker_fw(adev)) + err = amdgpu_ucode_request(adev, &adev->pm.fw, AMDGPU_UCODE_REQUIRED, + "amdgpu/%s_kicker.bin", ucode_prefix); + else + err = amdgpu_ucode_request(adev, &adev->pm.fw, AMDGPU_UCODE_REQUIRED, + "amdgpu/%s.bin", ucode_prefix); if (err) goto out; @@ -270,9 +273,7 @@ int smu_v13_0_check_fw_version(struct smu_context *smu) smu_major = (smu_version >> 16) & 0xff; smu_minor = (smu_version >> 8) & 0xff; smu_debug = (smu_version >> 0) & 0xff; - if (smu->is_apu || - amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 6)) - adev->pm.fw_version = smu_version; + adev->pm.fw_version = smu_version; /* only for dGPU w/ SMU13*/ if (adev->pm.fw) @@ -530,10 +531,12 @@ int smu_v13_0_fini_smc_tables(struct smu_context *smu) smu_table->watermarks_table = NULL; smu_table->metrics_time = 0; + kfree(smu_dpm->dpm_policies); kfree(smu_dpm->dpm_context); kfree(smu_dpm->golden_dpm_context); kfree(smu_dpm->dpm_current_power_state); kfree(smu_dpm->dpm_request_power_state); + smu_dpm->dpm_policies = NULL; smu_dpm->dpm_context = NULL; smu_dpm->golden_dpm_context = NULL; smu_dpm->dpm_context_size = 0; @@ -712,18 +715,6 @@ int smu_v13_0_notify_memory_pool_location(struct smu_context *smu) return ret; } -int smu_v13_0_set_min_deep_sleep_dcefclk(struct smu_context *smu, uint32_t clk) -{ - int ret; - - ret = smu_cmn_send_smc_msg_with_param(smu, - SMU_MSG_SetMinDeepSleepDcefclk, clk, NULL); - if (ret) - dev_err(smu->adev->dev, "SMU13 attempt to set divider for DCEFCLK Failed!"); - - return ret; -} - int smu_v13_0_set_driver_table_location(struct smu_context *smu) { struct smu_table *driver_table = &smu->smu_table.driver_table; @@ -764,18 +755,6 @@ int smu_v13_0_set_tool_table_location(struct smu_context *smu) return ret; } -int smu_v13_0_init_display_count(struct smu_context *smu, uint32_t count) -{ 
- int ret = 0; - - if (!smu->pm_enabled) - return ret; - - ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, count, NULL); - - return ret; -} - int smu_v13_0_set_allowed_mask(struct smu_context *smu) { struct smu_feature *feature = &smu->smu_feature; @@ -1076,56 +1055,6 @@ int smu_v13_0_get_gfx_vdd(struct smu_context *smu, uint32_t *value) } -int -smu_v13_0_display_clock_voltage_request(struct smu_context *smu, - struct pp_display_clock_request - *clock_req) -{ - enum amd_pp_clock_type clk_type = clock_req->clock_type; - int ret = 0; - enum smu_clk_type clk_select = 0; - uint32_t clk_freq = clock_req->clock_freq_in_khz / 1000; - - if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) || - smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) { - switch (clk_type) { - case amd_pp_dcef_clock: - clk_select = SMU_DCEFCLK; - break; - case amd_pp_disp_clock: - clk_select = SMU_DISPCLK; - break; - case amd_pp_pixel_clock: - clk_select = SMU_PIXCLK; - break; - case amd_pp_phy_clock: - clk_select = SMU_PHYCLK; - break; - case amd_pp_mem_clock: - clk_select = SMU_UCLK; - break; - default: - dev_info(smu->adev->dev, "[%s] Invalid Clock Type!", __func__); - ret = -EINVAL; - break; - } - - if (ret) - goto failed; - - if (clk_select == SMU_UCLK && smu->disable_uclk_switch) - return 0; - - ret = smu_v13_0_set_hard_freq_limited_range(smu, clk_select, clk_freq, 0); - - if (clk_select == SMU_UCLK) - smu->hard_min_uclk_req_from_dal = clk_freq; - } - -failed: - return ret; -} - uint32_t smu_v13_0_get_fan_control_mode(struct smu_context *smu) { if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_FAN_CONTROL_BIT)) @@ -1229,7 +1158,7 @@ int smu_v13_0_set_fan_speed_rpm(struct smu_context *smu, uint32_t tach_period; int ret; - if (!speed) + if (!speed || speed > UINT_MAX/8) return -EINVAL; ret = smu_v13_0_auto_fan_control(smu, 0); @@ -1321,11 +1250,11 @@ static int smu_v13_0_set_irq_state(struct amdgpu_device *adev, return 0; } -static int smu_v13_0_ack_ac_dc_interrupt(struct smu_context *smu) +void smu_v13_0_interrupt_work(struct smu_context *smu) { - return smu_cmn_send_smc_msg(smu, - SMU_MSG_ReenableAcDcInterrupt, - NULL); + smu_cmn_send_smc_msg(smu, + SMU_MSG_ReenableAcDcInterrupt, + NULL); } #define THM_11_0__SRCID__THM_DIG_THERM_L2H 0 /* ASIC_TEMP > CG_THERMAL_INT.DIG_THERM_INTH */ @@ -1378,12 +1307,12 @@ static int smu_v13_0_irq_process(struct amdgpu_device *adev, switch (ctxid) { case SMU_IH_INTERRUPT_CONTEXT_ID_AC: dev_dbg(adev->dev, "Switched to AC mode!\n"); - smu_v13_0_ack_ac_dc_interrupt(smu); + schedule_work(&smu->interrupt_work); adev->pm.ac_power = true; break; case SMU_IH_INTERRUPT_CONTEXT_ID_DC: dev_dbg(adev->dev, "Switched to DC mode!\n"); - smu_v13_0_ack_ac_dc_interrupt(smu); + schedule_work(&smu->interrupt_work); adev->pm.ac_power = false; break; case SMU_IH_INTERRUPT_CONTEXT_ID_THERMAL_THROTTLING: @@ -1559,22 +1488,9 @@ int smu_v13_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type c uint32_t clock_limit; if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type)) { - switch (clk_type) { - case SMU_MCLK: - case SMU_UCLK: - clock_limit = smu->smu_table.boot_values.uclk; - break; - case SMU_GFXCLK: - case SMU_SCLK: - clock_limit = smu->smu_table.boot_values.gfxclk; - break; - case SMU_SOCCLK: - clock_limit = smu->smu_table.boot_values.socclk; - break; - default: - clock_limit = 0; - break; - } + ret = smu_v13_0_get_boot_freq_by_index(smu, clk_type, &clock_limit); + if (ret) + return ret; /* clock in Mhz unit */ if (min) @@ -1622,7 +1538,8 @@ failed: int 
smu_v13_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t min, - uint32_t max) + uint32_t max, + bool automatic) { int ret = 0, clk_id = 0; uint32_t param; @@ -1637,7 +1554,10 @@ int smu_v13_0_set_soft_freq_limited_range(struct smu_context *smu, return clk_id; if (max > 0) { - param = (uint32_t)((clk_id << 16) | (max & 0xffff)); + if (automatic) + param = (uint32_t)((clk_id << 16) | 0xffff); + else + param = (uint32_t)((clk_id << 16) | (max & 0xffff)); ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxByFreq, param, NULL); if (ret) @@ -1645,7 +1565,10 @@ int smu_v13_0_set_soft_freq_limited_range(struct smu_context *smu, } if (min > 0) { - param = (uint32_t)((clk_id << 16) | (min & 0xffff)); + if (automatic) + param = (uint32_t)((clk_id << 16) | 0); + else + param = (uint32_t)((clk_id << 16) | (min & 0xffff)); ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinByFreq, param, NULL); if (ret) @@ -1656,45 +1579,6 @@ out: return ret; } -int smu_v13_0_set_hard_freq_limited_range(struct smu_context *smu, - enum smu_clk_type clk_type, - uint32_t min, - uint32_t max) -{ - int ret = 0, clk_id = 0; - uint32_t param; - - if (min <= 0 && max <= 0) - return -EINVAL; - - if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type)) - return 0; - - clk_id = smu_cmn_to_asic_specific_index(smu, - CMN2ASIC_MAPPING_CLK, - clk_type); - if (clk_id < 0) - return clk_id; - - if (max > 0) { - param = (uint32_t)((clk_id << 16) | (max & 0xffff)); - ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMaxByFreq, - param, NULL); - if (ret) - return ret; - } - - if (min > 0) { - param = (uint32_t)((clk_id << 16) | (min & 0xffff)); - ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinByFreq, - param, NULL); - if (ret) - return ret; - } - - return ret; -} - int smu_v13_0_set_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level) { @@ -1722,6 +1606,7 @@ int smu_v13_0_set_performance_level(struct smu_context *smu, uint32_t dclk_min = 0, dclk_max = 0; uint32_t fclk_min = 0, fclk_max = 0; int ret = 0, i; + bool auto_level = false; switch (level) { case AMD_DPM_FORCED_LEVEL_HIGH: @@ -1753,6 +1638,7 @@ int smu_v13_0_set_performance_level(struct smu_context *smu, dclk_max = dclk_table->max; fclk_min = fclk_table->min; fclk_max = fclk_table->max; + auto_level = true; break; case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD: sclk_min = sclk_max = pstate_table->gfxclk_pstate.standard; @@ -1794,13 +1680,15 @@ int smu_v13_0_set_performance_level(struct smu_context *smu, vclk_min = vclk_max = 0; dclk_min = dclk_max = 0; fclk_min = fclk_max = 0; + auto_level = false; } if (sclk_min && sclk_max) { ret = smu_v13_0_set_soft_freq_limited_range(smu, SMU_GFXCLK, sclk_min, - sclk_max); + sclk_max, + auto_level); if (ret) return ret; @@ -1812,7 +1700,8 @@ int smu_v13_0_set_performance_level(struct smu_context *smu, ret = smu_v13_0_set_soft_freq_limited_range(smu, SMU_MCLK, mclk_min, - mclk_max); + mclk_max, + auto_level); if (ret) return ret; @@ -1824,7 +1713,8 @@ int smu_v13_0_set_performance_level(struct smu_context *smu, ret = smu_v13_0_set_soft_freq_limited_range(smu, SMU_SOCCLK, socclk_min, - socclk_max); + socclk_max, + auto_level); if (ret) return ret; @@ -1839,7 +1729,8 @@ int smu_v13_0_set_performance_level(struct smu_context *smu, ret = smu_v13_0_set_soft_freq_limited_range(smu, i ? 
SMU_VCLK1 : SMU_VCLK, vclk_min, - vclk_max); + vclk_max, + auto_level); if (ret) return ret; } @@ -1854,7 +1745,8 @@ int smu_v13_0_set_performance_level(struct smu_context *smu, ret = smu_v13_0_set_soft_freq_limited_range(smu, i ? SMU_DCLK1 : SMU_DCLK, dclk_min, - dclk_max); + dclk_max, + auto_level); if (ret) return ret; } @@ -1866,7 +1758,8 @@ int smu_v13_0_set_performance_level(struct smu_context *smu, ret = smu_v13_0_set_soft_freq_limited_range(smu, SMU_FCLK, fclk_min, - fclk_max); + fclk_max, + auto_level); if (ret) return ret; @@ -1894,6 +1787,40 @@ int smu_v13_0_set_power_source(struct smu_context *smu, NULL); } +int smu_v13_0_get_boot_freq_by_index(struct smu_context *smu, + enum smu_clk_type clk_type, + uint32_t *value) +{ + int ret = 0; + + switch (clk_type) { + case SMU_MCLK: + case SMU_UCLK: + *value = smu->smu_table.boot_values.uclk; + break; + case SMU_FCLK: + *value = smu->smu_table.boot_values.fclk; + break; + case SMU_GFXCLK: + case SMU_SCLK: + *value = smu->smu_table.boot_values.gfxclk; + break; + case SMU_SOCCLK: + *value = smu->smu_table.boot_values.socclk; + break; + case SMU_VCLK: + *value = smu->smu_table.boot_values.vclk; + break; + case SMU_DCLK: + *value = smu->smu_table.boot_values.dclk; + break; + default: + ret = -EINVAL; + break; + } + return ret; +} + int smu_v13_0_get_dpm_freq_by_index(struct smu_context *smu, enum smu_clk_type clk_type, uint16_t level, uint32_t *value) @@ -1905,7 +1832,7 @@ int smu_v13_0_get_dpm_freq_by_index(struct smu_context *smu, return -EINVAL; if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type)) - return 0; + return smu_v13_0_get_boot_freq_by_index(smu, clk_type, value); clk_id = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_CLK, @@ -2068,21 +1995,18 @@ int smu_v13_0_get_current_pcie_link_speed(struct smu_context *smu) } int smu_v13_0_set_vcn_enable(struct smu_context *smu, - bool enable) + bool enable, + int inst) { struct amdgpu_device *adev = smu->adev; - int i, ret = 0; + int ret = 0; - for (i = 0; i < adev->vcn.num_vcn_inst; i++) { - if (adev->vcn.harvest_config & (1 << i)) - continue; + if (adev->vcn.harvest_config & (1 << inst)) + return ret; - ret = smu_cmn_send_smc_msg_with_param(smu, enable ? - SMU_MSG_PowerUpVcn : SMU_MSG_PowerDownVcn, - i << 16U, NULL); - if (ret) - return ret; - } + ret = smu_cmn_send_smc_msg_with_param(smu, enable ? + SMU_MSG_PowerUpVcn : SMU_MSG_PowerDownVcn, + inst << 16U, NULL); return ret; } @@ -2247,7 +2171,7 @@ static int smu_v13_0_baco_set_state(struct smu_context *smu, if (state == SMU_BACO_STATE_ENTER) { ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_EnterBaco, - (smu_baco->maco_support && amdgpu_runtime_pm != 1) ? + (adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO) ? 
BACO_SEQ_BAMACO : BACO_SEQ_BACO, NULL); } else { @@ -2268,33 +2192,36 @@ static int smu_v13_0_baco_set_state(struct smu_context *smu, return ret; } -bool smu_v13_0_baco_is_support(struct smu_context *smu) +int smu_v13_0_get_bamaco_support(struct smu_context *smu) { struct smu_baco_context *smu_baco = &smu->smu_baco; + int bamaco_support = 0; if (amdgpu_sriov_vf(smu->adev) || !smu_baco->platform_support) - return false; + return 0; + + if (smu_baco->maco_support) + bamaco_support |= MACO_SUPPORT; /* return true if ASIC is in BACO state already */ if (smu_v13_0_baco_get_state(smu) == SMU_BACO_STATE_ENTER) - return true; + return bamaco_support |= BACO_SUPPORT; if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_BACO_BIT) && !smu_cmn_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT)) - return false; + return 0; - return true; + return (bamaco_support |= BACO_SUPPORT); } int smu_v13_0_baco_enter(struct smu_context *smu) { - struct smu_baco_context *smu_baco = &smu->smu_baco; struct amdgpu_device *adev = smu->adev; int ret; if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) { return smu_v13_0_baco_set_armd3_sequence(smu, - (smu_baco->maco_support && amdgpu_runtime_pm != 1) ? + (adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO) ? BACO_SEQ_BAMACO : BACO_SEQ_BACO); } else { ret = smu_v13_0_baco_set_state(smu, SMU_BACO_STATE_ENTER); @@ -2561,3 +2488,13 @@ int smu_v13_0_set_wbrf_exclusion_ranges(struct smu_context *smu, return ret; } + +void smu_v13_0_reset_custom_level(struct smu_context *smu) +{ + struct smu_umd_pstate_table *pstate_table = &smu->pstate_table; + + pstate_table->uclk_pstate.custom.min = 0; + pstate_table->uclk_pstate.custom.max = 0; + pstate_table->gfxclk_pstate.custom.min = 0; + pstate_table->gfxclk_pstate.custom.max = 0; +} diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c index 67117ced7c6a..5a9711e8cf68 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c @@ -107,6 +107,8 @@ #define PP_OD_FEATURE_FAN_ACOUSTIC_TARGET 8 #define PP_OD_FEATURE_FAN_TARGET_TEMPERATURE 9 #define PP_OD_FEATURE_FAN_MINIMUM_PWM 10 +#define PP_OD_FEATURE_FAN_ZERO_RPM_ENABLE 11 +#define PP_OD_FEATURE_FAN_ZERO_RPM_STOP_TEMP 12 #define LINK_SPEED_MAX 3 @@ -124,7 +126,7 @@ static struct cmn2asic_msg_mapping smu_v13_0_0_message_map[SMU_MSG_MAX_COUNT] = MSG_MAP(DisableSmuFeaturesHigh, PPSMC_MSG_DisableSmuFeaturesHigh, 1), MSG_MAP(GetEnabledSmuFeaturesLow, PPSMC_MSG_GetRunningSmuFeaturesLow, 1), MSG_MAP(GetEnabledSmuFeaturesHigh, PPSMC_MSG_GetRunningSmuFeaturesHigh, 1), - MSG_MAP(SetWorkloadMask, PPSMC_MSG_SetWorkloadMask, 1), + MSG_MAP(SetWorkloadMask, PPSMC_MSG_SetWorkloadMask, 0), MSG_MAP(SetPptLimit, PPSMC_MSG_SetPptLimit, 0), MSG_MAP(SetDriverDramAddrHigh, PPSMC_MSG_SetDriverDramAddrHigh, 1), MSG_MAP(SetDriverDramAddrLow, PPSMC_MSG_SetDriverDramAddrLow, 1), @@ -138,7 +140,7 @@ static struct cmn2asic_msg_mapping smu_v13_0_0_message_map[SMU_MSG_MAX_COUNT] = MSG_MAP(ExitBaco, PPSMC_MSG_ExitBaco, 0), MSG_MAP(SetSoftMinByFreq, PPSMC_MSG_SetSoftMinByFreq, 1), MSG_MAP(SetSoftMaxByFreq, PPSMC_MSG_SetSoftMaxByFreq, 1), - MSG_MAP(SetHardMinByFreq, PPSMC_MSG_SetHardMinByFreq, 1), + MSG_MAP(SetHardMinByFreq, PPSMC_MSG_SetHardMinByFreq, 0), MSG_MAP(SetHardMaxByFreq, PPSMC_MSG_SetHardMaxByFreq, 0), MSG_MAP(GetMinDpmFreq, PPSMC_MSG_GetMinDpmFreq, 1), MSG_MAP(GetMaxDpmFreq, PPSMC_MSG_GetMaxDpmFreq, 1), @@ -147,7 +149,7 @@ static struct cmn2asic_msg_mapping 
smu_v13_0_0_message_map[SMU_MSG_MAX_COUNT] = MSG_MAP(PowerDownVcn, PPSMC_MSG_PowerDownVcn, 0), MSG_MAP(PowerUpJpeg, PPSMC_MSG_PowerUpJpeg, 0), MSG_MAP(PowerDownJpeg, PPSMC_MSG_PowerDownJpeg, 0), - MSG_MAP(GetDcModeMaxDpmFreq, PPSMC_MSG_GetDcModeMaxDpmFreq, 1), + MSG_MAP(GetDcModeMaxDpmFreq, PPSMC_MSG_GetDcModeMaxDpmFreq, 0), MSG_MAP(OverridePcieParameters, PPSMC_MSG_OverridePcieParameters, 0), MSG_MAP(DramLogSetDramAddrHigh, PPSMC_MSG_DramLogSetDramAddrHigh, 0), MSG_MAP(DramLogSetDramAddrLow, PPSMC_MSG_DramLogSetDramAddrLow, 0), @@ -736,19 +738,6 @@ static bool smu_v13_0_0_is_dpm_running(struct smu_context *smu) return !!(feature_enabled & SMC_DPM_FEATURE); } -static void smu_v13_0_0_dump_pptable(struct smu_context *smu) -{ - struct smu_table_context *table_context = &smu->smu_table; - PPTable_t *pptable = table_context->driver_pptable; - SkuTable_t *skutable = &pptable->SkuTable; - - dev_info(smu->adev->dev, "Dumped PPTable:\n"); - - dev_info(smu->adev->dev, "Version = 0x%08x\n", skutable->Version); - dev_info(smu->adev->dev, "FeaturesToRun[0] = 0x%08x\n", skutable->FeaturesToRun[0]); - dev_info(smu->adev->dev, "FeaturesToRun[1] = 0x%08x\n", skutable->FeaturesToRun[1]); -} - static int smu_v13_0_0_system_features_control(struct smu_context *smu, bool en) { @@ -847,6 +836,10 @@ static int smu_v13_0_0_get_smu_metrics_data(struct smu_context *smu, case METRICS_AVERAGE_MEMACTIVITY: *value = metrics->AverageUclkActivity; break; + case METRICS_AVERAGE_VCNACTIVITY: + *value = max(metrics->Vcn0ActivityPercentage, + metrics->Vcn1ActivityPercentage); + break; case METRICS_AVERAGE_SOCKETPOWER: *value = metrics->AverageSocketPower << 8; break; @@ -973,6 +966,12 @@ static int smu_v13_0_0_read_sensor(struct smu_context *smu, (uint32_t *)data); *size = 4; break; + case AMDGPU_PP_SENSOR_VCN_LOAD: + ret = smu_v13_0_0_get_smu_metrics_data(smu, + METRICS_AVERAGE_VCNACTIVITY, + (uint32_t *)data); + *size = 4; + break; case AMDGPU_PP_SENSOR_GPU_AVG_POWER: ret = smu_v13_0_0_get_smu_metrics_data(smu, METRICS_AVERAGE_SOCKETPOWER, @@ -1143,6 +1142,14 @@ static void smu_v13_0_0_get_od_setting_limits(struct smu_context *smu, od_min_setting = overdrive_lowerlimits->FanMinimumPwm; od_max_setting = overdrive_upperlimits->FanMinimumPwm; break; + case PP_OD_FEATURE_FAN_ZERO_RPM_ENABLE: + od_min_setting = overdrive_lowerlimits->FanZeroRpmEnable; + od_max_setting = overdrive_upperlimits->FanZeroRpmEnable; + break; + case PP_OD_FEATURE_FAN_ZERO_RPM_STOP_TEMP: + od_min_setting = overdrive_lowerlimits->FanZeroRpmStopTemp; + od_max_setting = overdrive_upperlimits->FanZeroRpmStopTemp; + break; default: od_min_setting = od_max_setting = INT_MAX; break; @@ -1463,6 +1470,42 @@ static int smu_v13_0_0_print_clk_levels(struct smu_context *smu, min_value, max_value); break; + case SMU_OD_FAN_ZERO_RPM_ENABLE: + if (!smu_v13_0_0_is_od_feature_supported(smu, + PP_OD_FEATURE_ZERO_FAN_BIT)) + break; + + size += sysfs_emit_at(buf, size, "FAN_ZERO_RPM_ENABLE:\n"); + size += sysfs_emit_at(buf, size, "%d\n", + (int)od_table->OverDriveTable.FanZeroRpmEnable); + + size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE"); + smu_v13_0_0_get_od_setting_limits(smu, + PP_OD_FEATURE_FAN_ZERO_RPM_ENABLE, + &min_value, + &max_value); + size += sysfs_emit_at(buf, size, "ZERO_RPM_ENABLE: %u %u\n", + min_value, max_value); + break; + + case SMU_OD_FAN_ZERO_RPM_STOP_TEMP: + if (!smu_v13_0_0_is_od_feature_supported(smu, + PP_OD_FEATURE_ZERO_FAN_BIT)) + break; + + size += sysfs_emit_at(buf, size, "FAN_ZERO_RPM_STOP_TEMPERATURE:\n"); + size += sysfs_emit_at(buf, 
size, "%d\n", + (int)od_table->OverDriveTable.FanZeroRpmStopTemp); + + size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE"); + smu_v13_0_0_get_od_setting_limits(smu, + PP_OD_FEATURE_FAN_ZERO_RPM_STOP_TEMP, + &min_value, + &max_value); + size += sysfs_emit_at(buf, size, "ZERO_RPM_STOP_TEMPERATURE: %u %u\n", + min_value, max_value); + break; + case SMU_OD_RANGE: if (!smu_v13_0_0_is_od_feature_supported(smu, PP_OD_FEATURE_GFXCLK_BIT) && !smu_v13_0_0_is_od_feature_supported(smu, PP_OD_FEATURE_UCLK_BIT) && @@ -1560,6 +1603,16 @@ static int smu_v13_0_0_od_restore_table_single(struct smu_context *smu, long inp od_table->OverDriveTable.FanMode = FAN_MODE_AUTO; od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT); break; + case PP_OD_EDIT_FAN_ZERO_RPM_ENABLE: + od_table->OverDriveTable.FanZeroRpmEnable = + boot_overdrive_table->OverDriveTable.FanZeroRpmEnable; + od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_ZERO_FAN_BIT); + break; + case PP_OD_EDIT_FAN_ZERO_RPM_STOP_TEMP: + od_table->OverDriveTable.FanZeroRpmStopTemp = + boot_overdrive_table->OverDriveTable.FanZeroRpmStopTemp; + od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_ZERO_FAN_BIT); + break; default: dev_info(adev->dev, "Invalid table index: %ld\n", input); return -EINVAL; @@ -1853,6 +1906,48 @@ static int smu_v13_0_0_od_edit_dpm_table(struct smu_context *smu, od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT); break; + case PP_OD_EDIT_FAN_ZERO_RPM_ENABLE: + if (!smu_v13_0_0_is_od_feature_supported(smu, PP_OD_FEATURE_ZERO_FAN_BIT)) { + dev_warn(adev->dev, "Zero RPM setting not supported!\n"); + return -ENOTSUPP; + } + + smu_v13_0_0_get_od_setting_limits(smu, + PP_OD_FEATURE_FAN_ZERO_RPM_ENABLE, + &minimum, + &maximum); + if (input[0] < minimum || + input[0] > maximum) { + dev_info(adev->dev, "zero RPM enable setting(%ld) must be within [%d, %d]!\n", + input[0], minimum, maximum); + return -EINVAL; + } + + od_table->OverDriveTable.FanZeroRpmEnable = input[0]; + od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_ZERO_FAN_BIT); + break; + + case PP_OD_EDIT_FAN_ZERO_RPM_STOP_TEMP: + if (!smu_v13_0_0_is_od_feature_supported(smu, PP_OD_FEATURE_ZERO_FAN_BIT)) { + dev_warn(adev->dev, "Zero RPM setting not supported!\n"); + return -ENOTSUPP; + } + + smu_v13_0_0_get_od_setting_limits(smu, + PP_OD_FEATURE_FAN_ZERO_RPM_STOP_TEMP, + &minimum, + &maximum); + if (input[0] < minimum || + input[0] > maximum) { + dev_info(adev->dev, "zero RPM stop temperature setting(%ld) must be within [%d, %d]!\n", + input[0], minimum, maximum); + return -EINVAL; + } + + od_table->OverDriveTable.FanZeroRpmStopTemp = input[0]; + od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_ZERO_FAN_BIT); + break; + case PP_OD_RESTORE_DEFAULT_TABLE: if (size == 1) { ret = smu_v13_0_0_od_restore_table_single(smu, input[0]); @@ -1975,7 +2070,8 @@ static int smu_v13_0_0_force_clk_levels(struct smu_context *smu, ret = smu_v13_0_set_soft_freq_limited_range(smu, clk_type, min_freq, - max_freq); + max_freq, + false); break; case SMU_DCEFCLK: case SMU_PCIE: @@ -2122,7 +2218,11 @@ static void smu_v13_0_0_set_supported_od_feature_mask(struct smu_context *smu) OD_OPS_SUPPORT_FAN_TARGET_TEMPERATURE_RETRIEVE | OD_OPS_SUPPORT_FAN_TARGET_TEMPERATURE_SET | OD_OPS_SUPPORT_FAN_MINIMUM_PWM_RETRIEVE | - OD_OPS_SUPPORT_FAN_MINIMUM_PWM_SET; + OD_OPS_SUPPORT_FAN_MINIMUM_PWM_SET | + OD_OPS_SUPPORT_FAN_ZERO_RPM_ENABLE_RETRIEVE | + OD_OPS_SUPPORT_FAN_ZERO_RPM_ENABLE_SET | + 
OD_OPS_SUPPORT_FAN_ZERO_RPM_STOP_TEMP_RETRIEVE | + OD_OPS_SUPPORT_FAN_ZERO_RPM_STOP_TEMP_SET; } static int smu_v13_0_0_set_default_od_settings(struct smu_context *smu) @@ -2188,6 +2288,10 @@ static int smu_v13_0_0_set_default_od_settings(struct smu_context *smu) user_od_table_bak.OverDriveTable.FanTargetTemperature; user_od_table->OverDriveTable.FanMinimumPwm = user_od_table_bak.OverDriveTable.FanMinimumPwm; + user_od_table->OverDriveTable.FanZeroRpmEnable = + user_od_table_bak.OverDriveTable.FanZeroRpmEnable; + user_od_table->OverDriveTable.FanZeroRpmStopTemp = + user_od_table_bak.OverDriveTable.FanZeroRpmStopTemp; } smu_v13_0_0_set_supported_od_feature_mask(smu); @@ -2477,97 +2581,132 @@ static int smu_v13_0_0_get_power_profile_mode(struct smu_context *smu, return size; } -static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu, - long *input, - uint32_t size) +#define SMU_13_0_0_CUSTOM_PARAMS_COUNT 9 +#define SMU_13_0_0_CUSTOM_PARAMS_CLOCK_COUNT 2 +#define SMU_13_0_0_CUSTOM_PARAMS_SIZE (SMU_13_0_0_CUSTOM_PARAMS_CLOCK_COUNT * SMU_13_0_0_CUSTOM_PARAMS_COUNT * sizeof(long)) + +static int smu_v13_0_0_set_power_profile_mode_coeff(struct smu_context *smu, + long *input) { DpmActivityMonitorCoeffIntExternal_t activity_monitor_external; DpmActivityMonitorCoeffInt_t *activity_monitor = &(activity_monitor_external.DpmActivityMonitorCoeffInt); - int workload_type, ret = 0; - u32 workload_mask; + int ret, idx; - smu->power_profile_mode = input[size]; - - if (smu->power_profile_mode >= PP_SMC_POWER_PROFILE_COUNT) { - dev_err(smu->adev->dev, "Invalid power profile mode %d\n", smu->power_profile_mode); - return -EINVAL; + ret = smu_cmn_update_table(smu, + SMU_TABLE_ACTIVITY_MONITOR_COEFF, + WORKLOAD_PPLIB_CUSTOM_BIT, + (void *)(&activity_monitor_external), + false); + if (ret) { + dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__); + return ret; } - if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) { - ret = smu_cmn_update_table(smu, - SMU_TABLE_ACTIVITY_MONITOR_COEFF, - WORKLOAD_PPLIB_CUSTOM_BIT, - (void *)(&activity_monitor_external), - false); - if (ret) { - dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__); - return ret; - } - - switch (input[0]) { - case 0: /* Gfxclk */ - activity_monitor->Gfx_FPS = input[1]; - activity_monitor->Gfx_MinActiveFreqType = input[2]; - activity_monitor->Gfx_MinActiveFreq = input[3]; - activity_monitor->Gfx_BoosterFreqType = input[4]; - activity_monitor->Gfx_BoosterFreq = input[5]; - activity_monitor->Gfx_PD_Data_limit_c = input[6]; - activity_monitor->Gfx_PD_Data_error_coeff = input[7]; - activity_monitor->Gfx_PD_Data_error_rate_coeff = input[8]; - break; - case 1: /* Fclk */ - activity_monitor->Fclk_FPS = input[1]; - activity_monitor->Fclk_MinActiveFreqType = input[2]; - activity_monitor->Fclk_MinActiveFreq = input[3]; - activity_monitor->Fclk_BoosterFreqType = input[4]; - activity_monitor->Fclk_BoosterFreq = input[5]; - activity_monitor->Fclk_PD_Data_limit_c = input[6]; - activity_monitor->Fclk_PD_Data_error_coeff = input[7]; - activity_monitor->Fclk_PD_Data_error_rate_coeff = input[8]; - break; - } + idx = 0 * SMU_13_0_0_CUSTOM_PARAMS_COUNT; + if (input[idx]) { + /* Gfxclk */ + activity_monitor->Gfx_FPS = input[idx + 1]; + activity_monitor->Gfx_MinActiveFreqType = input[idx + 2]; + activity_monitor->Gfx_MinActiveFreq = input[idx + 3]; + activity_monitor->Gfx_BoosterFreqType = input[idx + 4]; + activity_monitor->Gfx_BoosterFreq = input[idx + 5]; + activity_monitor->Gfx_PD_Data_limit_c = input[idx 
+ 6]; + activity_monitor->Gfx_PD_Data_error_coeff = input[idx + 7]; + activity_monitor->Gfx_PD_Data_error_rate_coeff = input[idx + 8]; + } + idx = 1 * SMU_13_0_0_CUSTOM_PARAMS_COUNT; + if (input[idx]) { + /* Fclk */ + activity_monitor->Fclk_FPS = input[idx + 1]; + activity_monitor->Fclk_MinActiveFreqType = input[idx + 2]; + activity_monitor->Fclk_MinActiveFreq = input[idx + 3]; + activity_monitor->Fclk_BoosterFreqType = input[idx + 4]; + activity_monitor->Fclk_BoosterFreq = input[idx + 5]; + activity_monitor->Fclk_PD_Data_limit_c = input[idx + 6]; + activity_monitor->Fclk_PD_Data_error_coeff = input[idx + 7]; + activity_monitor->Fclk_PD_Data_error_rate_coeff = input[idx + 8]; + } - ret = smu_cmn_update_table(smu, - SMU_TABLE_ACTIVITY_MONITOR_COEFF, - WORKLOAD_PPLIB_CUSTOM_BIT, - (void *)(&activity_monitor_external), - true); - if (ret) { - dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__); - return ret; - } + ret = smu_cmn_update_table(smu, + SMU_TABLE_ACTIVITY_MONITOR_COEFF, + WORKLOAD_PPLIB_CUSTOM_BIT, + (void *)(&activity_monitor_external), + true); + if (ret) { + dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__); + return ret; } - /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */ - workload_type = smu_cmn_to_asic_specific_index(smu, - CMN2ASIC_MAPPING_WORKLOAD, - smu->power_profile_mode); + return ret; +} - if (workload_type < 0) - return -EINVAL; +static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu, + u32 workload_mask, + long *custom_params, + u32 custom_params_max_idx) +{ + u32 backend_workload_mask = 0; + int workload_type, ret, idx = -1, i; - workload_mask = 1 << workload_type; + smu_cmn_get_backend_workload_mask(smu, workload_mask, + &backend_workload_mask); /* Add optimizations for SMU13.0.0/10. 
Reuse the power saving profile */ - if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_COMPUTE) { - if ((amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 0) && - ((smu->adev->pm.fw_version == 0x004e6601) || - (smu->adev->pm.fw_version >= 0x004e7300))) || - (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 10) && - smu->adev->pm.fw_version >= 0x00504500)) { - workload_type = smu_cmn_to_asic_specific_index(smu, - CMN2ASIC_MAPPING_WORKLOAD, - PP_SMC_POWER_PROFILE_POWERSAVING); - if (workload_type >= 0) - workload_mask |= 1 << workload_type; + if ((workload_mask & (1 << PP_SMC_POWER_PROFILE_COMPUTE)) && + ((amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 0) && + ((smu->adev->pm.fw_version == 0x004e6601) || + (smu->adev->pm.fw_version >= 0x004e7300))) || + (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 10) && + smu->adev->pm.fw_version >= 0x00504500))) { + workload_type = smu_cmn_to_asic_specific_index(smu, + CMN2ASIC_MAPPING_WORKLOAD, + PP_SMC_POWER_PROFILE_POWERSAVING); + if (workload_type >= 0) + backend_workload_mask |= 1 << workload_type; + } + + if (workload_mask & (1 << PP_SMC_POWER_PROFILE_CUSTOM)) { + if (!smu->custom_profile_params) { + smu->custom_profile_params = + kzalloc(SMU_13_0_0_CUSTOM_PARAMS_SIZE, GFP_KERNEL); + if (!smu->custom_profile_params) + return -ENOMEM; + } + if (custom_params && custom_params_max_idx) { + if (custom_params_max_idx != SMU_13_0_0_CUSTOM_PARAMS_COUNT) + return -EINVAL; + if (custom_params[0] >= SMU_13_0_0_CUSTOM_PARAMS_CLOCK_COUNT) + return -EINVAL; + idx = custom_params[0] * SMU_13_0_0_CUSTOM_PARAMS_COUNT; + smu->custom_profile_params[idx] = 1; + for (i = 1; i < custom_params_max_idx; i++) + smu->custom_profile_params[idx + i] = custom_params[i]; } + ret = smu_v13_0_0_set_power_profile_mode_coeff(smu, + smu->custom_profile_params); + if (ret) { + if (idx != -1) + smu->custom_profile_params[idx] = 0; + return ret; + } + } else if (smu->custom_profile_params) { + memset(smu->custom_profile_params, 0, SMU_13_0_0_CUSTOM_PARAMS_SIZE); } - return smu_cmn_send_smc_msg_with_param(smu, - SMU_MSG_SetWorkloadMask, - workload_mask, - NULL); + ret = smu_cmn_send_smc_msg_with_param(smu, + SMU_MSG_SetWorkloadMask, + backend_workload_mask, + NULL); + if (ret) { + dev_err(smu->adev->dev, "Failed to set workload mask 0x%08x\n", + workload_mask); + if (idx != -1) + smu->custom_profile_params[idx] = 0; + return ret; + } + + return ret; } static bool smu_v13_0_0_is_mode1_reset_supported(struct smu_context *smu) @@ -2781,10 +2920,9 @@ static void smu_v13_0_0_set_mode1_reset_param(struct smu_context *smu, uint32_t *param) { struct amdgpu_device *adev = smu->adev; - struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); if ((smu->smc_fw_version >= supported_version) && - ras && atomic_read(&ras->in_recovery)) + amdgpu_ras_get_fed_status(adev)) /* Set RAS fatal error reset flag */ *param = 1 << 16; else @@ -3018,7 +3156,6 @@ static const struct pptable_funcs smu_v13_0_0_ppt_funcs = { .i2c_init = smu_v13_0_0_i2c_control_init, .i2c_fini = smu_v13_0_0_i2c_control_fini, .is_dpm_running = smu_v13_0_0_is_dpm_running, - .dump_pptable = smu_v13_0_0_dump_pptable, .init_microcode = smu_v13_0_init_microcode, .load_microcode = smu_v13_0_load_microcode, .fini_microcode = smu_v13_0_fini_microcode, @@ -3076,7 +3213,7 @@ static const struct pptable_funcs smu_v13_0_0_ppt_funcs = { .set_tool_table_location = smu_v13_0_set_tool_table_location, .deep_sleep_control = smu_v13_0_deep_sleep_control, .gfx_ulv_control = 
smu_v13_0_gfx_ulv_control, - .baco_is_support = smu_v13_0_baco_is_support, + .get_bamaco_support = smu_v13_0_get_bamaco_support, .baco_enter = smu_v13_0_baco_enter, .baco_exit = smu_v13_0_baco_exit, .mode1_reset_is_support = smu_v13_0_0_is_mode1_reset_supported, @@ -3093,6 +3230,7 @@ static const struct pptable_funcs smu_v13_0_0_ppt_funcs = { .is_asic_wbrf_supported = smu_v13_0_0_wbrf_support_check, .enable_uclk_shadow = smu_v13_0_enable_uclk_shadow, .set_wbrf_exclusion_ranges = smu_v13_0_set_wbrf_exclusion_ranges, + .interrupt_work = smu_v13_0_interrupt_work, }; void smu_v13_0_0_set_ppt_funcs(struct smu_context *smu) @@ -3106,4 +3244,9 @@ void smu_v13_0_0_set_ppt_funcs(struct smu_context *smu) smu->workload_map = smu_v13_0_0_workload_map; smu->smc_driver_if_version = SMU13_0_0_DRIVER_IF_VERSION; smu_v13_0_0_set_smu_mailbox_registers(smu); + + if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == + IP_VERSION(13, 0, 10) && + !amdgpu_device_has_display_hardware(smu->adev)) + smu->adev->pm.pp_feature &= ~PP_GFXOFF_MASK; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c new file mode 100644 index 000000000000..e0d356f93ab0 --- /dev/null +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c @@ -0,0 +1,537 @@ +/* + * Copyright 2021 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#define SWSMU_CODE_LAYER_L2 + +#include <linux/firmware.h> +#include "amdgpu.h" +#include "amdgpu_smu.h" +#include "smu_v13_0_12_pmfw.h" +#include "smu_v13_0_6_ppt.h" +#include "smu_v13_0_12_ppsmc.h" +#include "smu_v13_0.h" +#include "amdgpu_xgmi.h" +#include "amdgpu_fru_eeprom.h" +#include <linux/pci.h> +#include "smu_cmn.h" + +#undef MP1_Public +#undef smnMP1_FIRMWARE_FLAGS + +/* + * DO NOT use these for err/warn/info/debug messages. + * Use dev_err, dev_warn, dev_info and dev_dbg instead. + * They are more MGPU friendly. 
+ */ +#undef pr_err +#undef pr_warn +#undef pr_info +#undef pr_debug + +#define SMU_13_0_12_FEA_MAP(smu_feature, smu_13_0_12_feature) \ + [smu_feature] = { 1, (smu_13_0_12_feature) } + +#define FEATURE_MASK(feature) (1ULL << feature) +#define SMC_DPM_FEATURE \ + (FEATURE_MASK(FEATURE_DATA_CALCULATION) | \ + FEATURE_MASK(FEATURE_DPM_GFXCLK) | FEATURE_MASK(FEATURE_DPM_FCLK)) + +#define NUM_JPEG_RINGS_FW 10 +#define NUM_JPEG_RINGS_GPU_METRICS(gpu_metrics) \ + (ARRAY_SIZE(gpu_metrics->xcp_stats[0].jpeg_busy) / 4) + +const struct cmn2asic_mapping smu_v13_0_12_feature_mask_map[SMU_FEATURE_COUNT] = { + SMU_13_0_12_FEA_MAP(SMU_FEATURE_DATA_CALCULATIONS_BIT, FEATURE_DATA_CALCULATION), + SMU_13_0_12_FEA_MAP(SMU_FEATURE_DPM_GFXCLK_BIT, FEATURE_DPM_GFXCLK), + SMU_13_0_12_FEA_MAP(SMU_FEATURE_DPM_FCLK_BIT, FEATURE_DPM_FCLK), + SMU_13_0_12_FEA_MAP(SMU_FEATURE_DS_GFXCLK_BIT, FEATURE_DS_GFXCLK), + SMU_13_0_12_FEA_MAP(SMU_FEATURE_DS_SOCCLK_BIT, FEATURE_DS_SOCCLK), + SMU_13_0_12_FEA_MAP(SMU_FEATURE_DS_LCLK_BIT, FEATURE_DS_LCLK), + SMU_13_0_12_FEA_MAP(SMU_FEATURE_DS_FCLK_BIT, FEATURE_DS_FCLK), + SMU_13_0_12_FEA_MAP(SMU_FEATURE_PPT_BIT, FEATURE_PPT), + SMU_13_0_12_FEA_MAP(SMU_FEATURE_TDC_BIT, FEATURE_TDC), + SMU_13_0_12_FEA_MAP(SMU_FEATURE_APCC_DFLL_BIT, FEATURE_APCC_DFLL), + SMU_13_0_12_FEA_MAP(SMU_FEATURE_MP1_CG_BIT, FEATURE_SMU_CG), + SMU_13_0_12_FEA_MAP(SMU_FEATURE_FW_CTF_BIT, FEATURE_FW_CTF), + SMU_13_0_12_FEA_MAP(SMU_FEATURE_THERMAL_BIT, FEATURE_THERMAL), + SMU_13_0_12_FEA_MAP(SMU_FEATURE_SOC_PCC_BIT, FEATURE_SOC_PCC), + SMU_13_0_12_FEA_MAP(SMU_FEATURE_XGMI_PER_LINK_PWR_DWN_BIT, FEATURE_XGMI_PER_LINK_PWR_DOWN), + SMU_13_0_12_FEA_MAP(SMU_FEATURE_DS_VCN_BIT, FEATURE_DS_VCN), + SMU_13_0_12_FEA_MAP(SMU_FEATURE_DS_MP1CLK_BIT, FEATURE_DS_MP1CLK), + SMU_13_0_12_FEA_MAP(SMU_FEATURE_DS_MPIOCLK_BIT, FEATURE_DS_MPIOCLK), + SMU_13_0_12_FEA_MAP(SMU_FEATURE_DS_MP0CLK_BIT, FEATURE_DS_MP0CLK), + SMU_13_0_12_FEA_MAP(SMU_FEATURE_PIT_BIT, FEATURE_PIT), +}; + +// clang-format off +const struct cmn2asic_msg_mapping smu_v13_0_12_message_map[SMU_MSG_MAX_COUNT] = { + MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 0), + MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion, 1), + MSG_MAP(GetDriverIfVersion, PPSMC_MSG_GetDriverIfVersion, 1), + MSG_MAP(EnableAllSmuFeatures, PPSMC_MSG_EnableAllSmuFeatures, 0), + MSG_MAP(DisableAllSmuFeatures, PPSMC_MSG_DisableAllSmuFeatures, 0), + MSG_MAP(RequestI2cTransaction, PPSMC_MSG_RequestI2cTransaction, 0), + MSG_MAP(GetMetricsTable, PPSMC_MSG_GetMetricsTable, 1), + MSG_MAP(GetMetricsVersion, PPSMC_MSG_GetMetricsVersion, 1), + MSG_MAP(GetEnabledSmuFeaturesHigh, PPSMC_MSG_GetEnabledSmuFeaturesHigh, 1), + MSG_MAP(GetEnabledSmuFeaturesLow, PPSMC_MSG_GetEnabledSmuFeaturesLow, 1), + MSG_MAP(SetDriverDramAddrHigh, PPSMC_MSG_SetDriverDramAddrHigh, 1), + MSG_MAP(SetDriverDramAddrLow, PPSMC_MSG_SetDriverDramAddrLow, 1), + MSG_MAP(SetToolsDramAddrHigh, PPSMC_MSG_SetToolsDramAddrHigh, 0), + MSG_MAP(SetToolsDramAddrLow, PPSMC_MSG_SetToolsDramAddrLow, 0), + MSG_MAP(SetSoftMinByFreq, PPSMC_MSG_SetSoftMinByFreq, 0), + MSG_MAP(SetSoftMaxByFreq, PPSMC_MSG_SetSoftMaxByFreq, 1), + MSG_MAP(GetMinDpmFreq, PPSMC_MSG_GetMinDpmFreq, 1), + MSG_MAP(GetMaxDpmFreq, PPSMC_MSG_GetMaxDpmFreq, 1), + MSG_MAP(GetDpmFreqByIndex, PPSMC_MSG_GetDpmFreqByIndex, 1), + MSG_MAP(SetPptLimit, PPSMC_MSG_SetPptLimit, 0), + MSG_MAP(GetPptLimit, PPSMC_MSG_GetPptLimit, 1), + MSG_MAP(GfxDeviceDriverReset, PPSMC_MSG_GfxDriverReset, SMU_MSG_RAS_PRI), + MSG_MAP(DramLogSetDramAddrHigh, PPSMC_MSG_DramLogSetDramAddrHigh, 0), + 
MSG_MAP(DramLogSetDramAddrLow, PPSMC_MSG_DramLogSetDramAddrLow, 0), + MSG_MAP(DramLogSetDramSize, PPSMC_MSG_DramLogSetDramSize, 0), + MSG_MAP(GetDebugData, PPSMC_MSG_GetDebugData, 0), + MSG_MAP(SetNumBadHbmPagesRetired, PPSMC_MSG_SetNumBadHbmPagesRetired, 0), + MSG_MAP(DFCstateControl, PPSMC_MSG_DFCstateControl, 0), + MSG_MAP(GetGmiPwrDnHyst, PPSMC_MSG_GetGmiPwrDnHyst, 0), + MSG_MAP(SetGmiPwrDnHyst, PPSMC_MSG_SetGmiPwrDnHyst, 0), + MSG_MAP(GmiPwrDnControl, PPSMC_MSG_GmiPwrDnControl, 0), + MSG_MAP(EnterGfxoff, PPSMC_MSG_EnterGfxoff, 0), + MSG_MAP(ExitGfxoff, PPSMC_MSG_ExitGfxoff, 0), + MSG_MAP(EnableDeterminism, PPSMC_MSG_EnableDeterminism, 0), + MSG_MAP(DisableDeterminism, PPSMC_MSG_DisableDeterminism, 0), + MSG_MAP(GfxDriverResetRecovery, PPSMC_MSG_GfxDriverResetRecovery, 0), + MSG_MAP(GetMinGfxclkFrequency, PPSMC_MSG_GetMinGfxDpmFreq, 1), + MSG_MAP(GetMaxGfxclkFrequency, PPSMC_MSG_GetMaxGfxDpmFreq, 1), + MSG_MAP(SetSoftMinGfxclk, PPSMC_MSG_SetSoftMinGfxClk, 1), + MSG_MAP(SetSoftMaxGfxClk, PPSMC_MSG_SetSoftMaxGfxClk, 1), + MSG_MAP(PrepareMp1ForUnload, PPSMC_MSG_PrepareForDriverUnload, 0), + MSG_MAP(GetCTFLimit, PPSMC_MSG_GetCTFLimit, 0), + MSG_MAP(GetThermalLimit, PPSMC_MSG_ReadThrottlerLimit, 0), + MSG_MAP(ClearMcaOnRead, PPSMC_MSG_ClearMcaOnRead, 0), + MSG_MAP(QueryValidMcaCount, PPSMC_MSG_QueryValidMcaCount, SMU_MSG_RAS_PRI), + MSG_MAP(QueryValidMcaCeCount, PPSMC_MSG_QueryValidMcaCeCount, SMU_MSG_RAS_PRI), + MSG_MAP(McaBankDumpDW, PPSMC_MSG_McaBankDumpDW, SMU_MSG_RAS_PRI), + MSG_MAP(McaBankCeDumpDW, PPSMC_MSG_McaBankCeDumpDW, SMU_MSG_RAS_PRI), + MSG_MAP(SelectPLPDMode, PPSMC_MSG_SelectPLPDMode, 0), + MSG_MAP(RmaDueToBadPageThreshold, PPSMC_MSG_RmaDueToBadPageThreshold, 0), + MSG_MAP(SetThrottlingPolicy, PPSMC_MSG_SetThrottlingPolicy, 0), + MSG_MAP(ResetSDMA, PPSMC_MSG_ResetSDMA, 0), + MSG_MAP(GetStaticMetricsTable, PPSMC_MSG_GetStaticMetricsTable, 1), +}; + +static int smu_v13_0_12_get_enabled_mask(struct smu_context *smu, + uint64_t *feature_mask) +{ + int ret; + + ret = smu_cmn_get_enabled_mask(smu, feature_mask); + + if (ret == -EIO) { + *feature_mask = 0; + ret = 0; + } + + return ret; +} + +static int smu_v13_0_12_fru_get_product_info(struct smu_context *smu, + StaticMetricsTable_t *static_metrics) +{ + struct amdgpu_fru_info *fru_info; + struct amdgpu_device *adev = smu->adev; + + if (!adev->fru_info) { + adev->fru_info = kzalloc(sizeof(*adev->fru_info), GFP_KERNEL); + if (!adev->fru_info) + return -ENOMEM; + } + + fru_info = adev->fru_info; + strscpy(fru_info->product_number, static_metrics->ProductInfo.ModelNumber, + sizeof(fru_info->product_number)); + strscpy(fru_info->product_name, static_metrics->ProductInfo.Name, + sizeof(fru_info->product_name)); + strscpy(fru_info->serial, static_metrics->ProductInfo.Serial, + sizeof(fru_info->serial)); + strscpy(fru_info->manufacturer_name, static_metrics->ProductInfo.ManufacturerName, + sizeof(fru_info->manufacturer_name)); + strscpy(fru_info->fru_id, static_metrics->ProductInfo.FruId, + sizeof(fru_info->fru_id)); + + return 0; +} + +int smu_v13_0_12_get_max_metrics_size(void) +{ + return max(sizeof(StaticMetricsTable_t), sizeof(MetricsTable_t)); +} + +int smu_v13_0_12_setup_driver_pptable(struct smu_context *smu) +{ + struct smu_table_context *smu_table = &smu->smu_table; + StaticMetricsTable_t *static_metrics = (StaticMetricsTable_t *)smu_table->metrics_table; + struct PPTable_t *pptable = + (struct PPTable_t *)smu_table->driver_pptable; + uint32_t table_version; + int ret, i; + + if (!pptable->Init) { + ret = 
smu_v13_0_6_get_static_metrics_table(smu); + if (ret) + return ret; + + ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetMetricsVersion, + &table_version); + if (ret) + return ret; + smu_table->tables[SMU_TABLE_SMU_METRICS].version = + table_version; + + pptable->MaxSocketPowerLimit = + SMUQ10_ROUND(static_metrics->MaxSocketPowerLimit); + pptable->MaxGfxclkFrequency = + SMUQ10_ROUND(static_metrics->MaxGfxclkFrequency); + pptable->MinGfxclkFrequency = + SMUQ10_ROUND(static_metrics->MinGfxclkFrequency); + + for (i = 0; i < 4; ++i) { + pptable->FclkFrequencyTable[i] = + SMUQ10_ROUND(static_metrics->FclkFrequencyTable[i]); + pptable->UclkFrequencyTable[i] = + SMUQ10_ROUND(static_metrics->UclkFrequencyTable[i]); + pptable->SocclkFrequencyTable[i] = + SMUQ10_ROUND(static_metrics->SocclkFrequencyTable[i]); + pptable->VclkFrequencyTable[i] = + SMUQ10_ROUND(static_metrics->VclkFrequencyTable[i]); + pptable->DclkFrequencyTable[i] = + SMUQ10_ROUND(static_metrics->DclkFrequencyTable[i]); + pptable->LclkFrequencyTable[i] = + SMUQ10_ROUND(static_metrics->LclkFrequencyTable[i]); + } + + /* use AID0 serial number by default */ + pptable->PublicSerialNumber_AID = + static_metrics->PublicSerialNumber_AID[0]; + ret = smu_v13_0_12_fru_get_product_info(smu, static_metrics); + if (ret) + return ret; + + pptable->Init = true; + } + + return 0; +} + +bool smu_v13_0_12_is_dpm_running(struct smu_context *smu) +{ + int ret; + uint64_t feature_enabled; + + ret = smu_v13_0_12_get_enabled_mask(smu, &feature_enabled); + + if (ret) + return false; + + return !!(feature_enabled & SMC_DPM_FEATURE); +} + +int smu_v13_0_12_get_smu_metrics_data(struct smu_context *smu, + MetricsMember_t member, + uint32_t *value) +{ + struct smu_table_context *smu_table = &smu->smu_table; + MetricsTable_t *metrics = (MetricsTable_t *)smu_table->metrics_table; + struct amdgpu_device *adev = smu->adev; + int ret = 0; + int xcc_id; + + /* For clocks with multiple instances, only report the first one */ + switch (member) { + case METRICS_CURR_GFXCLK: + case METRICS_AVERAGE_GFXCLK: + xcc_id = GET_INST(GC, 0); + *value = SMUQ10_ROUND(metrics->GfxclkFrequency[xcc_id]); + break; + case METRICS_CURR_SOCCLK: + case METRICS_AVERAGE_SOCCLK: + *value = SMUQ10_ROUND(metrics->SocclkFrequency[0]); + break; + case METRICS_CURR_UCLK: + case METRICS_AVERAGE_UCLK: + *value = SMUQ10_ROUND(metrics->UclkFrequency); + break; + case METRICS_CURR_VCLK: + *value = SMUQ10_ROUND(metrics->VclkFrequency[0]); + break; + case METRICS_CURR_DCLK: + *value = SMUQ10_ROUND(metrics->DclkFrequency[0]); + break; + case METRICS_CURR_FCLK: + *value = SMUQ10_ROUND(metrics->FclkFrequency); + break; + case METRICS_AVERAGE_GFXACTIVITY: + *value = SMUQ10_ROUND(metrics->SocketGfxBusy); + break; + case METRICS_AVERAGE_MEMACTIVITY: + *value = SMUQ10_ROUND(metrics->DramBandwidthUtilization); + break; + case METRICS_CURR_SOCKETPOWER: + *value = SMUQ10_ROUND(metrics->SocketPower) << 8; + break; + case METRICS_TEMPERATURE_HOTSPOT: + *value = SMUQ10_ROUND(metrics->MaxSocketTemperature) * + SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + break; + case METRICS_TEMPERATURE_MEM: + *value = SMUQ10_ROUND(metrics->MaxHbmTemperature) * + SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + break; + /* This is the max of all VRs and not just SOC VR. + * No need to define another data type for the same. 
+ */ + case METRICS_TEMPERATURE_VRSOC: + *value = SMUQ10_ROUND(metrics->MaxVrTemperature) * + SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + break; + default: + *value = UINT_MAX; + break; + } + + return ret; +} + +ssize_t smu_v13_0_12_get_xcp_metrics(struct smu_context *smu, struct amdgpu_xcp *xcp, void *table, void *smu_metrics) +{ + const u8 num_jpeg_rings = NUM_JPEG_RINGS_FW; + struct amdgpu_partition_metrics_v1_0 *xcp_metrics; + struct amdgpu_device *adev = smu->adev; + MetricsTable_t *metrics; + int inst, j, k, idx; + u32 inst_mask; + + metrics = (MetricsTable_t *)smu_metrics; + xcp_metrics = (struct amdgpu_partition_metrics_v1_0 *) table; + smu_cmn_init_partition_metrics(xcp_metrics, 1, 0); + amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_VCN, &inst_mask); + idx = 0; + for_each_inst(k, inst_mask) { + /* Both JPEG and VCN has same instance */ + inst = GET_INST(VCN, k); + for (j = 0; j < num_jpeg_rings; ++j) { + xcp_metrics->jpeg_busy[(idx * num_jpeg_rings) + j] = + SMUQ10_ROUND(metrics-> + JpegBusy[(inst * num_jpeg_rings) + j]); + } + xcp_metrics->vcn_busy[idx] = + SMUQ10_ROUND(metrics->VcnBusy[inst]); + xcp_metrics->current_vclk0[idx] = SMUQ10_ROUND( + metrics->VclkFrequency[inst]); + xcp_metrics->current_dclk0[idx] = SMUQ10_ROUND( + metrics->DclkFrequency[inst]); + xcp_metrics->current_socclk[idx] = SMUQ10_ROUND( + metrics->SocclkFrequency[inst]); + + idx++; + } + + xcp_metrics->current_uclk = + SMUQ10_ROUND(metrics->UclkFrequency); + + amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_GFX, &inst_mask); + idx = 0; + for_each_inst(k, inst_mask) { + inst = GET_INST(GC, k); + xcp_metrics->current_gfxclk[idx] = SMUQ10_ROUND(metrics->GfxclkFrequency[inst]); + xcp_metrics->gfx_busy_inst[idx] = SMUQ10_ROUND(metrics->GfxBusy[inst]); + xcp_metrics->gfx_busy_acc[idx] = SMUQ10_ROUND(metrics->GfxBusyAcc[inst]); + if (smu_v13_0_6_cap_supported(smu, SMU_CAP(HST_LIMIT_METRICS))) { + xcp_metrics->gfx_below_host_limit_ppt_acc[idx] = SMUQ10_ROUND(metrics->GfxclkBelowHostLimitPptAcc[inst]); + xcp_metrics->gfx_below_host_limit_thm_acc[idx] = SMUQ10_ROUND(metrics->GfxclkBelowHostLimitThmAcc[inst]); + xcp_metrics->gfx_low_utilization_acc[idx] = SMUQ10_ROUND(metrics->GfxclkLowUtilizationAcc[inst]); + xcp_metrics->gfx_below_host_limit_total_acc[idx] = SMUQ10_ROUND(metrics->GfxclkBelowHostLimitTotalAcc[inst]); + } + idx++; + } + + return sizeof(*xcp_metrics); +} + +ssize_t smu_v13_0_12_get_gpu_metrics(struct smu_context *smu, void **table, void *smu_metrics) +{ + struct smu_table_context *smu_table = &smu->smu_table; + struct gpu_metrics_v1_8 *gpu_metrics = + (struct gpu_metrics_v1_8 *)smu_table->gpu_metrics_table; + int ret = 0, xcc_id, inst, i, j, k, idx; + struct amdgpu_device *adev = smu->adev; + u8 num_jpeg_rings_gpu_metrics; + MetricsTable_t *metrics; + struct amdgpu_xcp *xcp; + u32 inst_mask; + + metrics = (MetricsTable_t *)smu_metrics; + + smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 8); + + gpu_metrics->temperature_hotspot = + SMUQ10_ROUND(metrics->MaxSocketTemperature); + /* Individual HBM stack temperature is not reported */ + gpu_metrics->temperature_mem = + SMUQ10_ROUND(metrics->MaxHbmTemperature); + /* Reports max temperature of all voltage rails */ + gpu_metrics->temperature_vrsoc = + SMUQ10_ROUND(metrics->MaxVrTemperature); + + gpu_metrics->average_gfx_activity = + SMUQ10_ROUND(metrics->SocketGfxBusy); + gpu_metrics->average_umc_activity = + SMUQ10_ROUND(metrics->DramBandwidthUtilization); + + gpu_metrics->mem_max_bandwidth = + SMUQ10_ROUND(metrics->MaxDramBandwidth); + + gpu_metrics->curr_socket_power = 
+ SMUQ10_ROUND(metrics->SocketPower); + /* Energy counter reported in 15.259uJ (2^-16) units */ + gpu_metrics->energy_accumulator = metrics->SocketEnergyAcc; + + for (i = 0; i < MAX_GFX_CLKS; i++) { + xcc_id = GET_INST(GC, i); + if (xcc_id >= 0) + gpu_metrics->current_gfxclk[i] = + SMUQ10_ROUND(metrics->GfxclkFrequency[xcc_id]); + + if (i < MAX_CLKS) { + gpu_metrics->current_socclk[i] = + SMUQ10_ROUND(metrics->SocclkFrequency[i]); + inst = GET_INST(VCN, i); + if (inst >= 0) { + gpu_metrics->current_vclk0[i] = + SMUQ10_ROUND(metrics->VclkFrequency[inst]); + gpu_metrics->current_dclk0[i] = + SMUQ10_ROUND(metrics->DclkFrequency[inst]); + } + } + } + + gpu_metrics->current_uclk = SMUQ10_ROUND(metrics->UclkFrequency); + + /* Total accumulated cycle counter */ + gpu_metrics->accumulation_counter = metrics->AccumulationCounter; + + /* Accumulated throttler residencies */ + gpu_metrics->prochot_residency_acc = metrics->ProchotResidencyAcc; + gpu_metrics->ppt_residency_acc = metrics->PptResidencyAcc; + gpu_metrics->socket_thm_residency_acc = metrics->SocketThmResidencyAcc; + gpu_metrics->vr_thm_residency_acc = metrics->VrThmResidencyAcc; + gpu_metrics->hbm_thm_residency_acc = metrics->HbmThmResidencyAcc; + + /* Clock Lock Status. Each bit corresponds to each GFXCLK instance */ + gpu_metrics->gfxclk_lock_status = metrics->GfxLockXCDMak >> GET_INST(GC, 0); + + gpu_metrics->pcie_link_width = metrics->PCIeLinkWidth; + gpu_metrics->pcie_link_speed = + pcie_gen_to_speed(metrics->PCIeLinkSpeed); + gpu_metrics->pcie_bandwidth_acc = + SMUQ10_ROUND(metrics->PcieBandwidthAcc[0]); + gpu_metrics->pcie_bandwidth_inst = + SMUQ10_ROUND(metrics->PcieBandwidth[0]); + gpu_metrics->pcie_l0_to_recov_count_acc = metrics->PCIeL0ToRecoveryCountAcc; + gpu_metrics->pcie_replay_count_acc = metrics->PCIenReplayAAcc; + gpu_metrics->pcie_replay_rover_count_acc = + metrics->PCIenReplayARolloverCountAcc; + gpu_metrics->pcie_nak_sent_count_acc = metrics->PCIeNAKSentCountAcc; + gpu_metrics->pcie_nak_rcvd_count_acc = metrics->PCIeNAKReceivedCountAcc; + gpu_metrics->pcie_lc_perf_other_end_recovery = metrics->PCIeOtherEndRecoveryAcc; + + gpu_metrics->system_clock_counter = ktime_get_boottime_ns(); + + gpu_metrics->gfx_activity_acc = SMUQ10_ROUND(metrics->SocketGfxBusyAcc); + gpu_metrics->mem_activity_acc = SMUQ10_ROUND(metrics->DramBandwidthUtilizationAcc); + + for (i = 0; i < NUM_XGMI_LINKS; i++) { + j = amdgpu_xgmi_get_ext_link(adev, i); + if (j < 0 || j >= NUM_XGMI_LINKS) + continue; + gpu_metrics->xgmi_read_data_acc[j] = + SMUQ10_ROUND(metrics->XgmiReadDataSizeAcc[i]); + gpu_metrics->xgmi_write_data_acc[j] = + SMUQ10_ROUND(metrics->XgmiWriteDataSizeAcc[i]); + ret = amdgpu_get_xgmi_link_status(adev, i); + if (ret >= 0) + gpu_metrics->xgmi_link_status[j] = ret; + } + + gpu_metrics->num_partition = adev->xcp_mgr->num_xcps; + + num_jpeg_rings_gpu_metrics = NUM_JPEG_RINGS_GPU_METRICS(gpu_metrics); + for_each_xcp(adev->xcp_mgr, xcp, i) { + amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_VCN, &inst_mask); + idx = 0; + for_each_inst(k, inst_mask) { + /* Both JPEG and VCN has same instances */ + inst = GET_INST(VCN, k); + + for (j = 0; j < num_jpeg_rings_gpu_metrics; ++j) { + gpu_metrics->xcp_stats[i].jpeg_busy + [(idx * num_jpeg_rings_gpu_metrics) + j] = + SMUQ10_ROUND(metrics->JpegBusy + [(inst * NUM_JPEG_RINGS_FW) + j]); + } + gpu_metrics->xcp_stats[i].vcn_busy[idx] = + SMUQ10_ROUND(metrics->VcnBusy[inst]); + idx++; + } + + amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_GFX, &inst_mask); + idx = 0; + for_each_inst(k, inst_mask) { + inst = 
GET_INST(GC, k); + gpu_metrics->xcp_stats[i].gfx_busy_inst[idx] = + SMUQ10_ROUND(metrics->GfxBusy[inst]); + gpu_metrics->xcp_stats[i].gfx_busy_acc[idx] = + SMUQ10_ROUND(metrics->GfxBusyAcc[inst]); + if (smu_v13_0_6_cap_supported(smu, SMU_CAP(HST_LIMIT_METRICS))) { + gpu_metrics->xcp_stats[i].gfx_below_host_limit_ppt_acc[idx] = + SMUQ10_ROUND(metrics->GfxclkBelowHostLimitPptAcc[inst]); + gpu_metrics->xcp_stats[i].gfx_below_host_limit_thm_acc[idx] = + SMUQ10_ROUND(metrics->GfxclkBelowHostLimitThmAcc[inst]); + gpu_metrics->xcp_stats[i].gfx_low_utilization_acc[idx] = + SMUQ10_ROUND(metrics->GfxclkLowUtilizationAcc[inst]); + gpu_metrics->xcp_stats[i].gfx_below_host_limit_total_acc[idx] = + SMUQ10_ROUND(metrics->GfxclkBelowHostLimitTotalAcc[inst]); + } + idx++; + } + } + + gpu_metrics->xgmi_link_width = metrics->XgmiWidth; + gpu_metrics->xgmi_link_speed = metrics->XgmiBitrate; + + gpu_metrics->firmware_timestamp = metrics->Timestamp; + + *table = (void *)gpu_metrics; + + return sizeof(*gpu_metrics); +} diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c index 949131bd1ecb..b081ae3e8f43 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c @@ -227,14 +227,16 @@ static int smu_v13_0_4_system_features_control(struct smu_context *smu, bool en) int ret = 0; if (!en && !adev->in_s0ix) { - /* Adds a GFX reset as workaround just before sending the - * MP1_UNLOAD message to prevent GC/RLC/PMFW from entering - * an invalid state. - */ - ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset, - SMU_RESET_MODE_2, NULL); - if (ret) - return ret; + if (adev->in_s4) { + /* Adds a GFX reset as workaround just before sending the + * MP1_UNLOAD message to prevent GC/RLC/PMFW from entering + * an invalid state. 
+ */ + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset, + SMU_RESET_MODE_2, NULL); + if (ret) + return ret; + } ret = smu_cmn_send_smc_msg(smu, SMU_MSG_PrepareMp1ForUnload, NULL); } @@ -328,7 +330,7 @@ static int smu_v13_0_4_get_smu_metrics_data(struct smu_context *smu, *value = metrics->GfxActivity / 100; break; case METRICS_AVERAGE_VCNACTIVITY: - *value = metrics->UvdActivity; + *value = metrics->UvdActivity / 100; break; case METRICS_AVERAGE_SOCKETPOWER: *value = (metrics->AverageSocketPower << 8) / 1000; @@ -582,6 +584,12 @@ static int smu_v13_0_4_read_sensor(struct smu_context *smu, (uint32_t *)data); *size = 4; break; + case AMDGPU_PP_SENSOR_VCN_LOAD: + ret = smu_v13_0_4_get_smu_metrics_data(smu, + METRICS_AVERAGE_VCNACTIVITY, + (uint32_t *)data); + *size = 4; + break; case AMDGPU_PP_SENSOR_GPU_AVG_POWER: ret = smu_v13_0_4_get_smu_metrics_data(smu, METRICS_AVERAGE_SOCKETPOWER, @@ -750,31 +758,9 @@ static int smu_v13_0_4_get_dpm_ultimate_freq(struct smu_context *smu, int ret = 0; if (!smu_v13_0_4_clk_dpm_is_enabled(smu, clk_type)) { - switch (clk_type) { - case SMU_MCLK: - case SMU_UCLK: - clock_limit = smu->smu_table.boot_values.uclk; - break; - case SMU_FCLK: - clock_limit = smu->smu_table.boot_values.fclk; - break; - case SMU_GFXCLK: - case SMU_SCLK: - clock_limit = smu->smu_table.boot_values.gfxclk; - break; - case SMU_SOCCLK: - clock_limit = smu->smu_table.boot_values.socclk; - break; - case SMU_VCLK: - clock_limit = smu->smu_table.boot_values.vclk; - break; - case SMU_DCLK: - clock_limit = smu->smu_table.boot_values.dclk; - break; - default: - clock_limit = 0; - break; - } + ret = smu_v13_0_get_boot_freq_by_index(smu, clk_type, &clock_limit); + if (ret) + return ret; /* clock in Mhz unit */ if (min) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c index 0dce672ac1b9..f5db181ef489 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c @@ -193,7 +193,9 @@ static int smu_v13_0_5_system_features_control(struct smu_context *smu, bool en) return ret; } -static int smu_v13_0_5_dpm_set_vcn_enable(struct smu_context *smu, bool enable) +static int smu_v13_0_5_dpm_set_vcn_enable(struct smu_context *smu, + bool enable, + int inst) { int ret = 0; @@ -286,7 +288,7 @@ static int smu_v13_0_5_get_smu_metrics_data(struct smu_context *smu, *value = metrics->GfxActivity / 100; break; case METRICS_AVERAGE_VCNACTIVITY: - *value = metrics->UvdActivity; + *value = metrics->UvdActivity / 100; break; case METRICS_CURR_SOCKETPOWER: *value = (metrics->CurrentSocketPower << 8) / 1000; @@ -332,6 +334,12 @@ static int smu_v13_0_5_read_sensor(struct smu_context *smu, (uint32_t *)data); *size = 4; break; + case AMDGPU_PP_SENSOR_VCN_LOAD: + ret = smu_v13_0_5_get_smu_metrics_data(smu, + METRICS_AVERAGE_VCNACTIVITY, + (uint32_t *)data); + *size = 4; + break; case AMDGPU_PP_SENSOR_GPU_INPUT_POWER: ret = smu_v13_0_5_get_smu_metrics_data(smu, METRICS_CURR_SOCKETPOWER, @@ -637,7 +645,7 @@ static int smu_v13_0_5_get_dpm_level_count(struct smu_context *smu, *count = clk_table->NumDfPstatesEnabled; break; default: - break; + return -EINVAL; } return 0; @@ -727,31 +735,9 @@ static int smu_v13_0_5_get_dpm_ultimate_freq(struct smu_context *smu, int ret = 0; if (!smu_v13_0_5_clk_dpm_is_enabled(smu, clk_type)) { - switch (clk_type) { - case SMU_MCLK: - case SMU_UCLK: - clock_limit = smu->smu_table.boot_values.uclk; - break; - case SMU_FCLK: - clock_limit = 
smu->smu_table.boot_values.fclk; - break; - case SMU_GFXCLK: - case SMU_SCLK: - clock_limit = smu->smu_table.boot_values.gfxclk; - break; - case SMU_SOCCLK: - clock_limit = smu->smu_table.boot_values.socclk; - break; - case SMU_VCLK: - clock_limit = smu->smu_table.boot_values.vclk; - break; - case SMU_DCLK: - clock_limit = smu->smu_table.boot_values.dclk; - break; - default: - clock_limit = 0; - break; - } + ret = smu_v13_0_get_boot_freq_by_index(smu, clk_type, &clock_limit); + if (ret) + return ret; /* clock in Mhz unit */ if (min) @@ -827,9 +813,10 @@ failed: } static int smu_v13_0_5_set_soft_freq_limited_range(struct smu_context *smu, - enum smu_clk_type clk_type, - uint32_t min, - uint32_t max) + enum smu_clk_type clk_type, + uint32_t min, + uint32_t max, + bool automatic) { enum smu_message_type msg_set_min, msg_set_max; uint32_t min_clk = min; @@ -966,7 +953,7 @@ static int smu_v13_0_5_force_clk_levels(struct smu_context *smu, if (ret) goto force_level_out; - ret = smu_v13_0_5_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq); + ret = smu_v13_0_5_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq, false); if (ret) goto force_level_out; break; @@ -1062,9 +1049,10 @@ static int smu_v13_0_5_set_performance_level(struct smu_context *smu, if (sclk_min && sclk_max) { ret = smu_v13_0_5_set_soft_freq_limited_range(smu, - SMU_SCLK, - sclk_min, - sclk_max); + SMU_SCLK, + sclk_min, + sclk_max, + false); if (ret) return ret; @@ -1076,7 +1064,8 @@ static int smu_v13_0_5_set_performance_level(struct smu_context *smu, ret = smu_v13_0_5_set_soft_freq_limited_range(smu, SMU_VCLK, vclk_min, - vclk_max); + vclk_max, + false); if (ret) return ret; } @@ -1085,7 +1074,8 @@ static int smu_v13_0_5_set_performance_level(struct smu_context *smu, ret = smu_v13_0_5_set_soft_freq_limited_range(smu, SMU_DCLK, dclk_min, - dclk_max); + dclk_max, + false); if (ret) return ret; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c index 3957af057d54..f00ef7f3f355 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c @@ -68,6 +68,7 @@ #undef pr_debug MODULE_FIRMWARE("amdgpu/smu_13_0_6.bin"); +MODULE_FIRMWARE("amdgpu/smu_13_0_14.bin"); #define to_amdgpu_device(x) (container_of(x, struct amdgpu_device, pm.smu_i2c)) @@ -95,7 +96,6 @@ MODULE_FIRMWARE("amdgpu/smu_13_0_6.bin"); #define PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK 0xE0 #define PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT 0x5 #define LINK_SPEED_MAX 4 - #define SMU_13_0_6_DSCLK_THRESHOLD 140 #define MCA_BANK_IPID(_ip, _hwid, _type) \ @@ -120,6 +120,7 @@ struct mca_ras_info { #define P2S_TABLE_ID_A 0x50325341 #define P2S_TABLE_ID_X 0x50325358 +#define P2S_TABLE_ID_3 0x50325303 // clang-format off static const struct cmn2asic_msg_mapping smu_v13_0_6_message_map[SMU_MSG_MAX_COUNT] = { @@ -138,13 +139,13 @@ static const struct cmn2asic_msg_mapping smu_v13_0_6_message_map[SMU_MSG_MAX_COU MSG_MAP(SetToolsDramAddrHigh, PPSMC_MSG_SetToolsDramAddrHigh, 0), MSG_MAP(SetToolsDramAddrLow, PPSMC_MSG_SetToolsDramAddrLow, 0), MSG_MAP(SetSoftMinByFreq, PPSMC_MSG_SetSoftMinByFreq, 0), - MSG_MAP(SetSoftMaxByFreq, PPSMC_MSG_SetSoftMaxByFreq, 0), + MSG_MAP(SetSoftMaxByFreq, PPSMC_MSG_SetSoftMaxByFreq, 1), MSG_MAP(GetMinDpmFreq, PPSMC_MSG_GetMinDpmFreq, 1), MSG_MAP(GetMaxDpmFreq, PPSMC_MSG_GetMaxDpmFreq, 1), MSG_MAP(GetDpmFreqByIndex, PPSMC_MSG_GetDpmFreqByIndex, 1), MSG_MAP(SetPptLimit, PPSMC_MSG_SetPptLimit, 0), 
MSG_MAP(GetPptLimit, PPSMC_MSG_GetPptLimit, 1), - MSG_MAP(GfxDeviceDriverReset, PPSMC_MSG_GfxDriverReset, 0), + MSG_MAP(GfxDeviceDriverReset, PPSMC_MSG_GfxDriverReset, SMU_MSG_RAS_PRI), MSG_MAP(DramLogSetDramAddrHigh, PPSMC_MSG_DramLogSetDramAddrHigh, 0), MSG_MAP(DramLogSetDramAddrLow, PPSMC_MSG_DramLogSetDramAddrLow, 0), MSG_MAP(DramLogSetDramSize, PPSMC_MSG_DramLogSetDramSize, 0), @@ -167,12 +168,16 @@ static const struct cmn2asic_msg_mapping smu_v13_0_6_message_map[SMU_MSG_MAX_COU MSG_MAP(GetCTFLimit, PPSMC_MSG_GetCTFLimit, 0), MSG_MAP(GetThermalLimit, PPSMC_MSG_ReadThrottlerLimit, 0), MSG_MAP(ClearMcaOnRead, PPSMC_MSG_ClearMcaOnRead, 0), - MSG_MAP(QueryValidMcaCount, PPSMC_MSG_QueryValidMcaCount, 0), - MSG_MAP(QueryValidMcaCeCount, PPSMC_MSG_QueryValidMcaCeCount, 0), - MSG_MAP(McaBankDumpDW, PPSMC_MSG_McaBankDumpDW, 0), - MSG_MAP(McaBankCeDumpDW, PPSMC_MSG_McaBankCeDumpDW, 0), + MSG_MAP(QueryValidMcaCount, PPSMC_MSG_QueryValidMcaCount, SMU_MSG_RAS_PRI), + MSG_MAP(QueryValidMcaCeCount, PPSMC_MSG_QueryValidMcaCeCount, SMU_MSG_RAS_PRI), + MSG_MAP(McaBankDumpDW, PPSMC_MSG_McaBankDumpDW, SMU_MSG_RAS_PRI), + MSG_MAP(McaBankCeDumpDW, PPSMC_MSG_McaBankCeDumpDW, SMU_MSG_RAS_PRI), MSG_MAP(SelectPLPDMode, PPSMC_MSG_SelectPLPDMode, 0), MSG_MAP(RmaDueToBadPageThreshold, PPSMC_MSG_RmaDueToBadPageThreshold, 0), + MSG_MAP(SetThrottlingPolicy, PPSMC_MSG_SetThrottlingPolicy, 0), + MSG_MAP(ResetSDMA, PPSMC_MSG_ResetSDMA, 0), + MSG_MAP(ResetVCN, PPSMC_MSG_ResetVCN, 0), + MSG_MAP(GetStaticMetricsTable, PPSMC_MSG_GetStaticMetricsTable, 0), }; // clang-format on @@ -209,7 +214,11 @@ static const struct cmn2asic_mapping smu_v13_0_6_feature_mask_map[SMU_FEATURE_CO SMU_13_0_6_FEA_MAP(SMU_FEATURE_FW_CTF_BIT, FEATURE_FW_CTF), SMU_13_0_6_FEA_MAP(SMU_FEATURE_THERMAL_BIT, FEATURE_THERMAL), SMU_13_0_6_FEA_MAP(SMU_FEATURE_XGMI_PER_LINK_PWR_DWN_BIT, FEATURE_XGMI_PER_LINK_PWR_DOWN), - SMU_13_0_6_FEA_MAP(SMU_FEATURE_DF_CSTATE_BIT, FEATURE_DF_CSTATE), + SMU_13_0_6_FEA_MAP(SMU_FEATURE_DF_CSTATE_BIT, FEATURE_DF_CSTATE), + SMU_13_0_6_FEA_MAP(SMU_FEATURE_DS_VCN_BIT, FEATURE_DS_VCN), + SMU_13_0_6_FEA_MAP(SMU_FEATURE_DS_MP1CLK_BIT, FEATURE_DS_MP1CLK), + SMU_13_0_6_FEA_MAP(SMU_FEATURE_DS_MPIOCLK_BIT, FEATURE_DS_MPIOCLK), + SMU_13_0_6_FEA_MAP(SMU_FEATURE_DS_MP0CLK_BIT, FEATURE_DS_MP0CLK), }; #define TABLE_PMSTATUSLOG 0 @@ -231,27 +240,13 @@ static const uint8_t smu_v13_0_6_throttler_map[] = { [THROTTLER_PROCHOT_BIT] = (SMU_THROTTLER_PROCHOT_GFX_BIT), }; -struct PPTable_t { - uint32_t MaxSocketPowerLimit; - uint32_t MaxGfxclkFrequency; - uint32_t MinGfxclkFrequency; - uint32_t FclkFrequencyTable[4]; - uint32_t UclkFrequencyTable[4]; - uint32_t SocclkFrequencyTable[4]; - uint32_t VclkFrequencyTable[4]; - uint32_t DclkFrequencyTable[4]; - uint32_t LclkFrequencyTable[4]; - uint32_t MaxLclkDpmRange; - uint32_t MinLclkDpmRange; - uint64_t PublicSerialNumber_AID; - bool Init; -}; - -#define SMUQ10_TO_UINT(x) ((x) >> 10) -#define SMUQ10_FRAC(x) ((x) & 0x3ff) -#define SMUQ10_ROUND(x) ((SMUQ10_TO_UINT(x)) + ((SMUQ10_FRAC(x)) >= 0x200)) -#define GET_METRIC_FIELD(field) ((adev->flags & AMD_IS_APU) ?\ - (metrics_a->field) : (metrics_x->field)) +#define GET_GPU_METRIC_FIELD(field, version) ((version == METRICS_VERSION_V0) ?\ + (metrics_v0->field) : (metrics_v2->field)) +#define GET_METRIC_FIELD(field, version) ((version == METRICS_VERSION_V1) ?\ + (metrics_v1->field) : GET_GPU_METRIC_FIELD(field, version)) +#define METRICS_TABLE_SIZE (max3(sizeof(MetricsTableV0_t),\ + sizeof(MetricsTableV1_t),\ + sizeof(MetricsTableV2_t))) struct 
smu_v13_0_6_dpm_map { enum smu_clk_type clk_type; @@ -260,6 +255,187 @@ struct smu_v13_0_6_dpm_map { uint32_t *freq_table; }; +static inline int smu_v13_0_6_get_metrics_version(struct smu_context *smu) +{ + if ((smu->adev->flags & AMD_IS_APU) && + smu->smc_fw_version <= 0x4556900) + return METRICS_VERSION_V1; + else if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == + IP_VERSION(13, 0, 12)) + return METRICS_VERSION_V2; + + return METRICS_VERSION_V0; +} + +static inline void smu_v13_0_6_cap_set(struct smu_context *smu, + enum smu_v13_0_6_caps cap) +{ + struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context; + + dpm_context->caps |= BIT_ULL(cap); +} + +static inline void smu_v13_0_6_cap_clear(struct smu_context *smu, + enum smu_v13_0_6_caps cap) +{ + struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context; + + dpm_context->caps &= ~BIT_ULL(cap); +} + +bool smu_v13_0_6_cap_supported(struct smu_context *smu, + enum smu_v13_0_6_caps cap) +{ + struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context; + + return !!(dpm_context->caps & BIT_ULL(cap)); +} + +static void smu_v13_0_14_init_caps(struct smu_context *smu) +{ + enum smu_v13_0_6_caps default_cap_list[] = { SMU_CAP(DPM), + SMU_CAP(SET_UCLK_MAX), + SMU_CAP(DPM_POLICY), + SMU_CAP(PCIE_METRICS), + SMU_CAP(CTF_LIMIT), + SMU_CAP(MCA_DEBUG_MODE), + SMU_CAP(RMA_MSG), + SMU_CAP(ACA_SYND) }; + uint32_t fw_ver = smu->smc_fw_version; + + for (int i = 0; i < ARRAY_SIZE(default_cap_list); i++) + smu_v13_0_6_cap_set(smu, default_cap_list[i]); + + if (fw_ver >= 0x05550E00) + smu_v13_0_6_cap_set(smu, SMU_CAP(OTHER_END_METRICS)); + if (fw_ver >= 0x05550B00) + smu_v13_0_6_cap_set(smu, SMU_CAP(PER_INST_METRICS)); + if (fw_ver >= 0x5551200) + smu_v13_0_6_cap_set(smu, SMU_CAP(SDMA_RESET)); + if (fw_ver >= 0x5551600) { + smu_v13_0_6_cap_set(smu, SMU_CAP(STATIC_METRICS)); + smu_v13_0_6_cap_set(smu, SMU_CAP(BOARD_VOLTAGE)); + smu_v13_0_6_cap_set(smu, SMU_CAP(PLDM_VERSION)); + } +} + +static void smu_v13_0_12_init_caps(struct smu_context *smu) +{ + enum smu_v13_0_6_caps default_cap_list[] = { SMU_CAP(DPM), + SMU_CAP(PCIE_METRICS), + SMU_CAP(CTF_LIMIT), + SMU_CAP(MCA_DEBUG_MODE), + SMU_CAP(RMA_MSG), + SMU_CAP(ACA_SYND), + SMU_CAP(OTHER_END_METRICS), + SMU_CAP(PER_INST_METRICS) }; + uint32_t fw_ver = smu->smc_fw_version; + + for (int i = 0; i < ARRAY_SIZE(default_cap_list); i++) + smu_v13_0_6_cap_set(smu, default_cap_list[i]); + + if (fw_ver < 0x00561900) + smu_v13_0_6_cap_clear(smu, SMU_CAP(DPM)); + + if (fw_ver >= 0x00561700) + smu_v13_0_6_cap_set(smu, SMU_CAP(SDMA_RESET)); + + if (fw_ver >= 0x00561E00) + smu_v13_0_6_cap_set(smu, SMU_CAP(STATIC_METRICS)); + + if (fw_ver >= 0x00562500) + smu_v13_0_6_cap_set(smu, SMU_CAP(HST_LIMIT_METRICS)); +} + +static void smu_v13_0_6_init_caps(struct smu_context *smu) +{ + enum smu_v13_0_6_caps default_cap_list[] = { SMU_CAP(DPM), + SMU_CAP(SET_UCLK_MAX), + SMU_CAP(DPM_POLICY), + SMU_CAP(PCIE_METRICS), + SMU_CAP(CTF_LIMIT), + SMU_CAP(MCA_DEBUG_MODE), + SMU_CAP(RMA_MSG), + SMU_CAP(ACA_SYND) }; + struct amdgpu_device *adev = smu->adev; + uint32_t fw_ver = smu->smc_fw_version; + uint32_t pgm = (fw_ver >> 24) & 0xFF; + + for (int i = 0; i < ARRAY_SIZE(default_cap_list); i++) + smu_v13_0_6_cap_set(smu, default_cap_list[i]); + + if (fw_ver < 0x552F00) + smu_v13_0_6_cap_clear(smu, SMU_CAP(DPM)); + if (fw_ver < 0x554500) + smu_v13_0_6_cap_clear(smu, SMU_CAP(CTF_LIMIT)); + + if (adev->flags & AMD_IS_APU) { + smu_v13_0_6_cap_clear(smu, SMU_CAP(PCIE_METRICS)); + smu_v13_0_6_cap_clear(smu, 
SMU_CAP(DPM_POLICY)); + smu_v13_0_6_cap_clear(smu, SMU_CAP(RMA_MSG)); + smu_v13_0_6_cap_clear(smu, SMU_CAP(ACA_SYND)); + + if (fw_ver >= 0x04556A00) + smu_v13_0_6_cap_set(smu, SMU_CAP(PER_INST_METRICS)); + } else { + if (fw_ver >= 0x557600) + smu_v13_0_6_cap_set(smu, SMU_CAP(OTHER_END_METRICS)); + if (fw_ver < 0x00556000) + smu_v13_0_6_cap_clear(smu, SMU_CAP(DPM_POLICY)); + if (amdgpu_sriov_vf(adev) && (fw_ver < 0x556600)) + smu_v13_0_6_cap_clear(smu, SMU_CAP(SET_UCLK_MAX)); + if (fw_ver < 0x556300) + smu_v13_0_6_cap_clear(smu, SMU_CAP(PCIE_METRICS)); + if (fw_ver < 0x554800) + smu_v13_0_6_cap_clear(smu, SMU_CAP(MCA_DEBUG_MODE)); + if (fw_ver >= 0x556F00) + smu_v13_0_6_cap_set(smu, SMU_CAP(PER_INST_METRICS)); + if (fw_ver < 0x00555a00) + smu_v13_0_6_cap_clear(smu, SMU_CAP(RMA_MSG)); + if (fw_ver < 0x00555600) + smu_v13_0_6_cap_clear(smu, SMU_CAP(ACA_SYND)); + if ((pgm == 7 && fw_ver >= 0x7550E00) || + (pgm == 0 && fw_ver >= 0x00557E00)) + smu_v13_0_6_cap_set(smu, SMU_CAP(HST_LIMIT_METRICS)); + if ((pgm == 0 && fw_ver >= 0x00557F01) || + (pgm == 7 && fw_ver >= 0x7551000)) { + smu_v13_0_6_cap_set(smu, SMU_CAP(STATIC_METRICS)); + smu_v13_0_6_cap_set(smu, SMU_CAP(BOARD_VOLTAGE)); + } + if ((pgm == 0 && fw_ver >= 0x00558000) || + (pgm == 7 && fw_ver >= 0x7551000)) + smu_v13_0_6_cap_set(smu, SMU_CAP(PLDM_VERSION)); + } + if (((pgm == 7) && (fw_ver >= 0x7550700)) || + ((pgm == 0) && (fw_ver >= 0x00557900)) || + ((pgm == 4) && (fw_ver >= 0x4557000))) + smu_v13_0_6_cap_set(smu, SMU_CAP(SDMA_RESET)); +} + +static void smu_v13_0_x_init_caps(struct smu_context *smu) +{ + switch (amdgpu_ip_version(smu->adev, MP1_HWIP, 0)) { + case IP_VERSION(13, 0, 12): + return smu_v13_0_12_init_caps(smu); + case IP_VERSION(13, 0, 14): + return smu_v13_0_14_init_caps(smu); + default: + return smu_v13_0_6_init_caps(smu); + } +} + +static int smu_v13_0_6_check_fw_version(struct smu_context *smu) +{ + int r; + + r = smu_v13_0_check_fw_version(smu); + /* Initialize caps flags once fw version is fetched */ + if (!r) + smu_v13_0_x_init_caps(smu); + + return r; +} + static int smu_v13_0_6_init_microcode(struct smu_context *smu) { const struct smc_firmware_header_v2_1 *v2_1; @@ -269,22 +445,24 @@ static int smu_v13_0_6_init_microcode(struct smu_context *smu) struct amdgpu_device *adev = smu->adev; uint32_t p2s_table_id = P2S_TABLE_ID_A; int ret = 0, i, p2stable_count; + int var = (adev->pdev->device & 0xF); char ucode_prefix[15]; - char fw_name[30]; - /* No need to load P2S tables in IOV mode */ - if (amdgpu_sriov_vf(adev)) + /* No need to load P2S tables in IOV mode or for smu v13.0.12 */ + if (amdgpu_sriov_vf(adev) || + (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 12))) return 0; - if (!(adev->flags & AMD_IS_APU)) + if (!(adev->flags & AMD_IS_APU)) { p2s_table_id = P2S_TABLE_ID_X; + if (var == 0x5) + p2s_table_id = P2S_TABLE_ID_3; + } amdgpu_ucode_ip_version_decode(adev, MP1_HWIP, ucode_prefix, sizeof(ucode_prefix)); - - snprintf(fw_name, sizeof(fw_name), "amdgpu/%s.bin", ucode_prefix); - - ret = amdgpu_ucode_request(adev, &adev->pm.fw, fw_name); + ret = amdgpu_ucode_request(adev, &adev->pm.fw, AMDGPU_UCODE_REQUIRED, + "amdgpu/%s.bin", ucode_prefix); if (ret) goto out; @@ -329,13 +507,15 @@ static int smu_v13_0_6_tables_init(struct smu_context *smu) struct smu_table_context *smu_table = &smu->smu_table; struct smu_table *tables = smu_table->tables; struct amdgpu_device *adev = smu->adev; + int gpu_metrcs_size = METRICS_TABLE_SIZE; if (!(adev->flags & AMD_IS_APU)) SMU_TABLE_INIT(tables, 
SMU_TABLE_PMSTATUSLOG, SMU13_TOOL_SIZE, PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, - max(sizeof(MetricsTableX_t), sizeof(MetricsTableA_t)), + max(gpu_metrcs_size, + smu_v13_0_12_get_max_metrics_size()), PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT); @@ -343,13 +523,12 @@ static int smu_v13_0_6_tables_init(struct smu_context *smu) PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT); - smu_table->metrics_table = kzalloc(max(sizeof(MetricsTableX_t), - sizeof(MetricsTableA_t)), GFP_KERNEL); + smu_table->metrics_table = kzalloc(METRICS_TABLE_SIZE, GFP_KERNEL); if (!smu_table->metrics_table) return -ENOMEM; smu_table->metrics_time = 0; - smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_5); + smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_8); smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL); if (!smu_table->gpu_metrics_table) { @@ -368,9 +547,78 @@ static int smu_v13_0_6_tables_init(struct smu_context *smu) return 0; } +static int smu_v13_0_6_select_policy_soc_pstate(struct smu_context *smu, + int policy) +{ + struct amdgpu_device *adev = smu->adev; + int ret, param; + + switch (policy) { + case SOC_PSTATE_DEFAULT: + param = 0; + break; + case SOC_PSTATE_0: + param = 1; + break; + case SOC_PSTATE_1: + param = 2; + break; + case SOC_PSTATE_2: + param = 3; + break; + default: + return -EINVAL; + } + + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetThrottlingPolicy, + param, NULL); + + if (ret) + dev_err(adev->dev, "select soc pstate policy %d failed", + policy); + + return ret; +} + +static int smu_v13_0_6_select_plpd_policy(struct smu_context *smu, int level) +{ + struct amdgpu_device *adev = smu->adev; + int ret, param; + + switch (level) { + case XGMI_PLPD_DEFAULT: + param = PPSMC_PLPD_MODE_DEFAULT; + break; + case XGMI_PLPD_OPTIMIZED: + param = PPSMC_PLPD_MODE_OPTIMIZED; + break; + case XGMI_PLPD_DISALLOW: + param = 0; + break; + default: + return -EINVAL; + } + + if (level == XGMI_PLPD_DISALLOW) + ret = smu_cmn_send_smc_msg_with_param( + smu, SMU_MSG_GmiPwrDnControl, param, NULL); + else + /* change xgmi per-link power down policy */ + ret = smu_cmn_send_smc_msg_with_param( + smu, SMU_MSG_SelectPLPDMode, param, NULL); + + if (ret) + dev_err(adev->dev, + "select xgmi per-link power down policy %d failed\n", + level); + + return ret; +} + static int smu_v13_0_6_allocate_dpm_context(struct smu_context *smu) { struct smu_dpm_context *smu_dpm = &smu->smu_dpm; + struct smu_dpm_policy *policy; smu_dpm->dpm_context = kzalloc(sizeof(struct smu_13_0_dpm_context), GFP_KERNEL); @@ -378,6 +626,36 @@ static int smu_v13_0_6_allocate_dpm_context(struct smu_context *smu) return -ENOMEM; smu_dpm->dpm_context_size = sizeof(struct smu_13_0_dpm_context); + smu_dpm->dpm_policies = + kzalloc(sizeof(struct smu_dpm_policy_ctxt), GFP_KERNEL); + if (!smu_dpm->dpm_policies) { + kfree(smu_dpm->dpm_context); + return -ENOMEM; + } + + if (!(smu->adev->flags & AMD_IS_APU)) { + policy = &(smu_dpm->dpm_policies->policies[0]); + + policy->policy_type = PP_PM_POLICY_SOC_PSTATE; + policy->level_mask = BIT(SOC_PSTATE_DEFAULT) | + BIT(SOC_PSTATE_0) | BIT(SOC_PSTATE_1) | + BIT(SOC_PSTATE_2); + policy->current_level = SOC_PSTATE_DEFAULT; + policy->set_policy = smu_v13_0_6_select_policy_soc_pstate; + smu_cmn_generic_soc_policy_desc(policy); + smu_dpm->dpm_policies->policy_mask |= + BIT(PP_PM_POLICY_SOC_PSTATE); + } + policy = &(smu_dpm->dpm_policies->policies[1]); + + policy->policy_type = 
PP_PM_POLICY_XGMI_PLPD; + policy->level_mask = BIT(XGMI_PLPD_DISALLOW) | BIT(XGMI_PLPD_DEFAULT) | + BIT(XGMI_PLPD_OPTIMIZED); + policy->current_level = XGMI_PLPD_DEFAULT; + policy->set_policy = smu_v13_0_6_select_plpd_policy; + smu_cmn_generic_plpd_policy_desc(policy); + smu_dpm->dpm_policies->policy_mask |= BIT(PP_PM_POLICY_XGMI_PLPD); + return 0; } @@ -463,7 +741,7 @@ static ssize_t smu_v13_0_6_get_pm_metrics(struct smu_context *smu, memset(&pm_metrics->common_header, 0, sizeof(pm_metrics->common_header)); pm_metrics->common_header.mp1_ip_discovery_version = - IP_VERSION(13, 0, 6); + amdgpu_ip_version(smu->adev, MP1_HWIP, 0); pm_metrics->common_header.pmfw_version = pmfw_version; pm_metrics->common_header.pmmetrics_version = table_version; pm_metrics->common_header.structure_size = @@ -472,17 +750,61 @@ static ssize_t smu_v13_0_6_get_pm_metrics(struct smu_context *smu, return pm_metrics->common_header.structure_size; } +static void smu_v13_0_6_fill_static_metrics_table(struct smu_context *smu, + StaticMetricsTable_t *static_metrics) +{ + struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context; + + if (!static_metrics->InputTelemetryVoltageInmV) { + dev_warn(smu->adev->dev, "Invalid board voltage %d\n", + static_metrics->InputTelemetryVoltageInmV); + } + + dpm_context->board_volt = static_metrics->InputTelemetryVoltageInmV; + + if (smu_v13_0_6_cap_supported(smu, SMU_CAP(PLDM_VERSION)) && + static_metrics->pldmVersion[0] != 0xFFFFFFFF) + smu->adev->firmware.pldm_version = + static_metrics->pldmVersion[0]; +} + +int smu_v13_0_6_get_static_metrics_table(struct smu_context *smu) +{ + struct smu_table_context *smu_table = &smu->smu_table; + uint32_t table_size = smu_table->tables[SMU_TABLE_SMU_METRICS].size; + struct smu_table *table = &smu_table->driver_table; + int ret; + + ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetStaticMetricsTable, NULL); + if (ret) { + dev_info(smu->adev->dev, + "Failed to export static metrics table!\n"); + return ret; + } + + amdgpu_asic_invalidate_hdp(smu->adev, NULL); + memcpy(smu_table->metrics_table, table->cpu_addr, table_size); + + return 0; +} + static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu) { struct smu_table_context *smu_table = &smu->smu_table; - MetricsTableX_t *metrics_x = (MetricsTableX_t *)smu_table->metrics_table; - MetricsTableA_t *metrics_a = (MetricsTableA_t *)smu_table->metrics_table; + StaticMetricsTable_t *static_metrics = (StaticMetricsTable_t *)smu_table->metrics_table; + MetricsTableV0_t *metrics_v0 = (MetricsTableV0_t *)smu_table->metrics_table; + MetricsTableV1_t *metrics_v1 = (MetricsTableV1_t *)smu_table->metrics_table; + MetricsTableV2_t *metrics_v2 = (MetricsTableV2_t *)smu_table->metrics_table; struct PPTable_t *pptable = (struct PPTable_t *)smu_table->driver_pptable; - struct amdgpu_device *adev = smu->adev; + int version = smu_v13_0_6_get_metrics_version(smu); int ret, i, retry = 100; uint32_t table_version; + if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 12) && + smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS))) + return smu_v13_0_12_setup_driver_pptable(smu); + /* Store one-time values in driver PPTable */ if (!pptable->Init) { while (--retry) { @@ -491,7 +813,7 @@ static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu) return ret; /* Ensure that metrics have been updated */ - if (GET_METRIC_FIELD(AccumulationCounter)) + if (GET_METRIC_FIELD(AccumulationCounter, version)) break; usleep_range(1000, 1100); @@ -508,31 +830,38 @@ static int 
smu_v13_0_6_setup_driver_pptable(struct smu_context *smu) table_version; pptable->MaxSocketPowerLimit = - SMUQ10_ROUND(GET_METRIC_FIELD(MaxSocketPowerLimit)); + SMUQ10_ROUND(GET_METRIC_FIELD(MaxSocketPowerLimit, version)); pptable->MaxGfxclkFrequency = - SMUQ10_ROUND(GET_METRIC_FIELD(MaxGfxclkFrequency)); + SMUQ10_ROUND(GET_METRIC_FIELD(MaxGfxclkFrequency, version)); pptable->MinGfxclkFrequency = - SMUQ10_ROUND(GET_METRIC_FIELD(MinGfxclkFrequency)); + SMUQ10_ROUND(GET_METRIC_FIELD(MinGfxclkFrequency, version)); for (i = 0; i < 4; ++i) { pptable->FclkFrequencyTable[i] = - SMUQ10_ROUND(GET_METRIC_FIELD(FclkFrequencyTable)[i]); + SMUQ10_ROUND(GET_METRIC_FIELD(FclkFrequencyTable, version)[i]); pptable->UclkFrequencyTable[i] = - SMUQ10_ROUND(GET_METRIC_FIELD(UclkFrequencyTable)[i]); + SMUQ10_ROUND(GET_METRIC_FIELD(UclkFrequencyTable, version)[i]); pptable->SocclkFrequencyTable[i] = SMUQ10_ROUND( - GET_METRIC_FIELD(SocclkFrequencyTable)[i]); + GET_METRIC_FIELD(SocclkFrequencyTable, version)[i]); pptable->VclkFrequencyTable[i] = - SMUQ10_ROUND(GET_METRIC_FIELD(VclkFrequencyTable)[i]); + SMUQ10_ROUND(GET_METRIC_FIELD(VclkFrequencyTable, version)[i]); pptable->DclkFrequencyTable[i] = - SMUQ10_ROUND(GET_METRIC_FIELD(DclkFrequencyTable)[i]); + SMUQ10_ROUND(GET_METRIC_FIELD(DclkFrequencyTable, version)[i]); pptable->LclkFrequencyTable[i] = - SMUQ10_ROUND(GET_METRIC_FIELD(LclkFrequencyTable)[i]); + SMUQ10_ROUND(GET_METRIC_FIELD(LclkFrequencyTable, version)[i]); } /* use AID0 serial number by default */ - pptable->PublicSerialNumber_AID = GET_METRIC_FIELD(PublicSerialNumber_AID)[0]; + pptable->PublicSerialNumber_AID = + GET_METRIC_FIELD(PublicSerialNumber_AID, version)[0]; pptable->Init = true; + if (smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS))) { + ret = smu_v13_0_6_get_static_metrics_table(smu); + if (ret) + return ret; + smu_v13_0_6_fill_static_metrics_table(smu, static_metrics); + } } return 0; @@ -636,6 +965,15 @@ static int smu_v13_0_6_get_dpm_level_count(struct smu_context *smu, return ret; } +static void smu_v13_0_6_pm_policy_init(struct smu_context *smu) +{ + struct smu_dpm_policy *policy; + + policy = smu_get_pm_policy(smu, PP_PM_POLICY_SOC_PSTATE); + if (policy) + policy->current_level = SOC_PSTATE_DEFAULT; +} + static int smu_v13_0_6_set_default_dpm_table(struct smu_context *smu) { struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context; @@ -665,6 +1003,15 @@ static int smu_v13_0_6_set_default_dpm_table(struct smu_context *smu) smu_v13_0_6_setup_driver_pptable(smu); + /* DPM policy not supported in older firmwares */ + if (!smu_v13_0_6_cap_supported(smu, SMU_CAP(DPM_POLICY))) { + struct smu_dpm_context *smu_dpm = &smu->smu_dpm; + + smu_dpm->dpm_policies->policy_mask &= + ~BIT(PP_PM_POLICY_SOC_PSTATE); + } + + smu_v13_0_6_pm_policy_init(smu); /* gfxclk dpm table setup */ dpm_table = &dpm_context->dpm_tables.gfx_table; if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT)) { @@ -832,8 +1179,10 @@ static int smu_v13_0_6_get_smu_metrics_data(struct smu_context *smu, uint32_t *value) { struct smu_table_context *smu_table = &smu->smu_table; - MetricsTableX_t *metrics_x = (MetricsTableX_t *)smu_table->metrics_table; - MetricsTableA_t *metrics_a = (MetricsTableA_t *)smu_table->metrics_table; + MetricsTableV0_t *metrics_v0 = (MetricsTableV0_t *)smu_table->metrics_table; + MetricsTableV1_t *metrics_v1 = (MetricsTableV1_t *)smu_table->metrics_table; + MetricsTableV2_t *metrics_v2 = (MetricsTableV2_t *)smu_table->metrics_table; + int version = 
smu_v13_0_6_get_metrics_version(smu); struct amdgpu_device *adev = smu->adev; int ret = 0; int xcc_id; @@ -842,56 +1191,60 @@ static int smu_v13_0_6_get_smu_metrics_data(struct smu_context *smu, if (ret) return ret; + if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 12) && + smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS))) + return smu_v13_0_12_get_smu_metrics_data(smu, member, value); + /* For clocks with multiple instances, only report the first one */ switch (member) { case METRICS_CURR_GFXCLK: case METRICS_AVERAGE_GFXCLK: - if (smu->smc_fw_version >= 0x552F00) { + if (smu_v13_0_6_cap_supported(smu, SMU_CAP(DPM))) { xcc_id = GET_INST(GC, 0); - *value = SMUQ10_ROUND(GET_METRIC_FIELD(GfxclkFrequency)[xcc_id]); + *value = SMUQ10_ROUND(GET_METRIC_FIELD(GfxclkFrequency, version)[xcc_id]); } else { *value = 0; } break; case METRICS_CURR_SOCCLK: case METRICS_AVERAGE_SOCCLK: - *value = SMUQ10_ROUND(GET_METRIC_FIELD(SocclkFrequency)[0]); + *value = SMUQ10_ROUND(GET_METRIC_FIELD(SocclkFrequency, version)[0]); break; case METRICS_CURR_UCLK: case METRICS_AVERAGE_UCLK: - *value = SMUQ10_ROUND(GET_METRIC_FIELD(UclkFrequency)); + *value = SMUQ10_ROUND(GET_METRIC_FIELD(UclkFrequency, version)); break; case METRICS_CURR_VCLK: - *value = SMUQ10_ROUND(GET_METRIC_FIELD(VclkFrequency)[0]); + *value = SMUQ10_ROUND(GET_METRIC_FIELD(VclkFrequency, version)[0]); break; case METRICS_CURR_DCLK: - *value = SMUQ10_ROUND(GET_METRIC_FIELD(DclkFrequency)[0]); + *value = SMUQ10_ROUND(GET_METRIC_FIELD(DclkFrequency, version)[0]); break; case METRICS_CURR_FCLK: - *value = SMUQ10_ROUND(GET_METRIC_FIELD(FclkFrequency)); + *value = SMUQ10_ROUND(GET_METRIC_FIELD(FclkFrequency, version)); break; case METRICS_AVERAGE_GFXACTIVITY: - *value = SMUQ10_ROUND(GET_METRIC_FIELD(SocketGfxBusy)); + *value = SMUQ10_ROUND(GET_METRIC_FIELD(SocketGfxBusy, version)); break; case METRICS_AVERAGE_MEMACTIVITY: - *value = SMUQ10_ROUND(GET_METRIC_FIELD(DramBandwidthUtilization)); + *value = SMUQ10_ROUND(GET_METRIC_FIELD(DramBandwidthUtilization, version)); break; case METRICS_CURR_SOCKETPOWER: - *value = SMUQ10_ROUND(GET_METRIC_FIELD(SocketPower)) << 8; + *value = SMUQ10_ROUND(GET_METRIC_FIELD(SocketPower, version)) << 8; break; case METRICS_TEMPERATURE_HOTSPOT: - *value = SMUQ10_ROUND(GET_METRIC_FIELD(MaxSocketTemperature)) * + *value = SMUQ10_ROUND(GET_METRIC_FIELD(MaxSocketTemperature, version)) * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; break; case METRICS_TEMPERATURE_MEM: - *value = SMUQ10_ROUND(GET_METRIC_FIELD(MaxHbmTemperature)) * + *value = SMUQ10_ROUND(GET_METRIC_FIELD(MaxHbmTemperature, version)) * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; break; /* This is the max of all VRs and not just SOC VR. * No need to define another data type for the same. 
*/ case METRICS_TEMPERATURE_VRSOC: - *value = SMUQ10_ROUND(GET_METRIC_FIELD(MaxVrTemperature)) * + *value = SMUQ10_ROUND(GET_METRIC_FIELD(MaxVrTemperature, version)) * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; break; default: @@ -1010,8 +1363,11 @@ static int smu_v13_0_6_print_clk_levels(struct smu_context *smu, switch (type) { case SMU_OD_SCLK: - size += sysfs_emit_at(buf, size, "%s:\n", "GFXCLK"); - fallthrough; + size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK"); + size += sysfs_emit_at(buf, size, "0: %uMhz\n1: %uMhz\n", + pstate_table->gfxclk_pstate.curr.min, + pstate_table->gfxclk_pstate.curr.max); + break; case SMU_SCLK: ret = smu_v13_0_6_get_current_clk_freq_by_table(smu, SMU_GFXCLK, &now); @@ -1052,8 +1408,14 @@ static int smu_v13_0_6_print_clk_levels(struct smu_context *smu, break; case SMU_OD_MCLK: - size += sysfs_emit_at(buf, size, "%s:\n", "MCLK"); - fallthrough; + if (!smu_v13_0_6_cap_supported(smu, SMU_CAP(SET_UCLK_MAX))) + return 0; + + size += sysfs_emit_at(buf, size, "%s:\n", "OD_MCLK"); + size += sysfs_emit_at(buf, size, "0: %uMhz\n1: %uMhz\n", + pstate_table->uclk_pstate.curr.min, + pstate_table->uclk_pstate.curr.max); + break; case SMU_MCLK: ret = smu_v13_0_6_get_current_clk_freq_by_table(smu, SMU_UCLK, &now); @@ -1304,6 +1666,7 @@ static int smu_v13_0_6_read_sensor(struct smu_context *smu, enum amd_pp_sensors sensor, void *data, uint32_t *size) { + struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context; int ret = 0; if (amdgpu_ras_intr_triggered()) @@ -1348,6 +1711,15 @@ static int smu_v13_0_6_read_sensor(struct smu_context *smu, ret = smu_v13_0_get_gfx_vdd(smu, (uint32_t *)data); *size = 4; break; + case AMDGPU_PP_SENSOR_VDDBOARD: + if (smu_v13_0_6_cap_supported(smu, SMU_CAP(BOARD_VOLTAGE))) { + *(uint32_t *)data = dpm_context->board_volt; + *size = 4; + break; + } else { + ret = -EOPNOTSUPP; + break; + } case AMDGPU_PP_SENSOR_GPU_AVG_POWER: default: ret = -EOPNOTSUPP; @@ -1527,7 +1899,7 @@ static int smu_v13_0_6_notify_unload(struct smu_context *smu) static int smu_v13_0_6_mca_set_debug_mode(struct smu_context *smu, bool enable) { /* NOTE: this ClearMcaOnRead message is only supported for smu version 85.72.0 or higher */ - if (smu->smc_fw_version < 0x554800) + if (!smu_v13_0_6_cap_supported(smu, SMU_CAP(MCA_DEBUG_MODE))) return 0; return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ClearMcaOnRead, @@ -1610,12 +1982,12 @@ static int smu_v13_0_6_set_performance_level(struct smu_context *smu, if (uclk_table->max != pstate_table->uclk_pstate.curr.max) { /* Min UCLK is not expected to be changed */ ret = smu_v13_0_set_soft_freq_limited_range( - smu, SMU_UCLK, 0, uclk_table->max); + smu, SMU_UCLK, 0, uclk_table->max, false); if (ret) return ret; pstate_table->uclk_pstate.curr.max = uclk_table->max; } - pstate_table->uclk_pstate.custom.max = 0; + smu_v13_0_reset_custom_level(smu); return 0; case AMD_DPM_FORCED_LEVEL_MANUAL: @@ -1624,12 +1996,13 @@ static int smu_v13_0_6_set_performance_level(struct smu_context *smu, break; } - return -EINVAL; + return -EOPNOTSUPP; } static int smu_v13_0_6_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type, - uint32_t min, uint32_t max) + uint32_t min, uint32_t max, + bool automatic) { struct smu_dpm_context *smu_dpm = &(smu->smu_dpm); struct smu_13_0_dpm_context *dpm_context = smu_dpm->dpm_context; @@ -1670,9 +2043,13 @@ static int smu_v13_0_6_set_soft_freq_limited_range(struct smu_context *smu, if (clk_type == SMU_UCLK) { if (max == pstate_table->uclk_pstate.curr.max) return 0; + /* For VF, only 
allowed in FW versions 85.102 or greater */ + if (!smu_v13_0_6_cap_supported(smu, + SMU_CAP(SET_UCLK_MAX))) + return -EOPNOTSUPP; /* Only max clock limiting is allowed for UCLK */ ret = smu_v13_0_set_soft_freq_limited_range( - smu, SMU_UCLK, 0, max); + smu, SMU_UCLK, 0, max, false); if (!ret) pstate_table->uclk_pstate.curr.max = max; } @@ -1812,7 +2189,7 @@ static int smu_v13_0_6_usr_edit_dpm_table(struct smu_context *smu, max_clk = dpm_context->dpm_tables.gfx_table.max; ret = smu_v13_0_6_set_soft_freq_limited_range( - smu, SMU_GFXCLK, min_clk, max_clk); + smu, SMU_GFXCLK, min_clk, max_clk, false); if (ret) return ret; @@ -1820,10 +2197,10 @@ static int smu_v13_0_6_usr_edit_dpm_table(struct smu_context *smu, min_clk = dpm_context->dpm_tables.uclk_table.min; max_clk = dpm_context->dpm_tables.uclk_table.max; ret = smu_v13_0_6_set_soft_freq_limited_range( - smu, SMU_UCLK, min_clk, max_clk); + smu, SMU_UCLK, min_clk, max_clk, false); if (ret) return ret; - pstate_table->uclk_pstate.custom.max = 0; + smu_v13_0_reset_custom_level(smu); } break; case PP_OD_COMMIT_DPM_TABLE: @@ -1844,7 +2221,7 @@ static int smu_v13_0_6_usr_edit_dpm_table(struct smu_context *smu, max_clk = pstate_table->gfxclk_pstate.custom.max; ret = smu_v13_0_6_set_soft_freq_limited_range( - smu, SMU_GFXCLK, min_clk, max_clk); + smu, SMU_GFXCLK, min_clk, max_clk, false); if (ret) return ret; @@ -1855,7 +2232,7 @@ static int smu_v13_0_6_usr_edit_dpm_table(struct smu_context *smu, min_clk = pstate_table->uclk_pstate.curr.min; max_clk = pstate_table->uclk_pstate.custom.max; return smu_v13_0_6_set_soft_freq_limited_range( - smu, SMU_UCLK, min_clk, max_clk); + smu, SMU_UCLK, min_clk, max_clk, false); } break; default: @@ -1872,7 +2249,7 @@ static int smu_v13_0_6_get_enabled_mask(struct smu_context *smu, ret = smu_cmn_get_enabled_mask(smu, feature_mask); - if (ret == -EIO && smu->smc_fw_version < 0x552F00) { + if (ret == -EIO && !smu_v13_0_6_cap_supported(smu, SMU_CAP(DPM))) { *feature_mask = 0; ret = 0; } @@ -1885,6 +2262,9 @@ static bool smu_v13_0_6_is_dpm_running(struct smu_context *smu) int ret; uint64_t feature_enabled; + if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 12)) + return smu_v13_0_12_is_dpm_running(smu); + ret = smu_v13_0_6_get_enabled_mask(smu, &feature_enabled); if (ret) @@ -1973,8 +2353,12 @@ static int smu_v13_0_6_i2c_xfer(struct i2c_adapter *i2c_adap, } mutex_lock(&adev->pm.mutex); r = smu_v13_0_6_request_i2c_xfer(smu, req); - if (r) - goto fail; + if (r) { + /* Retry once, in case of an i2c collision */ + r = smu_v13_0_6_request_i2c_xfer(smu, req); + if (r) + goto fail; + } for (c = i = 0; i < num_msgs; i++) { if (!(msg[i].flags & I2C_M_RD)) { @@ -2077,11 +2461,11 @@ static void smu_v13_0_6_get_unique_id(struct smu_context *smu) adev->unique_id = pptable->PublicSerialNumber_AID; } -static bool smu_v13_0_6_is_baco_supported(struct smu_context *smu) +static int smu_v13_0_6_get_bamaco_support(struct smu_context *smu) { /* smu_13_0_6 does not support baco */ - return false; + return 0; } static const char *const throttling_logging_label[] = { @@ -2159,76 +2543,233 @@ static int smu_v13_0_6_get_current_pcie_link_speed(struct smu_context *smu) return pcie_gen_to_speed(speed_level + 1); } +static ssize_t smu_v13_0_6_get_xcp_metrics(struct smu_context *smu, int xcp_id, + void *table) +{ + const u8 num_jpeg_rings = AMDGPU_MAX_JPEG_RINGS_4_0_3; + int version = smu_v13_0_6_get_metrics_version(smu); + struct amdgpu_partition_metrics_v1_0 *xcp_metrics; + struct amdgpu_device *adev = smu->adev; + int ret, 
inst, i, j, k, idx; + MetricsTableV0_t *metrics_v0; + MetricsTableV1_t *metrics_v1; + MetricsTableV2_t *metrics_v2; + struct amdgpu_xcp *xcp; + u32 inst_mask; + bool per_inst; + + if (!table) + return sizeof(*xcp_metrics); + + for_each_xcp(adev->xcp_mgr, xcp, i) { + if (xcp->id == xcp_id) + break; + } + if (i == adev->xcp_mgr->num_xcps) + return -EINVAL; + + xcp_metrics = (struct amdgpu_partition_metrics_v1_0 *)table; + smu_cmn_init_partition_metrics(xcp_metrics, 1, 0); + + metrics_v0 = kzalloc(METRICS_TABLE_SIZE, GFP_KERNEL); + if (!metrics_v0) + return -ENOMEM; + + ret = smu_v13_0_6_get_metrics_table(smu, metrics_v0, false); + if (ret) { + kfree(metrics_v0); + return ret; + } + + if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == + IP_VERSION(13, 0, 12) && + smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS))) { + ret = smu_v13_0_12_get_xcp_metrics(smu, xcp, table, metrics_v0); + goto out; + } + + metrics_v1 = (MetricsTableV1_t *)metrics_v0; + metrics_v2 = (MetricsTableV2_t *)metrics_v0; + + per_inst = smu_v13_0_6_cap_supported(smu, SMU_CAP(PER_INST_METRICS)); + + amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_VCN, &inst_mask); + idx = 0; + for_each_inst(k, inst_mask) { + /* Both JPEG and VCN has same instances */ + inst = GET_INST(VCN, k); + + for (j = 0; j < num_jpeg_rings; ++j) { + xcp_metrics->jpeg_busy[(idx * num_jpeg_rings) + j] = + SMUQ10_ROUND(GET_METRIC_FIELD( + JpegBusy, + version)[(inst * num_jpeg_rings) + j]); + } + xcp_metrics->vcn_busy[idx] = + SMUQ10_ROUND(GET_METRIC_FIELD(VcnBusy, version)[inst]); + + xcp_metrics->current_vclk0[idx] = SMUQ10_ROUND( + GET_METRIC_FIELD(VclkFrequency, version)[inst]); + xcp_metrics->current_dclk0[idx] = SMUQ10_ROUND( + GET_METRIC_FIELD(DclkFrequency, version)[inst]); + xcp_metrics->current_socclk[idx] = SMUQ10_ROUND( + GET_METRIC_FIELD(SocclkFrequency, version)[inst]); + + idx++; + } + + xcp_metrics->current_uclk = + SMUQ10_ROUND(GET_METRIC_FIELD(UclkFrequency, version)); + + if (per_inst) { + amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_GFX, &inst_mask); + idx = 0; + for_each_inst(k, inst_mask) { + inst = GET_INST(GC, k); + xcp_metrics->current_gfxclk[idx] = + SMUQ10_ROUND(GET_METRIC_FIELD(GfxclkFrequency, + version)[inst]); + + xcp_metrics->gfx_busy_inst[idx] = SMUQ10_ROUND( + GET_GPU_METRIC_FIELD(GfxBusy, version)[inst]); + xcp_metrics->gfx_busy_acc[idx] = SMUQ10_ROUND( + GET_GPU_METRIC_FIELD(GfxBusyAcc, + version)[inst]); + if (smu_v13_0_6_cap_supported( + smu, SMU_CAP(HST_LIMIT_METRICS))) { + xcp_metrics->gfx_below_host_limit_ppt_acc + [idx] = SMUQ10_ROUND( + metrics_v0->GfxclkBelowHostLimitPptAcc + [inst]); + xcp_metrics->gfx_below_host_limit_thm_acc + [idx] = SMUQ10_ROUND( + metrics_v0->GfxclkBelowHostLimitThmAcc + [inst]); + xcp_metrics->gfx_low_utilization_acc + [idx] = SMUQ10_ROUND( + metrics_v0 + ->GfxclkLowUtilizationAcc[inst]); + xcp_metrics->gfx_below_host_limit_total_acc + [idx] = SMUQ10_ROUND( + metrics_v0->GfxclkBelowHostLimitTotalAcc + [inst]); + } + idx++; + } + } +out: + kfree(metrics_v0); + + return sizeof(*xcp_metrics); +} + static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table) { struct smu_table_context *smu_table = &smu->smu_table; - struct gpu_metrics_v1_5 *gpu_metrics = - (struct gpu_metrics_v1_5 *)smu_table->gpu_metrics_table; + struct gpu_metrics_v1_8 *gpu_metrics = + (struct gpu_metrics_v1_8 *)smu_table->gpu_metrics_table; + int version = smu_v13_0_6_get_metrics_version(smu); + int ret = 0, xcc_id, inst, i, j, k, idx; struct amdgpu_device *adev = smu->adev; - int ret = 0, xcc_id, inst, 
i, j; - MetricsTableX_t *metrics_x; - MetricsTableA_t *metrics_a; + MetricsTableV0_t *metrics_v0; + MetricsTableV1_t *metrics_v1; + MetricsTableV2_t *metrics_v2; + struct amdgpu_xcp *xcp; u16 link_width_level; + ssize_t num_bytes; + u8 num_jpeg_rings; + u32 inst_mask; + bool per_inst; - metrics_x = kzalloc(max(sizeof(MetricsTableX_t), sizeof(MetricsTableA_t)), GFP_KERNEL); - ret = smu_v13_0_6_get_metrics_table(smu, metrics_x, true); + metrics_v0 = kzalloc(METRICS_TABLE_SIZE, GFP_KERNEL); + ret = smu_v13_0_6_get_metrics_table(smu, metrics_v0, true); if (ret) { - kfree(metrics_x); + kfree(metrics_v0); return ret; } - metrics_a = (MetricsTableA_t *)metrics_x; + if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 12) && + smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS))) { + num_bytes = smu_v13_0_12_get_gpu_metrics(smu, table, metrics_v0); + kfree(metrics_v0); + return num_bytes; + } + + metrics_v1 = (MetricsTableV1_t *)metrics_v0; + metrics_v2 = (MetricsTableV2_t *)metrics_v0; - smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 5); + smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 8); gpu_metrics->temperature_hotspot = - SMUQ10_ROUND(GET_METRIC_FIELD(MaxSocketTemperature)); + SMUQ10_ROUND(GET_METRIC_FIELD(MaxSocketTemperature, version)); /* Individual HBM stack temperature is not reported */ gpu_metrics->temperature_mem = - SMUQ10_ROUND(GET_METRIC_FIELD(MaxHbmTemperature)); + SMUQ10_ROUND(GET_METRIC_FIELD(MaxHbmTemperature, version)); /* Reports max temperature of all voltage rails */ gpu_metrics->temperature_vrsoc = - SMUQ10_ROUND(GET_METRIC_FIELD(MaxVrTemperature)); + SMUQ10_ROUND(GET_METRIC_FIELD(MaxVrTemperature, version)); gpu_metrics->average_gfx_activity = - SMUQ10_ROUND(GET_METRIC_FIELD(SocketGfxBusy)); + SMUQ10_ROUND(GET_METRIC_FIELD(SocketGfxBusy, version)); gpu_metrics->average_umc_activity = - SMUQ10_ROUND(GET_METRIC_FIELD(DramBandwidthUtilization)); + SMUQ10_ROUND(GET_METRIC_FIELD(DramBandwidthUtilization, version)); + + gpu_metrics->mem_max_bandwidth = + SMUQ10_ROUND(GET_METRIC_FIELD(MaxDramBandwidth, version)); gpu_metrics->curr_socket_power = - SMUQ10_ROUND(GET_METRIC_FIELD(SocketPower)); + SMUQ10_ROUND(GET_METRIC_FIELD(SocketPower, version)); /* Energy counter reported in 15.259uJ (2^-16) units */ - gpu_metrics->energy_accumulator = GET_METRIC_FIELD(SocketEnergyAcc); + gpu_metrics->energy_accumulator = GET_METRIC_FIELD(SocketEnergyAcc, version); for (i = 0; i < MAX_GFX_CLKS; i++) { xcc_id = GET_INST(GC, i); if (xcc_id >= 0) gpu_metrics->current_gfxclk[i] = - SMUQ10_ROUND(GET_METRIC_FIELD(GfxclkFrequency)[xcc_id]); + SMUQ10_ROUND(GET_METRIC_FIELD(GfxclkFrequency, version)[xcc_id]); if (i < MAX_CLKS) { gpu_metrics->current_socclk[i] = - SMUQ10_ROUND(GET_METRIC_FIELD(SocclkFrequency)[i]); + SMUQ10_ROUND(GET_METRIC_FIELD(SocclkFrequency, version)[i]); inst = GET_INST(VCN, i); if (inst >= 0) { gpu_metrics->current_vclk0[i] = - SMUQ10_ROUND(GET_METRIC_FIELD(VclkFrequency)[inst]); + SMUQ10_ROUND(GET_METRIC_FIELD(VclkFrequency, + version)[inst]); gpu_metrics->current_dclk0[i] = - SMUQ10_ROUND(GET_METRIC_FIELD(DclkFrequency)[inst]); + SMUQ10_ROUND(GET_METRIC_FIELD(DclkFrequency, + version)[inst]); } } } - gpu_metrics->current_uclk = SMUQ10_ROUND(GET_METRIC_FIELD(UclkFrequency)); + gpu_metrics->current_uclk = SMUQ10_ROUND(GET_METRIC_FIELD(UclkFrequency, version)); - /* Throttle status is not reported through metrics now */ - gpu_metrics->throttle_status = 0; + /* Total accumulated cycle counter */ + gpu_metrics->accumulation_counter = 
GET_METRIC_FIELD(AccumulationCounter, version); + + /* Accumulated throttler residencies */ + gpu_metrics->prochot_residency_acc = GET_METRIC_FIELD(ProchotResidencyAcc, version); + gpu_metrics->ppt_residency_acc = GET_METRIC_FIELD(PptResidencyAcc, version); + gpu_metrics->socket_thm_residency_acc = GET_METRIC_FIELD(SocketThmResidencyAcc, version); + gpu_metrics->vr_thm_residency_acc = GET_METRIC_FIELD(VrThmResidencyAcc, version); + gpu_metrics->hbm_thm_residency_acc = + GET_METRIC_FIELD(HbmThmResidencyAcc, version); /* Clock Lock Status. Each bit corresponds to each GFXCLK instance */ - gpu_metrics->gfxclk_lock_status = GET_METRIC_FIELD(GfxLockXCDMak) >> GET_INST(GC, 0); + gpu_metrics->gfxclk_lock_status = GET_METRIC_FIELD(GfxLockXCDMak, + version) >> GET_INST(GC, 0); if (!(adev->flags & AMD_IS_APU)) { - if (!amdgpu_sriov_vf(adev)) { + /* Check SMU version: PCIe link speed and width are reported from the PMFW metrics + * table for both PF and one VF with SMU version 85.99.0 or higher; otherwise they are + * reported from registers for the PF only. + */ + if (smu_v13_0_6_cap_supported(smu, SMU_CAP(PCIE_METRICS))) { + gpu_metrics->pcie_link_width = GET_GPU_METRIC_FIELD(PCIeLinkWidth, version); + gpu_metrics->pcie_link_speed = + pcie_gen_to_speed(GET_GPU_METRIC_FIELD(PCIeLinkSpeed, version)); + } else if (!amdgpu_sriov_vf(adev)) { link_width_level = smu_v13_0_6_get_current_pcie_link_width_level(smu); if (link_width_level > MAX_LINK_WIDTH) link_width_level = 0; @@ -2238,62 +2779,122 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table gpu_metrics->pcie_link_speed = smu_v13_0_6_get_current_pcie_link_speed(smu); } + gpu_metrics->pcie_bandwidth_acc = - SMUQ10_ROUND(metrics_x->PcieBandwidthAcc[0]); + SMUQ10_ROUND(GET_GPU_METRIC_FIELD(PcieBandwidthAcc, version)[0]); gpu_metrics->pcie_bandwidth_inst = - SMUQ10_ROUND(metrics_x->PcieBandwidth[0]); + SMUQ10_ROUND(GET_GPU_METRIC_FIELD(PcieBandwidth, version)[0]); gpu_metrics->pcie_l0_to_recov_count_acc = - metrics_x->PCIeL0ToRecoveryCountAcc; + GET_GPU_METRIC_FIELD(PCIeL0ToRecoveryCountAcc, version); gpu_metrics->pcie_replay_count_acc = - metrics_x->PCIenReplayAAcc; + GET_GPU_METRIC_FIELD(PCIenReplayAAcc, version); gpu_metrics->pcie_replay_rover_count_acc = - metrics_x->PCIenReplayARolloverCountAcc; + GET_GPU_METRIC_FIELD(PCIenReplayARolloverCountAcc, version); gpu_metrics->pcie_nak_sent_count_acc = - metrics_x->PCIeNAKSentCountAcc; + GET_GPU_METRIC_FIELD(PCIeNAKSentCountAcc, version); gpu_metrics->pcie_nak_rcvd_count_acc = - metrics_x->PCIeNAKReceivedCountAcc; + GET_GPU_METRIC_FIELD(PCIeNAKReceivedCountAcc, version); + if (smu_v13_0_6_cap_supported(smu, SMU_CAP(OTHER_END_METRICS))) + gpu_metrics->pcie_lc_perf_other_end_recovery = + GET_GPU_METRIC_FIELD(PCIeOtherEndRecoveryAcc, version); + } gpu_metrics->system_clock_counter = ktime_get_boottime_ns(); gpu_metrics->gfx_activity_acc = - SMUQ10_ROUND(GET_METRIC_FIELD(SocketGfxBusyAcc)); + SMUQ10_ROUND(GET_METRIC_FIELD(SocketGfxBusyAcc, version)); gpu_metrics->mem_activity_acc = - SMUQ10_ROUND(GET_METRIC_FIELD(DramBandwidthUtilizationAcc)); + SMUQ10_ROUND(GET_METRIC_FIELD(DramBandwidthUtilizationAcc, version)); for (i = 0; i < NUM_XGMI_LINKS; i++) { - gpu_metrics->xgmi_read_data_acc[i] = - SMUQ10_ROUND(GET_METRIC_FIELD(XgmiReadDataSizeAcc)[i]); - gpu_metrics->xgmi_write_data_acc[i] = - SMUQ10_ROUND(GET_METRIC_FIELD(XgmiWriteDataSizeAcc)[i]); - } - - for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { - inst = GET_INST(JPEG, i); - for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) { -
gpu_metrics->jpeg_activity[(i * adev->jpeg.num_jpeg_rings) + j] = - SMUQ10_ROUND(GET_METRIC_FIELD(JpegBusy) - [(inst * adev->jpeg.num_jpeg_rings) + j]); + j = amdgpu_xgmi_get_ext_link(adev, i); + if (j < 0 || j >= NUM_XGMI_LINKS) + continue; + gpu_metrics->xgmi_read_data_acc[j] = SMUQ10_ROUND( + GET_METRIC_FIELD(XgmiReadDataSizeAcc, version)[i]); + gpu_metrics->xgmi_write_data_acc[j] = SMUQ10_ROUND( + GET_METRIC_FIELD(XgmiWriteDataSizeAcc, version)[i]); + ret = amdgpu_get_xgmi_link_status(adev, i); + if (ret >= 0) + gpu_metrics->xgmi_link_status[j] = ret; + } + + gpu_metrics->num_partition = adev->xcp_mgr->num_xcps; + + per_inst = smu_v13_0_6_cap_supported(smu, SMU_CAP(PER_INST_METRICS)); + + num_jpeg_rings = AMDGPU_MAX_JPEG_RINGS_4_0_3; + for_each_xcp(adev->xcp_mgr, xcp, i) { + amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_VCN, &inst_mask); + idx = 0; + for_each_inst(k, inst_mask) { + /* Both JPEG and VCN has same instances */ + inst = GET_INST(VCN, k); + + for (j = 0; j < num_jpeg_rings; ++j) { + gpu_metrics->xcp_stats[i].jpeg_busy + [(idx * num_jpeg_rings) + j] = + SMUQ10_ROUND(GET_METRIC_FIELD(JpegBusy, version) + [(inst * num_jpeg_rings) + j]); + } + gpu_metrics->xcp_stats[i].vcn_busy[idx] = + SMUQ10_ROUND(GET_METRIC_FIELD(VcnBusy, version)[inst]); + idx++; + } - } - for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { - inst = GET_INST(VCN, i); - gpu_metrics->vcn_activity[i] = - SMUQ10_ROUND(GET_METRIC_FIELD(VcnBusy)[inst]); + if (per_inst) { + amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_GFX, &inst_mask); + idx = 0; + for_each_inst(k, inst_mask) { + inst = GET_INST(GC, k); + gpu_metrics->xcp_stats[i].gfx_busy_inst[idx] = + SMUQ10_ROUND(GET_GPU_METRIC_FIELD(GfxBusy, version)[inst]); + gpu_metrics->xcp_stats[i].gfx_busy_acc[idx] = + SMUQ10_ROUND(GET_GPU_METRIC_FIELD(GfxBusyAcc, + version)[inst]); + if (smu_v13_0_6_cap_supported(smu, SMU_CAP(HST_LIMIT_METRICS))) { + gpu_metrics->xcp_stats[i].gfx_below_host_limit_ppt_acc[idx] = + SMUQ10_ROUND + (metrics_v0->GfxclkBelowHostLimitPptAcc[inst]); + gpu_metrics->xcp_stats[i].gfx_below_host_limit_thm_acc[idx] = + SMUQ10_ROUND + (metrics_v0->GfxclkBelowHostLimitThmAcc[inst]); + gpu_metrics->xcp_stats[i].gfx_low_utilization_acc[idx] = + SMUQ10_ROUND + (metrics_v0->GfxclkLowUtilizationAcc[inst]); + gpu_metrics->xcp_stats[i].gfx_below_host_limit_total_acc[idx] = + SMUQ10_ROUND + (metrics_v0->GfxclkBelowHostLimitTotalAcc[inst]); + } + idx++; + } + } } - gpu_metrics->xgmi_link_width = SMUQ10_ROUND(GET_METRIC_FIELD(XgmiWidth)); - gpu_metrics->xgmi_link_speed = SMUQ10_ROUND(GET_METRIC_FIELD(XgmiBitrate)); + gpu_metrics->xgmi_link_width = GET_METRIC_FIELD(XgmiWidth, version); + gpu_metrics->xgmi_link_speed = GET_METRIC_FIELD(XgmiBitrate, version); - gpu_metrics->firmware_timestamp = GET_METRIC_FIELD(Timestamp); + gpu_metrics->firmware_timestamp = GET_METRIC_FIELD(Timestamp, version); *table = (void *)gpu_metrics; - kfree(metrics_x); + kfree(metrics_v0); return sizeof(*gpu_metrics); } +static void smu_v13_0_6_restore_pci_config(struct smu_context *smu) +{ + struct amdgpu_device *adev = smu->adev; + int i; + + for (i = 0; i < 16; i++) + pci_write_config_dword(adev->pdev, i * 4, + adev->pdev->saved_config_space[i]); + pci_restore_msi_state(adev->pdev); +} + static int smu_v13_0_6_mode2_reset(struct smu_context *smu) { int ret = 0, index; @@ -2302,6 +2903,8 @@ static int smu_v13_0_6_mode2_reset(struct smu_context *smu) index = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG, SMU_MSG_GfxDeviceDriverReset); + if (index < 0) + return index; 
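/* Editorial note, not part of the original patch: smu_cmn_to_asic_specific_index() returns a negative value when SMU_MSG_GfxDeviceDriverReset has no valid mapping for this ASIC, so the check added above bails out early instead of passing an undefined message index to the PMFW. */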
mutex_lock(&smu->message_lock); @@ -2315,6 +2918,20 @@ static int smu_v13_0_6_mode2_reset(struct smu_context *smu) /* Restore the config space saved during init */ amdgpu_device_load_pci_state(adev->pdev); + /* Certain platforms have switches which assign virtual BAR values to + * devices. OS uses the virtual BAR values and device behind the switch + * is assigned another BAR value. When device's config space registers + * are queried, switch returns the virtual BAR values. When mode-2 reset + * is performed, switch is unaware of it, and will continue to return + * the same virtual values to the OS. This affects + * pci_restore_config_space() API as it doesn't write the value saved if + * the current value read from config space is the same as what is + * saved. As a workaround, make sure the config space is restored + * always. + */ + if (!(adev->flags & AMD_IS_APU)) + smu_v13_0_6_restore_pci_config(smu); + dev_dbg(smu->adev->dev, "wait for reset ack\n"); do { ret = smu_cmn_wait_for_response(smu); @@ -2355,7 +2972,7 @@ static int smu_v13_0_6_get_thermal_temperature_range(struct smu_context *smu, return -EINVAL; /* Check smu version, GetCtfLimit message only supported for smu version 85.69 or higher */ - if (smu->smc_fw_version < 0x554500) + if (!smu_v13_0_6_cap_supported(smu, SMU_CAP(CTF_LIMIT))) return 0; /* Get SOC Max operating temperature */ @@ -2409,24 +3026,14 @@ failed: static int smu_v13_0_6_mode1_reset(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; - struct amdgpu_hive_info *hive = NULL; - u32 hive_ras_recovery = 0; - struct amdgpu_ras *ras; u32 fatal_err, param; int ret = 0; - hive = amdgpu_get_xgmi_hive(adev); - ras = amdgpu_ras_get_context(adev); fatal_err = 0; param = SMU_RESET_MODE_1; - if (hive) { - hive_ras_recovery = atomic_read(&hive->ras_recovery); - amdgpu_put_xgmi_hive(hive); - } - /* fatal error triggered by ras, PMFW supports the flag */ + if (amdgpu_ras_get_fed_status(adev)) fatal_err = 1; param |= (fatal_err << 16); @@ -2439,14 +3046,29 @@ static int smu_v13_0_6_mode1_reset(struct smu_context *smu) return ret; } +static int smu_v13_0_6_link_reset(struct smu_context *smu) +{ + int ret = 0; + + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset, + SMU_RESET_MODE_4, NULL); + return ret; +} + static bool smu_v13_0_6_is_mode1_reset_supported(struct smu_context *smu) { return true; } -static bool smu_v13_0_6_is_mode2_reset_supported(struct smu_context *smu) +static inline bool smu_v13_0_6_is_link_reset_supported(struct smu_context *smu) { - return true; + struct amdgpu_device *adev = smu->adev; + int var = (adev->pdev->device & 0xF); + + if (var == 0x1) + return true; + + return false; } static int smu_v13_0_6_smu_send_hbm_bad_page_num(struct smu_context *smu, @@ -2467,11 +3089,10 @@ static int smu_v13_0_6_smu_send_hbm_bad_page_num(struct smu_context *smu, static int smu_v13_0_6_send_rma_reason(struct smu_context *smu) { - struct amdgpu_device *adev = smu->adev; int ret; /* NOTE: the message is only valid on dGPU with pmfw 85.90.0 and above */ - if ((adev->flags & AMD_IS_APU) || smu->smc_fw_version < 0x00555a00) + if (!smu_v13_0_6_cap_supported(smu, SMU_CAP(RMA_MSG))) return 0; ret = smu_cmn_send_smc_msg(smu, SMU_MSG_RmaDueToBadPageThreshold, NULL); @@ -2483,6 +3104,56 @@ static int smu_v13_0_6_send_rma_reason(struct smu_context *smu) return ret; } +/** + * smu_v13_0_6_reset_sdma_is_supported - Check if SDMA reset is supported + * @smu: smu_context pointer + * + * 
This function checks if the SMU supports resetting the SDMA engine. + * It returns false if the capability is not supported. + */ +static bool smu_v13_0_6_reset_sdma_is_supported(struct smu_context *smu) +{ + bool ret = true; + + if (!smu_v13_0_6_cap_supported(smu, SMU_CAP(SDMA_RESET))) { + dev_info(smu->adev->dev, + "SDMA reset capability is not supported\n"); + ret = false; + } + + return ret; +} + +static int smu_v13_0_6_reset_sdma(struct smu_context *smu, uint32_t inst_mask) +{ + int ret = 0; + + if (!smu_v13_0_6_reset_sdma_is_supported(smu)) + return -EOPNOTSUPP; + + ret = smu_cmn_send_smc_msg_with_param(smu, + SMU_MSG_ResetSDMA, inst_mask, NULL); + if (ret) + dev_err(smu->adev->dev, + "failed to send ResetSDMA event with mask 0x%x\n", + inst_mask); + + return ret; +} + +static int smu_v13_0_6_reset_vcn(struct smu_context *smu, uint32_t inst_mask) +{ + int ret = 0; + + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ResetVCN, inst_mask, NULL); + if (ret) + dev_err(smu->adev->dev, + "failed to send ResetVCN event with mask 0x%x\n", + inst_mask); + return ret; +} + + static int mca_smu_set_debug_mode(struct amdgpu_device *adev, bool enable) { struct smu_context *smu = adev->powerplay.pp_handle; @@ -2671,6 +3342,11 @@ static int mca_umc_mca_get_err_count(const struct mca_ras_info *mca_ras, struct umc_v12_0_is_correctable_error(adev, status0)) *count = (ext_error_code == 0) ? odecc_err_cnt : 1; + amdgpu_umc_update_ecc_status(adev, + entry->regs[MCA_REG_IDX_STATUS], + entry->regs[MCA_REG_IDX_IPID], + entry->regs[MCA_REG_IDX_ADDR]); + return 0; } @@ -2684,7 +3360,8 @@ static int mca_pcs_xgmi_mca_get_err_count(const struct mca_ras_info *mca_ras, st ext_error_code = MCA_REG__STATUS__ERRORCODEEXT(entry->regs[MCA_REG_IDX_STATUS]); err_cnt = MCA_REG__MISC0__ERRCNT(entry->regs[MCA_REG_IDX_MISC0]); - if (type == AMDGPU_MCA_ERROR_TYPE_UE && ext_error_code == 0) + if (type == AMDGPU_MCA_ERROR_TYPE_UE && + (ext_error_code == 0 || ext_error_code == 9)) *count = err_cnt; else if (type == AMDGPU_MCA_ERROR_TYPE_CE && ext_error_code == 6) *count = err_cnt; @@ -2787,7 +3464,7 @@ static bool mca_smu_bank_is_valid(const struct mca_ras_info *mca_ras, struct amd if (instlo != 0x03b30400) return false; - if (!(adev->flags & AMD_IS_APU) && smu->smc_fw_version >= 0x00555600) { + if (smu_v13_0_6_cap_supported(smu, SMU_CAP(ACA_SYND))) { errcode = MCA_REG__SYND__ERRORINFORMATION(entry->regs[MCA_REG_IDX_SYND]); errcode &= 0xff; } else { @@ -2804,6 +3481,16 @@ static int mmhub_err_codes[] = { CODE_VML2, CODE_VML2_WALKER, CODE_MMCANE, }; +static int vcn_err_codes[] = { + CODE_VIDD, CODE_VIDV, +}; +static int jpeg_err_codes[] = { + CODE_JPEG0S, CODE_JPEG0D, CODE_JPEG1S, CODE_JPEG1D, + CODE_JPEG2S, CODE_JPEG2D, CODE_JPEG3S, CODE_JPEG3D, + CODE_JPEG4S, CODE_JPEG4D, CODE_JPEG5S, CODE_JPEG5D, + CODE_JPEG6S, CODE_JPEG6D, CODE_JPEG7S, CODE_JPEG7D, +}; + static const struct mca_ras_info mca_ras_table[] = { { .blkid = AMDGPU_RAS_BLOCK__UMC, @@ -2832,6 +3519,20 @@ static const struct mca_ras_info mca_ras_table[] = { .blkid = AMDGPU_RAS_BLOCK__XGMI_WAFL, .ip = AMDGPU_MCA_IP_PCS_XGMI, .get_err_count = mca_pcs_xgmi_mca_get_err_count, + }, { + .blkid = AMDGPU_RAS_BLOCK__VCN, + .ip = AMDGPU_MCA_IP_SMU, + .err_code_array = vcn_err_codes, + .err_code_count = ARRAY_SIZE(vcn_err_codes), + .get_err_count = mca_smu_mca_get_err_count, + .bank_is_valid = mca_smu_bank_is_valid, + }, { + .blkid = AMDGPU_RAS_BLOCK__JPEG, + .ip = AMDGPU_MCA_IP_SMU, + .err_code_array = jpeg_err_codes, + .err_code_count = ARRAY_SIZE(jpeg_err_codes), + 
.get_err_count = mca_smu_mca_get_err_count, + .bank_is_valid = mca_smu_bank_is_valid, }, }; @@ -2877,55 +3578,6 @@ static bool mca_bank_is_valid(struct amdgpu_device *adev, const struct mca_ras_i return true; } -static int __mca_smu_get_ras_mca_set(struct amdgpu_device *adev, const struct mca_ras_info *mca_ras, - enum amdgpu_mca_error_type type, struct mca_bank_set *mca_set) -{ - struct mca_bank_entry entry; - uint32_t mca_cnt; - int i, ret; - - ret = mca_get_valid_mca_count(adev, type, &mca_cnt); - if (ret) - return ret; - - /* if valid mca bank count is 0, the driver can return 0 directly */ - if (!mca_cnt) - return 0; - - for (i = 0; i < mca_cnt; i++) { - memset(&entry, 0, sizeof(entry)); - ret = mca_get_mca_entry(adev, type, i, &entry); - if (ret) - return ret; - - if (mca_ras && !mca_bank_is_valid(adev, mca_ras, type, &entry)) - continue; - - ret = amdgpu_mca_bank_set_add_entry(mca_set, &entry); - if (ret) - return ret; - } - - return 0; -} - -static int mca_smu_get_ras_mca_set(struct amdgpu_device *adev, enum amdgpu_ras_block blk, - enum amdgpu_mca_error_type type, struct mca_bank_set *mca_set) -{ - const struct mca_ras_info *mca_ras = NULL; - - if (!mca_set) - return -EINVAL; - - if (blk != AMDGPU_RAS_BLOCK_COUNT) { - mca_ras = mca_get_mca_ras_info(adev, blk); - if (!mca_ras) - return -EOPNOTSUPP; - } - - return __mca_smu_get_ras_mca_set(adev, mca_ras, type, mca_set); -} - static int mca_smu_parse_mca_error_count(struct amdgpu_device *adev, enum amdgpu_ras_block blk, enum amdgpu_mca_error_type type, struct mca_bank_entry *entry, uint32_t *count) { @@ -2962,7 +3614,6 @@ static const struct amdgpu_mca_smu_funcs smu_v13_0_6_mca_smu_funcs = { .max_ue_count = 12, .max_ce_count = 12, .mca_set_debug_mode = mca_smu_set_debug_mode, - .mca_get_ras_mca_set = mca_smu_get_ras_mca_set, .mca_parse_mca_error_count = mca_smu_parse_mca_error_count, .mca_get_mca_entry = mca_smu_get_mca_entry, .mca_get_valid_mca_count = mca_smu_get_valid_mca_count, @@ -2975,7 +3626,7 @@ static int aca_smu_set_debug_mode(struct amdgpu_device *adev, bool enable) return smu_v13_0_6_mca_set_debug_mode(smu, enable); } -static int smu_v13_0_6_get_valid_aca_count(struct smu_context *smu, enum aca_error_type type, u32 *count) +static int smu_v13_0_6_get_valid_aca_count(struct smu_context *smu, enum aca_smu_type type, u32 *count) { uint32_t msg; int ret; @@ -2984,10 +3635,10 @@ static int smu_v13_0_6_get_valid_aca_count(struct smu_context *smu, enum aca_err return -EINVAL; switch (type) { - case ACA_ERROR_TYPE_UE: + case ACA_SMU_TYPE_UE: msg = SMU_MSG_QueryValidMcaCount; break; - case ACA_ERROR_TYPE_CE: + case ACA_SMU_TYPE_CE: msg = SMU_MSG_QueryValidMcaCeCount; break; default: @@ -3004,14 +3655,14 @@ static int smu_v13_0_6_get_valid_aca_count(struct smu_context *smu, enum aca_err } static int aca_smu_get_valid_aca_count(struct amdgpu_device *adev, - enum aca_error_type type, u32 *count) + enum aca_smu_type type, u32 *count) { struct smu_context *smu = adev->powerplay.pp_handle; int ret; switch (type) { - case ACA_ERROR_TYPE_UE: - case ACA_ERROR_TYPE_CE: + case ACA_SMU_TYPE_UE: + case ACA_SMU_TYPE_CE: ret = smu_v13_0_6_get_valid_aca_count(smu, type, count); break; default: @@ -3022,16 +3673,16 @@ static int aca_smu_get_valid_aca_count(struct amdgpu_device *adev, return ret; } -static int __smu_v13_0_6_aca_bank_dump(struct smu_context *smu, enum aca_error_type type, +static int __smu_v13_0_6_aca_bank_dump(struct smu_context *smu, enum aca_smu_type type, int idx, int offset, u32 *val) { uint32_t msg, param; switch (type) { - case 
ACA_ERROR_TYPE_UE: + case ACA_SMU_TYPE_UE: msg = SMU_MSG_McaBankDumpDW; break; - case ACA_ERROR_TYPE_CE: + case ACA_SMU_TYPE_CE: msg = SMU_MSG_McaBankCeDumpDW; break; default: @@ -3043,7 +3694,7 @@ static int __smu_v13_0_6_aca_bank_dump(struct smu_context *smu, enum aca_error_t return smu_cmn_send_smc_msg_with_param(smu, msg, param, (uint32_t *)val); } -static int smu_v13_0_6_aca_bank_dump(struct smu_context *smu, enum aca_error_type type, +static int smu_v13_0_6_aca_bank_dump(struct smu_context *smu, enum aca_smu_type type, int idx, int offset, u32 *val, int count) { int ret, i; @@ -3060,7 +3711,7 @@ static int smu_v13_0_6_aca_bank_dump(struct smu_context *smu, enum aca_error_typ return 0; } -static int aca_bank_read_reg(struct amdgpu_device *adev, enum aca_error_type type, +static int aca_bank_read_reg(struct amdgpu_device *adev, enum aca_smu_type type, int idx, int reg_idx, u64 *val) { struct smu_context *smu = adev->powerplay.pp_handle; @@ -3077,13 +3728,13 @@ static int aca_bank_read_reg(struct amdgpu_device *adev, enum aca_error_type typ *val = (u64)data[1] << 32 | data[0]; dev_dbg(adev->dev, "mca read bank reg: type:%s, index: %d, reg_idx: %d, val: 0x%016llx\n", - type == ACA_ERROR_TYPE_UE ? "UE" : "CE", idx, reg_idx, *val); + type == ACA_SMU_TYPE_UE ? "UE" : "CE", idx, reg_idx, *val); return 0; } static int aca_smu_get_valid_aca_bank(struct amdgpu_device *adev, - enum aca_error_type type, int idx, struct aca_bank *bank) + enum aca_smu_type type, int idx, struct aca_bank *bank) { int i, ret, count; @@ -3097,52 +3748,28 @@ static int aca_smu_get_valid_aca_bank(struct amdgpu_device *adev, return 0; } +static int aca_smu_parse_error_code(struct amdgpu_device *adev, struct aca_bank *bank) +{ + struct smu_context *smu = adev->powerplay.pp_handle; + int error_code; + + if (smu_v13_0_6_cap_supported(smu, SMU_CAP(ACA_SYND))) + error_code = ACA_REG__SYND__ERRORINFORMATION(bank->regs[ACA_REG_IDX_SYND]); + else + error_code = ACA_REG__STATUS__ERRORCODE(bank->regs[ACA_REG_IDX_STATUS]); + + return error_code & 0xff; +} + static const struct aca_smu_funcs smu_v13_0_6_aca_smu_funcs = { .max_ue_bank_count = 12, .max_ce_bank_count = 12, .set_debug_mode = aca_smu_set_debug_mode, .get_valid_aca_count = aca_smu_get_valid_aca_count, .get_valid_aca_bank = aca_smu_get_valid_aca_bank, + .parse_error_code = aca_smu_parse_error_code, }; -static int smu_v13_0_6_select_xgmi_plpd_policy(struct smu_context *smu, - enum pp_xgmi_plpd_mode mode) -{ - struct amdgpu_device *adev = smu->adev; - int ret, param; - - switch (mode) { - case XGMI_PLPD_DEFAULT: - param = PPSMC_PLPD_MODE_DEFAULT; - break; - case XGMI_PLPD_OPTIMIZED: - param = PPSMC_PLPD_MODE_OPTIMIZED; - break; - case XGMI_PLPD_DISALLOW: - param = 0; - break; - default: - return -EINVAL; - } - - if (mode == XGMI_PLPD_DISALLOW) - ret = smu_cmn_send_smc_msg_with_param(smu, - SMU_MSG_GmiPwrDnControl, - param, NULL); - else - /* change xgmi per-link power down policy */ - ret = smu_cmn_send_smc_msg_with_param(smu, - SMU_MSG_SelectPLPDMode, - param, NULL); - - if (ret) - dev_err(adev->dev, - "select xgmi per-link power down policy %d failed\n", - mode); - - return ret; -} - static const struct pptable_funcs smu_v13_0_6_ppt_funcs = { /* init dpm */ .get_allowed_feature_mask = smu_v13_0_6_get_allowed_feature_mask, @@ -3164,7 +3791,7 @@ static const struct pptable_funcs smu_v13_0_6_ppt_funcs = { .fini_power = smu_v13_0_fini_power, .check_fw_status = smu_v13_0_6_check_fw_status, /* pptable related */ - .check_fw_version = smu_v13_0_check_fw_version, + 
.check_fw_version = smu_v13_0_6_check_fw_version, .set_driver_table_location = smu_v13_0_set_driver_table_location, .set_tool_table_location = smu_v13_0_set_tool_table_location, .notify_memory_pool_location = smu_v13_0_notify_memory_pool_location, @@ -3179,35 +3806,42 @@ static const struct pptable_funcs smu_v13_0_6_ppt_funcs = { .enable_thermal_alert = smu_v13_0_enable_thermal_alert, .disable_thermal_alert = smu_v13_0_disable_thermal_alert, .setup_pptable = smu_v13_0_6_setup_pptable, - .baco_is_support = smu_v13_0_6_is_baco_supported, + .get_bamaco_support = smu_v13_0_6_get_bamaco_support, .get_dpm_ultimate_freq = smu_v13_0_6_get_dpm_ultimate_freq, .set_soft_freq_limited_range = smu_v13_0_6_set_soft_freq_limited_range, .od_edit_dpm_table = smu_v13_0_6_usr_edit_dpm_table, - .select_xgmi_plpd_policy = smu_v13_0_6_select_xgmi_plpd_policy, .log_thermal_throttling_event = smu_v13_0_6_log_thermal_throttling_event, .get_pp_feature_mask = smu_cmn_get_pp_feature_mask, .get_gpu_metrics = smu_v13_0_6_get_gpu_metrics, .get_pm_metrics = smu_v13_0_6_get_pm_metrics, + .get_xcp_metrics = smu_v13_0_6_get_xcp_metrics, .get_thermal_temperature_range = smu_v13_0_6_get_thermal_temperature_range, .mode1_reset_is_support = smu_v13_0_6_is_mode1_reset_supported, - .mode2_reset_is_support = smu_v13_0_6_is_mode2_reset_supported, + .link_reset_is_support = smu_v13_0_6_is_link_reset_supported, .mode1_reset = smu_v13_0_6_mode1_reset, .mode2_reset = smu_v13_0_6_mode2_reset, + .link_reset = smu_v13_0_6_link_reset, .wait_for_event = smu_v13_0_wait_for_event, .i2c_init = smu_v13_0_6_i2c_control_init, .i2c_fini = smu_v13_0_6_i2c_control_fini, .send_hbm_bad_pages_num = smu_v13_0_6_smu_send_hbm_bad_page_num, .send_rma_reason = smu_v13_0_6_send_rma_reason, + .reset_sdma = smu_v13_0_6_reset_sdma, + .reset_sdma_is_supported = smu_v13_0_6_reset_sdma_is_supported, + .dpm_reset_vcn = smu_v13_0_6_reset_vcn, }; void smu_v13_0_6_set_ppt_funcs(struct smu_context *smu) { smu->ppt_funcs = &smu_v13_0_6_ppt_funcs; - smu->message_map = smu_v13_0_6_message_map; + smu->message_map = (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 12)) ? + smu_v13_0_12_message_map : smu_v13_0_6_message_map; smu->clock_map = smu_v13_0_6_clk_map; - smu->feature_map = smu_v13_0_6_feature_mask_map; + smu->feature_map = (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 12)) ? 
+ smu_v13_0_12_feature_mask_map : smu_v13_0_6_feature_mask_map; smu->table_map = smu_v13_0_6_table_map; smu->smc_driver_if_version = SMU13_0_6_DRIVER_IF_VERSION; + smu->smc_fw_caps |= SMU_FW_CAP_RAS_PRI; smu_v13_0_set_smu_mailbox_registers(smu); amdgpu_mca_smu_init_funcs(smu->adev, &smu_v13_0_6_mca_smu_funcs); amdgpu_aca_set_smu_funcs(smu->adev, &smu_v13_0_6_aca_smu_funcs); diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h index f0fa42a645c0..d38d6d76b1e7 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h @@ -26,7 +26,64 @@ #define SMU_13_0_6_UMD_PSTATE_GFXCLK_LEVEL 0x2 #define SMU_13_0_6_UMD_PSTATE_SOCCLK_LEVEL 0x4 #define SMU_13_0_6_UMD_PSTATE_MCLK_LEVEL 0x2 +#define SMU_CAP(x) SMU_13_0_6_CAPS_##x + +typedef enum { +/*0*/ METRICS_VERSION_V0 = 0, +/*1*/ METRICS_VERSION_V1 = 1, +/*2*/ METRICS_VERSION_V2 = 2, + +/*3*/ NUM_METRICS = 3 +} METRICS_LIST_e; + +struct PPTable_t { + uint32_t MaxSocketPowerLimit; + uint32_t MaxGfxclkFrequency; + uint32_t MinGfxclkFrequency; + uint32_t FclkFrequencyTable[4]; + uint32_t UclkFrequencyTable[4]; + uint32_t SocclkFrequencyTable[4]; + uint32_t VclkFrequencyTable[4]; + uint32_t DclkFrequencyTable[4]; + uint32_t LclkFrequencyTable[4]; + uint32_t MaxLclkDpmRange; + uint32_t MinLclkDpmRange; + uint64_t PublicSerialNumber_AID; + bool Init; +}; + +enum smu_v13_0_6_caps { + SMU_CAP(DPM), + SMU_CAP(DPM_POLICY), + SMU_CAP(OTHER_END_METRICS), + SMU_CAP(SET_UCLK_MAX), + SMU_CAP(PCIE_METRICS), + SMU_CAP(MCA_DEBUG_MODE), + SMU_CAP(PER_INST_METRICS), + SMU_CAP(CTF_LIMIT), + SMU_CAP(RMA_MSG), + SMU_CAP(ACA_SYND), + SMU_CAP(SDMA_RESET), + SMU_CAP(STATIC_METRICS), + SMU_CAP(HST_LIMIT_METRICS), + SMU_CAP(BOARD_VOLTAGE), + SMU_CAP(PLDM_VERSION), + SMU_CAP(ALL), +}; extern void smu_v13_0_6_set_ppt_funcs(struct smu_context *smu); +bool smu_v13_0_6_cap_supported(struct smu_context *smu, enum smu_v13_0_6_caps cap); +int smu_v13_0_6_get_static_metrics_table(struct smu_context *smu); +bool smu_v13_0_12_is_dpm_running(struct smu_context *smu); +int smu_v13_0_12_get_max_metrics_size(void); +int smu_v13_0_12_setup_driver_pptable(struct smu_context *smu); +int smu_v13_0_12_get_smu_metrics_data(struct smu_context *smu, + MetricsMember_t member, uint32_t *value); +ssize_t smu_v13_0_12_get_gpu_metrics(struct smu_context *smu, void **table, void *smu_metrics); +ssize_t smu_v13_0_12_get_xcp_metrics(struct smu_context *smu, + struct amdgpu_xcp *xcp, void *table, + void *smu_metrics); +extern const struct cmn2asic_mapping smu_v13_0_12_feature_mask_map[]; +extern const struct cmn2asic_msg_mapping smu_v13_0_12_message_map[]; #endif diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c index 7318964f1f14..c8f4f6fb4083 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c @@ -83,6 +83,8 @@ #define PP_OD_FEATURE_FAN_ACOUSTIC_TARGET 8 #define PP_OD_FEATURE_FAN_TARGET_TEMPERATURE 9 #define PP_OD_FEATURE_FAN_MINIMUM_PWM 10 +#define PP_OD_FEATURE_FAN_ZERO_RPM_ENABLE 11 +#define PP_OD_FEATURE_FAN_ZERO_RPM_STOP_TEMP 12 #define LINK_SPEED_MAX 3 @@ -734,19 +736,6 @@ static bool smu_v13_0_7_is_dpm_running(struct smu_context *smu) return !!(feature_enabled & SMC_DPM_FEATURE); } -static void smu_v13_0_7_dump_pptable(struct smu_context *smu) -{ - struct smu_table_context *table_context = &smu->smu_table; - PPTable_t *pptable = 
table_context->driver_pptable; - SkuTable_t *skutable = &pptable->SkuTable; - - dev_info(smu->adev->dev, "Dumped PPTable:\n"); - - dev_info(smu->adev->dev, "Version = 0x%08x\n", skutable->Version); - dev_info(smu->adev->dev, "FeaturesToRun[0] = 0x%08x\n", skutable->FeaturesToRun[0]); - dev_info(smu->adev->dev, "FeaturesToRun[1] = 0x%08x\n", skutable->FeaturesToRun[1]); -} - static uint32_t smu_v13_0_7_get_throttler_status(SmuMetrics_t *metrics) { uint32_t throttler_status = 0; @@ -818,6 +807,10 @@ static int smu_v13_0_7_get_smu_metrics_data(struct smu_context *smu, else *value = metrics->AverageMemclkFrequencyPreDs; break; + case METRICS_AVERAGE_VCNACTIVITY: + *value = max(metrics->Vcn0ActivityPercentage, + metrics->Vcn1ActivityPercentage); + break; case METRICS_AVERAGE_VCLK: *value = metrics->AverageVclk0Frequency; break; @@ -962,6 +955,12 @@ static int smu_v13_0_7_read_sensor(struct smu_context *smu, (uint32_t *)data); *size = 4; break; + case AMDGPU_PP_SENSOR_VCN_LOAD: + ret = smu_v13_0_7_get_smu_metrics_data(smu, + METRICS_AVERAGE_VCNACTIVITY, + (uint32_t *)data); + *size = 4; + break; case AMDGPU_PP_SENSOR_GPU_AVG_POWER: ret = smu_v13_0_7_get_smu_metrics_data(smu, METRICS_AVERAGE_SOCKETPOWER, @@ -1132,6 +1131,14 @@ static void smu_v13_0_7_get_od_setting_limits(struct smu_context *smu, od_min_setting = overdrive_lowerlimits->FanMinimumPwm; od_max_setting = overdrive_upperlimits->FanMinimumPwm; break; + case PP_OD_FEATURE_FAN_ZERO_RPM_ENABLE: + od_min_setting = overdrive_lowerlimits->FanZeroRpmEnable; + od_max_setting = overdrive_upperlimits->FanZeroRpmEnable; + break; + case PP_OD_FEATURE_FAN_ZERO_RPM_STOP_TEMP: + od_min_setting = overdrive_lowerlimits->FanZeroRpmStopTemp; + od_max_setting = overdrive_upperlimits->FanZeroRpmStopTemp; + break; default: od_min_setting = od_max_setting = INT_MAX; break; @@ -1452,6 +1459,42 @@ static int smu_v13_0_7_print_clk_levels(struct smu_context *smu, min_value, max_value); break; + case SMU_OD_FAN_ZERO_RPM_ENABLE: + if (!smu_v13_0_7_is_od_feature_supported(smu, + PP_OD_FEATURE_ZERO_FAN_BIT)) + break; + + size += sysfs_emit_at(buf, size, "FAN_ZERO_RPM_ENABLE:\n"); + size += sysfs_emit_at(buf, size, "%d\n", + (int)od_table->OverDriveTable.FanZeroRpmEnable); + + size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE"); + smu_v13_0_7_get_od_setting_limits(smu, + PP_OD_FEATURE_FAN_ZERO_RPM_ENABLE, + &min_value, + &max_value); + size += sysfs_emit_at(buf, size, "ZERO_RPM_ENABLE: %u %u\n", + min_value, max_value); + break; + + case SMU_OD_FAN_ZERO_RPM_STOP_TEMP: + if (!smu_v13_0_7_is_od_feature_supported(smu, + PP_OD_FEATURE_ZERO_FAN_BIT)) + break; + + size += sysfs_emit_at(buf, size, "FAN_ZERO_RPM_STOP_TEMPERATURE:\n"); + size += sysfs_emit_at(buf, size, "%d\n", + (int)od_table->OverDriveTable.FanZeroRpmStopTemp); + + size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE"); + smu_v13_0_7_get_od_setting_limits(smu, + PP_OD_FEATURE_FAN_ZERO_RPM_STOP_TEMP, + &min_value, + &max_value); + size += sysfs_emit_at(buf, size, "ZERO_RPM_STOP_TEMPERATURE: %u %u\n", + min_value, max_value); + break; + case SMU_OD_RANGE: if (!smu_v13_0_7_is_od_feature_supported(smu, PP_OD_FEATURE_GFXCLK_BIT) && !smu_v13_0_7_is_od_feature_supported(smu, PP_OD_FEATURE_UCLK_BIT) && @@ -1548,6 +1591,16 @@ static int smu_v13_0_7_od_restore_table_single(struct smu_context *smu, long inp od_table->OverDriveTable.FanMode = FAN_MODE_AUTO; od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT); break; + case PP_OD_EDIT_FAN_ZERO_RPM_ENABLE: + 
od_table->OverDriveTable.FanZeroRpmEnable = + boot_overdrive_table->OverDriveTable.FanZeroRpmEnable; + od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_ZERO_FAN_BIT); + break; + case PP_OD_EDIT_FAN_ZERO_RPM_STOP_TEMP: + od_table->OverDriveTable.FanZeroRpmStopTemp = + boot_overdrive_table->OverDriveTable.FanZeroRpmStopTemp; + od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_ZERO_FAN_BIT); + break; default: dev_info(adev->dev, "Invalid table index: %ld\n", input); return -EINVAL; @@ -1841,6 +1894,48 @@ static int smu_v13_0_7_od_edit_dpm_table(struct smu_context *smu, od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT); break; + case PP_OD_EDIT_FAN_ZERO_RPM_ENABLE: + if (!smu_v13_0_7_is_od_feature_supported(smu, PP_OD_FEATURE_ZERO_FAN_BIT)) { + dev_warn(adev->dev, "Zero RPM setting not supported!\n"); + return -ENOTSUPP; + } + + smu_v13_0_7_get_od_setting_limits(smu, + PP_OD_FEATURE_FAN_ZERO_RPM_ENABLE, + &minimum, + &maximum); + if (input[0] < minimum || + input[0] > maximum) { + dev_info(adev->dev, "zero RPM enable setting(%ld) must be within [%d, %d]!\n", + input[0], minimum, maximum); + return -EINVAL; + } + + od_table->OverDriveTable.FanZeroRpmEnable = input[0]; + od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_ZERO_FAN_BIT); + break; + + case PP_OD_EDIT_FAN_ZERO_RPM_STOP_TEMP: + if (!smu_v13_0_7_is_od_feature_supported(smu, PP_OD_FEATURE_ZERO_FAN_BIT)) { + dev_warn(adev->dev, "Zero RPM setting not supported!\n"); + return -ENOTSUPP; + } + + smu_v13_0_7_get_od_setting_limits(smu, + PP_OD_FEATURE_FAN_ZERO_RPM_STOP_TEMP, + &minimum, + &maximum); + if (input[0] < minimum || + input[0] > maximum) { + dev_info(adev->dev, "zero RPM stop temperature setting(%ld) must be within [%d, %d]!\n", + input[0], minimum, maximum); + return -EINVAL; + } + + od_table->OverDriveTable.FanZeroRpmStopTemp = input[0]; + od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_ZERO_FAN_BIT); + break; + case PP_OD_RESTORE_DEFAULT_TABLE: if (size == 1) { ret = smu_v13_0_7_od_restore_table_single(smu, input[0]); @@ -1964,7 +2059,8 @@ static int smu_v13_0_7_force_clk_levels(struct smu_context *smu, ret = smu_v13_0_set_soft_freq_limited_range(smu, clk_type, min_freq, - max_freq); + max_freq, + false); break; case SMU_DCEFCLK: case SMU_PCIE: @@ -2061,6 +2157,8 @@ static ssize_t smu_v13_0_7_get_gpu_metrics(struct smu_context *smu, gpu_metrics->average_dclk1_frequency = metrics->AverageDclk1Frequency; gpu_metrics->current_gfxclk = metrics->CurrClock[PPCLK_GFXCLK]; + gpu_metrics->current_socclk = metrics->CurrClock[PPCLK_SOCCLK]; + gpu_metrics->current_uclk = metrics->CurrClock[PPCLK_UCLK]; gpu_metrics->current_vclk0 = metrics->CurrClock[PPCLK_VCLK_0]; gpu_metrics->current_dclk0 = metrics->CurrClock[PPCLK_DCLK_0]; gpu_metrics->current_vclk1 = metrics->CurrClock[PPCLK_VCLK_1]; @@ -2106,7 +2204,11 @@ static void smu_v13_0_7_set_supported_od_feature_mask(struct smu_context *smu) OD_OPS_SUPPORT_FAN_TARGET_TEMPERATURE_RETRIEVE | OD_OPS_SUPPORT_FAN_TARGET_TEMPERATURE_SET | OD_OPS_SUPPORT_FAN_MINIMUM_PWM_RETRIEVE | - OD_OPS_SUPPORT_FAN_MINIMUM_PWM_SET; + OD_OPS_SUPPORT_FAN_MINIMUM_PWM_SET | + OD_OPS_SUPPORT_FAN_ZERO_RPM_ENABLE_RETRIEVE | + OD_OPS_SUPPORT_FAN_ZERO_RPM_ENABLE_SET | + OD_OPS_SUPPORT_FAN_ZERO_RPM_STOP_TEMP_RETRIEVE | + OD_OPS_SUPPORT_FAN_ZERO_RPM_STOP_TEMP_SET; } static int smu_v13_0_7_set_default_od_settings(struct smu_context *smu) @@ -2172,6 +2274,10 @@ static int smu_v13_0_7_set_default_od_settings(struct smu_context *smu) 
user_od_table_bak.OverDriveTable.FanTargetTemperature; user_od_table->OverDriveTable.FanMinimumPwm = user_od_table_bak.OverDriveTable.FanMinimumPwm; + user_od_table->OverDriveTable.FanZeroRpmEnable = + user_od_table_bak.OverDriveTable.FanZeroRpmEnable; + user_od_table->OverDriveTable.FanZeroRpmStopTemp = + user_od_table_bak.OverDriveTable.FanZeroRpmStopTemp; } smu_v13_0_7_set_supported_od_feature_mask(smu); @@ -2378,7 +2484,7 @@ static int smu_v13_0_7_get_power_profile_mode(struct smu_context *smu, char *buf size += sysfs_emit_at(buf, size, " "); for (i = 0; i <= PP_SMC_POWER_PROFILE_WINDOW3D; i++) - size += sysfs_emit_at(buf, size, "%-14s%s", amdgpu_pp_profile_name[i], + size += sysfs_emit_at(buf, size, "%d %-14s%s", i, amdgpu_pp_profile_name[i], (i == smu->power_profile_mode) ? "* " : " "); size += sysfs_emit_at(buf, size, "\n"); @@ -2408,7 +2514,7 @@ static int smu_v13_0_7_get_power_profile_mode(struct smu_context *smu, char *buf do { \ size += sysfs_emit_at(buf, size, "%-30s", #field); \ for (j = 0; j <= PP_SMC_POWER_PROFILE_WINDOW3D; j++) \ - size += sysfs_emit_at(buf, size, "%-16d", activity_monitor_external[j].DpmActivityMonitorCoeffInt.field); \ + size += sysfs_emit_at(buf, size, "%-18d", activity_monitor_external[j].DpmActivityMonitorCoeffInt.field); \ size += sysfs_emit_at(buf, size, "\n"); \ } while (0) @@ -2434,69 +2540,110 @@ out: return result; } -static int smu_v13_0_7_set_power_profile_mode(struct smu_context *smu, long *input, uint32_t size) +#define SMU_13_0_7_CUSTOM_PARAMS_COUNT 8 +#define SMU_13_0_7_CUSTOM_PARAMS_CLOCK_COUNT 2 +#define SMU_13_0_7_CUSTOM_PARAMS_SIZE (SMU_13_0_7_CUSTOM_PARAMS_CLOCK_COUNT * SMU_13_0_7_CUSTOM_PARAMS_COUNT * sizeof(long)) + +static int smu_v13_0_7_set_power_profile_mode_coeff(struct smu_context *smu, + long *input) { DpmActivityMonitorCoeffIntExternal_t activity_monitor_external; DpmActivityMonitorCoeffInt_t *activity_monitor = &(activity_monitor_external.DpmActivityMonitorCoeffInt); - int workload_type, ret = 0; + int ret, idx; - smu->power_profile_mode = input[size]; + ret = smu_cmn_update_table(smu, + SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT, + (void *)(&activity_monitor_external), false); + if (ret) { + dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__); + return ret; + } - if (smu->power_profile_mode > PP_SMC_POWER_PROFILE_WINDOW3D) { - dev_err(smu->adev->dev, "Invalid power profile mode %d\n", smu->power_profile_mode); - return -EINVAL; + idx = 0 * SMU_13_0_7_CUSTOM_PARAMS_COUNT; + if (input[idx]) { + /* Gfxclk */ + activity_monitor->Gfx_ActiveHystLimit = input[idx + 1]; + activity_monitor->Gfx_IdleHystLimit = input[idx + 2]; + activity_monitor->Gfx_FPS = input[idx + 3]; + activity_monitor->Gfx_MinActiveFreqType = input[idx + 4]; + activity_monitor->Gfx_BoosterFreqType = input[idx + 5]; + activity_monitor->Gfx_MinActiveFreq = input[idx + 6]; + activity_monitor->Gfx_BoosterFreq = input[idx + 7]; + } + idx = 1 * SMU_13_0_7_CUSTOM_PARAMS_COUNT; + if (input[idx]) { + /* Fclk */ + activity_monitor->Fclk_ActiveHystLimit = input[idx + 1]; + activity_monitor->Fclk_IdleHystLimit = input[idx + 2]; + activity_monitor->Fclk_FPS = input[idx + 3]; + activity_monitor->Fclk_MinActiveFreqType = input[idx + 4]; + activity_monitor->Fclk_BoosterFreqType = input[idx + 5]; + activity_monitor->Fclk_MinActiveFreq = input[idx + 6]; + activity_monitor->Fclk_BoosterFreq = input[idx + 7]; } - if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) { + ret = smu_cmn_update_table(smu, + SMU_TABLE_ACTIVITY_MONITOR_COEFF, 
WORKLOAD_PPLIB_CUSTOM_BIT, + (void *)(&activity_monitor_external), true); + if (ret) { + dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__); + return ret; + } - ret = smu_cmn_update_table(smu, - SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT, - (void *)(&activity_monitor_external), false); - if (ret) { - dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__); - return ret; - } + return ret; +} - switch (input[0]) { - case 0: /* Gfxclk */ - activity_monitor->Gfx_ActiveHystLimit = input[1]; - activity_monitor->Gfx_IdleHystLimit = input[2]; - activity_monitor->Gfx_FPS = input[3]; - activity_monitor->Gfx_MinActiveFreqType = input[4]; - activity_monitor->Gfx_BoosterFreqType = input[5]; - activity_monitor->Gfx_MinActiveFreq = input[6]; - activity_monitor->Gfx_BoosterFreq = input[7]; - break; - case 1: /* Fclk */ - activity_monitor->Fclk_ActiveHystLimit = input[1]; - activity_monitor->Fclk_IdleHystLimit = input[2]; - activity_monitor->Fclk_FPS = input[3]; - activity_monitor->Fclk_MinActiveFreqType = input[4]; - activity_monitor->Fclk_BoosterFreqType = input[5]; - activity_monitor->Fclk_MinActiveFreq = input[6]; - activity_monitor->Fclk_BoosterFreq = input[7]; - break; +static int smu_v13_0_7_set_power_profile_mode(struct smu_context *smu, + u32 workload_mask, + long *custom_params, + u32 custom_params_max_idx) +{ + u32 backend_workload_mask = 0; + int ret, idx = -1, i; + + smu_cmn_get_backend_workload_mask(smu, workload_mask, + &backend_workload_mask); + + if (workload_mask & (1 << PP_SMC_POWER_PROFILE_CUSTOM)) { + if (!smu->custom_profile_params) { + smu->custom_profile_params = + kzalloc(SMU_13_0_7_CUSTOM_PARAMS_SIZE, GFP_KERNEL); + if (!smu->custom_profile_params) + return -ENOMEM; } - - ret = smu_cmn_update_table(smu, - SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT, - (void *)(&activity_monitor_external), true); + if (custom_params && custom_params_max_idx) { + if (custom_params_max_idx != SMU_13_0_7_CUSTOM_PARAMS_COUNT) + return -EINVAL; + if (custom_params[0] >= SMU_13_0_7_CUSTOM_PARAMS_CLOCK_COUNT) + return -EINVAL; + idx = custom_params[0] * SMU_13_0_7_CUSTOM_PARAMS_COUNT; + smu->custom_profile_params[idx] = 1; + for (i = 1; i < custom_params_max_idx; i++) + smu->custom_profile_params[idx + i] = custom_params[i]; + } + ret = smu_v13_0_7_set_power_profile_mode_coeff(smu, + smu->custom_profile_params); if (ret) { - dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__); + if (idx != -1) + smu->custom_profile_params[idx] = 0; return ret; } + } else if (smu->custom_profile_params) { + memset(smu->custom_profile_params, 0, SMU_13_0_7_CUSTOM_PARAMS_SIZE); } - /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */ - workload_type = smu_cmn_to_asic_specific_index(smu, - CMN2ASIC_MAPPING_WORKLOAD, - smu->power_profile_mode); - if (workload_type < 0) - return -EINVAL; - smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask, - 1 << workload_type, NULL); + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask, + backend_workload_mask, NULL); + + if (ret) { + dev_err(smu->adev->dev, "Failed to set workload mask 0x%08x\n", + workload_mask); + if (idx != -1) + smu->custom_profile_params[idx] = 0; + return ret; + } return ret; } @@ -2596,7 +2743,6 @@ static const struct pptable_funcs smu_v13_0_7_ppt_funcs = { .get_allowed_feature_mask = smu_v13_0_7_get_allowed_feature_mask, .set_default_dpm_table = smu_v13_0_7_set_default_dpm_table, .is_dpm_running = smu_v13_0_7_is_dpm_running, - .dump_pptable = 
smu_v13_0_7_dump_pptable, .init_microcode = smu_v13_0_init_microcode, .load_microcode = smu_v13_0_load_microcode, .fini_microcode = smu_v13_0_fini_microcode, @@ -2650,7 +2796,7 @@ static const struct pptable_funcs smu_v13_0_7_ppt_funcs = { .set_tool_table_location = smu_v13_0_set_tool_table_location, .get_pp_feature_mask = smu_cmn_get_pp_feature_mask, .set_pp_feature_mask = smu_cmn_set_pp_feature_mask, - .baco_is_support = smu_v13_0_baco_is_support, + .get_bamaco_support = smu_v13_0_get_bamaco_support, .baco_enter = smu_v13_0_baco_enter, .baco_exit = smu_v13_0_baco_exit, .mode1_reset_is_support = smu_v13_0_7_is_mode1_reset_supported, @@ -2661,6 +2807,7 @@ static const struct pptable_funcs smu_v13_0_7_ppt_funcs = { .is_asic_wbrf_supported = smu_v13_0_7_wbrf_support_check, .enable_uclk_shadow = smu_v13_0_enable_uclk_shadow, .set_wbrf_exclusion_ranges = smu_v13_0_set_wbrf_exclusion_ranges, + .interrupt_work = smu_v13_0_interrupt_work, }; void smu_v13_0_7_set_ppt_funcs(struct smu_context *smu) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c index 2d1736234b4a..73b4506ef5a8 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c @@ -220,7 +220,9 @@ static int yellow_carp_system_features_control(struct smu_context *smu, bool en) return ret; } -static int yellow_carp_dpm_set_vcn_enable(struct smu_context *smu, bool enable) +static int yellow_carp_dpm_set_vcn_enable(struct smu_context *smu, + bool enable, + int inst) { int ret = 0; @@ -363,7 +365,7 @@ static int yellow_carp_get_smu_metrics_data(struct smu_context *smu, *value = metrics->GfxActivity / 100; break; case METRICS_AVERAGE_VCNACTIVITY: - *value = metrics->UvdActivity; + *value = metrics->UvdActivity / 100; break; case METRICS_CURR_SOCKETPOWER: *value = (metrics->CurrentSocketPower << 8) / 1000; @@ -423,6 +425,12 @@ static int yellow_carp_read_sensor(struct smu_context *smu, (uint32_t *)data); *size = 4; break; + case AMDGPU_PP_SENSOR_VCN_LOAD: + ret = yellow_carp_get_smu_metrics_data(smu, + METRICS_AVERAGE_VCNACTIVITY, + (uint32_t *)data); + *size = 4; + break; case AMDGPU_PP_SENSOR_GPU_INPUT_POWER: ret = yellow_carp_get_smu_metrics_data(smu, METRICS_CURR_SOCKETPOWER, @@ -771,7 +779,7 @@ static int yellow_carp_get_dpm_level_count(struct smu_context *smu, *count = clk_table->NumDfPstatesEnabled; break; default: - break; + return -EINVAL; } return 0; @@ -861,31 +869,9 @@ static int yellow_carp_get_dpm_ultimate_freq(struct smu_context *smu, int ret = 0; if (!yellow_carp_clk_dpm_is_enabled(smu, clk_type)) { - switch (clk_type) { - case SMU_MCLK: - case SMU_UCLK: - clock_limit = smu->smu_table.boot_values.uclk; - break; - case SMU_FCLK: - clock_limit = smu->smu_table.boot_values.fclk; - break; - case SMU_GFXCLK: - case SMU_SCLK: - clock_limit = smu->smu_table.boot_values.gfxclk; - break; - case SMU_SOCCLK: - clock_limit = smu->smu_table.boot_values.socclk; - break; - case SMU_VCLK: - clock_limit = smu->smu_table.boot_values.vclk; - break; - case SMU_DCLK: - clock_limit = smu->smu_table.boot_values.dclk; - break; - default: - clock_limit = 0; - break; - } + ret = smu_v13_0_get_boot_freq_by_index(smu, clk_type, &clock_limit); + if (ret) + return ret; /* clock in Mhz unit */ if (min) @@ -961,9 +947,10 @@ failed: } static int yellow_carp_set_soft_freq_limited_range(struct smu_context *smu, - enum smu_clk_type clk_type, - uint32_t min, - uint32_t max) + enum smu_clk_type clk_type, + uint32_t min, + uint32_t 
max, + bool automatic) { enum smu_message_type msg_set_min, msg_set_max; uint32_t min_clk = min; @@ -1150,7 +1137,7 @@ static int yellow_carp_force_clk_levels(struct smu_context *smu, if (ret) goto force_level_out; - ret = yellow_carp_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq); + ret = yellow_carp_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq, false); if (ret) goto force_level_out; break; @@ -1270,9 +1257,10 @@ static int yellow_carp_set_performance_level(struct smu_context *smu, if (sclk_min && sclk_max) { ret = yellow_carp_set_soft_freq_limited_range(smu, - SMU_SCLK, - sclk_min, - sclk_max); + SMU_SCLK, + sclk_min, + sclk_max, + false); if (ret) return ret; @@ -1282,18 +1270,20 @@ static int yellow_carp_set_performance_level(struct smu_context *smu, if (fclk_min && fclk_max) { ret = yellow_carp_set_soft_freq_limited_range(smu, - SMU_FCLK, - fclk_min, - fclk_max); + SMU_FCLK, + fclk_min, + fclk_max, + false); if (ret) return ret; } if (socclk_min && socclk_max) { ret = yellow_carp_set_soft_freq_limited_range(smu, - SMU_SOCCLK, - socclk_min, - socclk_max); + SMU_SOCCLK, + socclk_min, + socclk_max, + false); if (ret) return ret; } @@ -1302,7 +1292,8 @@ static int yellow_carp_set_performance_level(struct smu_context *smu, ret = yellow_carp_set_soft_freq_limited_range(smu, SMU_VCLK, vclk_min, - vclk_max); + vclk_max, + false); if (ret) return ret; } @@ -1311,7 +1302,8 @@ static int yellow_carp_set_performance_level(struct smu_context *smu, ret = yellow_carp_set_soft_freq_limited_range(smu, SMU_DCLK, dclk_min, - dclk_max); + dclk_max, + false); if (ret) return ret; } |
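As a closing illustration of the interface change threaded through the hunks above, here is a minimal, hypothetical caller (not part of the patch; the helper name example_force_sclk_range is invented for this sketch) showing how the new "automatic" argument to yellow_carp_set_soft_freq_limited_range() is used. All converted call sites in this series pass false to keep the existing manual-range behaviour:

static int example_force_sclk_range(struct smu_context *smu,
				    uint32_t min_mhz, uint32_t max_mhz)
{
	/* false = manual range request, matching the converted callers above */
	return yellow_carp_set_soft_freq_limited_range(smu, SMU_SCLK,
						       min_mhz, max_mhz,
						       false);
}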