Diffstat (limited to 'drivers/gpu/drm/amd/pm')
27 files changed, 733 insertions, 398 deletions
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
index 5c1cbdc122d2..71d986dd7a6e 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
@@ -98,6 +98,7 @@ int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev,
 	case AMD_IP_BLOCK_TYPE_GMC:
 	case AMD_IP_BLOCK_TYPE_ACP:
 	case AMD_IP_BLOCK_TYPE_VPE:
+	case AMD_IP_BLOCK_TYPE_ISP:
 		if (pp_funcs && pp_funcs->set_powergating_by_smu)
 			ret = (pp_funcs->set_powergating_by_smu(
 				(adev)->powerplay.pp_handle, block_type, gate, 0));
@@ -852,22 +853,16 @@ int amdgpu_dpm_set_soft_freq_range(struct amdgpu_device *adev,
 				   uint32_t max)
 {
 	struct smu_context *smu = adev->powerplay.pp_handle;
-	int ret = 0;
-
-	if (type != PP_SCLK)
-		return -EINVAL;
 
 	if (!is_support_sw_smu(adev))
 		return -EOPNOTSUPP;
 
-	mutex_lock(&adev->pm.mutex);
-	ret = smu_set_soft_freq_range(smu,
-				      SMU_SCLK,
+	guard(mutex)(&adev->pm.mutex);
+
+	return smu_set_soft_freq_range(smu,
+				       type,
 				       min,
 				       max);
-	mutex_unlock(&adev->pm.mutex);
-
-	return ret;
 }
 
 int amdgpu_dpm_write_watermarks_table(struct amdgpu_device *adev)
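The amdgpu_dpm_set_soft_freq_range() hunk above swaps a manual mutex_lock()/mutex_unlock() pair for guard(mutex)(), the scope-based helper from <linux/cleanup.h> that releases the lock on every return path. A minimal userspace sketch of the same idiom, built on the compiler cleanup attribute that the kernel helper wraps (all names below are illustrative, not the kernel implementation):

/*
 * Sketch of scope-based locking: a variable with a cleanup attribute
 * unlocks the mutex automatically when it goes out of scope, so early
 * returns cannot leak the lock.
 */
#include <pthread.h>
#include <stdio.h>

static void unlock_cleanup(pthread_mutex_t **m)
{
	pthread_mutex_unlock(*m);
}

/* Lock now; unlock automatically when the guard leaves scope. */
#define guard_mutex(lock) \
	__attribute__((cleanup(unlock_cleanup))) \
	pthread_mutex_t *__guard = (lock); \
	pthread_mutex_lock(__guard)

static pthread_mutex_t pm_mutex = PTHREAD_MUTEX_INITIALIZER;

static int set_soft_freq_range(int min, int max)
{
	guard_mutex(&pm_mutex);

	if (min > max)
		return -1;	/* early return still unlocks */

	printf("range %d..%d applied\n", min, max);
	return 0;		/* unlock runs here as well */
}

int main(void)
{
	return set_soft_freq_range(500, 2500) ? 1 : 0;
}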
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
index edd9895b46c0..4b64851fdb42 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
@@ -1398,6 +1398,8 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
 		if (ret)
 			return -EINVAL;
 		parameter_size++;
+		if (!tmp_str)
+			break;
 		while (isspace(*tmp_str))
 			tmp_str++;
 	}
@@ -1890,7 +1892,7 @@ out:
 static int ss_power_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
 				uint32_t mask, enum amdgpu_device_attr_states *states)
 {
-	if (!amdgpu_device_supports_smart_shift(adev_to_drm(adev)))
+	if (!amdgpu_device_supports_smart_shift(adev))
 		*states = ATTR_STATE_UNSUPPORTED;
 
 	return 0;
@@ -1901,7 +1903,7 @@ static int ss_bias_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
 {
 	uint32_t ss_power;
 
-	if (!amdgpu_device_supports_smart_shift(adev_to_drm(adev)))
+	if (!amdgpu_device_supports_smart_shift(adev))
 		*states = ATTR_STATE_UNSUPPORTED;
 	else if (amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_SS_APU_SHARE,
 						 (void *)&ss_power))
@@ -3645,6 +3647,9 @@ static int parse_input_od_command_lines(const char *buf,
 			return -EINVAL;
 		parameter_size++;
 
+		if (!tmp_str)
+			break;
+
 		while (isspace(*tmp_str))
 			tmp_str++;
 	}
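Both amdgpu_pm.c hunks add the same NULL check to a token-parsing loop. The pattern assumes the cursor is advanced by strsep() (a detail outside the quoted context), which sets the pointer to NULL once the buffer is exhausted, so the isspace() whitespace skip would dereference NULL after the last token. A standalone approximation of the loop (hypothetical buffer and sizes, not the driver code):

/*
 * Sketch of the fixed parsing loop: strsep() NULLs the cursor at end
 * of input, so it must be tested before the isspace() dereference.
 */
#define _DEFAULT_SOURCE	/* for strsep() on glibc */
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	char buf[] = "0 2 4095";	/* stand-in for the sysfs write */
	char *tmp_str = buf;
	char *sub_str;
	long parameter[16];
	int parameter_size = 0;

	while ((sub_str = strsep(&tmp_str, " ")) != NULL) {
		parameter[parameter_size] = strtol(sub_str, NULL, 0);
		parameter_size++;
		if (!tmp_str)	/* the added guard: input exhausted */
			break;
		while (isspace((unsigned char)*tmp_str))	/* would deref NULL otherwise */
			tmp_str++;
	}

	printf("parsed %d parameters\n", parameter_size);
	return 0;
}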
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
index 34e71727b27d..307ebf7e3226 100644
--- a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
@@ -1242,7 +1242,7 @@ static void kv_dpm_enable_bapm(void *handle, bool enable)
 	if (pi->bapm_enable) {
 		ret = amdgpu_kv_smc_bapm_enable(adev, enable);
 		if (ret)
-			DRM_ERROR("amdgpu_kv_smc_bapm_enable failed\n");
+			drm_err(adev_to_drm(adev), "amdgpu_kv_smc_bapm_enable failed\n");
 	}
 }
 
@@ -1266,40 +1266,40 @@ static int kv_dpm_enable(struct amdgpu_device *adev)
 
 	ret = kv_process_firmware_header(adev);
 	if (ret) {
-		DRM_ERROR("kv_process_firmware_header failed\n");
+		drm_err(adev_to_drm(adev), "kv_process_firmware_header failed\n");
 		return ret;
 	}
 	kv_init_fps_limits(adev);
 	kv_init_graphics_levels(adev);
 	ret = kv_program_bootup_state(adev);
 	if (ret) {
-		DRM_ERROR("kv_program_bootup_state failed\n");
+		drm_err(adev_to_drm(adev), "kv_program_bootup_state failed\n");
 		return ret;
 	}
 	kv_calculate_dfs_bypass_settings(adev);
 	ret = kv_upload_dpm_settings(adev);
 	if (ret) {
-		DRM_ERROR("kv_upload_dpm_settings failed\n");
+		drm_err(adev_to_drm(adev), "kv_upload_dpm_settings failed\n");
 		return ret;
 	}
 	ret = kv_populate_uvd_table(adev);
 	if (ret) {
-		DRM_ERROR("kv_populate_uvd_table failed\n");
+		drm_err(adev_to_drm(adev), "kv_populate_uvd_table failed\n");
 		return ret;
 	}
 	ret = kv_populate_vce_table(adev);
 	if (ret) {
-		DRM_ERROR("kv_populate_vce_table failed\n");
+		drm_err(adev_to_drm(adev), "kv_populate_vce_table failed\n");
 		return ret;
 	}
 	ret = kv_populate_samu_table(adev);
 	if (ret) {
-		DRM_ERROR("kv_populate_samu_table failed\n");
+		drm_err(adev_to_drm(adev), "kv_populate_samu_table failed\n");
 		return ret;
 	}
 	ret = kv_populate_acp_table(adev);
 	if (ret) {
-		DRM_ERROR("kv_populate_acp_table failed\n");
+		drm_err(adev_to_drm(adev), "kv_populate_acp_table failed\n");
 		return ret;
 	}
 	kv_program_vc(adev);
@@ -1310,39 +1310,39 @@ static int kv_dpm_enable(struct amdgpu_device *adev)
 	if (pi->enable_auto_thermal_throttling) {
 		ret = kv_enable_auto_thermal_throttling(adev);
 		if (ret) {
-			DRM_ERROR("kv_enable_auto_thermal_throttling failed\n");
+			drm_err(adev_to_drm(adev), "kv_enable_auto_thermal_throttling failed\n");
 			return ret;
 		}
 	}
 	ret = kv_enable_dpm_voltage_scaling(adev);
 	if (ret) {
-		DRM_ERROR("kv_enable_dpm_voltage_scaling failed\n");
+		drm_err(adev_to_drm(adev), "kv_enable_dpm_voltage_scaling failed\n");
 		return ret;
 	}
 	ret = kv_set_dpm_interval(adev);
 	if (ret) {
-		DRM_ERROR("kv_set_dpm_interval failed\n");
+		drm_err(adev_to_drm(adev), "kv_set_dpm_interval failed\n");
 		return ret;
 	}
 	ret = kv_set_dpm_boot_state(adev);
 	if (ret) {
-		DRM_ERROR("kv_set_dpm_boot_state failed\n");
+		drm_err(adev_to_drm(adev), "kv_set_dpm_boot_state failed\n");
 		return ret;
 	}
 	ret = kv_enable_ulv(adev, true);
 	if (ret) {
-		DRM_ERROR("kv_enable_ulv failed\n");
+		drm_err(adev_to_drm(adev), "kv_enable_ulv failed\n");
 		return ret;
 	}
 	kv_start_dpm(adev);
 	ret = kv_enable_didt(adev, true);
 	if (ret) {
-		DRM_ERROR("kv_enable_didt failed\n");
+		drm_err(adev_to_drm(adev), "kv_enable_didt failed\n");
 		return ret;
 	}
 	ret = kv_enable_smc_cac(adev, true);
 	if (ret) {
-		DRM_ERROR("kv_enable_smc_cac failed\n");
+		drm_err(adev_to_drm(adev), "kv_enable_smc_cac failed\n");
 		return ret;
 	}
 
@@ -1350,7 +1350,7 @@ static int kv_dpm_enable(struct amdgpu_device *adev)
 
 	ret = amdgpu_kv_smc_bapm_enable(adev, false);
 	if (ret) {
-		DRM_ERROR("amdgpu_kv_smc_bapm_enable failed\n");
+		drm_err(adev_to_drm(adev), "amdgpu_kv_smc_bapm_enable failed\n");
 		return ret;
 	}
 
@@ -1358,7 +1358,7 @@ static int kv_dpm_enable(struct amdgpu_device *adev)
 	    kv_is_internal_thermal_sensor(adev->pm.int_thermal_type)) {
 		ret = kv_set_thermal_temperature_range(adev, KV_TEMP_RANGE_MIN, KV_TEMP_RANGE_MAX);
 		if (ret) {
-			DRM_ERROR("kv_set_thermal_temperature_range failed\n");
+			drm_err(adev_to_drm(adev), "kv_set_thermal_temperature_range failed\n");
 			return ret;
 		}
 		amdgpu_irq_get(adev, &adev->pm.dpm.thermal.irq,
@@ -1382,7 +1382,7 @@ static void kv_dpm_disable(struct amdgpu_device *adev)
 
 	err = amdgpu_kv_smc_bapm_enable(adev, false);
 	if (err)
-		DRM_ERROR("amdgpu_kv_smc_bapm_enable failed\n");
+		drm_err(adev_to_drm(adev), "amdgpu_kv_smc_bapm_enable failed\n");
 
 	if (adev->asic_type == CHIP_MULLINS)
 		kv_enable_nb_dpm(adev, false);
@@ -1920,7 +1920,7 @@ static int kv_dpm_set_power_state(void *handle)
 	if (pi->bapm_enable) {
 		ret = amdgpu_kv_smc_bapm_enable(adev, adev->pm.ac_power);
 		if (ret) {
-			DRM_ERROR("amdgpu_kv_smc_bapm_enable failed\n");
+			drm_err(adev_to_drm(adev), "amdgpu_kv_smc_bapm_enable failed\n");
 			return ret;
 		}
 	}
@@ -1931,7 +1931,7 @@ static int kv_dpm_set_power_state(void *handle)
 		kv_update_dfs_bypass_settings(adev, new_ps);
 		ret = kv_calculate_ds_divider(adev);
 		if (ret) {
-			DRM_ERROR("kv_calculate_ds_divider failed\n");
+			drm_err(adev_to_drm(adev), "kv_calculate_ds_divider failed\n");
 			return ret;
 		}
 		kv_calculate_nbps_level_settings(adev);
@@ -1947,7 +1947,7 @@ static int kv_dpm_set_power_state(void *handle)
 
 		ret = kv_update_vce_dpm(adev, new_ps, old_ps);
 		if (ret) {
-			DRM_ERROR("kv_update_vce_dpm failed\n");
+			drm_err(adev_to_drm(adev), "kv_update_vce_dpm failed\n");
 			return ret;
 		}
 		kv_update_sclk_t(adev);
@@ -1960,7 +1960,7 @@ static int kv_dpm_set_power_state(void *handle)
 		kv_update_dfs_bypass_settings(adev, new_ps);
 		ret = kv_calculate_ds_divider(adev);
 		if (ret) {
-			DRM_ERROR("kv_calculate_ds_divider failed\n");
+			drm_err(adev_to_drm(adev), "kv_calculate_ds_divider failed\n");
 			return ret;
 		}
 		kv_calculate_nbps_level_settings(adev);
@@ -1972,7 +1972,7 @@ static int kv_dpm_set_power_state(void *handle)
 		kv_set_enabled_levels(adev);
 		ret = kv_update_vce_dpm(adev, new_ps, old_ps);
 		if (ret) {
-			DRM_ERROR("kv_update_vce_dpm failed\n");
+			drm_err(adev_to_drm(adev), "kv_update_vce_dpm failed\n");
 			return ret;
 		}
 		kv_update_acp_boot_level(adev);
@@ -2521,7 +2521,7 @@ static int kv_set_thermal_temperature_range(struct amdgpu_device *adev,
 	if (high_temp > max_temp)
 		high_temp = max_temp;
 	if (high_temp < low_temp) {
-		DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
+		drm_err(adev_to_drm(adev), "invalid thermal range: %d - %d\n", low_temp, high_temp);
 		return -EINVAL;
 	}
 
@@ -2563,7 +2563,7 @@ static int kv_parse_sys_info_table(struct amdgpu_device *adev)
 						    data_offset);
 
 		if (crev != 8) {
-			DRM_ERROR("Unsupported IGP table: %d %d\n", frev, crev);
+			drm_err(adev_to_drm(adev), "Unsupported IGP table: %d %d\n", frev, crev);
 			return -EINVAL;
 		}
 		pi->sys_info.bootup_sclk = le32_to_cpu(igp_info->info_8.ulBootUpEngineClock);
@@ -2579,7 +2579,7 @@ static int kv_parse_sys_info_table(struct amdgpu_device *adev)
 		else
 			pi->sys_info.htc_hyst_lmt = igp_info->info_8.ucHtcHystLmt;
 		if (pi->sys_info.htc_tmp_lmt <= pi->sys_info.htc_hyst_lmt) {
-			DRM_ERROR("The htcTmpLmt should be larger than htcHystLmt.\n");
+			drm_err(adev_to_drm(adev), "The htcTmpLmt should be larger than htcHystLmt.\n");
 		}
 
 		if (le32_to_cpu(igp_info->info_8.ulSystemConfig) & (1 << 3))
@@ -2886,16 +2886,18 @@ kv_dpm_print_power_state(void *handle, void *request_ps)
 	struct kv_ps *ps = kv_get_ps(rps);
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	amdgpu_dpm_print_class_info(rps->class, rps->class2);
-	amdgpu_dpm_print_cap_info(rps->caps);
-	printk("\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
+	amdgpu_dpm_dbg_print_class_info(adev, rps->class, rps->class2);
+	amdgpu_dpm_dbg_print_cap_info(adev, rps->caps);
+	drm_dbg(adev_to_drm(adev), "vclk: %d, dclk: %d\n",
+		rps->vclk, rps->dclk);
 	for (i = 0; i < ps->num_levels; i++) {
 		struct kv_pl *pl = &ps->levels[i];
-		printk("\t\tpower level %d sclk: %u vddc: %u\n",
-		       i, pl->sclk,
-		       kv_convert_8bit_index_to_voltage(adev, pl->vddc_index));
+		drm_dbg(adev_to_drm(adev),
+			"power level %d sclk: %u vddc: %u\n",
+			i, pl->sclk,
+			kv_convert_8bit_index_to_voltage(adev, pl->vddc_index));
 	}
-	amdgpu_dpm_print_ps_status(adev, rps);
+	amdgpu_dpm_dbg_print_ps_status(adev, rps);
 }
 
 static void kv_dpm_fini(struct amdgpu_device *adev)
@@ -3013,13 +3015,13 @@ static int kv_dpm_sw_init(struct amdgpu_ip_block *ip_block)
 	adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
 	if (amdgpu_dpm == 1)
 		amdgpu_pm_print_power_states(adev);
-	DRM_INFO("amdgpu: dpm initialized\n");
+	drm_info(adev_to_drm(adev), "dpm initialized\n");
 
 	return 0;
 
 dpm_failed:
 	kv_dpm_fini(adev);
-	DRM_ERROR("amdgpu: dpm initialization failed\n");
+	drm_err(adev_to_drm(adev), "dpm initialization failed: %d\n", ret);
 	return ret;
 }
 
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c
index c7518b13e787..ea3ace882a10 100644
--- a/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c
@@ -47,7 +47,7 @@
 #define amdgpu_dpm_check_state_equal(adev, cps, rps, equal) \
 	((adev)->powerplay.pp_funcs->check_state_equal((adev)->powerplay.pp_handle, (cps), (rps), (equal)))
 
-void amdgpu_dpm_print_class_info(u32 class, u32 class2)
+void amdgpu_dpm_dbg_print_class_info(struct amdgpu_device *adev, u32 class, u32 class2)
 {
 	const char *s;
 
@@ -66,71 +66,45 @@ void amdgpu_dpm_print_class_info(u32 class, u32 class2)
 		s = "performance";
 		break;
 	}
-	printk("\tui class: %s\n", s);
-	printk("\tinternal class:");
+	drm_dbg(adev_to_drm(adev), "\tui class: %s\n", s);
 	if (((class & ~ATOM_PPLIB_CLASSIFICATION_UI_MASK) == 0) &&
 	    (class2 == 0))
-		pr_cont(" none");
-	else {
-		if (class & ATOM_PPLIB_CLASSIFICATION_BOOT)
-			pr_cont(" boot");
-		if (class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
-			pr_cont(" thermal");
-		if (class & ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE)
-			pr_cont(" limited_pwr");
-		if (class & ATOM_PPLIB_CLASSIFICATION_REST)
-			pr_cont(" rest");
-		if (class & ATOM_PPLIB_CLASSIFICATION_FORCED)
-			pr_cont(" forced");
-		if (class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
-			pr_cont(" 3d_perf");
-		if (class & ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE)
-			pr_cont(" ovrdrv");
-		if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
-			pr_cont(" uvd");
-		if (class & ATOM_PPLIB_CLASSIFICATION_3DLOW)
-			pr_cont(" 3d_low");
-		if (class & ATOM_PPLIB_CLASSIFICATION_ACPI)
-			pr_cont(" acpi");
-		if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
-			pr_cont(" uvd_hd2");
-		if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
-			pr_cont(" uvd_hd");
-		if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
-			pr_cont(" uvd_sd");
-		if (class2 & ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2)
-			pr_cont(" limited_pwr2");
-		if (class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
-			pr_cont(" ulv");
-		if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
-			pr_cont(" uvd_mvc");
-	}
-	pr_cont("\n");
+		drm_dbg(adev_to_drm(adev), "\tinternal class: none\n");
+	else
+		drm_dbg(adev_to_drm(adev), "\tinternal class: %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
+			(class & ATOM_PPLIB_CLASSIFICATION_BOOT) ? " boot" : "",
+			(class & ATOM_PPLIB_CLASSIFICATION_THERMAL) ? " thermal" : "",
+			(class & ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE) ? " limited_pwr" : "",
+			(class & ATOM_PPLIB_CLASSIFICATION_REST) ? " rest" : "",
+			(class & ATOM_PPLIB_CLASSIFICATION_FORCED) ? " forced" : "",
+			(class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE) ? " 3d_perf" : "",
+			(class & ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE) ? " ovrdrv" : "",
+			(class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE) ? " uvd" : "",
+			(class & ATOM_PPLIB_CLASSIFICATION_3DLOW) ? " 3d_low" : "",
+			(class & ATOM_PPLIB_CLASSIFICATION_ACPI) ? " acpi" : "",
+			(class & ATOM_PPLIB_CLASSIFICATION_HD2STATE) ? " uvd_hd2" : "",
+			(class & ATOM_PPLIB_CLASSIFICATION_HDSTATE) ? " uvd_hd" : "",
+			(class & ATOM_PPLIB_CLASSIFICATION_SDSTATE) ? " uvd_sd" : "",
+			(class2 & ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2) ? " limited_pwr2" : "",
+			(class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) ? " ulv" : "",
+			(class2 & ATOM_PPLIB_CLASSIFICATION2_MVC) ? " uvd_mvc" : "");
 }
 
-void amdgpu_dpm_print_cap_info(u32 caps)
+void amdgpu_dpm_dbg_print_cap_info(struct amdgpu_device *adev, u32 caps)
 {
-	printk("\tcaps:");
-	if (caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY)
-		pr_cont(" single_disp");
-	if (caps & ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK)
-		pr_cont(" video");
-	if (caps & ATOM_PPLIB_DISALLOW_ON_DC)
-		pr_cont(" no_dc");
-	pr_cont("\n");
+	drm_dbg(adev_to_drm(adev), "\tcaps: %s%s%s\n",
+		(caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) ? " single_disp" : "",
+		(caps & ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK) ? " video" : "",
+		(caps & ATOM_PPLIB_DISALLOW_ON_DC) ? " no_dc" : "");
 }
 
-void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev,
+void amdgpu_dpm_dbg_print_ps_status(struct amdgpu_device *adev,
 				struct amdgpu_ps *rps)
 {
-	printk("\tstatus:");
-	if (rps == adev->pm.dpm.current_ps)
-		pr_cont(" c");
-	if (rps == adev->pm.dpm.requested_ps)
-		pr_cont(" r");
-	if (rps == adev->pm.dpm.boot_ps)
-		pr_cont(" b");
-	pr_cont("\n");
+	drm_dbg(adev_to_drm(adev), "\tstatus:%s%s%s\n",
+		rps == adev->pm.dpm.current_ps ? " c" : "",
+		rps == adev->pm.dpm.requested_ps ? " r" : "",
+		rps == adev->pm.dpm.boot_ps ? " b" : "");
 }
 
 void amdgpu_pm_print_power_states(struct amdgpu_device *adev)
@@ -699,64 +673,64 @@ void amdgpu_add_thermal_controller(struct amdgpu_device *adev)
 			adev->pm.fan_max_rpm = controller->ucFanMaxRPM;
 		}
 		if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) {
-			DRM_INFO("Internal thermal controller %s fan control\n",
+			drm_info(adev_to_drm(adev), "Internal thermal controller %s fan control\n",
 				 (controller->ucFanParameters &
 				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
 			adev->pm.int_thermal_type = THERMAL_TYPE_RV6XX;
 		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV770) {
-			DRM_INFO("Internal thermal controller %s fan control\n",
+			drm_info(adev_to_drm(adev), "Internal thermal controller %s fan control\n",
 				 (controller->ucFanParameters &
 				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
 			adev->pm.int_thermal_type = THERMAL_TYPE_RV770;
 		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EVERGREEN) {
-			DRM_INFO("Internal thermal controller %s fan control\n",
+			drm_info(adev_to_drm(adev), "Internal thermal controller %s fan control\n",
 				 (controller->ucFanParameters &
 				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
 			adev->pm.int_thermal_type = THERMAL_TYPE_EVERGREEN;
 		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SUMO) {
-			DRM_INFO("Internal thermal controller %s fan control\n",
+			drm_info(adev_to_drm(adev), "Internal thermal controller %s fan control\n",
 				 (controller->ucFanParameters &
 				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
 			adev->pm.int_thermal_type = THERMAL_TYPE_SUMO;
 		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_NISLANDS) {
-			DRM_INFO("Internal thermal controller %s fan control\n",
+			drm_info(adev_to_drm(adev), "Internal thermal controller %s fan control\n",
 				 (controller->ucFanParameters &
 				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
 			adev->pm.int_thermal_type = THERMAL_TYPE_NI;
 		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SISLANDS) {
-			DRM_INFO("Internal thermal controller %s fan control\n",
+			drm_info(adev_to_drm(adev), "Internal thermal controller %s fan control\n",
 				 (controller->ucFanParameters &
 				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
 			adev->pm.int_thermal_type = THERMAL_TYPE_SI;
 		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_CISLANDS) {
-			DRM_INFO("Internal thermal controller %s fan control\n",
+			drm_info(adev_to_drm(adev), "Internal thermal controller %s fan control\n",
 				 (controller->ucFanParameters &
 				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
 			adev->pm.int_thermal_type = THERMAL_TYPE_CI;
 		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_KAVERI) {
-			DRM_INFO("Internal thermal controller %s fan control\n",
+			drm_info(adev_to_drm(adev), "Internal thermal controller %s fan control\n",
 				 (controller->ucFanParameters &
 				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
 			adev->pm.int_thermal_type = THERMAL_TYPE_KV;
 		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) {
-			DRM_INFO("External GPIO thermal controller %s fan control\n",
+			drm_info(adev_to_drm(adev), "External GPIO thermal controller %s fan control\n",
 				 (controller->ucFanParameters &
 				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
 			adev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL_GPIO;
 		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL) {
-			DRM_INFO("ADT7473 with internal thermal controller %s fan control\n",
+			drm_info(adev_to_drm(adev), "ADT7473 with internal thermal controller %s fan control\n",
 				 (controller->ucFanParameters &
 				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
 			adev->pm.int_thermal_type = THERMAL_TYPE_ADT7473_WITH_INTERNAL;
 		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL) {
-			DRM_INFO("EMC2103 with internal thermal controller %s fan control\n",
+			drm_info(adev_to_drm(adev), "EMC2103 with internal thermal controller %s fan control\n",
 				 (controller->ucFanParameters &
 				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
 			adev->pm.int_thermal_type = THERMAL_TYPE_EMC2103_WITH_INTERNAL;
 		} else if (controller->ucType < ARRAY_SIZE(pp_lib_thermal_controller_names)) {
-			DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n",
+			drm_info(adev_to_drm(adev), "Possible %s thermal controller at 0x%02x %s fan control\n",
 				 pp_lib_thermal_controller_names[controller->ucType],
 				 controller->ucI2cAddress >> 1,
 				 (controller->ucFanParameters &
@@ -772,7 +746,7 @@ void amdgpu_add_thermal_controller(struct amdgpu_device *adev)
 			i2c_new_client_device(&adev->pm.i2c_bus->adapter, &info);
 		}
 	} else {
-		DRM_INFO("Unknown thermal controller type %d at 0x%02x %s fan control\n",
+		drm_info(adev_to_drm(adev), "Unknown thermal controller type %d at 0x%02x %s fan control\n",
 			 controller->ucType,
 			 controller->ucI2cAddress >> 1,
 			 (controller->ucFanParameters &
@@ -943,9 +917,9 @@ static int amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
 		return -EINVAL;
 
 	if (amdgpu_dpm == 1 && pp_funcs->print_power_state) {
-		printk("switching from power state:\n");
+		drm_dbg(adev_to_drm(adev), "switching from power state\n");
 		amdgpu_dpm_print_power_state(adev, adev->pm.dpm.current_ps);
-		printk("switching to power state:\n");
+		drm_dbg(adev_to_drm(adev), "switching to power state\n");
 		amdgpu_dpm_print_power_state(adev, adev->pm.dpm.requested_ps);
 	}
 
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.h b/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.h
index 93bd3973330c..7120eef30509 100644
--- a/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.h
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.h
@@ -23,10 +23,9 @@
 #ifndef __LEGACY_DPM_H__
 #define __LEGACY_DPM_H__
 
-void amdgpu_dpm_print_class_info(u32 class, u32 class2);
-void amdgpu_dpm_print_cap_info(u32 caps);
-void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev,
-				struct amdgpu_ps *rps);
+void amdgpu_dpm_dbg_print_class_info(struct amdgpu_device *adev, u32 class, u32 class2);
+void amdgpu_dpm_dbg_print_cap_info(struct amdgpu_device *adev, u32 caps);
+void amdgpu_dpm_dbg_print_ps_status(struct amdgpu_device *adev, struct amdgpu_ps *rps);
 int amdgpu_get_platform_caps(struct amdgpu_device *adev);
 int amdgpu_parse_extended_power_table(struct amdgpu_device *adev);
 void amdgpu_free_extended_power_table(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
index 4c0e976004ba..52e732be59e3 100644
--- a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
@@ -7951,15 +7951,15 @@ static void si_dpm_print_power_state(void *handle,
 	struct rv7xx_pl *pl;
 	int i;
 
-	amdgpu_dpm_print_class_info(rps->class, rps->class2);
-	amdgpu_dpm_print_cap_info(rps->caps);
-	DRM_INFO("\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
+	amdgpu_dpm_dbg_print_class_info(adev, rps->class, rps->class2);
+	amdgpu_dpm_dbg_print_cap_info(adev, rps->caps);
+	drm_dbg(adev_to_drm(adev), "\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
 	for (i = 0; i < ps->performance_level_count; i++) {
 		pl = &ps->performance_levels[i];
-		DRM_INFO("\t\tpower level %d sclk: %u mclk: %u vddc: %u vddci: %u pcie gen: %u\n",
-			 i, pl->sclk, pl->mclk, pl->vddc, pl->vddci, pl->pcie_gen + 1);
+		drm_dbg(adev_to_drm(adev), "\t\tpower level %d sclk: %u mclk: %u vddc: %u vddci: %u pcie gen: %u\n",
+			i, pl->sclk, pl->mclk, pl->vddc, pl->vddci, pl->pcie_gen + 1);
 	}
-	amdgpu_dpm_print_ps_status(adev, rps);
+	amdgpu_dpm_dbg_print_ps_status(adev, rps);
 }
 
 static int si_dpm_early_init(struct amdgpu_ip_block *ip_block)
 
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c
index 79a566f3564a..c305ea4ec17d 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c
@@ -149,7 +149,7 @@ int phm_wait_on_indirect_register(struct pp_hwmgr *hwmgr,
 	}
 
 	cgs_write_register(hwmgr->device, indirect_port, index);
-	return phm_wait_on_register(hwmgr, indirect_port + 1, mask, value);
+	return phm_wait_on_register(hwmgr, indirect_port + 1, value, mask);
 }
 
 int phm_wait_for_register_unequal(struct pp_hwmgr *hwmgr,
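The one-line smu_helper.c fix swaps the last two arguments so they match the value-before-mask parameter order of phm_wait_on_register(). A toy demonstration of why a swapped mask/value pair can poll for the wrong condition (simulated register read; the helper and values here are illustrative, not the driver's code):

/*
 * Poll until (reg & mask) == (value & mask). With mask and value
 * swapped, the comparison masks with the wrong operand and can
 * succeed or fail for the wrong reason.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t read_reg(void)
{
	return 0xA3;	/* simulated hardware register */
}

/* One iteration stands in for the driver's bounded retry loop. */
static int wait_on_register(uint32_t value, uint32_t mask)
{
	return (read_reg() & mask) == (value & mask) ? 0 : -1;
}

int main(void)
{
	uint32_t value = 0xAF, mask = 0xF0;

	printf("value, mask: %d\n", wait_on_register(value, mask));	/* 0: high nibble matches */
	printf("mask, value: %d\n", wait_on_register(mask, value));	/* -1: wrong test */
	return 0;
}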
"gate" : "ungate"); + break; default: dev_err(smu->adev->dev, "Unsupported block type!\n"); return -EINVAL; @@ -1004,6 +1036,21 @@ static int smu_fini_fb_allocations(struct smu_context *smu) return 0; } +static void smu_update_gpu_addresses(struct smu_context *smu) +{ + struct smu_table_context *smu_table = &smu->smu_table; + struct smu_table *pm_status_table = smu_table->tables + SMU_TABLE_PMSTATUSLOG; + struct smu_table *driver_table = &(smu_table->driver_table); + struct smu_table *dummy_read_1_table = &smu_table->dummy_read_1_table; + + if (pm_status_table->bo) + pm_status_table->mc_address = amdgpu_bo_fb_aper_addr(pm_status_table->bo); + if (driver_table->bo) + driver_table->mc_address = amdgpu_bo_fb_aper_addr(driver_table->bo); + if (dummy_read_1_table->bo) + dummy_read_1_table->mc_address = amdgpu_bo_fb_aper_addr(dummy_read_1_table->bo); +} + /** * smu_alloc_memory_pool - allocate memory pool in the system memory * @@ -1285,6 +1332,7 @@ static int smu_sw_init(struct amdgpu_ip_block *ip_block) atomic_set(&smu->smu_power.power_gate.vcn_gated[i], 1); atomic_set(&smu->smu_power.power_gate.jpeg_gated, 1); atomic_set(&smu->smu_power.power_gate.vpe_gated, 1); + atomic_set(&smu->smu_power.power_gate.isp_gated, 1); atomic_set(&smu->smu_power.power_gate.umsch_mm_gated, 1); smu_init_power_profile(smu); @@ -1672,37 +1720,6 @@ static int smu_smc_hw_setup(struct smu_context *smu) } } - ret = smu_system_features_control(smu, true); - if (ret) { - dev_err(adev->dev, "Failed to enable requested dpm features!\n"); - return ret; - } - - smu_init_xgmi_plpd_mode(smu); - - ret = smu_feature_get_enabled_mask(smu, &features_supported); - if (ret) { - dev_err(adev->dev, "Failed to retrieve supported dpm features!\n"); - return ret; - } - bitmap_copy(feature->supported, - (unsigned long *)&features_supported, - feature->feature_num); - - if (!smu_is_dpm_running(smu)) - dev_info(adev->dev, "dpm has been disabled\n"); - - /* - * Set initialized values (get from vbios) to dpm tables context such as - * gfxclk, memclk, dcefclk, and etc. And enable the DPM feature for each - * type of clks. - */ - ret = smu_set_default_dpm_table(smu); - if (ret) { - dev_err(adev->dev, "Failed to setup default dpm clock tables!\n"); - return ret; - } - if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5) pcie_gen = 4; else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4) @@ -1738,6 +1755,37 @@ static int smu_smc_hw_setup(struct smu_context *smu) return ret; } + ret = smu_system_features_control(smu, true); + if (ret) { + dev_err(adev->dev, "Failed to enable requested dpm features!\n"); + return ret; + } + + smu_init_xgmi_plpd_mode(smu); + + ret = smu_feature_get_enabled_mask(smu, &features_supported); + if (ret) { + dev_err(adev->dev, "Failed to retrieve supported dpm features!\n"); + return ret; + } + bitmap_copy(feature->supported, + (unsigned long *)&features_supported, + feature->feature_num); + + if (!smu_is_dpm_running(smu)) + dev_info(adev->dev, "dpm has been disabled\n"); + + /* + * Set initialized values (get from vbios) to dpm tables context such as + * gfxclk, memclk, dcefclk, and etc. And enable the DPM feature for each + * type of clks. 
+ */ + ret = smu_set_default_dpm_table(smu); + if (ret) { + dev_err(adev->dev, "Failed to setup default dpm clock tables!\n"); + return ret; + } + ret = smu_get_thermal_temperature_range(smu); if (ret) { dev_err(adev->dev, "Failed to get thermal temperature ranges!\n"); @@ -1780,6 +1828,9 @@ static int smu_start_smc_engine(struct smu_context *smu) struct amdgpu_device *adev = smu->adev; int ret = 0; + if (amdgpu_virt_xgmi_migrate_enabled(adev)) + smu_update_gpu_addresses(smu); + smu->smc_fw_state = SMU_FW_INIT; if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) { @@ -2935,6 +2986,12 @@ static enum smu_clk_type smu_convert_to_smuclk(enum pp_clock_type type) clk_type = SMU_DCLK; break; case PP_DCLK1: clk_type = SMU_DCLK1; break; + case PP_ISPICLK: + clk_type = SMU_ISPICLK; + break; + case PP_ISPXCLK: + clk_type = SMU_ISPXCLK; + break; case OD_SCLK: clk_type = SMU_OD_SCLK; break; case OD_MCLK: diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h index 9aacc7bc1c69..b52e194397e2 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h @@ -402,6 +402,7 @@ struct smu_power_gate { atomic_t vcn_gated[AMDGPU_MAX_VCN_INSTANCES]; atomic_t jpeg_gated; atomic_t vpe_gated; + atomic_t isp_gated; atomic_t umsch_mm_gated; }; @@ -1436,6 +1437,12 @@ struct pptable_funcs { int (*dpm_set_vpe_enable)(struct smu_context *smu, bool enable); /** + * @dpm_set_isp_enable: Enable/disable ISP engine dynamic power + * management. + */ + int (*dpm_set_isp_enable)(struct smu_context *smu, bool enable); + + /** * @dpm_set_umsch_mm_enable: Enable/disable UMSCH engine dynamic power * management. */ @@ -1635,7 +1642,7 @@ int smu_write_watermarks_table(struct smu_context *smu); int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t *min, uint32_t *max); -int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type, +int smu_set_soft_freq_range(struct smu_context *smu, enum pp_clock_type clk_type, uint32_t min, uint32_t max); int smu_set_gfx_power_up_by_imu(struct smu_context *smu); diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h index 1bc30db22f9c..cd44f4254134 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h @@ -106,6 +106,7 @@ typedef struct { #define NUM_FCLK_DPM_LEVELS 8 #define NUM_MEM_PSTATE_LEVELS 4 +#define ISP_ALL_TILES_MASK 0x7FF typedef struct { uint32_t UClk; diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_pmfw.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_pmfw.h index d7505cfc433a..0a2ca544f4e3 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_pmfw.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_pmfw.h @@ -86,8 +86,10 @@ typedef enum { /*36*/ FEATURE_PIT = 36, /*37*/ FEATURE_DVO = 37, /*38*/ FEATURE_XVMINORPSM_CLKSTOP_DS = 38, +/*39*/ FEATURE_GLOBAL_DPM = 39, +/*40*/ FEATURE_NODE_POWER_MANAGER = 40, -/*39*/ NUM_FEATURES = 39 +/*41*/ NUM_FEATURES = 41 } FEATURE_LIST_e; //enum for MPIO PCIe gen speed msgs @@ -133,7 +135,7 @@ typedef enum { GFX_DVM_MARGIN_COUNT } GFX_DVM_MARGIN_e; -#define SMU_METRICS_TABLE_VERSION 0x12 +#define SMU_METRICS_TABLE_VERSION 0x13 typedef struct __attribute__((packed, aligned(4))) { uint64_t AccumulationCounter; @@ -275,6 +277,16 @@ typedef struct { //PSNs uint64_t 
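The new smu_dpm_set_isp_enable() follows the same idempotent gating pattern as the other engine helpers: isp_gated and enable are logical opposites, so XOR-ing them detects "already in the requested state" and skips the firmware message; the cached state is only updated after the message succeeds. A compact sketch of that state machine, using C11 atomics in place of the kernel's atomic_t (names are illustrative):

/*
 * Idempotent power-gate toggle: gated ^ enable != 0 means the block
 * is already in the requested state, so no SMU message is needed.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int isp_gated = 1;	/* gated (powered down) at init */

static int send_smu_gate_msg(bool enable)
{
	printf("SMU: %s ISP\n", enable ? "ungate" : "gate");
	return 0;	/* pretend the firmware accepted the message */
}

static int dpm_set_isp_enable(bool enable)
{
	if (atomic_load(&isp_gated) ^ enable)
		return 0;	/* already in the requested state */

	int ret = send_smu_gate_msg(enable);
	if (!ret)
		atomic_store(&isp_gated, !enable);	/* commit on success only */
	return ret;
}

int main(void)
{
	dpm_set_isp_enable(true);	/* gated -> sends ungate */
	dpm_set_isp_enable(true);	/* already enabled -> no-op */
	dpm_set_isp_enable(false);	/* sends gate */
	return 0;
}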
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
index 9aacc7bc1c69..b52e194397e2 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
@@ -402,6 +402,7 @@ struct smu_power_gate {
 	atomic_t vcn_gated[AMDGPU_MAX_VCN_INSTANCES];
 	atomic_t jpeg_gated;
 	atomic_t vpe_gated;
+	atomic_t isp_gated;
 	atomic_t umsch_mm_gated;
 };
 
@@ -1436,6 +1437,12 @@ struct pptable_funcs {
 	int (*dpm_set_vpe_enable)(struct smu_context *smu, bool enable);
 
 	/**
+	 * @dpm_set_isp_enable: Enable/disable ISP engine dynamic power
+	 * management.
+	 */
+	int (*dpm_set_isp_enable)(struct smu_context *smu, bool enable);
+
+	/**
 	 * @dpm_set_umsch_mm_enable: Enable/disable UMSCH engine dynamic power
 	 * management.
 	 */
@@ -1635,7 +1642,7 @@ int smu_write_watermarks_table(struct smu_context *smu);
 int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
 			   uint32_t *min, uint32_t *max);
 
-int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
+int smu_set_soft_freq_range(struct smu_context *smu, enum pp_clock_type clk_type,
 			    uint32_t min, uint32_t max);
 
 int smu_set_gfx_power_up_by_imu(struct smu_context *smu);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h
index 1bc30db22f9c..cd44f4254134 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h
@@ -106,6 +106,7 @@ typedef struct {
 #define NUM_FCLK_DPM_LEVELS 8
 #define NUM_MEM_PSTATE_LEVELS 4
 
+#define ISP_ALL_TILES_MASK 0x7FF
 
 typedef struct {
 	uint32_t UClk;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_pmfw.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_pmfw.h
index d7505cfc433a..0a2ca544f4e3 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_pmfw.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_pmfw.h
@@ -86,8 +86,10 @@ typedef enum {
 /*36*/ FEATURE_PIT = 36,
 /*37*/ FEATURE_DVO = 37,
 /*38*/ FEATURE_XVMINORPSM_CLKSTOP_DS = 38,
+/*39*/ FEATURE_GLOBAL_DPM = 39,
+/*40*/ FEATURE_NODE_POWER_MANAGER = 40,
 
-/*39*/ NUM_FEATURES = 39
+/*41*/ NUM_FEATURES = 41
 } FEATURE_LIST_e;
 
 //enum for MPIO PCIe gen speed msgs
@@ -133,7 +135,7 @@ typedef enum {
 	GFX_DVM_MARGIN_COUNT
 } GFX_DVM_MARGIN_e;
 
-#define SMU_METRICS_TABLE_VERSION 0x12
+#define SMU_METRICS_TABLE_VERSION 0x13
 
 typedef struct __attribute__((packed, aligned(4))) {
 	uint64_t AccumulationCounter;
@@ -275,6 +277,16 @@ typedef struct {
 	//PSNs
 	uint64_t PublicSerialNumber_AID[4];
 	uint64_t PublicSerialNumber_XCD[8];
+
+	//XGMI
+	uint32_t MaxXgmiWidth;
+	uint32_t MaxXgmiBitrate;
+
+	// Telemetry
+	uint32_t InputTelemetryVoltageInmV;
+
+	// General info
+	uint32_t pldmVersion[2];
 } StaticMetricsTable_t;
 
 #pragma pack(pop)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
index eefdaa0b5df6..d7a9e41820fa 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
@@ -305,6 +305,8 @@ enum smu_clk_type {
 	SMU_MCLK,
 	SMU_PCIE,
 	SMU_LCLK,
+	SMU_ISPICLK,
+	SMU_ISPXCLK,
 	SMU_OD_CCLK,
 	SMU_OD_SCLK,
 	SMU_OD_MCLK,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
index 7fad5dfb39c4..aac202d0c30e 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
@@ -2444,7 +2444,8 @@ static int navi10_update_pcie_parameters(struct smu_context *smu,
 	struct smu_11_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
 	PPTable_t *pptable = smu->smu_table.driver_pptable;
 	uint32_t smu_pcie_arg;
-	int ret, i;
+	int ret = 0;
+	int i;
 
 	/* lclk dpm table setup */
 	for (i = 0; i < MAX_PCIE_CONF; i++) {
@@ -2453,25 +2454,27 @@ static int navi10_update_pcie_parameters(struct smu_context *smu,
 	}
 
 	for (i = 0; i < NUM_LINK_LEVELS; i++) {
-		smu_pcie_arg = (i << 16) |
-			((pptable->PcieGenSpeed[i] <= pcie_gen_cap) ? (pptable->PcieGenSpeed[i] << 8) :
-				(pcie_gen_cap << 8)) | ((pptable->PcieLaneCount[i] <= pcie_width_cap) ?
-					pptable->PcieLaneCount[i] : pcie_width_cap);
-		ret = smu_cmn_send_smc_msg_with_param(smu,
-					  SMU_MSG_OverridePcieParameters,
-					  smu_pcie_arg,
-					  NULL);
-
-		if (ret)
-			return ret;
-
-		if (pptable->PcieGenSpeed[i] > pcie_gen_cap)
-			dpm_context->dpm_tables.pcie_table.pcie_gen[i] = pcie_gen_cap;
-		if (pptable->PcieLaneCount[i] > pcie_width_cap)
-			dpm_context->dpm_tables.pcie_table.pcie_lane[i] = pcie_width_cap;
+		if (pptable->PcieGenSpeed[i] > pcie_gen_cap ||
+		    pptable->PcieLaneCount[i] > pcie_width_cap) {
+			dpm_context->dpm_tables.pcie_table.pcie_gen[i] =
+				pptable->PcieGenSpeed[i] > pcie_gen_cap ?
+				pcie_gen_cap : pptable->PcieGenSpeed[i];
+			dpm_context->dpm_tables.pcie_table.pcie_lane[i] =
+				pptable->PcieLaneCount[i] > pcie_width_cap ?
+				pcie_width_cap : pptable->PcieLaneCount[i];
+			smu_pcie_arg = i << 16;
+			smu_pcie_arg |= pcie_gen_cap << 8;
+			smu_pcie_arg |= pcie_width_cap;
+			ret = smu_cmn_send_smc_msg_with_param(smu,
+							      SMU_MSG_OverridePcieParameters,
+							      smu_pcie_arg,
+							      NULL);
+			if (ret)
+				break;
+		}
 	}
 
-	return 0;
+	return ret;
 }
 
 static inline void navi10_dump_od_table(struct smu_context *smu,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
index 115e3fa456bc..d57591509aed 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
@@ -2145,7 +2145,8 @@ static int sienna_cichlid_update_pcie_parameters(struct smu_context *smu,
 	uint8_t min_gen_speed, max_gen_speed;
 	uint8_t min_lane_width, max_lane_width;
 	uint32_t smu_pcie_arg;
-	int ret, i;
+	int ret = 0;
+	int i;
 
 	GET_PPTABLE_MEMBER(PcieGenSpeed, &table_member1);
 	GET_PPTABLE_MEMBER(PcieLaneCount, &table_member2);
@@ -2170,19 +2171,22 @@ static int sienna_cichlid_update_pcie_parameters(struct smu_context *smu,
 	pcie_table->pcie_lane[1] = max_lane_width;
 
 	for (i = 0; i < NUM_LINK_LEVELS; i++) {
-		smu_pcie_arg = (i << 16 |
+		if (!(smu->adev->pm.pp_feature & PP_PCIE_DPM_MASK) ||
+		    table_member1[i] > pcie_gen_cap || table_member2[i] > pcie_width_cap) {
+			smu_pcie_arg = (i << 16 |
 				pcie_table->pcie_gen[i] << 8 |
 				pcie_table->pcie_lane[i]);
-		ret = smu_cmn_send_smc_msg_with_param(smu,
-				SMU_MSG_OverridePcieParameters,
-				smu_pcie_arg,
-				NULL);
-		if (ret)
-			return ret;
+			ret = smu_cmn_send_smc_msg_with_param(smu,
+							      SMU_MSG_OverridePcieParameters,
+							      smu_pcie_arg,
+							      NULL);
+			if (ret)
+				break;
+		}
 	}
 
-	return 0;
+	return ret;
 }
 
 static int sienna_cichlid_get_dpm_ultimate_freq(struct smu_context *smu,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
index a55ea76d7399..2c9869feba61 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
@@ -666,7 +666,6 @@ static int vangogh_print_clk_levels(struct smu_context *smu,
 {
 	DpmClocks_t *clk_table = smu->smu_table.clocks_table;
 	SmuMetrics_t metrics;
-	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
 	int i, idx, size = 0, ret = 0;
 	uint32_t cur_value = 0, value = 0, count = 0;
 	bool cur_value_match_level = false;
@@ -682,31 +681,25 @@ static int vangogh_print_clk_levels(struct smu_context *smu,
 
 	switch (clk_type) {
 	case SMU_OD_SCLK:
-		if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
-			size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK");
-			size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
-			(smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq);
-			size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
-			(smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq);
-		}
+		size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK");
+		size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
+		(smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq);
+		size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
+		(smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq);
 		break;
 	case SMU_OD_CCLK:
-		if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
-			size += sysfs_emit_at(buf, size, "CCLK_RANGE in Core%d:\n", smu->cpu_core_id_select);
-			size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
-			(smu->cpu_actual_soft_min_freq > 0) ? smu->cpu_actual_soft_min_freq : smu->cpu_default_soft_min_freq);
-			size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
-			(smu->cpu_actual_soft_max_freq > 0) ? smu->cpu_actual_soft_max_freq : smu->cpu_default_soft_max_freq);
-		}
+		size += sysfs_emit_at(buf, size, "CCLK_RANGE in Core%d:\n", smu->cpu_core_id_select);
+		size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
+		(smu->cpu_actual_soft_min_freq > 0) ? smu->cpu_actual_soft_min_freq : smu->cpu_default_soft_min_freq);
+		size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
+		(smu->cpu_actual_soft_max_freq > 0) ? smu->cpu_actual_soft_max_freq : smu->cpu_default_soft_max_freq);
 		break;
 	case SMU_OD_RANGE:
-		if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
-			size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
-			size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n",
-				smu->gfx_default_hard_min_freq, smu->gfx_default_soft_max_freq);
-			size += sysfs_emit_at(buf, size, "CCLK: %7uMhz %10uMhz\n",
-				smu->cpu_default_soft_min_freq, smu->cpu_default_soft_max_freq);
-		}
+		size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
+		size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n",
+				      smu->gfx_default_hard_min_freq, smu->gfx_default_soft_max_freq);
+		size += sysfs_emit_at(buf, size, "CCLK: %7uMhz %10uMhz\n",
+				      smu->cpu_default_soft_min_freq, smu->cpu_default_soft_max_freq);
 		break;
 	case SMU_SOCCLK:
 		/* the level 3 ~ 6 of socclk use the same frequency for vangogh */
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
index 9481f897432d..e97b0cf19197 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
@@ -497,7 +497,6 @@ static int renoir_print_clk_levels(struct smu_context *smu,
 	int i, idx, size = 0, ret = 0;
 	uint32_t cur_value = 0, value = 0, count = 0, min = 0, max = 0;
 	SmuMetrics_t metrics;
-	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
 	bool cur_value_match_level = false;
 
 	memset(&metrics, 0, sizeof(metrics));
@@ -510,28 +509,24 @@ static int renoir_print_clk_levels(struct smu_context *smu,
 
 	switch (clk_type) {
 	case SMU_OD_RANGE:
-		if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
-			ret = smu_cmn_send_smc_msg_with_param(smu,
-						SMU_MSG_GetMinGfxclkFrequency,
-						0, &min);
-			if (ret)
-				return ret;
-			ret = smu_cmn_send_smc_msg_with_param(smu,
-						SMU_MSG_GetMaxGfxclkFrequency,
-						0, &max);
-			if (ret)
-				return ret;
-			size += sysfs_emit_at(buf, size, "OD_RANGE\nSCLK: %10uMhz %10uMhz\n", min, max);
-		}
+		ret = smu_cmn_send_smc_msg_with_param(smu,
+						      SMU_MSG_GetMinGfxclkFrequency,
+						      0, &min);
+		if (ret)
+			return ret;
+		ret = smu_cmn_send_smc_msg_with_param(smu,
+						      SMU_MSG_GetMaxGfxclkFrequency,
+						      0, &max);
+		if (ret)
+			return ret;
+		size += sysfs_emit_at(buf, size, "OD_RANGE\nSCLK: %10uMhz %10uMhz\n", min, max);
 		break;
 	case SMU_OD_SCLK:
-		if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
-			min = (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq;
-			max = (smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq;
-			size += sysfs_emit_at(buf, size, "OD_SCLK\n");
-			size += sysfs_emit_at(buf, size, "0:%10uMhz\n", min);
-			size += sysfs_emit_at(buf, size, "1:%10uMhz\n", max);
-		}
+		min = (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq;
+		max = (smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq;
+		size += sysfs_emit_at(buf, size, "OD_SCLK\n");
+		size += sysfs_emit_at(buf, size, "0:%10uMhz\n", min);
+		size += sysfs_emit_at(buf, size, "1:%10uMhz\n", max);
 		break;
 	case SMU_GFXCLK:
 	case SMU_SCLK:
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
index 6de653d2ed62..c63d2e28954d 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
@@ -342,6 +342,61 @@ static int aldebaran_get_allowed_feature_mask(struct smu_context *smu,
 	return 0;
 }
 
+static int aldebaran_get_dpm_ultimate_freq(struct smu_context *smu,
+					   enum smu_clk_type clk_type,
+					   uint32_t *min, uint32_t *max)
+{
+	struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
+	struct smu_13_0_dpm_table *dpm_table;
+	uint32_t min_clk, max_clk;
+
+	if (amdgpu_sriov_vf(smu->adev)) {
+		switch (clk_type) {
+		case SMU_MCLK:
+		case SMU_UCLK:
+			dpm_table = &dpm_context->dpm_tables.uclk_table;
+			break;
+		case SMU_GFXCLK:
+		case SMU_SCLK:
+			dpm_table = &dpm_context->dpm_tables.gfx_table;
+			break;
+		case SMU_SOCCLK:
+			dpm_table = &dpm_context->dpm_tables.soc_table;
+			break;
+		case SMU_FCLK:
+			dpm_table = &dpm_context->dpm_tables.fclk_table;
+			break;
+		case SMU_VCLK:
+			dpm_table = &dpm_context->dpm_tables.vclk_table;
+			break;
+		case SMU_DCLK:
+			dpm_table = &dpm_context->dpm_tables.dclk_table;
+			break;
+		default:
+			return -EINVAL;
+		}
+
+		min_clk = dpm_table->min;
+		max_clk = dpm_table->max;
+
+		if (min) {
+			if (!min_clk)
+				return -ENODATA;
+			*min = min_clk;
+		}
+		if (max) {
+			if (!max_clk)
+				return -ENODATA;
+			*max = max_clk;
+		}
+
+	} else {
+		return smu_v13_0_get_dpm_ultimate_freq(smu, clk_type, min, max);
+	}
+
+	return 0;
+}
+
 static int aldebaran_set_default_dpm_table(struct smu_context *smu)
 {
 	struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
@@ -2081,7 +2136,7 @@ static const struct pptable_funcs aldebaran_ppt_funcs = {
 	.set_azalia_d3_pme = smu_v13_0_set_azalia_d3_pme,
 	.get_max_sustainable_clocks_by_dc = smu_v13_0_get_max_sustainable_clocks_by_dc,
 	.get_bamaco_support = aldebaran_get_bamaco_support,
-	.get_dpm_ultimate_freq = smu_v13_0_get_dpm_ultimate_freq,
+	.get_dpm_ultimate_freq = aldebaran_get_dpm_ultimate_freq,
 	.set_soft_freq_limited_range = aldebaran_set_soft_freq_limited_range,
 	.od_edit_dpm_table = aldebaran_usr_edit_dpm_table,
 	.set_df_cstate = aldebaran_set_df_cstate,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
index 1c7235935d14..1a1f2a6b2e52 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
@@ -2386,7 +2386,8 @@ int smu_v13_0_update_pcie_parameters(struct smu_context *smu,
 		&dpm_context->dpm_tables.pcie_table;
 	int num_of_levels = pcie_table->num_of_link_levels;
 	uint32_t smu_pcie_arg;
-	int ret, i;
+	int ret = 0;
+	int i;
 
 	if (!num_of_levels)
 		return 0;
@@ -2402,30 +2403,38 @@ int smu_v13_0_update_pcie_parameters(struct smu_context *smu,
 
 		for (i = 0; i < num_of_levels; i++) {
 			pcie_table->pcie_gen[i] = pcie_gen_cap;
 			pcie_table->pcie_lane[i] = pcie_width_cap;
+			smu_pcie_arg = i << 16;
+			smu_pcie_arg |= pcie_table->pcie_gen[i] << 8;
+			smu_pcie_arg |= pcie_table->pcie_lane[i];
+
+			ret = smu_cmn_send_smc_msg_with_param(smu,
+							      SMU_MSG_OverridePcieParameters,
+							      smu_pcie_arg,
+							      NULL);
+			if (ret)
+				break;
 		}
 	} else {
 		for (i = 0; i < num_of_levels; i++) {
-			if (pcie_table->pcie_gen[i] > pcie_gen_cap)
+			if (pcie_table->pcie_gen[i] > pcie_gen_cap ||
+			    pcie_table->pcie_lane[i] > pcie_width_cap) {
 				pcie_table->pcie_gen[i] = pcie_gen_cap;
-			if (pcie_table->pcie_lane[i] > pcie_width_cap)
 				pcie_table->pcie_lane[i] = pcie_width_cap;
+				smu_pcie_arg = i << 16;
+				smu_pcie_arg |= pcie_table->pcie_gen[i] << 8;
+				smu_pcie_arg |= pcie_table->pcie_lane[i];
+
+				ret = smu_cmn_send_smc_msg_with_param(smu,
+								      SMU_MSG_OverridePcieParameters,
+								      smu_pcie_arg,
+								      NULL);
+				if (ret)
+					break;
+			}
 		}
 	}
 
-	for (i = 0; i < num_of_levels; i++) {
-		smu_pcie_arg = i << 16;
-		smu_pcie_arg |= pcie_table->pcie_gen[i] << 8;
-		smu_pcie_arg |= pcie_table->pcie_lane[i];
-
-		ret = smu_cmn_send_smc_msg_with_param(smu,
-						      SMU_MSG_OverridePcieParameters,
-						      smu_pcie_arg,
-						      NULL);
-		if (ret)
-			return ret;
-	}
-
-	return 0;
+	return ret;
 }
 
 int smu_v13_0_disable_pmfw_state(struct smu_context *smu)
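Every SMU_MSG_OverridePcieParameters call in the hunks above packs one link level per message: the level index in bits 16 and up, the PCIe gen code in bits 8-15, and the lane-count code in bits 0-7. A standalone sketch of the encoding (the concrete gen/lane code values, e.g. lane code 6 for x16, follow the driver tables' convention and are assumptions here, not taken from the quoted diff):

/*
 * smu_pcie_arg layout: [31:16] link level, [15:8] gen code, [7:0] lanes.
 * Decode is shown only to verify the packing round-trips.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t pack_pcie_arg(uint32_t level, uint8_t gen, uint8_t lanes)
{
	uint32_t arg = level << 16;

	arg |= (uint32_t)gen << 8;
	arg |= lanes;
	return arg;
}

int main(void)
{
	/* level 1, gen code 3, lane code 6 (assumed to mean x16) */
	uint32_t arg = pack_pcie_arg(1, 3, 6);

	printf("arg=0x%06x level=%u gen=%u lanes=%u\n", arg,
	       arg >> 16, (arg >> 8) & 0xff, arg & 0xff);
	return 0;
}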
(!skutable->PcieGenSpeed[link_level] && + !skutable->PcieLaneCount[link_level] && + !skutable->LclkFreq[link_level]) + continue; + + pcie_table->pcie_gen[pcie_table->num_of_link_levels] = + skutable->PcieGenSpeed[link_level]; + pcie_table->pcie_lane[pcie_table->num_of_link_levels] = + skutable->PcieLaneCount[link_level]; + pcie_table->clk_freq[pcie_table->num_of_link_levels] = + skutable->LclkFreq[link_level]; + pcie_table->num_of_link_levels++; + } + + num_of_levels = pcie_table->num_of_link_levels; + if (!num_of_levels) + return 0; + + if (!(smu->adev->pm.pp_feature & PP_PCIE_DPM_MASK)) { + if (pcie_table->pcie_gen[num_of_levels - 1] < pcie_gen_cap) + pcie_gen_cap = pcie_table->pcie_gen[num_of_levels - 1]; + + if (pcie_table->pcie_lane[num_of_levels - 1] < pcie_width_cap) + pcie_width_cap = pcie_table->pcie_lane[num_of_levels - 1]; + + /* Force all levels to use the same settings */ + for (i = 0; i < num_of_levels; i++) { + pcie_table->pcie_gen[i] = pcie_gen_cap; + pcie_table->pcie_lane[i] = pcie_width_cap; + smu_pcie_arg = i << 16; + smu_pcie_arg |= pcie_table->pcie_gen[i] << 8; + smu_pcie_arg |= pcie_table->pcie_lane[i]; + + ret = smu_cmn_send_smc_msg_with_param(smu, + SMU_MSG_OverridePcieParameters, + smu_pcie_arg, + NULL); + if (ret) + break; + } + } else { + for (i = 0; i < num_of_levels; i++) { + if (pcie_table->pcie_gen[i] > pcie_gen_cap || + pcie_table->pcie_lane[i] > pcie_width_cap) { + pcie_table->pcie_gen[i] = pcie_table->pcie_gen[i] > pcie_gen_cap ? + pcie_gen_cap : pcie_table->pcie_gen[i]; + pcie_table->pcie_lane[i] = pcie_table->pcie_lane[i] > pcie_width_cap ? + pcie_width_cap : pcie_table->pcie_lane[i]; + smu_pcie_arg = i << 16; + smu_pcie_arg |= pcie_table->pcie_gen[i] << 8; + smu_pcie_arg |= pcie_table->pcie_lane[i]; + + ret = smu_cmn_send_smc_msg_with_param(smu, + SMU_MSG_OverridePcieParameters, + smu_pcie_arg, + NULL); + if (ret) + break; + } + } + } + + return ret; +} + static const struct pptable_funcs smu_v13_0_0_ppt_funcs = { .get_allowed_feature_mask = smu_v13_0_0_get_allowed_feature_mask, .set_default_dpm_table = smu_v13_0_0_set_default_dpm_table, @@ -3179,7 +3243,7 @@ static const struct pptable_funcs smu_v13_0_0_ppt_funcs = { .feature_is_enabled = smu_cmn_feature_is_enabled, .print_clk_levels = smu_v13_0_0_print_clk_levels, .force_clk_levels = smu_v13_0_0_force_clk_levels, - .update_pcie_parameters = smu_v13_0_update_pcie_parameters, + .update_pcie_parameters = smu_v13_0_0_update_pcie_parameters, .get_thermal_temperature_range = smu_v13_0_0_get_thermal_temperature_range, .register_irq_handler = smu_v13_0_register_irq_handler, .enable_thermal_alert = smu_v13_0_enable_thermal_alert, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c index e0d356f93ab0..02a455a31c25 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c @@ -187,8 +187,34 @@ int smu_v13_0_12_get_max_metrics_size(void) return max(sizeof(StaticMetricsTable_t), sizeof(MetricsTable_t)); } +static void smu_v13_0_12_init_xgmi_data(struct smu_context *smu, + StaticMetricsTable_t *static_metrics) +{ + struct smu_table_context *smu_table = &smu->smu_table; + uint16_t max_speed; + uint8_t max_width; + int ret; + + if (smu_table->tables[SMU_TABLE_SMU_METRICS].version >= 0x13) { + max_width = (uint8_t)static_metrics->MaxXgmiWidth; + max_speed = (uint16_t)static_metrics->MaxXgmiBitrate; + ret = 0; + } else { + MetricsTable_t *metrics = (MetricsTable_t 
*)smu_table->metrics_table; + + ret = smu_v13_0_6_get_metrics_table(smu, NULL, true); + if (!ret) { + max_width = (uint8_t)metrics->XgmiWidth; + max_speed = (uint16_t)metrics->XgmiBitrate; + } + } + if (!ret) + amgpu_xgmi_set_max_speed_width(smu->adev, max_speed, max_width); +} + int smu_v13_0_12_setup_driver_pptable(struct smu_context *smu) { + struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context; struct smu_table_context *smu_table = &smu->smu_table; StaticMetricsTable_t *static_metrics = (StaticMetricsTable_t *)smu_table->metrics_table; struct PPTable_t *pptable = @@ -237,6 +263,18 @@ int smu_v13_0_12_setup_driver_pptable(struct smu_context *smu) if (ret) return ret; + if (smu_v13_0_6_cap_supported(smu, SMU_CAP(BOARD_VOLTAGE))) { + if (!static_metrics->InputTelemetryVoltageInmV) { + dev_warn(smu->adev->dev, "Invalid board voltage %d\n", + static_metrics->InputTelemetryVoltageInmV); + } + dpm_context->board_volt = static_metrics->InputTelemetryVoltageInmV; + } + if (smu_v13_0_6_cap_supported(smu, SMU_CAP(PLDM_VERSION)) && + static_metrics->pldmVersion[0] != 0xFFFFFFFF) + smu->adev->firmware.pldm_version = + static_metrics->pldmVersion[0]; + smu_v13_0_12_init_xgmi_data(smu, static_metrics); pptable->Init = true; } @@ -263,7 +301,6 @@ int smu_v13_0_12_get_smu_metrics_data(struct smu_context *smu, struct smu_table_context *smu_table = &smu->smu_table; MetricsTable_t *metrics = (MetricsTable_t *)smu_table->metrics_table; struct amdgpu_device *adev = smu->adev; - int ret = 0; int xcc_id; /* For clocks with multiple instances, only report the first one */ @@ -319,7 +356,7 @@ int smu_v13_0_12_get_smu_metrics_data(struct smu_context *smu, break; } - return ret; + return 0; } ssize_t smu_v13_0_12_get_xcp_metrics(struct smu_context *smu, struct amdgpu_xcp *xcp, void *table, void *smu_metrics) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c index f00ef7f3f355..9cc294f4708b 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c @@ -345,6 +345,11 @@ static void smu_v13_0_12_init_caps(struct smu_context *smu) if (fw_ver >= 0x00562500) smu_v13_0_6_cap_set(smu, SMU_CAP(HST_LIMIT_METRICS)); + + if (fw_ver >= 0x04560100) { + smu_v13_0_6_cap_set(smu, SMU_CAP(BOARD_VOLTAGE)); + smu_v13_0_6_cap_set(smu, SMU_CAP(PLDM_VERSION)); + } } static void smu_v13_0_6_init_caps(struct smu_context *smu) @@ -685,8 +690,8 @@ static int smu_v13_0_6_get_allowed_feature_mask(struct smu_context *smu, return 0; } -static int smu_v13_0_6_get_metrics_table(struct smu_context *smu, - void *metrics_table, bool bypass_cache) +int smu_v13_0_6_get_metrics_table(struct smu_context *smu, void *metrics_table, + bool bypass_cache) { struct smu_table_context *smu_table = &smu->smu_table; uint32_t table_size = smu_table->tables[SMU_TABLE_SMU_METRICS].size; @@ -800,6 +805,8 @@ static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu) int version = smu_v13_0_6_get_metrics_version(smu); int ret, i, retry = 100; uint32_t table_version; + uint16_t max_speed; + uint8_t max_width; if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 12) && smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS))) @@ -835,6 +842,9 @@ static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu) SMUQ10_ROUND(GET_METRIC_FIELD(MaxGfxclkFrequency, version)); pptable->MinGfxclkFrequency = SMUQ10_ROUND(GET_METRIC_FIELD(MinGfxclkFrequency, version)); + max_width = 
(uint8_t)GET_METRIC_FIELD(XgmiWidth, version); + max_speed = (uint16_t)GET_METRIC_FIELD(XgmiBitrate, version); + amgpu_xgmi_set_max_speed_width(smu->adev, max_speed, max_width); for (i = 0; i < 4; ++i) { pptable->FclkFrequencyTable[i] = @@ -871,51 +881,51 @@ static int smu_v13_0_6_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t *min, uint32_t *max) { + struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context; struct smu_table_context *smu_table = &smu->smu_table; struct PPTable_t *pptable = (struct PPTable_t *)smu_table->driver_pptable; - uint32_t clock_limit = 0, param; + struct smu_13_0_dpm_table *dpm_table; + uint32_t min_clk, max_clk, param; int ret = 0, clk_id = 0; - if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type)) { + /* Use dpm tables, if data is already fetched */ + if (pptable->Init) { switch (clk_type) { case SMU_MCLK: case SMU_UCLK: - if (pptable->Init) - clock_limit = pptable->UclkFrequencyTable[0]; + dpm_table = &dpm_context->dpm_tables.uclk_table; break; case SMU_GFXCLK: case SMU_SCLK: - if (pptable->Init) - clock_limit = pptable->MinGfxclkFrequency; + dpm_table = &dpm_context->dpm_tables.gfx_table; break; case SMU_SOCCLK: - if (pptable->Init) - clock_limit = pptable->SocclkFrequencyTable[0]; + dpm_table = &dpm_context->dpm_tables.soc_table; break; case SMU_FCLK: - if (pptable->Init) - clock_limit = pptable->FclkFrequencyTable[0]; + dpm_table = &dpm_context->dpm_tables.fclk_table; break; case SMU_VCLK: - if (pptable->Init) - clock_limit = pptable->VclkFrequencyTable[0]; + dpm_table = &dpm_context->dpm_tables.vclk_table; break; case SMU_DCLK: - if (pptable->Init) - clock_limit = pptable->DclkFrequencyTable[0]; + dpm_table = &dpm_context->dpm_tables.dclk_table; break; default: - break; + return -EINVAL; } - if (min) - *min = clock_limit; + min_clk = dpm_table->min; + max_clk = dpm_table->max; + if (min) + *min = min_clk; if (max) - *max = clock_limit; + *max = max_clk; - return 0; + if (min_clk && max_clk) + return 0; } if (!(clk_type == SMU_GFXCLK || clk_type == SMU_SCLK)) { @@ -1377,8 +1387,9 @@ static int smu_v13_0_6_print_clk_levels(struct smu_context *smu, return ret; } - min_clk = pstate_table->gfxclk_pstate.curr.min; - max_clk = pstate_table->gfxclk_pstate.curr.max; + single_dpm_table = &(dpm_context->dpm_tables.gfx_table); + min_clk = single_dpm_table->min; + max_clk = single_dpm_table->max; if (now < SMU_13_0_6_DSCLK_THRESHOLD) { size += sysfs_emit_at(buf, size, "S: %uMhz *\n", @@ -2682,7 +2693,7 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table bool per_inst; metrics_v0 = kzalloc(METRICS_TABLE_SIZE, GFP_KERNEL); - ret = smu_v13_0_6_get_metrics_table(smu, metrics_v0, true); + ret = smu_v13_0_6_get_metrics_table(smu, metrics_v0, false); if (ret) { kfree(metrics_v0); return ret; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h index d38d6d76b1e7..67b30674fd31 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h @@ -74,6 +74,8 @@ enum smu_v13_0_6_caps { extern void smu_v13_0_6_set_ppt_funcs(struct smu_context *smu); bool smu_v13_0_6_cap_supported(struct smu_context *smu, enum smu_v13_0_6_caps cap); int smu_v13_0_6_get_static_metrics_table(struct smu_context *smu); +int smu_v13_0_6_get_metrics_table(struct smu_context *smu, void *metrics_table, + bool bypass_cache); bool smu_v13_0_12_is_dpm_running(struct smu_context *smu); int 
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h
index d38d6d76b1e7..67b30674fd31 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h
@@ -74,6 +74,8 @@ enum smu_v13_0_6_caps {
 extern void smu_v13_0_6_set_ppt_funcs(struct smu_context *smu);
 bool smu_v13_0_6_cap_supported(struct smu_context *smu, enum smu_v13_0_6_caps cap);
 int smu_v13_0_6_get_static_metrics_table(struct smu_context *smu);
+int smu_v13_0_6_get_metrics_table(struct smu_context *smu, void *metrics_table,
+				  bool bypass_cache);
 bool smu_v13_0_12_is_dpm_running(struct smu_context *smu);
 int smu_v13_0_12_get_max_metrics_size(void);
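smu_v13_0_6_get_metrics_table(), now exported through this header, behaves as a read-through cache: a NULL destination simply refreshes the cached table, while bypass_cache forces a firmware round trip even if the cached copy is still fresh. A simplified sketch of that contract (the one-second lifetime and every name here are illustrative assumptions, not the driver's actual values):

#include <stdbool.h>
#include <stddef.h>
#include <string.h>
#include <time.h>

#define EXAMPLE_CACHE_SECS 1

static char example_cache[256];
static time_t example_cache_time;

/* Stand-in for the real SMU table transfer. */
static int example_fetch_from_fw(void *dst, size_t len)
{
	memset(dst, 0, len);
	return 0;
}

/* NULL 'out' only refreshes the cache; callers pass len <= cache size. */
static int example_get_metrics(void *out, size_t len, bool bypass_cache)
{
	time_t now = time(NULL);
	int ret = 0;

	if (bypass_cache || now - example_cache_time >= EXAMPLE_CACHE_SECS) {
		ret = example_fetch_from_fw(example_cache, sizeof(example_cache));
		if (ret)
			return ret;
		example_cache_time = now;
	}
	if (out)
		memcpy(out, example_cache, len);
	return 0;
}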
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
index c8f4f6fb4083..c96fa5e49ed6 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
@@ -579,8 +579,6 @@ static int smu_v13_0_7_set_default_dpm_table(struct smu_context *smu)
 	PPTable_t *driver_ppt = smu->smu_table.driver_pptable;
 	SkuTable_t *skutable = &driver_ppt->SkuTable;
 	struct smu_13_0_dpm_table *dpm_table;
-	struct smu_13_0_pcie_table *pcie_table;
-	uint32_t link_level;
 	int ret = 0;
 
 	/* socclk dpm table setup */
@@ -687,24 +685,6 @@ static int smu_v13_0_7_set_default_dpm_table(struct smu_context *smu)
 		dpm_table->max = dpm_table->dpm_levels[0].value;
 	}
 
-	/* lclk dpm table setup */
-	pcie_table = &dpm_context->dpm_tables.pcie_table;
-	pcie_table->num_of_link_levels = 0;
-	for (link_level = 0; link_level < NUM_LINK_LEVELS; link_level++) {
-		if (!skutable->PcieGenSpeed[link_level] &&
-		    !skutable->PcieLaneCount[link_level] &&
-		    !skutable->LclkFreq[link_level])
-			continue;
-
-		pcie_table->pcie_gen[pcie_table->num_of_link_levels] =
-			skutable->PcieGenSpeed[link_level];
-		pcie_table->pcie_lane[pcie_table->num_of_link_levels] =
-			skutable->PcieLaneCount[link_level];
-		pcie_table->clk_freq[pcie_table->num_of_link_levels] =
-			skutable->LclkFreq[link_level];
-		pcie_table->num_of_link_levels++;
-	}
-
 	/* dcefclk dpm table setup */
 	dpm_table = &dpm_context->dpm_tables.dcef_table;
 	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCN_BIT)) {
@@ -2739,6 +2719,89 @@ static int smu_v13_0_7_set_power_limit(struct smu_context *smu,
 	return 0;
 }
 
+static int smu_v13_0_7_update_pcie_parameters(struct smu_context *smu,
+					      uint8_t pcie_gen_cap,
+					      uint8_t pcie_width_cap)
+{
+	struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
+	struct smu_13_0_pcie_table *pcie_table =
+		&dpm_context->dpm_tables.pcie_table;
+	int num_of_levels;
+	int link_level;
+	uint32_t smu_pcie_arg;
+	struct smu_table_context *table_context = &smu->smu_table;
+	PPTable_t *pptable = table_context->driver_pptable;
+	SkuTable_t *skutable = &pptable->SkuTable;
+	int ret = 0;
+	int i;
+
+	pcie_table->num_of_link_levels = 0;
+	for (link_level = 0; link_level < NUM_LINK_LEVELS; link_level++) {
+		if (!skutable->PcieGenSpeed[link_level] &&
+		    !skutable->PcieLaneCount[link_level] &&
+		    !skutable->LclkFreq[link_level])
+			continue;
+
+		pcie_table->pcie_gen[pcie_table->num_of_link_levels] =
+			skutable->PcieGenSpeed[link_level];
+		pcie_table->pcie_lane[pcie_table->num_of_link_levels] =
+			skutable->PcieLaneCount[link_level];
+		pcie_table->clk_freq[pcie_table->num_of_link_levels] =
+			skutable->LclkFreq[link_level];
+		pcie_table->num_of_link_levels++;
+	}
+
+	num_of_levels = pcie_table->num_of_link_levels;
+	if (!num_of_levels)
+		return 0;
+
+	if (!(smu->adev->pm.pp_feature & PP_PCIE_DPM_MASK)) {
+		if (pcie_table->pcie_gen[num_of_levels - 1] < pcie_gen_cap)
+			pcie_gen_cap = pcie_table->pcie_gen[num_of_levels - 1];
+
+		if (pcie_table->pcie_lane[num_of_levels - 1] < pcie_width_cap)
+			pcie_width_cap = pcie_table->pcie_lane[num_of_levels - 1];
+
+		/* Force all levels to use the same settings */
+		for (i = 0; i < num_of_levels; i++) {
+			pcie_table->pcie_gen[i] = pcie_gen_cap;
+			pcie_table->pcie_lane[i] = pcie_width_cap;
+			smu_pcie_arg = i << 16;
+			smu_pcie_arg |= pcie_table->pcie_gen[i] << 8;
+			smu_pcie_arg |= pcie_table->pcie_lane[i];
+
+			ret = smu_cmn_send_smc_msg_with_param(smu,
+							      SMU_MSG_OverridePcieParameters,
+							      smu_pcie_arg,
+							      NULL);
+			if (ret)
+				break;
+		}
+	} else {
+		for (i = 0; i < num_of_levels; i++) {
+			if (pcie_table->pcie_gen[i] > pcie_gen_cap ||
+			    pcie_table->pcie_lane[i] > pcie_width_cap) {
+				pcie_table->pcie_gen[i] = pcie_table->pcie_gen[i] > pcie_gen_cap ?
+							  pcie_gen_cap : pcie_table->pcie_gen[i];
+				pcie_table->pcie_lane[i] = pcie_table->pcie_lane[i] > pcie_width_cap ?
+							   pcie_width_cap : pcie_table->pcie_lane[i];
+				smu_pcie_arg = i << 16;
+				smu_pcie_arg |= pcie_table->pcie_gen[i] << 8;
+				smu_pcie_arg |= pcie_table->pcie_lane[i];
+
+				ret = smu_cmn_send_smc_msg_with_param(smu,
+								      SMU_MSG_OverridePcieParameters,
+								      smu_pcie_arg,
+								      NULL);
+				if (ret)
+					break;
+			}
+		}
+	}
+
+	return ret;
+}
+
 static const struct pptable_funcs smu_v13_0_7_ppt_funcs = {
 	.get_allowed_feature_mask = smu_v13_0_7_get_allowed_feature_mask,
 	.set_default_dpm_table = smu_v13_0_7_set_default_dpm_table,
@@ -2768,7 +2831,7 @@ static const struct pptable_funcs smu_v13_0_7_ppt_funcs = {
 	.feature_is_enabled = smu_cmn_feature_is_enabled,
 	.print_clk_levels = smu_v13_0_7_print_clk_levels,
 	.force_clk_levels = smu_v13_0_7_force_clk_levels,
-	.update_pcie_parameters = smu_v13_0_update_pcie_parameters,
+	.update_pcie_parameters = smu_v13_0_7_update_pcie_parameters,
 	.get_thermal_temperature_range = smu_v13_0_7_get_thermal_temperature_range,
 	.register_irq_handler = smu_v13_0_register_irq_handler,
 	.enable_thermal_alert = smu_v13_0_enable_thermal_alert,
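The smu_pcie_arg assembled in the new function packs the link level, PCIe gen code, and lane-count code into one dword: level in bits 31:16, gen in bits 15:8, lanes in bits 7:0. As a stand-alone encoder (the helper name is ours; the bit layout is taken from the code above):

#include <stdint.h>

/* Level index in bits 31:16, gen code in bits 15:8, lane code in bits 7:0. */
static uint32_t example_pack_pcie_arg(uint32_t level, uint8_t gen, uint8_t lanes)
{
	return (level << 16) | ((uint32_t)gen << 8) | lanes;
}

/* e.g. example_pack_pcie_arg(1, 3, 6) == 0x00010306 */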
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
index 84f9b007b59f..fe00c84b1cc6 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
@@ -1207,11 +1207,13 @@ static int smu_v14_0_0_print_clk_levels(struct smu_context *smu,
 
 static int smu_v14_0_0_set_soft_freq_limited_range(struct smu_context *smu,
 						   enum smu_clk_type clk_type,
-						   uint32_t min,
-						   uint32_t max)
+						   u32 min,
+						   u32 max,
+						   bool __always_unused automatic)
 {
-	enum smu_message_type msg_set_min, msg_set_max;
-	int ret = 0;
+	enum smu_message_type msg_set_min = SMU_MSG_MAX_COUNT;
+	enum smu_message_type msg_set_max = SMU_MSG_MAX_COUNT;
+	int ret = -EINVAL;
 
 	if (!smu_v14_0_0_clk_dpm_is_enabled(smu, clk_type))
 		return -EINVAL;
@@ -1240,16 +1242,23 @@ static int smu_v14_0_0_set_soft_freq_limited_range(struct smu_context *smu,
 		msg_set_min = SMU_MSG_SetHardMinVcn1;
 		msg_set_max = SMU_MSG_SetSoftMaxVcn1;
 		break;
+	case SMU_ISPICLK:
+		msg_set_min = SMU_MSG_SetHardMinIspiclkByFreq;
+		break;
+	case SMU_ISPXCLK:
+		msg_set_min = SMU_MSG_SetHardMinIspxclkByFreq;
+		break;
 	default:
 		return -EINVAL;
 	}
 
-	ret = smu_cmn_send_smc_msg_with_param(smu, msg_set_min, min, NULL);
-	if (ret)
-		return ret;
+	if (min && msg_set_min != SMU_MSG_MAX_COUNT)
+		ret = smu_cmn_send_smc_msg_with_param(smu, msg_set_min, min, NULL);
+
+	if (max && msg_set_max != SMU_MSG_MAX_COUNT)
+		ret = smu_cmn_send_smc_msg_with_param(smu, msg_set_max, max, NULL);
 
-	return smu_cmn_send_smc_msg_with_param(smu, msg_set_max,
-					       max, NULL);
+	return ret;
 }
 
 static int smu_v14_0_0_force_clk_levels(struct smu_context *smu,
@@ -1278,7 +1287,7 @@ static int smu_v14_0_0_force_clk_levels(struct smu_context *smu,
 		if (ret)
 			break;
 
-		ret = smu_v14_0_0_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq);
+		ret = smu_v14_0_0_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq, false);
 		break;
 	default:
 		ret = -EINVAL;
@@ -1426,7 +1435,8 @@ static int smu_v14_0_common_set_performance_level(struct smu_context *smu,
 		ret = smu_v14_0_0_set_soft_freq_limited_range(smu,
 							      SMU_SCLK,
 							      sclk_min,
-							      sclk_max);
+							      sclk_max,
+							      false);
 		if (ret)
 			return ret;
 
@@ -1438,7 +1448,8 @@ static int smu_v14_0_common_set_performance_level(struct smu_context *smu,
 		ret = smu_v14_0_0_set_soft_freq_limited_range(smu,
 							      SMU_FCLK,
 							      fclk_min,
-							      fclk_max);
+							      fclk_max,
+							      false);
 		if (ret)
 			return ret;
 	}
@@ -1447,7 +1458,8 @@ static int smu_v14_0_common_set_performance_level(struct smu_context *smu,
 		ret = smu_v14_0_0_set_soft_freq_limited_range(smu,
 							      SMU_SOCCLK,
 							      socclk_min,
-							      socclk_max);
+							      socclk_max,
+							      false);
 		if (ret)
 			return ret;
 	}
@@ -1456,7 +1468,8 @@ static int smu_v14_0_common_set_performance_level(struct smu_context *smu,
 		ret = smu_v14_0_0_set_soft_freq_limited_range(smu,
 							      SMU_VCLK,
 							      vclk_min,
-							      vclk_max);
+							      vclk_max,
+							      false);
 		if (ret)
 			return ret;
 	}
@@ -1465,7 +1478,8 @@ static int smu_v14_0_common_set_performance_level(struct smu_context *smu,
 		ret = smu_v14_0_0_set_soft_freq_limited_range(smu,
 							      SMU_VCLK1,
 							      vclk1_min,
-							      vclk1_max);
+							      vclk1_max,
+							      false);
 		if (ret)
 			return ret;
 	}
@@ -1474,7 +1488,8 @@ static int smu_v14_0_common_set_performance_level(struct smu_context *smu,
 		ret = smu_v14_0_0_set_soft_freq_limited_range(smu,
 							      SMU_DCLK,
 							      dclk_min,
-							      dclk_max);
+							      dclk_max,
+							      false);
 		if (ret)
 			return ret;
 	}
@@ -1483,7 +1498,8 @@ static int smu_v14_0_common_set_performance_level(struct smu_context *smu,
 		ret = smu_v14_0_0_set_soft_freq_limited_range(smu,
 							      SMU_DCLK1,
 							      dclk1_min,
-							      dclk1_max);
+							      dclk1_max,
+							      false);
 		if (ret)
 			return ret;
 	}
@@ -1533,6 +1549,14 @@ static int smu_v14_0_0_set_vpe_enable(struct smu_context *smu,
 					       0, NULL);
 }
 
+static int smu_v14_0_0_set_isp_enable(struct smu_context *smu,
+				      bool enable)
+{
+	return smu_cmn_send_smc_msg_with_param(smu, enable ?
+					       SMU_MSG_PowerUpIspByTile : SMU_MSG_PowerDownIspByTile,
+					       ISP_ALL_TILES_MASK, NULL);
+}
+
 static int smu_v14_0_0_set_umsch_mm_enable(struct smu_context *smu,
 					   bool enable)
 {
@@ -1662,6 +1686,7 @@ static const struct pptable_funcs smu_v14_0_0_ppt_funcs = {
 	.gfx_off_control = smu_v14_0_gfx_off_control,
 	.mode2_reset = smu_v14_0_0_mode2_reset,
 	.get_dpm_ultimate_freq = smu_v14_0_common_get_dpm_ultimate_freq,
+	.set_soft_freq_limited_range = smu_v14_0_0_set_soft_freq_limited_range,
 	.od_edit_dpm_table = smu_v14_0_od_edit_dpm_table,
 	.print_clk_levels = smu_v14_0_0_print_clk_levels,
 	.force_clk_levels = smu_v14_0_0_force_clk_levels,
@@ -1669,6 +1694,7 @@ static const struct pptable_funcs smu_v14_0_0_ppt_funcs = {
 	.set_fine_grain_gfx_freq_parameters = smu_v14_0_common_set_fine_grain_gfx_freq_parameters,
 	.set_gfx_power_up_by_imu = smu_v14_0_set_gfx_power_up_by_imu,
 	.dpm_set_vpe_enable = smu_v14_0_0_set_vpe_enable,
+	.dpm_set_isp_enable = smu_v14_0_0_set_isp_enable,
 	.dpm_set_umsch_mm_enable = smu_v14_0_0_set_umsch_mm_enable,
 	.get_dpm_clock_table = smu_v14_0_common_get_dpm_table,
 	.set_mall_enable = smu_v14_0_common_set_mall_enable,
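smu_v14_0_0_set_isp_enable() above drives both power directions with one parameterized firmware message: a message ID per direction plus a bitmask selecting ISP tiles. A reduced sketch of that dispatch, with stand-in message IDs and mask values (the real ones come from the SMU message tables):

#include <stdbool.h>
#include <stdint.h>

#define EXAMPLE_MSG_POWER_UP_ISP	0x2Au	/* stand-in message ID */
#define EXAMPLE_MSG_POWER_DOWN_ISP	0x2Bu	/* stand-in message ID */
#define EXAMPLE_ISP_ALL_TILES		0x7FFu	/* stand-in tile mask */

/* Stand-in for smu_cmn_send_smc_msg_with_param(). */
static int example_send_msg(uint32_t msg, uint32_t param)
{
	(void)msg;
	(void)param;
	return 0;
}

/* One message per direction; the mask selects which ISP tiles to touch. */
static int example_set_isp_enable(bool enable)
{
	return example_send_msg(enable ? EXAMPLE_MSG_POWER_UP_ISP :
					 EXAMPLE_MSG_POWER_DOWN_ISP,
				EXAMPLE_ISP_ALL_TILES);
}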
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
index 82c2db972491..3aea32baea3d 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
@@ -502,8 +502,6 @@ static int smu_v14_0_2_set_default_dpm_table(struct smu_context *smu)
 	PPTable_t *pptable = table_context->driver_pptable;
 	SkuTable_t *skutable = &pptable->SkuTable;
 	struct smu_14_0_dpm_table *dpm_table;
-	struct smu_14_0_pcie_table *pcie_table;
-	uint32_t link_level;
 	int ret = 0;
 
 	/* socclk dpm table setup */
@@ -619,27 +617,6 @@ static int smu_v14_0_2_set_default_dpm_table(struct smu_context *smu)
 		dpm_table->max = dpm_table->dpm_levels[0].value;
 	}
 
-	/* lclk dpm table setup */
-	pcie_table = &dpm_context->dpm_tables.pcie_table;
-	pcie_table->num_of_link_levels = 0;
-	for (link_level = 0; link_level < NUM_LINK_LEVELS; link_level++) {
-		if (!skutable->PcieGenSpeed[link_level] &&
-		    !skutable->PcieLaneCount[link_level] &&
-		    !skutable->LclkFreq[link_level])
-			continue;
-
-		pcie_table->pcie_gen[pcie_table->num_of_link_levels] =
-			skutable->PcieGenSpeed[link_level];
-		pcie_table->pcie_lane[pcie_table->num_of_link_levels] =
-			skutable->PcieLaneCount[link_level];
-		pcie_table->clk_freq[pcie_table->num_of_link_levels] =
-			skutable->LclkFreq[link_level];
-		pcie_table->num_of_link_levels++;
-
-		if (link_level == 0)
-			link_level++;
-	}
-
 	/* dcefclk dpm table setup */
 	dpm_table = &dpm_context->dpm_tables.dcef_table;
 	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCN_BIT)) {
@@ -1487,10 +1464,31 @@ static int smu_v14_0_2_update_pcie_parameters(struct smu_context *smu,
 	struct smu_14_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
 	struct smu_14_0_pcie_table *pcie_table =
 		&dpm_context->dpm_tables.pcie_table;
-	int num_of_levels = pcie_table->num_of_link_levels;
+	int num_of_levels;
 	uint32_t smu_pcie_arg;
-	int ret, i;
+	uint32_t link_level;
+	struct smu_table_context *table_context = &smu->smu_table;
+	PPTable_t *pptable = table_context->driver_pptable;
+	SkuTable_t *skutable = &pptable->SkuTable;
+	int ret = 0;
+	int i;
+
+	pcie_table->num_of_link_levels = 0;
+	for (link_level = 0; link_level < NUM_LINK_LEVELS; link_level++) {
+		if (!skutable->PcieGenSpeed[link_level] &&
+		    !skutable->PcieLaneCount[link_level] &&
+		    !skutable->LclkFreq[link_level])
+			continue;
+		pcie_table->pcie_gen[pcie_table->num_of_link_levels] =
+			skutable->PcieGenSpeed[link_level];
+		pcie_table->pcie_lane[pcie_table->num_of_link_levels] =
+			skutable->PcieLaneCount[link_level];
+		pcie_table->clk_freq[pcie_table->num_of_link_levels] =
+			skutable->LclkFreq[link_level];
+		pcie_table->num_of_link_levels++;
+	}
 
+	num_of_levels = pcie_table->num_of_link_levels;
 	if (!num_of_levels)
 		return 0;
 
@@ -1505,30 +1503,40 @@ static int smu_v14_0_2_update_pcie_parameters(struct smu_context *smu,
 		for (i = 0; i < num_of_levels; i++) {
 			pcie_table->pcie_gen[i] = pcie_gen_cap;
 			pcie_table->pcie_lane[i] = pcie_width_cap;
+			smu_pcie_arg = i << 16;
+			smu_pcie_arg |= pcie_table->pcie_gen[i] << 8;
+			smu_pcie_arg |= pcie_table->pcie_lane[i];
+
+			ret = smu_cmn_send_smc_msg_with_param(smu,
+							      SMU_MSG_OverridePcieParameters,
+							      smu_pcie_arg,
+							      NULL);
+			if (ret)
+				break;
 		}
 	} else {
 		for (i = 0; i < num_of_levels; i++) {
-			if (pcie_table->pcie_gen[i] > pcie_gen_cap)
-				pcie_table->pcie_gen[i] = pcie_gen_cap;
-			if (pcie_table->pcie_lane[i] > pcie_width_cap)
-				pcie_table->pcie_lane[i] = pcie_width_cap;
-		}
-	}
-
-	for (i = 0; i < num_of_levels; i++) {
-		smu_pcie_arg = i << 16;
-		smu_pcie_arg |= pcie_table->pcie_gen[i] << 8;
-		smu_pcie_arg |= pcie_table->pcie_lane[i];
-
-		ret = smu_cmn_send_smc_msg_with_param(smu,
+			if (pcie_table->pcie_gen[i] > pcie_gen_cap ||
+			    pcie_table->pcie_lane[i] > pcie_width_cap) {
+				pcie_table->pcie_gen[i] = pcie_table->pcie_gen[i] > pcie_gen_cap ?
+							  pcie_gen_cap : pcie_table->pcie_gen[i];
+				pcie_table->pcie_lane[i] = pcie_table->pcie_lane[i] > pcie_width_cap ?
+							   pcie_width_cap : pcie_table->pcie_lane[i];
+				smu_pcie_arg = i << 16;
+				smu_pcie_arg |= pcie_table->pcie_gen[i] << 8;
+				smu_pcie_arg |= pcie_table->pcie_lane[i];
+
+				ret = smu_cmn_send_smc_msg_with_param(smu,
 							      SMU_MSG_OverridePcieParameters,
 							      smu_pcie_arg,
 							      NULL);
-		if (ret)
-			return ret;
+				if (ret)
+					break;
+			}
+		}
 	}
 
-	return 0;
+	return ret;
 }
 
 static const struct smu_temperature_range smu14_thermal_policy[] = {
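The conditional updates in the else-branch above are plain minimum clamps; each ternary is equivalent to min() against the platform cap. An equivalent formulation, as an illustrative helper:

#include <stdint.h>

static inline uint8_t example_min_u8(uint8_t a, uint8_t b)
{
	return a < b ? a : b;
}

/* Equivalent to the two ternaries: never exceed the platform caps. */
static void example_clamp_link_level(uint8_t *gen, uint8_t *lane,
				     uint8_t gen_cap, uint8_t width_cap)
{
	*gen = example_min_u8(*gen, gen_cap);
	*lane = example_min_u8(*lane, width_cap);
}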
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
index 7eaf58fd7f9a..59f9abd0f7b8 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
@@ -86,6 +86,7 @@ static void smu_cmn_read_arg(struct smu_context *smu,
 #define SMU_RESP_BUSY_OTHER	0xFC
 #define SMU_RESP_DEBUG_END	0xFB
 
+#define SMU_RESP_UNEXP		(~0U)
 /**
  * __smu_cmn_poll_stat -- poll for a status from the SMU
  * @smu: a pointer to SMU context
@@ -171,6 +172,15 @@ static void __smu_cmn_reg_print_error(struct smu_context *smu,
 		dev_err_ratelimited(adev->dev,
 				    "SMU: I'm debugging!");
 		break;
+	case SMU_RESP_UNEXP:
+		if (amdgpu_device_bus_status_check(smu->adev)) {
+			/* print error immediately if device is off the bus */
+			dev_err(adev->dev,
+				"SMU: response:0x%08X for index:%d param:0x%08X message:%s?",
+				reg_c2pmsg_90, msg_index, param, message);
+			break;
+		}
+		fallthrough;
 	default:
 		dev_err_ratelimited(adev->dev,
 				    "SMU: response:0x%08X for index:%d param:0x%08X message:%s?",
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
index 7473672abd2a..a608cdbdada4 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
@@ -40,28 +40,29 @@
 #define SMU_IH_INTERRUPT_CONTEXT_ID_FAN_ABNORMAL 0x8
 #define SMU_IH_INTERRUPT_CONTEXT_ID_FAN_RECOVERY 0x9
 
-#define smu_cmn_init_soft_gpu_metrics(ptr, frev, crev) \
-	do { \
-		typecheck(struct gpu_metrics_v##frev##_##crev, \
-			  typeof(*(ptr))); \
-		struct metrics_table_header *header = \
-			(struct metrics_table_header *)(ptr); \
-		memset(header, 0xFF, sizeof(*(ptr))); \
-		header->format_revision = frev; \
-		header->content_revision = crev; \
-		header->structure_size = sizeof(*(ptr)); \
+#define smu_cmn_init_soft_gpu_metrics(ptr, frev, crev) \
+	do { \
+		typecheck(struct gpu_metrics_v##frev##_##crev *, (ptr)); \
+		struct gpu_metrics_v##frev##_##crev *tmp = (ptr); \
+		struct metrics_table_header *header = \
+			(struct metrics_table_header *)tmp; \
+		memset(header, 0xFF, sizeof(*tmp)); \
+		header->format_revision = frev; \
+		header->content_revision = crev; \
+		header->structure_size = sizeof(*tmp); \
 	} while (0)
 
-#define smu_cmn_init_partition_metrics(ptr, frev, crev) \
-	do { \
-		typecheck(struct amdgpu_partition_metrics_v##frev##_##crev, \
-			  typeof(*(ptr))); \
-		struct metrics_table_header *header = \
-			(struct metrics_table_header *)(ptr); \
-		memset(header, 0xFF, sizeof(*(ptr))); \
-		header->format_revision = frev; \
-		header->content_revision = crev; \
-		header->structure_size = sizeof(*(ptr)); \
+#define smu_cmn_init_partition_metrics(ptr, fr, cr) \
+	do { \
+		typecheck(struct amdgpu_partition_metrics_v##fr##_##cr *, \
+			  (ptr)); \
+		struct amdgpu_partition_metrics_v##fr##_##cr *tmp = (ptr); \
+		struct metrics_table_header *header = \
+			(struct metrics_table_header *)tmp; \
+		memset(header, 0xFF, sizeof(*tmp)); \
+		header->format_revision = fr; \
+		header->content_revision = cr; \
+		header->structure_size = sizeof(*tmp); \
 	} while (0)
 
 extern const int link_speed[];
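The reworked macros typecheck() the pointer type and then assign it to a typed temporary, so passing a pointer to the wrong metrics structure is diagnosed at compile time instead of silently memsetting the wrong number of bytes. A reduced stand-alone version of the same idea, with stand-in types:

#include <string.h>

struct example_header {
	unsigned char format_revision;
	unsigned char content_revision;
	unsigned short structure_size;
};

struct example_metrics_v1_0 {
	struct example_header hdr;
	unsigned int data[8];
};

/* The typed temporary rejects any pointer that is not v1_0. */
#define example_init_metrics_v1_0(ptr)				\
	do {							\
		struct example_metrics_v1_0 *tmp = (ptr);	\
		memset(tmp, 0xFF, sizeof(*tmp));		\
		tmp->hdr.format_revision = 1;			\
		tmp->hdr.content_revision = 0;			\
		tmp->hdr.structure_size = sizeof(*tmp);		\
	} while (0)

/* Usage: struct example_metrics_v1_0 m; example_init_metrics_v1_0(&m); */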