Diffstat (limited to 'drivers/gpu/drm/amd/pm')
31 files changed, 1055 insertions, 472 deletions
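
The headline addition in this series is amdgpu_dpm_get_xcp_metrics(), introduced in amdgpu_dpm.c below. Its kernel-doc defines a two-step contract: call with table == NULL to learn the size of the metrics structure, then call again with a buffer of that size. A minimal usage sketch follows; the helper name and the error handling are illustrative assumptions, not part of the patch:

    #include <linux/slab.h>

    /* Query the metrics size first (table == NULL), then fetch them. */
    static ssize_t demo_read_xcp_metrics(struct amdgpu_device *adev, int xcp_id)
    {
        ssize_t size;
        void *buf;

        size = amdgpu_dpm_get_xcp_metrics(adev, xcp_id, NULL);
        if (size <= 0)
            return size; /* 0: no backend support; < 0: error */

        buf = kzalloc(size, GFP_KERNEL);
        if (!buf)
            return -ENOMEM;

        size = amdgpu_dpm_get_xcp_metrics(adev, xcp_id, buf);
        /* ... consume the partition metrics here ... */
        kfree(buf);
        return size;
    }
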
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
index 2148c8db5a59..71d986dd7a6e 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
@@ -98,6 +98,7 @@ int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev,
     case AMD_IP_BLOCK_TYPE_GMC:
     case AMD_IP_BLOCK_TYPE_ACP:
     case AMD_IP_BLOCK_TYPE_VPE:
+    case AMD_IP_BLOCK_TYPE_ISP:
         if (pp_funcs && pp_funcs->set_powergating_by_smu)
             ret = (pp_funcs->set_powergating_by_smu(
                 (adev)->powerplay.pp_handle, block_type, gate, 0));
@@ -852,22 +853,16 @@ int amdgpu_dpm_set_soft_freq_range(struct amdgpu_device *adev,
                    uint32_t max)
 {
     struct smu_context *smu = adev->powerplay.pp_handle;
-    int ret = 0;
-
-    if (type != PP_SCLK)
-        return -EINVAL;
 
     if (!is_support_sw_smu(adev))
         return -EOPNOTSUPP;
 
-    mutex_lock(&adev->pm.mutex);
-    ret = smu_set_soft_freq_range(smu,
-                      SMU_SCLK,
+    guard(mutex)(&adev->pm.mutex);
+
+    return smu_set_soft_freq_range(smu,
+                       type,
                        min,
                        max);
-    mutex_unlock(&adev->pm.mutex);
-
-    return ret;
 }
 
 int amdgpu_dpm_write_watermarks_table(struct amdgpu_device *adev)
@@ -1697,6 +1692,28 @@ int amdgpu_dpm_is_overdrive_supported(struct amdgpu_device *adev)
     }
 }
 
+int amdgpu_dpm_is_overdrive_enabled(struct amdgpu_device *adev)
+{
+    if (is_support_sw_smu(adev)) {
+        struct smu_context *smu = adev->powerplay.pp_handle;
+
+        return smu->od_enabled;
+    } else {
+        struct pp_hwmgr *hwmgr;
+
+        /*
+         * DPM on some legacy ASICs doesn't carry the od_enabled member
+         * as its pp_handle is cast directly from adev.
+         */
+        if (amdgpu_dpm_is_legacy_dpm(adev))
+            return false;
+
+        hwmgr = (struct pp_hwmgr *)adev->powerplay.pp_handle;
+
+        return hwmgr->od_enabled;
+    }
+}
+
 int amdgpu_dpm_set_pp_table(struct amdgpu_device *adev,
                 const char *buf,
                 size_t size)
@@ -2019,3 +2036,35 @@ int amdgpu_dpm_get_dpm_clock_table(struct amdgpu_device *adev,
 
     return ret;
 }
+
+/**
+ * amdgpu_dpm_get_xcp_metrics - Retrieve metrics for a specific compute
+ * partition
+ * @adev: Pointer to the device.
+ * @xcp_id: Identifier of the XCP for which metrics are to be retrieved.
+ * @table: Pointer to a buffer where the metrics will be stored. If NULL, the
+ * function returns the size of the metrics structure.
+ *
+ * This function retrieves metrics for a specific XCP, including details such as
+ * VCN/JPEG activity, clock frequencies, and other performance metrics. If the
+ * table parameter is NULL, the function returns the size of the metrics
+ * structure without populating it.
+ *
+ * Return: Size of the metrics structure on success, or a negative error code on failure.
+ */
+ssize_t amdgpu_dpm_get_xcp_metrics(struct amdgpu_device *adev, int xcp_id,
+                   void *table)
+{
+    const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+    int ret = 0;
+
+    if (!pp_funcs->get_xcp_metrics)
+        return 0;
+
+    mutex_lock(&adev->pm.mutex);
+    ret = pp_funcs->get_xcp_metrics(adev->powerplay.pp_handle, xcp_id,
+                    table);
+    mutex_unlock(&adev->pm.mutex);
+
+    return ret;
+}
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
index edd9895b46c0..4b64851fdb42 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
@@ -1398,6 +1398,8 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
         if (ret)
             return -EINVAL;
         parameter_size++;
+        if (!tmp_str)
+            break;
         while (isspace(*tmp_str))
             tmp_str++;
     }
@@ -1890,7 +1892,7 @@ out:
 static int ss_power_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
                 uint32_t mask, enum amdgpu_device_attr_states *states)
 {
-    if (!amdgpu_device_supports_smart_shift(adev_to_drm(adev)))
+    if (!amdgpu_device_supports_smart_shift(adev))
         *states = ATTR_STATE_UNSUPPORTED;
 
     return 0;
@@ -1901,7 +1903,7 @@ static int ss_bias_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
 {
     uint32_t ss_power;
 
-    if (!amdgpu_device_supports_smart_shift(adev_to_drm(adev)))
+    if (!amdgpu_device_supports_smart_shift(adev))
         *states = ATTR_STATE_UNSUPPORTED;
     else if (amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_SS_APU_SHARE,
                          (void *)&ss_power))
@@ -3645,6 +3647,9 @@ static int parse_input_od_command_lines(const char *buf,
             return -EINVAL;
         parameter_size++;
+        if (!tmp_str)
+            break;
+
         while (isspace(*tmp_str))
             tmp_str++;
     }
diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
index 2c3c97587dd5..768317ee1486 100644
--- a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
+++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
@@ -524,6 +524,8 @@ int amdgpu_dpm_get_power_profile_mode(struct amdgpu_device *adev,
 int amdgpu_dpm_set_power_profile_mode(struct amdgpu_device *adev,
                       long *input, uint32_t size);
 int amdgpu_dpm_get_gpu_metrics(struct amdgpu_device *adev, void **table);
+ssize_t amdgpu_dpm_get_xcp_metrics(struct amdgpu_device *adev, int xcp_id,
+                   void *table);
 
 /**
  * @get_pm_metrics: Get one snapshot of power management metrics from PMFW.
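
The amdgpu_dpm_set_soft_freq_range() hunk above also converts an explicit mutex_lock()/mutex_unlock() pair to scope-based locking, and drops the PP_SCLK-only check so any pp_clock_type is forwarded (validation moves into smu_set_soft_freq_range(), seen later in amdgpu_smu.c). A minimal sketch of the locking equivalence, assuming the guard() semantics from <linux/cleanup.h> (the lock is taken immediately and released automatically when the variable leaves scope, on every return path):

    #include <linux/cleanup.h>
    #include <linux/mutex.h>

    static DEFINE_MUTEX(demo_lock);
    static int demo_value;

    /* Before: every return path must pair the lock with an unlock. */
    static int demo_read_old(void)
    {
        int ret;

        mutex_lock(&demo_lock);
        ret = demo_value;
        mutex_unlock(&demo_lock);
        return ret;
    }

    /* After: guard() unlocks at scope exit, so early returns stay safe. */
    static int demo_read_new(void)
    {
        guard(mutex)(&demo_lock);

        return demo_value;
    }
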
The @@ -561,6 +563,7 @@ int amdgpu_dpm_get_smu_prv_buf_details(struct amdgpu_device *adev, void **addr, size_t *size); int amdgpu_dpm_is_overdrive_supported(struct amdgpu_device *adev); +int amdgpu_dpm_is_overdrive_enabled(struct amdgpu_device *adev); int amdgpu_dpm_set_pp_table(struct amdgpu_device *adev, const char *buf, size_t size); diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c index 59fae668dc3f..307ebf7e3226 100644 --- a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c +++ b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c @@ -1242,7 +1242,7 @@ static void kv_dpm_enable_bapm(void *handle, bool enable) if (pi->bapm_enable) { ret = amdgpu_kv_smc_bapm_enable(adev, enable); if (ret) - DRM_ERROR("amdgpu_kv_smc_bapm_enable failed\n"); + drm_err(adev_to_drm(adev), "amdgpu_kv_smc_bapm_enable failed\n"); } } @@ -1266,40 +1266,40 @@ static int kv_dpm_enable(struct amdgpu_device *adev) ret = kv_process_firmware_header(adev); if (ret) { - DRM_ERROR("kv_process_firmware_header failed\n"); + drm_err(adev_to_drm(adev), "kv_process_firmware_header failed\n"); return ret; } kv_init_fps_limits(adev); kv_init_graphics_levels(adev); ret = kv_program_bootup_state(adev); if (ret) { - DRM_ERROR("kv_program_bootup_state failed\n"); + drm_err(adev_to_drm(adev), "kv_program_bootup_state failed\n"); return ret; } kv_calculate_dfs_bypass_settings(adev); ret = kv_upload_dpm_settings(adev); if (ret) { - DRM_ERROR("kv_upload_dpm_settings failed\n"); + drm_err(adev_to_drm(adev), "kv_upload_dpm_settings failed\n"); return ret; } ret = kv_populate_uvd_table(adev); if (ret) { - DRM_ERROR("kv_populate_uvd_table failed\n"); + drm_err(adev_to_drm(adev), "kv_populate_uvd_table failed\n"); return ret; } ret = kv_populate_vce_table(adev); if (ret) { - DRM_ERROR("kv_populate_vce_table failed\n"); + drm_err(adev_to_drm(adev), "kv_populate_vce_table failed\n"); return ret; } ret = kv_populate_samu_table(adev); if (ret) { - DRM_ERROR("kv_populate_samu_table failed\n"); + drm_err(adev_to_drm(adev), "kv_populate_samu_table failed\n"); return ret; } ret = kv_populate_acp_table(adev); if (ret) { - DRM_ERROR("kv_populate_acp_table failed\n"); + drm_err(adev_to_drm(adev), "kv_populate_acp_table failed\n"); return ret; } kv_program_vc(adev); @@ -1310,39 +1310,39 @@ static int kv_dpm_enable(struct amdgpu_device *adev) if (pi->enable_auto_thermal_throttling) { ret = kv_enable_auto_thermal_throttling(adev); if (ret) { - DRM_ERROR("kv_enable_auto_thermal_throttling failed\n"); + drm_err(adev_to_drm(adev), "kv_enable_auto_thermal_throttling failed\n"); return ret; } } ret = kv_enable_dpm_voltage_scaling(adev); if (ret) { - DRM_ERROR("kv_enable_dpm_voltage_scaling failed\n"); + drm_err(adev_to_drm(adev), "kv_enable_dpm_voltage_scaling failed\n"); return ret; } ret = kv_set_dpm_interval(adev); if (ret) { - DRM_ERROR("kv_set_dpm_interval failed\n"); + drm_err(adev_to_drm(adev), "kv_set_dpm_interval failed\n"); return ret; } ret = kv_set_dpm_boot_state(adev); if (ret) { - DRM_ERROR("kv_set_dpm_boot_state failed\n"); + drm_err(adev_to_drm(adev), "kv_set_dpm_boot_state failed\n"); return ret; } ret = kv_enable_ulv(adev, true); if (ret) { - DRM_ERROR("kv_enable_ulv failed\n"); + drm_err(adev_to_drm(adev), "kv_enable_ulv failed\n"); return ret; } kv_start_dpm(adev); ret = kv_enable_didt(adev, true); if (ret) { - DRM_ERROR("kv_enable_didt failed\n"); + drm_err(adev_to_drm(adev), "kv_enable_didt failed\n"); return ret; } ret = kv_enable_smc_cac(adev, true); if (ret) { - DRM_ERROR("kv_enable_smc_cac 
failed\n"); + drm_err(adev_to_drm(adev), "kv_enable_smc_cac failed\n"); return ret; } @@ -1350,7 +1350,7 @@ static int kv_dpm_enable(struct amdgpu_device *adev) ret = amdgpu_kv_smc_bapm_enable(adev, false); if (ret) { - DRM_ERROR("amdgpu_kv_smc_bapm_enable failed\n"); + drm_err(adev_to_drm(adev), "amdgpu_kv_smc_bapm_enable failed\n"); return ret; } @@ -1358,7 +1358,7 @@ static int kv_dpm_enable(struct amdgpu_device *adev) kv_is_internal_thermal_sensor(adev->pm.int_thermal_type)) { ret = kv_set_thermal_temperature_range(adev, KV_TEMP_RANGE_MIN, KV_TEMP_RANGE_MAX); if (ret) { - DRM_ERROR("kv_set_thermal_temperature_range failed\n"); + drm_err(adev_to_drm(adev), "kv_set_thermal_temperature_range failed\n"); return ret; } amdgpu_irq_get(adev, &adev->pm.dpm.thermal.irq, @@ -1382,7 +1382,7 @@ static void kv_dpm_disable(struct amdgpu_device *adev) err = amdgpu_kv_smc_bapm_enable(adev, false); if (err) - DRM_ERROR("amdgpu_kv_smc_bapm_enable failed\n"); + drm_err(adev_to_drm(adev), "amdgpu_kv_smc_bapm_enable failed\n"); if (adev->asic_type == CHIP_MULLINS) kv_enable_nb_dpm(adev, false); @@ -1920,7 +1920,7 @@ static int kv_dpm_set_power_state(void *handle) if (pi->bapm_enable) { ret = amdgpu_kv_smc_bapm_enable(adev, adev->pm.ac_power); if (ret) { - DRM_ERROR("amdgpu_kv_smc_bapm_enable failed\n"); + drm_err(adev_to_drm(adev), "amdgpu_kv_smc_bapm_enable failed\n"); return ret; } } @@ -1931,7 +1931,7 @@ static int kv_dpm_set_power_state(void *handle) kv_update_dfs_bypass_settings(adev, new_ps); ret = kv_calculate_ds_divider(adev); if (ret) { - DRM_ERROR("kv_calculate_ds_divider failed\n"); + drm_err(adev_to_drm(adev), "kv_calculate_ds_divider failed\n"); return ret; } kv_calculate_nbps_level_settings(adev); @@ -1947,7 +1947,7 @@ static int kv_dpm_set_power_state(void *handle) ret = kv_update_vce_dpm(adev, new_ps, old_ps); if (ret) { - DRM_ERROR("kv_update_vce_dpm failed\n"); + drm_err(adev_to_drm(adev), "kv_update_vce_dpm failed\n"); return ret; } kv_update_sclk_t(adev); @@ -1960,7 +1960,7 @@ static int kv_dpm_set_power_state(void *handle) kv_update_dfs_bypass_settings(adev, new_ps); ret = kv_calculate_ds_divider(adev); if (ret) { - DRM_ERROR("kv_calculate_ds_divider failed\n"); + drm_err(adev_to_drm(adev), "kv_calculate_ds_divider failed\n"); return ret; } kv_calculate_nbps_level_settings(adev); @@ -1972,7 +1972,7 @@ static int kv_dpm_set_power_state(void *handle) kv_set_enabled_levels(adev); ret = kv_update_vce_dpm(adev, new_ps, old_ps); if (ret) { - DRM_ERROR("kv_update_vce_dpm failed\n"); + drm_err(adev_to_drm(adev), "kv_update_vce_dpm failed\n"); return ret; } kv_update_acp_boot_level(adev); @@ -2521,7 +2521,7 @@ static int kv_set_thermal_temperature_range(struct amdgpu_device *adev, if (high_temp > max_temp) high_temp = max_temp; if (high_temp < low_temp) { - DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp); + drm_err(adev_to_drm(adev), "invalid thermal range: %d - %d\n", low_temp, high_temp); return -EINVAL; } @@ -2563,7 +2563,7 @@ static int kv_parse_sys_info_table(struct amdgpu_device *adev) data_offset); if (crev != 8) { - DRM_ERROR("Unsupported IGP table: %d %d\n", frev, crev); + drm_err(adev_to_drm(adev), "Unsupported IGP table: %d %d\n", frev, crev); return -EINVAL; } pi->sys_info.bootup_sclk = le32_to_cpu(igp_info->info_8.ulBootUpEngineClock); @@ -2579,7 +2579,7 @@ static int kv_parse_sys_info_table(struct amdgpu_device *adev) else pi->sys_info.htc_hyst_lmt = igp_info->info_8.ucHtcHystLmt; if (pi->sys_info.htc_tmp_lmt <= pi->sys_info.htc_hyst_lmt) { - DRM_ERROR("The 
htcTmpLmt should be larger than htcHystLmt.\n"); + drm_err(adev_to_drm(adev), "The htcTmpLmt should be larger than htcHystLmt.\n"); } if (le32_to_cpu(igp_info->info_8.ulSystemConfig) & (1 << 3)) @@ -2594,7 +2594,7 @@ static int kv_parse_sys_info_table(struct amdgpu_device *adev) le32_to_cpu(igp_info->info_8.ulNbpStateNClkFreq[i]); } if (le32_to_cpu(igp_info->info_8.ulGPUCapInfo) & - SYS_INFO_GPUCAPS__ENABEL_DFS_BYPASS) + SYS_INFO_GPUCAPS__ENABLE_DFS_BYPASS) pi->caps_enable_dfs_bypass = true; sumo_construct_sclk_voltage_mapping_table(adev, @@ -2886,16 +2886,18 @@ kv_dpm_print_power_state(void *handle, void *request_ps) struct kv_ps *ps = kv_get_ps(rps); struct amdgpu_device *adev = (struct amdgpu_device *)handle; - amdgpu_dpm_print_class_info(rps->class, rps->class2); - amdgpu_dpm_print_cap_info(rps->caps); - printk("\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk); + amdgpu_dpm_dbg_print_class_info(adev, rps->class, rps->class2); + amdgpu_dpm_dbg_print_cap_info(adev, rps->caps); + drm_dbg(adev_to_drm(adev), "vclk: %d, dclk: %d\n", + rps->vclk, rps->dclk); for (i = 0; i < ps->num_levels; i++) { struct kv_pl *pl = &ps->levels[i]; - printk("\t\tpower level %d sclk: %u vddc: %u\n", - i, pl->sclk, - kv_convert_8bit_index_to_voltage(adev, pl->vddc_index)); + drm_dbg(adev_to_drm(adev), + "power level %d sclk: %u vddc: %u\n", + i, pl->sclk, + kv_convert_8bit_index_to_voltage(adev, pl->vddc_index)); } - amdgpu_dpm_print_ps_status(adev, rps); + amdgpu_dpm_dbg_print_ps_status(adev, rps); } static void kv_dpm_fini(struct amdgpu_device *adev) @@ -3013,13 +3015,13 @@ static int kv_dpm_sw_init(struct amdgpu_ip_block *ip_block) adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps; if (amdgpu_dpm == 1) amdgpu_pm_print_power_states(adev); - DRM_INFO("amdgpu: dpm initialized\n"); + drm_info(adev_to_drm(adev), "dpm initialized\n"); return 0; dpm_failed: kv_dpm_fini(adev); - DRM_ERROR("amdgpu: dpm initialization failed\n"); + drm_err(adev_to_drm(adev), "dpm initialization failed: %d\n", ret); return ret; } diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c index c7518b13e787..ea3ace882a10 100644 --- a/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c +++ b/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c @@ -47,7 +47,7 @@ #define amdgpu_dpm_check_state_equal(adev, cps, rps, equal) \ ((adev)->powerplay.pp_funcs->check_state_equal((adev)->powerplay.pp_handle, (cps), (rps), (equal))) -void amdgpu_dpm_print_class_info(u32 class, u32 class2) +void amdgpu_dpm_dbg_print_class_info(struct amdgpu_device *adev, u32 class, u32 class2) { const char *s; @@ -66,71 +66,45 @@ void amdgpu_dpm_print_class_info(u32 class, u32 class2) s = "performance"; break; } - printk("\tui class: %s\n", s); - printk("\tinternal class:"); + drm_dbg(adev_to_drm(adev), "\tui class: %s\n", s); if (((class & ~ATOM_PPLIB_CLASSIFICATION_UI_MASK) == 0) && (class2 == 0)) - pr_cont(" none"); - else { - if (class & ATOM_PPLIB_CLASSIFICATION_BOOT) - pr_cont(" boot"); - if (class & ATOM_PPLIB_CLASSIFICATION_THERMAL) - pr_cont(" thermal"); - if (class & ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE) - pr_cont(" limited_pwr"); - if (class & ATOM_PPLIB_CLASSIFICATION_REST) - pr_cont(" rest"); - if (class & ATOM_PPLIB_CLASSIFICATION_FORCED) - pr_cont(" forced"); - if (class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE) - pr_cont(" 3d_perf"); - if (class & ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE) - pr_cont(" ovrdrv"); - if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE) - pr_cont(" uvd"); 
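
The logging conversions in this hunk replace printk()/pr_cont() sequences, whose fragments can interleave with output from other CPUs, with a single drm_dbg()/drm_err() per record that also tags the originating device. A sketch of the resulting pattern, with hypothetical flag names:

    #include <drm/drm_print.h>
    #include <linux/bits.h>

    #define DEMO_FLAG_BOOT    BIT(0)
    #define DEMO_FLAG_THERMAL BIT(1)

    static void demo_print_flags(struct drm_device *drm, u32 flags)
    {
        /* One printf-style call emits one complete, device-tagged record;
         * empty "%s" arguments simply vanish from the output. */
        drm_dbg(drm, "flags:%s%s\n",
            (flags & DEMO_FLAG_BOOT) ? " boot" : "",
            (flags & DEMO_FLAG_THERMAL) ? " thermal" : "");
    }
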
- if (class & ATOM_PPLIB_CLASSIFICATION_3DLOW) - pr_cont(" 3d_low"); - if (class & ATOM_PPLIB_CLASSIFICATION_ACPI) - pr_cont(" acpi"); - if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE) - pr_cont(" uvd_hd2"); - if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE) - pr_cont(" uvd_hd"); - if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE) - pr_cont(" uvd_sd"); - if (class2 & ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2) - pr_cont(" limited_pwr2"); - if (class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) - pr_cont(" ulv"); - if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC) - pr_cont(" uvd_mvc"); - } - pr_cont("\n"); + drm_dbg(adev_to_drm(adev), "\tinternal class: none\n"); + else + drm_dbg(adev_to_drm(adev), "\tinternal class: %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n", + (class & ATOM_PPLIB_CLASSIFICATION_BOOT) ? " boot" : "", + (class & ATOM_PPLIB_CLASSIFICATION_THERMAL) ? " thermal" : "", + (class & ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE) ? " limited_pwr" : "", + (class & ATOM_PPLIB_CLASSIFICATION_REST) ? " rest" : "", + (class & ATOM_PPLIB_CLASSIFICATION_FORCED) ? " forced" : "", + (class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE) ? " 3d_perf" : "", + (class & ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE) ? " ovrdrv" : "", + (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE) ? " uvd" : "", + (class & ATOM_PPLIB_CLASSIFICATION_3DLOW) ? " 3d_low" : "", + (class & ATOM_PPLIB_CLASSIFICATION_ACPI) ? " acpi" : "", + (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE) ? " uvd_hd2" : "", + (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE) ? " uvd_hd" : "", + (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE) ? " uvd_sd" : "", + (class2 & ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2) ? " limited_pwr2" : "", + (class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) ? " ulv" : "", + (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC) ? " uvd_mvc" : ""); } -void amdgpu_dpm_print_cap_info(u32 caps) +void amdgpu_dpm_dbg_print_cap_info(struct amdgpu_device *adev, u32 caps) { - printk("\tcaps:"); - if (caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) - pr_cont(" single_disp"); - if (caps & ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK) - pr_cont(" video"); - if (caps & ATOM_PPLIB_DISALLOW_ON_DC) - pr_cont(" no_dc"); - pr_cont("\n"); + drm_dbg(adev_to_drm(adev), "\tcaps: %s%s%s\n", + (caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) ? " single_disp" : "", + (caps & ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK) ? " video" : "", + (caps & ATOM_PPLIB_DISALLOW_ON_DC) ? " no_dc" : ""); } -void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev, +void amdgpu_dpm_dbg_print_ps_status(struct amdgpu_device *adev, struct amdgpu_ps *rps) { - printk("\tstatus:"); - if (rps == adev->pm.dpm.current_ps) - pr_cont(" c"); - if (rps == adev->pm.dpm.requested_ps) - pr_cont(" r"); - if (rps == adev->pm.dpm.boot_ps) - pr_cont(" b"); - pr_cont("\n"); + drm_dbg(adev_to_drm(adev), "\tstatus:%s%s%s\n", + rps == adev->pm.dpm.current_ps ? " c" : "", + rps == adev->pm.dpm.requested_ps ? " r" : "", + rps == adev->pm.dpm.boot_ps ? " b" : ""); } void amdgpu_pm_print_power_states(struct amdgpu_device *adev) @@ -699,64 +673,64 @@ void amdgpu_add_thermal_controller(struct amdgpu_device *adev) adev->pm.fan_max_rpm = controller->ucFanMaxRPM; } if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) { - DRM_INFO("Internal thermal controller %s fan control\n", + drm_info(adev_to_drm(adev), "Internal thermal controller %s fan control\n", (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN) ? 
"without" : "with"); adev->pm.int_thermal_type = THERMAL_TYPE_RV6XX; } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV770) { - DRM_INFO("Internal thermal controller %s fan control\n", + drm_info(adev_to_drm(adev), "Internal thermal controller %s fan control\n", (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); adev->pm.int_thermal_type = THERMAL_TYPE_RV770; } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EVERGREEN) { - DRM_INFO("Internal thermal controller %s fan control\n", + drm_info(adev_to_drm(adev), "Internal thermal controller %s fan control\n", (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); adev->pm.int_thermal_type = THERMAL_TYPE_EVERGREEN; } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SUMO) { - DRM_INFO("Internal thermal controller %s fan control\n", + drm_info(adev_to_drm(adev), "Internal thermal controller %s fan control\n", (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); adev->pm.int_thermal_type = THERMAL_TYPE_SUMO; } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_NISLANDS) { - DRM_INFO("Internal thermal controller %s fan control\n", + drm_info(adev_to_drm(adev), "Internal thermal controller %s fan control\n", (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); adev->pm.int_thermal_type = THERMAL_TYPE_NI; } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SISLANDS) { - DRM_INFO("Internal thermal controller %s fan control\n", + drm_info(adev_to_drm(adev), "Internal thermal controller %s fan control\n", (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); adev->pm.int_thermal_type = THERMAL_TYPE_SI; } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_CISLANDS) { - DRM_INFO("Internal thermal controller %s fan control\n", + drm_info(adev_to_drm(adev), "Internal thermal controller %s fan control\n", (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); adev->pm.int_thermal_type = THERMAL_TYPE_CI; } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_KAVERI) { - DRM_INFO("Internal thermal controller %s fan control\n", + drm_info(adev_to_drm(adev), "Internal thermal controller %s fan control\n", (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); adev->pm.int_thermal_type = THERMAL_TYPE_KV; } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) { - DRM_INFO("External GPIO thermal controller %s fan control\n", + drm_info(adev_to_drm(adev), "External GPIO thermal controller %s fan control\n", (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); adev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL_GPIO; } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL) { - DRM_INFO("ADT7473 with internal thermal controller %s fan control\n", + drm_info(adev_to_drm(adev), "ADT7473 with internal thermal controller %s fan control\n", (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); adev->pm.int_thermal_type = THERMAL_TYPE_ADT7473_WITH_INTERNAL; } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL) { - DRM_INFO("EMC2103 with internal thermal controller %s fan control\n", + drm_info(adev_to_drm(adev), "EMC2103 with internal thermal controller %s fan control\n", (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN) ? 
"without" : "with"); adev->pm.int_thermal_type = THERMAL_TYPE_EMC2103_WITH_INTERNAL; } else if (controller->ucType < ARRAY_SIZE(pp_lib_thermal_controller_names)) { - DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n", + drm_info(adev_to_drm(adev), "Possible %s thermal controller at 0x%02x %s fan control\n", pp_lib_thermal_controller_names[controller->ucType], controller->ucI2cAddress >> 1, (controller->ucFanParameters & @@ -772,7 +746,7 @@ void amdgpu_add_thermal_controller(struct amdgpu_device *adev) i2c_new_client_device(&adev->pm.i2c_bus->adapter, &info); } } else { - DRM_INFO("Unknown thermal controller type %d at 0x%02x %s fan control\n", + drm_info(adev_to_drm(adev), "Unknown thermal controller type %d at 0x%02x %s fan control\n", controller->ucType, controller->ucI2cAddress >> 1, (controller->ucFanParameters & @@ -943,9 +917,9 @@ static int amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev) return -EINVAL; if (amdgpu_dpm == 1 && pp_funcs->print_power_state) { - printk("switching from power state:\n"); + drm_dbg(adev_to_drm(adev), "switching from power state\n"); amdgpu_dpm_print_power_state(adev, adev->pm.dpm.current_ps); - printk("switching to power state:\n"); + drm_dbg(adev_to_drm(adev), "switching to power state\n"); amdgpu_dpm_print_power_state(adev, adev->pm.dpm.requested_ps); } diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.h b/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.h index 93bd3973330c..7120eef30509 100644 --- a/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.h +++ b/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.h @@ -23,10 +23,9 @@ #ifndef __LEGACY_DPM_H__ #define __LEGACY_DPM_H__ -void amdgpu_dpm_print_class_info(u32 class, u32 class2); -void amdgpu_dpm_print_cap_info(u32 caps); -void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev, - struct amdgpu_ps *rps); +void amdgpu_dpm_dbg_print_class_info(struct amdgpu_device *adev, u32 class, u32 class2); +void amdgpu_dpm_dbg_print_cap_info(struct amdgpu_device *adev, u32 caps); +void amdgpu_dpm_dbg_print_ps_status(struct amdgpu_device *adev, struct amdgpu_ps *rps); int amdgpu_get_platform_caps(struct amdgpu_device *adev); int amdgpu_parse_extended_power_table(struct amdgpu_device *adev); void amdgpu_free_extended_power_table(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c index 4c0e976004ba..52e732be59e3 100644 --- a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c +++ b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c @@ -7951,15 +7951,15 @@ static void si_dpm_print_power_state(void *handle, struct rv7xx_pl *pl; int i; - amdgpu_dpm_print_class_info(rps->class, rps->class2); - amdgpu_dpm_print_cap_info(rps->caps); - DRM_INFO("\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk); + amdgpu_dpm_dbg_print_class_info(adev, rps->class, rps->class2); + amdgpu_dpm_dbg_print_cap_info(adev, rps->caps); + drm_dbg(adev_to_drm(adev), "\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk); for (i = 0; i < ps->performance_level_count; i++) { pl = &ps->performance_levels[i]; - DRM_INFO("\t\tpower level %d sclk: %u mclk: %u vddc: %u vddci: %u pcie gen: %u\n", + drm_dbg(adev_to_drm(adev), "\t\tpower level %d sclk: %u mclk: %u vddc: %u vddci: %u pcie gen: %u\n", i, pl->sclk, pl->mclk, pl->vddc, pl->vddci, pl->pcie_gen + 1); } - amdgpu_dpm_print_ps_status(adev, rps); + amdgpu_dpm_dbg_print_ps_status(adev, rps); } static int si_dpm_early_init(struct amdgpu_ip_block *ip_block) diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c 
b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c index 9d3b33446adc..9b20076e26c0 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c @@ -394,7 +394,7 @@ static int smu8_get_system_info_data(struct pp_hwmgr *hwmgr) } if (le32_to_cpu(info->ulGPUCapInfo) & - SYS_INFO_GPUCAPS__ENABEL_DFS_BYPASS) { + SYS_INFO_GPUCAPS__ENABLE_DFS_BYPASS) { phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_EnableDFSBypass); } diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c index 79a566f3564a..c305ea4ec17d 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c @@ -149,7 +149,7 @@ int phm_wait_on_indirect_register(struct pp_hwmgr *hwmgr, } cgs_write_register(hwmgr->device, indirect_port, index); - return phm_wait_on_register(hwmgr, indirect_port + 1, mask, value); + return phm_wait_on_register(hwmgr, indirect_port + 1, value, mask); } int phm_wait_for_register_unequal(struct pp_hwmgr *hwmgr, diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c index f24a1d8c77db..756afe78a6e5 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c @@ -76,6 +76,7 @@ static void smu_power_profile_mode_get(struct smu_context *smu, enum PP_SMC_POWER_PROFILE profile_mode); static void smu_power_profile_mode_put(struct smu_context *smu, enum PP_SMC_POWER_PROFILE profile_mode); +static enum smu_clk_type smu_convert_to_smuclk(enum pp_clock_type type); static int smu_sys_get_pp_feature_mask(void *handle, char *buf) @@ -134,12 +135,17 @@ int smu_get_status_gfxoff(struct smu_context *smu, uint32_t *value) } int smu_set_soft_freq_range(struct smu_context *smu, - enum smu_clk_type clk_type, + enum pp_clock_type type, uint32_t min, uint32_t max) { + enum smu_clk_type clk_type; int ret = 0; + clk_type = smu_convert_to_smuclk(type); + if (clk_type == SMU_CLK_COUNT) + return -EINVAL; + if (smu->ppt_funcs->set_soft_freq_limited_range) ret = smu->ppt_funcs->set_soft_freq_limited_range(smu, clk_type, @@ -307,6 +313,26 @@ static int smu_dpm_set_vpe_enable(struct smu_context *smu, return ret; } +static int smu_dpm_set_isp_enable(struct smu_context *smu, + bool enable) +{ + struct smu_power_context *smu_power = &smu->smu_power; + struct smu_power_gate *power_gate = &smu_power->power_gate; + int ret; + + if (!smu->ppt_funcs->dpm_set_isp_enable) + return 0; + + if (atomic_read(&power_gate->isp_gated) ^ enable) + return 0; + + ret = smu->ppt_funcs->dpm_set_isp_enable(smu, enable); + if (!ret) + atomic_set(&power_gate->isp_gated, !enable); + + return ret; +} + static int smu_dpm_set_umsch_mm_enable(struct smu_context *smu, bool enable) { @@ -408,6 +434,12 @@ static int smu_dpm_set_power_gate(void *handle, dev_err(smu->adev->dev, "Failed to power %s VPE!\n", gate ? "gate" : "ungate"); break; + case AMD_IP_BLOCK_TYPE_ISP: + ret = smu_dpm_set_isp_enable(smu, !gate); + if (ret) + dev_err(smu->adev->dev, "Failed to power %s ISP!\n", + gate ? 
"gate" : "ungate"); + break; default: dev_err(smu->adev->dev, "Unsupported block type!\n"); return -EINVAL; @@ -1004,6 +1036,21 @@ static int smu_fini_fb_allocations(struct smu_context *smu) return 0; } +static void smu_update_gpu_addresses(struct smu_context *smu) +{ + struct smu_table_context *smu_table = &smu->smu_table; + struct smu_table *pm_status_table = smu_table->tables + SMU_TABLE_PMSTATUSLOG; + struct smu_table *driver_table = &(smu_table->driver_table); + struct smu_table *dummy_read_1_table = &smu_table->dummy_read_1_table; + + if (pm_status_table->bo) + pm_status_table->mc_address = amdgpu_bo_fb_aper_addr(pm_status_table->bo); + if (driver_table->bo) + driver_table->mc_address = amdgpu_bo_fb_aper_addr(driver_table->bo); + if (dummy_read_1_table->bo) + dummy_read_1_table->mc_address = amdgpu_bo_fb_aper_addr(dummy_read_1_table->bo); +} + /** * smu_alloc_memory_pool - allocate memory pool in the system memory * @@ -1285,6 +1332,7 @@ static int smu_sw_init(struct amdgpu_ip_block *ip_block) atomic_set(&smu->smu_power.power_gate.vcn_gated[i], 1); atomic_set(&smu->smu_power.power_gate.jpeg_gated, 1); atomic_set(&smu->smu_power.power_gate.vpe_gated, 1); + atomic_set(&smu->smu_power.power_gate.isp_gated, 1); atomic_set(&smu->smu_power.power_gate.umsch_mm_gated, 1); smu_init_power_profile(smu); @@ -1672,37 +1720,6 @@ static int smu_smc_hw_setup(struct smu_context *smu) } } - ret = smu_system_features_control(smu, true); - if (ret) { - dev_err(adev->dev, "Failed to enable requested dpm features!\n"); - return ret; - } - - smu_init_xgmi_plpd_mode(smu); - - ret = smu_feature_get_enabled_mask(smu, &features_supported); - if (ret) { - dev_err(adev->dev, "Failed to retrieve supported dpm features!\n"); - return ret; - } - bitmap_copy(feature->supported, - (unsigned long *)&features_supported, - feature->feature_num); - - if (!smu_is_dpm_running(smu)) - dev_info(adev->dev, "dpm has been disabled\n"); - - /* - * Set initialized values (get from vbios) to dpm tables context such as - * gfxclk, memclk, dcefclk, and etc. And enable the DPM feature for each - * type of clks. - */ - ret = smu_set_default_dpm_table(smu); - if (ret) { - dev_err(adev->dev, "Failed to setup default dpm clock tables!\n"); - return ret; - } - if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5) pcie_gen = 4; else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4) @@ -1738,6 +1755,37 @@ static int smu_smc_hw_setup(struct smu_context *smu) return ret; } + ret = smu_system_features_control(smu, true); + if (ret) { + dev_err(adev->dev, "Failed to enable requested dpm features!\n"); + return ret; + } + + smu_init_xgmi_plpd_mode(smu); + + ret = smu_feature_get_enabled_mask(smu, &features_supported); + if (ret) { + dev_err(adev->dev, "Failed to retrieve supported dpm features!\n"); + return ret; + } + bitmap_copy(feature->supported, + (unsigned long *)&features_supported, + feature->feature_num); + + if (!smu_is_dpm_running(smu)) + dev_info(adev->dev, "dpm has been disabled\n"); + + /* + * Set initialized values (get from vbios) to dpm tables context such as + * gfxclk, memclk, dcefclk, and etc. And enable the DPM feature for each + * type of clks. 
+ */ + ret = smu_set_default_dpm_table(smu); + if (ret) { + dev_err(adev->dev, "Failed to setup default dpm clock tables!\n"); + return ret; + } + ret = smu_get_thermal_temperature_range(smu); if (ret) { dev_err(adev->dev, "Failed to get thermal temperature ranges!\n"); @@ -1780,6 +1828,9 @@ static int smu_start_smc_engine(struct smu_context *smu) struct amdgpu_device *adev = smu->adev; int ret = 0; + if (amdgpu_virt_xgmi_migrate_enabled(adev)) + smu_update_gpu_addresses(smu); + smu->smc_fw_state = SMU_FW_INIT; if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) { @@ -2935,6 +2986,12 @@ static enum smu_clk_type smu_convert_to_smuclk(enum pp_clock_type type) clk_type = SMU_DCLK; break; case PP_DCLK1: clk_type = SMU_DCLK1; break; + case PP_ISPICLK: + clk_type = SMU_ISPICLK; + break; + case PP_ISPXCLK: + clk_type = SMU_ISPXCLK; + break; case OD_SCLK: clk_type = SMU_OD_SCLK; break; case OD_MCLK: @@ -3758,6 +3815,19 @@ int smu_set_pm_policy(struct smu_context *smu, enum pp_pm_policy p_type, return ret; } +static ssize_t smu_sys_get_xcp_metrics(void *handle, int xcp_id, void *table) +{ + struct smu_context *smu = handle; + + if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) + return -EOPNOTSUPP; + + if (!smu->adev->xcp_mgr || !smu->ppt_funcs->get_xcp_metrics) + return -EOPNOTSUPP; + + return smu->ppt_funcs->get_xcp_metrics(smu, xcp_id, table); +} + static const struct amd_pm_funcs swsmu_pm_funcs = { /* export for sysfs */ .set_fan_control_mode = smu_set_fan_control_mode, @@ -3816,6 +3886,7 @@ static const struct amd_pm_funcs swsmu_pm_funcs = { .get_uclk_dpm_states = smu_get_uclk_dpm_states, .get_dpm_clock_table = smu_get_dpm_clock_table, .get_smu_prv_buf_details = smu_get_prv_buffer_details, + .get_xcp_metrics = smu_sys_get_xcp_metrics, }; int smu_wait_for_event(struct smu_context *smu, enum smu_event_type event, diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h index d47e32ae4671..b52e194397e2 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h @@ -402,6 +402,7 @@ struct smu_power_gate { atomic_t vcn_gated[AMDGPU_MAX_VCN_INSTANCES]; atomic_t jpeg_gated; atomic_t vpe_gated; + atomic_t isp_gated; atomic_t umsch_mm_gated; }; @@ -1436,6 +1437,12 @@ struct pptable_funcs { int (*dpm_set_vpe_enable)(struct smu_context *smu, bool enable); /** + * @dpm_set_isp_enable: Enable/disable ISP engine dynamic power + * management. + */ + int (*dpm_set_isp_enable)(struct smu_context *smu, bool enable); + + /** * @dpm_set_umsch_mm_enable: Enable/disable UMSCH engine dynamic power * management. */ @@ -1466,6 +1473,12 @@ struct pptable_funcs { */ int (*set_wbrf_exclusion_ranges)(struct smu_context *smu, struct freq_band_range *exclusion_ranges); + /** + * @get_xcp_metrics: Get a copy of the partition metrics table from SMU. 
+ * Return: Size of table + */ + ssize_t (*get_xcp_metrics)(struct smu_context *smu, int xcp_id, + void *table); }; typedef enum { @@ -1629,7 +1642,7 @@ int smu_write_watermarks_table(struct smu_context *smu); int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t *min, uint32_t *max); -int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type, +int smu_set_soft_freq_range(struct smu_context *smu, enum pp_clock_type clk_type, uint32_t min, uint32_t max); int smu_set_gfx_power_up_by_imu(struct smu_context *smu); diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h index 1bc30db22f9c..cd44f4254134 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h @@ -106,6 +106,7 @@ typedef struct { #define NUM_FCLK_DPM_LEVELS 8 #define NUM_MEM_PSTATE_LEVELS 4 +#define ISP_ALL_TILES_MASK 0x7FF typedef struct { uint32_t UClk; diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_pmfw.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_pmfw.h index d7505cfc433a..0a2ca544f4e3 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_pmfw.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_pmfw.h @@ -86,8 +86,10 @@ typedef enum { /*36*/ FEATURE_PIT = 36, /*37*/ FEATURE_DVO = 37, /*38*/ FEATURE_XVMINORPSM_CLKSTOP_DS = 38, +/*39*/ FEATURE_GLOBAL_DPM = 39, +/*40*/ FEATURE_NODE_POWER_MANAGER = 40, -/*39*/ NUM_FEATURES = 39 +/*41*/ NUM_FEATURES = 41 } FEATURE_LIST_e; //enum for MPIO PCIe gen speed msgs @@ -133,7 +135,7 @@ typedef enum { GFX_DVM_MARGIN_COUNT } GFX_DVM_MARGIN_e; -#define SMU_METRICS_TABLE_VERSION 0x12 +#define SMU_METRICS_TABLE_VERSION 0x13 typedef struct __attribute__((packed, aligned(4))) { uint64_t AccumulationCounter; @@ -275,6 +277,16 @@ typedef struct { //PSNs uint64_t PublicSerialNumber_AID[4]; uint64_t PublicSerialNumber_XCD[8]; + + //XGMI + uint32_t MaxXgmiWidth; + uint32_t MaxXgmiBitrate; + + // Telemetry + uint32_t InputTelemetryVoltageInmV; + + // General info + uint32_t pldmVersion[2]; } StaticMetricsTable_t; #pragma pack(pop) diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h index 3d9e5e967c94..01790a927930 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h @@ -127,7 +127,7 @@ typedef enum { VOLTAGE_GUARDBAND_COUNT } GFX_GUARDBAND_e; -#define SMU_METRICS_TABLE_VERSION 0x10 +#define SMU_METRICS_TABLE_VERSION 0x11 // Unified metrics table for smu_v13_0_6 typedef struct __attribute__((packed, aligned(4))) { @@ -463,6 +463,8 @@ typedef struct __attribute__((packed, aligned(4))) { typedef struct { // Telemetry uint32_t InputTelemetryVoltageInmV; + // General info + uint32_t pldmVersion[2]; } StaticMetricsTable_t; #pragma pack(pop) diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h index eefdaa0b5df6..d7a9e41820fa 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h @@ -305,6 +305,8 @@ enum smu_clk_type { SMU_MCLK, SMU_PCIE, SMU_LCLK, + SMU_ISPICLK, + SMU_ISPXCLK, SMU_OD_CCLK, SMU_OD_SCLK, SMU_OD_MCLK, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c index 
7fad5dfb39c4..aac202d0c30e 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c @@ -2444,7 +2444,8 @@ static int navi10_update_pcie_parameters(struct smu_context *smu, struct smu_11_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context; PPTable_t *pptable = smu->smu_table.driver_pptable; uint32_t smu_pcie_arg; - int ret, i; + int ret = 0; + int i; /* lclk dpm table setup */ for (i = 0; i < MAX_PCIE_CONF; i++) { @@ -2453,25 +2454,27 @@ static int navi10_update_pcie_parameters(struct smu_context *smu, } for (i = 0; i < NUM_LINK_LEVELS; i++) { - smu_pcie_arg = (i << 16) | - ((pptable->PcieGenSpeed[i] <= pcie_gen_cap) ? (pptable->PcieGenSpeed[i] << 8) : - (pcie_gen_cap << 8)) | ((pptable->PcieLaneCount[i] <= pcie_width_cap) ? - pptable->PcieLaneCount[i] : pcie_width_cap); - ret = smu_cmn_send_smc_msg_with_param(smu, - SMU_MSG_OverridePcieParameters, - smu_pcie_arg, - NULL); - - if (ret) - return ret; - - if (pptable->PcieGenSpeed[i] > pcie_gen_cap) - dpm_context->dpm_tables.pcie_table.pcie_gen[i] = pcie_gen_cap; - if (pptable->PcieLaneCount[i] > pcie_width_cap) - dpm_context->dpm_tables.pcie_table.pcie_lane[i] = pcie_width_cap; + if (pptable->PcieGenSpeed[i] > pcie_gen_cap || + pptable->PcieLaneCount[i] > pcie_width_cap) { + dpm_context->dpm_tables.pcie_table.pcie_gen[i] = + pptable->PcieGenSpeed[i] > pcie_gen_cap ? + pcie_gen_cap : pptable->PcieGenSpeed[i]; + dpm_context->dpm_tables.pcie_table.pcie_lane[i] = + pptable->PcieLaneCount[i] > pcie_width_cap ? + pcie_width_cap : pptable->PcieLaneCount[i]; + smu_pcie_arg = i << 16; + smu_pcie_arg |= pcie_gen_cap << 8; + smu_pcie_arg |= pcie_width_cap; + ret = smu_cmn_send_smc_msg_with_param(smu, + SMU_MSG_OverridePcieParameters, + smu_pcie_arg, + NULL); + if (ret) + break; + } } - return 0; + return ret; } static inline void navi10_dump_od_table(struct smu_context *smu, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c index 115e3fa456bc..d57591509aed 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c @@ -2145,7 +2145,8 @@ static int sienna_cichlid_update_pcie_parameters(struct smu_context *smu, uint8_t min_gen_speed, max_gen_speed; uint8_t min_lane_width, max_lane_width; uint32_t smu_pcie_arg; - int ret, i; + int ret = 0; + int i; GET_PPTABLE_MEMBER(PcieGenSpeed, &table_member1); GET_PPTABLE_MEMBER(PcieLaneCount, &table_member2); @@ -2170,19 +2171,22 @@ static int sienna_cichlid_update_pcie_parameters(struct smu_context *smu, pcie_table->pcie_lane[1] = max_lane_width; for (i = 0; i < NUM_LINK_LEVELS; i++) { - smu_pcie_arg = (i << 16 | + if (!(smu->adev->pm.pp_feature & PP_PCIE_DPM_MASK) || + table_member1[i] > pcie_gen_cap || table_member2[i] > pcie_width_cap) { + smu_pcie_arg = (i << 16 | pcie_table->pcie_gen[i] << 8 | pcie_table->pcie_lane[i]); - ret = smu_cmn_send_smc_msg_with_param(smu, - SMU_MSG_OverridePcieParameters, - smu_pcie_arg, - NULL); - if (ret) - return ret; + ret = smu_cmn_send_smc_msg_with_param(smu, + SMU_MSG_OverridePcieParameters, + smu_pcie_arg, + NULL); + if (ret) + break; + } } - return 0; + return ret; } static int sienna_cichlid_get_dpm_ultimate_freq(struct smu_context *smu, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c index a55ea76d7399..2c9869feba61 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c +++ 
b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c @@ -666,7 +666,6 @@ static int vangogh_print_clk_levels(struct smu_context *smu, { DpmClocks_t *clk_table = smu->smu_table.clocks_table; SmuMetrics_t metrics; - struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); int i, idx, size = 0, ret = 0; uint32_t cur_value = 0, value = 0, count = 0; bool cur_value_match_level = false; @@ -682,31 +681,25 @@ static int vangogh_print_clk_levels(struct smu_context *smu, switch (clk_type) { case SMU_OD_SCLK: - if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) { - size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK"); - size += sysfs_emit_at(buf, size, "0: %10uMhz\n", - (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq); - size += sysfs_emit_at(buf, size, "1: %10uMhz\n", - (smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq); - } + size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK"); + size += sysfs_emit_at(buf, size, "0: %10uMhz\n", + (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq); + size += sysfs_emit_at(buf, size, "1: %10uMhz\n", + (smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq); break; case SMU_OD_CCLK: - if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) { - size += sysfs_emit_at(buf, size, "CCLK_RANGE in Core%d:\n", smu->cpu_core_id_select); - size += sysfs_emit_at(buf, size, "0: %10uMhz\n", - (smu->cpu_actual_soft_min_freq > 0) ? smu->cpu_actual_soft_min_freq : smu->cpu_default_soft_min_freq); - size += sysfs_emit_at(buf, size, "1: %10uMhz\n", - (smu->cpu_actual_soft_max_freq > 0) ? smu->cpu_actual_soft_max_freq : smu->cpu_default_soft_max_freq); - } + size += sysfs_emit_at(buf, size, "CCLK_RANGE in Core%d:\n", smu->cpu_core_id_select); + size += sysfs_emit_at(buf, size, "0: %10uMhz\n", + (smu->cpu_actual_soft_min_freq > 0) ? smu->cpu_actual_soft_min_freq : smu->cpu_default_soft_min_freq); + size += sysfs_emit_at(buf, size, "1: %10uMhz\n", + (smu->cpu_actual_soft_max_freq > 0) ? 
smu->cpu_actual_soft_max_freq : smu->cpu_default_soft_max_freq); break; case SMU_OD_RANGE: - if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) { - size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE"); - size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n", - smu->gfx_default_hard_min_freq, smu->gfx_default_soft_max_freq); - size += sysfs_emit_at(buf, size, "CCLK: %7uMhz %10uMhz\n", - smu->cpu_default_soft_min_freq, smu->cpu_default_soft_max_freq); - } + size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE"); + size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n", + smu->gfx_default_hard_min_freq, smu->gfx_default_soft_max_freq); + size += sysfs_emit_at(buf, size, "CCLK: %7uMhz %10uMhz\n", + smu->cpu_default_soft_min_freq, smu->cpu_default_soft_max_freq); break; case SMU_SOCCLK: /* the level 3 ~ 6 of socclk use the same frequency for vangogh */ diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c index 9481f897432d..e97b0cf19197 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c @@ -497,7 +497,6 @@ static int renoir_print_clk_levels(struct smu_context *smu, int i, idx, size = 0, ret = 0; uint32_t cur_value = 0, value = 0, count = 0, min = 0, max = 0; SmuMetrics_t metrics; - struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); bool cur_value_match_level = false; memset(&metrics, 0, sizeof(metrics)); @@ -510,28 +509,24 @@ static int renoir_print_clk_levels(struct smu_context *smu, switch (clk_type) { case SMU_OD_RANGE: - if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) { - ret = smu_cmn_send_smc_msg_with_param(smu, - SMU_MSG_GetMinGfxclkFrequency, - 0, &min); - if (ret) - return ret; - ret = smu_cmn_send_smc_msg_with_param(smu, - SMU_MSG_GetMaxGfxclkFrequency, - 0, &max); - if (ret) - return ret; - size += sysfs_emit_at(buf, size, "OD_RANGE\nSCLK: %10uMhz %10uMhz\n", min, max); - } + ret = smu_cmn_send_smc_msg_with_param(smu, + SMU_MSG_GetMinGfxclkFrequency, + 0, &min); + if (ret) + return ret; + ret = smu_cmn_send_smc_msg_with_param(smu, + SMU_MSG_GetMaxGfxclkFrequency, + 0, &max); + if (ret) + return ret; + size += sysfs_emit_at(buf, size, "OD_RANGE\nSCLK: %10uMhz %10uMhz\n", min, max); break; case SMU_OD_SCLK: - if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) { - min = (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq; - max = (smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq; - size += sysfs_emit_at(buf, size, "OD_SCLK\n"); - size += sysfs_emit_at(buf, size, "0:%10uMhz\n", min); - size += sysfs_emit_at(buf, size, "1:%10uMhz\n", max); - } + min = (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq; + max = (smu->gfx_actual_soft_max_freq > 0) ? 
smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq; + size += sysfs_emit_at(buf, size, "OD_SCLK\n"); + size += sysfs_emit_at(buf, size, "0:%10uMhz\n", min); + size += sysfs_emit_at(buf, size, "1:%10uMhz\n", max); break; case SMU_GFXCLK: case SMU_SCLK: diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c index 6de653d2ed62..c63d2e28954d 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c @@ -342,6 +342,61 @@ static int aldebaran_get_allowed_feature_mask(struct smu_context *smu, return 0; } +static int aldebaran_get_dpm_ultimate_freq(struct smu_context *smu, + enum smu_clk_type clk_type, + uint32_t *min, uint32_t *max) +{ + struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context; + struct smu_13_0_dpm_table *dpm_table; + uint32_t min_clk, max_clk; + + if (amdgpu_sriov_vf(smu->adev)) { + switch (clk_type) { + case SMU_MCLK: + case SMU_UCLK: + dpm_table = &dpm_context->dpm_tables.uclk_table; + break; + case SMU_GFXCLK: + case SMU_SCLK: + dpm_table = &dpm_context->dpm_tables.gfx_table; + break; + case SMU_SOCCLK: + dpm_table = &dpm_context->dpm_tables.soc_table; + break; + case SMU_FCLK: + dpm_table = &dpm_context->dpm_tables.fclk_table; + break; + case SMU_VCLK: + dpm_table = &dpm_context->dpm_tables.vclk_table; + break; + case SMU_DCLK: + dpm_table = &dpm_context->dpm_tables.dclk_table; + break; + default: + return -EINVAL; + } + + min_clk = dpm_table->min; + max_clk = dpm_table->max; + + if (min) { + if (!min_clk) + return -ENODATA; + *min = min_clk; + } + if (max) { + if (!max_clk) + return -ENODATA; + *max = max_clk; + } + + } else { + return smu_v13_0_get_dpm_ultimate_freq(smu, clk_type, min, max); + } + + return 0; +} + static int aldebaran_set_default_dpm_table(struct smu_context *smu) { struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context; @@ -2081,7 +2136,7 @@ static const struct pptable_funcs aldebaran_ppt_funcs = { .set_azalia_d3_pme = smu_v13_0_set_azalia_d3_pme, .get_max_sustainable_clocks_by_dc = smu_v13_0_get_max_sustainable_clocks_by_dc, .get_bamaco_support = aldebaran_get_bamaco_support, - .get_dpm_ultimate_freq = smu_v13_0_get_dpm_ultimate_freq, + .get_dpm_ultimate_freq = aldebaran_get_dpm_ultimate_freq, .set_soft_freq_limited_range = aldebaran_set_soft_freq_limited_range, .od_edit_dpm_table = aldebaran_usr_edit_dpm_table, .set_df_cstate = aldebaran_set_df_cstate, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c index a7167668d189..1a1f2a6b2e52 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c @@ -58,6 +58,7 @@ MODULE_FIRMWARE("amdgpu/aldebaran_smc.bin"); MODULE_FIRMWARE("amdgpu/smu_13_0_0.bin"); +MODULE_FIRMWARE("amdgpu/smu_13_0_0_kicker.bin"); MODULE_FIRMWARE("amdgpu/smu_13_0_7.bin"); MODULE_FIRMWARE("amdgpu/smu_13_0_10.bin"); @@ -92,7 +93,7 @@ const int pmfw_decoded_link_width[7] = {0, 1, 2, 4, 8, 12, 16}; int smu_v13_0_init_microcode(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; - char ucode_prefix[15]; + char ucode_prefix[30]; int err = 0; const struct smc_firmware_header_v1_0 *hdr; const struct common_firmware_header *header; @@ -103,8 +104,13 @@ int smu_v13_0_init_microcode(struct smu_context *smu) return 0; amdgpu_ucode_ip_version_decode(adev, MP1_HWIP, ucode_prefix, sizeof(ucode_prefix)); - err = amdgpu_ucode_request(adev, &adev->pm.fw, 
AMDGPU_UCODE_REQUIRED, - "amdgpu/%s.bin", ucode_prefix); + + if (amdgpu_is_kicker_fw(adev)) + err = amdgpu_ucode_request(adev, &adev->pm.fw, AMDGPU_UCODE_REQUIRED, + "amdgpu/%s_kicker.bin", ucode_prefix); + else + err = amdgpu_ucode_request(adev, &adev->pm.fw, AMDGPU_UCODE_REQUIRED, + "amdgpu/%s.bin", ucode_prefix); if (err) goto out; @@ -2380,7 +2386,8 @@ int smu_v13_0_update_pcie_parameters(struct smu_context *smu, &dpm_context->dpm_tables.pcie_table; int num_of_levels = pcie_table->num_of_link_levels; uint32_t smu_pcie_arg; - int ret, i; + int ret = 0; + int i; if (!num_of_levels) return 0; @@ -2396,30 +2403,38 @@ int smu_v13_0_update_pcie_parameters(struct smu_context *smu, for (i = 0; i < num_of_levels; i++) { pcie_table->pcie_gen[i] = pcie_gen_cap; pcie_table->pcie_lane[i] = pcie_width_cap; + smu_pcie_arg = i << 16; + smu_pcie_arg |= pcie_table->pcie_gen[i] << 8; + smu_pcie_arg |= pcie_table->pcie_lane[i]; + + ret = smu_cmn_send_smc_msg_with_param(smu, + SMU_MSG_OverridePcieParameters, + smu_pcie_arg, + NULL); + if (ret) + break; } } else { for (i = 0; i < num_of_levels; i++) { - if (pcie_table->pcie_gen[i] > pcie_gen_cap) + if (pcie_table->pcie_gen[i] > pcie_gen_cap || + pcie_table->pcie_lane[i] > pcie_width_cap) { pcie_table->pcie_gen[i] = pcie_gen_cap; - if (pcie_table->pcie_lane[i] > pcie_width_cap) pcie_table->pcie_lane[i] = pcie_width_cap; + smu_pcie_arg = i << 16; + smu_pcie_arg |= pcie_table->pcie_gen[i] << 8; + smu_pcie_arg |= pcie_table->pcie_lane[i]; + + ret = smu_cmn_send_smc_msg_with_param(smu, + SMU_MSG_OverridePcieParameters, + smu_pcie_arg, + NULL); + if (ret) + break; + } } } - for (i = 0; i < num_of_levels; i++) { - smu_pcie_arg = i << 16; - smu_pcie_arg |= pcie_table->pcie_gen[i] << 8; - smu_pcie_arg |= pcie_table->pcie_lane[i]; - - ret = smu_cmn_send_smc_msg_with_param(smu, - SMU_MSG_OverridePcieParameters, - smu_pcie_arg, - NULL); - if (ret) - return ret; - } - - return 0; + return ret; } int smu_v13_0_disable_pmfw_state(struct smu_context *smu) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c index 5a9711e8cf68..e084ed99ec0e 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c @@ -572,8 +572,6 @@ static int smu_v13_0_0_set_default_dpm_table(struct smu_context *smu) PPTable_t *pptable = table_context->driver_pptable; SkuTable_t *skutable = &pptable->SkuTable; struct smu_13_0_dpm_table *dpm_table; - struct smu_13_0_pcie_table *pcie_table; - uint32_t link_level; int ret = 0; /* socclk dpm table setup */ @@ -689,24 +687,6 @@ static int smu_v13_0_0_set_default_dpm_table(struct smu_context *smu) dpm_table->max = dpm_table->dpm_levels[0].value; } - /* lclk dpm table setup */ - pcie_table = &dpm_context->dpm_tables.pcie_table; - pcie_table->num_of_link_levels = 0; - for (link_level = 0; link_level < NUM_LINK_LEVELS; link_level++) { - if (!skutable->PcieGenSpeed[link_level] && - !skutable->PcieLaneCount[link_level] && - !skutable->LclkFreq[link_level]) - continue; - - pcie_table->pcie_gen[pcie_table->num_of_link_levels] = - skutable->PcieGenSpeed[link_level]; - pcie_table->pcie_lane[pcie_table->num_of_link_levels] = - skutable->PcieLaneCount[link_level]; - pcie_table->clk_freq[pcie_table->num_of_link_levels] = - skutable->LclkFreq[link_level]; - pcie_table->num_of_link_levels++; - } - /* dcefclk dpm table setup */ dpm_table = &dpm_context->dpm_tables.dcef_table; if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCN_BIT)) { @@ 
-3150,6 +3130,90 @@ static int smu_v13_0_0_set_power_limit(struct smu_context *smu, return 0; } +static int smu_v13_0_0_update_pcie_parameters(struct smu_context *smu, + uint8_t pcie_gen_cap, + uint8_t pcie_width_cap) +{ + struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context; + struct smu_13_0_pcie_table *pcie_table = + &dpm_context->dpm_tables.pcie_table; + int num_of_levels; + uint32_t smu_pcie_arg; + uint32_t link_level; + struct smu_table_context *table_context = &smu->smu_table; + PPTable_t *pptable = table_context->driver_pptable; + SkuTable_t *skutable = &pptable->SkuTable; + int ret = 0; + int i; + + pcie_table->num_of_link_levels = 0; + + for (link_level = 0; link_level < NUM_LINK_LEVELS; link_level++) { + if (!skutable->PcieGenSpeed[link_level] && + !skutable->PcieLaneCount[link_level] && + !skutable->LclkFreq[link_level]) + continue; + + pcie_table->pcie_gen[pcie_table->num_of_link_levels] = + skutable->PcieGenSpeed[link_level]; + pcie_table->pcie_lane[pcie_table->num_of_link_levels] = + skutable->PcieLaneCount[link_level]; + pcie_table->clk_freq[pcie_table->num_of_link_levels] = + skutable->LclkFreq[link_level]; + pcie_table->num_of_link_levels++; + } + + num_of_levels = pcie_table->num_of_link_levels; + if (!num_of_levels) + return 0; + + if (!(smu->adev->pm.pp_feature & PP_PCIE_DPM_MASK)) { + if (pcie_table->pcie_gen[num_of_levels - 1] < pcie_gen_cap) + pcie_gen_cap = pcie_table->pcie_gen[num_of_levels - 1]; + + if (pcie_table->pcie_lane[num_of_levels - 1] < pcie_width_cap) + pcie_width_cap = pcie_table->pcie_lane[num_of_levels - 1]; + + /* Force all levels to use the same settings */ + for (i = 0; i < num_of_levels; i++) { + pcie_table->pcie_gen[i] = pcie_gen_cap; + pcie_table->pcie_lane[i] = pcie_width_cap; + smu_pcie_arg = i << 16; + smu_pcie_arg |= pcie_table->pcie_gen[i] << 8; + smu_pcie_arg |= pcie_table->pcie_lane[i]; + + ret = smu_cmn_send_smc_msg_with_param(smu, + SMU_MSG_OverridePcieParameters, + smu_pcie_arg, + NULL); + if (ret) + break; + } + } else { + for (i = 0; i < num_of_levels; i++) { + if (pcie_table->pcie_gen[i] > pcie_gen_cap || + pcie_table->pcie_lane[i] > pcie_width_cap) { + pcie_table->pcie_gen[i] = pcie_table->pcie_gen[i] > pcie_gen_cap ? + pcie_gen_cap : pcie_table->pcie_gen[i]; + pcie_table->pcie_lane[i] = pcie_table->pcie_lane[i] > pcie_width_cap ? 
+ pcie_width_cap : pcie_table->pcie_lane[i]; + smu_pcie_arg = i << 16; + smu_pcie_arg |= pcie_table->pcie_gen[i] << 8; + smu_pcie_arg |= pcie_table->pcie_lane[i]; + + ret = smu_cmn_send_smc_msg_with_param(smu, + SMU_MSG_OverridePcieParameters, + smu_pcie_arg, + NULL); + if (ret) + break; + } + } + } + + return ret; +} + static const struct pptable_funcs smu_v13_0_0_ppt_funcs = { .get_allowed_feature_mask = smu_v13_0_0_get_allowed_feature_mask, .set_default_dpm_table = smu_v13_0_0_set_default_dpm_table, @@ -3179,7 +3243,7 @@ static const struct pptable_funcs smu_v13_0_0_ppt_funcs = { .feature_is_enabled = smu_cmn_feature_is_enabled, .print_clk_levels = smu_v13_0_0_print_clk_levels, .force_clk_levels = smu_v13_0_0_force_clk_levels, - .update_pcie_parameters = smu_v13_0_update_pcie_parameters, + .update_pcie_parameters = smu_v13_0_0_update_pcie_parameters, .get_thermal_temperature_range = smu_v13_0_0_get_thermal_temperature_range, .register_irq_handler = smu_v13_0_register_irq_handler, .enable_thermal_alert = smu_v13_0_enable_thermal_alert, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c index 533d58e57d05..02a455a31c25 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c @@ -187,8 +187,34 @@ int smu_v13_0_12_get_max_metrics_size(void) return max(sizeof(StaticMetricsTable_t), sizeof(MetricsTable_t)); } +static void smu_v13_0_12_init_xgmi_data(struct smu_context *smu, + StaticMetricsTable_t *static_metrics) +{ + struct smu_table_context *smu_table = &smu->smu_table; + uint16_t max_speed; + uint8_t max_width; + int ret; + + if (smu_table->tables[SMU_TABLE_SMU_METRICS].version >= 0x13) { + max_width = (uint8_t)static_metrics->MaxXgmiWidth; + max_speed = (uint16_t)static_metrics->MaxXgmiBitrate; + ret = 0; + } else { + MetricsTable_t *metrics = (MetricsTable_t *)smu_table->metrics_table; + + ret = smu_v13_0_6_get_metrics_table(smu, NULL, true); + if (!ret) { + max_width = (uint8_t)metrics->XgmiWidth; + max_speed = (uint16_t)metrics->XgmiBitrate; + } + } + if (!ret) + amgpu_xgmi_set_max_speed_width(smu->adev, max_speed, max_width); +} + int smu_v13_0_12_setup_driver_pptable(struct smu_context *smu) { + struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context; struct smu_table_context *smu_table = &smu->smu_table; StaticMetricsTable_t *static_metrics = (StaticMetricsTable_t *)smu_table->metrics_table; struct PPTable_t *pptable = @@ -237,6 +263,18 @@ int smu_v13_0_12_setup_driver_pptable(struct smu_context *smu) if (ret) return ret; + if (smu_v13_0_6_cap_supported(smu, SMU_CAP(BOARD_VOLTAGE))) { + if (!static_metrics->InputTelemetryVoltageInmV) { + dev_warn(smu->adev->dev, "Invalid board voltage %d\n", + static_metrics->InputTelemetryVoltageInmV); + } + dpm_context->board_volt = static_metrics->InputTelemetryVoltageInmV; + } + if (smu_v13_0_6_cap_supported(smu, SMU_CAP(PLDM_VERSION)) && + static_metrics->pldmVersion[0] != 0xFFFFFFFF) + smu->adev->firmware.pldm_version = + static_metrics->pldmVersion[0]; + smu_v13_0_12_init_xgmi_data(smu, static_metrics); pptable->Init = true; } @@ -263,7 +301,6 @@ int smu_v13_0_12_get_smu_metrics_data(struct smu_context *smu, struct smu_table_context *smu_table = &smu->smu_table; MetricsTable_t *metrics = (MetricsTable_t *)smu_table->metrics_table; struct amdgpu_device *adev = smu->adev; - int ret = 0; int xcc_id; /* For clocks with multiple instances, only report the first one */ @@ -319,10 +356,66 @@ int 
smu_v13_0_12_get_smu_metrics_data(struct smu_context *smu, break; } - return ret; + return 0; +} + +ssize_t smu_v13_0_12_get_xcp_metrics(struct smu_context *smu, struct amdgpu_xcp *xcp, void *table, void *smu_metrics) +{ + const u8 num_jpeg_rings = NUM_JPEG_RINGS_FW; + struct amdgpu_partition_metrics_v1_0 *xcp_metrics; + struct amdgpu_device *adev = smu->adev; + MetricsTable_t *metrics; + int inst, j, k, idx; + u32 inst_mask; + + metrics = (MetricsTable_t *)smu_metrics; + xcp_metrics = (struct amdgpu_partition_metrics_v1_0 *) table; + smu_cmn_init_partition_metrics(xcp_metrics, 1, 0); + amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_VCN, &inst_mask); + idx = 0; + for_each_inst(k, inst_mask) { + /* Both JPEG and VCN have the same instance */ + inst = GET_INST(VCN, k); + for (j = 0; j < num_jpeg_rings; ++j) { + xcp_metrics->jpeg_busy[(idx * num_jpeg_rings) + j] = + SMUQ10_ROUND(metrics-> + JpegBusy[(inst * num_jpeg_rings) + j]); + } + xcp_metrics->vcn_busy[idx] = + SMUQ10_ROUND(metrics->VcnBusy[inst]); + xcp_metrics->current_vclk0[idx] = SMUQ10_ROUND( + metrics->VclkFrequency[inst]); + xcp_metrics->current_dclk0[idx] = SMUQ10_ROUND( + metrics->DclkFrequency[inst]); + xcp_metrics->current_socclk[idx] = SMUQ10_ROUND( + metrics->SocclkFrequency[inst]); + + idx++; + } + + xcp_metrics->current_uclk = + SMUQ10_ROUND(metrics->UclkFrequency); + + amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_GFX, &inst_mask); + idx = 0; + for_each_inst(k, inst_mask) { + inst = GET_INST(GC, k); + xcp_metrics->current_gfxclk[idx] = SMUQ10_ROUND(metrics->GfxclkFrequency[inst]); + xcp_metrics->gfx_busy_inst[idx] = SMUQ10_ROUND(metrics->GfxBusy[inst]); + xcp_metrics->gfx_busy_acc[idx] = SMUQ10_ROUND(metrics->GfxBusyAcc[inst]); + if (smu_v13_0_6_cap_supported(smu, SMU_CAP(HST_LIMIT_METRICS))) { + xcp_metrics->gfx_below_host_limit_ppt_acc[idx] = SMUQ10_ROUND(metrics->GfxclkBelowHostLimitPptAcc[inst]); + xcp_metrics->gfx_below_host_limit_thm_acc[idx] = SMUQ10_ROUND(metrics->GfxclkBelowHostLimitThmAcc[inst]); + xcp_metrics->gfx_low_utilization_acc[idx] = SMUQ10_ROUND(metrics->GfxclkLowUtilizationAcc[inst]); + xcp_metrics->gfx_below_host_limit_total_acc[idx] = SMUQ10_ROUND(metrics->GfxclkBelowHostLimitTotalAcc[inst]); + } + idx++; + } + + return sizeof(*xcp_metrics); } -ssize_t smu_v13_0_12_get_gpu_metrics(struct smu_context *smu, void **table) +ssize_t smu_v13_0_12_get_gpu_metrics(struct smu_context *smu, void **table, void *smu_metrics) { struct smu_table_context *smu_table = &smu->smu_table; struct gpu_metrics_v1_8 *gpu_metrics = @@ -334,8 +427,7 @@ ssize_t smu_v13_0_12_get_gpu_metrics(struct smu_context *smu, void **table) struct amdgpu_xcp *xcp; u32 inst_mask; - metrics = kzalloc(sizeof(MetricsTable_t), GFP_KERNEL); - memcpy(metrics, smu_table->metrics_table, sizeof(MetricsTable_t)); + metrics = (MetricsTable_t *)smu_metrics; smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 8); @@ -416,13 +508,16 @@ ssize_t smu_v13_0_12_get_gpu_metrics(struct smu_context *smu, void **table) gpu_metrics->mem_activity_acc = SMUQ10_ROUND(metrics->DramBandwidthUtilizationAcc); for (i = 0; i < NUM_XGMI_LINKS; i++) { - gpu_metrics->xgmi_read_data_acc[i] = + j = amdgpu_xgmi_get_ext_link(adev, i); + if (j < 0 || j >= NUM_XGMI_LINKS) + continue; + gpu_metrics->xgmi_read_data_acc[j] = SMUQ10_ROUND(metrics->XgmiReadDataSizeAcc[i]); - gpu_metrics->xgmi_write_data_acc[i] = + gpu_metrics->xgmi_write_data_acc[j] = SMUQ10_ROUND(metrics->XgmiWriteDataSizeAcc[i]); ret = amdgpu_get_xgmi_link_status(adev, i); if (ret >= 0) - gpu_metrics->xgmi_link_status[i] = ret; +
gpu_metrics->xgmi_link_status[j] = ret; } gpu_metrics->num_partition = adev->xcp_mgr->num_xcps; @@ -474,7 +569,6 @@ ssize_t smu_v13_0_12_get_gpu_metrics(struct smu_context *smu, void **table) gpu_metrics->firmware_timestamp = metrics->Timestamp; *table = (void *)gpu_metrics; - kfree(metrics); return sizeof(*gpu_metrics); } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c index 7d4ff09be7e8..9cc294f4708b 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c @@ -312,6 +312,11 @@ static void smu_v13_0_14_init_caps(struct smu_context *smu) smu_v13_0_6_cap_set(smu, SMU_CAP(PER_INST_METRICS)); if (fw_ver >= 0x5551200) smu_v13_0_6_cap_set(smu, SMU_CAP(SDMA_RESET)); + if (fw_ver >= 0x5551600) { + smu_v13_0_6_cap_set(smu, SMU_CAP(STATIC_METRICS)); + smu_v13_0_6_cap_set(smu, SMU_CAP(BOARD_VOLTAGE)); + smu_v13_0_6_cap_set(smu, SMU_CAP(PLDM_VERSION)); + } } static void smu_v13_0_12_init_caps(struct smu_context *smu) @@ -340,6 +345,11 @@ static void smu_v13_0_12_init_caps(struct smu_context *smu) if (fw_ver >= 0x00562500) smu_v13_0_6_cap_set(smu, SMU_CAP(HST_LIMIT_METRICS)); + + if (fw_ver >= 0x04560100) { + smu_v13_0_6_cap_set(smu, SMU_CAP(BOARD_VOLTAGE)); + smu_v13_0_6_cap_set(smu, SMU_CAP(PLDM_VERSION)); + } } static void smu_v13_0_6_init_caps(struct smu_context *smu) @@ -392,10 +402,14 @@ static void smu_v13_0_6_init_caps(struct smu_context *smu) if ((pgm == 7 && fw_ver >= 0x7550E00) || (pgm == 0 && fw_ver >= 0x00557E00)) smu_v13_0_6_cap_set(smu, SMU_CAP(HST_LIMIT_METRICS)); - if (fw_ver >= 0x00557F01) { + if ((pgm == 0 && fw_ver >= 0x00557F01) || + (pgm == 7 && fw_ver >= 0x7551000)) { smu_v13_0_6_cap_set(smu, SMU_CAP(STATIC_METRICS)); smu_v13_0_6_cap_set(smu, SMU_CAP(BOARD_VOLTAGE)); } + if ((pgm == 0 && fw_ver >= 0x00558000) || + (pgm == 7 && fw_ver >= 0x7551000)) + smu_v13_0_6_cap_set(smu, SMU_CAP(PLDM_VERSION)); } if (((pgm == 7) && (fw_ver >= 0x7550700)) || ((pgm == 0) && (fw_ver >= 0x00557900)) || @@ -676,8 +690,8 @@ static int smu_v13_0_6_get_allowed_feature_mask(struct smu_context *smu, return 0; } -static int smu_v13_0_6_get_metrics_table(struct smu_context *smu, - void *metrics_table, bool bypass_cache) +int smu_v13_0_6_get_metrics_table(struct smu_context *smu, void *metrics_table, + bool bypass_cache) { struct smu_table_context *smu_table = &smu->smu_table; uint32_t table_size = smu_table->tables[SMU_TABLE_SMU_METRICS].size; @@ -752,6 +766,11 @@ static void smu_v13_0_6_fill_static_metrics_table(struct smu_context *smu, } dpm_context->board_volt = static_metrics->InputTelemetryVoltageInmV; + + if (smu_v13_0_6_cap_supported(smu, SMU_CAP(PLDM_VERSION)) && + static_metrics->pldmVersion[0] != 0xFFFFFFFF) + smu->adev->firmware.pldm_version = + static_metrics->pldmVersion[0]; } int smu_v13_0_6_get_static_metrics_table(struct smu_context *smu) @@ -786,6 +805,8 @@ static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu) int version = smu_v13_0_6_get_metrics_version(smu); int ret, i, retry = 100; uint32_t table_version; + uint16_t max_speed; + uint8_t max_width; if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 12) && smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS))) @@ -821,6 +842,9 @@ static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu) SMUQ10_ROUND(GET_METRIC_FIELD(MaxGfxclkFrequency, version)); pptable->MinGfxclkFrequency = SMUQ10_ROUND(GET_METRIC_FIELD(MinGfxclkFrequency, version)); + max_width = 
(uint8_t)GET_METRIC_FIELD(XgmiWidth, version); + max_speed = (uint16_t)GET_METRIC_FIELD(XgmiBitrate, version); + amgpu_xgmi_set_max_speed_width(smu->adev, max_speed, max_width); for (i = 0; i < 4; ++i) { pptable->FclkFrequencyTable[i] = @@ -857,51 +881,51 @@ static int smu_v13_0_6_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t *min, uint32_t *max) { + struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context; struct smu_table_context *smu_table = &smu->smu_table; struct PPTable_t *pptable = (struct PPTable_t *)smu_table->driver_pptable; - uint32_t clock_limit = 0, param; + struct smu_13_0_dpm_table *dpm_table; + uint32_t min_clk, max_clk, param; int ret = 0, clk_id = 0; - if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type)) { + /* Use dpm tables, if data is already fetched */ + if (pptable->Init) { switch (clk_type) { case SMU_MCLK: case SMU_UCLK: - if (pptable->Init) - clock_limit = pptable->UclkFrequencyTable[0]; + dpm_table = &dpm_context->dpm_tables.uclk_table; break; case SMU_GFXCLK: case SMU_SCLK: - if (pptable->Init) - clock_limit = pptable->MinGfxclkFrequency; + dpm_table = &dpm_context->dpm_tables.gfx_table; break; case SMU_SOCCLK: - if (pptable->Init) - clock_limit = pptable->SocclkFrequencyTable[0]; + dpm_table = &dpm_context->dpm_tables.soc_table; break; case SMU_FCLK: - if (pptable->Init) - clock_limit = pptable->FclkFrequencyTable[0]; + dpm_table = &dpm_context->dpm_tables.fclk_table; break; case SMU_VCLK: - if (pptable->Init) - clock_limit = pptable->VclkFrequencyTable[0]; + dpm_table = &dpm_context->dpm_tables.vclk_table; break; case SMU_DCLK: - if (pptable->Init) - clock_limit = pptable->DclkFrequencyTable[0]; + dpm_table = &dpm_context->dpm_tables.dclk_table; break; default: - break; + return -EINVAL; } - if (min) - *min = clock_limit; + min_clk = dpm_table->min; + max_clk = dpm_table->max; + if (min) + *min = min_clk; if (max) - *max = clock_limit; + *max = max_clk; - return 0; + if (min_clk && max_clk) + return 0; } if (!(clk_type == SMU_GFXCLK || clk_type == SMU_SCLK)) { @@ -1363,8 +1387,9 @@ static int smu_v13_0_6_print_clk_levels(struct smu_context *smu, return ret; } - min_clk = pstate_table->gfxclk_pstate.curr.min; - max_clk = pstate_table->gfxclk_pstate.curr.max; + single_dpm_table = &(dpm_context->dpm_tables.gfx_table); + min_clk = single_dpm_table->min; + max_clk = single_dpm_table->max; if (now < SMU_13_0_6_DSCLK_THRESHOLD) { size += sysfs_emit_at(buf, size, "S: %uMhz *\n", @@ -2529,6 +2554,126 @@ static int smu_v13_0_6_get_current_pcie_link_speed(struct smu_context *smu) return pcie_gen_to_speed(speed_level + 1); } +static ssize_t smu_v13_0_6_get_xcp_metrics(struct smu_context *smu, int xcp_id, + void *table) +{ + const u8 num_jpeg_rings = AMDGPU_MAX_JPEG_RINGS_4_0_3; + int version = smu_v13_0_6_get_metrics_version(smu); + struct amdgpu_partition_metrics_v1_0 *xcp_metrics; + struct amdgpu_device *adev = smu->adev; + int ret, inst, i, j, k, idx; + MetricsTableV0_t *metrics_v0; + MetricsTableV1_t *metrics_v1; + MetricsTableV2_t *metrics_v2; + struct amdgpu_xcp *xcp; + u32 inst_mask; + bool per_inst; + + if (!table) + return sizeof(*xcp_metrics); + + for_each_xcp(adev->xcp_mgr, xcp, i) { + if (xcp->id == xcp_id) + break; + } + if (i == adev->xcp_mgr->num_xcps) + return -EINVAL; + + xcp_metrics = (struct amdgpu_partition_metrics_v1_0 *)table; + smu_cmn_init_partition_metrics(xcp_metrics, 1, 0); + + metrics_v0 = kzalloc(METRICS_TABLE_SIZE, GFP_KERNEL); + if (!metrics_v0) + return -ENOMEM; + + ret = 
smu_v13_0_6_get_metrics_table(smu, metrics_v0, false); + if (ret) { + kfree(metrics_v0); + return ret; + } + + if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == + IP_VERSION(13, 0, 12) && + smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS))) { + ret = smu_v13_0_12_get_xcp_metrics(smu, xcp, table, metrics_v0); + goto out; + } + + metrics_v1 = (MetricsTableV1_t *)metrics_v0; + metrics_v2 = (MetricsTableV2_t *)metrics_v0; + + per_inst = smu_v13_0_6_cap_supported(smu, SMU_CAP(PER_INST_METRICS)); + + amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_VCN, &inst_mask); + idx = 0; + for_each_inst(k, inst_mask) { + /* Both JPEG and VCN have the same instances */ + inst = GET_INST(VCN, k); + + for (j = 0; j < num_jpeg_rings; ++j) { + xcp_metrics->jpeg_busy[(idx * num_jpeg_rings) + j] = + SMUQ10_ROUND(GET_METRIC_FIELD( + JpegBusy, + version)[(inst * num_jpeg_rings) + j]); + } + xcp_metrics->vcn_busy[idx] = + SMUQ10_ROUND(GET_METRIC_FIELD(VcnBusy, version)[inst]); + + xcp_metrics->current_vclk0[idx] = SMUQ10_ROUND( + GET_METRIC_FIELD(VclkFrequency, version)[inst]); + xcp_metrics->current_dclk0[idx] = SMUQ10_ROUND( + GET_METRIC_FIELD(DclkFrequency, version)[inst]); + xcp_metrics->current_socclk[idx] = SMUQ10_ROUND( + GET_METRIC_FIELD(SocclkFrequency, version)[inst]); + + idx++; + } + + xcp_metrics->current_uclk = + SMUQ10_ROUND(GET_METRIC_FIELD(UclkFrequency, version)); + + if (per_inst) { + amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_GFX, &inst_mask); + idx = 0; + for_each_inst(k, inst_mask) { + inst = GET_INST(GC, k); + xcp_metrics->current_gfxclk[idx] = + SMUQ10_ROUND(GET_METRIC_FIELD(GfxclkFrequency, + version)[inst]); + + xcp_metrics->gfx_busy_inst[idx] = SMUQ10_ROUND( + GET_GPU_METRIC_FIELD(GfxBusy, version)[inst]); + xcp_metrics->gfx_busy_acc[idx] = SMUQ10_ROUND( + GET_GPU_METRIC_FIELD(GfxBusyAcc, + version)[inst]); + if (smu_v13_0_6_cap_supported( + smu, SMU_CAP(HST_LIMIT_METRICS))) { + xcp_metrics->gfx_below_host_limit_ppt_acc + [idx] = SMUQ10_ROUND( + metrics_v0->GfxclkBelowHostLimitPptAcc + [inst]); + xcp_metrics->gfx_below_host_limit_thm_acc + [idx] = SMUQ10_ROUND( + metrics_v0->GfxclkBelowHostLimitThmAcc + [inst]); + xcp_metrics->gfx_low_utilization_acc + [idx] = SMUQ10_ROUND( + metrics_v0 + ->GfxclkLowUtilizationAcc[inst]); + xcp_metrics->gfx_below_host_limit_total_acc + [idx] = SMUQ10_ROUND( + metrics_v0->GfxclkBelowHostLimitTotalAcc + [inst]); + } + idx++; + } + } +out: + kfree(metrics_v0); + + return sizeof(*xcp_metrics); +} + static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table) { struct smu_table_context *smu_table = &smu->smu_table; @@ -2542,20 +2687,24 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table MetricsTableV2_t *metrics_v2; struct amdgpu_xcp *xcp; u16 link_width_level; + ssize_t num_bytes; u8 num_jpeg_rings; u32 inst_mask; bool per_inst; metrics_v0 = kzalloc(METRICS_TABLE_SIZE, GFP_KERNEL); - ret = smu_v13_0_6_get_metrics_table(smu, metrics_v0, true); + ret = smu_v13_0_6_get_metrics_table(smu, metrics_v0, false); if (ret) { kfree(metrics_v0); return ret; } if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 12) && - smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS))) - return smu_v13_0_12_get_gpu_metrics(smu, table); + smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS))) { + num_bytes = smu_v13_0_12_get_gpu_metrics(smu, table, metrics_v0); + kfree(metrics_v0); + return num_bytes; + } metrics_v1 = (MetricsTableV1_t *)metrics_v0; metrics_v2 = (MetricsTableV2_t *)metrics_v0; @@ -2670,13 +2819,16 @@
static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table SMUQ10_ROUND(GET_METRIC_FIELD(DramBandwidthUtilizationAcc, version)); for (i = 0; i < NUM_XGMI_LINKS; i++) { - gpu_metrics->xgmi_read_data_acc[i] = - SMUQ10_ROUND(GET_METRIC_FIELD(XgmiReadDataSizeAcc, version)[i]); - gpu_metrics->xgmi_write_data_acc[i] = - SMUQ10_ROUND(GET_METRIC_FIELD(XgmiWriteDataSizeAcc, version)[i]); + j = amdgpu_xgmi_get_ext_link(adev, i); + if (j < 0 || j >= NUM_XGMI_LINKS) + continue; + gpu_metrics->xgmi_read_data_acc[j] = SMUQ10_ROUND( + GET_METRIC_FIELD(XgmiReadDataSizeAcc, version)[i]); + gpu_metrics->xgmi_write_data_acc[j] = SMUQ10_ROUND( + GET_METRIC_FIELD(XgmiWriteDataSizeAcc, version)[i]); ret = amdgpu_get_xgmi_link_status(adev, i); if (ret >= 0) - gpu_metrics->xgmi_link_status[i] = ret; + gpu_metrics->xgmi_link_status[j] = ret; } gpu_metrics->num_partition = adev->xcp_mgr->num_xcps; @@ -3673,6 +3825,7 @@ static const struct pptable_funcs smu_v13_0_6_ppt_funcs = { .get_pp_feature_mask = smu_cmn_get_pp_feature_mask, .get_gpu_metrics = smu_v13_0_6_get_gpu_metrics, .get_pm_metrics = smu_v13_0_6_get_pm_metrics, + .get_xcp_metrics = smu_v13_0_6_get_xcp_metrics, .get_thermal_temperature_range = smu_v13_0_6_get_thermal_temperature_range, .mode1_reset_is_support = smu_v13_0_6_is_mode1_reset_supported, .link_reset_is_support = smu_v13_0_6_is_link_reset_supported, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h index d151bcd0cca7..67b30674fd31 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h @@ -67,19 +67,25 @@ enum smu_v13_0_6_caps { SMU_CAP(STATIC_METRICS), SMU_CAP(HST_LIMIT_METRICS), SMU_CAP(BOARD_VOLTAGE), + SMU_CAP(PLDM_VERSION), SMU_CAP(ALL), }; extern void smu_v13_0_6_set_ppt_funcs(struct smu_context *smu); bool smu_v13_0_6_cap_supported(struct smu_context *smu, enum smu_v13_0_6_caps cap); int smu_v13_0_6_get_static_metrics_table(struct smu_context *smu); +int smu_v13_0_6_get_metrics_table(struct smu_context *smu, void *metrics_table, + bool bypass_cache); bool smu_v13_0_12_is_dpm_running(struct smu_context *smu); int smu_v13_0_12_get_max_metrics_size(void); int smu_v13_0_12_setup_driver_pptable(struct smu_context *smu); int smu_v13_0_12_get_smu_metrics_data(struct smu_context *smu, MetricsMember_t member, uint32_t *value); -ssize_t smu_v13_0_12_get_gpu_metrics(struct smu_context *smu, void **table); +ssize_t smu_v13_0_12_get_gpu_metrics(struct smu_context *smu, void **table, void *smu_metrics); +ssize_t smu_v13_0_12_get_xcp_metrics(struct smu_context *smu, + struct amdgpu_xcp *xcp, void *table, + void *smu_metrics); extern const struct cmn2asic_mapping smu_v13_0_12_feature_mask_map[]; extern const struct cmn2asic_msg_mapping smu_v13_0_12_message_map[]; #endif diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c index c8f4f6fb4083..c96fa5e49ed6 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c @@ -579,8 +579,6 @@ static int smu_v13_0_7_set_default_dpm_table(struct smu_context *smu) PPTable_t *driver_ppt = smu->smu_table.driver_pptable; SkuTable_t *skutable = &driver_ppt->SkuTable; struct smu_13_0_dpm_table *dpm_table; - struct smu_13_0_pcie_table *pcie_table; - uint32_t link_level; int ret = 0; /* socclk dpm table setup */ @@ -687,24 +685,6 @@ static int smu_v13_0_7_set_default_dpm_table(struct 
smu_context *smu) dpm_table->max = dpm_table->dpm_levels[0].value; } - /* lclk dpm table setup */ - pcie_table = &dpm_context->dpm_tables.pcie_table; - pcie_table->num_of_link_levels = 0; - for (link_level = 0; link_level < NUM_LINK_LEVELS; link_level++) { - if (!skutable->PcieGenSpeed[link_level] && - !skutable->PcieLaneCount[link_level] && - !skutable->LclkFreq[link_level]) - continue; - - pcie_table->pcie_gen[pcie_table->num_of_link_levels] = - skutable->PcieGenSpeed[link_level]; - pcie_table->pcie_lane[pcie_table->num_of_link_levels] = - skutable->PcieLaneCount[link_level]; - pcie_table->clk_freq[pcie_table->num_of_link_levels] = - skutable->LclkFreq[link_level]; - pcie_table->num_of_link_levels++; - } - /* dcefclk dpm table setup */ dpm_table = &dpm_context->dpm_tables.dcef_table; if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCN_BIT)) { @@ -2739,6 +2719,89 @@ static int smu_v13_0_7_set_power_limit(struct smu_context *smu, return 0; } +static int smu_v13_0_7_update_pcie_parameters(struct smu_context *smu, + uint8_t pcie_gen_cap, + uint8_t pcie_width_cap) +{ + struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context; + struct smu_13_0_pcie_table *pcie_table = + &dpm_context->dpm_tables.pcie_table; + int num_of_levels; + int link_level; + uint32_t smu_pcie_arg; + struct smu_table_context *table_context = &smu->smu_table; + PPTable_t *pptable = table_context->driver_pptable; + SkuTable_t *skutable = &pptable->SkuTable; + int ret = 0; + int i; + + pcie_table->num_of_link_levels = 0; + for (link_level = 0; link_level < NUM_LINK_LEVELS; link_level++) { + if (!skutable->PcieGenSpeed[link_level] && + !skutable->PcieLaneCount[link_level] && + !skutable->LclkFreq[link_level]) + continue; + + pcie_table->pcie_gen[pcie_table->num_of_link_levels] = + skutable->PcieGenSpeed[link_level]; + pcie_table->pcie_lane[pcie_table->num_of_link_levels] = + skutable->PcieLaneCount[link_level]; + pcie_table->clk_freq[pcie_table->num_of_link_levels] = + skutable->LclkFreq[link_level]; + pcie_table->num_of_link_levels++; + } + + num_of_levels = pcie_table->num_of_link_levels; + if (!num_of_levels) + return 0; + + if (!(smu->adev->pm.pp_feature & PP_PCIE_DPM_MASK)) { + if (pcie_table->pcie_gen[num_of_levels - 1] < pcie_gen_cap) + pcie_gen_cap = pcie_table->pcie_gen[num_of_levels - 1]; + + if (pcie_table->pcie_lane[num_of_levels - 1] < pcie_width_cap) + pcie_width_cap = pcie_table->pcie_lane[num_of_levels - 1]; + + /* Force all levels to use the same settings */ + for (i = 0; i < num_of_levels; i++) { + pcie_table->pcie_gen[i] = pcie_gen_cap; + pcie_table->pcie_lane[i] = pcie_width_cap; + smu_pcie_arg = i << 16; + smu_pcie_arg |= pcie_table->pcie_gen[i] << 8; + smu_pcie_arg |= pcie_table->pcie_lane[i]; + + ret = smu_cmn_send_smc_msg_with_param(smu, + SMU_MSG_OverridePcieParameters, + smu_pcie_arg, + NULL); + if (ret) + break; + } + } else { + for (i = 0; i < num_of_levels; i++) { + if (pcie_table->pcie_gen[i] > pcie_gen_cap || + pcie_table->pcie_lane[i] > pcie_width_cap) { + pcie_table->pcie_gen[i] = pcie_table->pcie_gen[i] > pcie_gen_cap ? + pcie_gen_cap : pcie_table->pcie_gen[i]; + pcie_table->pcie_lane[i] = pcie_table->pcie_lane[i] > pcie_width_cap ? 
+ pcie_width_cap : pcie_table->pcie_lane[i]; + smu_pcie_arg = i << 16; + smu_pcie_arg |= pcie_table->pcie_gen[i] << 8; + smu_pcie_arg |= pcie_table->pcie_lane[i]; + + ret = smu_cmn_send_smc_msg_with_param(smu, + SMU_MSG_OverridePcieParameters, + smu_pcie_arg, + NULL); + if (ret) + break; + } + } + } + + return ret; +} + static const struct pptable_funcs smu_v13_0_7_ppt_funcs = { .get_allowed_feature_mask = smu_v13_0_7_get_allowed_feature_mask, .set_default_dpm_table = smu_v13_0_7_set_default_dpm_table, @@ -2768,7 +2831,7 @@ static const struct pptable_funcs smu_v13_0_7_ppt_funcs = { .feature_is_enabled = smu_cmn_feature_is_enabled, .print_clk_levels = smu_v13_0_7_print_clk_levels, .force_clk_levels = smu_v13_0_7_force_clk_levels, - .update_pcie_parameters = smu_v13_0_update_pcie_parameters, + .update_pcie_parameters = smu_v13_0_7_update_pcie_parameters, .get_thermal_temperature_range = smu_v13_0_7_get_thermal_temperature_range, .register_irq_handler = smu_v13_0_register_irq_handler, .enable_thermal_alert = smu_v13_0_enable_thermal_alert, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c index 76c1adda83db..f9b0938c57ea 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c @@ -62,13 +62,14 @@ const int decoded_link_width[8] = {0, 1, 2, 4, 8, 12, 16, 32}; MODULE_FIRMWARE("amdgpu/smu_14_0_2.bin"); MODULE_FIRMWARE("amdgpu/smu_14_0_3.bin"); +MODULE_FIRMWARE("amdgpu/smu_14_0_3_kicker.bin"); #define ENABLE_IMU_ARG_GFXOFF_ENABLE 1 int smu_v14_0_init_microcode(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; - char ucode_prefix[15]; + char ucode_prefix[30]; int err = 0; const struct smc_firmware_header_v1_0 *hdr; const struct common_firmware_header *header; @@ -79,8 +80,12 @@ int smu_v14_0_init_microcode(struct smu_context *smu) return 0; amdgpu_ucode_ip_version_decode(adev, MP1_HWIP, ucode_prefix, sizeof(ucode_prefix)); - err = amdgpu_ucode_request(adev, &adev->pm.fw, AMDGPU_UCODE_REQUIRED, - "amdgpu/%s.bin", ucode_prefix); + if (amdgpu_is_kicker_fw(adev)) + err = amdgpu_ucode_request(adev, &adev->pm.fw, AMDGPU_UCODE_REQUIRED, + "amdgpu/%s_kicker.bin", ucode_prefix); + else + err = amdgpu_ucode_request(adev, &adev->pm.fw, AMDGPU_UCODE_REQUIRED, + "amdgpu/%s.bin", ucode_prefix); if (err) goto out; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c index 84f9b007b59f..fe00c84b1cc6 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c @@ -1207,11 +1207,13 @@ static int smu_v14_0_0_print_clk_levels(struct smu_context *smu, static int smu_v14_0_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type, - uint32_t min, - uint32_t max) + u32 min, + u32 max, + bool __always_unused automatic) { - enum smu_message_type msg_set_min, msg_set_max; - int ret = 0; + enum smu_message_type msg_set_min = SMU_MSG_MAX_COUNT; + enum smu_message_type msg_set_max = SMU_MSG_MAX_COUNT; + int ret = -EINVAL; if (!smu_v14_0_0_clk_dpm_is_enabled(smu, clk_type)) return -EINVAL; @@ -1240,16 +1242,23 @@ static int smu_v14_0_0_set_soft_freq_limited_range(struct smu_context *smu, msg_set_min = SMU_MSG_SetHardMinVcn1; msg_set_max = SMU_MSG_SetSoftMaxVcn1; break; + case SMU_ISPICLK: + msg_set_min = SMU_MSG_SetHardMinIspiclkByFreq; + break; + case SMU_ISPXCLK: + msg_set_min = SMU_MSG_SetHardMinIspxclkByFreq; + break; default: return 
-EINVAL; } - ret = smu_cmn_send_smc_msg_with_param(smu, msg_set_min, min, NULL); - if (ret) - return ret; + if (min && msg_set_min != SMU_MSG_MAX_COUNT) + ret = smu_cmn_send_smc_msg_with_param(smu, msg_set_min, min, NULL); + + if (max && msg_set_max != SMU_MSG_MAX_COUNT) + ret = smu_cmn_send_smc_msg_with_param(smu, msg_set_max, max, NULL); - return smu_cmn_send_smc_msg_with_param(smu, msg_set_max, - max, NULL); + return ret; } static int smu_v14_0_0_force_clk_levels(struct smu_context *smu, @@ -1278,7 +1287,7 @@ static int smu_v14_0_0_force_clk_levels(struct smu_context *smu, if (ret) break; - ret = smu_v14_0_0_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq); + ret = smu_v14_0_0_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq, false); break; default: ret = -EINVAL; @@ -1426,7 +1435,8 @@ static int smu_v14_0_common_set_performance_level(struct smu_context *smu, ret = smu_v14_0_0_set_soft_freq_limited_range(smu, SMU_SCLK, sclk_min, - sclk_max); + sclk_max, + false); if (ret) return ret; @@ -1438,7 +1448,8 @@ static int smu_v14_0_common_set_performance_level(struct smu_context *smu, ret = smu_v14_0_0_set_soft_freq_limited_range(smu, SMU_FCLK, fclk_min, - fclk_max); + fclk_max, + false); if (ret) return ret; } @@ -1447,7 +1458,8 @@ static int smu_v14_0_common_set_performance_level(struct smu_context *smu, ret = smu_v14_0_0_set_soft_freq_limited_range(smu, SMU_SOCCLK, socclk_min, - socclk_max); + socclk_max, + false); if (ret) return ret; } @@ -1456,7 +1468,8 @@ static int smu_v14_0_common_set_performance_level(struct smu_context *smu, ret = smu_v14_0_0_set_soft_freq_limited_range(smu, SMU_VCLK, vclk_min, - vclk_max); + vclk_max, + false); if (ret) return ret; } @@ -1465,7 +1478,8 @@ static int smu_v14_0_common_set_performance_level(struct smu_context *smu, ret = smu_v14_0_0_set_soft_freq_limited_range(smu, SMU_VCLK1, vclk1_min, - vclk1_max); + vclk1_max, + false); if (ret) return ret; } @@ -1474,7 +1488,8 @@ static int smu_v14_0_common_set_performance_level(struct smu_context *smu, ret = smu_v14_0_0_set_soft_freq_limited_range(smu, SMU_DCLK, dclk_min, - dclk_max); + dclk_max, + false); if (ret) return ret; } @@ -1483,7 +1498,8 @@ static int smu_v14_0_common_set_performance_level(struct smu_context *smu, ret = smu_v14_0_0_set_soft_freq_limited_range(smu, SMU_DCLK1, dclk1_min, - dclk1_max); + dclk1_max, + false); if (ret) return ret; } @@ -1533,6 +1549,14 @@ static int smu_v14_0_0_set_vpe_enable(struct smu_context *smu, 0, NULL); } +static int smu_v14_0_0_set_isp_enable(struct smu_context *smu, + bool enable) +{ + return smu_cmn_send_smc_msg_with_param(smu, enable ? 
+ SMU_MSG_PowerUpIspByTile : SMU_MSG_PowerDownIspByTile, + ISP_ALL_TILES_MASK, NULL); +} + static int smu_v14_0_0_set_umsch_mm_enable(struct smu_context *smu, bool enable) { @@ -1662,6 +1686,7 @@ static const struct pptable_funcs smu_v14_0_0_ppt_funcs = { .gfx_off_control = smu_v14_0_gfx_off_control, .mode2_reset = smu_v14_0_0_mode2_reset, .get_dpm_ultimate_freq = smu_v14_0_common_get_dpm_ultimate_freq, + .set_soft_freq_limited_range = smu_v14_0_0_set_soft_freq_limited_range, .od_edit_dpm_table = smu_v14_0_od_edit_dpm_table, .print_clk_levels = smu_v14_0_0_print_clk_levels, .force_clk_levels = smu_v14_0_0_force_clk_levels, @@ -1669,6 +1694,7 @@ static const struct pptable_funcs smu_v14_0_0_ppt_funcs = { .set_fine_grain_gfx_freq_parameters = smu_v14_0_common_set_fine_grain_gfx_freq_parameters, .set_gfx_power_up_by_imu = smu_v14_0_set_gfx_power_up_by_imu, .dpm_set_vpe_enable = smu_v14_0_0_set_vpe_enable, + .dpm_set_isp_enable = smu_v14_0_0_set_isp_enable, .dpm_set_umsch_mm_enable = smu_v14_0_0_set_umsch_mm_enable, .get_dpm_clock_table = smu_v14_0_common_get_dpm_table, .set_mall_enable = smu_v14_0_common_set_mall_enable, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c index 82c2db972491..3aea32baea3d 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c @@ -502,8 +502,6 @@ static int smu_v14_0_2_set_default_dpm_table(struct smu_context *smu) PPTable_t *pptable = table_context->driver_pptable; SkuTable_t *skutable = &pptable->SkuTable; struct smu_14_0_dpm_table *dpm_table; - struct smu_14_0_pcie_table *pcie_table; - uint32_t link_level; int ret = 0; /* socclk dpm table setup */ @@ -619,27 +617,6 @@ static int smu_v14_0_2_set_default_dpm_table(struct smu_context *smu) dpm_table->max = dpm_table->dpm_levels[0].value; } - /* lclk dpm table setup */ - pcie_table = &dpm_context->dpm_tables.pcie_table; - pcie_table->num_of_link_levels = 0; - for (link_level = 0; link_level < NUM_LINK_LEVELS; link_level++) { - if (!skutable->PcieGenSpeed[link_level] && - !skutable->PcieLaneCount[link_level] && - !skutable->LclkFreq[link_level]) - continue; - - pcie_table->pcie_gen[pcie_table->num_of_link_levels] = - skutable->PcieGenSpeed[link_level]; - pcie_table->pcie_lane[pcie_table->num_of_link_levels] = - skutable->PcieLaneCount[link_level]; - pcie_table->clk_freq[pcie_table->num_of_link_levels] = - skutable->LclkFreq[link_level]; - pcie_table->num_of_link_levels++; - - if (link_level == 0) - link_level++; - } - /* dcefclk dpm table setup */ dpm_table = &dpm_context->dpm_tables.dcef_table; if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCN_BIT)) { @@ -1487,10 +1464,31 @@ static int smu_v14_0_2_update_pcie_parameters(struct smu_context *smu, struct smu_14_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context; struct smu_14_0_pcie_table *pcie_table = &dpm_context->dpm_tables.pcie_table; - int num_of_levels = pcie_table->num_of_link_levels; + int num_of_levels; uint32_t smu_pcie_arg; - int ret, i; + uint32_t link_level; + struct smu_table_context *table_context = &smu->smu_table; + PPTable_t *pptable = table_context->driver_pptable; + SkuTable_t *skutable = &pptable->SkuTable; + int ret = 0; + int i; + + pcie_table->num_of_link_levels = 0; + for (link_level = 0; link_level < NUM_LINK_LEVELS; link_level++) { + if (!skutable->PcieGenSpeed[link_level] && + !skutable->PcieLaneCount[link_level] && + !skutable->LclkFreq[link_level]) + continue; + 
pcie_table->pcie_gen[pcie_table->num_of_link_levels] = + skutable->PcieGenSpeed[link_level]; + pcie_table->pcie_lane[pcie_table->num_of_link_levels] = + skutable->PcieLaneCount[link_level]; + pcie_table->clk_freq[pcie_table->num_of_link_levels] = + skutable->LclkFreq[link_level]; + pcie_table->num_of_link_levels++; + } + num_of_levels = pcie_table->num_of_link_levels; if (!num_of_levels) return 0; @@ -1505,30 +1503,40 @@ static int smu_v14_0_2_update_pcie_parameters(struct smu_context *smu, for (i = 0; i < num_of_levels; i++) { pcie_table->pcie_gen[i] = pcie_gen_cap; pcie_table->pcie_lane[i] = pcie_width_cap; + smu_pcie_arg = i << 16; + smu_pcie_arg |= pcie_table->pcie_gen[i] << 8; + smu_pcie_arg |= pcie_table->pcie_lane[i]; + + ret = smu_cmn_send_smc_msg_with_param(smu, + SMU_MSG_OverridePcieParameters, + smu_pcie_arg, + NULL); + if (ret) + break; } } else { for (i = 0; i < num_of_levels; i++) { - if (pcie_table->pcie_gen[i] > pcie_gen_cap) - pcie_table->pcie_gen[i] = pcie_gen_cap; - if (pcie_table->pcie_lane[i] > pcie_width_cap) - pcie_table->pcie_lane[i] = pcie_width_cap; - } - } - - for (i = 0; i < num_of_levels; i++) { - smu_pcie_arg = i << 16; - smu_pcie_arg |= pcie_table->pcie_gen[i] << 8; - smu_pcie_arg |= pcie_table->pcie_lane[i]; - - ret = smu_cmn_send_smc_msg_with_param(smu, + if (pcie_table->pcie_gen[i] > pcie_gen_cap || + pcie_table->pcie_lane[i] > pcie_width_cap) { + pcie_table->pcie_gen[i] = pcie_table->pcie_gen[i] > pcie_gen_cap ? + pcie_gen_cap : pcie_table->pcie_gen[i]; + pcie_table->pcie_lane[i] = pcie_table->pcie_lane[i] > pcie_width_cap ? + pcie_width_cap : pcie_table->pcie_lane[i]; + smu_pcie_arg = i << 16; + smu_pcie_arg |= pcie_table->pcie_gen[i] << 8; + smu_pcie_arg |= pcie_table->pcie_lane[i]; + + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_OverridePcieParameters, smu_pcie_arg, NULL); - if (ret) - return ret; + if (ret) + break; + } + } } - return 0; + return ret; } static const struct smu_temperature_range smu14_thermal_policy[] = { diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c index 80eb1a03b3ca..59f9abd0f7b8 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c @@ -86,6 +86,7 @@ static void smu_cmn_read_arg(struct smu_context *smu, #define SMU_RESP_BUSY_OTHER 0xFC #define SMU_RESP_DEBUG_END 0xFB +#define SMU_RESP_UNEXP (~0U) /** * __smu_cmn_poll_stat -- poll for a status from the SMU * @smu: a pointer to SMU context @@ -171,6 +172,15 @@ static void __smu_cmn_reg_print_error(struct smu_context *smu, dev_err_ratelimited(adev->dev, "SMU: I'm debugging!"); break; + case SMU_RESP_UNEXP: + if (amdgpu_device_bus_status_check(smu->adev)) { + /* print error immediately if device is off the bus */ + dev_err(adev->dev, + "SMU: response:0x%08X for index:%d param:0x%08X message:%s?", + reg_c2pmsg_90, msg_index, param, message); + break; + } + fallthrough; default: dev_err_ratelimited(adev->dev, "SMU: response:0x%08X for index:%d param:0x%08X message:%s?", @@ -1051,73 +1061,6 @@ int smu_cmn_get_combo_pptable(struct smu_context *smu) false); } -void smu_cmn_init_soft_gpu_metrics(void *table, uint8_t frev, uint8_t crev) -{ - struct metrics_table_header *header = (struct metrics_table_header *)table; - uint16_t structure_size; - -#define METRICS_VERSION(a, b) ((a << 16) | b) - - switch (METRICS_VERSION(frev, crev)) { - case METRICS_VERSION(1, 0): - structure_size = sizeof(struct gpu_metrics_v1_0); - break; - case METRICS_VERSION(1, 1): - structure_size = sizeof(struct 
gpu_metrics_v1_1); - break; - case METRICS_VERSION(1, 2): - structure_size = sizeof(struct gpu_metrics_v1_2); - break; - case METRICS_VERSION(1, 3): - structure_size = sizeof(struct gpu_metrics_v1_3); - break; - case METRICS_VERSION(1, 4): - structure_size = sizeof(struct gpu_metrics_v1_4); - break; - case METRICS_VERSION(1, 5): - structure_size = sizeof(struct gpu_metrics_v1_5); - break; - case METRICS_VERSION(1, 6): - structure_size = sizeof(struct gpu_metrics_v1_6); - break; - case METRICS_VERSION(1, 7): - structure_size = sizeof(struct gpu_metrics_v1_7); - break; - case METRICS_VERSION(1, 8): - structure_size = sizeof(struct gpu_metrics_v1_8); - break; - case METRICS_VERSION(2, 0): - structure_size = sizeof(struct gpu_metrics_v2_0); - break; - case METRICS_VERSION(2, 1): - structure_size = sizeof(struct gpu_metrics_v2_1); - break; - case METRICS_VERSION(2, 2): - structure_size = sizeof(struct gpu_metrics_v2_2); - break; - case METRICS_VERSION(2, 3): - structure_size = sizeof(struct gpu_metrics_v2_3); - break; - case METRICS_VERSION(2, 4): - structure_size = sizeof(struct gpu_metrics_v2_4); - break; - case METRICS_VERSION(3, 0): - structure_size = sizeof(struct gpu_metrics_v3_0); - break; - default: - return; - } - -#undef METRICS_VERSION - - memset(header, 0xFF, structure_size); - - header->format_revision = frev; - header->content_revision = crev; - header->structure_size = structure_size; - -} - int smu_cmn_set_mp1_state(struct smu_context *smu, enum pp_mp1_state mp1_state) { diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h index a020277dec3e..a608cdbdada4 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h +++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h @@ -40,6 +40,31 @@ #define SMU_IH_INTERRUPT_CONTEXT_ID_FAN_ABNORMAL 0x8 #define SMU_IH_INTERRUPT_CONTEXT_ID_FAN_RECOVERY 0x9 +#define smu_cmn_init_soft_gpu_metrics(ptr, frev, crev) \ + do { \ + typecheck(struct gpu_metrics_v##frev##_##crev *, (ptr)); \ + struct gpu_metrics_v##frev##_##crev *tmp = (ptr); \ + struct metrics_table_header *header = \ + (struct metrics_table_header *)tmp; \ + memset(header, 0xFF, sizeof(*tmp)); \ + header->format_revision = frev; \ + header->content_revision = crev; \ + header->structure_size = sizeof(*tmp); \ + } while (0) + +#define smu_cmn_init_partition_metrics(ptr, fr, cr) \ + do { \ + typecheck(struct amdgpu_partition_metrics_v##fr##_##cr *, \ + (ptr)); \ + struct amdgpu_partition_metrics_v##fr##_##cr *tmp = (ptr); \ + struct metrics_table_header *header = \ + (struct metrics_table_header *)tmp; \ + memset(header, 0xFF, sizeof(*tmp)); \ + header->format_revision = fr; \ + header->content_revision = cr; \ + header->structure_size = sizeof(*tmp); \ + } while (0) + extern const int link_speed[]; /* Helper to Convert from PCIE Gen 1/2/3/4/5/6 to 0.1 GT/s speed units */ @@ -125,8 +150,6 @@ int smu_cmn_get_metrics_table(struct smu_context *smu, int smu_cmn_get_combo_pptable(struct smu_context *smu); -void smu_cmn_init_soft_gpu_metrics(void *table, uint8_t frev, uint8_t crev); - int smu_cmn_set_mp1_state(struct smu_context *smu, enum pp_mp1_state mp1_state);
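Editor's note on the repeated PCIe-override pattern: the update_pcie_parameters() implementations added above for smu_v13_0_0, smu_v13_0_7 and smu_v14_0_2 all rebuild the link-level table from the SkuTable, clamp each level to the platform's gen/width caps, and then send one SMU_MSG_OverridePcieParameters message per level with the argument packed as (level << 16) | (gen << 8) | lanes. The stand-alone sketch below illustrates only that packing and clamping; pack_pcie_override_arg() and the sample table values are hypothetical and are not driver symbols.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Hypothetical illustration of the smu_pcie_arg layout used with
 * SMU_MSG_OverridePcieParameters in the hunks above:
 *   bits 31..16: DPM link level index
 *   bits 15..8:  PCIe gen value for that level
 *   bits  7..0:  encoded lane-width value for that level
 */
static uint32_t pack_pcie_override_arg(uint32_t level, uint8_t gen, uint8_t lanes)
{
	return (level << 16) | ((uint32_t)gen << 8) | lanes;
}

int main(void)
{
	/* Sample two-level table; values are made up for illustration. */
	uint8_t pcie_gen[2]  = { 2, 4 };	/* per-level gen from SkuTable */
	uint8_t pcie_lane[2] = { 5, 6 };	/* per-level encoded width */
	const uint8_t pcie_gen_cap = 3, pcie_width_cap = 5;

	for (unsigned int i = 0; i < 2; i++) {
		/* Clamp to the caps, as the PCIe DPM branch above does */
		if (pcie_gen[i] > pcie_gen_cap)
			pcie_gen[i] = pcie_gen_cap;
		if (pcie_lane[i] > pcie_width_cap)
			pcie_lane[i] = pcie_width_cap;
		printf("level %u -> smu_pcie_arg 0x%08" PRIx32 "\n", i,
		       pack_pcie_override_arg(i, pcie_gen[i], pcie_lane[i]));
	}
	return 0;
}

A note on the smu_cmn.h change above: replacing the runtime frev/crev switch in smu_cmn_init_soft_gpu_metrics() with a token-pasting macro selects the gpu_metrics_v<frev>_<crev> structure at compile time, so typecheck() rejects a mismatched pointer and an unknown version pair becomes a build error rather than a silent early return; the memset(header, 0xFF, ...) poison fill and the header field initialization are unchanged.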