Diffstat (limited to 'drivers/gpu/drm/amd/pm/swsmu')
24 files changed, 833 insertions, 253 deletions
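The headline interface change in amdgpu_smu.c below is that smu_set_power_limit() now receives the limit type as an explicit argument instead of carrying it in the upper byte of the single limit word. As a hedged, stand-alone illustration (not driver code), this small C program reproduces the old bit-packing that the patch deletes, i.e. the limit >> 24 type extraction and the (1<<24)-1 value mask visible in the first hunk; the SMU_* constants mirror enum smu_ppt_limit_type, everything else is invented for the demo:

#include <stdint.h>
#include <stdio.h>

/* Mirrors enum smu_ppt_limit_type from amdgpu_smu.h. */
enum { SMU_DEFAULT_PPT_LIMIT = 0, SMU_FAST_PPT_LIMIT = 1 };

/* Old scheme, removed by this series: type in bits 31:24, watts in 23:0. */
static uint32_t pack_limit(uint32_t type, uint32_t watts)
{
	return (type << 24) | (watts & ((1u << 24) - 1));
}

int main(void)
{
	uint32_t packed = pack_limit(SMU_FAST_PPT_LIMIT, 150);

	/* Decode exactly as the removed code did. */
	printf("type=%u watts=%u\n", packed >> 24, packed & ((1u << 24) - 1));

	/* New scheme: smu_set_power_limit(handle, SMU_FAST_PPT_LIMIT, 150)
	 * passes both values separately, so no shift or mask is needed. */
	return 0;
}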
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c index 244b8c364d45..f51fa265230b 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c @@ -68,7 +68,7 @@ static int smu_handle_task(struct smu_context *smu, static int smu_reset(struct smu_context *smu); static int smu_set_fan_speed_pwm(void *handle, u32 speed); static int smu_set_fan_control_mode(void *handle, u32 value); -static int smu_set_power_limit(void *handle, uint32_t limit); +static int smu_set_power_limit(void *handle, uint32_t limit_type, uint32_t limit); static int smu_set_fan_speed_rpm(void *handle, uint32_t speed); static int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled); static int smu_set_mp1_state(void *handle, enum pp_mp1_state mp1_state); @@ -508,11 +508,14 @@ static void smu_restore_dpm_user_profile(struct smu_context *smu) /* Enable restore flag */ smu->user_dpm_profile.flags |= SMU_DPM_USER_PROFILE_RESTORE; - /* set the user dpm power limit */ - if (smu->user_dpm_profile.power_limit) { - ret = smu_set_power_limit(smu, smu->user_dpm_profile.power_limit); + /* set the user dpm power limits */ + for (int i = SMU_DEFAULT_PPT_LIMIT; i < SMU_LIMIT_TYPE_COUNT; i++) { + if (!smu->user_dpm_profile.power_limits[i]) + continue; + ret = smu_set_power_limit(smu, i, + smu->user_dpm_profile.power_limits[i]); if (ret) - dev_err(smu->adev->dev, "Failed to set power limit value\n"); + dev_err(smu->adev->dev, "Failed to set %d power limit value\n", i); } /* set the user dpm clock configurations */ @@ -609,6 +612,17 @@ bool is_support_cclk_dpm(struct amdgpu_device *adev) return true; } +int amdgpu_smu_ras_send_msg(struct amdgpu_device *adev, enum smu_message_type msg, + uint32_t param, uint32_t *read_arg) +{ + struct smu_context *smu = adev->powerplay.pp_handle; + int ret = -EOPNOTSUPP; + + if (smu->ppt_funcs && smu->ppt_funcs->ras_send_msg) + ret = smu->ppt_funcs->ras_send_msg(smu, msg, param, read_arg); + + return ret; +} static int smu_sys_get_pp_table(void *handle, char **table) @@ -620,7 +634,7 @@ static int smu_sys_get_pp_table(void *handle, return -EOPNOTSUPP; if (!smu_table->power_play_table && !smu_table->hardcode_pptable) - return -EINVAL; + return -EOPNOTSUPP; if (smu_table->hardcode_pptable) *table = smu_table->hardcode_pptable; @@ -1655,9 +1669,12 @@ static int smu_smc_hw_setup(struct smu_context *smu) if (adev->in_suspend && smu_is_dpm_running(smu)) { dev_info(adev->dev, "dpm has been enabled\n"); ret = smu_system_features_control(smu, true); - if (ret) + if (ret) { dev_err(adev->dev, "Failed system features control!\n"); - return ret; + return ret; + } + + return smu_enable_thermal_alert(smu); } break; default: @@ -2231,7 +2248,6 @@ static int smu_resume(struct amdgpu_ip_block *ip_block) int ret; struct amdgpu_device *adev = ip_block->adev; struct smu_context *smu = adev->powerplay.pp_handle; - struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); if (amdgpu_sriov_multi_vf_mode(adev)) return 0; @@ -2263,18 +2279,6 @@ static int smu_resume(struct amdgpu_ip_block *ip_block) adev->pm.dpm_enabled = true; - if (smu->current_power_limit) { - ret = smu_set_power_limit(smu, smu->current_power_limit); - if (ret && ret != -EOPNOTSUPP) - return ret; - } - - if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL && smu->od_enabled) { - ret = smu_od_edit_dpm_table(smu, PP_OD_COMMIT_DPM_TABLE, NULL, 0); - if (ret) - return ret; - } - dev_info(adev->dev, "SMU is resumed successfully!\n"); return 0; @@ -2802,6 +2806,17 @@ const 
struct amdgpu_ip_block_version smu_v14_0_ip_block = { .funcs = &smu_ip_funcs, }; +const struct ras_smu_drv *smu_get_ras_smu_driver(void *handle) +{ + struct smu_context *smu = (struct smu_context *)handle; + const struct ras_smu_drv *tmp = NULL; + int ret; + + ret = smu_get_ras_smu_drv(smu, &tmp); + + return ret ? NULL : tmp; +} + static int smu_load_microcode(void *handle) { struct smu_context *smu = handle; @@ -2895,6 +2910,9 @@ int smu_get_power_limit(void *handle, if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) return -EOPNOTSUPP; + if (!limit) + return -EINVAL; + switch (pp_power_type) { case PP_PWR_TYPE_SUSTAINED: limit_type = SMU_DEFAULT_PPT_LIMIT; @@ -2926,6 +2944,8 @@ int smu_get_power_limit(void *handle, if (limit_type != SMU_DEFAULT_PPT_LIMIT) { if (smu->ppt_funcs->get_ppt_limit) ret = smu->ppt_funcs->get_ppt_limit(smu, limit, limit_type, limit_level); + else + return -EOPNOTSUPP; } else { switch (limit_level) { case SMU_PPT_LIMIT_CURRENT: @@ -2964,37 +2984,34 @@ int smu_get_power_limit(void *handle, return ret; } -static int smu_set_power_limit(void *handle, uint32_t limit) +static int smu_set_power_limit(void *handle, uint32_t limit_type, uint32_t limit) { struct smu_context *smu = handle; - uint32_t limit_type = limit >> 24; int ret = 0; if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) return -EOPNOTSUPP; - limit &= (1<<24)-1; - if (limit_type != SMU_DEFAULT_PPT_LIMIT) - if (smu->ppt_funcs->set_power_limit) - return smu->ppt_funcs->set_power_limit(smu, limit_type, limit); - - if ((limit > smu->max_power_limit) || (limit < smu->min_power_limit)) { - dev_err(smu->adev->dev, - "New power limit (%d) is out of range [%d,%d]\n", - limit, smu->min_power_limit, smu->max_power_limit); - return -EINVAL; + if (limit_type == SMU_DEFAULT_PPT_LIMIT) { + if (!limit) + limit = smu->current_power_limit; + if ((limit > smu->max_power_limit) || (limit < smu->min_power_limit)) { + dev_err(smu->adev->dev, + "New power limit (%d) is out of range [%d,%d]\n", + limit, smu->min_power_limit, smu->max_power_limit); + return -EINVAL; + } } - if (!limit) - limit = smu->current_power_limit; - if (smu->ppt_funcs->set_power_limit) { ret = smu->ppt_funcs->set_power_limit(smu, limit_type, limit); - if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) - smu->user_dpm_profile.power_limit = limit; + if (ret) + return ret; + if (!(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) + smu->user_dpm_profile.power_limits[limit_type] = limit; } - return ret; + return 0; } static int smu_print_smuclk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf) diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h index 582c186d8b62..8815fc70b63b 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h @@ -212,6 +212,7 @@ enum smu_power_src_type { enum smu_ppt_limit_type { SMU_DEFAULT_PPT_LIMIT = 0, SMU_FAST_PPT_LIMIT, + SMU_LIMIT_TYPE_COUNT, }; enum smu_ppt_limit_level { @@ -231,7 +232,7 @@ enum smu_memory_pool_size { struct smu_user_dpm_profile { uint32_t fan_mode; - uint32_t power_limit; + uint32_t power_limits[SMU_LIMIT_TYPE_COUNT]; uint32_t fan_speed_pwm; uint32_t fan_speed_rpm; uint32_t flags; @@ -1521,6 +1522,21 @@ struct pptable_funcs { */ ssize_t (*get_xcp_metrics)(struct smu_context *smu, int xcp_id, void *table); + /** + * @ras_send_msg: Send a message with a parameter from Ras + * &msg: Type of message. + * &param: Message parameter.
+ * &read_arg: SMU response (optional). + */ + int (*ras_send_msg)(struct smu_context *smu, + enum smu_message_type msg, uint32_t param, uint32_t *read_arg); + + + /** + * @get_ras_smu_drv: Get RAS smu driver interface + * Return: ras_smu_drv * + */ + int (*get_ras_smu_drv)(struct smu_context *smu, const struct ras_smu_drv **ras_smu_drv); }; typedef enum { @@ -1785,7 +1801,10 @@ int smu_set_pm_policy(struct smu_context *smu, enum pp_pm_policy p_type, int level); ssize_t smu_get_pm_policy_info(struct smu_context *smu, enum pp_pm_policy p_type, char *sysbuf); +const struct ras_smu_drv *smu_get_ras_smu_driver(void *handle); +int amdgpu_smu_ras_send_msg(struct amdgpu_device *adev, enum smu_message_type msg, + uint32_t param, uint32_t *readarg); #endif void smu_feature_cap_set(struct smu_context *smu, enum smu_feature_cap_id fea_id); diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_pmfw.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_pmfw.h index bf6aa9620911..dd30d96e1ca2 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_pmfw.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_pmfw.h @@ -87,7 +87,7 @@ typedef enum { /*37*/ FEATURE_DVO = 37, /*38*/ FEATURE_XVMINORPSM_CLKSTOP_DS = 38, /*39*/ FEATURE_GLOBAL_DPM = 39, -/*40*/ FEATURE_NODE_POWER_MANAGER = 40, +/*40*/ FEATURE_HROM_EN = 40, /*41*/ NUM_FEATURES = 41 } FEATURE_LIST_e; @@ -189,7 +189,7 @@ typedef enum { SVI_MAX_TEMP_ENTRIES, // 13 } SVI_TEMP_e; -#define SMU_METRICS_TABLE_VERSION 0x14 +#define SMU_METRICS_TABLE_VERSION 0x15 #define SMU_SYSTEM_METRICS_TABLE_VERSION 0x1 @@ -367,6 +367,11 @@ typedef struct { //Node Power Limit uint32_t MaxNodePowerLimit; + + // PPT1 Configuration + uint32_t PPT1Max; + uint32_t PPT1Min; + uint32_t PPT1Default; } StaticMetricsTable_t; #pragma pack(pop) diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_ppsmc.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_ppsmc.h index 4b066c42e0ec..d09b6ae9827e 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_ppsmc.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_ppsmc.h @@ -105,23 +105,21 @@ #define PPSMC_MSG_UpdatePccWaitDecMaxStr 0x4C #define PPSMC_MSG_ResetSDMA 0x4D #define PPSMC_MSG_GetRasTableVersion 0x4E -#define PPSMC_MSG_GetRmaStatus 0x4F -#define PPSMC_MSG_GetErrorCount 0x50 -#define PPSMC_MSG_GetBadPageCount 0x51 -#define PPSMC_MSG_GetBadPageInfo 0x52 -#define PPSMC_MSG_GetBadPagePaAddrLoHi 0x53 -#define PPSMC_MSG_SetTimestampLoHi 0x54 -#define PPSMC_MSG_GetTimestampLoHi 0x55 -#define PPSMC_MSG_GetRasPolicy 0x56 -#define PPSMC_MSG_DumpErrorRecord 0x57 +#define PPSMC_MSG_GetBadPageCount 0x50 +#define PPSMC_MSG_GetBadPageMcaAddress 0x51 +#define PPSMC_MSG_SetTimestamp 0x53 +#define PPSMC_MSG_SetTimestampHi 0x54 +#define PPSMC_MSG_GetTimestamp 0x55 +#define PPSMC_MSG_GetBadPageIpIdLoHi 0x57 #define PPSMC_MSG_EraseRasTable 0x58 #define PPSMC_MSG_GetStaticMetricsTable 0x59 #define PPSMC_MSG_ResetVfArbitersByIndex 0x5A -#define PPSMC_MSG_GetBadPageSeverity 0x5B #define PPSMC_MSG_GetSystemMetricsTable 0x5C #define PPSMC_MSG_GetSystemMetricsVersion 0x5D #define PPSMC_MSG_ResetVCN 0x5E -#define PPSMC_Message_Count 0x5F +#define PPSMC_MSG_SetFastPptLimit 0x5F +#define PPSMC_MSG_GetFastPptLimit 0x60 +#define PPSMC_Message_Count 0x61 //PPSMC Reset Types for driver msg argument #define PPSMC_RESET_TYPE_DRIVER_MODE_1_RESET 0x1 diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h index 2256c77da636..9b71a8afdd35 
100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h @@ -279,7 +279,16 @@ __SMU_DUMMY_MAP(ResetSDMA), \ __SMU_DUMMY_MAP(ResetVCN), \ __SMU_DUMMY_MAP(GetStaticMetricsTable), \ - __SMU_DUMMY_MAP(GetSystemMetricsTable), + __SMU_DUMMY_MAP(GetSystemMetricsTable), \ + __SMU_DUMMY_MAP(GetRASTableVersion), \ + __SMU_DUMMY_MAP(GetBadPageCount), \ + __SMU_DUMMY_MAP(GetBadPageMcaAddr), \ + __SMU_DUMMY_MAP(SetTimestamp), \ + __SMU_DUMMY_MAP(GetTimestamp), \ + __SMU_DUMMY_MAP(GetBadPageIpid), \ + __SMU_DUMMY_MAP(EraseRasTable), \ + __SMU_DUMMY_MAP(SetFastPptLimit), \ + __SMU_DUMMY_MAP(GetFastPptLimit), #undef __SMU_DUMMY_MAP #define __SMU_DUMMY_MAP(type) SMU_MSG_##type @@ -458,7 +467,8 @@ enum smu_clk_type { __SMU_DUMMY_MAP(GFX_EDC_XVMIN), \ __SMU_DUMMY_MAP(GFX_DIDT_XVMIN), \ __SMU_DUMMY_MAP(FAN_ABNORMAL), \ - __SMU_DUMMY_MAP(PIT), + __SMU_DUMMY_MAP(PIT), \ + __SMU_DUMMY_MAP(HROM_EN), #undef __SMU_DUMMY_MAP #define __SMU_DUMMY_MAP(feature) SMU_FEATURE_##feature##_BIT diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c index 9548bd3c624b..55401e6b2b0b 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c @@ -291,11 +291,12 @@ static int cyan_skillfish_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf) { - int ret = 0, size = 0; + int ret = 0, size = 0, start_offset = 0; uint32_t cur_value = 0; int i; smu_cmn_get_sysfs_buf(&buf, &size); + start_offset = size; switch (clk_type) { case SMU_OD_SCLK: @@ -353,7 +354,7 @@ static int cyan_skillfish_print_clk_levels(struct smu_context *smu, return ret; } - return size; + return size - start_offset; } static bool cyan_skillfish_is_dpm_running(struct smu_context *smu) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c index 0028f10ead42..7c9f77124ab2 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c @@ -1469,7 +1469,7 @@ static int navi10_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf) { uint16_t *curve_settings; - int i, levels, size = 0, ret = 0; + int i, levels, size = 0, ret = 0, start_offset = 0; uint32_t cur_value = 0, value = 0, count = 0; uint32_t freq_values[3] = {0}; uint32_t mark_index = 0; @@ -1484,6 +1484,7 @@ static int navi10_print_clk_levels(struct smu_context *smu, uint32_t min_value, max_value; smu_cmn_get_sysfs_buf(&buf, &size); + start_offset = size; switch (clk_type) { case SMU_GFXCLK: @@ -1497,11 +1498,11 @@ static int navi10_print_clk_levels(struct smu_context *smu, case SMU_DCEFCLK: ret = navi10_get_current_clk_freq_by_table(smu, clk_type, &cur_value); if (ret) - return size; + return size - start_offset; ret = smu_v11_0_get_dpm_level_count(smu, clk_type, &count); if (ret) - return size; + return size - start_offset; ret = navi10_is_support_fine_grained_dpm(smu, clk_type); if (ret < 0) @@ -1511,7 +1512,7 @@ static int navi10_print_clk_levels(struct smu_context *smu, for (i = 0; i < count; i++) { ret = smu_v11_0_get_dpm_freq_by_index(smu, clk_type, i, &value); if (ret) - return size; + return size - start_offset; size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", i, value, cur_value == value ? 
"*" : ""); @@ -1519,10 +1520,10 @@ static int navi10_print_clk_levels(struct smu_context *smu, } else { ret = smu_v11_0_get_dpm_freq_by_index(smu, clk_type, 0, &freq_values[0]); if (ret) - return size; + return size - start_offset; ret = smu_v11_0_get_dpm_freq_by_index(smu, clk_type, count - 1, &freq_values[2]); if (ret) - return size; + return size - start_offset; freq_values[1] = cur_value; mark_index = cur_value == freq_values[0] ? 0 : @@ -1653,7 +1654,7 @@ static int navi10_print_clk_levels(struct smu_context *smu, break; } - return size; + return size - start_offset; } static int navi10_force_clk_levels(struct smu_context *smu, @@ -2888,7 +2889,7 @@ static int navi10_set_dummy_pstates_table_location(struct smu_context *smu) dummy_table += 0x1000; } - amdgpu_asic_flush_hdp(smu->adev, NULL); + amdgpu_hdp_flush(smu->adev, NULL); ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SET_DRIVER_DUMMY_TABLE_DRAM_ADDR_HIGH, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c index 31c2c0386b1f..774283ac7827 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c @@ -1281,7 +1281,7 @@ static int sienna_cichlid_print_clk_levels(struct smu_context *smu, struct smu_11_0_7_overdrive_table *od_settings = smu->od_settings; OverDriveTable_t *od_table = (OverDriveTable_t *)table_context->overdrive_table; - int i, size = 0, ret = 0; + int i, size = 0, ret = 0, start_offset = 0; uint32_t cur_value = 0, value = 0, count = 0; uint32_t freq_values[3] = {0}; uint32_t mark_index = 0; @@ -1289,6 +1289,7 @@ static int sienna_cichlid_print_clk_levels(struct smu_context *smu, uint32_t min_value, max_value; smu_cmn_get_sysfs_buf(&buf, &size); + start_offset = size; switch (clk_type) { case SMU_GFXCLK: @@ -1434,7 +1435,7 @@ static int sienna_cichlid_print_clk_levels(struct smu_context *smu, } print_clk_out: - return size; + return size - start_offset; } static int sienna_cichlid_force_clk_levels(struct smu_context *smu, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c index 78e4186d06cc..b0d6487171d7 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c @@ -1022,7 +1022,12 @@ int smu_v11_0_enable_thermal_alert(struct smu_context *smu) int smu_v11_0_disable_thermal_alert(struct smu_context *smu) { - return amdgpu_irq_put(smu->adev, &smu->irq_source, 0); + int ret = 0; + + if (smu->smu_table.thermal_controller_type) + ret = amdgpu_irq_put(smu->adev, &smu->irq_source, 0); + + return ret; } static uint16_t convert_to_vddc(uint8_t vid) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c index 0708d0f0938b..9626da2dba58 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c @@ -565,7 +565,7 @@ static int vangogh_print_legacy_clk_levels(struct smu_context *smu, DpmClocks_t *clk_table = smu->smu_table.clocks_table; SmuMetrics_legacy_t metrics; struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); - int i, idx, size = 0, ret = 0; + int i, idx, size = 0, ret = 0, start_offset = 0; uint32_t cur_value = 0, value = 0, count = 0; bool cur_value_match_level = false; @@ -576,6 +576,7 @@ static int vangogh_print_legacy_clk_levels(struct smu_context *smu, return ret; smu_cmn_get_sysfs_buf(&buf, &size); + start_offset = size; switch (clk_type) { 
case SMU_OD_SCLK: @@ -658,7 +659,7 @@ static int vangogh_print_legacy_clk_levels(struct smu_context *smu, break; } - return size; + return size - start_offset; } static int vangogh_print_clk_levels(struct smu_context *smu, @@ -666,7 +667,7 @@ static int vangogh_print_clk_levels(struct smu_context *smu, { DpmClocks_t *clk_table = smu->smu_table.clocks_table; SmuMetrics_t metrics; - int i, idx, size = 0, ret = 0; + int i, idx, size = 0, ret = 0, start_offset = 0; uint32_t cur_value = 0, value = 0, count = 0; bool cur_value_match_level = false; uint32_t min, max; @@ -678,6 +679,7 @@ static int vangogh_print_clk_levels(struct smu_context *smu, return ret; smu_cmn_get_sysfs_buf(&buf, &size); + start_offset = size; switch (clk_type) { case SMU_OD_SCLK: @@ -779,7 +781,7 @@ static int vangogh_print_clk_levels(struct smu_context *smu, break; } - return size; + return size - start_offset; } static int vangogh_common_print_clk_levels(struct smu_context *smu, @@ -2311,8 +2313,7 @@ static int vangogh_get_power_limit(struct smu_context *smu, uint32_t *max_power_limit, uint32_t *min_power_limit) { - struct smu_11_5_power_context *power_context = - smu->smu_power.power_context; + struct smu_11_5_power_context *power_context = smu->smu_power.power_context; uint32_t ppt_limit; int ret = 0; @@ -2348,12 +2349,11 @@ static int vangogh_get_power_limit(struct smu_context *smu, } static int vangogh_get_ppt_limit(struct smu_context *smu, - uint32_t *ppt_limit, - enum smu_ppt_limit_type type, - enum smu_ppt_limit_level level) + uint32_t *ppt_limit, + enum smu_ppt_limit_type type, + enum smu_ppt_limit_level level) { - struct smu_11_5_power_context *power_context = - smu->smu_power.power_context; + struct smu_11_5_power_context *power_context = smu->smu_power.power_context; if (!power_context) return -EOPNOTSUPP; @@ -2402,7 +2402,6 @@ static int vangogh_set_power_limit(struct smu_context *smu, smu->current_power_limit = ppt_limit; break; case SMU_FAST_PPT_LIMIT: - ppt_limit &= ~(SMU_FAST_PPT_LIMIT << 24); if (ppt_limit > power_context->max_fast_ppt_limit) { dev_err(smu->adev->dev, "New power limit (%d) is over the max allowed %d\n", diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c index 3baf20f4c373..eaa9ea162f16 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c @@ -494,7 +494,7 @@ static int renoir_set_fine_grain_gfx_freq_parameters(struct smu_context *smu) static int renoir_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf) { - int i, idx, size = 0, ret = 0; + int i, idx, size = 0, ret = 0, start_offset = 0; uint32_t cur_value = 0, value = 0, count = 0, min = 0, max = 0; SmuMetrics_t metrics; bool cur_value_match_level = false; @@ -506,6 +506,7 @@ static int renoir_print_clk_levels(struct smu_context *smu, return ret; smu_cmn_get_sysfs_buf(&buf, &size); + start_offset = size; switch (clk_type) { case SMU_OD_RANGE: @@ -550,7 +551,7 @@ static int renoir_print_clk_levels(struct smu_context *smu, size += sysfs_emit_at(buf, size, "2: %uMhz %s\n", max, i == 2 ? 
"*" : ""); } - return size; + return size - start_offset; case SMU_SOCCLK: count = NUM_SOCCLK_DPM_LEVELS; cur_value = metrics.ClockFrequency[CLOCK_SOCCLK]; @@ -607,7 +608,7 @@ static int renoir_print_clk_levels(struct smu_context *smu, break; } - return size; + return size - start_offset; } static enum amd_pm_state_type renoir_get_current_power_state(struct smu_context *smu) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c index c1062e5f0393..677781060246 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c @@ -1195,15 +1195,16 @@ static int smu_v13_0_0_print_clk_levels(struct smu_context *smu, struct smu_13_0_dpm_table *single_dpm_table; struct smu_13_0_pcie_table *pcie_table; uint32_t gen_speed, lane_width; - int i, curr_freq, size = 0; + int i, curr_freq, size = 0, start_offset = 0; int32_t min_value, max_value; int ret = 0; smu_cmn_get_sysfs_buf(&buf, &size); + start_offset = size; if (amdgpu_ras_intr_triggered()) { size += sysfs_emit_at(buf, size, "unavailable\n"); - return size; + return size - start_offset; } switch (clk_type) { @@ -1534,7 +1535,7 @@ static int smu_v13_0_0_print_clk_levels(struct smu_context *smu, break; } - return size; + return size - start_offset; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c index cb3fea9e8cf3..9e635f733fbf 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c @@ -34,6 +34,7 @@ #include "amdgpu_fru_eeprom.h" #include <linux/pci.h> #include "smu_cmn.h" +#include "amdgpu_ras.h" #undef MP1_Public #undef smnMP1_FIRMWARE_FLAGS @@ -58,7 +59,7 @@ #define NUM_JPEG_RINGS_FW 10 #define NUM_JPEG_RINGS_GPU_METRICS(gpu_metrics) \ - (ARRAY_SIZE(gpu_metrics->xcp_stats[0].jpeg_busy) / 4) + (ARRAY_SIZE(gpu_metrics->jpeg_busy) / 4) const struct cmn2asic_mapping smu_v13_0_12_feature_mask_map[SMU_FEATURE_COUNT] = { SMU_13_0_12_FEA_MAP(SMU_FEATURE_DATA_CALCULATIONS_BIT, FEATURE_DATA_CALCULATION), @@ -81,6 +82,7 @@ const struct cmn2asic_mapping smu_v13_0_12_feature_mask_map[SMU_FEATURE_COUNT] = SMU_13_0_12_FEA_MAP(SMU_FEATURE_DS_MPIOCLK_BIT, FEATURE_DS_MPIOCLK), SMU_13_0_12_FEA_MAP(SMU_FEATURE_DS_MP0CLK_BIT, FEATURE_DS_MP0CLK), SMU_13_0_12_FEA_MAP(SMU_FEATURE_PIT_BIT, FEATURE_PIT), + SMU_13_0_12_FEA_MAP(SMU_FEATURE_HROM_EN_BIT, FEATURE_HROM_EN), }; const struct cmn2asic_msg_mapping smu_v13_0_12_message_map[SMU_MSG_MAX_COUNT] = { @@ -139,6 +141,15 @@ const struct cmn2asic_msg_mapping smu_v13_0_12_message_map[SMU_MSG_MAX_COUNT] = MSG_MAP(ResetVCN, PPSMC_MSG_ResetVCN, 0), MSG_MAP(GetStaticMetricsTable, PPSMC_MSG_GetStaticMetricsTable, 1), MSG_MAP(GetSystemMetricsTable, PPSMC_MSG_GetSystemMetricsTable, 1), + MSG_MAP(GetRASTableVersion, PPSMC_MSG_GetRasTableVersion, 0), + MSG_MAP(GetBadPageCount, PPSMC_MSG_GetBadPageCount, 0), + MSG_MAP(GetBadPageMcaAddr, PPSMC_MSG_GetBadPageMcaAddress, 0), + MSG_MAP(SetTimestamp, PPSMC_MSG_SetTimestamp, 0), + MSG_MAP(GetTimestamp, PPSMC_MSG_GetTimestamp, 0), + MSG_MAP(GetBadPageIpid, PPSMC_MSG_GetBadPageIpIdLoHi, 0), + MSG_MAP(EraseRasTable, PPSMC_MSG_EraseRasTable, 0), + MSG_MAP(SetFastPptLimit, PPSMC_MSG_SetFastPptLimit, 1), + MSG_MAP(GetFastPptLimit, PPSMC_MSG_GetFastPptLimit, 1), }; int smu_v13_0_12_tables_init(struct smu_context *smu) @@ -345,6 +356,12 @@ int smu_v13_0_12_setup_driver_pptable(struct smu_context *smu) if 
(smu_v13_0_6_cap_supported(smu, SMU_CAP(NPM_METRICS))) pptable->MaxNodePowerLimit = SMUQ10_ROUND(static_metrics->MaxNodePowerLimit); + if (smu_v13_0_6_cap_supported(smu, SMU_CAP(FAST_PPT)) && + static_metrics->PPT1Max) { + pptable->PPT1Max = static_metrics->PPT1Max; + pptable->PPT1Min = static_metrics->PPT1Min; + pptable->PPT1Default = static_metrics->PPT1Default; + } smu_v13_0_12_init_xgmi_data(smu, static_metrics); pptable->Init = true; } @@ -449,7 +466,7 @@ static int smu_v13_0_12_get_system_metrics_table(struct smu_context *smu) return ret; } - amdgpu_asic_invalidate_hdp(smu->adev, NULL); + amdgpu_hdp_invalidate(smu->adev, NULL); smu_table_cache_update_time(sys_table, jiffies); memcpy(sys_table->cache.buffer, table->cpu_addr, smu_v13_0_12_get_system_metrics_size()); @@ -719,15 +736,14 @@ static ssize_t smu_v13_0_12_get_temp_metrics(struct smu_context *smu, ssize_t smu_v13_0_12_get_xcp_metrics(struct smu_context *smu, struct amdgpu_xcp *xcp, void *table, void *smu_metrics) { const u8 num_jpeg_rings = NUM_JPEG_RINGS_FW; - struct amdgpu_partition_metrics_v1_0 *xcp_metrics; + struct smu_v13_0_6_partition_metrics *xcp_metrics; struct amdgpu_device *adev = smu->adev; MetricsTable_t *metrics; int inst, j, k, idx; u32 inst_mask; metrics = (MetricsTable_t *)smu_metrics; - xcp_metrics = (struct amdgpu_partition_metrics_v1_0 *) table; - smu_cmn_init_partition_metrics(xcp_metrics, 1, 0); + xcp_metrics = (struct smu_v13_0_6_partition_metrics *)table; amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_VCN, &inst_mask); idx = 0; for_each_inst(k, inst_mask) { @@ -772,22 +788,17 @@ ssize_t smu_v13_0_12_get_xcp_metrics(struct smu_context *smu, struct amdgpu_xcp return sizeof(*xcp_metrics); } -ssize_t smu_v13_0_12_get_gpu_metrics(struct smu_context *smu, void **table, void *smu_metrics) +void smu_v13_0_12_get_gpu_metrics(struct smu_context *smu, void **table, + void *smu_metrics, + struct smu_v13_0_6_gpu_metrics *gpu_metrics) { - struct smu_table_context *smu_table = &smu->smu_table; - struct gpu_metrics_v1_8 *gpu_metrics = - (struct gpu_metrics_v1_8 *)smu_table->gpu_metrics_table; - int ret = 0, xcc_id, inst, i, j, k, idx; struct amdgpu_device *adev = smu->adev; + int ret = 0, xcc_id, inst, i, j; u8 num_jpeg_rings_gpu_metrics; MetricsTable_t *metrics; - struct amdgpu_xcp *xcp; - u32 inst_mask; metrics = (MetricsTable_t *)smu_metrics; - smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 8); - gpu_metrics->temperature_hotspot = SMUQ10_ROUND(metrics->MaxSocketTemperature); /* Individual HBM stack temperature is not reported */ @@ -877,60 +888,186 @@ ssize_t smu_v13_0_12_get_gpu_metrics(struct smu_context *smu, void **table, void gpu_metrics->xgmi_link_status[j] = ret; } - gpu_metrics->num_partition = adev->xcp_mgr->num_xcps; - num_jpeg_rings_gpu_metrics = NUM_JPEG_RINGS_GPU_METRICS(gpu_metrics); - for_each_xcp(adev->xcp_mgr, xcp, i) { - amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_VCN, &inst_mask); - idx = 0; - for_each_inst(k, inst_mask) { - /* Both JPEG and VCN has same instances */ - inst = GET_INST(VCN, k); - - for (j = 0; j < num_jpeg_rings_gpu_metrics; ++j) { - gpu_metrics->xcp_stats[i].jpeg_busy - [(idx * num_jpeg_rings_gpu_metrics) + j] = - SMUQ10_ROUND(metrics->JpegBusy - [(inst * NUM_JPEG_RINGS_FW) + j]); - } - gpu_metrics->xcp_stats[i].vcn_busy[idx] = - SMUQ10_ROUND(metrics->VcnBusy[inst]); - idx++; + for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { + inst = GET_INST(VCN, i); + + for (j = 0; j < num_jpeg_rings_gpu_metrics; ++j) { + gpu_metrics->jpeg_busy[(i * num_jpeg_rings_gpu_metrics) + + j] = + 
SMUQ10_ROUND( + metrics->JpegBusy[(inst * + NUM_JPEG_RINGS_FW) + + j]); } + gpu_metrics->vcn_busy[i] = SMUQ10_ROUND(metrics->VcnBusy[inst]); + } - amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_GFX, &inst_mask); - idx = 0; - for_each_inst(k, inst_mask) { - inst = GET_INST(GC, k); - gpu_metrics->xcp_stats[i].gfx_busy_inst[idx] = - SMUQ10_ROUND(metrics->GfxBusy[inst]); - gpu_metrics->xcp_stats[i].gfx_busy_acc[idx] = - SMUQ10_ROUND(metrics->GfxBusyAcc[inst]); - if (smu_v13_0_6_cap_supported(smu, SMU_CAP(HST_LIMIT_METRICS))) { - gpu_metrics->xcp_stats[i].gfx_below_host_limit_ppt_acc[idx] = - SMUQ10_ROUND(metrics->GfxclkBelowHostLimitPptAcc[inst]); - gpu_metrics->xcp_stats[i].gfx_below_host_limit_thm_acc[idx] = - SMUQ10_ROUND(metrics->GfxclkBelowHostLimitThmAcc[inst]); - gpu_metrics->xcp_stats[i].gfx_low_utilization_acc[idx] = - SMUQ10_ROUND(metrics->GfxclkLowUtilizationAcc[inst]); - gpu_metrics->xcp_stats[i].gfx_below_host_limit_total_acc[idx] = - SMUQ10_ROUND(metrics->GfxclkBelowHostLimitTotalAcc[inst]); - } - idx++; - } + for (i = 0; i < NUM_XCC(adev->gfx.xcc_mask); ++i) { + inst = GET_INST(GC, i); + gpu_metrics->gfx_busy_inst[i] = + SMUQ10_ROUND(metrics->GfxBusy[inst]); + gpu_metrics->gfx_busy_acc[i] = + SMUQ10_ROUND(metrics->GfxBusyAcc[inst]); + if (smu_v13_0_6_cap_supported(smu, + SMU_CAP(HST_LIMIT_METRICS))) { + gpu_metrics + ->gfx_below_host_limit_ppt_acc[i] = SMUQ10_ROUND( + metrics->GfxclkBelowHostLimitPptAcc[inst]); + gpu_metrics + ->gfx_below_host_limit_thm_acc[i] = SMUQ10_ROUND( + metrics->GfxclkBelowHostLimitThmAcc[inst]); + gpu_metrics->gfx_low_utilization_acc[i] = SMUQ10_ROUND( + metrics->GfxclkLowUtilizationAcc[inst]); + gpu_metrics->gfx_below_host_limit_total_acc + [i] = SMUQ10_ROUND( + metrics->GfxclkBelowHostLimitTotalAcc[inst]); + }; } gpu_metrics->xgmi_link_width = metrics->XgmiWidth; gpu_metrics->xgmi_link_speed = metrics->XgmiBitrate; gpu_metrics->firmware_timestamp = metrics->Timestamp; - - *table = (void *)gpu_metrics; - - return sizeof(*gpu_metrics); } const struct smu_temp_funcs smu_v13_0_12_temp_funcs = { .temp_metrics_is_supported = smu_v13_0_12_is_temp_metrics_supported, .get_temp_metrics = smu_v13_0_12_get_temp_metrics, }; + +static int smu_v13_0_12_get_ras_table_version(struct amdgpu_device *adev, + uint32_t *table_version) +{ + struct smu_context *smu = adev->powerplay.pp_handle; + + return smu_cmn_send_smc_msg_with_param(smu, + SMU_MSG_GetRASTableVersion, 0, table_version); +} + +static int smu_v13_0_12_get_badpage_count(struct amdgpu_device *adev, uint32_t *count, + uint32_t timeout) +{ + struct smu_context *smu = adev->powerplay.pp_handle; + uint64_t end, now; + int ret = 0; + + now = (uint64_t)ktime_to_ms(ktime_get()); + end = now + timeout; + do { + ret = smu_cmn_send_smc_msg_with_param(smu, + SMU_MSG_GetBadPageCount, 0, count); + /* eeprom is not ready */ + if (ret != -EBUSY) + return ret; + mdelay(10); + now = (uint64_t)ktime_to_ms(ktime_get()); + } while (now < end); + + dev_err(adev->dev, + "smu get bad page count timeout!\n"); + return ret; +} + +static int smu_v13_0_12_set_timestamp(struct amdgpu_device *adev, uint64_t timestamp) +{ + struct smu_context *smu = adev->powerplay.pp_handle; + + return smu_cmn_send_smc_msg_with_param(smu, + SMU_MSG_SetTimestamp, (uint32_t)timestamp, 0); +} + +static int smu_v13_0_12_get_timestamp(struct amdgpu_device *adev, + uint16_t index, uint64_t *timestamp) +{ + struct smu_context *smu = adev->powerplay.pp_handle; + uint32_t temp; + int ret; + + ret = smu_cmn_send_smc_msg_with_param(smu, + SMU_MSG_GetTimestamp, index, 
&temp); + if (!ret) + *timestamp = temp; + + return ret; +} + +static int smu_v13_0_12_get_badpage_ipid(struct amdgpu_device *adev, + uint16_t index, uint64_t *ipid) +{ + struct smu_context *smu = adev->powerplay.pp_handle; + uint32_t temp_arg, temp_ipid_lo, temp_ipid_high; + int ret; + + temp_arg = index | (1 << 16); + ret = smu_cmn_send_smc_msg_with_param(smu, + SMU_MSG_GetBadPageIpid, temp_arg, &temp_ipid_lo); + if (ret) + return ret; + + temp_arg = index | (2 << 16); + ret = smu_cmn_send_smc_msg_with_param(smu, + SMU_MSG_GetBadPageIpid, temp_arg, &temp_ipid_high); + if (!ret) + *ipid = (uint64_t)temp_ipid_high << 32 | temp_ipid_lo; + return ret; +} + +static int smu_v13_0_12_erase_ras_table(struct amdgpu_device *adev, + uint32_t *result) +{ + struct smu_context *smu = adev->powerplay.pp_handle; + + return smu_cmn_send_smc_msg_with_param(smu, + SMU_MSG_EraseRasTable, 0, result); +} + +static int smu_v13_0_12_get_badpage_mca_addr(struct amdgpu_device *adev, + uint16_t index, uint64_t *mca_addr) +{ + struct smu_context *smu = adev->powerplay.pp_handle; + uint32_t temp_arg, temp_addr_lo, temp_addr_high; + int ret; + + temp_arg = index | (1 << 16); + ret = smu_cmn_send_smc_msg_with_param(smu, + SMU_MSG_GetBadPageMcaAddr, temp_arg, &temp_addr_lo); + if (ret) + return ret; + + temp_arg = index | (2 << 16); + ret = smu_cmn_send_smc_msg_with_param(smu, + SMU_MSG_GetBadPageMcaAddr, temp_arg, &temp_addr_high); + if (!ret) + *mca_addr = (uint64_t)temp_addr_high << 32 | temp_addr_lo; + return ret; +} + +static const struct ras_eeprom_smu_funcs smu_v13_0_12_eeprom_smu_funcs = { + .get_ras_table_version = smu_v13_0_12_get_ras_table_version, + .get_badpage_count = smu_v13_0_12_get_badpage_count, + .get_badpage_mca_addr = smu_v13_0_12_get_badpage_mca_addr, + .set_timestamp = smu_v13_0_12_set_timestamp, + .get_timestamp = smu_v13_0_12_get_timestamp, + .get_badpage_ipid = smu_v13_0_12_get_badpage_ipid, + .erase_ras_table = smu_v13_0_12_erase_ras_table, +}; + +static void smu_v13_0_12_ras_smu_feature_flags(struct amdgpu_device *adev, uint64_t *flags) +{ + struct smu_context *smu = adev->powerplay.pp_handle; + + if (!flags) + return; + + *flags = 0ULL; + + if (smu_v13_0_6_cap_supported(smu, SMU_CAP(RAS_EEPROM))) + *flags |= RAS_SMU_FEATURE_BIT__RAS_EEPROM; + +} + +const struct ras_smu_drv smu_v13_0_12_ras_smu_drv = { + .smu_eeprom_funcs = &smu_v13_0_12_eeprom_smu_funcs, + .ras_smu_feature_flags = smu_v13_0_12_ras_smu_feature_flags, +}; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c index b081ae3e8f43..6908f9930f16 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c @@ -497,11 +497,12 @@ static int smu_v13_0_4_get_dpm_level_count(struct smu_context *smu, static int smu_v13_0_4_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf) { - int i, idx, size = 0, ret = 0; + int i, idx, size = 0, ret = 0, start_offset = 0; uint32_t cur_value = 0, value = 0, count = 0; uint32_t min, max; smu_cmn_get_sysfs_buf(&buf, &size); + start_offset = size; switch (clk_type) { case SMU_OD_SCLK: @@ -565,7 +566,7 @@ static int smu_v13_0_4_print_clk_levels(struct smu_context *smu, break; } - return size; + return size - start_offset; } static int smu_v13_0_4_read_sensor(struct smu_context *smu, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c index f5db181ef489..4576bf008b22 100644 --- 
a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c @@ -861,11 +861,12 @@ out: static int smu_v13_0_5_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf) { - int i, idx, size = 0, ret = 0; + int i, idx, size = 0, ret = 0, start_offset = 0; uint32_t cur_value = 0, value = 0, count = 0; uint32_t min = 0, max = 0; smu_cmn_get_sysfs_buf(&buf, &size); + start_offset = size; switch (clk_type) { case SMU_OD_SCLK: @@ -928,7 +929,7 @@ static int smu_v13_0_5_print_clk_levels(struct smu_context *smu, } print_clk_out: - return size; + return size - start_offset; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c index 285cf7979693..44e1cd821eec 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c @@ -356,6 +356,9 @@ static void smu_v13_0_12_init_caps(struct smu_context *smu) if (fw_ver > 0x04560900) smu_v13_0_6_cap_set(smu, SMU_CAP(VCN_RESET)); + if (fw_ver >= 0x04560D00) + smu_v13_0_6_cap_set(smu, SMU_CAP(FAST_PPT)); + if (fw_ver >= 0x04560700) { if (fw_ver >= 0x04560900) { smu_v13_0_6_cap_set(smu, SMU_CAP(TEMP_METRICS)); @@ -450,7 +453,8 @@ static void smu_v13_0_6_init_caps(struct smu_context *smu) ((pgm == 4) && (fw_ver >= 0x4557000))) smu_v13_0_6_cap_set(smu, SMU_CAP(SDMA_RESET)); - if ((pgm == 0) && (fw_ver >= 0x00558200)) + if ((pgm == 0 && fw_ver >= 0x00558200) || + (pgm == 7 && fw_ver >= 0x07551400)) smu_v13_0_6_cap_set(smu, SMU_CAP(VCN_RESET)); } @@ -548,7 +552,7 @@ static int smu_v13_0_6_tables_init(struct smu_context *smu) { struct smu_table_context *smu_table = &smu->smu_table; struct smu_table *tables = smu_table->tables; - void *gpu_metrics_table __free(kfree) = NULL; + struct smu_v13_0_6_gpu_metrics *gpu_metrics; void *driver_pptable __free(kfree) = NULL; void *metrics_table __free(kfree) = NULL; struct amdgpu_device *adev = smu->adev; @@ -578,24 +582,28 @@ static int smu_v13_0_6_tables_init(struct smu_context *smu) return -ENOMEM; smu_table->metrics_time = 0; - smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_8); - gpu_metrics_table = - kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL); - if (!gpu_metrics_table) - return -ENOMEM; - driver_pptable = kzalloc(sizeof(struct PPTable_t), GFP_KERNEL); if (!driver_pptable) return -ENOMEM; + ret = smu_table_cache_init(smu, SMU_TABLE_SMU_METRICS, + sizeof(struct smu_v13_0_6_gpu_metrics), 1); + if (ret) + return ret; + + gpu_metrics = (struct smu_v13_0_6_gpu_metrics + *)(tables[SMU_TABLE_SMU_METRICS].cache.buffer); + + smu_v13_0_6_gpu_metrics_init(gpu_metrics, 1, 9); if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 12)) { ret = smu_v13_0_12_tables_init(smu); - if (ret) + if (ret) { + smu_table_cache_fini(smu, SMU_TABLE_SMU_METRICS); return ret; + } } - smu_table->gpu_metrics_table = no_free_ptr(gpu_metrics_table); smu_table->metrics_table = no_free_ptr(metrics_table); smu_table->driver_pptable = no_free_ptr(driver_pptable); @@ -731,6 +739,7 @@ static int smu_v13_0_6_fini_smc_tables(struct smu_context *smu) { if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 12)) smu_v13_0_12_tables_fini(smu); + smu_table_cache_fini(smu, SMU_TABLE_SMU_METRICS); return smu_v13_0_fini_smc_tables(smu); } @@ -765,7 +774,7 @@ int smu_v13_0_6_get_metrics_table(struct smu_context *smu, void *metrics_table, return ret; } - amdgpu_asic_invalidate_hdp(smu->adev, NULL); + 
amdgpu_hdp_invalidate(smu->adev, NULL); memcpy(smu_table->metrics_table, table->cpu_addr, table_size); smu_table->metrics_time = jiffies; @@ -844,12 +853,23 @@ int smu_v13_0_6_get_static_metrics_table(struct smu_context *smu) return ret; } - amdgpu_asic_invalidate_hdp(smu->adev, NULL); + amdgpu_hdp_invalidate(smu->adev, NULL); memcpy(smu_table->metrics_table, table->cpu_addr, table_size); return 0; } +static void smu_v13_0_6_update_caps(struct smu_context *smu) +{ + struct smu_table_context *smu_table = &smu->smu_table; + struct PPTable_t *pptable = + (struct PPTable_t *)smu_table->driver_pptable; + + if (smu_v13_0_6_cap_supported(smu, SMU_CAP(FAST_PPT)) && + !pptable->PPT1Max) + smu_v13_0_6_cap_clear(smu, SMU_CAP(FAST_PPT)); +} + static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu) { struct smu_table_context *smu_table = &smu->smu_table; @@ -866,8 +886,12 @@ static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu) uint8_t max_width; if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 12) && - smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS))) - return smu_v13_0_12_setup_driver_pptable(smu); + smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS))) { + ret = smu_v13_0_12_setup_driver_pptable(smu); + if (ret) + return ret; + goto out; + } /* Store one-time values in driver PPTable */ if (!pptable->Init) { @@ -947,7 +971,8 @@ static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu) smu_v13_0_6_fill_static_metrics_table(smu, static_metrics); } } - +out: + smu_v13_0_6_update_caps(smu); return 0; } @@ -1393,7 +1418,7 @@ static int smu_v13_0_6_print_clks(struct smu_context *smu, char *buf, int size, return -EINVAL; if (curr_clk < SMU_13_0_6_DSCLK_THRESHOLD) { - size = sysfs_emit_at(buf, size, "S: %uMhz *\n", curr_clk); + size += sysfs_emit_at(buf, size, "S: %uMhz *\n", curr_clk); for (i = 0; i < clocks.num_levels; i++) size += sysfs_emit_at(buf, size, "%d: %uMhz\n", i, clocks.data[i].clocks_in_khz / @@ -1428,7 +1453,7 @@ static int smu_v13_0_6_print_clks(struct smu_context *smu, char *buf, int size, static int smu_v13_0_6_print_clk_levels(struct smu_context *smu, enum smu_clk_type type, char *buf) { - int now, size = 0; + int now, size = 0, start_offset = 0; int ret = 0; struct smu_umd_pstate_table *pstate_table = &smu->pstate_table; struct smu_13_0_dpm_table *single_dpm_table; @@ -1437,10 +1462,11 @@ static int smu_v13_0_6_print_clk_levels(struct smu_context *smu, uint32_t min_clk, max_clk; smu_cmn_get_sysfs_buf(&buf, &size); + start_offset = size; if (amdgpu_ras_intr_triggered()) { size += sysfs_emit_at(buf, size, "unavailable\n"); - return size; + return size - start_offset; } dpm_context = smu_dpm->dpm_context; @@ -1512,9 +1538,13 @@ static int smu_v13_0_6_print_clk_levels(struct smu_context *smu, single_dpm_table = &(dpm_context->dpm_tables.uclk_table); - return smu_v13_0_6_print_clks(smu, buf, size, single_dpm_table, - now, "mclk"); + ret = smu_v13_0_6_print_clks(smu, buf, size, single_dpm_table, + now, "mclk"); + if (ret < 0) + return ret; + size += ret; + break; case SMU_SOCCLK: ret = smu_v13_0_6_get_current_clk_freq_by_table(smu, SMU_SOCCLK, &now); @@ -1526,9 +1556,13 @@ static int smu_v13_0_6_print_clk_levels(struct smu_context *smu, single_dpm_table = &(dpm_context->dpm_tables.soc_table); - return smu_v13_0_6_print_clks(smu, buf, size, single_dpm_table, - now, "socclk"); + ret = smu_v13_0_6_print_clks(smu, buf, size, single_dpm_table, + now, "socclk"); + if (ret < 0) + return ret; + size += ret; + break; case SMU_FCLK: ret 
= smu_v13_0_6_get_current_clk_freq_by_table(smu, SMU_FCLK, &now); @@ -1540,9 +1574,13 @@ static int smu_v13_0_6_print_clk_levels(struct smu_context *smu, single_dpm_table = &(dpm_context->dpm_tables.fclk_table); - return smu_v13_0_6_print_clks(smu, buf, size, single_dpm_table, - now, "fclk"); + ret = smu_v13_0_6_print_clks(smu, buf, size, single_dpm_table, + now, "fclk"); + if (ret < 0) + return ret; + size += ret; + break; case SMU_VCLK: ret = smu_v13_0_6_get_current_clk_freq_by_table(smu, SMU_VCLK, &now); @@ -1554,9 +1592,13 @@ static int smu_v13_0_6_print_clk_levels(struct smu_context *smu, single_dpm_table = &(dpm_context->dpm_tables.vclk_table); - return smu_v13_0_6_print_clks(smu, buf, size, single_dpm_table, - now, "vclk"); + ret = smu_v13_0_6_print_clks(smu, buf, size, single_dpm_table, + now, "vclk"); + if (ret < 0) + return ret; + size += ret; + break; case SMU_DCLK: ret = smu_v13_0_6_get_current_clk_freq_by_table(smu, SMU_DCLK, &now); @@ -1568,14 +1610,18 @@ static int smu_v13_0_6_print_clk_levels(struct smu_context *smu, single_dpm_table = &(dpm_context->dpm_tables.dclk_table); - return smu_v13_0_6_print_clks(smu, buf, size, single_dpm_table, - now, "dclk"); + ret = smu_v13_0_6_print_clks(smu, buf, size, single_dpm_table, + now, "dclk"); + if (ret < 0) + return ret; + size += ret; + break; default: break; } - return size; + return size - start_offset; } static int smu_v13_0_6_upload_dpm_level(struct smu_context *smu, bool max, @@ -1845,7 +1891,7 @@ static int smu_v13_0_6_get_power_limit(struct smu_context *smu, if (current_power_limit) *current_power_limit = power_limit; if (default_power_limit) - *default_power_limit = power_limit; + *default_power_limit = pptable->MaxSocketPowerLimit; if (max_power_limit) { *max_power_limit = pptable->MaxSocketPowerLimit; @@ -1860,9 +1906,66 @@ static int smu_v13_0_6_set_power_limit(struct smu_context *smu, enum smu_ppt_limit_type limit_type, uint32_t limit) { + struct smu_table_context *smu_table = &smu->smu_table; + struct PPTable_t *pptable = + (struct PPTable_t *)smu_table->driver_pptable; + int ret; + + if (limit_type == SMU_FAST_PPT_LIMIT) { + if (!smu_v13_0_6_cap_supported(smu, SMU_CAP(FAST_PPT))) + return -EOPNOTSUPP; + if (limit > pptable->PPT1Max || limit < pptable->PPT1Min) { + dev_err(smu->adev->dev, + "New power limit (%d) should be between min %d max %d\n", + limit, pptable->PPT1Min, pptable->PPT1Max); + return -EINVAL; + } + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetFastPptLimit, + limit, NULL); + if (ret) + dev_err(smu->adev->dev, "Set fast PPT limit failed!\n"); + return ret; + } + return smu_v13_0_set_power_limit(smu, limit_type, limit); } +static int smu_v13_0_6_get_ppt_limit(struct smu_context *smu, + uint32_t *ppt_limit, + enum smu_ppt_limit_type type, + enum smu_ppt_limit_level level) +{ + struct smu_table_context *smu_table = &smu->smu_table; + struct PPTable_t *pptable = + (struct PPTable_t *)smu_table->driver_pptable; + int ret = 0; + + if (type == SMU_FAST_PPT_LIMIT) { + if (!smu_v13_0_6_cap_supported(smu, SMU_CAP(FAST_PPT))) + return -EOPNOTSUPP; + switch (level) { + case SMU_PPT_LIMIT_MAX: + *ppt_limit = pptable->PPT1Max; + break; + case SMU_PPT_LIMIT_CURRENT: + ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetFastPptLimit, ppt_limit); + if (ret) + dev_err(smu->adev->dev, "Get fast PPT limit failed!\n"); + break; + case SMU_PPT_LIMIT_DEFAULT: + *ppt_limit = pptable->PPT1Default; + break; + case SMU_PPT_LIMIT_MIN: + *ppt_limit = pptable->PPT1Min; + break; + default: + return -EOPNOTSUPP; + } + return ret; + } 
+ return -EOPNOTSUPP; +} + static int smu_v13_0_6_irq_process(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry) @@ -2383,7 +2486,7 @@ static int smu_v13_0_6_request_i2c_xfer(struct smu_context *smu, memcpy(table->cpu_addr, table_data, table_size); /* Flush hdp cache */ - amdgpu_asic_flush_hdp(adev, NULL); + amdgpu_hdp_flush(adev, NULL); ret = smu_cmn_send_smc_msg(smu, SMU_MSG_RequestI2cTransaction, NULL); @@ -2627,7 +2730,7 @@ static ssize_t smu_v13_0_6_get_xcp_metrics(struct smu_context *smu, int xcp_id, { const u8 num_jpeg_rings = AMDGPU_MAX_JPEG_RINGS_4_0_3; int version = smu_v13_0_6_get_metrics_version(smu); - struct amdgpu_partition_metrics_v1_0 *xcp_metrics; + struct smu_v13_0_6_partition_metrics *xcp_metrics; MetricsTableV0_t *metrics_v0 __free(kfree) = NULL; struct amdgpu_device *adev = smu->adev; int ret, inst, i, j, k, idx; @@ -2647,8 +2750,8 @@ static ssize_t smu_v13_0_6_get_xcp_metrics(struct smu_context *smu, int xcp_id, if (i == adev->xcp_mgr->num_xcps) return -EINVAL; - xcp_metrics = (struct amdgpu_partition_metrics_v1_0 *)table; - smu_cmn_init_partition_metrics(xcp_metrics, 1, 0); + xcp_metrics = (struct smu_v13_0_6_partition_metrics *)table; + smu_v13_0_6_partition_metrics_init(xcp_metrics, 1, 1); metrics_v0 = kzalloc(METRICS_TABLE_SIZE, GFP_KERNEL); if (!metrics_v0) @@ -2740,18 +2843,16 @@ static ssize_t smu_v13_0_6_get_xcp_metrics(struct smu_context *smu, int xcp_id, static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table) { struct smu_table_context *smu_table = &smu->smu_table; - struct gpu_metrics_v1_8 *gpu_metrics = - (struct gpu_metrics_v1_8 *)smu_table->gpu_metrics_table; + struct smu_table *tables = smu_table->tables; + struct smu_v13_0_6_gpu_metrics *gpu_metrics; int version = smu_v13_0_6_get_metrics_version(smu); MetricsTableV0_t *metrics_v0 __free(kfree) = NULL; - int ret = 0, xcc_id, inst, i, j, k, idx; struct amdgpu_device *adev = smu->adev; + int ret = 0, xcc_id, inst, i, j; MetricsTableV1_t *metrics_v1; MetricsTableV2_t *metrics_v2; - struct amdgpu_xcp *xcp; u16 link_width_level; u8 num_jpeg_rings; - u32 inst_mask; bool per_inst; metrics_v0 = kzalloc(METRICS_TABLE_SIZE, GFP_KERNEL); @@ -2759,16 +2860,20 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table if (ret) return ret; - if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == - IP_VERSION(13, 0, 12) && - smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS))) - return smu_v13_0_12_get_gpu_metrics(smu, table, metrics_v0); + metrics_v2 = (MetricsTableV2_t *)metrics_v0; + gpu_metrics = (struct smu_v13_0_6_gpu_metrics + *)(tables[SMU_TABLE_SMU_METRICS].cache.buffer); + + if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 12) && + smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS))) { + smu_v13_0_12_get_gpu_metrics(smu, table, metrics_v0, + gpu_metrics); + goto fill; + } metrics_v1 = (MetricsTableV1_t *)metrics_v0; metrics_v2 = (MetricsTableV2_t *)metrics_v0; - smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 8); - gpu_metrics->temperature_hotspot = SMUQ10_ROUND(GET_METRIC_FIELD(MaxSocketTemperature, version)); /* Individual HBM stack temperature is not reported */ @@ -2889,55 +2994,49 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table gpu_metrics->xgmi_link_status[j] = ret; } - gpu_metrics->num_partition = adev->xcp_mgr->num_xcps; - per_inst = smu_v13_0_6_cap_supported(smu, SMU_CAP(PER_INST_METRICS)); num_jpeg_rings = AMDGPU_MAX_JPEG_RINGS_4_0_3; - 
for_each_xcp(adev->xcp_mgr, xcp, i) { - amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_VCN, &inst_mask); - idx = 0; - for_each_inst(k, inst_mask) { - /* Both JPEG and VCN has same instances */ - inst = GET_INST(VCN, k); - - for (j = 0; j < num_jpeg_rings; ++j) { - gpu_metrics->xcp_stats[i].jpeg_busy - [(idx * num_jpeg_rings) + j] = - SMUQ10_ROUND(GET_METRIC_FIELD(JpegBusy, version) - [(inst * num_jpeg_rings) + j]); - } - gpu_metrics->xcp_stats[i].vcn_busy[idx] = - SMUQ10_ROUND(GET_METRIC_FIELD(VcnBusy, version)[inst]); - idx++; - - } + for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { + inst = GET_INST(JPEG, i); + for (j = 0; j < num_jpeg_rings; ++j) + gpu_metrics->jpeg_busy[(i * num_jpeg_rings) + j] = + SMUQ10_ROUND(GET_METRIC_FIELD( + JpegBusy, + version)[(inst * num_jpeg_rings) + j]); + } + for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { + inst = GET_INST(VCN, i); + gpu_metrics->vcn_busy[i] = + SMUQ10_ROUND(GET_METRIC_FIELD(VcnBusy, version)[inst]); + } - if (per_inst) { - amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_GFX, &inst_mask); - idx = 0; - for_each_inst(k, inst_mask) { - inst = GET_INST(GC, k); - gpu_metrics->xcp_stats[i].gfx_busy_inst[idx] = - SMUQ10_ROUND(GET_GPU_METRIC_FIELD(GfxBusy, version)[inst]); - gpu_metrics->xcp_stats[i].gfx_busy_acc[idx] = - SMUQ10_ROUND(GET_GPU_METRIC_FIELD(GfxBusyAcc, - version)[inst]); - if (smu_v13_0_6_cap_supported(smu, SMU_CAP(HST_LIMIT_METRICS))) { - gpu_metrics->xcp_stats[i].gfx_below_host_limit_ppt_acc[idx] = - SMUQ10_ROUND - (metrics_v0->GfxclkBelowHostLimitPptAcc[inst]); - gpu_metrics->xcp_stats[i].gfx_below_host_limit_thm_acc[idx] = - SMUQ10_ROUND - (metrics_v0->GfxclkBelowHostLimitThmAcc[inst]); - gpu_metrics->xcp_stats[i].gfx_low_utilization_acc[idx] = - SMUQ10_ROUND - (metrics_v0->GfxclkLowUtilizationAcc[inst]); - gpu_metrics->xcp_stats[i].gfx_below_host_limit_total_acc[idx] = - SMUQ10_ROUND - (metrics_v0->GfxclkBelowHostLimitTotalAcc[inst]); - } - idx++; + if (per_inst) { + for (i = 0; i < NUM_XCC(adev->gfx.xcc_mask); ++i) { + inst = GET_INST(GC, i); + gpu_metrics->gfx_busy_inst[i] = SMUQ10_ROUND( + GET_GPU_METRIC_FIELD(GfxBusy, version)[inst]); + gpu_metrics->gfx_busy_acc[i] = SMUQ10_ROUND( + GET_GPU_METRIC_FIELD(GfxBusyAcc, + version)[inst]); + if (smu_v13_0_6_cap_supported( + smu, SMU_CAP(HST_LIMIT_METRICS))) { + gpu_metrics->gfx_below_host_limit_ppt_acc + [i] = SMUQ10_ROUND( + metrics_v0->GfxclkBelowHostLimitPptAcc + [inst]); + gpu_metrics->gfx_below_host_limit_thm_acc + [i] = SMUQ10_ROUND( + metrics_v0->GfxclkBelowHostLimitThmAcc + [inst]); + gpu_metrics->gfx_low_utilization_acc + [i] = SMUQ10_ROUND( + metrics_v0 + ->GfxclkLowUtilizationAcc[inst]); + gpu_metrics->gfx_below_host_limit_total_acc + [i] = SMUQ10_ROUND( + metrics_v0->GfxclkBelowHostLimitTotalAcc + [inst]); } } } @@ -2947,7 +3046,8 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table gpu_metrics->firmware_timestamp = GET_METRIC_FIELD(Timestamp, version); - *table = (void *)gpu_metrics; +fill: + *table = tables[SMU_TABLE_SMU_METRICS].cache.buffer; return sizeof(*gpu_metrics); } @@ -3226,6 +3326,24 @@ static int smu_v13_0_6_reset_vcn(struct smu_context *smu, uint32_t inst_mask) return ret; } +static int smu_v13_0_6_ras_send_msg(struct smu_context *smu, enum smu_message_type msg, uint32_t param, uint32_t *read_arg) +{ + int ret; + + switch (msg) { + case SMU_MSG_QueryValidMcaCount: + case SMU_MSG_QueryValidMcaCeCount: + case SMU_MSG_McaBankDumpDW: + case SMU_MSG_McaBankCeDumpDW: + case SMU_MSG_ClearMcaOnRead: + ret = 
smu_cmn_send_smc_msg_with_param(smu, msg, param, read_arg); + break; + default: + ret = -EPERM; + } + + return ret; +} static int smu_v13_0_6_post_init(struct smu_context *smu) { @@ -3863,6 +3981,29 @@ static void smu_v13_0_6_set_temp_funcs(struct smu_context *smu) == IP_VERSION(13, 0, 12)) ? &smu_v13_0_12_temp_funcs : NULL; } +static int smu_v13_0_6_get_ras_smu_drv(struct smu_context *smu, const struct ras_smu_drv **ras_smu_drv) +{ + if (!ras_smu_drv) + return -EINVAL; + + if (amdgpu_sriov_vf(smu->adev)) + return -EOPNOTSUPP; + + if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_HROM_EN_BIT)) + smu_v13_0_6_cap_set(smu, SMU_CAP(RAS_EEPROM)); + + switch (amdgpu_ip_version(smu->adev, MP1_HWIP, 0)) { + case IP_VERSION(13, 0, 12): + *ras_smu_drv = &smu_v13_0_12_ras_smu_drv; + break; + default: + *ras_smu_drv = NULL; + break; + } + + return 0; +} + static const struct pptable_funcs smu_v13_0_6_ppt_funcs = { /* init dpm */ .get_allowed_feature_mask = smu_v13_0_6_get_allowed_feature_mask, @@ -3894,6 +4035,7 @@ static const struct pptable_funcs smu_v13_0_6_ppt_funcs = { .get_enabled_mask = smu_v13_0_6_get_enabled_mask, .feature_is_enabled = smu_cmn_feature_is_enabled, .set_power_limit = smu_v13_0_6_set_power_limit, + .get_ppt_limit = smu_v13_0_6_get_ppt_limit, .set_xgmi_pstate = smu_v13_0_set_xgmi_pstate, .register_irq_handler = smu_v13_0_6_register_irq_handler, .enable_thermal_alert = smu_v13_0_enable_thermal_alert, @@ -3921,6 +4063,8 @@ static const struct pptable_funcs smu_v13_0_6_ppt_funcs = { .reset_sdma = smu_v13_0_6_reset_sdma, .dpm_reset_vcn = smu_v13_0_6_reset_vcn, .post_init = smu_v13_0_6_post_init, + .ras_send_msg = smu_v13_0_6_ras_send_msg, + .get_ras_smu_drv = smu_v13_0_6_get_ras_smu_drv, }; void smu_v13_0_6_set_ppt_funcs(struct smu_context *smu) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h index 7ef5f3e66c27..6cbdd7c5ded9 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h @@ -50,6 +50,9 @@ struct PPTable_t { uint32_t MinLclkDpmRange; uint64_t PublicSerialNumber_AID; uint32_t MaxNodePowerLimit; + uint32_t PPT1Max; + uint32_t PPT1Min; + uint32_t PPT1Default; bool Init; }; @@ -72,9 +75,18 @@ enum smu_v13_0_6_caps { SMU_CAP(PLDM_VERSION), SMU_CAP(TEMP_METRICS), SMU_CAP(NPM_METRICS), + SMU_CAP(RAS_EEPROM), + SMU_CAP(FAST_PPT), SMU_CAP(ALL), }; +#define SMU_13_0_6_NUM_XGMI_LINKS 8 +#define SMU_13_0_6_MAX_GFX_CLKS 8 +#define SMU_13_0_6_MAX_CLKS 4 +#define SMU_13_0_6_MAX_XCC 8 +#define SMU_13_0_6_MAX_VCN 4 +#define SMU_13_0_6_MAX_JPEG 40 + extern void smu_v13_0_6_set_ppt_funcs(struct smu_context *smu); bool smu_v13_0_6_cap_supported(struct smu_context *smu, enum smu_v13_0_6_caps cap); int smu_v13_0_6_get_static_metrics_table(struct smu_context *smu); @@ -87,7 +99,6 @@ size_t smu_v13_0_12_get_system_metrics_size(void); int smu_v13_0_12_setup_driver_pptable(struct smu_context *smu); int smu_v13_0_12_get_smu_metrics_data(struct smu_context *smu, MetricsMember_t member, uint32_t *value); -ssize_t smu_v13_0_12_get_gpu_metrics(struct smu_context *smu, void **table, void *smu_metrics); ssize_t smu_v13_0_12_get_xcp_metrics(struct smu_context *smu, struct amdgpu_xcp *xcp, void *table, void *smu_metrics); @@ -99,4 +110,156 @@ int smu_v13_0_12_get_npm_data(struct smu_context *smu, extern const struct cmn2asic_mapping smu_v13_0_12_feature_mask_map[]; extern const struct cmn2asic_msg_mapping smu_v13_0_12_message_map[]; extern const struct smu_temp_funcs 
smu_v13_0_12_temp_funcs; +extern const struct ras_smu_drv smu_v13_0_12_ras_smu_drv; + +#if defined(SWSMU_CODE_LAYER_L2) +#include "smu_cmn.h" + +/* SMUv 13.0.6 GPU metrics*/ +#define SMU_13_0_6_METRICS_FIELDS(SMU_SCALAR, SMU_ARRAY) \ + SMU_SCALAR(SMU_MATTR(TEMPERATURE_HOTSPOT), SMU_MUNIT(TEMP_1), \ + SMU_MTYPE(U16), temperature_hotspot); \ + SMU_SCALAR(SMU_MATTR(TEMPERATURE_MEM), SMU_MUNIT(TEMP_1), \ + SMU_MTYPE(U16), temperature_mem); \ + SMU_SCALAR(SMU_MATTR(TEMPERATURE_VRSOC), SMU_MUNIT(TEMP_1), \ + SMU_MTYPE(U16), temperature_vrsoc); \ + SMU_SCALAR(SMU_MATTR(CURR_SOCKET_POWER), SMU_MUNIT(POWER_1), \ + SMU_MTYPE(U16), curr_socket_power); \ + SMU_SCALAR(SMU_MATTR(AVERAGE_GFX_ACTIVITY), SMU_MUNIT(PERCENT), \ + SMU_MTYPE(U16), average_gfx_activity); \ + SMU_SCALAR(SMU_MATTR(AVERAGE_UMC_ACTIVITY), SMU_MUNIT(PERCENT), \ + SMU_MTYPE(U16), average_umc_activity); \ + SMU_SCALAR(SMU_MATTR(MEM_MAX_BANDWIDTH), SMU_MUNIT(BW_1), \ + SMU_MTYPE(U64), mem_max_bandwidth); \ + SMU_SCALAR(SMU_MATTR(ENERGY_ACCUMULATOR), SMU_MUNIT(NONE), \ + SMU_MTYPE(U64), energy_accumulator); \ + SMU_SCALAR(SMU_MATTR(SYSTEM_CLOCK_COUNTER), SMU_MUNIT(TIME_1), \ + SMU_MTYPE(U64), system_clock_counter); \ + SMU_SCALAR(SMU_MATTR(ACCUMULATION_COUNTER), SMU_MUNIT(NONE), \ + SMU_MTYPE(U32), accumulation_counter); \ + SMU_SCALAR(SMU_MATTR(PROCHOT_RESIDENCY_ACC), SMU_MUNIT(NONE), \ + SMU_MTYPE(U32), prochot_residency_acc); \ + SMU_SCALAR(SMU_MATTR(PPT_RESIDENCY_ACC), SMU_MUNIT(NONE), \ + SMU_MTYPE(U32), ppt_residency_acc); \ + SMU_SCALAR(SMU_MATTR(SOCKET_THM_RESIDENCY_ACC), SMU_MUNIT(NONE), \ + SMU_MTYPE(U32), socket_thm_residency_acc); \ + SMU_SCALAR(SMU_MATTR(VR_THM_RESIDENCY_ACC), SMU_MUNIT(NONE), \ + SMU_MTYPE(U32), vr_thm_residency_acc); \ + SMU_SCALAR(SMU_MATTR(HBM_THM_RESIDENCY_ACC), SMU_MUNIT(NONE), \ + SMU_MTYPE(U32), hbm_thm_residency_acc); \ + SMU_SCALAR(SMU_MATTR(GFXCLK_LOCK_STATUS), SMU_MUNIT(NONE), \ + SMU_MTYPE(U32), gfxclk_lock_status); \ + SMU_SCALAR(SMU_MATTR(PCIE_LINK_WIDTH), SMU_MUNIT(NONE), \ + SMU_MTYPE(U16), pcie_link_width); \ + SMU_SCALAR(SMU_MATTR(PCIE_LINK_SPEED), SMU_MUNIT(SPEED_2), \ + SMU_MTYPE(U16), pcie_link_speed); \ + SMU_SCALAR(SMU_MATTR(XGMI_LINK_WIDTH), SMU_MUNIT(NONE), \ + SMU_MTYPE(U16), xgmi_link_width); \ + SMU_SCALAR(SMU_MATTR(XGMI_LINK_SPEED), SMU_MUNIT(SPEED_1), \ + SMU_MTYPE(U16), xgmi_link_speed); \ + SMU_SCALAR(SMU_MATTR(GFX_ACTIVITY_ACC), SMU_MUNIT(PERCENT), \ + SMU_MTYPE(U32), gfx_activity_acc); \ + SMU_SCALAR(SMU_MATTR(MEM_ACTIVITY_ACC), SMU_MUNIT(PERCENT), \ + SMU_MTYPE(U32), mem_activity_acc); \ + SMU_SCALAR(SMU_MATTR(PCIE_BANDWIDTH_ACC), SMU_MUNIT(PERCENT), \ + SMU_MTYPE(U64), pcie_bandwidth_acc); \ + SMU_SCALAR(SMU_MATTR(PCIE_BANDWIDTH_INST), SMU_MUNIT(BW_1), \ + SMU_MTYPE(U64), pcie_bandwidth_inst); \ + SMU_SCALAR(SMU_MATTR(PCIE_L0_TO_RECOV_COUNT_ACC), SMU_MUNIT(NONE), \ + SMU_MTYPE(U64), pcie_l0_to_recov_count_acc); \ + SMU_SCALAR(SMU_MATTR(PCIE_REPLAY_COUNT_ACC), SMU_MUNIT(NONE), \ + SMU_MTYPE(U64), pcie_replay_count_acc); \ + SMU_SCALAR(SMU_MATTR(PCIE_REPLAY_ROVER_COUNT_ACC), SMU_MUNIT(NONE), \ + SMU_MTYPE(U64), pcie_replay_rover_count_acc); \ + SMU_SCALAR(SMU_MATTR(PCIE_NAK_SENT_COUNT_ACC), SMU_MUNIT(NONE), \ + SMU_MTYPE(U32), pcie_nak_sent_count_acc); \ + SMU_SCALAR(SMU_MATTR(PCIE_NAK_RCVD_COUNT_ACC), SMU_MUNIT(NONE), \ + SMU_MTYPE(U32), pcie_nak_rcvd_count_acc); \ + SMU_ARRAY(SMU_MATTR(XGMI_READ_DATA_ACC), SMU_MUNIT(DATA_1), \ + SMU_MTYPE(U64), xgmi_read_data_acc, \ + SMU_13_0_6_NUM_XGMI_LINKS); \ + SMU_ARRAY(SMU_MATTR(XGMI_WRITE_DATA_ACC), SMU_MUNIT(DATA_1), \ + 
+ SMU_ARRAY(SMU_MATTR(XGMI_WRITE_DATA_ACC), SMU_MUNIT(DATA_1), \ + SMU_MTYPE(U64), xgmi_write_data_acc, \ + SMU_13_0_6_NUM_XGMI_LINKS); \ + SMU_ARRAY(SMU_MATTR(XGMI_LINK_STATUS), SMU_MUNIT(NONE), \ + SMU_MTYPE(U16), xgmi_link_status, \ + SMU_13_0_6_NUM_XGMI_LINKS); \ + SMU_SCALAR(SMU_MATTR(FIRMWARE_TIMESTAMP), SMU_MUNIT(TIME_2), \ + SMU_MTYPE(U64), firmware_timestamp); \ + SMU_ARRAY(SMU_MATTR(CURRENT_GFXCLK), SMU_MUNIT(CLOCK_1), \ + SMU_MTYPE(U16), current_gfxclk, SMU_13_0_6_MAX_GFX_CLKS); \ + SMU_ARRAY(SMU_MATTR(CURRENT_SOCCLK), SMU_MUNIT(CLOCK_1), \ + SMU_MTYPE(U16), current_socclk, SMU_13_0_6_MAX_CLKS); \ + SMU_ARRAY(SMU_MATTR(CURRENT_VCLK0), SMU_MUNIT(CLOCK_1), \ + SMU_MTYPE(U16), current_vclk0, SMU_13_0_6_MAX_CLKS); \ + SMU_ARRAY(SMU_MATTR(CURRENT_DCLK0), SMU_MUNIT(CLOCK_1), \ + SMU_MTYPE(U16), current_dclk0, SMU_13_0_6_MAX_CLKS); \ + SMU_SCALAR(SMU_MATTR(CURRENT_UCLK), SMU_MUNIT(CLOCK_1), \ + SMU_MTYPE(U16), current_uclk); \ + SMU_SCALAR(SMU_MATTR(PCIE_LC_PERF_OTHER_END_RECOVERY), \ + SMU_MUNIT(NONE), SMU_MTYPE(U32), \ + pcie_lc_perf_other_end_recovery); \ + SMU_ARRAY(SMU_MATTR(GFX_BUSY_INST), SMU_MUNIT(PERCENT), \ + SMU_MTYPE(U32), gfx_busy_inst, SMU_13_0_6_MAX_XCC); \ + SMU_ARRAY(SMU_MATTR(JPEG_BUSY), SMU_MUNIT(PERCENT), SMU_MTYPE(U16), \ + jpeg_busy, SMU_13_0_6_MAX_JPEG); \ + SMU_ARRAY(SMU_MATTR(VCN_BUSY), SMU_MUNIT(PERCENT), SMU_MTYPE(U16), \ + vcn_busy, SMU_13_0_6_MAX_VCN); \ + SMU_ARRAY(SMU_MATTR(GFX_BUSY_ACC), SMU_MUNIT(PERCENT), SMU_MTYPE(U64), \ + gfx_busy_acc, SMU_13_0_6_MAX_XCC); \ + SMU_ARRAY(SMU_MATTR(GFX_BELOW_HOST_LIMIT_PPT_ACC), SMU_MUNIT(NONE), \ + SMU_MTYPE(U64), gfx_below_host_limit_ppt_acc, \ + SMU_13_0_6_MAX_XCC); \ + SMU_ARRAY(SMU_MATTR(GFX_BELOW_HOST_LIMIT_THM_ACC), SMU_MUNIT(NONE), \ + SMU_MTYPE(U64), gfx_below_host_limit_thm_acc, \ + SMU_13_0_6_MAX_XCC); \ + SMU_ARRAY(SMU_MATTR(GFX_LOW_UTILIZATION_ACC), SMU_MUNIT(NONE), \ + SMU_MTYPE(U64), gfx_low_utilization_acc, \ + SMU_13_0_6_MAX_XCC); \ + SMU_ARRAY(SMU_MATTR(GFX_BELOW_HOST_LIMIT_TOTAL_ACC), SMU_MUNIT(NONE), \ + SMU_MTYPE(U64), gfx_below_host_limit_total_acc, \ + SMU_13_0_6_MAX_XCC); + +DECLARE_SMU_METRICS_CLASS(smu_v13_0_6_gpu_metrics, SMU_13_0_6_METRICS_FIELDS); +void smu_v13_0_12_get_gpu_metrics(struct smu_context *smu, void **table, + void *smu_metrics, + struct smu_v13_0_6_gpu_metrics *gpu_metrics); + +#define SMU_13_0_6_PARTITION_METRICS_FIELDS(SMU_SCALAR, SMU_ARRAY) \ + SMU_ARRAY(SMU_MATTR(CURRENT_GFXCLK), SMU_MUNIT(CLOCK_1), \ + SMU_MTYPE(U16), current_gfxclk, SMU_13_0_6_MAX_XCC); \ + SMU_ARRAY(SMU_MATTR(CURRENT_SOCCLK), SMU_MUNIT(CLOCK_1), \ + SMU_MTYPE(U16), current_socclk, SMU_13_0_6_MAX_CLKS); \ + SMU_ARRAY(SMU_MATTR(CURRENT_VCLK0), SMU_MUNIT(CLOCK_1), \ + SMU_MTYPE(U16), current_vclk0, SMU_13_0_6_MAX_CLKS); \ + SMU_ARRAY(SMU_MATTR(CURRENT_DCLK0), SMU_MUNIT(CLOCK_1), \ + SMU_MTYPE(U16), current_dclk0, SMU_13_0_6_MAX_CLKS); \ + SMU_SCALAR(SMU_MATTR(CURRENT_UCLK), SMU_MUNIT(CLOCK_1), \ + SMU_MTYPE(U16), current_uclk); \ + SMU_ARRAY(SMU_MATTR(GFX_BUSY_INST), SMU_MUNIT(PERCENT), \ + SMU_MTYPE(U32), gfx_busy_inst, SMU_13_0_6_MAX_XCC); \ + SMU_ARRAY(SMU_MATTR(JPEG_BUSY), SMU_MUNIT(PERCENT), SMU_MTYPE(U16), \ + jpeg_busy, SMU_13_0_6_MAX_JPEG); \ + SMU_ARRAY(SMU_MATTR(VCN_BUSY), SMU_MUNIT(PERCENT), SMU_MTYPE(U16), \ + vcn_busy, SMU_13_0_6_MAX_VCN); \ + SMU_ARRAY(SMU_MATTR(GFX_BUSY_ACC), SMU_MUNIT(PERCENT), SMU_MTYPE(U64), \ + gfx_busy_acc, SMU_13_0_6_MAX_XCC); \ + SMU_ARRAY(SMU_MATTR(GFX_BELOW_HOST_LIMIT_PPT_ACC), SMU_MUNIT(NONE), \ + SMU_MTYPE(U64), gfx_below_host_limit_ppt_acc, \ + SMU_13_0_6_MAX_XCC); \ + SMU_ARRAY(SMU_MATTR(GFX_BELOW_HOST_LIMIT_THM_ACC), SMU_MUNIT(NONE), \ + SMU_MTYPE(U64), gfx_below_host_limit_thm_acc, \ + SMU_13_0_6_MAX_XCC); \ + SMU_ARRAY(SMU_MATTR(GFX_LOW_UTILIZATION_ACC), SMU_MUNIT(NONE), \ + SMU_MTYPE(U64), gfx_low_utilization_acc, \ + SMU_13_0_6_MAX_XCC); \ + SMU_ARRAY(SMU_MATTR(GFX_BELOW_HOST_LIMIT_TOTAL_ACC), SMU_MUNIT(NONE), \ + SMU_MTYPE(U64), gfx_below_host_limit_total_acc, \ + SMU_13_0_6_MAX_XCC); + +DECLARE_SMU_METRICS_CLASS(smu_v13_0_6_partition_metrics, + SMU_13_0_6_PARTITION_METRICS_FIELDS); + +#endif /* SWSMU_CODE_LAYER_L2 */ + #endif
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c index c96fa5e49ed6..a3fc35b9011e 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c @@ -1184,15 +1184,16 @@ static int smu_v13_0_7_print_clk_levels(struct smu_context *smu, struct smu_13_0_dpm_table *single_dpm_table; struct smu_13_0_pcie_table *pcie_table; uint32_t gen_speed, lane_width; - int i, curr_freq, size = 0; + int i, curr_freq, size = 0, start_offset = 0; int32_t min_value, max_value; int ret = 0; smu_cmn_get_sysfs_buf(&buf, &size); + start_offset = size; if (amdgpu_ras_intr_triggered()) { size += sysfs_emit_at(buf, size, "unavailable\n"); - return size; + return size - start_offset; } switch (clk_type) { @@ -1523,7 +1524,7 @@ static int smu_v13_0_7_print_clk_levels(struct smu_context *smu, break; } - return size; + return size - start_offset; } static int smu_v13_0_7_od_restore_table_single(struct smu_context *smu, long input)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c index 73b4506ef5a8..5d7e671fa3c3 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c @@ -1041,12 +1041,13 @@ static uint32_t yellow_carp_get_umd_pstate_clk_default(struct smu_context *smu, static int yellow_carp_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf) { - int i, idx, size = 0, ret = 0; + int i, idx, size = 0, ret = 0, start_offset = 0; uint32_t cur_value = 0, value = 0, count = 0; uint32_t min, max; uint32_t clk_limit = 0; smu_cmn_get_sysfs_buf(&buf, &size); + start_offset = size; switch (clk_type) { case SMU_OD_SCLK: @@ -1111,7 +1112,7 @@ static int yellow_carp_print_clk_levels(struct smu_context *smu, } print_clk_out: - return size; + return size - start_offset; } static int yellow_carp_force_clk_levels(struct smu_context *smu,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c index fe00c84b1cc6..b1bd946d8e30 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c @@ -1132,11 +1132,12 @@ static int smu_v14_0_common_get_dpm_level_count(struct smu_context *smu, static int smu_v14_0_0_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf) { - int i, idx, ret = 0, size = 0; + int i, idx, ret = 0, size = 0, start_offset = 0; uint32_t cur_value = 0, value = 0, count = 0; uint32_t min, max; smu_cmn_get_sysfs_buf(&buf, &size); + start_offset = size; switch (clk_type) { case SMU_OD_SCLK: @@ -1202,7 +1203,7 @@ static int smu_v14_0_0_print_clk_levels(struct smu_context *smu, break; } - return size; + return size - start_offset; } static int smu_v14_0_0_set_soft_freq_limited_range(struct smu_context *smu,
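Note: DECLARE_SMU_METRICS_CLASS (defined in smu_cmn.h later in this diff) expands each field list above into a packed struct plus a generated <classname>_init() helper. A sketch of how the relocated smu_v13_0_12_get_gpu_metrics might be driven under its new signature; the wrapper name, revision numbers, and table handling here are illustrative assumptions, not code from the patch:

static void example_fill_gpu_metrics(struct smu_context *smu, void **table,
				     void *smu_metrics)
{
	struct smu_v13_0_6_gpu_metrics gpu_metrics;

	/* Generated helper: encodes every *_ftype descriptor via
	 * AMDGPU_METRICS_ENC_ATTR() and presets all values to 0xFF bytes
	 * so untouched fields read back as invalid. The revision numbers
	 * are placeholders. */
	smu_v13_0_6_gpu_metrics_init(&gpu_metrics, 1, 0);

	/* The v13.0.12 backend now fills a caller-provided object instead
	 * of returning an allocated buffer. */
	smu_v13_0_12_get_gpu_metrics(smu, table, smu_metrics, &gpu_metrics);
}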
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c index 086501cc5213..2cea688c604f 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c @@ -1056,15 +1056,16 @@ static int smu_v14_0_2_print_clk_levels(struct smu_context *smu, struct smu_14_0_dpm_table *single_dpm_table; struct smu_14_0_pcie_table *pcie_table; uint32_t gen_speed, lane_width; - int i, curr_freq, size = 0; + int i, curr_freq, size = 0, start_offset = 0; int32_t min_value, max_value; int ret = 0; smu_cmn_get_sysfs_buf(&buf, &size); + start_offset = size; if (amdgpu_ras_intr_triggered()) { size += sysfs_emit_at(buf, size, "unavailable\n"); - return size; + return size - start_offset; } switch (clk_type) { @@ -1374,7 +1375,7 @@ static int smu_v14_0_2_print_clk_levels(struct smu_context *smu, break; } - return size; + return size - start_offset; } static int smu_v14_0_2_force_clk_levels(struct smu_context *smu,
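Note: the four print_clk_levels changes above are the same fix. smu_cmn_get_sysfs_buf() may hand back a buffer pointer and a nonzero starting offset when a sysfs page is being continued, so the function must return only the byte count it emitted, not the absolute offset into the page. A condensed illustration of the pattern (names abbreviated, not a verbatim excerpt):

static int example_print_levels(char *buf)
{
	int size = 0, start_offset;

	/* May rebase buf/size to continue a partially filled sysfs page. */
	smu_cmn_get_sysfs_buf(&buf, &size);
	start_offset = size;

	/* Emit at the current offset; sample clock line is illustrative. */
	size += sysfs_emit_at(buf, size, "0: 500Mhz *\n");

	/* Return only the bytes emitted by this call, not the absolute
	 * offset into the page. */
	return size - start_offset;
}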
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c index a8961a8f5c42..4040ff926544 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c @@ -164,9 +164,13 @@ static void __smu_cmn_reg_print_error(struct smu_context *smu, msg_index, param, message); break; case SMU_RESP_BUSY_OTHER: - dev_err_ratelimited(adev->dev, - "SMU: I'm very busy for your command: index:%d param:0x%08X message:%s", - msg_index, param, message); + /* It is normal for SMU_MSG_GetBadPageCount to return busy, + * so don't print an error in this case. + */ + if (msg != SMU_MSG_GetBadPageCount) + dev_err_ratelimited(adev->dev, + "SMU: I'm very busy for your command: index:%d param:0x%08X message:%s", + msg_index, param, message); break; case SMU_RESP_DEBUG_END: dev_err_ratelimited(adev->dev, @@ -980,7 +984,7 @@ int smu_cmn_update_table(struct smu_context *smu, * Flush hdp cache: to guard the content seen by * GPU is consistent with CPU. */ - amdgpu_asic_flush_hdp(adev, NULL); + amdgpu_hdp_flush(adev, NULL); } ret = smu_cmn_send_smc_msg_with_param(smu, drv2smu ? @@ -992,7 +996,7 @@ int smu_cmn_update_table(struct smu_context *smu, return ret; if (!drv2smu) { - amdgpu_asic_invalidate_hdp(adev, NULL); + amdgpu_hdp_invalidate(adev, NULL); memcpy(table_data, table->cpu_addr, table_size); }
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h index 0ae91c8b6d72..8d7c4814c68f 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h +++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h @@ -202,5 +202,72 @@ void smu_cmn_get_backend_workload_mask(struct smu_context *smu, u32 workload_mask, u32 *backend_workload_mask); +/* SMU gpu metrics */ + +/* Attribute ID mapping */ +#define SMU_MATTR(X) AMDGPU_METRICS_ATTR_ID_##X +/* Type ID mapping */ +#define SMU_MTYPE(X) AMDGPU_METRICS_TYPE_##X +/* Unit ID mapping */ +#define SMU_MUNIT(X) AMDGPU_METRICS_UNIT_##X + +/* Map TYPEID to C type */ +#define SMU_CTYPE(TYPEID) SMU_CTYPE_##TYPEID + +#define SMU_CTYPE_AMDGPU_METRICS_TYPE_U8 u8 +#define SMU_CTYPE_AMDGPU_METRICS_TYPE_S8 s8 +#define SMU_CTYPE_AMDGPU_METRICS_TYPE_U16 u16 +#define SMU_CTYPE_AMDGPU_METRICS_TYPE_S16 s16 +#define SMU_CTYPE_AMDGPU_METRICS_TYPE_U32 u32 +#define SMU_CTYPE_AMDGPU_METRICS_TYPE_S32 s32 +#define SMU_CTYPE_AMDGPU_METRICS_TYPE_U64 u64 +#define SMU_CTYPE_AMDGPU_METRICS_TYPE_S64 s64 + +/* struct members */ +#define SMU_METRICS_SCALAR(ID, UNIT, TYPEID, NAME) \ + u64 NAME##_ftype; \ + SMU_CTYPE(TYPEID) NAME + +#define SMU_METRICS_ARRAY(ID, UNIT, TYPEID, NAME, SIZE) \ + u64 NAME##_ftype; \ + SMU_CTYPE(TYPEID) NAME[SIZE] + +/* Init functions for scalar/array fields - init to 0xFFs */ +#define SMU_METRICS_INIT_SCALAR(ID, UNIT, TYPEID, NAME) \ + do { \ + obj->NAME##_ftype = \ + AMDGPU_METRICS_ENC_ATTR(UNIT, TYPEID, ID, 1); \ + obj->NAME = (SMU_CTYPE(TYPEID)) ~0; \ + count++; \ + } while (0) + +#define SMU_METRICS_INIT_ARRAY(ID, UNIT, TYPEID, NAME, SIZE) \ + do { \ + obj->NAME##_ftype = \ + AMDGPU_METRICS_ENC_ATTR(UNIT, TYPEID, ID, SIZE); \ + memset(obj->NAME, 0xFF, sizeof(obj->NAME)); \ + count++; \ + } while (0) + +/* Declare Metrics Class and Template object */ +#define DECLARE_SMU_METRICS_CLASS(CLASSNAME, SMU_METRICS_FIELD_LIST) \ + struct __packed CLASSNAME { \ + struct metrics_table_header header; \ + int attr_count; \ + SMU_METRICS_FIELD_LIST(SMU_METRICS_SCALAR, SMU_METRICS_ARRAY); \ + }; \ + static inline void CLASSNAME##_init(struct CLASSNAME *obj, \ + uint8_t frev, uint8_t crev) \ + { \ + int count = 0; \ + memset(obj, 0xFF, sizeof(*obj)); \ + obj->header.format_revision = frev; \ + obj->header.content_revision = crev; \ + obj->header.structure_size = sizeof(*obj); \ + SMU_METRICS_FIELD_LIST(SMU_METRICS_INIT_SCALAR, \ + SMU_METRICS_INIT_ARRAY) \ + obj->attr_count = count; \ + } + #endif #endif
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h b/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h index c09ecf1a68a0..34f6b4b1c3ba 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h +++ b/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h @@ -100,6 +100,7 @@ #define smu_is_asic_wbrf_supported(smu) smu_ppt_funcs(is_asic_wbrf_supported, false, smu) #define smu_enable_uclk_shadow(smu, enable) smu_ppt_funcs(enable_uclk_shadow, 0, smu, enable) #define smu_set_wbrf_exclusion_ranges(smu, freq_band_range) smu_ppt_funcs(set_wbrf_exclusion_ranges, -EOPNOTSUPP, smu, freq_band_range) +#define smu_get_ras_smu_drv(smu, ras_smu_drv) smu_ppt_funcs(get_ras_smu_drv, -EOPNOTSUPP, smu, ras_smu_drv) #endif #endif
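Note: the smu_cmn.h machinery above is a classic X-macro: the same field list is expanded once to emit struct members and once to emit init statements. A hand-written illustration of what one small list would produce (not generated output from the patch); the demo_metrics name and two-field list are invented for the example:

/* Given a hypothetical list
 *   #define DEMO_FIELDS(SMU_SCALAR, SMU_ARRAY) \
 *       SMU_SCALAR(SMU_MATTR(TEMPERATURE_MEM), SMU_MUNIT(TEMP_1), \
 *                  SMU_MTYPE(U16), temperature_mem); \
 *       SMU_ARRAY(SMU_MATTR(VCN_BUSY), SMU_MUNIT(PERCENT), \
 *                 SMU_MTYPE(U16), vcn_busy, 4);
 * DECLARE_SMU_METRICS_CLASS(demo_metrics, DEMO_FIELDS) expands to roughly: */
struct __packed demo_metrics {
	struct metrics_table_header header;
	int attr_count;
	u64 temperature_mem_ftype;	/* encoded unit/type/id/size descriptor */
	u16 temperature_mem;
	u64 vcn_busy_ftype;
	u16 vcn_busy[4];
};
/* ...plus a demo_metrics_init() helper that encodes each *_ftype via
 * AMDGPU_METRICS_ENC_ATTR(), presets all values to 0xFF bytes, and records
 * the number of fields in attr_count. */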
