Diffstat (limited to 'drivers/gpu/drm/amd/pm/swsmu')
 drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c                          | 162
 drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h                      |  44
 drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h |   1
 drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_pmfw.h       |  16
 drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h        |  11
 drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h       |   4
 drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h                       |   3
 drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h                       |   5
 drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h                       |  25
 drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c                  |   2
 drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c                    |  39
 drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c            |  28
 drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c                     |  39
 drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c                   |  37
 drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c                    |  37
 drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c                 |  76
 drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c                     | 178
 drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c               | 106
 drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c              | 136
 drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c               | 368
 drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h               |  35
 drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c               | 105
 drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c                     |  11
 drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c               |  60
 drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c               |  90
 drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c                             |  77
 drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h                             |  27
 27 files changed, 1130 insertions(+), 592 deletions(-)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index 46cce1d2aaf3..756afe78a6e5 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -76,6 +76,7 @@ static void smu_power_profile_mode_get(struct smu_context *smu,
enum PP_SMC_POWER_PROFILE profile_mode);
static void smu_power_profile_mode_put(struct smu_context *smu,
enum PP_SMC_POWER_PROFILE profile_mode);
+static enum smu_clk_type smu_convert_to_smuclk(enum pp_clock_type type);
static int smu_sys_get_pp_feature_mask(void *handle,
char *buf)
@@ -134,12 +135,17 @@ int smu_get_status_gfxoff(struct smu_context *smu, uint32_t *value)
}
int smu_set_soft_freq_range(struct smu_context *smu,
- enum smu_clk_type clk_type,
+ enum pp_clock_type type,
uint32_t min,
uint32_t max)
{
+ enum smu_clk_type clk_type;
int ret = 0;
+ clk_type = smu_convert_to_smuclk(type);
+ if (clk_type == SMU_CLK_COUNT)
+ return -EINVAL;
+
if (smu->ppt_funcs->set_soft_freq_limited_range)
ret = smu->ppt_funcs->set_soft_freq_limited_range(smu,
clk_type,
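
The exported helper now takes the public pp_clock_type and converts it internally, with SMU_CLK_COUNT doubling as a "no mapping" sentinel that is rejected before any hardware is touched. A minimal, compilable sketch of that convert-and-validate pattern; the enum values and names below are illustrative stand-ins, not the driver's full tables:

#include <errno.h>

enum pp_clk  { PP_SCLK_X, PP_MCLK_X, PP_ISPICLK_X };
enum smu_clk { SMU_SCLK_X, SMU_MCLK_X, SMU_ISPICLK_X, SMU_CLK_COUNT_X };

static enum smu_clk to_smuclk(enum pp_clk t)
{
	switch (t) {
	case PP_SCLK_X:    return SMU_SCLK_X;
	case PP_MCLK_X:    return SMU_MCLK_X;
	case PP_ISPICLK_X: return SMU_ISPICLK_X;
	default:           return SMU_CLK_COUNT_X; /* sentinel: unknown type */
	}
}

int set_soft_freq_range(enum pp_clk t, unsigned int min, unsigned int max)
{
	enum smu_clk clk = to_smuclk(t);

	if (clk == SMU_CLK_COUNT_X)	/* reject before touching hardware */
		return -EINVAL;
	/* ... forward (clk, min, max) to the ASIC-specific handler ... */
	return 0;
}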
@@ -307,6 +313,26 @@ static int smu_dpm_set_vpe_enable(struct smu_context *smu,
return ret;
}
+static int smu_dpm_set_isp_enable(struct smu_context *smu,
+ bool enable)
+{
+ struct smu_power_context *smu_power = &smu->smu_power;
+ struct smu_power_gate *power_gate = &smu_power->power_gate;
+ int ret;
+
+ if (!smu->ppt_funcs->dpm_set_isp_enable)
+ return 0;
+
+ if (atomic_read(&power_gate->isp_gated) ^ enable)
+ return 0;
+
+ ret = smu->ppt_funcs->dpm_set_isp_enable(smu, enable);
+ if (!ret)
+ atomic_set(&power_gate->isp_gated, !enable);
+
+ return ret;
+}
+
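
The new ISP hook mirrors the existing VCN/JPEG/VPE gating helpers: isp_gated is 1 while the engine is power-gated, so "gated ^ enable" being true means the engine is already in the requested state and the firmware message can be skipped. A self-contained model of that bookkeeping (all names here are stand-ins):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int isp_gated = 1;	/* boot state: gated */

static int fw_set_isp_enable(bool enable)	/* stand-in for the SMU message */
{
	printf("SMU: %s ISP\n", enable ? "ungate" : "gate");
	return 0;
}

static int dpm_set_isp_enable(bool enable)
{
	if (atomic_load(&isp_gated) ^ enable)
		return 0;		/* already in the requested state */

	int ret = fw_set_isp_enable(enable);
	if (!ret)
		atomic_store(&isp_gated, !enable);
	return ret;
}

int main(void)
{
	dpm_set_isp_enable(true);	/* ungates */
	dpm_set_isp_enable(true);	/* no-op: already ungated */
	dpm_set_isp_enable(false);	/* gates again */
}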
static int smu_dpm_set_umsch_mm_enable(struct smu_context *smu,
bool enable)
{
@@ -408,6 +434,12 @@ static int smu_dpm_set_power_gate(void *handle,
dev_err(smu->adev->dev, "Failed to power %s VPE!\n",
gate ? "gate" : "ungate");
break;
+ case AMD_IP_BLOCK_TYPE_ISP:
+ ret = smu_dpm_set_isp_enable(smu, !gate);
+ if (ret)
+ dev_err(smu->adev->dev, "Failed to power %s ISP!\n",
+ gate ? "gate" : "ungate");
+ break;
default:
dev_err(smu->adev->dev, "Unsupported block type!\n");
return -EINVAL;
@@ -1004,6 +1036,21 @@ static int smu_fini_fb_allocations(struct smu_context *smu)
return 0;
}
+static void smu_update_gpu_addresses(struct smu_context *smu)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct smu_table *pm_status_table = smu_table->tables + SMU_TABLE_PMSTATUSLOG;
+ struct smu_table *driver_table = &(smu_table->driver_table);
+ struct smu_table *dummy_read_1_table = &smu_table->dummy_read_1_table;
+
+ if (pm_status_table->bo)
+ pm_status_table->mc_address = amdgpu_bo_fb_aper_addr(pm_status_table->bo);
+ if (driver_table->bo)
+ driver_table->mc_address = amdgpu_bo_fb_aper_addr(driver_table->bo);
+ if (dummy_read_1_table->bo)
+ dummy_read_1_table->mc_address = amdgpu_bo_fb_aper_addr(dummy_read_1_table->bo);
+}
+
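
After an XGMI-based VF migration the framebuffer can land at a different host address, so the MC addresses cached for firmware-visible tables are recomputed from their buffer objects before the SMC is restarted. A sketch of that idea; the types and the aper_addr() helper are illustrative stand-ins for amdgpu_bo_fb_aper_addr():

#include <stdint.h>
#include <stddef.h>

struct bo { uint64_t fb_offset; };

struct fw_table {
	struct bo *bo;
	uint64_t mc_address;	/* cached address handed to firmware */
};

static uint64_t aper_addr(const struct bo *bo)
{
	const uint64_t fb_aper_base = 0x8000000000ull; /* assumed base */
	return fb_aper_base + bo->fb_offset;
}

static void update_gpu_addresses(struct fw_table *tables, size_t n)
{
	for (size_t i = 0; i < n; i++)
		if (tables[i].bo)	/* skip tables never allocated */
			tables[i].mc_address = aper_addr(tables[i].bo);
}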
/**
* smu_alloc_memory_pool - allocate memory pool in the system memory
*
@@ -1285,6 +1332,7 @@ static int smu_sw_init(struct amdgpu_ip_block *ip_block)
atomic_set(&smu->smu_power.power_gate.vcn_gated[i], 1);
atomic_set(&smu->smu_power.power_gate.jpeg_gated, 1);
atomic_set(&smu->smu_power.power_gate.vpe_gated, 1);
+ atomic_set(&smu->smu_power.power_gate.isp_gated, 1);
atomic_set(&smu->smu_power.power_gate.umsch_mm_gated, 1);
smu_init_power_profile(smu);
@@ -1672,37 +1720,6 @@ static int smu_smc_hw_setup(struct smu_context *smu)
}
}
- ret = smu_system_features_control(smu, true);
- if (ret) {
- dev_err(adev->dev, "Failed to enable requested dpm features!\n");
- return ret;
- }
-
- smu_init_xgmi_plpd_mode(smu);
-
- ret = smu_feature_get_enabled_mask(smu, &features_supported);
- if (ret) {
- dev_err(adev->dev, "Failed to retrieve supported dpm features!\n");
- return ret;
- }
- bitmap_copy(feature->supported,
- (unsigned long *)&features_supported,
- feature->feature_num);
-
- if (!smu_is_dpm_running(smu))
- dev_info(adev->dev, "dpm has been disabled\n");
-
- /*
- * Set initialized values (get from vbios) to dpm tables context such as
- * gfxclk, memclk, dcefclk, and etc. And enable the DPM feature for each
- * type of clks.
- */
- ret = smu_set_default_dpm_table(smu);
- if (ret) {
- dev_err(adev->dev, "Failed to setup default dpm clock tables!\n");
- return ret;
- }
-
if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5)
pcie_gen = 4;
else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)
@@ -1738,6 +1755,37 @@ static int smu_smc_hw_setup(struct smu_context *smu)
return ret;
}
+ ret = smu_system_features_control(smu, true);
+ if (ret) {
+ dev_err(adev->dev, "Failed to enable requested dpm features!\n");
+ return ret;
+ }
+
+ smu_init_xgmi_plpd_mode(smu);
+
+ ret = smu_feature_get_enabled_mask(smu, &features_supported);
+ if (ret) {
+ dev_err(adev->dev, "Failed to retrieve supported dpm features!\n");
+ return ret;
+ }
+ bitmap_copy(feature->supported,
+ (unsigned long *)&features_supported,
+ feature->feature_num);
+
+ if (!smu_is_dpm_running(smu))
+ dev_info(adev->dev, "dpm has been disabled\n");
+
+ /*
+ * Set initialized values (get from vbios) to dpm tables context such as
+ * gfxclk, memclk, dcefclk, and etc. And enable the DPM feature for each
+ * type of clks.
+ */
+ ret = smu_set_default_dpm_table(smu);
+ if (ret) {
+ dev_err(adev->dev, "Failed to setup default dpm clock tables!\n");
+ return ret;
+ }
+
ret = smu_get_thermal_temperature_range(smu);
if (ret) {
dev_err(adev->dev, "Failed to get thermal temperature ranges!\n");
@@ -1780,6 +1828,9 @@ static int smu_start_smc_engine(struct smu_context *smu)
struct amdgpu_device *adev = smu->adev;
int ret = 0;
+ if (amdgpu_virt_xgmi_migrate_enabled(adev))
+ smu_update_gpu_addresses(smu);
+
smu->smc_fw_state = SMU_FW_INIT;
if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
@@ -2935,6 +2986,12 @@ static enum smu_clk_type smu_convert_to_smuclk(enum pp_clock_type type)
clk_type = SMU_DCLK; break;
case PP_DCLK1:
clk_type = SMU_DCLK1; break;
+ case PP_ISPICLK:
+ clk_type = SMU_ISPICLK;
+ break;
+ case PP_ISPXCLK:
+ clk_type = SMU_ISPXCLK;
+ break;
case OD_SCLK:
clk_type = SMU_OD_SCLK; break;
case OD_MCLK:
@@ -3432,15 +3489,15 @@ bool smu_mode1_reset_is_support(struct smu_context *smu)
return ret;
}
-bool smu_mode2_reset_is_support(struct smu_context *smu)
+bool smu_link_reset_is_support(struct smu_context *smu)
{
bool ret = false;
if (!smu->pm_enabled)
return false;
- if (smu->ppt_funcs && smu->ppt_funcs->mode2_reset_is_support)
- ret = smu->ppt_funcs->mode2_reset_is_support(smu);
+ if (smu->ppt_funcs && smu->ppt_funcs->link_reset_is_support)
+ ret = smu->ppt_funcs->link_reset_is_support(smu);
return ret;
}
@@ -3475,6 +3532,19 @@ static int smu_mode2_reset(void *handle)
return ret;
}
+int smu_link_reset(struct smu_context *smu)
+{
+ int ret = 0;
+
+ if (!smu->pm_enabled)
+ return -EOPNOTSUPP;
+
+ if (smu->ppt_funcs->link_reset)
+ ret = smu->ppt_funcs->link_reset(smu);
+
+ return ret;
+}
+
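
smu_link_reset() follows the same optional-callback dispatch as the other reset entry points: refuse when PM is disabled, and treat a missing per-ASIC hook as a no-op. A compilable model of the pattern (struct and field names are stand-ins):

#include <errno.h>
#include <stdbool.h>
#include <stddef.h>

struct ctx;
struct ppt_hooks {
	bool (*link_reset_is_support)(struct ctx *c);
	int  (*link_reset)(struct ctx *c);
};

struct ctx {
	bool pm_enabled;
	const struct ppt_hooks *ppt_funcs;
};

int do_link_reset(struct ctx *c)
{
	if (!c->pm_enabled)
		return -EOPNOTSUPP;
	if (c->ppt_funcs->link_reset)
		return c->ppt_funcs->link_reset(c);
	return 0;	/* no ASIC hook: nothing to reset */
}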
static int smu_enable_gfx_features(void *handle)
{
struct smu_context *smu = handle;
@@ -3745,6 +3815,19 @@ int smu_set_pm_policy(struct smu_context *smu, enum pp_pm_policy p_type,
return ret;
}
+static ssize_t smu_sys_get_xcp_metrics(void *handle, int xcp_id, void *table)
+{
+ struct smu_context *smu = handle;
+
+ if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
+ return -EOPNOTSUPP;
+
+ if (!smu->adev->xcp_mgr || !smu->ppt_funcs->get_xcp_metrics)
+ return -EOPNOTSUPP;
+
+ return smu->ppt_funcs->get_xcp_metrics(smu, xcp_id, table);
+}
+
static const struct amd_pm_funcs swsmu_pm_funcs = {
/* export for sysfs */
.set_fan_control_mode = smu_set_fan_control_mode,
@@ -3803,6 +3886,7 @@ static const struct amd_pm_funcs swsmu_pm_funcs = {
.get_uclk_dpm_states = smu_get_uclk_dpm_states,
.get_dpm_clock_table = smu_get_dpm_clock_table,
.get_smu_prv_buf_details = smu_get_prv_buffer_details,
+ .get_xcp_metrics = smu_sys_get_xcp_metrics,
};
int smu_wait_for_event(struct smu_context *smu, enum smu_event_type event,
@@ -3975,3 +4059,11 @@ int smu_reset_sdma(struct smu_context *smu, uint32_t inst_mask)
return ret;
}
+
+int smu_reset_vcn(struct smu_context *smu, uint32_t inst_mask)
+{
+ if (smu->ppt_funcs && smu->ppt_funcs->dpm_reset_vcn)
+ smu->ppt_funcs->dpm_reset_vcn(smu, inst_mask);
+
+ return 0;
+}
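
inst_mask selects VCN instances by bit, so one call can cover several instances. The sketch below shows how a backend might walk such a mask; note that smu_reset_vcn() itself simply forwards the mask to the per-ASIC dpm_reset_vcn() hook, so the loop here is purely illustrative:

#include <stdint.h>
#include <stdio.h>

static int fw_reset_vcn_inst(unsigned int inst)	/* stand-in SMU message */
{
	printf("reset VCN instance %u\n", inst);
	return 0;
}

static int reset_vcn(uint32_t inst_mask)
{
	for (unsigned int i = 0; inst_mask; i++, inst_mask >>= 1)
		if (inst_mask & 1) {
			int ret = fw_reset_vcn_inst(i);
			if (ret)
				return ret;
		}
	return 0;
}

int main(void) { return reset_vcn((1u << 0) | (1u << 2)); }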
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
index dd6d0e7aa242..b52e194397e2 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
@@ -402,6 +402,7 @@ struct smu_power_gate {
atomic_t vcn_gated[AMDGPU_MAX_VCN_INSTANCES];
atomic_t jpeg_gated;
atomic_t vpe_gated;
+ atomic_t isp_gated;
atomic_t umsch_mm_gated;
};
@@ -438,9 +439,11 @@ struct mclock_latency_table {
};
enum smu_reset_mode {
- SMU_RESET_MODE_0,
- SMU_RESET_MODE_1,
- SMU_RESET_MODE_2,
+ SMU_RESET_MODE_0,
+ SMU_RESET_MODE_1,
+ SMU_RESET_MODE_2,
+ SMU_RESET_MODE_3,
+ SMU_RESET_MODE_4,
};
enum smu_baco_state {
@@ -1229,10 +1232,11 @@ struct pptable_funcs {
* @mode1_reset_is_support: Check if GPU supports mode1 reset.
*/
bool (*mode1_reset_is_support)(struct smu_context *smu);
+
/**
- * @mode2_reset_is_support: Check if GPU supports mode2 reset.
+ * @link_reset_is_support: Check if GPU supports link reset.
*/
- bool (*mode2_reset_is_support)(struct smu_context *smu);
+ bool (*link_reset_is_support)(struct smu_context *smu);
/**
* @mode1_reset: Perform mode1 reset.
@@ -1252,6 +1256,13 @@ struct pptable_funcs {
int (*enable_gfx_features)(struct smu_context *smu);
/**
+ * @link_reset: Perform link reset.
+ *
+ * The gfx device driver reset
+ */
+ int (*link_reset)(struct smu_context *smu);
+
+ /**
* @get_dpm_ultimate_freq: Get the hard frequency range of a clock
* domain in MHz.
*/
@@ -1383,6 +1394,11 @@ struct pptable_funcs {
bool (*reset_sdma_is_supported)(struct smu_context *smu);
/**
+ * @dpm_reset_vcn: message SMU to soft reset a VCN instance.
+ */
+ int (*dpm_reset_vcn)(struct smu_context *smu, uint32_t inst_mask);
+
+ /**
* @get_ecc_table: message SMU to get ECC INFO table.
*/
ssize_t (*get_ecc_info)(struct smu_context *smu, void *table);
@@ -1421,6 +1437,12 @@ struct pptable_funcs {
int (*dpm_set_vpe_enable)(struct smu_context *smu, bool enable);
/**
+ * @dpm_set_isp_enable: Enable/disable ISP engine dynamic power
+ * management.
+ */
+ int (*dpm_set_isp_enable)(struct smu_context *smu, bool enable);
+
+ /**
* @dpm_set_umsch_mm_enable: Enable/disable UMSCH engine dynamic power
* management.
*/
@@ -1451,6 +1473,12 @@ struct pptable_funcs {
*/
int (*set_wbrf_exclusion_ranges)(struct smu_context *smu,
struct freq_band_range *exclusion_ranges);
+ /**
+ * @get_xcp_metrics: Get a copy of the partition metrics table from SMU.
+ * Return: Size of table
+ */
+ ssize_t (*get_xcp_metrics)(struct smu_context *smu, int xcp_id,
+ void *table);
};
typedef enum {
@@ -1601,8 +1629,9 @@ int smu_get_power_limit(void *handle,
enum pp_power_type pp_power_type);
bool smu_mode1_reset_is_support(struct smu_context *smu);
-bool smu_mode2_reset_is_support(struct smu_context *smu);
+bool smu_link_reset_is_support(struct smu_context *smu);
int smu_mode1_reset(struct smu_context *smu);
+int smu_link_reset(struct smu_context *smu);
extern const struct amd_ip_funcs smu_ip_funcs;
@@ -1613,7 +1642,7 @@ int smu_write_watermarks_table(struct smu_context *smu);
int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
uint32_t *min, uint32_t *max);
-int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
+int smu_set_soft_freq_range(struct smu_context *smu, enum pp_clock_type clk_type,
uint32_t min, uint32_t max);
int smu_set_gfx_power_up_by_imu(struct smu_context *smu);
@@ -1643,6 +1672,7 @@ int smu_send_hbm_bad_channel_flag(struct smu_context *smu, uint32_t size);
int smu_send_rma_reason(struct smu_context *smu);
int smu_reset_sdma(struct smu_context *smu, uint32_t inst_mask);
bool smu_reset_sdma_is_supported(struct smu_context *smu);
+int smu_reset_vcn(struct smu_context *smu, uint32_t inst_mask);
int smu_set_pm_policy(struct smu_context *smu, enum pp_pm_policy p_type,
int level);
ssize_t smu_get_pm_policy_info(struct smu_context *smu,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h
index 1bc30db22f9c..cd44f4254134 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h
@@ -106,6 +106,7 @@ typedef struct {
#define NUM_FCLK_DPM_LEVELS 8
#define NUM_MEM_PSTATE_LEVELS 4
+#define ISP_ALL_TILES_MASK 0x7FF
typedef struct {
uint32_t UClk;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_pmfw.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_pmfw.h
index d7505cfc433a..0a2ca544f4e3 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_pmfw.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_pmfw.h
@@ -86,8 +86,10 @@ typedef enum {
/*36*/ FEATURE_PIT = 36,
/*37*/ FEATURE_DVO = 37,
/*38*/ FEATURE_XVMINORPSM_CLKSTOP_DS = 38,
+/*39*/ FEATURE_GLOBAL_DPM = 39,
+/*40*/ FEATURE_NODE_POWER_MANAGER = 40,
-/*39*/ NUM_FEATURES = 39
+/*41*/ NUM_FEATURES = 41
} FEATURE_LIST_e;
//enum for MPIO PCIe gen speed msgs
@@ -133,7 +135,7 @@ typedef enum {
GFX_DVM_MARGIN_COUNT
} GFX_DVM_MARGIN_e;
-#define SMU_METRICS_TABLE_VERSION 0x12
+#define SMU_METRICS_TABLE_VERSION 0x13
typedef struct __attribute__((packed, aligned(4))) {
uint64_t AccumulationCounter;
@@ -275,6 +277,16 @@ typedef struct {
//PSNs
uint64_t PublicSerialNumber_AID[4];
uint64_t PublicSerialNumber_XCD[8];
+
+ //XGMI
+ uint32_t MaxXgmiWidth;
+ uint32_t MaxXgmiBitrate;
+
+ // Telemetry
+ uint32_t InputTelemetryVoltageInmV;
+
+ // General info
+ uint32_t pldmVersion[2];
} StaticMetricsTable_t;
#pragma pack(pop)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h
index d26f35119a12..01790a927930 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h
@@ -127,7 +127,7 @@ typedef enum {
VOLTAGE_GUARDBAND_COUNT
} GFX_GUARDBAND_e;
-#define SMU_METRICS_TABLE_VERSION 0x10
+#define SMU_METRICS_TABLE_VERSION 0x11
// Unified metrics table for smu_v13_0_6
typedef struct __attribute__((packed, aligned(4))) {
@@ -459,4 +459,13 @@ typedef struct __attribute__((packed, aligned(4))) {
uint64_t AccGfxclkBelowHostLimit;
} VfMetricsTable_t;
+#pragma pack(push, 4)
+typedef struct {
+ // Telemetry
+ uint32_t InputTelemetryVoltageInmV;
+ // General info
+ uint32_t pldmVersion[2];
+} StaticMetricsTable_t;
+#pragma pack(pop)
+
#endif
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h
index 288b2576432b..41f268313613 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h
@@ -94,7 +94,9 @@
#define PPSMC_MSG_RmaDueToBadPageThreshold 0x43
#define PPSMC_MSG_SetThrottlingPolicy 0x44
#define PPSMC_MSG_ResetSDMA 0x4D
-#define PPSMC_Message_Count 0x4E
+#define PPSMC_MSG_ResetVCN 0x4E
+#define PPSMC_MSG_GetStaticMetricsTable 0x59
+#define PPSMC_Message_Count 0x5A
//PPSMC Reset Types for driver msg argument
#define PPSMC_RESET_TYPE_DRIVER_MODE_1_RESET 0x1
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
index c9dee09395e3..d7a9e41820fa 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
@@ -277,6 +277,7 @@
__SMU_DUMMY_MAP(MALLPowerController), \
__SMU_DUMMY_MAP(MALLPowerState), \
__SMU_DUMMY_MAP(ResetSDMA), \
+ __SMU_DUMMY_MAP(ResetVCN), \
__SMU_DUMMY_MAP(GetStaticMetricsTable),
#undef __SMU_DUMMY_MAP
@@ -304,6 +305,8 @@ enum smu_clk_type {
SMU_MCLK,
SMU_PCIE,
SMU_LCLK,
+ SMU_ISPICLK,
+ SMU_ISPXCLK,
SMU_OD_CCLK,
SMU_OD_SCLK,
SMU_OD_MCLK,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h
index ed8304d82831..56ae555bb52a 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h
@@ -281,11 +281,6 @@ int smu_v11_0_set_single_dpm_table(struct smu_context *smu,
enum smu_clk_type clk_type,
struct smu_11_0_dpm_table *single_dpm_table);
-int smu_v11_0_get_dpm_level_range(struct smu_context *smu,
- enum smu_clk_type clk_type,
- uint32_t *min_value,
- uint32_t *max_value);
-
int smu_v11_0_get_current_pcie_link_width_level(struct smu_context *smu);
uint16_t smu_v11_0_get_current_pcie_link_width(struct smu_context *smu);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
index cd03caffe317..4263798d716b 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
@@ -112,6 +112,7 @@ struct smu_13_0_dpm_context {
uint32_t workload_policy_mask;
uint32_t dcef_min_ds_clk;
uint64_t caps;
+ uint32_t board_volt;
};
enum smu_13_0_power_state {
@@ -162,8 +163,6 @@ int smu_v13_0_notify_memory_pool_location(struct smu_context *smu);
int smu_v13_0_system_features_control(struct smu_context *smu,
bool en);
-int smu_v13_0_init_display_count(struct smu_context *smu, uint32_t count);
-
int smu_v13_0_set_allowed_mask(struct smu_context *smu);
int smu_v13_0_notify_display_change(struct smu_context *smu);
@@ -183,13 +182,6 @@ int smu_v13_0_disable_thermal_alert(struct smu_context *smu);
int smu_v13_0_get_gfx_vdd(struct smu_context *smu, uint32_t *value);
-int smu_v13_0_set_min_deep_sleep_dcefclk(struct smu_context *smu, uint32_t clk);
-
-int
-smu_v13_0_display_clock_voltage_request(struct smu_context *smu,
- struct pp_display_clock_request
- *clock_req);
-
uint32_t
smu_v13_0_get_fan_control_mode(struct smu_context *smu);
@@ -226,11 +218,6 @@ int smu_v13_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type c
int smu_v13_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type,
uint32_t min, uint32_t max, bool automatic);
-int smu_v13_0_set_hard_freq_limited_range(struct smu_context *smu,
- enum smu_clk_type clk_type,
- uint32_t min,
- uint32_t max);
-
int smu_v13_0_set_performance_level(struct smu_context *smu,
enum amd_dpm_forced_level level);
@@ -310,14 +297,6 @@ int smu_v13_0_get_boot_freq_by_index(struct smu_context *smu,
uint32_t *value);
void smu_v13_0_interrupt_work(struct smu_context *smu);
-bool smu_v13_0_12_is_dpm_running(struct smu_context *smu);
-int smu_v13_0_12_get_max_metrics_size(void);
-int smu_v13_0_12_setup_driver_pptable(struct smu_context *smu);
-int smu_v13_0_12_get_smu_metrics_data(struct smu_context *smu,
- MetricsMember_t member,
- uint32_t *value);
-ssize_t smu_v13_0_12_get_gpu_metrics(struct smu_context *smu, void **table);
-extern const struct cmn2asic_mapping smu_v13_0_12_feature_mask_map[];
-extern const struct cmn2asic_msg_mapping smu_v13_0_12_message_map[];
+void smu_v13_0_reset_custom_level(struct smu_context *smu);
#endif
#endif
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
index 453952cdc353..9ad46f545d15 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
@@ -1347,7 +1347,7 @@ static int arcturus_get_power_limit(struct smu_context *smu,
*default_power_limit = power_limit;
if (max_power_limit)
*max_power_limit = power_limit;
- /**
+ /*
* No lower bound is imposed on the limit. Any unreasonable limit set
* will result in frequent throttling.
*/
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
index 7fad5dfb39c4..aac202d0c30e 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
@@ -2444,7 +2444,8 @@ static int navi10_update_pcie_parameters(struct smu_context *smu,
struct smu_11_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
PPTable_t *pptable = smu->smu_table.driver_pptable;
uint32_t smu_pcie_arg;
- int ret, i;
+ int ret = 0;
+ int i;
/* lclk dpm table setup */
for (i = 0; i < MAX_PCIE_CONF; i++) {
@@ -2453,25 +2454,27 @@ static int navi10_update_pcie_parameters(struct smu_context *smu,
}
for (i = 0; i < NUM_LINK_LEVELS; i++) {
- smu_pcie_arg = (i << 16) |
- ((pptable->PcieGenSpeed[i] <= pcie_gen_cap) ? (pptable->PcieGenSpeed[i] << 8) :
- (pcie_gen_cap << 8)) | ((pptable->PcieLaneCount[i] <= pcie_width_cap) ?
- pptable->PcieLaneCount[i] : pcie_width_cap);
- ret = smu_cmn_send_smc_msg_with_param(smu,
- SMU_MSG_OverridePcieParameters,
- smu_pcie_arg,
- NULL);
-
- if (ret)
- return ret;
-
- if (pptable->PcieGenSpeed[i] > pcie_gen_cap)
- dpm_context->dpm_tables.pcie_table.pcie_gen[i] = pcie_gen_cap;
- if (pptable->PcieLaneCount[i] > pcie_width_cap)
- dpm_context->dpm_tables.pcie_table.pcie_lane[i] = pcie_width_cap;
+ if (pptable->PcieGenSpeed[i] > pcie_gen_cap ||
+ pptable->PcieLaneCount[i] > pcie_width_cap) {
+ dpm_context->dpm_tables.pcie_table.pcie_gen[i] =
+ pptable->PcieGenSpeed[i] > pcie_gen_cap ?
+ pcie_gen_cap : pptable->PcieGenSpeed[i];
+ dpm_context->dpm_tables.pcie_table.pcie_lane[i] =
+ pptable->PcieLaneCount[i] > pcie_width_cap ?
+ pcie_width_cap : pptable->PcieLaneCount[i];
+ smu_pcie_arg = i << 16;
+ smu_pcie_arg |= pcie_gen_cap << 8;
+ smu_pcie_arg |= pcie_width_cap;
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_OverridePcieParameters,
+ smu_pcie_arg,
+ NULL);
+ if (ret)
+ break;
+ }
}
- return 0;
+ return ret;
}
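
The override argument packs the link level into bits 31:16, the gen speed into bits 15:8 and the lane count into bits 7:0; with this change the message is only sent for levels whose pptable values exceed the platform caps. A clamp-then-encode sketch of that per-level step (the exact value each ASIC sends can differ, e.g. navi10 above sends the caps themselves):

#include <stdint.h>

static uint32_t encode_pcie_arg(unsigned int level, uint8_t gen, uint8_t lanes,
				uint8_t gen_cap, uint8_t width_cap)
{
	uint8_t g = gen   > gen_cap   ? gen_cap   : gen;   /* clamp to caps */
	uint8_t l = lanes > width_cap ? width_cap : lanes;

	return ((uint32_t)level << 16) | ((uint32_t)g << 8) | l;
}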
static inline void navi10_dump_od_table(struct smu_context *smu,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
index 19a25fdc2f5b..d57591509aed 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
@@ -2145,7 +2145,8 @@ static int sienna_cichlid_update_pcie_parameters(struct smu_context *smu,
uint8_t min_gen_speed, max_gen_speed;
uint8_t min_lane_width, max_lane_width;
uint32_t smu_pcie_arg;
- int ret, i;
+ int ret = 0;
+ int i;
GET_PPTABLE_MEMBER(PcieGenSpeed, &table_member1);
GET_PPTABLE_MEMBER(PcieLaneCount, &table_member2);
@@ -2170,19 +2171,22 @@ static int sienna_cichlid_update_pcie_parameters(struct smu_context *smu,
pcie_table->pcie_lane[1] = max_lane_width;
for (i = 0; i < NUM_LINK_LEVELS; i++) {
- smu_pcie_arg = (i << 16 |
+ if (!(smu->adev->pm.pp_feature & PP_PCIE_DPM_MASK) ||
+ table_member1[i] > pcie_gen_cap || table_member2[i] > pcie_width_cap) {
+ smu_pcie_arg = (i << 16 |
pcie_table->pcie_gen[i] << 8 |
pcie_table->pcie_lane[i]);
- ret = smu_cmn_send_smc_msg_with_param(smu,
- SMU_MSG_OverridePcieParameters,
- smu_pcie_arg,
- NULL);
- if (ret)
- return ret;
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_OverridePcieParameters,
+ smu_pcie_arg,
+ NULL);
+ if (ret)
+ break;
+ }
}
- return 0;
+ return ret;
}
static int sienna_cichlid_get_dpm_ultimate_freq(struct smu_context *smu,
@@ -3089,11 +3093,6 @@ static int sienna_cichlid_stb_get_data_direct(struct smu_context *smu,
return 0;
}
-static bool sienna_cichlid_is_mode2_reset_supported(struct smu_context *smu)
-{
- return true;
-}
-
static int sienna_cichlid_mode2_reset(struct smu_context *smu)
{
int ret = 0, index;
@@ -3229,7 +3228,6 @@ static const struct pptable_funcs sienna_cichlid_ppt_funcs = {
.get_default_config_table_settings = sienna_cichlid_get_default_config_table_settings,
.set_config_table = sienna_cichlid_set_config_table,
.get_unique_id = sienna_cichlid_get_unique_id,
- .mode2_reset_is_support = sienna_cichlid_is_mode2_reset_supported,
.mode2_reset = sienna_cichlid_mode2_reset,
};
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
index 25fabf336a64..78e4186d06cc 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
@@ -2059,45 +2059,6 @@ int smu_v11_0_set_single_dpm_table(struct smu_context *smu,
return 0;
}
-int smu_v11_0_get_dpm_level_range(struct smu_context *smu,
- enum smu_clk_type clk_type,
- uint32_t *min_value,
- uint32_t *max_value)
-{
- uint32_t level_count = 0;
- int ret = 0;
-
- if (!min_value && !max_value)
- return -EINVAL;
-
- if (min_value) {
- /* by default, level 0 clock value as min value */
- ret = smu_v11_0_get_dpm_freq_by_index(smu,
- clk_type,
- 0,
- min_value);
- if (ret)
- return ret;
- }
-
- if (max_value) {
- ret = smu_v11_0_get_dpm_level_count(smu,
- clk_type,
- &level_count);
- if (ret)
- return ret;
-
- ret = smu_v11_0_get_dpm_freq_by_index(smu,
- clk_type,
- level_count - 1,
- max_value);
- if (ret)
- return ret;
- }
-
- return ret;
-}
-
int smu_v11_0_get_current_pcie_link_width_level(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
index a55ea76d7399..2c9869feba61 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
@@ -666,7 +666,6 @@ static int vangogh_print_clk_levels(struct smu_context *smu,
{
DpmClocks_t *clk_table = smu->smu_table.clocks_table;
SmuMetrics_t metrics;
- struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
int i, idx, size = 0, ret = 0;
uint32_t cur_value = 0, value = 0, count = 0;
bool cur_value_match_level = false;
@@ -682,31 +681,25 @@ static int vangogh_print_clk_levels(struct smu_context *smu,
switch (clk_type) {
case SMU_OD_SCLK:
- if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
- size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK");
- size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
- (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq);
- size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
- (smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq);
- }
+ size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK");
+ size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
+ (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq);
+ size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
+ (smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq);
break;
case SMU_OD_CCLK:
- if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
- size += sysfs_emit_at(buf, size, "CCLK_RANGE in Core%d:\n", smu->cpu_core_id_select);
- size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
- (smu->cpu_actual_soft_min_freq > 0) ? smu->cpu_actual_soft_min_freq : smu->cpu_default_soft_min_freq);
- size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
- (smu->cpu_actual_soft_max_freq > 0) ? smu->cpu_actual_soft_max_freq : smu->cpu_default_soft_max_freq);
- }
+ size += sysfs_emit_at(buf, size, "CCLK_RANGE in Core%d:\n", smu->cpu_core_id_select);
+ size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
+ (smu->cpu_actual_soft_min_freq > 0) ? smu->cpu_actual_soft_min_freq : smu->cpu_default_soft_min_freq);
+ size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
+ (smu->cpu_actual_soft_max_freq > 0) ? smu->cpu_actual_soft_max_freq : smu->cpu_default_soft_max_freq);
break;
case SMU_OD_RANGE:
- if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
- size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
- size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n",
- smu->gfx_default_hard_min_freq, smu->gfx_default_soft_max_freq);
- size += sysfs_emit_at(buf, size, "CCLK: %7uMhz %10uMhz\n",
- smu->cpu_default_soft_min_freq, smu->cpu_default_soft_max_freq);
- }
+ size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
+ size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n",
+ smu->gfx_default_hard_min_freq, smu->gfx_default_soft_max_freq);
+ size += sysfs_emit_at(buf, size, "CCLK: %7uMhz %10uMhz\n",
+ smu->cpu_default_soft_min_freq, smu->cpu_default_soft_max_freq);
break;
case SMU_SOCCLK:
/* the level 3 ~ 6 of socclk use the same frequency for vangogh */
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
index 9481f897432d..e97b0cf19197 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
@@ -497,7 +497,6 @@ static int renoir_print_clk_levels(struct smu_context *smu,
int i, idx, size = 0, ret = 0;
uint32_t cur_value = 0, value = 0, count = 0, min = 0, max = 0;
SmuMetrics_t metrics;
- struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
bool cur_value_match_level = false;
memset(&metrics, 0, sizeof(metrics));
@@ -510,28 +509,24 @@ static int renoir_print_clk_levels(struct smu_context *smu,
switch (clk_type) {
case SMU_OD_RANGE:
- if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
- ret = smu_cmn_send_smc_msg_with_param(smu,
- SMU_MSG_GetMinGfxclkFrequency,
- 0, &min);
- if (ret)
- return ret;
- ret = smu_cmn_send_smc_msg_with_param(smu,
- SMU_MSG_GetMaxGfxclkFrequency,
- 0, &max);
- if (ret)
- return ret;
- size += sysfs_emit_at(buf, size, "OD_RANGE\nSCLK: %10uMhz %10uMhz\n", min, max);
- }
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_GetMinGfxclkFrequency,
+ 0, &min);
+ if (ret)
+ return ret;
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_GetMaxGfxclkFrequency,
+ 0, &max);
+ if (ret)
+ return ret;
+ size += sysfs_emit_at(buf, size, "OD_RANGE\nSCLK: %10uMhz %10uMhz\n", min, max);
break;
case SMU_OD_SCLK:
- if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
- min = (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq;
- max = (smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq;
- size += sysfs_emit_at(buf, size, "OD_SCLK\n");
- size += sysfs_emit_at(buf, size, "0:%10uMhz\n", min);
- size += sysfs_emit_at(buf, size, "1:%10uMhz\n", max);
- }
+ min = (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq;
+ max = (smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq;
+ size += sysfs_emit_at(buf, size, "OD_SCLK\n");
+ size += sysfs_emit_at(buf, size, "0:%10uMhz\n", min);
+ size += sysfs_emit_at(buf, size, "1:%10uMhz\n", max);
break;
case SMU_GFXCLK:
case SMU_SCLK:
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
index 83163d7c7f00..c63d2e28954d 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
@@ -342,6 +342,61 @@ static int aldebaran_get_allowed_feature_mask(struct smu_context *smu,
return 0;
}
+static int aldebaran_get_dpm_ultimate_freq(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ uint32_t *min, uint32_t *max)
+{
+ struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
+ struct smu_13_0_dpm_table *dpm_table;
+ uint32_t min_clk, max_clk;
+
+ if (amdgpu_sriov_vf(smu->adev)) {
+ switch (clk_type) {
+ case SMU_MCLK:
+ case SMU_UCLK:
+ dpm_table = &dpm_context->dpm_tables.uclk_table;
+ break;
+ case SMU_GFXCLK:
+ case SMU_SCLK:
+ dpm_table = &dpm_context->dpm_tables.gfx_table;
+ break;
+ case SMU_SOCCLK:
+ dpm_table = &dpm_context->dpm_tables.soc_table;
+ break;
+ case SMU_FCLK:
+ dpm_table = &dpm_context->dpm_tables.fclk_table;
+ break;
+ case SMU_VCLK:
+ dpm_table = &dpm_context->dpm_tables.vclk_table;
+ break;
+ case SMU_DCLK:
+ dpm_table = &dpm_context->dpm_tables.dclk_table;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ min_clk = dpm_table->min;
+ max_clk = dpm_table->max;
+
+ if (min) {
+ if (!min_clk)
+ return -ENODATA;
+ *min = min_clk;
+ }
+ if (max) {
+ if (!max_clk)
+ return -ENODATA;
+ *max = max_clk;
+ }
+
+ } else {
+ return smu_v13_0_get_dpm_ultimate_freq(smu, clk_type, min, max);
+ }
+
+ return 0;
+}
+
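
Under SR-IOV the VF cannot query firmware for clock limits, so the new path returns the bounds cached in the dpm tables and reports -ENODATA when the host never populated them. A minimal model of that lookup with stand-in types:

#include <errno.h>
#include <stdint.h>

struct dpm_bounds { uint32_t min, max; };

static int get_ultimate_freq(const struct dpm_bounds *t,
			     uint32_t *min, uint32_t *max)
{
	if (min) {
		if (!t->min)
			return -ENODATA;	/* table not populated */
		*min = t->min;
	}
	if (max) {
		if (!t->max)
			return -ENODATA;
		*max = t->max;
	}
	return 0;
}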
static int aldebaran_set_default_dpm_table(struct smu_context *smu)
{
struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
@@ -1270,6 +1325,7 @@ static int aldebaran_set_performance_level(struct smu_context *smu,
struct smu_13_0_dpm_table *gfx_table =
&dpm_context->dpm_tables.gfx_table;
struct smu_umd_pstate_table *pstate_table = &smu->pstate_table;
+ int r;
/* Disable determinism if switching to another mode */
if ((smu_dpm->dpm_level == AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) &&
@@ -1282,7 +1338,11 @@ static int aldebaran_set_performance_level(struct smu_context *smu,
case AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM:
return 0;
-
+ case AMD_DPM_FORCED_LEVEL_AUTO:
+ r = smu_v13_0_set_performance_level(smu, level);
+ if (!r)
+ smu_v13_0_reset_custom_level(smu);
+ return r;
case AMD_DPM_FORCED_LEVEL_HIGH:
case AMD_DPM_FORCED_LEVEL_LOW:
case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
@@ -1423,7 +1483,11 @@ static int aldebaran_usr_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_
min_clk = dpm_context->dpm_tables.gfx_table.min;
max_clk = dpm_context->dpm_tables.gfx_table.max;
- return aldebaran_set_soft_freq_limited_range(smu, SMU_GFXCLK, min_clk, max_clk, false);
+ ret = aldebaran_set_soft_freq_limited_range(
+ smu, SMU_GFXCLK, min_clk, max_clk, false);
+ if (ret)
+ return ret;
+ smu_v13_0_reset_custom_level(smu);
}
break;
case PP_OD_COMMIT_DPM_TABLE:
@@ -1976,11 +2040,6 @@ static bool aldebaran_is_mode1_reset_supported(struct smu_context *smu)
return true;
}
-static bool aldebaran_is_mode2_reset_supported(struct smu_context *smu)
-{
- return true;
-}
-
static int aldebaran_set_mp1_state(struct smu_context *smu,
enum pp_mp1_state mp1_state)
{
@@ -2077,7 +2136,7 @@ static const struct pptable_funcs aldebaran_ppt_funcs = {
.set_azalia_d3_pme = smu_v13_0_set_azalia_d3_pme,
.get_max_sustainable_clocks_by_dc = smu_v13_0_get_max_sustainable_clocks_by_dc,
.get_bamaco_support = aldebaran_get_bamaco_support,
- .get_dpm_ultimate_freq = smu_v13_0_get_dpm_ultimate_freq,
+ .get_dpm_ultimate_freq = aldebaran_get_dpm_ultimate_freq,
.set_soft_freq_limited_range = aldebaran_set_soft_freq_limited_range,
.od_edit_dpm_table = aldebaran_usr_edit_dpm_table,
.set_df_cstate = aldebaran_set_df_cstate,
@@ -2086,7 +2145,6 @@ static const struct pptable_funcs aldebaran_ppt_funcs = {
.set_pp_feature_mask = smu_cmn_set_pp_feature_mask,
.get_gpu_metrics = aldebaran_get_gpu_metrics,
.mode1_reset_is_support = aldebaran_is_mode1_reset_supported,
- .mode2_reset_is_support = aldebaran_is_mode2_reset_supported,
.smu_handle_passthrough_sbr = aldebaran_smu_handle_passthrough_sbr,
.mode1_reset = aldebaran_mode1_reset,
.set_mp1_state = aldebaran_set_mp1_state,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
index ba5a9012dbd5..1a1f2a6b2e52 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
@@ -58,6 +58,7 @@
MODULE_FIRMWARE("amdgpu/aldebaran_smc.bin");
MODULE_FIRMWARE("amdgpu/smu_13_0_0.bin");
+MODULE_FIRMWARE("amdgpu/smu_13_0_0_kicker.bin");
MODULE_FIRMWARE("amdgpu/smu_13_0_7.bin");
MODULE_FIRMWARE("amdgpu/smu_13_0_10.bin");
@@ -92,7 +93,7 @@ const int pmfw_decoded_link_width[7] = {0, 1, 2, 4, 8, 12, 16};
int smu_v13_0_init_microcode(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
- char ucode_prefix[15];
+ char ucode_prefix[30];
int err = 0;
const struct smc_firmware_header_v1_0 *hdr;
const struct common_firmware_header *header;
@@ -103,8 +104,13 @@ int smu_v13_0_init_microcode(struct smu_context *smu)
return 0;
amdgpu_ucode_ip_version_decode(adev, MP1_HWIP, ucode_prefix, sizeof(ucode_prefix));
- err = amdgpu_ucode_request(adev, &adev->pm.fw, AMDGPU_UCODE_REQUIRED,
- "amdgpu/%s.bin", ucode_prefix);
+
+ if (amdgpu_is_kicker_fw(adev))
+ err = amdgpu_ucode_request(adev, &adev->pm.fw, AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s_kicker.bin", ucode_prefix);
+ else
+ err = amdgpu_ucode_request(adev, &adev->pm.fw, AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s.bin", ucode_prefix);
if (err)
goto out;
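
Kicker boards load a separately signed "<prefix>_kicker.bin" while everything else keeps "<prefix>.bin"; the larger ucode_prefix buffer makes room for the longer decoded names. A sketch of the selection, with the is_kicker flag standing in for amdgpu_is_kicker_fw():

#include <stdbool.h>
#include <stdio.h>

static void smc_fw_name(const char *prefix, bool is_kicker,
			char *buf, size_t len)
{
	snprintf(buf, len, "amdgpu/%s%s.bin", prefix,
		 is_kicker ? "_kicker" : "");
}

int main(void)
{
	char name[64];

	smc_fw_name("smu_13_0_0", true, name, sizeof(name));
	puts(name);	/* amdgpu/smu_13_0_0_kicker.bin */
}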
@@ -709,18 +715,6 @@ int smu_v13_0_notify_memory_pool_location(struct smu_context *smu)
return ret;
}
-int smu_v13_0_set_min_deep_sleep_dcefclk(struct smu_context *smu, uint32_t clk)
-{
- int ret;
-
- ret = smu_cmn_send_smc_msg_with_param(smu,
- SMU_MSG_SetMinDeepSleepDcefclk, clk, NULL);
- if (ret)
- dev_err(smu->adev->dev, "SMU13 attempt to set divider for DCEFCLK Failed!");
-
- return ret;
-}
-
int smu_v13_0_set_driver_table_location(struct smu_context *smu)
{
struct smu_table *driver_table = &smu->smu_table.driver_table;
@@ -761,18 +755,6 @@ int smu_v13_0_set_tool_table_location(struct smu_context *smu)
return ret;
}
-int smu_v13_0_init_display_count(struct smu_context *smu, uint32_t count)
-{
- int ret = 0;
-
- if (!smu->pm_enabled)
- return ret;
-
- ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, count, NULL);
-
- return ret;
-}
-
int smu_v13_0_set_allowed_mask(struct smu_context *smu)
{
struct smu_feature *feature = &smu->smu_feature;
@@ -1073,56 +1055,6 @@ int smu_v13_0_get_gfx_vdd(struct smu_context *smu, uint32_t *value)
}
-int
-smu_v13_0_display_clock_voltage_request(struct smu_context *smu,
- struct pp_display_clock_request
- *clock_req)
-{
- enum amd_pp_clock_type clk_type = clock_req->clock_type;
- int ret = 0;
- enum smu_clk_type clk_select = 0;
- uint32_t clk_freq = clock_req->clock_freq_in_khz / 1000;
-
- if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) ||
- smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
- switch (clk_type) {
- case amd_pp_dcef_clock:
- clk_select = SMU_DCEFCLK;
- break;
- case amd_pp_disp_clock:
- clk_select = SMU_DISPCLK;
- break;
- case amd_pp_pixel_clock:
- clk_select = SMU_PIXCLK;
- break;
- case amd_pp_phy_clock:
- clk_select = SMU_PHYCLK;
- break;
- case amd_pp_mem_clock:
- clk_select = SMU_UCLK;
- break;
- default:
- dev_info(smu->adev->dev, "[%s] Invalid Clock Type!", __func__);
- ret = -EINVAL;
- break;
- }
-
- if (ret)
- goto failed;
-
- if (clk_select == SMU_UCLK && smu->disable_uclk_switch)
- return 0;
-
- ret = smu_v13_0_set_hard_freq_limited_range(smu, clk_select, clk_freq, 0);
-
- if (clk_select == SMU_UCLK)
- smu->hard_min_uclk_req_from_dal = clk_freq;
- }
-
-failed:
- return ret;
-}
-
uint32_t smu_v13_0_get_fan_control_mode(struct smu_context *smu)
{
if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_FAN_CONTROL_BIT))
@@ -1647,45 +1579,6 @@ out:
return ret;
}
-int smu_v13_0_set_hard_freq_limited_range(struct smu_context *smu,
- enum smu_clk_type clk_type,
- uint32_t min,
- uint32_t max)
-{
- int ret = 0, clk_id = 0;
- uint32_t param;
-
- if (min <= 0 && max <= 0)
- return -EINVAL;
-
- if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type))
- return 0;
-
- clk_id = smu_cmn_to_asic_specific_index(smu,
- CMN2ASIC_MAPPING_CLK,
- clk_type);
- if (clk_id < 0)
- return clk_id;
-
- if (max > 0) {
- param = (uint32_t)((clk_id << 16) | (max & 0xffff));
- ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMaxByFreq,
- param, NULL);
- if (ret)
- return ret;
- }
-
- if (min > 0) {
- param = (uint32_t)((clk_id << 16) | (min & 0xffff));
- ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinByFreq,
- param, NULL);
- if (ret)
- return ret;
- }
-
- return ret;
-}
-
int smu_v13_0_set_performance_level(struct smu_context *smu,
enum amd_dpm_forced_level level)
{
@@ -2493,7 +2386,8 @@ int smu_v13_0_update_pcie_parameters(struct smu_context *smu,
&dpm_context->dpm_tables.pcie_table;
int num_of_levels = pcie_table->num_of_link_levels;
uint32_t smu_pcie_arg;
- int ret, i;
+ int ret = 0;
+ int i;
if (!num_of_levels)
return 0;
@@ -2509,30 +2403,38 @@ int smu_v13_0_update_pcie_parameters(struct smu_context *smu,
for (i = 0; i < num_of_levels; i++) {
pcie_table->pcie_gen[i] = pcie_gen_cap;
pcie_table->pcie_lane[i] = pcie_width_cap;
+ smu_pcie_arg = i << 16;
+ smu_pcie_arg |= pcie_table->pcie_gen[i] << 8;
+ smu_pcie_arg |= pcie_table->pcie_lane[i];
+
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_OverridePcieParameters,
+ smu_pcie_arg,
+ NULL);
+ if (ret)
+ break;
}
} else {
for (i = 0; i < num_of_levels; i++) {
- if (pcie_table->pcie_gen[i] > pcie_gen_cap)
+ if (pcie_table->pcie_gen[i] > pcie_gen_cap ||
+ pcie_table->pcie_lane[i] > pcie_width_cap) {
pcie_table->pcie_gen[i] = pcie_gen_cap;
- if (pcie_table->pcie_lane[i] > pcie_width_cap)
pcie_table->pcie_lane[i] = pcie_width_cap;
+ smu_pcie_arg = i << 16;
+ smu_pcie_arg |= pcie_table->pcie_gen[i] << 8;
+ smu_pcie_arg |= pcie_table->pcie_lane[i];
+
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_OverridePcieParameters,
+ smu_pcie_arg,
+ NULL);
+ if (ret)
+ break;
+ }
}
}
- for (i = 0; i < num_of_levels; i++) {
- smu_pcie_arg = i << 16;
- smu_pcie_arg |= pcie_table->pcie_gen[i] << 8;
- smu_pcie_arg |= pcie_table->pcie_lane[i];
-
- ret = smu_cmn_send_smc_msg_with_param(smu,
- SMU_MSG_OverridePcieParameters,
- smu_pcie_arg,
- NULL);
- if (ret)
- return ret;
- }
-
- return 0;
+ return ret;
}
int smu_v13_0_disable_pmfw_state(struct smu_context *smu)
@@ -2595,3 +2497,13 @@ int smu_v13_0_set_wbrf_exclusion_ranges(struct smu_context *smu,
return ret;
}
+
+void smu_v13_0_reset_custom_level(struct smu_context *smu)
+{
+ struct smu_umd_pstate_table *pstate_table = &smu->pstate_table;
+
+ pstate_table->uclk_pstate.custom.min = 0;
+ pstate_table->uclk_pstate.custom.max = 0;
+ pstate_table->gfxclk_pstate.custom.min = 0;
+ pstate_table->gfxclk_pstate.custom.max = 0;
+}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
index 5a9711e8cf68..e084ed99ec0e 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
@@ -572,8 +572,6 @@ static int smu_v13_0_0_set_default_dpm_table(struct smu_context *smu)
PPTable_t *pptable = table_context->driver_pptable;
SkuTable_t *skutable = &pptable->SkuTable;
struct smu_13_0_dpm_table *dpm_table;
- struct smu_13_0_pcie_table *pcie_table;
- uint32_t link_level;
int ret = 0;
/* socclk dpm table setup */
@@ -689,24 +687,6 @@ static int smu_v13_0_0_set_default_dpm_table(struct smu_context *smu)
dpm_table->max = dpm_table->dpm_levels[0].value;
}
- /* lclk dpm table setup */
- pcie_table = &dpm_context->dpm_tables.pcie_table;
- pcie_table->num_of_link_levels = 0;
- for (link_level = 0; link_level < NUM_LINK_LEVELS; link_level++) {
- if (!skutable->PcieGenSpeed[link_level] &&
- !skutable->PcieLaneCount[link_level] &&
- !skutable->LclkFreq[link_level])
- continue;
-
- pcie_table->pcie_gen[pcie_table->num_of_link_levels] =
- skutable->PcieGenSpeed[link_level];
- pcie_table->pcie_lane[pcie_table->num_of_link_levels] =
- skutable->PcieLaneCount[link_level];
- pcie_table->clk_freq[pcie_table->num_of_link_levels] =
- skutable->LclkFreq[link_level];
- pcie_table->num_of_link_levels++;
- }
-
/* dcefclk dpm table setup */
dpm_table = &dpm_context->dpm_tables.dcef_table;
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCN_BIT)) {
@@ -3150,6 +3130,90 @@ static int smu_v13_0_0_set_power_limit(struct smu_context *smu,
return 0;
}
+static int smu_v13_0_0_update_pcie_parameters(struct smu_context *smu,
+ uint8_t pcie_gen_cap,
+ uint8_t pcie_width_cap)
+{
+ struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
+ struct smu_13_0_pcie_table *pcie_table =
+ &dpm_context->dpm_tables.pcie_table;
+ int num_of_levels;
+ uint32_t smu_pcie_arg;
+ uint32_t link_level;
+ struct smu_table_context *table_context = &smu->smu_table;
+ PPTable_t *pptable = table_context->driver_pptable;
+ SkuTable_t *skutable = &pptable->SkuTable;
+ int ret = 0;
+ int i;
+
+ pcie_table->num_of_link_levels = 0;
+
+ for (link_level = 0; link_level < NUM_LINK_LEVELS; link_level++) {
+ if (!skutable->PcieGenSpeed[link_level] &&
+ !skutable->PcieLaneCount[link_level] &&
+ !skutable->LclkFreq[link_level])
+ continue;
+
+ pcie_table->pcie_gen[pcie_table->num_of_link_levels] =
+ skutable->PcieGenSpeed[link_level];
+ pcie_table->pcie_lane[pcie_table->num_of_link_levels] =
+ skutable->PcieLaneCount[link_level];
+ pcie_table->clk_freq[pcie_table->num_of_link_levels] =
+ skutable->LclkFreq[link_level];
+ pcie_table->num_of_link_levels++;
+ }
+
+ num_of_levels = pcie_table->num_of_link_levels;
+ if (!num_of_levels)
+ return 0;
+
+ if (!(smu->adev->pm.pp_feature & PP_PCIE_DPM_MASK)) {
+ if (pcie_table->pcie_gen[num_of_levels - 1] < pcie_gen_cap)
+ pcie_gen_cap = pcie_table->pcie_gen[num_of_levels - 1];
+
+ if (pcie_table->pcie_lane[num_of_levels - 1] < pcie_width_cap)
+ pcie_width_cap = pcie_table->pcie_lane[num_of_levels - 1];
+
+ /* Force all levels to use the same settings */
+ for (i = 0; i < num_of_levels; i++) {
+ pcie_table->pcie_gen[i] = pcie_gen_cap;
+ pcie_table->pcie_lane[i] = pcie_width_cap;
+ smu_pcie_arg = i << 16;
+ smu_pcie_arg |= pcie_table->pcie_gen[i] << 8;
+ smu_pcie_arg |= pcie_table->pcie_lane[i];
+
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_OverridePcieParameters,
+ smu_pcie_arg,
+ NULL);
+ if (ret)
+ break;
+ }
+ } else {
+ for (i = 0; i < num_of_levels; i++) {
+ if (pcie_table->pcie_gen[i] > pcie_gen_cap ||
+ pcie_table->pcie_lane[i] > pcie_width_cap) {
+ pcie_table->pcie_gen[i] = pcie_table->pcie_gen[i] > pcie_gen_cap ?
+ pcie_gen_cap : pcie_table->pcie_gen[i];
+ pcie_table->pcie_lane[i] = pcie_table->pcie_lane[i] > pcie_width_cap ?
+ pcie_width_cap : pcie_table->pcie_lane[i];
+ smu_pcie_arg = i << 16;
+ smu_pcie_arg |= pcie_table->pcie_gen[i] << 8;
+ smu_pcie_arg |= pcie_table->pcie_lane[i];
+
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_OverridePcieParameters,
+ smu_pcie_arg,
+ NULL);
+ if (ret)
+ break;
+ }
+ }
+ }
+
+ return ret;
+}
+
static const struct pptable_funcs smu_v13_0_0_ppt_funcs = {
.get_allowed_feature_mask = smu_v13_0_0_get_allowed_feature_mask,
.set_default_dpm_table = smu_v13_0_0_set_default_dpm_table,
@@ -3179,7 +3243,7 @@ static const struct pptable_funcs smu_v13_0_0_ppt_funcs = {
.feature_is_enabled = smu_cmn_feature_is_enabled,
.print_clk_levels = smu_v13_0_0_print_clk_levels,
.force_clk_levels = smu_v13_0_0_force_clk_levels,
- .update_pcie_parameters = smu_v13_0_update_pcie_parameters,
+ .update_pcie_parameters = smu_v13_0_0_update_pcie_parameters,
.get_thermal_temperature_range = smu_v13_0_0_get_thermal_temperature_range,
.register_irq_handler = smu_v13_0_register_irq_handler,
.enable_thermal_alert = smu_v13_0_enable_thermal_alert,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c
index 238bd71baa6d..02a455a31c25 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c
@@ -187,28 +187,34 @@ int smu_v13_0_12_get_max_metrics_size(void)
return max(sizeof(StaticMetricsTable_t), sizeof(MetricsTable_t));
}
-static int smu_v13_0_12_get_static_metrics_table(struct smu_context *smu)
+static void smu_v13_0_12_init_xgmi_data(struct smu_context *smu,
+ StaticMetricsTable_t *static_metrics)
{
struct smu_table_context *smu_table = &smu->smu_table;
- uint32_t table_size = smu_table->tables[SMU_TABLE_SMU_METRICS].size;
- struct smu_table *table = &smu_table->driver_table;
+ uint16_t max_speed;
+ uint8_t max_width;
int ret;
- ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetStaticMetricsTable, NULL);
- if (ret) {
- dev_info(smu->adev->dev,
- "Failed to export static metrics table!\n");
- return ret;
- }
-
- amdgpu_asic_invalidate_hdp(smu->adev, NULL);
- memcpy(smu_table->metrics_table, table->cpu_addr, table_size);
+ if (smu_table->tables[SMU_TABLE_SMU_METRICS].version >= 0x13) {
+ max_width = (uint8_t)static_metrics->MaxXgmiWidth;
+ max_speed = (uint16_t)static_metrics->MaxXgmiBitrate;
+ ret = 0;
+ } else {
+ MetricsTable_t *metrics = (MetricsTable_t *)smu_table->metrics_table;
- return 0;
+ ret = smu_v13_0_6_get_metrics_table(smu, NULL, true);
+ if (!ret) {
+ max_width = (uint8_t)metrics->XgmiWidth;
+ max_speed = (uint16_t)metrics->XgmiBitrate;
+ }
+ }
+ if (!ret)
+ amgpu_xgmi_set_max_speed_width(smu->adev, max_speed, max_width);
}
int smu_v13_0_12_setup_driver_pptable(struct smu_context *smu)
{
+ struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
struct smu_table_context *smu_table = &smu->smu_table;
StaticMetricsTable_t *static_metrics = (StaticMetricsTable_t *)smu_table->metrics_table;
struct PPTable_t *pptable =
@@ -217,7 +223,7 @@ int smu_v13_0_12_setup_driver_pptable(struct smu_context *smu)
int ret, i;
if (!pptable->Init) {
- ret = smu_v13_0_12_get_static_metrics_table(smu);
+ ret = smu_v13_0_6_get_static_metrics_table(smu);
if (ret)
return ret;
@@ -257,6 +263,18 @@ int smu_v13_0_12_setup_driver_pptable(struct smu_context *smu)
if (ret)
return ret;
+ if (smu_v13_0_6_cap_supported(smu, SMU_CAP(BOARD_VOLTAGE))) {
+ if (!static_metrics->InputTelemetryVoltageInmV) {
+ dev_warn(smu->adev->dev, "Invalid board voltage %d\n",
+ static_metrics->InputTelemetryVoltageInmV);
+ }
+ dpm_context->board_volt = static_metrics->InputTelemetryVoltageInmV;
+ }
+ if (smu_v13_0_6_cap_supported(smu, SMU_CAP(PLDM_VERSION)) &&
+ static_metrics->pldmVersion[0] != 0xFFFFFFFF)
+ smu->adev->firmware.pldm_version =
+ static_metrics->pldmVersion[0];
+ smu_v13_0_12_init_xgmi_data(smu, static_metrics);
pptable->Init = true;
}
@@ -283,7 +301,6 @@ int smu_v13_0_12_get_smu_metrics_data(struct smu_context *smu,
struct smu_table_context *smu_table = &smu->smu_table;
MetricsTable_t *metrics = (MetricsTable_t *)smu_table->metrics_table;
struct amdgpu_device *adev = smu->adev;
- int ret = 0;
int xcc_id;
/* For clocks with multiple instances, only report the first one */
@@ -339,14 +356,70 @@ int smu_v13_0_12_get_smu_metrics_data(struct smu_context *smu,
break;
}
- return ret;
+ return 0;
+}
+
+ssize_t smu_v13_0_12_get_xcp_metrics(struct smu_context *smu, struct amdgpu_xcp *xcp, void *table, void *smu_metrics)
+{
+ const u8 num_jpeg_rings = NUM_JPEG_RINGS_FW;
+ struct amdgpu_partition_metrics_v1_0 *xcp_metrics;
+ struct amdgpu_device *adev = smu->adev;
+ MetricsTable_t *metrics;
+ int inst, j, k, idx;
+ u32 inst_mask;
+
+ metrics = (MetricsTable_t *)smu_metrics;
+ xcp_metrics = (struct amdgpu_partition_metrics_v1_0 *) table;
+ smu_cmn_init_partition_metrics(xcp_metrics, 1, 0);
+ amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_VCN, &inst_mask);
+ idx = 0;
+ for_each_inst(k, inst_mask) {
+ /* Both JPEG and VCN has same instance */
+ inst = GET_INST(VCN, k);
+ for (j = 0; j < num_jpeg_rings; ++j) {
+ xcp_metrics->jpeg_busy[(idx * num_jpeg_rings) + j] =
+ SMUQ10_ROUND(metrics->
+ JpegBusy[(inst * num_jpeg_rings) + j]);
+ }
+ xcp_metrics->vcn_busy[idx] =
+ SMUQ10_ROUND(metrics->VcnBusy[inst]);
+ xcp_metrics->current_vclk0[idx] = SMUQ10_ROUND(
+ metrics->VclkFrequency[inst]);
+ xcp_metrics->current_dclk0[idx] = SMUQ10_ROUND(
+ metrics->DclkFrequency[inst]);
+ xcp_metrics->current_socclk[idx] = SMUQ10_ROUND(
+ metrics->SocclkFrequency[inst]);
+
+ idx++;
+ }
+
+ xcp_metrics->current_uclk =
+ SMUQ10_ROUND(metrics->UclkFrequency);
+
+ amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_GFX, &inst_mask);
+ idx = 0;
+ for_each_inst(k, inst_mask) {
+ inst = GET_INST(GC, k);
+ xcp_metrics->current_gfxclk[idx] = SMUQ10_ROUND(metrics->GfxclkFrequency[inst]);
+ xcp_metrics->gfx_busy_inst[idx] = SMUQ10_ROUND(metrics->GfxBusy[inst]);
+ xcp_metrics->gfx_busy_acc[idx] = SMUQ10_ROUND(metrics->GfxBusyAcc[inst]);
+ if (smu_v13_0_6_cap_supported(smu, SMU_CAP(HST_LIMIT_METRICS))) {
+ xcp_metrics->gfx_below_host_limit_ppt_acc[idx] = SMUQ10_ROUND(metrics->GfxclkBelowHostLimitPptAcc[inst]);
+ xcp_metrics->gfx_below_host_limit_thm_acc[idx] = SMUQ10_ROUND(metrics->GfxclkBelowHostLimitThmAcc[inst]);
+ xcp_metrics->gfx_low_utilization_acc[idx] = SMUQ10_ROUND(metrics->GfxclkLowUtilizationAcc[inst]);
+ xcp_metrics->gfx_below_host_limit_total_acc[idx] = SMUQ10_ROUND(metrics->GfxclkBelowHostLimitTotalAcc[inst]);
+ }
+ idx++;
+ }
+
+ return sizeof(*xcp_metrics);
}
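
Most of the per-instance metrics above are reported by firmware in Q10 fixed point (10 fractional bits) and converted with SMUQ10_ROUND. A sketch consistent with that convention; the round-to-nearest behaviour is an assumption based on the macro name:

#include <stdint.h>
#include <stdio.h>

#define Q10_SHIFT 10
#define Q10_HALF  (1u << (Q10_SHIFT - 1))

static uint32_t q10_round(uint32_t q)
{
	return (q + Q10_HALF) >> Q10_SHIFT;	/* round to nearest integer */
}

int main(void)
{
	/* 1536 == 1.5 in Q10 -> rounds to 2 */
	printf("%u\n", q10_round(1536));
}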
-ssize_t smu_v13_0_12_get_gpu_metrics(struct smu_context *smu, void **table)
+ssize_t smu_v13_0_12_get_gpu_metrics(struct smu_context *smu, void **table, void *smu_metrics)
{
struct smu_table_context *smu_table = &smu->smu_table;
- struct gpu_metrics_v1_7 *gpu_metrics =
- (struct gpu_metrics_v1_7 *)smu_table->gpu_metrics_table;
+ struct gpu_metrics_v1_8 *gpu_metrics =
+ (struct gpu_metrics_v1_8 *)smu_table->gpu_metrics_table;
int ret = 0, xcc_id, inst, i, j, k, idx;
struct amdgpu_device *adev = smu->adev;
u8 num_jpeg_rings_gpu_metrics;
@@ -354,10 +427,9 @@ ssize_t smu_v13_0_12_get_gpu_metrics(struct smu_context *smu, void **table)
struct amdgpu_xcp *xcp;
u32 inst_mask;
- metrics = kzalloc(sizeof(MetricsTable_t), GFP_KERNEL);
- memcpy(metrics, smu_table->metrics_table, sizeof(MetricsTable_t));
+ metrics = (MetricsTable_t *)smu_metrics;
- smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 7);
+ smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 8);
gpu_metrics->temperature_hotspot =
SMUQ10_ROUND(metrics->MaxSocketTemperature);
@@ -436,13 +508,16 @@ ssize_t smu_v13_0_12_get_gpu_metrics(struct smu_context *smu, void **table)
gpu_metrics->mem_activity_acc = SMUQ10_ROUND(metrics->DramBandwidthUtilizationAcc);
for (i = 0; i < NUM_XGMI_LINKS; i++) {
- gpu_metrics->xgmi_read_data_acc[i] =
+ j = amdgpu_xgmi_get_ext_link(adev, i);
+ if (j < 0 || j >= NUM_XGMI_LINKS)
+ continue;
+ gpu_metrics->xgmi_read_data_acc[j] =
SMUQ10_ROUND(metrics->XgmiReadDataSizeAcc[i]);
- gpu_metrics->xgmi_write_data_acc[i] =
+ gpu_metrics->xgmi_write_data_acc[j] =
SMUQ10_ROUND(metrics->XgmiWriteDataSizeAcc[i]);
ret = amdgpu_get_xgmi_link_status(adev, i);
if (ret >= 0)
- gpu_metrics->xgmi_link_status[i] = ret;
+ gpu_metrics->xgmi_link_status[j] = ret;
}
gpu_metrics->num_partition = adev->xcp_mgr->num_xcps;
@@ -474,6 +549,16 @@ ssize_t smu_v13_0_12_get_gpu_metrics(struct smu_context *smu, void **table)
SMUQ10_ROUND(metrics->GfxBusy[inst]);
gpu_metrics->xcp_stats[i].gfx_busy_acc[idx] =
SMUQ10_ROUND(metrics->GfxBusyAcc[inst]);
+ if (smu_v13_0_6_cap_supported(smu, SMU_CAP(HST_LIMIT_METRICS))) {
+ gpu_metrics->xcp_stats[i].gfx_below_host_limit_ppt_acc[idx] =
+ SMUQ10_ROUND(metrics->GfxclkBelowHostLimitPptAcc[inst]);
+ gpu_metrics->xcp_stats[i].gfx_below_host_limit_thm_acc[idx] =
+ SMUQ10_ROUND(metrics->GfxclkBelowHostLimitThmAcc[inst]);
+ gpu_metrics->xcp_stats[i].gfx_low_utilization_acc[idx] =
+ SMUQ10_ROUND(metrics->GfxclkLowUtilizationAcc[inst]);
+ gpu_metrics->xcp_stats[i].gfx_below_host_limit_total_acc[idx] =
+ SMUQ10_ROUND(metrics->GfxclkBelowHostLimitTotalAcc[inst]);
+ }
idx++;
}
}
@@ -484,7 +569,6 @@ ssize_t smu_v13_0_12_get_gpu_metrics(struct smu_context *smu, void **table)
gpu_metrics->firmware_timestamp = metrics->Timestamp;
*table = (void *)gpu_metrics;
- kfree(metrics);
return sizeof(*gpu_metrics);
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
index c478b3be37af..9cc294f4708b 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
@@ -101,24 +101,6 @@ MODULE_FIRMWARE("amdgpu/smu_13_0_14.bin");
#define MCA_BANK_IPID(_ip, _hwid, _type) \
[AMDGPU_MCA_IP_##_ip] = { .hwid = _hwid, .mcatype = _type, }
-#define SMU_CAP(x) SMU_13_0_6_CAPS_##x
-
-enum smu_v13_0_6_caps {
- SMU_CAP(DPM),
- SMU_CAP(DPM_POLICY),
- SMU_CAP(OTHER_END_METRICS),
- SMU_CAP(SET_UCLK_MAX),
- SMU_CAP(PCIE_METRICS),
- SMU_CAP(MCA_DEBUG_MODE),
- SMU_CAP(PER_INST_METRICS),
- SMU_CAP(CTF_LIMIT),
- SMU_CAP(RMA_MSG),
- SMU_CAP(ACA_SYND),
- SMU_CAP(SDMA_RESET),
- SMU_CAP(STATIC_METRICS),
- SMU_CAP(ALL),
-};
-
struct mca_bank_ipid {
enum amdgpu_mca_ip ip;
uint16_t hwid;
@@ -194,6 +176,8 @@ static const struct cmn2asic_msg_mapping smu_v13_0_6_message_map[SMU_MSG_MAX_COU
MSG_MAP(RmaDueToBadPageThreshold, PPSMC_MSG_RmaDueToBadPageThreshold, 0),
MSG_MAP(SetThrottlingPolicy, PPSMC_MSG_SetThrottlingPolicy, 0),
MSG_MAP(ResetSDMA, PPSMC_MSG_ResetSDMA, 0),
+ MSG_MAP(ResetVCN, PPSMC_MSG_ResetVCN, 0),
+ MSG_MAP(GetStaticMetricsTable, PPSMC_MSG_GetStaticMetricsTable, 0),
};
// clang-format on
@@ -299,8 +283,8 @@ static inline void smu_v13_0_6_cap_clear(struct smu_context *smu,
dpm_context->caps &= ~BIT_ULL(cap);
}
-static inline bool smu_v13_0_6_cap_supported(struct smu_context *smu,
- enum smu_v13_0_6_caps cap)
+bool smu_v13_0_6_cap_supported(struct smu_context *smu,
+ enum smu_v13_0_6_caps cap)
{
struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
@@ -328,6 +312,11 @@ static void smu_v13_0_14_init_caps(struct smu_context *smu)
smu_v13_0_6_cap_set(smu, SMU_CAP(PER_INST_METRICS));
if (fw_ver >= 0x5551200)
smu_v13_0_6_cap_set(smu, SMU_CAP(SDMA_RESET));
+ if (fw_ver >= 0x5551600) {
+ smu_v13_0_6_cap_set(smu, SMU_CAP(STATIC_METRICS));
+ smu_v13_0_6_cap_set(smu, SMU_CAP(BOARD_VOLTAGE));
+ smu_v13_0_6_cap_set(smu, SMU_CAP(PLDM_VERSION));
+ }
}
static void smu_v13_0_12_init_caps(struct smu_context *smu)
@@ -353,6 +342,14 @@ static void smu_v13_0_12_init_caps(struct smu_context *smu)
if (fw_ver >= 0x00561E00)
smu_v13_0_6_cap_set(smu, SMU_CAP(STATIC_METRICS));
+
+ if (fw_ver >= 0x00562500)
+ smu_v13_0_6_cap_set(smu, SMU_CAP(HST_LIMIT_METRICS));
+
+ if (fw_ver >= 0x04560100) {
+ smu_v13_0_6_cap_set(smu, SMU_CAP(BOARD_VOLTAGE));
+ smu_v13_0_6_cap_set(smu, SMU_CAP(PLDM_VERSION));
+ }
}
static void smu_v13_0_6_init_caps(struct smu_context *smu)
@@ -402,6 +399,17 @@ static void smu_v13_0_6_init_caps(struct smu_context *smu)
smu_v13_0_6_cap_clear(smu, SMU_CAP(RMA_MSG));
if (fw_ver < 0x00555600)
smu_v13_0_6_cap_clear(smu, SMU_CAP(ACA_SYND));
+ if ((pgm == 7 && fw_ver >= 0x7550E00) ||
+ (pgm == 0 && fw_ver >= 0x00557E00))
+ smu_v13_0_6_cap_set(smu, SMU_CAP(HST_LIMIT_METRICS));
+ if ((pgm == 0 && fw_ver >= 0x00557F01) ||
+ (pgm == 7 && fw_ver >= 0x7551000)) {
+ smu_v13_0_6_cap_set(smu, SMU_CAP(STATIC_METRICS));
+ smu_v13_0_6_cap_set(smu, SMU_CAP(BOARD_VOLTAGE));
+ }
+ if ((pgm == 0 && fw_ver >= 0x00558000) ||
+ (pgm == 7 && fw_ver >= 0x7551000))
+ smu_v13_0_6_cap_set(smu, SMU_CAP(PLDM_VERSION));
}
if (((pgm == 7) && (fw_ver >= 0x7550700)) ||
((pgm == 0) && (fw_ver >= 0x00557900)) ||
@@ -525,7 +533,7 @@ static int smu_v13_0_6_tables_init(struct smu_context *smu)
return -ENOMEM;
smu_table->metrics_time = 0;
- smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_7);
+ smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_8);
smu_table->gpu_metrics_table =
kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL);
if (!smu_table->gpu_metrics_table) {
@@ -682,8 +690,8 @@ static int smu_v13_0_6_get_allowed_feature_mask(struct smu_context *smu,
return 0;
}
-static int smu_v13_0_6_get_metrics_table(struct smu_context *smu,
- void *metrics_table, bool bypass_cache)
+int smu_v13_0_6_get_metrics_table(struct smu_context *smu, void *metrics_table,
+ bool bypass_cache)
{
struct smu_table_context *smu_table = &smu->smu_table;
uint32_t table_size = smu_table->tables[SMU_TABLE_SMU_METRICS].size;
@@ -747,9 +755,48 @@ static ssize_t smu_v13_0_6_get_pm_metrics(struct smu_context *smu,
return pm_metrics->common_header.structure_size;
}
+static void smu_v13_0_6_fill_static_metrics_table(struct smu_context *smu,
+ StaticMetricsTable_t *static_metrics)
+{
+ struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
+
+	if (!static_metrics->InputTelemetryVoltageInmV)
+		dev_warn(smu->adev->dev, "Invalid board voltage %d\n",
+			 static_metrics->InputTelemetryVoltageInmV);
+
+ dpm_context->board_volt = static_metrics->InputTelemetryVoltageInmV;
+
+ if (smu_v13_0_6_cap_supported(smu, SMU_CAP(PLDM_VERSION)) &&
+ static_metrics->pldmVersion[0] != 0xFFFFFFFF)
+ smu->adev->firmware.pldm_version =
+ static_metrics->pldmVersion[0];
+}
+
+int smu_v13_0_6_get_static_metrics_table(struct smu_context *smu)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ uint32_t table_size = smu_table->tables[SMU_TABLE_SMU_METRICS].size;
+ struct smu_table *table = &smu_table->driver_table;
+ int ret;
+
+ ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetStaticMetricsTable, NULL);
+ if (ret) {
+ dev_info(smu->adev->dev,
+ "Failed to export static metrics table!\n");
+ return ret;
+ }
+
+ amdgpu_asic_invalidate_hdp(smu->adev, NULL);
+ memcpy(smu_table->metrics_table, table->cpu_addr, table_size);
+
+ return 0;
+}
+
static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
+ StaticMetricsTable_t *static_metrics = (StaticMetricsTable_t *)smu_table->metrics_table;
MetricsTableV0_t *metrics_v0 = (MetricsTableV0_t *)smu_table->metrics_table;
MetricsTableV1_t *metrics_v1 = (MetricsTableV1_t *)smu_table->metrics_table;
MetricsTableV2_t *metrics_v2 = (MetricsTableV2_t *)smu_table->metrics_table;
@@ -758,8 +805,11 @@ static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu)
int version = smu_v13_0_6_get_metrics_version(smu);
int ret, i, retry = 100;
uint32_t table_version;
+ uint16_t max_speed;
+ uint8_t max_width;
- if (smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS)))
+ if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 12) &&
+ smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS)))
return smu_v13_0_12_setup_driver_pptable(smu);
/* Store one-time values in driver PPTable */
@@ -792,6 +842,9 @@ static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu)
SMUQ10_ROUND(GET_METRIC_FIELD(MaxGfxclkFrequency, version));
pptable->MinGfxclkFrequency =
SMUQ10_ROUND(GET_METRIC_FIELD(MinGfxclkFrequency, version));
+ max_width = (uint8_t)GET_METRIC_FIELD(XgmiWidth, version);
+ max_speed = (uint16_t)GET_METRIC_FIELD(XgmiBitrate, version);
+	amdgpu_xgmi_set_max_speed_width(smu->adev, max_speed, max_width);
for (i = 0; i < 4; ++i) {
pptable->FclkFrequencyTable[i] =
@@ -813,6 +866,12 @@ static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu)
GET_METRIC_FIELD(PublicSerialNumber_AID, version)[0];
pptable->Init = true;
+ if (smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS))) {
+ ret = smu_v13_0_6_get_static_metrics_table(smu);
+ if (ret)
+ return ret;
+ smu_v13_0_6_fill_static_metrics_table(smu, static_metrics);
+ }
}
return 0;
@@ -822,51 +881,51 @@ static int smu_v13_0_6_get_dpm_ultimate_freq(struct smu_context *smu,
enum smu_clk_type clk_type,
uint32_t *min, uint32_t *max)
{
+ struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
struct smu_table_context *smu_table = &smu->smu_table;
struct PPTable_t *pptable =
(struct PPTable_t *)smu_table->driver_pptable;
- uint32_t clock_limit = 0, param;
+ struct smu_13_0_dpm_table *dpm_table;
+ uint32_t min_clk, max_clk, param;
int ret = 0, clk_id = 0;
- if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type)) {
+	/* Use DPM tables if the data has already been fetched */
+ if (pptable->Init) {
switch (clk_type) {
case SMU_MCLK:
case SMU_UCLK:
- if (pptable->Init)
- clock_limit = pptable->UclkFrequencyTable[0];
+ dpm_table = &dpm_context->dpm_tables.uclk_table;
break;
case SMU_GFXCLK:
case SMU_SCLK:
- if (pptable->Init)
- clock_limit = pptable->MinGfxclkFrequency;
+ dpm_table = &dpm_context->dpm_tables.gfx_table;
break;
case SMU_SOCCLK:
- if (pptable->Init)
- clock_limit = pptable->SocclkFrequencyTable[0];
+ dpm_table = &dpm_context->dpm_tables.soc_table;
break;
case SMU_FCLK:
- if (pptable->Init)
- clock_limit = pptable->FclkFrequencyTable[0];
+ dpm_table = &dpm_context->dpm_tables.fclk_table;
break;
case SMU_VCLK:
- if (pptable->Init)
- clock_limit = pptable->VclkFrequencyTable[0];
+ dpm_table = &dpm_context->dpm_tables.vclk_table;
break;
case SMU_DCLK:
- if (pptable->Init)
- clock_limit = pptable->DclkFrequencyTable[0];
+ dpm_table = &dpm_context->dpm_tables.dclk_table;
break;
default:
- break;
+ return -EINVAL;
}
- if (min)
- *min = clock_limit;
+ min_clk = dpm_table->min;
+ max_clk = dpm_table->max;
+ if (min)
+ *min = min_clk;
if (max)
- *max = clock_limit;
+ *max = max_clk;
- return 0;
+ if (min_clk && max_clk)
+ return 0;
}
if (!(clk_type == SMU_GFXCLK || clk_type == SMU_SCLK)) {
@@ -1142,7 +1201,8 @@ static int smu_v13_0_6_get_smu_metrics_data(struct smu_context *smu,
if (ret)
return ret;
- if (smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS)))
+ if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 12) &&
+ smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS)))
return smu_v13_0_12_get_smu_metrics_data(smu, member, value);
/* For clocks with multiple instances, only report the first one */
@@ -1327,8 +1387,9 @@ static int smu_v13_0_6_print_clk_levels(struct smu_context *smu,
return ret;
}
- min_clk = pstate_table->gfxclk_pstate.curr.min;
- max_clk = pstate_table->gfxclk_pstate.curr.max;
+ single_dpm_table = &(dpm_context->dpm_tables.gfx_table);
+ min_clk = single_dpm_table->min;
+ max_clk = single_dpm_table->max;
if (now < SMU_13_0_6_DSCLK_THRESHOLD) {
size += sysfs_emit_at(buf, size, "S: %uMhz *\n",
@@ -1616,6 +1677,7 @@ static int smu_v13_0_6_read_sensor(struct smu_context *smu,
enum amd_pp_sensors sensor, void *data,
uint32_t *size)
{
+ struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
int ret = 0;
if (amdgpu_ras_intr_triggered())
@@ -1660,6 +1722,15 @@ static int smu_v13_0_6_read_sensor(struct smu_context *smu,
ret = smu_v13_0_get_gfx_vdd(smu, (uint32_t *)data);
*size = 4;
break;
+	case AMDGPU_PP_SENSOR_VDDBOARD:
+		if (smu_v13_0_6_cap_supported(smu, SMU_CAP(BOARD_VOLTAGE))) {
+			*(uint32_t *)data = dpm_context->board_volt;
+			*size = 4;
+		} else {
+			ret = -EOPNOTSUPP;
+		}
+		break;
case AMDGPU_PP_SENSOR_GPU_AVG_POWER:
default:
ret = -EOPNOTSUPP;
@@ -1927,7 +1998,7 @@ static int smu_v13_0_6_set_performance_level(struct smu_context *smu,
return ret;
pstate_table->uclk_pstate.curr.max = uclk_table->max;
}
- pstate_table->uclk_pstate.custom.max = 0;
+ smu_v13_0_reset_custom_level(smu);
return 0;
case AMD_DPM_FORCED_LEVEL_MANUAL:
@@ -2140,7 +2211,7 @@ static int smu_v13_0_6_usr_edit_dpm_table(struct smu_context *smu,
smu, SMU_UCLK, min_clk, max_clk, false);
if (ret)
return ret;
- pstate_table->uclk_pstate.custom.max = 0;
+ smu_v13_0_reset_custom_level(smu);
}
break;
case PP_OD_COMMIT_DPM_TABLE:
@@ -2483,11 +2554,131 @@ static int smu_v13_0_6_get_current_pcie_link_speed(struct smu_context *smu)
return pcie_gen_to_speed(speed_level + 1);
}
+static ssize_t smu_v13_0_6_get_xcp_metrics(struct smu_context *smu, int xcp_id,
+ void *table)
+{
+ const u8 num_jpeg_rings = AMDGPU_MAX_JPEG_RINGS_4_0_3;
+ int version = smu_v13_0_6_get_metrics_version(smu);
+ struct amdgpu_partition_metrics_v1_0 *xcp_metrics;
+ struct amdgpu_device *adev = smu->adev;
+ int ret, inst, i, j, k, idx;
+ MetricsTableV0_t *metrics_v0;
+ MetricsTableV1_t *metrics_v1;
+ MetricsTableV2_t *metrics_v2;
+ struct amdgpu_xcp *xcp;
+ u32 inst_mask;
+ bool per_inst;
+
+ if (!table)
+ return sizeof(*xcp_metrics);
+
+ for_each_xcp(adev->xcp_mgr, xcp, i) {
+ if (xcp->id == xcp_id)
+ break;
+ }
+ if (i == adev->xcp_mgr->num_xcps)
+ return -EINVAL;
+
+ xcp_metrics = (struct amdgpu_partition_metrics_v1_0 *)table;
+ smu_cmn_init_partition_metrics(xcp_metrics, 1, 0);
+
+ metrics_v0 = kzalloc(METRICS_TABLE_SIZE, GFP_KERNEL);
+ if (!metrics_v0)
+ return -ENOMEM;
+
+ ret = smu_v13_0_6_get_metrics_table(smu, metrics_v0, false);
+ if (ret) {
+ kfree(metrics_v0);
+ return ret;
+ }
+
+ if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) ==
+ IP_VERSION(13, 0, 12) &&
+ smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS))) {
+ ret = smu_v13_0_12_get_xcp_metrics(smu, xcp, table, metrics_v0);
+ goto out;
+ }
+
+ metrics_v1 = (MetricsTableV1_t *)metrics_v0;
+ metrics_v2 = (MetricsTableV2_t *)metrics_v0;
+
+ per_inst = smu_v13_0_6_cap_supported(smu, SMU_CAP(PER_INST_METRICS));
+
+ amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_VCN, &inst_mask);
+ idx = 0;
+ for_each_inst(k, inst_mask) {
+		/* Both JPEG and VCN have the same instances */
+ inst = GET_INST(VCN, k);
+
+ for (j = 0; j < num_jpeg_rings; ++j) {
+ xcp_metrics->jpeg_busy[(idx * num_jpeg_rings) + j] =
+ SMUQ10_ROUND(GET_METRIC_FIELD(
+ JpegBusy,
+ version)[(inst * num_jpeg_rings) + j]);
+ }
+ xcp_metrics->vcn_busy[idx] =
+ SMUQ10_ROUND(GET_METRIC_FIELD(VcnBusy, version)[inst]);
+
+ xcp_metrics->current_vclk0[idx] = SMUQ10_ROUND(
+ GET_METRIC_FIELD(VclkFrequency, version)[inst]);
+ xcp_metrics->current_dclk0[idx] = SMUQ10_ROUND(
+ GET_METRIC_FIELD(DclkFrequency, version)[inst]);
+ xcp_metrics->current_socclk[idx] = SMUQ10_ROUND(
+ GET_METRIC_FIELD(SocclkFrequency, version)[inst]);
+
+ idx++;
+ }
+
+ xcp_metrics->current_uclk =
+ SMUQ10_ROUND(GET_METRIC_FIELD(UclkFrequency, version));
+
+ if (per_inst) {
+ amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_GFX, &inst_mask);
+ idx = 0;
+ for_each_inst(k, inst_mask) {
+ inst = GET_INST(GC, k);
+ xcp_metrics->current_gfxclk[idx] =
+ SMUQ10_ROUND(GET_METRIC_FIELD(GfxclkFrequency,
+ version)[inst]);
+
+ xcp_metrics->gfx_busy_inst[idx] = SMUQ10_ROUND(
+ GET_GPU_METRIC_FIELD(GfxBusy, version)[inst]);
+ xcp_metrics->gfx_busy_acc[idx] = SMUQ10_ROUND(
+ GET_GPU_METRIC_FIELD(GfxBusyAcc,
+ version)[inst]);
+			if (smu_v13_0_6_cap_supported(smu, SMU_CAP(HST_LIMIT_METRICS))) {
+				xcp_metrics->gfx_below_host_limit_ppt_acc[idx] =
+					SMUQ10_ROUND(metrics_v0->GfxclkBelowHostLimitPptAcc[inst]);
+				xcp_metrics->gfx_below_host_limit_thm_acc[idx] =
+					SMUQ10_ROUND(metrics_v0->GfxclkBelowHostLimitThmAcc[inst]);
+				xcp_metrics->gfx_low_utilization_acc[idx] =
+					SMUQ10_ROUND(metrics_v0->GfxclkLowUtilizationAcc[inst]);
+				xcp_metrics->gfx_below_host_limit_total_acc[idx] =
+					SMUQ10_ROUND(metrics_v0->GfxclkBelowHostLimitTotalAcc[inst]);
+			}
+ idx++;
+ }
+ }
+out:
+ kfree(metrics_v0);
+
+ return sizeof(*xcp_metrics);
+}
+
static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table)
{
struct smu_table_context *smu_table = &smu->smu_table;
- struct gpu_metrics_v1_7 *gpu_metrics =
- (struct gpu_metrics_v1_7 *)smu_table->gpu_metrics_table;
+ struct gpu_metrics_v1_8 *gpu_metrics =
+ (struct gpu_metrics_v1_8 *)smu_table->gpu_metrics_table;
int version = smu_v13_0_6_get_metrics_version(smu);
int ret = 0, xcc_id, inst, i, j, k, idx;
struct amdgpu_device *adev = smu->adev;
@@ -2496,24 +2687,29 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table
MetricsTableV2_t *metrics_v2;
struct amdgpu_xcp *xcp;
u16 link_width_level;
+ ssize_t num_bytes;
u8 num_jpeg_rings;
u32 inst_mask;
bool per_inst;
 	metrics_v0 = kzalloc(METRICS_TABLE_SIZE, GFP_KERNEL);
+	if (!metrics_v0)
+		return -ENOMEM;
+
-	ret = smu_v13_0_6_get_metrics_table(smu, metrics_v0, true);
+	ret = smu_v13_0_6_get_metrics_table(smu, metrics_v0, false);
if (ret) {
kfree(metrics_v0);
return ret;
}
- if (smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS)))
- return smu_v13_0_12_get_gpu_metrics(smu, table);
+ if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 12) &&
+ smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS))) {
+ num_bytes = smu_v13_0_12_get_gpu_metrics(smu, table, metrics_v0);
+ kfree(metrics_v0);
+ return num_bytes;
+ }
metrics_v1 = (MetricsTableV1_t *)metrics_v0;
metrics_v2 = (MetricsTableV2_t *)metrics_v0;
- smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 7);
+ smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 8);
gpu_metrics->temperature_hotspot =
SMUQ10_ROUND(GET_METRIC_FIELD(MaxSocketTemperature, version));
@@ -2623,13 +2819,16 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table
SMUQ10_ROUND(GET_METRIC_FIELD(DramBandwidthUtilizationAcc, version));
for (i = 0; i < NUM_XGMI_LINKS; i++) {
- gpu_metrics->xgmi_read_data_acc[i] =
- SMUQ10_ROUND(GET_METRIC_FIELD(XgmiReadDataSizeAcc, version)[i]);
- gpu_metrics->xgmi_write_data_acc[i] =
- SMUQ10_ROUND(GET_METRIC_FIELD(XgmiWriteDataSizeAcc, version)[i]);
+ j = amdgpu_xgmi_get_ext_link(adev, i);
+ if (j < 0 || j >= NUM_XGMI_LINKS)
+ continue;
+ gpu_metrics->xgmi_read_data_acc[j] = SMUQ10_ROUND(
+ GET_METRIC_FIELD(XgmiReadDataSizeAcc, version)[i]);
+ gpu_metrics->xgmi_write_data_acc[j] = SMUQ10_ROUND(
+ GET_METRIC_FIELD(XgmiWriteDataSizeAcc, version)[i]);
ret = amdgpu_get_xgmi_link_status(adev, i);
if (ret >= 0)
- gpu_metrics->xgmi_link_status[i] = ret;
+ gpu_metrics->xgmi_link_status[j] = ret;
}
gpu_metrics->num_partition = adev->xcp_mgr->num_xcps;
@@ -2666,6 +2865,20 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table
gpu_metrics->xcp_stats[i].gfx_busy_acc[idx] =
SMUQ10_ROUND(GET_GPU_METRIC_FIELD(GfxBusyAcc,
version)[inst]);
+ if (smu_v13_0_6_cap_supported(smu, SMU_CAP(HST_LIMIT_METRICS))) {
+				gpu_metrics->xcp_stats[i].gfx_below_host_limit_ppt_acc[idx] =
+					SMUQ10_ROUND(metrics_v0->GfxclkBelowHostLimitPptAcc[inst]);
+				gpu_metrics->xcp_stats[i].gfx_below_host_limit_thm_acc[idx] =
+					SMUQ10_ROUND(metrics_v0->GfxclkBelowHostLimitThmAcc[inst]);
+				gpu_metrics->xcp_stats[i].gfx_low_utilization_acc[idx] =
+					SMUQ10_ROUND(metrics_v0->GfxclkLowUtilizationAcc[inst]);
+				gpu_metrics->xcp_stats[i].gfx_below_host_limit_total_acc[idx] =
+					SMUQ10_ROUND(metrics_v0->GfxclkBelowHostLimitTotalAcc[inst]);
+			}
idx++;
}
}
@@ -2844,14 +3057,29 @@ static int smu_v13_0_6_mode1_reset(struct smu_context *smu)
return ret;
}
+static int smu_v13_0_6_link_reset(struct smu_context *smu)
+{
+ int ret = 0;
+
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset,
+ SMU_RESET_MODE_4, NULL);
+ return ret;
+}
+
static bool smu_v13_0_6_is_mode1_reset_supported(struct smu_context *smu)
{
return true;
}
-static bool smu_v13_0_6_is_mode2_reset_supported(struct smu_context *smu)
+static inline bool smu_v13_0_6_is_link_reset_supported(struct smu_context *smu)
{
- return true;
+	struct amdgpu_device *adev = smu->adev;
+
+	/* Only the 0x1 device-id variant supports link (mode-4) reset */
+	return (adev->pdev->device & 0xF) == 0x1;
}
static int smu_v13_0_6_smu_send_hbm_bad_page_num(struct smu_context *smu,
@@ -2924,6 +3152,19 @@ static int smu_v13_0_6_reset_sdma(struct smu_context *smu, uint32_t inst_mask)
return ret;
}
+static int smu_v13_0_6_reset_vcn(struct smu_context *smu, uint32_t inst_mask)
+{
+ int ret = 0;
+
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ResetVCN, inst_mask, NULL);
+ if (ret)
+ dev_err(smu->adev->dev,
+ "failed to send ResetVCN event with mask 0x%x\n",
+ inst_mask);
+ return ret;
+}
+
static int mca_smu_set_debug_mode(struct amdgpu_device *adev, bool enable)
{
struct smu_context *smu = adev->powerplay.pp_handle;
@@ -3584,11 +3825,13 @@ static const struct pptable_funcs smu_v13_0_6_ppt_funcs = {
.get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
.get_gpu_metrics = smu_v13_0_6_get_gpu_metrics,
.get_pm_metrics = smu_v13_0_6_get_pm_metrics,
+ .get_xcp_metrics = smu_v13_0_6_get_xcp_metrics,
.get_thermal_temperature_range = smu_v13_0_6_get_thermal_temperature_range,
.mode1_reset_is_support = smu_v13_0_6_is_mode1_reset_supported,
- .mode2_reset_is_support = smu_v13_0_6_is_mode2_reset_supported,
+ .link_reset_is_support = smu_v13_0_6_is_link_reset_supported,
.mode1_reset = smu_v13_0_6_mode1_reset,
.mode2_reset = smu_v13_0_6_mode2_reset,
+ .link_reset = smu_v13_0_6_link_reset,
.wait_for_event = smu_v13_0_wait_for_event,
.i2c_init = smu_v13_0_6_i2c_control_init,
.i2c_fini = smu_v13_0_6_i2c_control_fini,
@@ -3596,6 +3839,7 @@ static const struct pptable_funcs smu_v13_0_6_ppt_funcs = {
.send_rma_reason = smu_v13_0_6_send_rma_reason,
.reset_sdma = smu_v13_0_6_reset_sdma,
.reset_sdma_is_supported = smu_v13_0_6_reset_sdma_is_supported,
+ .dpm_reset_vcn = smu_v13_0_6_reset_vcn,
};
void smu_v13_0_6_set_ppt_funcs(struct smu_context *smu)
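smu_v13_0_6_get_xcp_metrics() above follows a query-size-then-fill contract: a NULL table returns sizeof(*xcp_metrics) so the caller can allocate before asking for data. A hedged sketch of a caller using that contract (in the driver the call goes through pptable_funcs->get_xcp_metrics; the direct call here is only for illustration):

/* Sketch: size query with NULL, then a second call to fill the buffer. */
ssize_t size;
void *buf;

size = smu_v13_0_6_get_xcp_metrics(smu, xcp_id, NULL);
buf = kzalloc(size, GFP_KERNEL);
if (!buf)
	return -ENOMEM;
size = smu_v13_0_6_get_xcp_metrics(smu, xcp_id, buf);
if (size < 0) {
	kfree(buf);
	return size;
}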
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h
index 83745909e564..67b30674fd31 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h
@@ -26,6 +26,7 @@
#define SMU_13_0_6_UMD_PSTATE_GFXCLK_LEVEL 0x2
#define SMU_13_0_6_UMD_PSTATE_SOCCLK_LEVEL 0x4
#define SMU_13_0_6_UMD_PSTATE_MCLK_LEVEL 0x2
+#define SMU_CAP(x) SMU_13_0_6_CAPS_##x
typedef enum {
/*0*/ METRICS_VERSION_V0 = 0,
@@ -51,6 +52,40 @@ struct PPTable_t {
bool Init;
};
+enum smu_v13_0_6_caps {
+ SMU_CAP(DPM),
+ SMU_CAP(DPM_POLICY),
+ SMU_CAP(OTHER_END_METRICS),
+ SMU_CAP(SET_UCLK_MAX),
+ SMU_CAP(PCIE_METRICS),
+ SMU_CAP(MCA_DEBUG_MODE),
+ SMU_CAP(PER_INST_METRICS),
+ SMU_CAP(CTF_LIMIT),
+ SMU_CAP(RMA_MSG),
+ SMU_CAP(ACA_SYND),
+ SMU_CAP(SDMA_RESET),
+ SMU_CAP(STATIC_METRICS),
+ SMU_CAP(HST_LIMIT_METRICS),
+ SMU_CAP(BOARD_VOLTAGE),
+ SMU_CAP(PLDM_VERSION),
+ SMU_CAP(ALL),
+};
+
extern void smu_v13_0_6_set_ppt_funcs(struct smu_context *smu);
+bool smu_v13_0_6_cap_supported(struct smu_context *smu, enum smu_v13_0_6_caps cap);
+int smu_v13_0_6_get_static_metrics_table(struct smu_context *smu);
+int smu_v13_0_6_get_metrics_table(struct smu_context *smu, void *metrics_table,
+ bool bypass_cache);
+bool smu_v13_0_12_is_dpm_running(struct smu_context *smu);
+int smu_v13_0_12_get_max_metrics_size(void);
+int smu_v13_0_12_setup_driver_pptable(struct smu_context *smu);
+int smu_v13_0_12_get_smu_metrics_data(struct smu_context *smu,
+ MetricsMember_t member, uint32_t *value);
+ssize_t smu_v13_0_12_get_gpu_metrics(struct smu_context *smu, void **table, void *smu_metrics);
+ssize_t smu_v13_0_12_get_xcp_metrics(struct smu_context *smu,
+ struct amdgpu_xcp *xcp, void *table,
+ void *smu_metrics);
+extern const struct cmn2asic_mapping smu_v13_0_12_feature_mask_map[];
+extern const struct cmn2asic_msg_mapping smu_v13_0_12_message_map[];
#endif
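Moving SMU_CAP() and enum smu_v13_0_6_caps into this header lets the 13.0.12 code share the capability checks. The macro only pastes the short name onto the SMU_13_0_6_CAPS_ prefix; the per-device capability set is a 64-bit mask indexed by the enum, matching the BIT_ULL() set/clear helpers in smu_v13_0_6_ppt.c. A minimal sketch of that mechanism (caps_set/caps_test are illustrative stand-ins for the driver's cap_set/cap_supported helpers):

/* Sketch: capabilities as a u64 bitmask keyed by enum smu_v13_0_6_caps. */
static inline void caps_set(uint64_t *caps, enum smu_v13_0_6_caps cap)
{
	*caps |= BIT_ULL(cap); /* SMU_CAP(DPM) pastes to SMU_13_0_6_CAPS_DPM */
}

static inline bool caps_test(uint64_t caps, enum smu_v13_0_6_caps cap)
{
	return !!(caps & BIT_ULL(cap));
}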
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
index c8f4f6fb4083..c96fa5e49ed6 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
@@ -579,8 +579,6 @@ static int smu_v13_0_7_set_default_dpm_table(struct smu_context *smu)
PPTable_t *driver_ppt = smu->smu_table.driver_pptable;
SkuTable_t *skutable = &driver_ppt->SkuTable;
struct smu_13_0_dpm_table *dpm_table;
- struct smu_13_0_pcie_table *pcie_table;
- uint32_t link_level;
int ret = 0;
/* socclk dpm table setup */
@@ -687,24 +685,6 @@ static int smu_v13_0_7_set_default_dpm_table(struct smu_context *smu)
dpm_table->max = dpm_table->dpm_levels[0].value;
}
- /* lclk dpm table setup */
- pcie_table = &dpm_context->dpm_tables.pcie_table;
- pcie_table->num_of_link_levels = 0;
- for (link_level = 0; link_level < NUM_LINK_LEVELS; link_level++) {
- if (!skutable->PcieGenSpeed[link_level] &&
- !skutable->PcieLaneCount[link_level] &&
- !skutable->LclkFreq[link_level])
- continue;
-
- pcie_table->pcie_gen[pcie_table->num_of_link_levels] =
- skutable->PcieGenSpeed[link_level];
- pcie_table->pcie_lane[pcie_table->num_of_link_levels] =
- skutable->PcieLaneCount[link_level];
- pcie_table->clk_freq[pcie_table->num_of_link_levels] =
- skutable->LclkFreq[link_level];
- pcie_table->num_of_link_levels++;
- }
-
/* dcefclk dpm table setup */
dpm_table = &dpm_context->dpm_tables.dcef_table;
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCN_BIT)) {
@@ -2739,6 +2719,89 @@ static int smu_v13_0_7_set_power_limit(struct smu_context *smu,
return 0;
}
+static int smu_v13_0_7_update_pcie_parameters(struct smu_context *smu,
+ uint8_t pcie_gen_cap,
+ uint8_t pcie_width_cap)
+{
+ struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
+ struct smu_13_0_pcie_table *pcie_table =
+ &dpm_context->dpm_tables.pcie_table;
+ int num_of_levels;
+ int link_level;
+ uint32_t smu_pcie_arg;
+ struct smu_table_context *table_context = &smu->smu_table;
+ PPTable_t *pptable = table_context->driver_pptable;
+ SkuTable_t *skutable = &pptable->SkuTable;
+ int ret = 0;
+ int i;
+
+ pcie_table->num_of_link_levels = 0;
+ for (link_level = 0; link_level < NUM_LINK_LEVELS; link_level++) {
+ if (!skutable->PcieGenSpeed[link_level] &&
+ !skutable->PcieLaneCount[link_level] &&
+ !skutable->LclkFreq[link_level])
+ continue;
+
+ pcie_table->pcie_gen[pcie_table->num_of_link_levels] =
+ skutable->PcieGenSpeed[link_level];
+ pcie_table->pcie_lane[pcie_table->num_of_link_levels] =
+ skutable->PcieLaneCount[link_level];
+ pcie_table->clk_freq[pcie_table->num_of_link_levels] =
+ skutable->LclkFreq[link_level];
+ pcie_table->num_of_link_levels++;
+ }
+
+ num_of_levels = pcie_table->num_of_link_levels;
+ if (!num_of_levels)
+ return 0;
+
+ if (!(smu->adev->pm.pp_feature & PP_PCIE_DPM_MASK)) {
+ if (pcie_table->pcie_gen[num_of_levels - 1] < pcie_gen_cap)
+ pcie_gen_cap = pcie_table->pcie_gen[num_of_levels - 1];
+
+ if (pcie_table->pcie_lane[num_of_levels - 1] < pcie_width_cap)
+ pcie_width_cap = pcie_table->pcie_lane[num_of_levels - 1];
+
+ /* Force all levels to use the same settings */
+ for (i = 0; i < num_of_levels; i++) {
+ pcie_table->pcie_gen[i] = pcie_gen_cap;
+ pcie_table->pcie_lane[i] = pcie_width_cap;
+ smu_pcie_arg = i << 16;
+ smu_pcie_arg |= pcie_table->pcie_gen[i] << 8;
+ smu_pcie_arg |= pcie_table->pcie_lane[i];
+
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_OverridePcieParameters,
+ smu_pcie_arg,
+ NULL);
+ if (ret)
+ break;
+ }
+ } else {
+ for (i = 0; i < num_of_levels; i++) {
+ if (pcie_table->pcie_gen[i] > pcie_gen_cap ||
+ pcie_table->pcie_lane[i] > pcie_width_cap) {
+ pcie_table->pcie_gen[i] = pcie_table->pcie_gen[i] > pcie_gen_cap ?
+ pcie_gen_cap : pcie_table->pcie_gen[i];
+ pcie_table->pcie_lane[i] = pcie_table->pcie_lane[i] > pcie_width_cap ?
+ pcie_width_cap : pcie_table->pcie_lane[i];
+ smu_pcie_arg = i << 16;
+ smu_pcie_arg |= pcie_table->pcie_gen[i] << 8;
+ smu_pcie_arg |= pcie_table->pcie_lane[i];
+
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_OverridePcieParameters,
+ smu_pcie_arg,
+ NULL);
+ if (ret)
+ break;
+ }
+ }
+ }
+
+ return ret;
+}
+
static const struct pptable_funcs smu_v13_0_7_ppt_funcs = {
.get_allowed_feature_mask = smu_v13_0_7_get_allowed_feature_mask,
.set_default_dpm_table = smu_v13_0_7_set_default_dpm_table,
@@ -2768,7 +2831,7 @@ static const struct pptable_funcs smu_v13_0_7_ppt_funcs = {
.feature_is_enabled = smu_cmn_feature_is_enabled,
.print_clk_levels = smu_v13_0_7_print_clk_levels,
.force_clk_levels = smu_v13_0_7_force_clk_levels,
- .update_pcie_parameters = smu_v13_0_update_pcie_parameters,
+ .update_pcie_parameters = smu_v13_0_7_update_pcie_parameters,
.get_thermal_temperature_range = smu_v13_0_7_get_thermal_temperature_range,
.register_irq_handler = smu_v13_0_register_irq_handler,
.enable_thermal_alert = smu_v13_0_enable_thermal_alert,
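Both branches of smu_v13_0_7_update_pcie_parameters() pack one override per link level into a single 32-bit argument for SMU_MSG_OverridePcieParameters: the level in bits 16 and up, the PCIe gen in bits 8-15, and the lane-count code in bits 0-7. A small sketch of the encoding (field widths inferred from the shifts in the hunk, not from a published interface spec):

/* Sketch: argument layout used with SMU_MSG_OverridePcieParameters. */
static inline uint32_t pcie_override_arg(uint32_t level, uint32_t gen,
					 uint32_t lanes)
{
	return (level << 16) | (gen << 8) | lanes;
}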
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
index 76c1adda83db..f9b0938c57ea 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
@@ -62,13 +62,14 @@ const int decoded_link_width[8] = {0, 1, 2, 4, 8, 12, 16, 32};
MODULE_FIRMWARE("amdgpu/smu_14_0_2.bin");
MODULE_FIRMWARE("amdgpu/smu_14_0_3.bin");
+MODULE_FIRMWARE("amdgpu/smu_14_0_3_kicker.bin");
#define ENABLE_IMU_ARG_GFXOFF_ENABLE 1
int smu_v14_0_init_microcode(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
- char ucode_prefix[15];
+ char ucode_prefix[30];
int err = 0;
const struct smc_firmware_header_v1_0 *hdr;
const struct common_firmware_header *header;
@@ -79,8 +80,12 @@ int smu_v14_0_init_microcode(struct smu_context *smu)
return 0;
amdgpu_ucode_ip_version_decode(adev, MP1_HWIP, ucode_prefix, sizeof(ucode_prefix));
- err = amdgpu_ucode_request(adev, &adev->pm.fw, AMDGPU_UCODE_REQUIRED,
- "amdgpu/%s.bin", ucode_prefix);
+ if (amdgpu_is_kicker_fw(adev))
+ err = amdgpu_ucode_request(adev, &adev->pm.fw, AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s_kicker.bin", ucode_prefix);
+ else
+ err = amdgpu_ucode_request(adev, &adev->pm.fw, AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s.bin", ucode_prefix);
if (err)
goto out;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
index 84f9b007b59f..fe00c84b1cc6 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
@@ -1207,11 +1207,13 @@ static int smu_v14_0_0_print_clk_levels(struct smu_context *smu,
static int smu_v14_0_0_set_soft_freq_limited_range(struct smu_context *smu,
enum smu_clk_type clk_type,
- uint32_t min,
- uint32_t max)
+ u32 min,
+ u32 max,
+ bool __always_unused automatic)
{
- enum smu_message_type msg_set_min, msg_set_max;
- int ret = 0;
+ enum smu_message_type msg_set_min = SMU_MSG_MAX_COUNT;
+ enum smu_message_type msg_set_max = SMU_MSG_MAX_COUNT;
+ int ret = -EINVAL;
if (!smu_v14_0_0_clk_dpm_is_enabled(smu, clk_type))
return -EINVAL;
@@ -1240,16 +1242,23 @@ static int smu_v14_0_0_set_soft_freq_limited_range(struct smu_context *smu,
msg_set_min = SMU_MSG_SetHardMinVcn1;
msg_set_max = SMU_MSG_SetSoftMaxVcn1;
break;
+ case SMU_ISPICLK:
+ msg_set_min = SMU_MSG_SetHardMinIspiclkByFreq;
+ break;
+ case SMU_ISPXCLK:
+ msg_set_min = SMU_MSG_SetHardMinIspxclkByFreq;
+ break;
default:
return -EINVAL;
}
- ret = smu_cmn_send_smc_msg_with_param(smu, msg_set_min, min, NULL);
- if (ret)
- return ret;
+ if (min && msg_set_min != SMU_MSG_MAX_COUNT)
+ ret = smu_cmn_send_smc_msg_with_param(smu, msg_set_min, min, NULL);
+
+ if (max && msg_set_max != SMU_MSG_MAX_COUNT)
+ ret = smu_cmn_send_smc_msg_with_param(smu, msg_set_max, max, NULL);
- return smu_cmn_send_smc_msg_with_param(smu, msg_set_max,
- max, NULL);
+ return ret;
}
static int smu_v14_0_0_force_clk_levels(struct smu_context *smu,
@@ -1278,7 +1287,7 @@ static int smu_v14_0_0_force_clk_levels(struct smu_context *smu,
if (ret)
break;
- ret = smu_v14_0_0_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq);
+ ret = smu_v14_0_0_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq, false);
break;
default:
ret = -EINVAL;
@@ -1426,7 +1435,8 @@ static int smu_v14_0_common_set_performance_level(struct smu_context *smu,
ret = smu_v14_0_0_set_soft_freq_limited_range(smu,
SMU_SCLK,
sclk_min,
- sclk_max);
+ sclk_max,
+ false);
if (ret)
return ret;
@@ -1438,7 +1448,8 @@ static int smu_v14_0_common_set_performance_level(struct smu_context *smu,
ret = smu_v14_0_0_set_soft_freq_limited_range(smu,
SMU_FCLK,
fclk_min,
- fclk_max);
+ fclk_max,
+ false);
if (ret)
return ret;
}
@@ -1447,7 +1458,8 @@ static int smu_v14_0_common_set_performance_level(struct smu_context *smu,
ret = smu_v14_0_0_set_soft_freq_limited_range(smu,
SMU_SOCCLK,
socclk_min,
- socclk_max);
+ socclk_max,
+ false);
if (ret)
return ret;
}
@@ -1456,7 +1468,8 @@ static int smu_v14_0_common_set_performance_level(struct smu_context *smu,
ret = smu_v14_0_0_set_soft_freq_limited_range(smu,
SMU_VCLK,
vclk_min,
- vclk_max);
+ vclk_max,
+ false);
if (ret)
return ret;
}
@@ -1465,7 +1478,8 @@ static int smu_v14_0_common_set_performance_level(struct smu_context *smu,
ret = smu_v14_0_0_set_soft_freq_limited_range(smu,
SMU_VCLK1,
vclk1_min,
- vclk1_max);
+ vclk1_max,
+ false);
if (ret)
return ret;
}
@@ -1474,7 +1488,8 @@ static int smu_v14_0_common_set_performance_level(struct smu_context *smu,
ret = smu_v14_0_0_set_soft_freq_limited_range(smu,
SMU_DCLK,
dclk_min,
- dclk_max);
+ dclk_max,
+ false);
if (ret)
return ret;
}
@@ -1483,7 +1498,8 @@ static int smu_v14_0_common_set_performance_level(struct smu_context *smu,
ret = smu_v14_0_0_set_soft_freq_limited_range(smu,
SMU_DCLK1,
dclk1_min,
- dclk1_max);
+ dclk1_max,
+ false);
if (ret)
return ret;
}
@@ -1533,6 +1549,14 @@ static int smu_v14_0_0_set_vpe_enable(struct smu_context *smu,
0, NULL);
}
+static int smu_v14_0_0_set_isp_enable(struct smu_context *smu,
+ bool enable)
+{
+ return smu_cmn_send_smc_msg_with_param(smu, enable ?
+ SMU_MSG_PowerUpIspByTile : SMU_MSG_PowerDownIspByTile,
+ ISP_ALL_TILES_MASK, NULL);
+}
+
static int smu_v14_0_0_set_umsch_mm_enable(struct smu_context *smu,
bool enable)
{
@@ -1662,6 +1686,7 @@ static const struct pptable_funcs smu_v14_0_0_ppt_funcs = {
.gfx_off_control = smu_v14_0_gfx_off_control,
.mode2_reset = smu_v14_0_0_mode2_reset,
.get_dpm_ultimate_freq = smu_v14_0_common_get_dpm_ultimate_freq,
+ .set_soft_freq_limited_range = smu_v14_0_0_set_soft_freq_limited_range,
.od_edit_dpm_table = smu_v14_0_od_edit_dpm_table,
.print_clk_levels = smu_v14_0_0_print_clk_levels,
.force_clk_levels = smu_v14_0_0_force_clk_levels,
@@ -1669,6 +1694,7 @@ static const struct pptable_funcs smu_v14_0_0_ppt_funcs = {
.set_fine_grain_gfx_freq_parameters = smu_v14_0_common_set_fine_grain_gfx_freq_parameters,
.set_gfx_power_up_by_imu = smu_v14_0_set_gfx_power_up_by_imu,
.dpm_set_vpe_enable = smu_v14_0_0_set_vpe_enable,
+ .dpm_set_isp_enable = smu_v14_0_0_set_isp_enable,
.dpm_set_umsch_mm_enable = smu_v14_0_0_set_umsch_mm_enable,
.get_dpm_clock_table = smu_v14_0_common_get_dpm_table,
.set_mall_enable = smu_v14_0_common_set_mall_enable,
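The reworked smu_v14_0_0_set_soft_freq_limited_range() treats SMU_MSG_MAX_COUNT as a "no message mapped" sentinel, so min-only clocks such as the new ISP clocks never send a max request, and ret stays -EINVAL if nothing was sent at all. A hedged usage sketch for a floor-only request (the 400 MHz value is arbitrary; real callers reach this through the pptable_funcs hook):

/* Sketch: min-only clock -- max = 0 sends just the hard-min message. */
ret = smu_v14_0_0_set_soft_freq_limited_range(smu, SMU_ISPICLK,
					      400, 0, false);

Note that when both limits are mapped and non-zero, ret reflects only the second (max) request.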
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
index 82c2db972491..3aea32baea3d 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
@@ -502,8 +502,6 @@ static int smu_v14_0_2_set_default_dpm_table(struct smu_context *smu)
PPTable_t *pptable = table_context->driver_pptable;
SkuTable_t *skutable = &pptable->SkuTable;
struct smu_14_0_dpm_table *dpm_table;
- struct smu_14_0_pcie_table *pcie_table;
- uint32_t link_level;
int ret = 0;
/* socclk dpm table setup */
@@ -619,27 +617,6 @@ static int smu_v14_0_2_set_default_dpm_table(struct smu_context *smu)
dpm_table->max = dpm_table->dpm_levels[0].value;
}
- /* lclk dpm table setup */
- pcie_table = &dpm_context->dpm_tables.pcie_table;
- pcie_table->num_of_link_levels = 0;
- for (link_level = 0; link_level < NUM_LINK_LEVELS; link_level++) {
- if (!skutable->PcieGenSpeed[link_level] &&
- !skutable->PcieLaneCount[link_level] &&
- !skutable->LclkFreq[link_level])
- continue;
-
- pcie_table->pcie_gen[pcie_table->num_of_link_levels] =
- skutable->PcieGenSpeed[link_level];
- pcie_table->pcie_lane[pcie_table->num_of_link_levels] =
- skutable->PcieLaneCount[link_level];
- pcie_table->clk_freq[pcie_table->num_of_link_levels] =
- skutable->LclkFreq[link_level];
- pcie_table->num_of_link_levels++;
-
- if (link_level == 0)
- link_level++;
- }
-
/* dcefclk dpm table setup */
dpm_table = &dpm_context->dpm_tables.dcef_table;
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCN_BIT)) {
@@ -1487,10 +1464,31 @@ static int smu_v14_0_2_update_pcie_parameters(struct smu_context *smu,
struct smu_14_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
struct smu_14_0_pcie_table *pcie_table =
&dpm_context->dpm_tables.pcie_table;
- int num_of_levels = pcie_table->num_of_link_levels;
+ int num_of_levels;
uint32_t smu_pcie_arg;
- int ret, i;
+ uint32_t link_level;
+ struct smu_table_context *table_context = &smu->smu_table;
+ PPTable_t *pptable = table_context->driver_pptable;
+ SkuTable_t *skutable = &pptable->SkuTable;
+ int ret = 0;
+ int i;
+
+ pcie_table->num_of_link_levels = 0;
+ for (link_level = 0; link_level < NUM_LINK_LEVELS; link_level++) {
+ if (!skutable->PcieGenSpeed[link_level] &&
+ !skutable->PcieLaneCount[link_level] &&
+ !skutable->LclkFreq[link_level])
+ continue;
+ pcie_table->pcie_gen[pcie_table->num_of_link_levels] =
+ skutable->PcieGenSpeed[link_level];
+ pcie_table->pcie_lane[pcie_table->num_of_link_levels] =
+ skutable->PcieLaneCount[link_level];
+ pcie_table->clk_freq[pcie_table->num_of_link_levels] =
+ skutable->LclkFreq[link_level];
+ pcie_table->num_of_link_levels++;
+ }
+ num_of_levels = pcie_table->num_of_link_levels;
if (!num_of_levels)
return 0;
@@ -1505,30 +1503,40 @@ static int smu_v14_0_2_update_pcie_parameters(struct smu_context *smu,
for (i = 0; i < num_of_levels; i++) {
pcie_table->pcie_gen[i] = pcie_gen_cap;
pcie_table->pcie_lane[i] = pcie_width_cap;
+ smu_pcie_arg = i << 16;
+ smu_pcie_arg |= pcie_table->pcie_gen[i] << 8;
+ smu_pcie_arg |= pcie_table->pcie_lane[i];
+
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_OverridePcieParameters,
+ smu_pcie_arg,
+ NULL);
+ if (ret)
+ break;
}
} else {
for (i = 0; i < num_of_levels; i++) {
- if (pcie_table->pcie_gen[i] > pcie_gen_cap)
- pcie_table->pcie_gen[i] = pcie_gen_cap;
- if (pcie_table->pcie_lane[i] > pcie_width_cap)
- pcie_table->pcie_lane[i] = pcie_width_cap;
- }
- }
-
- for (i = 0; i < num_of_levels; i++) {
- smu_pcie_arg = i << 16;
- smu_pcie_arg |= pcie_table->pcie_gen[i] << 8;
- smu_pcie_arg |= pcie_table->pcie_lane[i];
-
- ret = smu_cmn_send_smc_msg_with_param(smu,
+ if (pcie_table->pcie_gen[i] > pcie_gen_cap ||
+ pcie_table->pcie_lane[i] > pcie_width_cap) {
+ pcie_table->pcie_gen[i] = pcie_table->pcie_gen[i] > pcie_gen_cap ?
+ pcie_gen_cap : pcie_table->pcie_gen[i];
+ pcie_table->pcie_lane[i] = pcie_table->pcie_lane[i] > pcie_width_cap ?
+ pcie_width_cap : pcie_table->pcie_lane[i];
+ smu_pcie_arg = i << 16;
+ smu_pcie_arg |= pcie_table->pcie_gen[i] << 8;
+ smu_pcie_arg |= pcie_table->pcie_lane[i];
+
+ ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_OverridePcieParameters,
smu_pcie_arg,
NULL);
- if (ret)
- return ret;
+ if (ret)
+ break;
+ }
+ }
}
- return 0;
+ return ret;
}
static const struct smu_temperature_range smu14_thermal_policy[] = {
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
index 80eb1a03b3ca..59f9abd0f7b8 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
@@ -86,6 +86,7 @@ static void smu_cmn_read_arg(struct smu_context *smu,
#define SMU_RESP_BUSY_OTHER 0xFC
#define SMU_RESP_DEBUG_END 0xFB
+#define SMU_RESP_UNEXP (~0U)
/**
* __smu_cmn_poll_stat -- poll for a status from the SMU
* @smu: a pointer to SMU context
@@ -171,6 +172,15 @@ static void __smu_cmn_reg_print_error(struct smu_context *smu,
dev_err_ratelimited(adev->dev,
"SMU: I'm debugging!");
break;
+ case SMU_RESP_UNEXP:
+ if (amdgpu_device_bus_status_check(smu->adev)) {
+ /* print error immediately if device is off the bus */
+ dev_err(adev->dev,
+ "SMU: response:0x%08X for index:%d param:0x%08X message:%s?",
+ reg_c2pmsg_90, msg_index, param, message);
+ break;
+ }
+ fallthrough;
default:
dev_err_ratelimited(adev->dev,
"SMU: response:0x%08X for index:%d param:0x%08X message:%s?",
@@ -1051,73 +1061,6 @@ int smu_cmn_get_combo_pptable(struct smu_context *smu)
false);
}
-void smu_cmn_init_soft_gpu_metrics(void *table, uint8_t frev, uint8_t crev)
-{
- struct metrics_table_header *header = (struct metrics_table_header *)table;
- uint16_t structure_size;
-
-#define METRICS_VERSION(a, b) ((a << 16) | b)
-
- switch (METRICS_VERSION(frev, crev)) {
- case METRICS_VERSION(1, 0):
- structure_size = sizeof(struct gpu_metrics_v1_0);
- break;
- case METRICS_VERSION(1, 1):
- structure_size = sizeof(struct gpu_metrics_v1_1);
- break;
- case METRICS_VERSION(1, 2):
- structure_size = sizeof(struct gpu_metrics_v1_2);
- break;
- case METRICS_VERSION(1, 3):
- structure_size = sizeof(struct gpu_metrics_v1_3);
- break;
- case METRICS_VERSION(1, 4):
- structure_size = sizeof(struct gpu_metrics_v1_4);
- break;
- case METRICS_VERSION(1, 5):
- structure_size = sizeof(struct gpu_metrics_v1_5);
- break;
- case METRICS_VERSION(1, 6):
- structure_size = sizeof(struct gpu_metrics_v1_6);
- break;
- case METRICS_VERSION(1, 7):
- structure_size = sizeof(struct gpu_metrics_v1_7);
- break;
- case METRICS_VERSION(1, 8):
- structure_size = sizeof(struct gpu_metrics_v1_8);
- break;
- case METRICS_VERSION(2, 0):
- structure_size = sizeof(struct gpu_metrics_v2_0);
- break;
- case METRICS_VERSION(2, 1):
- structure_size = sizeof(struct gpu_metrics_v2_1);
- break;
- case METRICS_VERSION(2, 2):
- structure_size = sizeof(struct gpu_metrics_v2_2);
- break;
- case METRICS_VERSION(2, 3):
- structure_size = sizeof(struct gpu_metrics_v2_3);
- break;
- case METRICS_VERSION(2, 4):
- structure_size = sizeof(struct gpu_metrics_v2_4);
- break;
- case METRICS_VERSION(3, 0):
- structure_size = sizeof(struct gpu_metrics_v3_0);
- break;
- default:
- return;
- }
-
-#undef METRICS_VERSION
-
- memset(header, 0xFF, structure_size);
-
- header->format_revision = frev;
- header->content_revision = crev;
- header->structure_size = structure_size;
-
-}
-
int smu_cmn_set_mp1_state(struct smu_context *smu,
enum pp_mp1_state mp1_state)
{
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
index a020277dec3e..a608cdbdada4 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
@@ -40,6 +40,31 @@
#define SMU_IH_INTERRUPT_CONTEXT_ID_FAN_ABNORMAL 0x8
#define SMU_IH_INTERRUPT_CONTEXT_ID_FAN_RECOVERY 0x9
+#define smu_cmn_init_soft_gpu_metrics(ptr, frev, crev) \
+ do { \
+ typecheck(struct gpu_metrics_v##frev##_##crev *, (ptr)); \
+ struct gpu_metrics_v##frev##_##crev *tmp = (ptr); \
+ struct metrics_table_header *header = \
+ (struct metrics_table_header *)tmp; \
+ memset(header, 0xFF, sizeof(*tmp)); \
+ header->format_revision = frev; \
+ header->content_revision = crev; \
+ header->structure_size = sizeof(*tmp); \
+ } while (0)
+
+#define smu_cmn_init_partition_metrics(ptr, fr, cr) \
+ do { \
+ typecheck(struct amdgpu_partition_metrics_v##fr##_##cr *, \
+ (ptr)); \
+ struct amdgpu_partition_metrics_v##fr##_##cr *tmp = (ptr); \
+ struct metrics_table_header *header = \
+ (struct metrics_table_header *)tmp; \
+ memset(header, 0xFF, sizeof(*tmp)); \
+ header->format_revision = fr; \
+ header->content_revision = cr; \
+ header->structure_size = sizeof(*tmp); \
+ } while (0)
+
extern const int link_speed[];
/* Helper to Convert from PCIE Gen 1/2/3/4/5/6 to 0.1 GT/s speed units */
@@ -125,8 +150,6 @@ int smu_cmn_get_metrics_table(struct smu_context *smu,
int smu_cmn_get_combo_pptable(struct smu_context *smu);
-void smu_cmn_init_soft_gpu_metrics(void *table, uint8_t frev, uint8_t crev);
-
int smu_cmn_set_mp1_state(struct smu_context *smu,
enum pp_mp1_state mp1_state);
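With the runtime frev/crev switch removed from smu_cmn.c, the version pair now selects the metrics struct at preprocessing time, and typecheck() turns a mismatched pointer into a build error instead of a silently wrong structure_size. A short usage sketch of the new macro (the v1_8 case matches its use in the 13.0.6/13.0.12 hunks above):

/* Sketch: expands against struct gpu_metrics_v1_8; passing a v1_7
 * pointer here would now fail to compile.
 */
struct gpu_metrics_v1_8 *gpu_metrics = smu_table->gpu_metrics_table;

smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 8);
/* header fields now hold frev = 1, crev = 8,
 * structure_size = sizeof(struct gpu_metrics_v1_8)
 */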