Diffstat (limited to 'drivers/gpu/drm/amd/pm/swsmu')
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c | 31
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h | 12
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/arcturus_ppsmc.h | 2
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_aldebaran.h | 17
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h | 6
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_7.h | 62
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h | 2
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h | 23
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c | 10
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c | 1
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c | 1
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c | 13
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c | 9
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c | 1
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c | 14
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c | 98
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c | 61
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c | 36
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c | 28
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c | 14
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c | 19
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c | 1
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c | 83
23 files changed, 374 insertions, 170 deletions
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index a601024ba4de..fd79b213fab4 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -66,6 +66,7 @@ static int smu_set_fan_control_mode(void *handle, u32 value);
static int smu_set_power_limit(void *handle, uint32_t limit);
static int smu_set_fan_speed_rpm(void *handle, uint32_t speed);
static int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled);
+static int smu_set_mp1_state(void *handle, enum pp_mp1_state mp1_state);
static int smu_sys_get_pp_feature_mask(void *handle,
char *buf)
@@ -134,6 +135,14 @@ int smu_get_dpm_freq_range(struct smu_context *smu,
return ret;
}
+int smu_set_gfx_power_up_by_imu(struct smu_context *smu)
+{
+	if (!smu->ppt_funcs || !smu->ppt_funcs->set_gfx_power_up_by_imu)
+ return -EOPNOTSUPP;
+
+ return smu->ppt_funcs->set_gfx_power_up_by_imu(smu);
+}
+
static u32 smu_get_mclk(void *handle, bool low)
{
struct smu_context *smu = handle;
@@ -1400,6 +1409,25 @@ static int smu_disable_dpms(struct smu_context *smu)
((adev->in_runpm || adev->in_s4) && amdgpu_asic_supports_baco(adev)));
/*
* For SMU 13.0.0 and 13.0.7, PMFW will handle the DPM features (disablement or others)
+ * properly on suspend/reset/unload. Driver involvement may cause some unexpected issues.
+ */
+ switch (adev->ip_versions[MP1_HWIP][0]) {
+ case IP_VERSION(13, 0, 0):
+ case IP_VERSION(13, 0, 7):
+ if (!(adev->in_runpm || amdgpu_in_reset(adev))) {
+ ret = smu_set_mp1_state(smu, PP_MP1_STATE_UNLOAD);
+ if (ret) {
+ dev_err(adev->dev, "Fail set mp1 state to UNLOAD!\n");
+ return ret;
+ }
+ }
+ return 0;
+ default:
+ break;
+ }
+
+ /*
* For custom pptable uploading, skip the DPM features
* disable process on Navi1x ASICs.
* - As the gfx related features are under control of
@@ -1436,7 +1464,7 @@ static int smu_disable_dpms(struct smu_context *smu)
case IP_VERSION(11, 0, 0):
case IP_VERSION(11, 0, 5):
case IP_VERSION(11, 0, 9):
- case IP_VERSION(13, 0, 0):
+ case IP_VERSION(13, 0, 7):
return 0;
default:
break;
@@ -2467,7 +2495,6 @@ static int smu_set_power_profile_mode(void *handle,
return smu_bump_power_profile_mode(smu, param, param_size);
}
-
static int smu_get_fan_control_mode(void *handle, u32 *fan_mode)
{
struct smu_context *smu = handle;
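
The new smu_set_gfx_power_up_by_imu() wrapper above follows the usual swsmu pattern for optional ppt_funcs callbacks: return -EOPNOTSUPP unless both the ops table and the specific hook are wired up, otherwise forward the call. A minimal userspace sketch of that dispatch pattern (the struct names and the stub hook below are illustrative stand-ins, not the kernel types):

#include <errno.h>
#include <stdio.h>

struct ppt_funcs_sketch {                       /* stand-in for struct pptable_funcs */
	int (*set_gfx_power_up_by_imu)(void *smu);
};

struct smu_sketch {                             /* stand-in for struct smu_context */
	struct ppt_funcs_sketch *ppt_funcs;
};

/* Forward to the ASIC hook only when it is actually wired up. */
static int set_gfx_power_up_by_imu(struct smu_sketch *smu)
{
	if (!smu->ppt_funcs || !smu->ppt_funcs->set_gfx_power_up_by_imu)
		return -EOPNOTSUPP;

	return smu->ppt_funcs->set_gfx_power_up_by_imu(smu);
}

static int dummy_hook(void *smu) { return 0; }  /* illustrative ASIC implementation */

int main(void)
{
	struct ppt_funcs_sketch ops = { .set_gfx_power_up_by_imu = dummy_hook };
	struct smu_sketch with_hook = { .ppt_funcs = &ops };
	struct smu_sketch without_hook = { .ppt_funcs = NULL };

	printf("with hook:    %d\n", set_gfx_power_up_by_imu(&with_hook));     /* 0 */
	printf("without hook: %d\n", set_gfx_power_up_by_imu(&without_hook));  /* -EOPNOTSUPP */
	return 0;
}
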
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
index a6a7b6c33683..b81c657c7386 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
@@ -432,6 +432,7 @@ struct smu_baco_context
{
uint32_t state;
bool platform_support;
+ bool maco_support;
};
struct smu_freq_info {
@@ -563,6 +564,10 @@ struct smu_context
struct stb_context stb_context;
struct firmware pptable_firmware;
+
+ u32 param_reg;
+ u32 msg_reg;
+ u32 resp_reg;
};
struct i2c_adapter;
@@ -698,6 +703,11 @@ struct pptable_funcs {
int (*dpm_set_jpeg_enable)(struct smu_context *smu, bool enable);
/**
+ * @set_gfx_power_up_by_imu: Enable GFX engine with IMU
+ */
+ int (*set_gfx_power_up_by_imu)(struct smu_context *smu);
+
+ /**
* @read_sensor: Read data from a sensor.
* &sensor: Sensor to read data from.
* &data: Sensor reading.
@@ -1438,6 +1448,8 @@ int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
uint32_t min, uint32_t max);
+int smu_set_gfx_power_up_by_imu(struct smu_context *smu);
+
int smu_set_ac_dc(struct smu_context *smu);
int smu_allow_xgmi_power_down(struct smu_context *smu, bool en);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/arcturus_ppsmc.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/arcturus_ppsmc.h
index 45f5d29bc705..15b313baa0ee 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/arcturus_ppsmc.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/arcturus_ppsmc.h
@@ -120,7 +120,7 @@
#define PPSMC_MSG_ReadSerialNumTop32 0x40
#define PPSMC_MSG_ReadSerialNumBottom32 0x41
-/* paramater for MSG_LightSBR
+/* parameter for MSG_LightSBR
* 1 -- Enable light secondary bus reset, only do nbio respond without further handling,
* leave driver to handle the real reset
* 0 -- Disable LightSBR, default behavior, SMU will pass the reset to PSP
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_aldebaran.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_aldebaran.h
index 0f67c56c2863..7a6075daa7b2 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_aldebaran.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_aldebaran.h
@@ -519,7 +519,22 @@ typedef struct {
} EccInfo_t;
typedef struct {
- EccInfo_t EccInfo[ALDEBARAN_UMC_CHANNEL_NUM];
+ uint64_t mca_umc_status;
+ uint64_t mca_umc_addr;
+
+ uint16_t ce_count_lo_chip;
+ uint16_t ce_count_hi_chip;
+
+ uint32_t eccPadding;
+
+ uint64_t mca_ceumc_addr;
+} EccInfo_V2_t;
+
+typedef struct {
+ union {
+ EccInfo_t EccInfo[ALDEBARAN_UMC_CHANNEL_NUM];
+ EccInfo_V2_t EccInfo_V2[ALDEBARAN_UMC_CHANNEL_NUM];
+ };
} EccInfoTable_t;
// These defines are used with the following messages:
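
The EccInfoTable_t change above wraps the two record layouts in an anonymous union, so one table buffer can carry either format and the driver simply picks EccInfo[] or EccInfo_V2[] based on the PMFW version, exactly what the aldebaran_ppt.c hunk later in this diff does. A compact userspace sketch of that selection (the record fields are trimmed down and CHANNEL_NUM stands in for ALDEBARAN_UMC_CHANNEL_NUM; only the version threshold mirrors SUPPORT_ECCTABLE_V2_SMU_VERSION from this diff):

#include <stdint.h>
#include <stdio.h>

#define CHANNEL_NUM 4                    /* stand-in for ALDEBARAN_UMC_CHANNEL_NUM */
#define ECCTABLE_V2_MIN_VERSION 0x00443700

struct ecc_v1 { uint64_t mca_umc_status, mca_umc_addr; };
struct ecc_v2 { uint64_t mca_umc_status, mca_umc_addr, mca_ceumc_addr; };

struct ecc_table {                       /* both layouts share one table buffer */
	union {
		struct ecc_v1 v1[CHANNEL_NUM];
		struct ecc_v2 v2[CHANNEL_NUM];
	};
};

static void parse(const struct ecc_table *t, uint32_t smu_version)
{
	for (int i = 0; i < CHANNEL_NUM; i++) {
		if (smu_version >= ECCTABLE_V2_MIN_VERSION)
			printf("ch%d ceumc addr 0x%llx\n", i,
			       (unsigned long long)t->v2[i].mca_ceumc_addr);
		else
			printf("ch%d umc addr 0x%llx\n", i,
			       (unsigned long long)t->v1[i].mca_umc_addr);
	}
}

int main(void)
{
	struct ecc_table t = { .v2 = { [0] = { .mca_ceumc_addr = 0x1000 } } };

	parse(&t, 0x00443700);               /* v2 layout: 68.55.0 and newer */
	parse(&t, 0x00442a00);               /* v1 layout: older firmware */
	return 0;
}
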
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h
index c1f76236da26..6a817c7ce110 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h
@@ -1359,8 +1359,14 @@ typedef struct {
uint16_t AverageDclk0Frequency ;
uint16_t AverageVclk1Frequency ;
uint16_t AverageDclk1Frequency ;
+ uint16_t PCIeBusy;
+ uint16_t dGPU_W_MAX;
+ uint16_t padding;
+
+ uint32_t MetricsCounter;
uint16_t AvgVoltage[SVI_PLANE_COUNT];
+ uint16_t AvgCurrent[SVI_PLANE_COUNT];
uint16_t AverageGfxActivity ;
uint16_t AverageUclkActivity ;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_7.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_7.h
index d99b4b47d49d..132da684e379 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_7.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_7.h
@@ -25,10 +25,10 @@
// *** IMPORTANT ***
// PMFW TEAM: Always increment the interface version on any change to this file
-#define SMU13_DRIVER_IF_VERSION 0x28
+#define SMU13_DRIVER_IF_VERSION 0x2A
//Increment this version if SkuTable_t or BoardTable_t change
-#define PPTABLE_VERSION 0x1D
+#define PPTABLE_VERSION 0x1E
#define NUM_GFXCLK_DPM_LEVELS 16
#define NUM_SOCCLK_DPM_LEVELS 8
@@ -112,6 +112,22 @@
#define FEATURE_SPARE_63_BIT 63
#define NUM_FEATURES 64
+#define ALLOWED_FEATURE_CTRL_DEFAULT 0xFFFFFFFFFFFFFFFFULL
+#define ALLOWED_FEATURE_CTRL_SCPM (1 << FEATURE_DPM_GFXCLK_BIT) | \
+ (1 << FEATURE_DPM_GFX_POWER_OPTIMIZER_BIT) | \
+ (1 << FEATURE_DPM_UCLK_BIT) | \
+ (1 << FEATURE_DPM_FCLK_BIT) | \
+ (1 << FEATURE_DPM_SOCCLK_BIT) | \
+ (1 << FEATURE_DPM_MP0CLK_BIT) | \
+ (1 << FEATURE_DPM_LINK_BIT) | \
+ (1 << FEATURE_DPM_DCN_BIT) | \
+ (1 << FEATURE_DS_GFXCLK_BIT) | \
+ (1 << FEATURE_DS_SOCCLK_BIT) | \
+ (1 << FEATURE_DS_FCLK_BIT) | \
+ (1 << FEATURE_DS_LCLK_BIT) | \
+ (1 << FEATURE_DS_DCFCLK_BIT) | \
+ (1 << FEATURE_DS_UCLK_BIT)
+
//For use with feature control messages
typedef enum {
FEATURE_PWR_ALL,
@@ -662,7 +678,7 @@ typedef struct {
#define PP_NUM_OD_VF_CURVE_POINTS PP_NUM_RTAVFS_PWL_ZONES + 1
-#define PP_OD_FEATURE_VF_CURVE_BIT 0
+#define PP_OD_FEATURE_GFX_VF_CURVE_BIT 0
#define PP_OD_FEATURE_VMAX_BIT 1
#define PP_OD_FEATURE_PPT_BIT 2
#define PP_OD_FEATURE_FAN_CURVE_BIT 3
@@ -671,6 +687,8 @@ typedef struct {
#define PP_OD_FEATURE_TDC_BIT 6
#define PP_OD_FEATURE_GFXCLK_BIT 7
#define PP_OD_FEATURE_UCLK_BIT 8
+#define PP_OD_FEATURE_ZERO_FAN_BIT 9
+#define PP_OD_FEATURE_TEMPERATURE_BIT 10
typedef enum {
PP_OD_POWER_FEATURE_ALWAYS_ENABLED,
@@ -689,8 +707,8 @@ typedef struct {
uint8_t RuntimePwrSavingFeaturesCtrl;
//Frequency changes
- uint16_t GfxclkFmin; // MHz
- uint16_t GfxclkFmax; // MHz
+ int16_t GfxclkFmin; // MHz
+ int16_t GfxclkFmax; // MHz
uint16_t UclkFmin; // MHz
uint16_t UclkFmax; // MHz
@@ -701,17 +719,17 @@ typedef struct {
//Fan control
uint8_t FanLinearPwmPoints[NUM_OD_FAN_MAX_POINTS];
uint8_t FanLinearTempPoints[NUM_OD_FAN_MAX_POINTS];
- uint16_t FanMaximumRpm;
uint16_t FanMinimumPwm;
- uint16_t FanAcousticLimitRpm;
+ uint16_t AcousticTargetRpmThreshold;
+ uint16_t AcousticLimitRpmThreshold;
uint16_t FanTargetTemperature; // Degree Celcius
uint8_t FanZeroRpmEnable;
uint8_t FanZeroRpmStopTemp;
uint8_t FanMode;
- uint8_t Padding[1];
-
+ uint8_t MaxOpTemp;
+ uint8_t Padding[4];
- uint32_t Spare[13];
+ uint32_t Spare[12];
uint32_t MmHubPadding[8]; // SMU internal use. Adding here instead of external as a workaround
} OverDriveTable_t;
@@ -740,17 +758,17 @@ typedef struct {
uint8_t FanLinearPwmPoints;
uint8_t FanLinearTempPoints;
- uint16_t FanMaximumRpm;
uint16_t FanMinimumPwm;
- uint16_t FanAcousticLimitRpm;
+ uint16_t AcousticTargetRpmThreshold;
+ uint16_t AcousticLimitRpmThreshold;
uint16_t FanTargetTemperature; // Degree Celcius
uint8_t FanZeroRpmEnable;
uint8_t FanZeroRpmStopTemp;
uint8_t FanMode;
- uint8_t Padding[1];
+ uint8_t MaxOpTemp;
+ uint8_t Padding[4];
-
- uint32_t Spare[13];
+ uint32_t Spare[12];
} OverDriveLimits_t;
@@ -1018,7 +1036,8 @@ typedef struct {
uint16_t SocketPowerLimitAcTau[PPT_THROTTLER_COUNT]; // Time constant of LPF in ms
uint16_t SocketPowerLimitDcTau[PPT_THROTTLER_COUNT]; // Time constant of LPF in ms
- uint32_t SpareVmin[12];
+ QuadraticInt_t Vmin_droop;
+ uint32_t SpareVmin[9];
//SECTION: DPM Configuration 1
@@ -1307,7 +1326,6 @@ typedef struct {
uint32_t PostVoltageSetBacoDelay; // in microseconds. Amount of time FW will wait after power good is established or PSI0 command is issued
uint32_t BacoEntryDelay; // in milliseconds. Amount of time FW will wait to trigger BACO entry after receiving entry notification from OS
-
// SECTION: Board Reserved
uint32_t BoardSpare[64];
@@ -1382,8 +1400,14 @@ typedef struct {
uint16_t AverageDclk0Frequency ;
uint16_t AverageVclk1Frequency ;
uint16_t AverageDclk1Frequency ;
+ uint16_t PCIeBusy ;
+ uint16_t dGPU_W_MAX ;
+ uint16_t padding ;
+
+ uint32_t MetricsCounter ;
uint16_t AvgVoltage[SVI_PLANE_COUNT];
+ uint16_t AvgCurrent[SVI_PLANE_COUNT];
uint16_t AverageGfxActivity ;
uint16_t AverageUclkActivity ;
@@ -1415,11 +1439,13 @@ typedef struct {
uint16_t AverageUclkActivity_MAX;
+ uint32_t PublicSerialNumberLower;
+ uint32_t PublicSerialNumberUpper;
} SmuMetrics_t;
typedef struct {
SmuMetrics_t SmuMetrics;
- uint32_t Spare[32];
+ uint32_t Spare[30];
// Padding - ignore
uint32_t MmHubPadding[8]; // SMU internal use
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h
index acb3be292096..a9215494dcdd 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h
@@ -316,5 +316,7 @@ int smu_v11_0_handle_passthrough_sbr(struct smu_context *smu, bool enable);
int smu_v11_0_restore_user_od_settings(struct smu_context *smu);
+void smu_v11_0_set_smu_mailbox_registers(struct smu_context *smu);
+
#endif
#endif
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
index afa1991e26f9..43de0a8d4bd9 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
@@ -30,8 +30,8 @@
#define SMU13_DRIVER_IF_VERSION_ALDE 0x08
#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_4 0x04
#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_5 0x04
-#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_0 0x28
-#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_7 0x28
+#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_0 0x29
+#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_7 0x2A
#define SMU13_MODE1_RESET_WAIT_TIME_IN_MS 500 //500ms
@@ -43,6 +43,7 @@
/* address block */
#define smnMP1_FIRMWARE_FLAGS 0x3010024
+#define smnMP1_V13_0_4_FIRMWARE_FLAGS 0x3010028
#define smnMP0_FW_INTF 0x30101c0
#define smnMP1_PUB_CTRL 0x3010b14
@@ -278,19 +279,7 @@ int smu_v13_0_run_btc(struct smu_context *smu);
int smu_v13_0_deep_sleep_control(struct smu_context *smu,
bool enablement);
-int smu_v13_0_gfx_ulv_control(struct smu_context *smu,
- bool enablement);
-
-bool smu_v13_0_baco_is_support(struct smu_context *smu);
-
-enum smu_baco_state smu_v13_0_baco_get_state(struct smu_context *smu);
-
-int smu_v13_0_baco_set_state(struct smu_context *smu,
- enum smu_baco_state state);
-
-int smu_v13_0_baco_enter(struct smu_context *smu);
-
-int smu_v13_0_baco_exit(struct smu_context *smu);
+int smu_v13_0_set_gfx_power_up_by_imu(struct smu_context *smu);
int smu_v13_0_od_edit_dpm_table(struct smu_context *smu,
enum PP_OD_DPM_TABLE_COMMAND type,
@@ -298,5 +287,9 @@ int smu_v13_0_od_edit_dpm_table(struct smu_context *smu,
uint32_t size);
int smu_v13_0_set_default_dpm_tables(struct smu_context *smu);
+
+void smu_v13_0_set_smu_mailbox_registers(struct smu_context *smu);
+
+int smu_v13_0_mode1_reset(struct smu_context *smu);
#endif
#endif
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
index 201563072189..445005571f76 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
@@ -573,12 +573,13 @@ static int arcturus_get_clk_table(struct smu_context *smu,
struct pp_clock_levels_with_latency *clocks,
struct smu_11_0_dpm_table *dpm_table)
{
- int i, count;
+ uint32_t i;
- count = (dpm_table->count > MAX_NUM_CLOCKS) ? MAX_NUM_CLOCKS : dpm_table->count;
- clocks->num_levels = count;
+ clocks->num_levels = min_t(uint32_t,
+ dpm_table->count,
+ (uint32_t)PP_MAX_CLOCK_LEVELS);
- for (i = 0; i < count; i++) {
+ for (i = 0; i < clocks->num_levels; i++) {
clocks->data[i].clocks_in_khz =
dpm_table->dpm_levels[i].value * 1000;
clocks->data[i].latency_in_us = 0;
@@ -2509,4 +2510,5 @@ void arcturus_set_ppt_funcs(struct smu_context *smu)
smu->table_map = arcturus_table_map;
smu->pwr_src_map = arcturus_pwr_src_map;
smu->workload_map = arcturus_workload_map;
+ smu_v11_0_set_smu_mailbox_registers(smu);
}
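
The arcturus_get_clk_table() hunk above replaces a hand-rolled ternary clamp with min_t() and iterates over the clamped clocks->num_levels, so the copy loop cannot index past either the DPM table or clocks->data[]. A tiny stand-alone sketch of the same clamp-then-copy idea (the array sizes, values and min_u32() helper are illustrative):

#include <stdint.h>
#include <stdio.h>

#define PP_MAX_LEVELS 16                          /* stand-in for PP_MAX_CLOCK_LEVELS */

static uint32_t min_u32(uint32_t a, uint32_t b)   /* userspace stand-in for min_t() */
{
	return a < b ? a : b;
}

int main(void)
{
	uint32_t dpm_levels[20];                  /* source table may exceed the sink */
	uint32_t clocks_khz[PP_MAX_LEVELS];
	uint32_t dpm_count = 20, num_levels, i;

	for (i = 0; i < dpm_count; i++)
		dpm_levels[i] = 300 + 50 * i;     /* MHz */

	/* Clamp first, then iterate over the clamped count only. */
	num_levels = min_u32(dpm_count, PP_MAX_LEVELS);
	for (i = 0; i < num_levels; i++)
		clocks_khz[i] = dpm_levels[i] * 1000;

	printf("copied %u of %u levels, last = %u kHz\n",
	       num_levels, dpm_count, clocks_khz[num_levels - 1]);
	return 0;
}
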
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c
index f1a4a720d426..ca4d97b7f576 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c
@@ -591,4 +591,5 @@ void cyan_skillfish_set_ppt_funcs(struct smu_context *smu)
smu->message_map = cyan_skillfish_message_map;
smu->table_map = cyan_skillfish_table_map;
smu->is_apu = true;
+ smu_v11_0_set_smu_mailbox_registers(smu);
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
index 5f22fc3430f4..0bcd4fe0ef17 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
@@ -3580,4 +3580,5 @@ void navi10_set_ppt_funcs(struct smu_context *smu)
smu->table_map = navi10_table_map;
smu->pwr_src_map = navi10_pwr_src_map;
smu->workload_map = navi10_workload_map;
+ smu_v11_0_set_smu_mailbox_registers(smu);
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
index 78f3d9e722bb..b71860e5324a 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
@@ -1464,19 +1464,19 @@ static int sienna_cichlid_populate_umd_state_clk(struct smu_context *smu)
pstate_table->socclk_pstate.min = soc_table->min;
pstate_table->socclk_pstate.peak = soc_table->max;
- switch (adev->asic_type) {
- case CHIP_SIENNA_CICHLID:
- case CHIP_NAVY_FLOUNDER:
+ switch (adev->ip_versions[MP1_HWIP][0]) {
+ case IP_VERSION(11, 0, 7):
+ case IP_VERSION(11, 0, 11):
pstate_table->gfxclk_pstate.standard = SIENNA_CICHLID_UMD_PSTATE_PROFILING_GFXCLK;
pstate_table->uclk_pstate.standard = SIENNA_CICHLID_UMD_PSTATE_PROFILING_MEMCLK;
pstate_table->socclk_pstate.standard = SIENNA_CICHLID_UMD_PSTATE_PROFILING_SOCCLK;
break;
- case CHIP_DIMGREY_CAVEFISH:
+ case IP_VERSION(11, 0, 12):
pstate_table->gfxclk_pstate.standard = DIMGREY_CAVEFISH_UMD_PSTATE_PROFILING_GFXCLK;
pstate_table->uclk_pstate.standard = DIMGREY_CAVEFISH_UMD_PSTATE_PROFILING_MEMCLK;
pstate_table->socclk_pstate.standard = DIMGREY_CAVEFISH_UMD_PSTATE_PROFILING_SOCCLK;
break;
- case CHIP_BEIGE_GOBY:
+ case IP_VERSION(11, 0, 13):
pstate_table->gfxclk_pstate.standard = BEIGE_GOBY_UMD_PSTATE_PROFILING_GFXCLK;
pstate_table->uclk_pstate.standard = BEIGE_GOBY_UMD_PSTATE_PROFILING_MEMCLK;
pstate_table->socclk_pstate.standard = BEIGE_GOBY_UMD_PSTATE_PROFILING_SOCCLK;
@@ -4192,7 +4192,7 @@ static int sienna_cichlid_get_default_config_table_settings(struct smu_context *
table->gfx_activity_average_tau = 10;
table->mem_activity_average_tau = 10;
table->socket_power_average_tau = 100;
- if (adev->asic_type != CHIP_SIENNA_CICHLID)
+ if (adev->ip_versions[MP1_HWIP][0] != IP_VERSION(11, 0, 7))
table->apu_socket_power_average_tau = 100;
return 0;
@@ -4357,4 +4357,5 @@ void sienna_cichlid_set_ppt_funcs(struct smu_context *smu)
smu->table_map = sienna_cichlid_table_map;
smu->pwr_src_map = sienna_cichlid_pwr_src_map;
smu->workload_map = sienna_cichlid_workload_map;
+ smu_v11_0_set_smu_mailbox_registers(smu);
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
index 5f8809f6990d..8f828ec76c35 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
@@ -2197,3 +2197,12 @@ int smu_v11_0_restore_user_od_settings(struct smu_context *smu)
return ret;
}
+
+void smu_v11_0_set_smu_mailbox_registers(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+
+ smu->param_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_82);
+ smu->msg_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_66);
+ smu->resp_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_90);
+}
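
smu_v11_0_set_smu_mailbox_registers() is the SMU v11 half of the new mailbox abstraction introduced by this change: each ppt setup routine caches its parameter/message/response register offsets in smu->param_reg, smu->msg_reg and smu->resp_reg, and the common code in smu_cmn.c (at the end of this diff) then issues plain RREG32()/WREG32() on the cached offsets instead of switching on the IP version. A self-contained sketch of the idea, using a fake register file and illustrative offsets rather than the real SOC15_REG_OFFSET() macros:

#include <stdint.h>
#include <stdio.h>

static uint32_t regs[0x1000];                        /* fake MMIO space */
static uint32_t rreg32(uint32_t off)             { return regs[off]; }
static void     wreg32(uint32_t off, uint32_t v) { regs[off] = v; }

struct smu_sketch {                                  /* stand-in for struct smu_context */
	uint32_t param_reg, msg_reg, resp_reg;
};

/* Per-ASIC setup: cache the mailbox offsets once (illustrative values). */
static void v11_set_mailbox_registers(struct smu_sketch *smu)
{
	smu->param_reg = 0x292;                      /* C2PMSG_82 */
	smu->msg_reg   = 0x282;                      /* C2PMSG_66 */
	smu->resp_reg  = 0x29a;                      /* C2PMSG_90 */
}

/* Common path: no IP-version switch needed anymore. */
static void send_msg(struct smu_sketch *smu, uint32_t msg, uint32_t param)
{
	wreg32(smu->resp_reg, 0);                    /* clear response */
	wreg32(smu->param_reg, param);               /* write argument */
	wreg32(smu->msg_reg, msg);                   /* kick the message */
}

int main(void)
{
	struct smu_sketch smu;

	v11_set_mailbox_registers(&smu);
	send_msg(&smu, 0x2, 0xdeadbeef);
	printf("msg 0x%x param 0x%x\n", rreg32(smu.msg_reg), rreg32(smu.param_reg));
	return 0;
}
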
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
index 5551e1426ef5..e2d8ac90cf36 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
@@ -2213,4 +2213,5 @@ void vangogh_set_ppt_funcs(struct smu_context *smu)
smu->table_map = vangogh_table_map;
smu->workload_map = vangogh_workload_map;
smu->is_apu = true;
+ smu_v11_0_set_smu_mailbox_registers(smu);
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
index 012e3bd99cc2..85e22210963f 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
@@ -41,6 +41,15 @@
#undef pr_info
#undef pr_debug
+#define mmMP1_SMN_C2PMSG_66 0x0282
+#define mmMP1_SMN_C2PMSG_66_BASE_IDX 0
+
+#define mmMP1_SMN_C2PMSG_82 0x0292
+#define mmMP1_SMN_C2PMSG_82_BASE_IDX 0
+
+#define mmMP1_SMN_C2PMSG_90 0x029a
+#define mmMP1_SMN_C2PMSG_90_BASE_IDX 0
+
static struct cmn2asic_msg_mapping renoir_message_map[SMU_MSG_MAX_COUNT] = {
MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 1),
MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion, 1),
@@ -1447,6 +1456,8 @@ static const struct pptable_funcs renoir_ppt_funcs = {
void renoir_set_ppt_funcs(struct smu_context *smu)
{
+ struct amdgpu_device *adev = smu->adev;
+
smu->ppt_funcs = &renoir_ppt_funcs;
smu->message_map = renoir_message_map;
smu->clock_map = renoir_clk_map;
@@ -1454,4 +1465,7 @@ void renoir_set_ppt_funcs(struct smu_context *smu)
smu->workload_map = renoir_workload_map;
smu->smc_driver_if_version = SMU12_DRIVER_IF_VERSION;
smu->is_apu = true;
+ smu->param_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_82);
+ smu->msg_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_66);
+ smu->resp_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_90);
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
index fb130409309c..619aee51b123 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
@@ -83,6 +83,12 @@
#define SUPPORT_ECCTABLE_SMU_VERSION 0x00442a00
/*
+ * SMU supports mca_ceumc_addr in ECCTABLE since version 68.55.0;
+ * use this to check whether the mca_ceumc_addr record is supported
+ */
+#define SUPPORT_ECCTABLE_V2_SMU_VERSION 0x00443700
+
+/*
* SMU support BAD CHENNEL info MSG since version 68.51.00,
* use this to check ECCTALE feature whether support
*/
@@ -549,12 +555,13 @@ static int aldebaran_get_clk_table(struct smu_context *smu,
struct pp_clock_levels_with_latency *clocks,
struct smu_13_0_dpm_table *dpm_table)
{
- int i, count;
+ uint32_t i;
- count = (dpm_table->count > MAX_NUM_CLOCKS) ? MAX_NUM_CLOCKS : dpm_table->count;
- clocks->num_levels = count;
+ clocks->num_levels = min_t(uint32_t,
+ dpm_table->count,
+ (uint32_t)PP_MAX_CLOCK_LEVELS);
- for (i = 0; i < count; i++) {
+ for (i = 0; i < clocks->num_levels; i++) {
clocks->data[i].clocks_in_khz =
dpm_table->dpm_levels[i].value * 1000;
clocks->data[i].latency_in_us = 0;
@@ -739,7 +746,7 @@ static int aldebaran_print_clk_levels(struct smu_context *smu,
struct smu_13_0_dpm_table *single_dpm_table;
struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
struct smu_13_0_dpm_context *dpm_context = NULL;
- uint32_t display_levels;
+ int display_levels;
uint32_t freq_values[3] = {0};
uint32_t min_clk, max_clk;
@@ -771,7 +778,7 @@ static int aldebaran_print_clk_levels(struct smu_context *smu,
return ret;
}
- display_levels = clocks.num_levels;
+ display_levels = (clocks.num_levels == 1) ? 1 : 2;
min_clk = pstate_table->gfxclk_pstate.curr.min;
max_clk = pstate_table->gfxclk_pstate.curr.max;
@@ -781,30 +788,20 @@ static int aldebaran_print_clk_levels(struct smu_context *smu,
/* fine-grained dpm has only 2 levels */
if (now > min_clk && now < max_clk) {
- display_levels = clocks.num_levels + 1;
+ display_levels++;
freq_values[2] = max_clk;
freq_values[1] = now;
}
- /*
- * For DPM disabled case, there will be only one clock level.
- * And it's safe to assume that is always the current clock.
- */
- if (display_levels == clocks.num_levels) {
- for (i = 0; i < clocks.num_levels; i++)
- size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", i,
- freq_values[i],
- (clocks.num_levels == 1) ?
- "*" :
- (aldebaran_freqs_in_same_level(
- freq_values[i], now) ?
- "*" :
- ""));
- } else {
- for (i = 0; i < display_levels; i++)
- size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", i,
- freq_values[i], i == 1 ? "*" : "");
- }
+ for (i = 0; i < display_levels; i++)
+ size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", i,
+ freq_values[i],
+ (display_levels == 1) ?
+ "*" :
+ (aldebaran_freqs_in_same_level(
+ freq_values[i], now) ?
+ "*" :
+ ""));
break;
@@ -1803,7 +1800,8 @@ static ssize_t aldebaran_get_gpu_metrics(struct smu_context *smu,
return sizeof(struct gpu_metrics_v1_3);
}
-static int aldebaran_check_ecc_table_support(struct smu_context *smu)
+static int aldebaran_check_ecc_table_support(struct smu_context *smu,
+ int *ecctable_version)
{
uint32_t if_version = 0xff, smu_version = 0xff;
int ret = 0;
@@ -1816,6 +1814,11 @@ static int aldebaran_check_ecc_table_support(struct smu_context *smu)
if (smu_version < SUPPORT_ECCTABLE_SMU_VERSION)
ret = -EOPNOTSUPP;
+ else if (smu_version >= SUPPORT_ECCTABLE_SMU_VERSION &&
+ smu_version < SUPPORT_ECCTABLE_V2_SMU_VERSION)
+ *ecctable_version = 1;
+ else
+ *ecctable_version = 2;
return ret;
}
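
The thresholds compared in aldebaran_check_ecc_table_support() are packed PMFW versions; assuming the usual major/minor/patch byte packing used for these checks, 0x00443700 reads as 68.55.0 (matching the comment added earlier in this file) and 0x00442a00 as 68.42.0. A quick decode under that assumption:

#include <stdint.h>
#include <stdio.h>

static void decode(uint32_t v)     /* assumed packing: 0x00MMmmpp */
{
	printf("0x%08x -> %u.%u.%u\n", v,
	       (v >> 16) & 0xff, (v >> 8) & 0xff, v & 0xff);
}

int main(void)
{
	decode(0x00442a00);        /* SUPPORT_ECCTABLE_SMU_VERSION    -> 68.42.0 */
	decode(0x00443700);        /* SUPPORT_ECCTABLE_V2_SMU_VERSION -> 68.55.0 */
	return 0;
}
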
@@ -1827,9 +1830,10 @@ static ssize_t aldebaran_get_ecc_info(struct smu_context *smu,
EccInfoTable_t *ecc_table = NULL;
struct ecc_info_per_ch *ecc_info_per_channel = NULL;
int i, ret = 0;
+ int table_version = 0;
struct umc_ecc_info *eccinfo = (struct umc_ecc_info *)table;
- ret = aldebaran_check_ecc_table_support(smu);
+ ret = aldebaran_check_ecc_table_support(smu, &table_version);
if (ret)
return ret;
@@ -1845,16 +1849,33 @@ static ssize_t aldebaran_get_ecc_info(struct smu_context *smu,
ecc_table = (EccInfoTable_t *)smu_table->ecc_table;
- for (i = 0; i < ALDEBARAN_UMC_CHANNEL_NUM; i++) {
- ecc_info_per_channel = &(eccinfo->ecc[i]);
- ecc_info_per_channel->ce_count_lo_chip =
- ecc_table->EccInfo[i].ce_count_lo_chip;
- ecc_info_per_channel->ce_count_hi_chip =
- ecc_table->EccInfo[i].ce_count_hi_chip;
- ecc_info_per_channel->mca_umc_status =
- ecc_table->EccInfo[i].mca_umc_status;
- ecc_info_per_channel->mca_umc_addr =
- ecc_table->EccInfo[i].mca_umc_addr;
+ if (table_version == 1) {
+ for (i = 0; i < ALDEBARAN_UMC_CHANNEL_NUM; i++) {
+ ecc_info_per_channel = &(eccinfo->ecc[i]);
+ ecc_info_per_channel->ce_count_lo_chip =
+ ecc_table->EccInfo[i].ce_count_lo_chip;
+ ecc_info_per_channel->ce_count_hi_chip =
+ ecc_table->EccInfo[i].ce_count_hi_chip;
+ ecc_info_per_channel->mca_umc_status =
+ ecc_table->EccInfo[i].mca_umc_status;
+ ecc_info_per_channel->mca_umc_addr =
+ ecc_table->EccInfo[i].mca_umc_addr;
+ }
+ } else if (table_version == 2) {
+ for (i = 0; i < ALDEBARAN_UMC_CHANNEL_NUM; i++) {
+ ecc_info_per_channel = &(eccinfo->ecc[i]);
+ ecc_info_per_channel->ce_count_lo_chip =
+ ecc_table->EccInfo_V2[i].ce_count_lo_chip;
+ ecc_info_per_channel->ce_count_hi_chip =
+ ecc_table->EccInfo_V2[i].ce_count_hi_chip;
+ ecc_info_per_channel->mca_umc_status =
+ ecc_table->EccInfo_V2[i].mca_umc_status;
+ ecc_info_per_channel->mca_umc_addr =
+ ecc_table->EccInfo_V2[i].mca_umc_addr;
+ ecc_info_per_channel->mca_ceumc_addr =
+ ecc_table->EccInfo_V2[i].mca_ceumc_addr;
+ }
+ eccinfo->record_ce_addr_supported = 1;
}
return ret;
@@ -2117,4 +2138,5 @@ void aldebaran_set_ppt_funcs(struct smu_context *smu)
smu->clock_map = aldebaran_clk_map;
smu->feature_map = aldebaran_feature_mask_map;
smu->table_map = aldebaran_table_map;
+ smu_v13_0_set_smu_mailbox_registers(smu);
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
index ef9b56de143b..8342703ce7d6 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
@@ -60,6 +60,15 @@ MODULE_FIRMWARE("amdgpu/aldebaran_smc.bin");
MODULE_FIRMWARE("amdgpu/smu_13_0_0.bin");
MODULE_FIRMWARE("amdgpu/smu_13_0_7.bin");
+#define mmMP1_SMN_C2PMSG_66 0x0282
+#define mmMP1_SMN_C2PMSG_66_BASE_IDX 0
+
+#define mmMP1_SMN_C2PMSG_82 0x0292
+#define mmMP1_SMN_C2PMSG_82_BASE_IDX 0
+
+#define mmMP1_SMN_C2PMSG_90 0x029a
+#define mmMP1_SMN_C2PMSG_90_BASE_IDX 0
+
#define SMU13_VOLTAGE_SCALE 4
#define LINK_WIDTH_MAX 6
@@ -264,8 +273,16 @@ int smu_v13_0_check_fw_status(struct smu_context *smu)
struct amdgpu_device *adev = smu->adev;
uint32_t mp1_fw_flags;
- mp1_fw_flags = RREG32_PCIE(MP1_Public |
- (smnMP1_FIRMWARE_FLAGS & 0xffffffff));
+ switch (adev->ip_versions[MP1_HWIP][0]) {
+ case IP_VERSION(13, 0, 4):
+ mp1_fw_flags = RREG32_PCIE(MP1_Public |
+ (smnMP1_V13_0_4_FIRMWARE_FLAGS & 0xffffffff));
+ break;
+ default:
+ mp1_fw_flags = RREG32_PCIE(MP1_Public |
+ (smnMP1_FIRMWARE_FLAGS & 0xffffffff));
+ break;
+ }
if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
@@ -714,6 +731,8 @@ int smu_v13_0_get_vbios_bootup_values(struct smu_context *smu)
smu->smu_table.boot_values.vclk = smu_info_v3_6->bootup_vclk_10khz;
smu->smu_table.boot_values.dclk = smu_info_v3_6->bootup_dclk_10khz;
smu->smu_table.boot_values.fclk = smu_info_v3_6->bootup_fclk_10khz;
+ } else if ((frev == 3) && (crev == 1)) {
+ return 0;
} else if ((frev == 4) && (crev == 0)) {
smu_info_v4_0 = (struct atom_smu_info_v4_0 *)header;
@@ -1067,10 +1086,7 @@ int smu_v13_0_set_power_limit(struct smu_context *smu,
int smu_v13_0_enable_thermal_alert(struct smu_context *smu)
{
- if (smu->smu_table.thermal_controller_type)
- return amdgpu_irq_get(smu->adev, &smu->irq_source, 0);
-
- return 0;
+ return amdgpu_irq_get(smu->adev, &smu->irq_source, 0);
}
int smu_v13_0_disable_thermal_alert(struct smu_context *smu)
@@ -2257,7 +2273,8 @@ int smu_v13_0_baco_set_state(struct smu_context *smu,
if (state == SMU_BACO_STATE_ENTER) {
ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_EnterBaco,
- 0,
+ smu_baco->maco_support ?
+ BACO_SEQ_BAMACO : BACO_SEQ_BACO,
NULL);
} else {
ret = smu_cmn_send_smc_msg(smu,
@@ -2297,6 +2314,16 @@ int smu_v13_0_baco_exit(struct smu_context *smu)
SMU_BACO_STATE_EXIT);
}
+int smu_v13_0_set_gfx_power_up_by_imu(struct smu_context *smu)
+{
+ uint16_t index;
+
+ index = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG,
+ SMU_MSG_EnableGfxImu);
+
+ return smu_cmn_send_msg_without_waiting(smu, index, 0);
+}
+
int smu_v13_0_od_edit_dpm_table(struct smu_context *smu,
enum PP_OD_DPM_TABLE_COMMAND type,
long input[], uint32_t size)
@@ -2386,3 +2413,23 @@ int smu_v13_0_set_default_dpm_tables(struct smu_context *smu)
return smu_cmn_update_table(smu, SMU_TABLE_DPMCLOCKS, 0,
smu_table->clocks_table, false);
}
+
+void smu_v13_0_set_smu_mailbox_registers(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+
+ smu->param_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_82);
+ smu->msg_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_66);
+ smu->resp_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_90);
+}
+
+int smu_v13_0_mode1_reset(struct smu_context *smu)
+{
+ int ret = 0;
+
+ ret = smu_cmn_send_smc_msg(smu, SMU_MSG_Mode1Reset, NULL);
+ if (!ret)
+ msleep(SMU13_MODE1_RESET_WAIT_TIME_IN_MS);
+
+ return ret;
+}
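
With this change smu_v13_0_baco_set_state() passes a sequence argument along with EnterBaco: BACO_SEQ_BAMACO when the powerplay/board tables flagged MACO support (the new maco_support bit set in the SMU 13.0.0 and 13.0.7 ppt code below), plain BACO_SEQ_BACO otherwise. A minimal sketch of that selection, with an illustrative enum standing in for the real BACO sequence IDs:

#include <stdbool.h>
#include <stdio.h>

enum baco_seq { SEQ_BACO = 0, SEQ_BAMACO = 1 };    /* illustrative values */

struct baco_ctx {                                  /* stand-in for struct smu_baco_context */
	bool platform_support;
	bool maco_support;
};

/* Pick the EnterBaco argument the way smu_v13_0_baco_set_state() does. */
static enum baco_seq enter_baco_arg(const struct baco_ctx *baco)
{
	return baco->maco_support ? SEQ_BAMACO : SEQ_BACO;
}

int main(void)
{
	struct baco_ctx plain = { .platform_support = true, .maco_support = false };
	struct baco_ctx maco  = { .platform_support = true, .maco_support = true  };

	printf("plain BACO board:   seq %d\n", enter_baco_arg(&plain));  /* 0 */
	printf("MACO-capable board: seq %d\n", enter_baco_arg(&maco));   /* 1 */
	return 0;
}
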
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
index 7432b3e76d3d..ce2fa04e3926 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
@@ -117,6 +117,8 @@ static struct cmn2asic_msg_mapping smu_v13_0_0_message_map[SMU_MSG_MAX_COUNT] =
MSG_MAP(SetMGpuFanBoostLimitRpm, PPSMC_MSG_SetMGpuFanBoostLimitRpm, 0),
MSG_MAP(GetPptLimit, PPSMC_MSG_GetPptLimit, 0),
MSG_MAP(NotifyPowerSource, PPSMC_MSG_NotifyPowerSource, 0),
+ MSG_MAP(Mode1Reset, PPSMC_MSG_Mode1Reset, 0),
+ MSG_MAP(PrepareMp1ForUnload, PPSMC_MSG_PrepareMp1ForUnload, 0),
};
static struct cmn2asic_mapping smu_v13_0_0_clk_map[SMU_CLK_COUNT] = {
@@ -300,6 +302,14 @@ smu_v13_0_0_get_allowed_feature_mask(struct smu_context *smu,
*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_OUT_OF_BAND_MONITOR_BIT);
*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_SOC_CG_BIT);
+ *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DS_FCLK_BIT);
+
+ *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_FW_CTF_BIT);
+
+ *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DS_UCLK_BIT);
+
+ *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_VR0HOT_BIT);
+
return 0;
}
@@ -317,6 +327,9 @@ static int smu_v13_0_0_check_powerplay_table(struct smu_context *smu)
powerplay_table->platform_caps & SMU_13_0_0_PP_PLATFORM_CAP_MACO)
smu_baco->platform_support = true;
+ if (powerplay_table->platform_caps & SMU_13_0_0_PP_PLATFORM_CAP_MACO)
+ smu_baco->maco_support = true;
+
table_context->thermal_controller_type =
powerplay_table->thermal_controller_type;
@@ -840,7 +853,7 @@ static int smu_v13_0_0_read_sensor(struct smu_context *smu,
break;
case AMDGPU_PP_SENSOR_GFX_MCLK:
ret = smu_v13_0_0_get_smu_metrics_data(smu,
- METRICS_AVERAGE_UCLK,
+ METRICS_CURR_UCLK,
(uint32_t *)data);
*(uint32_t *)data *= 100;
*size = 4;
@@ -1576,6 +1589,23 @@ static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu,
NULL);
}
+static bool smu_v13_0_0_is_mode1_reset_supported(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ u32 smu_version;
+
+ /* SRIOV does not support SMU mode1 reset */
+ if (amdgpu_sriov_vf(adev))
+ return false;
+
+ /* PMFW support is available since 78.41 */
+ smu_cmn_get_smc_version(smu, NULL, &smu_version);
+ if (smu_version < 0x004e2900)
+ return false;
+
+ return true;
+}
+
static const struct pptable_funcs smu_v13_0_0_ppt_funcs = {
.get_allowed_feature_mask = smu_v13_0_0_get_allowed_feature_mask,
.set_default_dpm_table = smu_v13_0_0_set_default_dpm_table,
@@ -1638,6 +1668,9 @@ static const struct pptable_funcs smu_v13_0_0_ppt_funcs = {
.baco_set_state = smu_v13_0_baco_set_state,
.baco_enter = smu_v13_0_baco_enter,
.baco_exit = smu_v13_0_baco_exit,
+ .mode1_reset_is_support = smu_v13_0_0_is_mode1_reset_supported,
+ .mode1_reset = smu_v13_0_mode1_reset,
+ .set_mp1_state = smu_cmn_set_mp1_state,
};
void smu_v13_0_0_set_ppt_funcs(struct smu_context *smu)
@@ -1649,4 +1682,5 @@ void smu_v13_0_0_set_ppt_funcs(struct smu_context *smu)
smu->table_map = smu_v13_0_0_table_map;
smu->pwr_src_map = smu_v13_0_0_pwr_src_map;
smu->workload_map = smu_v13_0_0_workload_map;
+ smu_v13_0_set_smu_mailbox_registers(smu);
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
index 5a17b51aa0f9..82d3718d8324 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
@@ -43,6 +43,15 @@
#undef pr_info
#undef pr_debug
+#define mmMP1_SMN_C2PMSG_66 0x0282
+#define mmMP1_SMN_C2PMSG_66_BASE_IDX 1
+
+#define mmMP1_SMN_C2PMSG_82 0x0292
+#define mmMP1_SMN_C2PMSG_82_BASE_IDX 1
+
+#define mmMP1_SMN_C2PMSG_90 0x029a
+#define mmMP1_SMN_C2PMSG_90_BASE_IDX 1
+
#define FEATURE_MASK(feature) (1ULL << feature)
#define SMC_DPM_FEATURE ( \
@@ -210,15 +219,10 @@ static int smu_v13_0_4_system_features_control(struct smu_context *smu, bool en)
{
struct amdgpu_device *adev = smu->adev;
int ret = 0;
- /* SMU fw need this message to trigger IMU to complete the initialization */
- if (en)
- ret = smu_cmn_send_smc_msg(smu, SMU_MSG_EnableGfxImu, NULL);
- else {
- if (!adev->in_s0ix)
- ret = smu_cmn_send_smc_msg(smu,
- SMU_MSG_PrepareMp1ForUnload,
- NULL);
- }
+
+ if (!en && !adev->in_s0ix)
+ ret = smu_cmn_send_smc_msg(smu, SMU_MSG_PrepareMp1ForUnload, NULL);
+
return ret;
}
@@ -1030,13 +1034,19 @@ static const struct pptable_funcs smu_v13_0_4_ppt_funcs = {
.force_clk_levels = smu_v13_0_4_force_clk_levels,
.set_performance_level = smu_v13_0_4_set_performance_level,
.set_fine_grain_gfx_freq_parameters = smu_v13_0_4_set_fine_grain_gfx_freq_parameters,
+ .set_gfx_power_up_by_imu = smu_v13_0_set_gfx_power_up_by_imu,
};
void smu_v13_0_4_set_ppt_funcs(struct smu_context *smu)
{
+ struct amdgpu_device *adev = smu->adev;
+
smu->ppt_funcs = &smu_v13_0_4_ppt_funcs;
smu->message_map = smu_v13_0_4_message_map;
smu->feature_map = smu_v13_0_4_feature_mask_map;
smu->table_map = smu_v13_0_4_table_map;
smu->is_apu = true;
+ smu->param_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_82);
+ smu->msg_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_66);
+ smu->resp_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_90);
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c
index b81711c4ff33..47360ef5c175 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c
@@ -42,6 +42,15 @@
#undef pr_info
#undef pr_debug
+#define mmMP1_C2PMSG_2 (0xbee142 + 0xb00000 / 4)
+#define mmMP1_C2PMSG_2_BASE_IDX 0
+
+#define mmMP1_C2PMSG_34 (0xbee262 + 0xb00000 / 4)
+#define mmMP1_C2PMSG_34_BASE_IDX 0
+
+#define mmMP1_C2PMSG_33 (0xbee261 + 0xb00000 / 4)
+#define mmMP1_C2PMSG_33_BASE_IDX 0
+
#define FEATURE_MASK(feature) (1ULL << feature)
#define SMC_DPM_FEATURE ( \
FEATURE_MASK(FEATURE_CCLK_DPM_BIT) | \
@@ -1049,9 +1058,14 @@ static const struct pptable_funcs smu_v13_0_5_ppt_funcs = {
void smu_v13_0_5_set_ppt_funcs(struct smu_context *smu)
{
+ struct amdgpu_device *adev = smu->adev;
+
smu->ppt_funcs = &smu_v13_0_5_ppt_funcs;
smu->message_map = smu_v13_0_5_message_map;
smu->feature_map = smu_v13_0_5_feature_mask_map;
smu->table_map = smu_v13_0_5_table_map;
smu->is_apu = true;
+ smu->param_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_C2PMSG_34);
+ smu->msg_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_C2PMSG_2);
+ smu->resp_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_C2PMSG_33);
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
index 4e1861fb2c6a..193222fdd1c4 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
@@ -97,6 +97,7 @@ static struct cmn2asic_msg_mapping smu_v13_0_7_message_map[SMU_MSG_MAX_COUNT] =
MSG_MAP(UseDefaultPPTable, PPSMC_MSG_UseDefaultPPTable, 0),
MSG_MAP(RunDcBtc, PPSMC_MSG_RunDcBtc, 0),
MSG_MAP(EnterBaco, PPSMC_MSG_EnterBaco, 0),
+ MSG_MAP(ExitBaco, PPSMC_MSG_ExitBaco, 0),
MSG_MAP(SetSoftMinByFreq, PPSMC_MSG_SetSoftMinByFreq, 1),
MSG_MAP(SetSoftMaxByFreq, PPSMC_MSG_SetSoftMaxByFreq, 1),
MSG_MAP(SetHardMinByFreq, PPSMC_MSG_SetHardMinByFreq, 1),
@@ -115,6 +116,7 @@ static struct cmn2asic_msg_mapping smu_v13_0_7_message_map[SMU_MSG_MAX_COUNT] =
MSG_MAP(DramLogSetDramSize, PPSMC_MSG_DramLogSetDramSize, 0),
MSG_MAP(AllowGfxOff, PPSMC_MSG_AllowGfxOff, 0),
MSG_MAP(DisallowGfxOff, PPSMC_MSG_DisallowGfxOff, 0),
+ MSG_MAP(PrepareMp1ForUnload, PPSMC_MSG_PrepareMp1ForUnload, 0),
};
static struct cmn2asic_mapping smu_v13_0_7_clk_map[SMU_CLK_COUNT] = {
@@ -248,6 +250,9 @@ smu_v13_0_7_get_allowed_feature_mask(struct smu_context *smu,
*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFX_IMU_BIT);
}
+ if (adev->pm.pp_feature & PP_GFXOFF_MASK)
+ *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFXOFF_BIT);
+
if (adev->pm.pp_feature & PP_MCLK_DPM_MASK) {
*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_UCLK_BIT);
*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_FCLK_BIT);
@@ -281,6 +286,7 @@ smu_v13_0_7_get_allowed_feature_mask(struct smu_context *smu,
*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_BACO_MPCLK_DS_BIT);
*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFX_PCC_DFLL_BIT);
*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_SOC_CG_BIT);
+ *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_BACO_BIT);
if (adev->pm.pp_feature & PP_DCEFCLK_DPM_MASK)
*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_DCN_BIT);
@@ -298,6 +304,8 @@ static int smu_v13_0_7_check_powerplay_table(struct smu_context *smu)
struct smu_13_0_7_powerplay_table *powerplay_table =
table_context->power_play_table;
struct smu_baco_context *smu_baco = &smu->smu_baco;
+ PPTable_t *smc_pptable = table_context->driver_pptable;
+ BoardTable_t *BoardTable = &smc_pptable->BoardTable;
if (powerplay_table->platform_caps & SMU_13_0_7_PP_PLATFORM_CAP_HARDWAREDC)
smu->dc_controlled_by_gpio = true;
@@ -306,6 +314,9 @@ static int smu_v13_0_7_check_powerplay_table(struct smu_context *smu)
powerplay_table->platform_caps & SMU_13_0_7_PP_PLATFORM_CAP_MACO)
smu_baco->platform_support = true;
+ if (smu_baco->platform_support && (BoardTable->HsrEnabled || BoardTable->VddqOffEnabled))
+ smu_baco->maco_support = true;
+
table_context->thermal_controller_type =
powerplay_table->thermal_controller_type;
@@ -1541,6 +1552,7 @@ static const struct pptable_funcs smu_v13_0_7_ppt_funcs = {
.load_microcode = smu_v13_0_load_microcode,
.init_smc_tables = smu_v13_0_7_init_smc_tables,
.init_power = smu_v13_0_init_power,
+ .fini_power = smu_v13_0_fini_power,
.check_fw_status = smu_v13_0_7_check_fw_status,
.setup_pptable = smu_v13_0_7_setup_pptable,
.check_fw_version = smu_v13_0_check_fw_version,
@@ -1583,6 +1595,12 @@ static const struct pptable_funcs smu_v13_0_7_ppt_funcs = {
.set_tool_table_location = smu_v13_0_set_tool_table_location,
.get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
.set_pp_feature_mask = smu_cmn_set_pp_feature_mask,
+ .baco_is_support = smu_v13_0_baco_is_support,
+ .baco_get_state = smu_v13_0_baco_get_state,
+ .baco_set_state = smu_v13_0_baco_set_state,
+ .baco_enter = smu_v13_0_baco_enter,
+ .baco_exit = smu_v13_0_baco_exit,
+ .set_mp1_state = smu_cmn_set_mp1_state,
};
void smu_v13_0_7_set_ppt_funcs(struct smu_context *smu)
@@ -1594,4 +1612,5 @@ void smu_v13_0_7_set_ppt_funcs(struct smu_context *smu)
smu->table_map = smu_v13_0_7_table_map;
smu->pwr_src_map = smu_v13_0_7_pwr_src_map;
smu->workload_map = smu_v13_0_7_workload_map;
+ smu_v13_0_set_smu_mailbox_registers(smu);
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
index feff4f8c927c..70cbc46341a3 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
@@ -1203,4 +1203,5 @@ void yellow_carp_set_ppt_funcs(struct smu_context *smu)
smu->feature_map = yellow_carp_feature_mask_map;
smu->table_map = yellow_carp_table_map;
smu->is_apu = true;
+ smu_v13_0_set_smu_mailbox_registers(smu);
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
index 5de7da75d14a..15e4298c7cc8 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
@@ -37,31 +37,6 @@
#undef pr_info
#undef pr_debug
-/*
- * Although these are defined in each ASIC's specific header file.
- * They share the same definitions and values. That makes common
- * APIs for SMC messages issuing for all ASICs possible.
- */
-#define mmMP1_SMN_C2PMSG_66 0x0282
-#define mmMP1_SMN_C2PMSG_66_BASE_IDX 0
-
-#define mmMP1_SMN_C2PMSG_82 0x0292
-#define mmMP1_SMN_C2PMSG_82_BASE_IDX 0
-
-#define mmMP1_SMN_C2PMSG_90 0x029a
-#define mmMP1_SMN_C2PMSG_90_BASE_IDX 0
-
-/* SMU 13.0.5 has its specific mailbox messaging registers */
-
-#define mmMP1_C2PMSG_2 (0xbee142 + 0xb00000 / 4)
-#define mmMP1_C2PMSG_2_BASE_IDX 0
-
-#define mmMP1_C2PMSG_34 (0xbee262 + 0xb00000 / 4)
-#define mmMP1_C2PMSG_34_BASE_IDX 0
-
-#define mmMP1_C2PMSG_33 (0xbee261 + 0xb00000 / 4)
-#define mmMP1_C2PMSG_33_BASE_IDX 0
-
#define MP1_C2PMSG_90__CONTENT_MASK 0xFFFFFFFFL
#undef __SMU_DUMMY_MAP
@@ -90,10 +65,7 @@ static void smu_cmn_read_arg(struct smu_context *smu,
{
struct amdgpu_device *adev = smu->adev;
- if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 5))
- *arg = RREG32_SOC15(MP1, 0, mmMP1_C2PMSG_34);
- else
- *arg = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82);
+ *arg = RREG32(smu->param_reg);
}
/* Redefine the SMU error codes here.
@@ -139,10 +111,7 @@ static u32 __smu_cmn_poll_stat(struct smu_context *smu)
u32 reg;
for ( ; timeout > 0; timeout--) {
- if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 5))
- reg = RREG32_SOC15(MP1, 0, mmMP1_C2PMSG_33);
- else
- reg = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90);
+ reg = RREG32(smu->resp_reg);
if ((reg & MP1_C2PMSG_90__CONTENT_MASK) != 0)
break;
@@ -164,13 +133,8 @@ static void __smu_cmn_reg_print_error(struct smu_context *smu,
switch (reg_c2pmsg_90) {
case SMU_RESP_NONE: {
- if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 5)) {
- msg_idx = RREG32_SOC15(MP1, 0, mmMP1_C2PMSG_2);
- prm = RREG32_SOC15(MP1, 0, mmMP1_C2PMSG_34);
- } else {
- msg_idx = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66);
- prm = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82);
- }
+ msg_idx = RREG32(smu->msg_reg);
+ prm = RREG32(smu->param_reg);
dev_err_ratelimited(adev->dev,
"SMU: I'm not done with your previous command: SMN_C2PMSG_66:0x%08X SMN_C2PMSG_82:0x%08X",
msg_idx, prm);
@@ -264,16 +228,9 @@ static void __smu_cmn_send_msg(struct smu_context *smu,
{
struct amdgpu_device *adev = smu->adev;
- if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 5)) {
- WREG32_SOC15(MP1, 0, mmMP1_C2PMSG_33, 0);
- WREG32_SOC15(MP1, 0, mmMP1_C2PMSG_34, param);
- WREG32_SOC15(MP1, 0, mmMP1_C2PMSG_2, msg);
- } else {
- WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
- WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82, param);
- WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66, msg);
- }
-
+ WREG32(smu->resp_reg, 0);
+ WREG32(smu->param_reg, param);
+ WREG32(smu->msg_reg, msg);
}
/**
@@ -725,16 +682,13 @@ static const char *smu_get_feature_name(struct smu_context *smu,
size_t smu_cmn_get_pp_feature_mask(struct smu_context *smu,
char *buf)
{
+ int8_t sort_feature[max(SMU_FEATURE_COUNT, SMU_FEATURE_MAX)];
uint64_t feature_mask;
- int feature_index = 0;
+ int i, feature_index;
uint32_t count = 0;
- int8_t sort_feature[SMU_FEATURE_COUNT];
size_t size = 0;
- int ret = 0, i;
- int feature_id;
- ret = __smu_get_enabled_features(smu, &feature_mask);
- if (ret)
+ if (__smu_get_enabled_features(smu, &feature_mask))
return 0;
size = sysfs_emit_at(buf, size, "features high: 0x%08x low: 0x%08x\n",
@@ -755,22 +709,15 @@ size_t smu_cmn_get_pp_feature_mask(struct smu_context *smu,
size += sysfs_emit_at(buf, size, "%-2s. %-20s %-3s : %-s\n",
"No", "Feature", "Bit", "State");
- for (i = 0; i < SMU_FEATURE_COUNT; i++) {
- if (sort_feature[i] < 0)
- continue;
-
- /* convert to asic spcific feature ID */
- feature_id = smu_cmn_to_asic_specific_index(smu,
- CMN2ASIC_MAPPING_FEATURE,
- sort_feature[i]);
- if (feature_id < 0)
+ for (feature_index = 0; feature_index < SMU_FEATURE_MAX; feature_index++) {
+ if (sort_feature[feature_index] < 0)
continue;
size += sysfs_emit_at(buf, size, "%02d. %-20s (%2d) : %s\n",
count++,
- smu_get_feature_name(smu, sort_feature[i]),
- i,
- !!test_bit(feature_id, (unsigned long *)&feature_mask) ?
+ smu_get_feature_name(smu, sort_feature[feature_index]),
+ feature_index,
+ !!test_bit(feature_index, (unsigned long *)&feature_mask) ?
"enabled" : "disabled");
}
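
The reworked smu_cmn_get_pp_feature_mask() above sizes sort_feature to cover both the generic and ASIC feature counts and walks the ASIC bit positions directly, testing each bit of the 64-bit enabled mask rather than translating indices through CMN2ASIC_MAPPING_FEATURE inside the print loop. A small userspace sketch of walking such a mask (the feature names and the bit-to-name table are illustrative, not the kernel's mapping):

#include <stdint.h>
#include <stdio.h>

#define FEATURE_MAX 8                          /* stand-in for SMU_FEATURE_MAX */

/* Illustrative bit-position -> name table; NULL marks unmapped bits. */
static const char *feature_name[FEATURE_MAX] = {
	"DPM_GFXCLK", "DPM_UCLK", NULL, "DS_SOCCLK",
	"BACO", NULL, "FW_CTF", "GFXOFF",
};

static void print_features(uint64_t mask)
{
	int count = 0;

	printf("features high: 0x%08x low: 0x%08x\n",
	       (uint32_t)(mask >> 32), (uint32_t)(mask & 0xffffffff));

	for (int bit = 0; bit < FEATURE_MAX; bit++) {
		if (!feature_name[bit])            /* skip unmapped bits */
			continue;
		printf("%02d. %-12s (%2d) : %s\n", count++, feature_name[bit], bit,
		       (mask >> bit) & 1 ? "enabled" : "disabled");
	}
}

int main(void)
{
	print_features((1ULL << 0) | (1ULL << 3) | (1ULL << 7));
	return 0;
}
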