Diffstat (limited to 'drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c')
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c  577
1 file changed, 338 insertions(+), 239 deletions(-)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
index ee1a312fd497..4040ff926544 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
@@ -37,32 +37,26 @@
#undef pr_info
#undef pr_debug
-/*
- * Although these are defined in each ASIC's specific header file.
- * They share the same definitions and values. That makes common
- * APIs for SMC messages issuing for all ASICs possible.
- */
-#define mmMP1_SMN_C2PMSG_66 0x0282
-#define mmMP1_SMN_C2PMSG_66_BASE_IDX 0
-
-#define mmMP1_SMN_C2PMSG_82 0x0292
-#define mmMP1_SMN_C2PMSG_82_BASE_IDX 0
-
-#define mmMP1_SMN_C2PMSG_90 0x029a
-#define mmMP1_SMN_C2PMSG_90_BASE_IDX 0
-
#define MP1_C2PMSG_90__CONTENT_MASK 0xFFFFFFFFL
+const int link_speed[] = {25, 50, 80, 160, 320, 640};
+
#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(type) #type
static const char * const __smu_message_names[] = {
SMU_MESSAGE_TYPES
};
+#define smu_cmn_call_asic_func(intf, smu, args...) \
+ ((smu)->ppt_funcs ? ((smu)->ppt_funcs->intf ? \
+ (smu)->ppt_funcs->intf(smu, ##args) : \
+ -ENOTSUPP) : \
+ -EINVAL)
+
static const char *smu_get_message_name(struct smu_context *smu,
enum smu_message_type type)
{
- if (type < 0 || type >= SMU_MSG_MAX_COUNT)
+ if (type >= SMU_MSG_MAX_COUNT)
return "unknown smu message";
return __smu_message_names[type];
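
The hunk above drops the hard-coded mmMP1_SMN_C2PMSG_* offsets in favour of per-context register fields (smu->msg_reg, smu->param_reg, smu->resp_reg) and adds the smu_cmn_call_asic_func() dispatch macro. A minimal sketch of how an ASIC backend might populate those fields; the function name and init site here are assumptions, not part of this diff:

/* Hypothetical per-ASIC setup: resolve the C2PMSG mailbox offsets once
 * so the common helpers can use plain RREG32()/WREG32() on smu->*_reg.
 */
static void example_smu_init_msg_regs(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;

	smu->msg_reg   = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_66);
	smu->param_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_82);
	smu->resp_reg  = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_90);
}
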
@@ -73,7 +67,7 @@ static void smu_cmn_read_arg(struct smu_context *smu,
{
struct amdgpu_device *adev = smu->adev;
- *arg = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82);
+ *arg = RREG32(smu->param_reg);
}
/* Redefine the SMU error codes here.
@@ -92,6 +86,7 @@ static void smu_cmn_read_arg(struct smu_context *smu,
#define SMU_RESP_BUSY_OTHER 0xFC
#define SMU_RESP_DEBUG_END 0xFB
+#define SMU_RESP_UNEXP (~0U)
/**
* __smu_cmn_poll_stat -- poll for a status from the SMU
* @smu: a pointer to SMU context
@@ -119,7 +114,7 @@ static u32 __smu_cmn_poll_stat(struct smu_context *smu)
u32 reg;
for ( ; timeout > 0; timeout--) {
- reg = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90);
+ reg = RREG32(smu->resp_reg);
if ((reg & MP1_C2PMSG_90__CONTENT_MASK) != 0)
break;
@@ -137,15 +132,16 @@ static void __smu_cmn_reg_print_error(struct smu_context *smu,
{
struct amdgpu_device *adev = smu->adev;
const char *message = smu_get_message_name(smu, msg);
+ u32 msg_idx, prm;
switch (reg_c2pmsg_90) {
case SMU_RESP_NONE: {
- u32 msg_idx = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66);
- u32 prm = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82);
+ msg_idx = RREG32(smu->msg_reg);
+ prm = RREG32(smu->param_reg);
dev_err_ratelimited(adev->dev,
"SMU: I'm not done with your previous command: SMN_C2PMSG_66:0x%08X SMN_C2PMSG_82:0x%08X",
msg_idx, prm);
- }
+ }
break;
case SMU_RESP_OK:
/* The SMU executed the command. It completed with a
@@ -168,14 +164,27 @@ static void __smu_cmn_reg_print_error(struct smu_context *smu,
msg_index, param, message);
break;
case SMU_RESP_BUSY_OTHER:
- dev_err_ratelimited(adev->dev,
- "SMU: I'm very busy for your command: index:%d param:0x%08X message:%s",
- msg_index, param, message);
+ /* It is normal for SMU_MSG_GetBadPageCount to return busy
+ * so don't print error at this case.
+ */
+ if (msg != SMU_MSG_GetBadPageCount)
+ dev_err_ratelimited(adev->dev,
+ "SMU: I'm very busy for your command: index:%d param:0x%08X message:%s",
+ msg_index, param, message);
break;
case SMU_RESP_DEBUG_END:
dev_err_ratelimited(adev->dev,
"SMU: I'm debugging!");
break;
+ case SMU_RESP_UNEXP:
+ if (amdgpu_device_bus_status_check(smu->adev)) {
+ /* print error immediately if device is off the bus */
+ dev_err(adev->dev,
+ "SMU: response:0x%08X for index:%d param:0x%08X message:%s?",
+ reg_c2pmsg_90, msg_index, param, message);
+ break;
+ }
+ fallthrough;
default:
dev_err_ratelimited(adev->dev,
"SMU: response:0x%08X for index:%d param:0x%08X message:%s?",
@@ -235,11 +244,71 @@ static void __smu_cmn_send_msg(struct smu_context *smu,
{
struct amdgpu_device *adev = smu->adev;
- WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
- WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82, param);
- WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66, msg);
+ WREG32(smu->resp_reg, 0);
+ WREG32(smu->param_reg, param);
+ WREG32(smu->msg_reg, msg);
+}
+
+static inline uint32_t __smu_cmn_get_msg_flags(struct smu_context *smu,
+ enum smu_message_type msg)
+{
+ return smu->message_map[msg].flags;
}
+static int __smu_cmn_ras_filter_msg(struct smu_context *smu,
+ enum smu_message_type msg, bool *poll)
+{
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t flags, resp;
+ bool fed_status, pri;
+
+ flags = __smu_cmn_get_msg_flags(smu, msg);
+ *poll = true;
+
+ pri = !!(flags & SMU_MSG_NO_PRECHECK);
+ /* When there is RAS fatal error, FW won't process non-RAS priority
+ * messages. Don't allow any messages other than RAS priority messages.
+ */
+ fed_status = amdgpu_ras_get_fed_status(adev);
+ if (fed_status) {
+ if (!(flags & SMU_MSG_RAS_PRI)) {
+ dev_dbg(adev->dev,
+ "RAS error detected, skip sending %s",
+ smu_get_message_name(smu, msg));
+ return -EACCES;
+ }
+ }
+
+ if (pri || fed_status) {
+ /* FW will ignore non-priority messages when a RAS fatal error
+ * or reset condition is detected. Hence it is possible that a
+ * previous message wouldn't have got response. Allow to
+ * continue without polling for response status for priority
+ * messages.
+ */
+ resp = RREG32(smu->resp_reg);
+ dev_dbg(adev->dev,
+ "Sending priority message %s response status: %x",
+ smu_get_message_name(smu, msg), resp);
+ if (resp == 0)
+ *poll = false;
+ }
+
+ return 0;
+}
+
+static int __smu_cmn_send_debug_msg(struct smu_context *smu,
+ u32 msg,
+ u32 param)
+{
+ struct amdgpu_device *adev = smu->adev;
+
+ WREG32(smu->debug_param_reg, param);
+ WREG32(smu->debug_msg_reg, msg);
+ WREG32(smu->debug_resp_reg, 0);
+
+ return 0;
+}
/**
* smu_cmn_send_msg_without_waiting -- send the message; don't wait for status
* @smu: pointer to an SMU context
@@ -264,12 +333,21 @@ int smu_cmn_send_msg_without_waiting(struct smu_context *smu,
if (adev->no_hw_access)
return 0;
- reg = __smu_cmn_poll_stat(smu);
- res = __smu_cmn_reg2errno(smu, reg);
- if (reg == SMU_RESP_NONE ||
- reg == SMU_RESP_BUSY_OTHER ||
- res == -EREMOTEIO)
+ if (smu->smc_fw_state == SMU_FW_HANG) {
+ dev_err(adev->dev, "SMU is in hanged state, failed to send smu message!\n");
+ res = -EREMOTEIO;
goto Out;
+ }
+
+ if (smu->smc_fw_state == SMU_FW_INIT) {
+ smu->smc_fw_state = SMU_FW_RUNTIME;
+ } else {
+ reg = __smu_cmn_poll_stat(smu);
+ res = __smu_cmn_reg2errno(smu, reg);
+ if (reg == SMU_RESP_NONE || res == -EREMOTEIO)
+ goto Out;
+ }
+
__smu_cmn_send_msg(smu, msg_index, param);
res = 0;
Out:
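
The smc_fw_state checks above rely on a small firmware-state machine tracked in the SMU context. A hedged sketch of the states as this code uses them; the actual enum lives in the swsmu headers and may differ in detail:

enum smu_fw_status {
	SMU_FW_INIT = 0,	/* first message after FW load: skip polling the stale response */
	SMU_FW_RUNTIME,		/* normal operation: poll the previous response before sending */
	SMU_FW_HANG,		/* a response timed out: further messages fail with -EREMOTEIO */
};
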
@@ -300,6 +378,9 @@ int smu_cmn_wait_for_response(struct smu_context *smu)
reg = __smu_cmn_poll_stat(smu);
res = __smu_cmn_reg2errno(smu, reg);
+ if (res == -EREMOTEIO)
+ smu->smc_fw_state = SMU_FW_HANG;
+
if (unlikely(smu->adev->pm.smu_debug_mask & SMU_DEBUG_HALT_ON_ERROR) &&
res && (res != -ETIME)) {
amdgpu_device_halt(smu->adev);
@@ -321,9 +402,11 @@ int smu_cmn_wait_for_response(struct smu_context *smu)
* completion of the command, and return back a value from the SMU in
* @read_arg pointer.
*
- * Return 0 on success, -errno on error, if we weren't able to send
- * the message or if the message completed with some kind of
- * error. See __smu_cmn_reg2errno() for details of the -errno.
+ * Return 0 on success, -errno when a problem is encountered sending
+ * message or receiving reply. If there is a PCI bus recovery or
+ * the destination is a virtual GPU which does not allow this message
+ * type, the message is simply dropped and success is also returned.
+ * See __smu_cmn_reg2errno() for details of the -errno.
*
* If we weren't able to send the message to the SMU, we also print
* the error to the standard log.
@@ -346,6 +429,7 @@ int smu_cmn_send_smc_msg_with_param(struct smu_context *smu,
{
struct amdgpu_device *adev = smu->adev;
int res, index;
+ bool poll = true;
u32 reg;
if (adev->no_hw_access)
@@ -358,21 +442,47 @@ int smu_cmn_send_smc_msg_with_param(struct smu_context *smu,
return index == -EACCES ? 0 : index;
mutex_lock(&smu->message_lock);
- reg = __smu_cmn_poll_stat(smu);
- res = __smu_cmn_reg2errno(smu, reg);
- if (reg == SMU_RESP_NONE ||
- reg == SMU_RESP_BUSY_OTHER ||
- res == -EREMOTEIO) {
- __smu_cmn_reg_print_error(smu, reg, index, param, msg);
+
+ if (smu->smc_fw_caps & SMU_FW_CAP_RAS_PRI) {
+ res = __smu_cmn_ras_filter_msg(smu, msg, &poll);
+ if (res)
+ goto Out;
+ }
+
+ if (smu->smc_fw_state == SMU_FW_HANG) {
+ dev_err(adev->dev, "SMU is in hanged state, failed to send smu message!\n");
+ res = -EREMOTEIO;
goto Out;
+ } else if (smu->smc_fw_state == SMU_FW_INIT) {
+ /* Ignore initial smu response register value */
+ poll = false;
+ smu->smc_fw_state = SMU_FW_RUNTIME;
+ }
+
+ if (poll) {
+ reg = __smu_cmn_poll_stat(smu);
+ res = __smu_cmn_reg2errno(smu, reg);
+ if (reg == SMU_RESP_NONE || res == -EREMOTEIO) {
+ __smu_cmn_reg_print_error(smu, reg, index, param, msg);
+ goto Out;
+ }
}
__smu_cmn_send_msg(smu, (uint16_t) index, param);
reg = __smu_cmn_poll_stat(smu);
res = __smu_cmn_reg2errno(smu, reg);
- if (res != 0)
+ if (res != 0) {
+ if (res == -EREMOTEIO)
+ smu->smc_fw_state = SMU_FW_HANG;
__smu_cmn_reg_print_error(smu, reg, index, param, msg);
- if (read_arg)
+ }
+ if (read_arg) {
smu_cmn_read_arg(smu, read_arg);
+ dev_dbg(adev->dev, "smu send message: %s(%d) param: 0x%08x, resp: 0x%08x, readval: 0x%08x\n",
+ smu_get_message_name(smu, msg), index, param, reg, *read_arg);
+ } else {
+ dev_dbg(adev->dev, "smu send message: %s(%d) param: 0x%08x, resp: 0x%08x\n",
+ smu_get_message_name(smu, msg), index, param, reg);
+ }
Out:
if (unlikely(adev->pm.smu_debug_mask & SMU_DEBUG_HALT_ON_ERROR) && res) {
amdgpu_device_halt(adev);
@@ -393,6 +503,18 @@ int smu_cmn_send_smc_msg(struct smu_context *smu,
read_arg);
}
+int smu_cmn_send_debug_smc_msg(struct smu_context *smu,
+ uint32_t msg)
+{
+ return __smu_cmn_send_debug_msg(smu, msg, 0);
+}
+
+int smu_cmn_send_debug_smc_msg_with_param(struct smu_context *smu,
+ uint32_t msg, uint32_t param)
+{
+ return __smu_cmn_send_debug_msg(smu, msg, param);
+}
+
int smu_cmn_to_asic_specific_index(struct smu_context *smu,
enum smu_cmn2asic_mapping_type type,
uint32_t index)
@@ -411,7 +533,7 @@ int smu_cmn_to_asic_specific_index(struct smu_context *smu,
return -EINVAL;
if (amdgpu_sriov_vf(smu->adev) &&
- !msg_mapping.valid_in_vf)
+ !(msg_mapping.flags & SMU_MSG_VF_FLAG))
return -EACCES;
return msg_mapping.map_to;
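
Here valid_in_vf is replaced by a flags word in the message mapping, so VF permission and RAS-priority attributes share one field. An illustrative map entry under that assumption; the MSG_MAP form follows the per-ASIC ppt files, and the specific messages and flag choices shown are examples, not taken from this diff:

#define MSG_MAP(msg, index, flags) \
	[SMU_MSG_##msg] = { 1, (index), (flags) }

static const struct cmn2asic_msg_mapping example_message_map[SMU_MSG_MAX_COUNT] = {
	/* allowed under an SR-IOV VF */
	MSG_MAP(GetSmuVersion,		PPSMC_MSG_GetSmuVersion,	SMU_MSG_VF_FLAG),
	/* still accepted by FW after a RAS fatal error */
	MSG_MAP(GfxDeviceDriverReset,	PPSMC_MSG_GfxDriverReset,	SMU_MSG_RAS_PRI),
};
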
@@ -461,13 +583,13 @@ int smu_cmn_to_asic_specific_index(struct smu_context *smu,
return mapping.map_to;
case CMN2ASIC_MAPPING_WORKLOAD:
- if (index > PP_SMC_POWER_PROFILE_CUSTOM ||
+ if (index >= PP_SMC_POWER_PROFILE_COUNT ||
!smu->workload_map)
return -EINVAL;
mapping = smu->workload_map[index];
if (!mapping.valid_mapping)
- return -EINVAL;
+ return -ENOTSUPP;
return mapping.map_to;
@@ -481,7 +603,6 @@ int smu_cmn_feature_is_supported(struct smu_context *smu,
{
struct smu_feature *feature = &smu->smu_feature;
int feature_id;
- int ret = 0;
feature_id = smu_cmn_to_asic_specific_index(smu,
CMN2ASIC_MAPPING_FEATURE,
@@ -491,22 +612,33 @@ int smu_cmn_feature_is_supported(struct smu_context *smu,
WARN_ON(feature_id > feature->feature_num);
- mutex_lock(&feature->mutex);
- ret = test_bit(feature_id, feature->supported);
- mutex_unlock(&feature->mutex);
+ return test_bit(feature_id, feature->supported);
+}
- return ret;
+static int __smu_get_enabled_features(struct smu_context *smu,
+ uint64_t *enabled_features)
+{
+ return smu_cmn_call_asic_func(get_enabled_mask, smu, enabled_features);
}
int smu_cmn_feature_is_enabled(struct smu_context *smu,
enum smu_feature_mask mask)
{
- struct smu_feature *feature = &smu->smu_feature;
struct amdgpu_device *adev = smu->adev;
+ uint64_t enabled_features;
int feature_id;
- int ret = 0;
- if (smu->is_apu && adev->family < AMDGPU_FAMILY_VGH)
+ if (__smu_get_enabled_features(smu, &enabled_features)) {
+ dev_err(adev->dev, "Failed to retrieve enabled ppfeatures!\n");
+ return 0;
+ }
+
+ /*
+ * For Renoir and Cyan Skillfish, they are assumed to have all features
+ * enabled. Also considering they have no feature_map available, the
+ * check here can avoid unwanted feature_map check below.
+ */
+ if (enabled_features == ULLONG_MAX)
return 1;
feature_id = smu_cmn_to_asic_specific_index(smu,
@@ -515,13 +647,7 @@ int smu_cmn_feature_is_enabled(struct smu_context *smu,
if (feature_id < 0)
return 0;
- WARN_ON(feature_id > feature->feature_num);
-
- mutex_lock(&feature->mutex);
- ret = test_bit(feature_id, feature->enabled);
- mutex_unlock(&feature->mutex);
-
- return ret;
+ return test_bit(feature_id, (unsigned long *)&enabled_features);
}
bool smu_cmn_clk_dpm_is_enabled(struct smu_context *smu,
@@ -541,6 +667,17 @@ bool smu_cmn_clk_dpm_is_enabled(struct smu_context *smu,
case SMU_SOCCLK:
feature_id = SMU_FEATURE_DPM_SOCCLK_BIT;
break;
+ case SMU_VCLK:
+ case SMU_VCLK1:
+ feature_id = SMU_FEATURE_DPM_VCLK_BIT;
+ break;
+ case SMU_DCLK:
+ case SMU_DCLK1:
+ feature_id = SMU_FEATURE_DPM_DCLK_BIT;
+ break;
+ case SMU_FCLK:
+ feature_id = SMU_FEATURE_DPM_FCLK_BIT;
+ break;
default:
return true;
}
@@ -552,70 +689,46 @@ bool smu_cmn_clk_dpm_is_enabled(struct smu_context *smu,
}
int smu_cmn_get_enabled_mask(struct smu_context *smu,
- uint32_t *feature_mask,
- uint32_t num)
+ uint64_t *feature_mask)
{
- uint32_t feature_mask_high = 0, feature_mask_low = 0;
- struct smu_feature *feature = &smu->smu_feature;
- int ret = 0;
+ uint32_t *feature_mask_high;
+ uint32_t *feature_mask_low;
+ int ret = 0, index = 0;
- if (!feature_mask || num < 2)
+ if (!feature_mask)
return -EINVAL;
- if (bitmap_empty(feature->enabled, feature->feature_num)) {
- ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesHigh, &feature_mask_high);
- if (ret)
- return ret;
+ feature_mask_low = &((uint32_t *)feature_mask)[0];
+ feature_mask_high = &((uint32_t *)feature_mask)[1];
- ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesLow, &feature_mask_low);
+ index = smu_cmn_to_asic_specific_index(smu,
+ CMN2ASIC_MAPPING_MSG,
+ SMU_MSG_GetEnabledSmuFeatures);
+ if (index > 0) {
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_GetEnabledSmuFeatures,
+ 0,
+ feature_mask_low);
if (ret)
return ret;
- feature_mask[0] = feature_mask_low;
- feature_mask[1] = feature_mask_high;
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_GetEnabledSmuFeatures,
+ 1,
+ feature_mask_high);
} else {
- bitmap_copy((unsigned long *)feature_mask, feature->enabled,
- feature->feature_num);
- }
-
- return ret;
-}
-
-int smu_cmn_get_enabled_32_bits_mask(struct smu_context *smu,
- uint32_t *feature_mask,
- uint32_t num)
-{
- uint32_t feature_mask_en_low = 0;
- uint32_t feature_mask_en_high = 0;
- struct smu_feature *feature = &smu->smu_feature;
- int ret = 0;
-
- if (!feature_mask || num < 2)
- return -EINVAL;
-
- if (bitmap_empty(feature->enabled, feature->feature_num)) {
- ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetEnabledSmuFeatures, 0,
- &feature_mask_en_low);
-
+ ret = smu_cmn_send_smc_msg(smu,
+ SMU_MSG_GetEnabledSmuFeaturesHigh,
+ feature_mask_high);
if (ret)
return ret;
- ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetEnabledSmuFeatures, 1,
- &feature_mask_en_high);
-
- if (ret)
- return ret;
-
- feature_mask[0] = feature_mask_en_low;
- feature_mask[1] = feature_mask_en_high;
-
- } else {
- bitmap_copy((unsigned long *)feature_mask, feature->enabled,
- feature->feature_num);
+ ret = smu_cmn_send_smc_msg(smu,
+ SMU_MSG_GetEnabledSmuFeaturesLow,
+ feature_mask_low);
}
return ret;
-
}
uint64_t smu_cmn_get_indep_throttler_status(
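
smu_cmn_get_enabled_mask() now fills a single 64-bit mask instead of a two-element u32 array. A hedged caller sketch under the new signature (the wrapper function name is hypothetical):

static int example_dump_enabled_features(struct smu_context *smu)
{
	uint64_t feature_mask = 0;
	int ret;

	ret = smu_cmn_get_enabled_mask(smu, &feature_mask);
	if (ret)
		return ret;

	dev_dbg(smu->adev->dev, "enabled features: 0x%016llx\n",
		(unsigned long long)feature_mask);
	return 0;
}
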
@@ -635,7 +748,6 @@ int smu_cmn_feature_update_enable_state(struct smu_context *smu,
uint64_t feature_mask,
bool enabled)
{
- struct smu_feature *feature = &smu->smu_feature;
int ret = 0;
if (enabled) {
@@ -649,8 +761,6 @@ int smu_cmn_feature_update_enable_state(struct smu_context *smu,
SMU_MSG_EnableSmuFeaturesHigh,
upper_32_bits(feature_mask),
NULL);
- if (ret)
- return ret;
} else {
ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_DisableSmuFeaturesLow,
@@ -662,19 +772,8 @@ int smu_cmn_feature_update_enable_state(struct smu_context *smu,
SMU_MSG_DisableSmuFeaturesHigh,
upper_32_bits(feature_mask),
NULL);
- if (ret)
- return ret;
}
- mutex_lock(&feature->mutex);
- if (enabled)
- bitmap_or(feature->enabled, feature->enabled,
- (unsigned long *)(&feature_mask), SMU_FEATURE_MAX);
- else
- bitmap_andnot(feature->enabled, feature->enabled,
- (unsigned long *)(&feature_mask), SMU_FEATURE_MAX);
- mutex_unlock(&feature->mutex);
-
return ret;
}
@@ -682,7 +781,6 @@ int smu_cmn_feature_set_enabled(struct smu_context *smu,
enum smu_feature_mask mask,
bool enable)
{
- struct smu_feature *feature = &smu->smu_feature;
int feature_id;
feature_id = smu_cmn_to_asic_specific_index(smu,
@@ -691,8 +789,6 @@ int smu_cmn_feature_set_enabled(struct smu_context *smu,
if (feature_id < 0)
return -EINVAL;
- WARN_ON(feature_id > feature->feature_num);
-
return smu_cmn_feature_update_enable_state(smu,
1ULL << feature_id,
enable);
@@ -700,14 +796,14 @@ int smu_cmn_feature_set_enabled(struct smu_context *smu,
#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(fea) #fea
-static const char* __smu_feature_names[] = {
+static const char *__smu_feature_names[] = {
SMU_FEATURE_MASKS
};
static const char *smu_get_feature_name(struct smu_context *smu,
enum smu_feature_mask feature)
{
- if (feature < 0 || feature >= SMU_FEATURE_COUNT)
+ if (feature >= SMU_FEATURE_COUNT)
return "unknown smu feature";
return __smu_feature_names[feature];
}
@@ -715,29 +811,17 @@ static const char *smu_get_feature_name(struct smu_context *smu,
size_t smu_cmn_get_pp_feature_mask(struct smu_context *smu,
char *buf)
{
- uint32_t feature_mask[2] = { 0 };
- int feature_index = 0;
+ int8_t sort_feature[MAX(SMU_FEATURE_COUNT, SMU_FEATURE_MAX)];
+ uint64_t feature_mask;
+ int i, feature_index;
uint32_t count = 0;
- int8_t sort_feature[SMU_FEATURE_COUNT];
size_t size = 0;
- int ret = 0, i;
- if (!smu->is_apu) {
- ret = smu_cmn_get_enabled_mask(smu,
- feature_mask,
- 2);
- if (ret)
- return 0;
- } else {
- ret = smu_cmn_get_enabled_32_bits_mask(smu,
- feature_mask,
- 2);
- if (ret)
- return 0;
- }
+ if (__smu_get_enabled_features(smu, &feature_mask))
+ return 0;
size = sysfs_emit_at(buf, size, "features high: 0x%08x low: 0x%08x\n",
- feature_mask[1], feature_mask[0]);
+ upper_32_bits(feature_mask), lower_32_bits(feature_mask));
memset(sort_feature, -1, sizeof(sort_feature));
@@ -754,15 +838,15 @@ size_t smu_cmn_get_pp_feature_mask(struct smu_context *smu,
size += sysfs_emit_at(buf, size, "%-2s. %-20s %-3s : %-s\n",
"No", "Feature", "Bit", "State");
- for (i = 0; i < SMU_FEATURE_COUNT; i++) {
- if (sort_feature[i] < 0)
+ for (feature_index = 0; feature_index < SMU_FEATURE_MAX; feature_index++) {
+ if (sort_feature[feature_index] < 0)
continue;
size += sysfs_emit_at(buf, size, "%02d. %-20s (%2d) : %s\n",
count++,
- smu_get_feature_name(smu, sort_feature[i]),
- i,
- !!smu_cmn_feature_is_enabled(smu, sort_feature[i]) ?
+ smu_get_feature_name(smu, sort_feature[feature_index]),
+ feature_index,
+ !!test_bit(feature_index, (unsigned long *)&feature_mask) ?
"enabled" : "disabled");
}
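
The rework above builds the sorted feature listing straight from the 64-bit enabled mask. Illustrative output of the sysfs format being emitted; the feature names, bit positions and mask values are examples only:

features high: 0x00000062 low: 0xd1ffe7ff
No. Feature              Bit : State
00. DPM_PREFETCHER       ( 0) : enabled
01. DPM_GFXCLK           ( 1) : enabled
02. DPM_UCLK             ( 4) : enabled
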
@@ -773,22 +857,16 @@ int smu_cmn_set_pp_feature_mask(struct smu_context *smu,
uint64_t new_mask)
{
int ret = 0;
- uint32_t feature_mask[2] = { 0 };
+ uint64_t feature_mask;
uint64_t feature_2_enabled = 0;
uint64_t feature_2_disabled = 0;
- uint64_t feature_enables = 0;
- ret = smu_cmn_get_enabled_mask(smu,
- feature_mask,
- 2);
+ ret = __smu_get_enabled_features(smu, &feature_mask);
if (ret)
return ret;
- feature_enables = ((uint64_t)feature_mask[1] << 32 |
- (uint64_t)feature_mask[0]);
-
- feature_2_enabled = ~feature_enables & new_mask;
- feature_2_disabled = feature_enables & ~new_mask;
+ feature_2_enabled = ~feature_mask & new_mask;
+ feature_2_disabled = feature_mask & ~new_mask;
if (feature_2_enabled) {
ret = smu_cmn_feature_update_enable_state(smu,
@@ -814,9 +892,6 @@ int smu_cmn_set_pp_feature_mask(struct smu_context *smu,
* @mask
*
* @smu: smu_context pointer
- * @no_hw_disablement: whether real dpm disablement should be performed
- * true: update the cache(about dpm enablement state) only
- * false: real dpm disablement plus cache update
* @mask: the dpm feature which should not be disabled
* SMU_FEATURE_COUNT: no exception, all dpm features
* to disable
@@ -825,10 +900,8 @@ int smu_cmn_set_pp_feature_mask(struct smu_context *smu,
* 0 on success or a negative error code on failure.
*/
int smu_cmn_disable_all_features_with_exception(struct smu_context *smu,
- bool no_hw_disablement,
enum smu_feature_mask mask)
{
- struct smu_feature *feature = &smu->smu_feature;
uint64_t features_to_disable = U64_MAX;
int skipped_feature_id;
@@ -842,18 +915,9 @@ int smu_cmn_disable_all_features_with_exception(struct smu_context *smu,
features_to_disable &= ~(1ULL << skipped_feature_id);
}
- if (no_hw_disablement) {
- mutex_lock(&feature->mutex);
- bitmap_andnot(feature->enabled, feature->enabled,
- (unsigned long *)(&features_to_disable), SMU_FEATURE_MAX);
- mutex_unlock(&feature->mutex);
-
- return 0;
- } else {
- return smu_cmn_feature_update_enable_state(smu,
- features_to_disable,
- 0);
- }
+ return smu_cmn_feature_update_enable_state(smu,
+ features_to_disable,
+ 0);
}
int smu_cmn_get_smc_version(struct smu_context *smu,
@@ -909,7 +973,7 @@ int smu_cmn_update_table(struct smu_context *smu,
table_index);
uint32_t table_size;
int ret = 0;
- if (!table_data || table_id >= SMU_TABLE_COUNT || table_id < 0)
+ if (!table_data || table_index >= SMU_TABLE_COUNT || table_id < 0)
return -EINVAL;
table_size = smu_table->tables[table_index].size;
@@ -920,7 +984,7 @@ int smu_cmn_update_table(struct smu_context *smu,
* Flush hdp cache: to guard the content seen by
* GPU is consitent with CPU.
*/
- amdgpu_asic_flush_hdp(adev, NULL);
+ amdgpu_hdp_flush(adev, NULL);
}
ret = smu_cmn_send_smc_msg_with_param(smu, drv2smu ?
@@ -932,7 +996,7 @@ int smu_cmn_update_table(struct smu_context *smu,
return ret;
if (!drv2smu) {
- amdgpu_asic_invalidate_hdp(adev, NULL);
+ amdgpu_hdp_invalidate(adev, NULL);
memcpy(table_data, table->cpu_addr, table_size);
}
@@ -964,11 +1028,11 @@ int smu_cmn_write_pptable(struct smu_context *smu)
true);
}
-int smu_cmn_get_metrics_table_locked(struct smu_context *smu,
- void *metrics_table,
- bool bypass_cache)
+int smu_cmn_get_metrics_table(struct smu_context *smu,
+ void *metrics_table,
+ bool bypass_cache)
{
- struct smu_table_context *smu_table= &smu->smu_table;
+ struct smu_table_context *smu_table = &smu->smu_table;
uint32_t table_size =
smu_table->tables[SMU_TABLE_SMU_METRICS].size;
int ret = 0;
@@ -994,62 +1058,15 @@ int smu_cmn_get_metrics_table_locked(struct smu_context *smu,
return 0;
}
-int smu_cmn_get_metrics_table(struct smu_context *smu,
- void *metrics_table,
- bool bypass_cache)
-{
- int ret = 0;
-
- mutex_lock(&smu->metrics_lock);
- ret = smu_cmn_get_metrics_table_locked(smu,
- metrics_table,
- bypass_cache);
- mutex_unlock(&smu->metrics_lock);
-
- return ret;
-}
-
-void smu_cmn_init_soft_gpu_metrics(void *table, uint8_t frev, uint8_t crev)
+int smu_cmn_get_combo_pptable(struct smu_context *smu)
{
- struct metrics_table_header *header = (struct metrics_table_header *)table;
- uint16_t structure_size;
-
-#define METRICS_VERSION(a, b) ((a << 16) | b )
-
- switch (METRICS_VERSION(frev, crev)) {
- case METRICS_VERSION(1, 0):
- structure_size = sizeof(struct gpu_metrics_v1_0);
- break;
- case METRICS_VERSION(1, 1):
- structure_size = sizeof(struct gpu_metrics_v1_1);
- break;
- case METRICS_VERSION(1, 2):
- structure_size = sizeof(struct gpu_metrics_v1_2);
- break;
- case METRICS_VERSION(1, 3):
- structure_size = sizeof(struct gpu_metrics_v1_3);
- break;
- case METRICS_VERSION(2, 0):
- structure_size = sizeof(struct gpu_metrics_v2_0);
- break;
- case METRICS_VERSION(2, 1):
- structure_size = sizeof(struct gpu_metrics_v2_1);
- break;
- case METRICS_VERSION(2, 2):
- structure_size = sizeof(struct gpu_metrics_v2_2);
- break;
- default:
- return;
- }
-
-#undef METRICS_VERSION
-
- memset(header, 0xFF, structure_size);
-
- header->format_revision = frev;
- header->content_revision = crev;
- header->structure_size = structure_size;
+ void *pptable = smu->smu_table.combo_pptable;
+ return smu_cmn_update_table(smu,
+ SMU_TABLE_COMBO_PPTABLE,
+ 0,
+ pptable,
+ false);
}
int smu_cmn_set_mp1_state(struct smu_context *smu,
@@ -1100,3 +1117,85 @@ bool smu_cmn_is_audio_func_enabled(struct amdgpu_device *adev)
return snd_driver_loaded;
}
+
+static char *smu_soc_policy_get_desc(struct smu_dpm_policy *policy, int level)
+{
+ if (level < 0 || !(policy->level_mask & BIT(level)))
+ return "Invalid";
+
+ switch (level) {
+ case SOC_PSTATE_DEFAULT:
+ return "soc_pstate_default";
+ case SOC_PSTATE_0:
+ return "soc_pstate_0";
+ case SOC_PSTATE_1:
+ return "soc_pstate_1";
+ case SOC_PSTATE_2:
+ return "soc_pstate_2";
+ }
+
+ return "Invalid";
+}
+
+static struct smu_dpm_policy_desc pstate_policy_desc = {
+ .name = STR_SOC_PSTATE_POLICY,
+ .get_desc = smu_soc_policy_get_desc,
+};
+
+void smu_cmn_generic_soc_policy_desc(struct smu_dpm_policy *policy)
+{
+ policy->desc = &pstate_policy_desc;
+}
+
+static char *smu_xgmi_plpd_policy_get_desc(struct smu_dpm_policy *policy,
+ int level)
+{
+ if (level < 0 || !(policy->level_mask & BIT(level)))
+ return "Invalid";
+
+ switch (level) {
+ case XGMI_PLPD_DISALLOW:
+ return "plpd_disallow";
+ case XGMI_PLPD_DEFAULT:
+ return "plpd_default";
+ case XGMI_PLPD_OPTIMIZED:
+ return "plpd_optimized";
+ }
+
+ return "Invalid";
+}
+
+static struct smu_dpm_policy_desc xgmi_plpd_policy_desc = {
+ .name = STR_XGMI_PLPD_POLICY,
+ .get_desc = smu_xgmi_plpd_policy_get_desc,
+};
+
+void smu_cmn_generic_plpd_policy_desc(struct smu_dpm_policy *policy)
+{
+ policy->desc = &xgmi_plpd_policy_desc;
+}
+
+void smu_cmn_get_backend_workload_mask(struct smu_context *smu,
+ u32 workload_mask,
+ u32 *backend_workload_mask)
+{
+ int workload_type;
+ u32 profile_mode;
+
+ *backend_workload_mask = 0;
+
+ for (profile_mode = 0; profile_mode < PP_SMC_POWER_PROFILE_COUNT; profile_mode++) {
+ if (!(workload_mask & (1 << profile_mode)))
+ continue;
+
+ /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
+ workload_type = smu_cmn_to_asic_specific_index(smu,
+ CMN2ASIC_MAPPING_WORKLOAD,
+ profile_mode);
+
+ if (workload_type < 0)
+ continue;
+
+ *backend_workload_mask |= 1 << workload_type;
+ }
+}
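
smu_cmn_get_backend_workload_mask() translates driver-side PP_SMC_POWER_PROFILE_* bit positions into the firmware's workload bit layout. A hedged usage sketch; the caller name is hypothetical, and whether a given ASIC sends the result via SMU_MSG_SetWorkloadMask as shown is an assumption here:

static int example_apply_workload_mask(struct smu_context *smu, u32 workload_mask)
{
	u32 backend_mask = 0;

	smu_cmn_get_backend_workload_mask(smu, workload_mask, &backend_mask);
	if (!backend_mask)
		return -EINVAL;	/* nothing in the mask maps to a FW workload */

	return smu_cmn_send_smc_msg_with_param(smu,
					       SMU_MSG_SetWorkloadMask,
					       backend_mask,
					       NULL);
}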