Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c  |  244
1 file changed, 207 insertions, 37 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
index 380b89114341..e96f24e9ad57 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
@@ -28,6 +28,13 @@
#include "amdgpu.h"
#include "amdgpu_ucode.h"
+#define AMDGPU_UCODE_NAME_MAX (128)
+
+static const struct kicker_device kicker_device_list[] = {
+ {0x744B, 0x00},
+ {0x7551, 0xC8}
+};
+
static void amdgpu_ucode_print_common_hdr(const struct common_firmware_header *hdr)
{
DRM_DEBUG("size_bytes: %u\n", le32_to_cpu(hdr->size_bytes));
@@ -126,19 +133,6 @@ void amdgpu_ucode_print_gfx_hdr(const struct common_firmware_header *hdr)
}
}
-void amdgpu_ucode_print_imu_hdr(const struct common_firmware_header *hdr)
-{
- uint16_t version_major = le16_to_cpu(hdr->header_version_major);
- uint16_t version_minor = le16_to_cpu(hdr->header_version_minor);
-
- DRM_DEBUG("IMU\n");
- amdgpu_ucode_print_common_hdr(hdr);
-
- if (version_major != 1) {
- DRM_ERROR("Unknown GFX ucode version: %u.%u\n", version_major, version_minor);
- }
-}
-
void amdgpu_ucode_print_rlc_hdr(const struct common_firmware_header *hdr)
{
uint16_t version_major = le16_to_cpu(hdr->header_version_major);
@@ -336,6 +330,12 @@ void amdgpu_ucode_print_sdma_hdr(const struct common_firmware_header *hdr)
DRM_DEBUG("ctl_ucode_offset: %u\n", le32_to_cpu(sdma_hdr->ctl_ucode_offset));
DRM_DEBUG("ctl_jt_offset: %u\n", le32_to_cpu(sdma_hdr->ctl_jt_offset));
DRM_DEBUG("ctl_jt_size: %u\n", le32_to_cpu(sdma_hdr->ctl_jt_size));
+ } else if (version_major == 3) {
+ const struct sdma_firmware_header_v3_0 *sdma_hdr =
+ container_of(hdr, struct sdma_firmware_header_v3_0, header);
+
+ DRM_DEBUG("ucode_reversion: %u\n",
+ le32_to_cpu(sdma_hdr->ucode_feature_version));
} else {
DRM_ERROR("Unknown SDMA ucode version: %u.%u\n",
version_major, version_minor);
@@ -472,6 +472,12 @@ void amdgpu_ucode_print_psp_hdr(const struct common_firmware_header *hdr)
DRM_DEBUG("psp_dbg_drv_size_bytes: %u\n",
le32_to_cpu(desc->size_bytes));
break;
+ case PSP_FW_TYPE_PSP_RAS_DRV:
+ DRM_DEBUG("psp_ras_drv_version: %u\n",
+ le32_to_cpu(desc->fw_version));
+ DRM_DEBUG("psp_ras_drv_size_bytes: %u\n",
+ le32_to_cpu(desc->size_bytes));
+ break;
default:
DRM_DEBUG("Unsupported PSP fw type: %d\n", desc->fw_type);
break;
@@ -563,6 +569,8 @@ amdgpu_ucode_get_load_type(struct amdgpu_device *adev, int load_type)
default:
if (!load_type)
return AMDGPU_FW_LOAD_DIRECT;
+ else if (load_type == 3)
+ return AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO;
else
return AMDGPU_FW_LOAD_PSP;
}
@@ -649,6 +657,8 @@ const char *amdgpu_ucode_name(enum AMDGPU_UCODE_ID ucode_id)
return "SMC";
case AMDGPU_UCODE_ID_PPTABLE:
return "PPTABLE";
+ case AMDGPU_UCODE_ID_P2S_TABLE:
+ return "P2STABLE";
case AMDGPU_UCODE_ID_UVD:
return "UVD";
case AMDGPU_UCODE_ID_UVD1:
@@ -669,20 +679,72 @@ const char *amdgpu_ucode_name(enum AMDGPU_UCODE_ID ucode_id)
return "VCN1_RAM";
case AMDGPU_UCODE_ID_DMCUB:
return "DMCUB";
+ case AMDGPU_UCODE_ID_CAP:
+ return "CAP";
+ case AMDGPU_UCODE_ID_VPE_CTX:
+ return "VPE_CTX";
+ case AMDGPU_UCODE_ID_VPE_CTL:
+ return "VPE_CTL";
+ case AMDGPU_UCODE_ID_VPE:
+ return "VPE";
+ case AMDGPU_UCODE_ID_UMSCH_MM_UCODE:
+ return "UMSCH_MM_UCODE";
+ case AMDGPU_UCODE_ID_UMSCH_MM_DATA:
+ return "UMSCH_MM_DATA";
+ case AMDGPU_UCODE_ID_UMSCH_MM_CMD_BUFFER:
+ return "UMSCH_MM_CMD_BUFFER";
+ case AMDGPU_UCODE_ID_JPEG_RAM:
+ return "JPEG";
+ case AMDGPU_UCODE_ID_SDMA_RS64:
+ return "RS64_SDMA";
+ case AMDGPU_UCODE_ID_CP_RS64_PFP:
+ return "RS64_PFP";
+ case AMDGPU_UCODE_ID_CP_RS64_ME:
+ return "RS64_ME";
+ case AMDGPU_UCODE_ID_CP_RS64_MEC:
+ return "RS64_MEC";
+ case AMDGPU_UCODE_ID_CP_RS64_PFP_P0_STACK:
+ return "RS64_PFP_P0_STACK";
+ case AMDGPU_UCODE_ID_CP_RS64_PFP_P1_STACK:
+ return "RS64_PFP_P1_STACK";
+ case AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK:
+ return "RS64_ME_P0_STACK";
+ case AMDGPU_UCODE_ID_CP_RS64_ME_P1_STACK:
+ return "RS64_ME_P1_STACK";
+ case AMDGPU_UCODE_ID_CP_RS64_MEC_P0_STACK:
+ return "RS64_MEC_P0_STACK";
+ case AMDGPU_UCODE_ID_CP_RS64_MEC_P1_STACK:
+ return "RS64_MEC_P1_STACK";
+ case AMDGPU_UCODE_ID_CP_RS64_MEC_P2_STACK:
+ return "RS64_MEC_P2_STACK";
+ case AMDGPU_UCODE_ID_CP_RS64_MEC_P3_STACK:
+ return "RS64_MEC_P3_STACK";
+ case AMDGPU_UCODE_ID_ISP:
+ return "ISP";
default:
return "UNKNOWN UCODE";
}
}
+static inline int amdgpu_ucode_is_valid(uint32_t fw_version)
+{
+ if (!fw_version)
+ return -EINVAL;
+
+ return 0;
+}
+
#define FW_VERSION_ATTR(name, mode, field) \
static ssize_t show_##name(struct device *dev, \
- struct device_attribute *attr, \
- char *buf) \
+ struct device_attribute *attr, char *buf) \
{ \
struct drm_device *ddev = dev_get_drvdata(dev); \
struct amdgpu_device *adev = drm_to_adev(ddev); \
\
- return sysfs_emit(buf, "0x%08x\n", adev->field); \
+ if (!buf) \
+ return amdgpu_ucode_is_valid(adev->field); \
+ \
+ return sysfs_emit(buf, "0x%08x\n", adev->field); \
} \
static DEVICE_ATTR(name, mode, show_##name, NULL)
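For reference, here is a sketch of what one instantiation of the macro expands to after this change, using the existing vce_fw_version attribute (the 0444 mode is assumed to match the other attributes); passing a NULL buf turns the show callback into a validity probe for the is_visible hook added below:

static ssize_t show_vce_fw_version(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	/* NULL buf: return -EINVAL while vce.fw_version is still 0 */
	if (!buf)
		return amdgpu_ucode_is_valid(adev->vce.fw_version);

	return sysfs_emit(buf, "0x%08x\n", adev->vce.fw_version);
}
static DEVICE_ATTR(vce_fw_version, 0444, show_vce_fw_version, NULL);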
@@ -708,6 +770,10 @@ FW_VERSION_ATTR(sdma_fw_version, 0444, sdma.instance[0].fw_version);
FW_VERSION_ATTR(sdma2_fw_version, 0444, sdma.instance[1].fw_version);
FW_VERSION_ATTR(vcn_fw_version, 0444, vcn.fw_version);
FW_VERSION_ATTR(dmcu_fw_version, 0444, dm.dmcu_fw_version);
+FW_VERSION_ATTR(dmcub_fw_version, 0444, dm.dmcub_fw_version);
+FW_VERSION_ATTR(mes_fw_version, 0444, mes.sched_version & AMDGPU_MES_VERSION_MASK);
+FW_VERSION_ATTR(mes_kiq_fw_version, 0444, mes.kiq_version & AMDGPU_MES_VERSION_MASK);
+FW_VERSION_ATTR(pldm_fw_version, 0444, firmware.pldm_version);
static struct attribute *fw_attrs[] = {
&dev_attr_vce_fw_version.attr, &dev_attr_uvd_fw_version.attr,
@@ -720,13 +786,30 @@ static struct attribute *fw_attrs[] = {
&dev_attr_ta_ras_fw_version.attr, &dev_attr_ta_xgmi_fw_version.attr,
&dev_attr_smc_fw_version.attr, &dev_attr_sdma_fw_version.attr,
&dev_attr_sdma2_fw_version.attr, &dev_attr_vcn_fw_version.attr,
- &dev_attr_dmcu_fw_version.attr, &dev_attr_imu_fw_version.attr,
+ &dev_attr_dmcu_fw_version.attr, &dev_attr_dmcub_fw_version.attr,
+ &dev_attr_imu_fw_version.attr, &dev_attr_mes_fw_version.attr,
+ &dev_attr_mes_kiq_fw_version.attr, &dev_attr_pldm_fw_version.attr,
NULL
};
+#define to_dev_attr(x) container_of(x, struct device_attribute, attr)
+
+static umode_t amdgpu_ucode_sys_visible(struct kobject *kobj,
+ struct attribute *attr, int idx)
+{
+ struct device_attribute *dev_attr = to_dev_attr(attr);
+ struct device *dev = kobj_to_dev(kobj);
+
+ if (dev_attr->show(dev, dev_attr, NULL) == -EINVAL)
+ return 0;
+
+ return attr->mode;
+}
+
static const struct attribute_group fw_attr_group = {
.name = "fw_version",
- .attrs = fw_attrs
+ .attrs = fw_attrs,
+ .is_visible = amdgpu_ucode_sys_visible
};
int amdgpu_ucode_sysfs_init(struct amdgpu_device *adev)
@@ -750,10 +833,13 @@ static int amdgpu_ucode_init_single_fw(struct amdgpu_device *adev,
const struct dmcub_firmware_header_v1_0 *dmcub_hdr = NULL;
const struct mes_firmware_header_v1_0 *mes_hdr = NULL;
const struct sdma_firmware_header_v2_0 *sdma_hdr = NULL;
+ const struct sdma_firmware_header_v3_0 *sdmav3_hdr = NULL;
const struct imu_firmware_header_v1_0 *imu_hdr = NULL;
+ const struct vpe_firmware_header_v1_0 *vpe_hdr = NULL;
+ const struct umsch_mm_firmware_header_v1_0 *umsch_mm_hdr = NULL;
u8 *ucode_addr;
- if (NULL == ucode->fw)
+ if (!ucode->fw)
return 0;
ucode->mc_addr = mc_addr;
@@ -769,7 +855,10 @@ static int amdgpu_ucode_init_single_fw(struct amdgpu_device *adev,
dmcub_hdr = (const struct dmcub_firmware_header_v1_0 *)ucode->fw->data;
mes_hdr = (const struct mes_firmware_header_v1_0 *)ucode->fw->data;
sdma_hdr = (const struct sdma_firmware_header_v2_0 *)ucode->fw->data;
+ sdmav3_hdr = (const struct sdma_firmware_header_v3_0 *)ucode->fw->data;
imu_hdr = (const struct imu_firmware_header_v1_0 *)ucode->fw->data;
+ vpe_hdr = (const struct vpe_firmware_header_v1_0 *)ucode->fw->data;
+ umsch_mm_hdr = (const struct umsch_mm_firmware_header_v1_0 *)ucode->fw->data;
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
switch (ucode->ucode_id) {
@@ -783,6 +872,11 @@ static int amdgpu_ucode_init_single_fw(struct amdgpu_device *adev,
ucode_addr = (u8 *)ucode->fw->data +
le32_to_cpu(sdma_hdr->ctl_ucode_offset);
break;
+ case AMDGPU_UCODE_ID_SDMA_RS64:
+ ucode->ucode_size = le32_to_cpu(sdmav3_hdr->ucode_size_bytes);
+ ucode_addr = (u8 *)ucode->fw->data +
+ le32_to_cpu(sdmav3_hdr->header.ucode_array_offset_bytes);
+ break;
case AMDGPU_UCODE_ID_CP_MEC1:
case AMDGPU_UCODE_ID_CP_MEC2:
ucode->ucode_size = le32_to_cpu(header->ucode_size_bytes) -
@@ -886,6 +980,10 @@ static int amdgpu_ucode_init_single_fw(struct amdgpu_device *adev,
ucode->ucode_size = ucode->fw->size;
ucode_addr = (u8 *)ucode->fw->data;
break;
+ case AMDGPU_UCODE_ID_P2S_TABLE:
+ ucode->ucode_size = ucode->fw->size;
+ ucode_addr = (u8 *)ucode->fw->data;
+ break;
case AMDGPU_UCODE_ID_IMU_I:
ucode->ucode_size = le32_to_cpu(imu_hdr->imu_iram_ucode_size_bytes);
ucode_addr = (u8 *)ucode->fw->data +
@@ -952,6 +1050,26 @@ static int amdgpu_ucode_init_single_fw(struct amdgpu_device *adev,
ucode_addr = (u8 *)ucode->fw->data +
le32_to_cpu(cpv2_hdr->data_offset_bytes);
break;
+ case AMDGPU_UCODE_ID_VPE_CTX:
+ ucode->ucode_size = le32_to_cpu(vpe_hdr->ctx_ucode_size_bytes);
+ ucode_addr = (u8 *)ucode->fw->data +
+ le32_to_cpu(vpe_hdr->header.ucode_array_offset_bytes);
+ break;
+ case AMDGPU_UCODE_ID_VPE_CTL:
+ ucode->ucode_size = le32_to_cpu(vpe_hdr->ctl_ucode_size_bytes);
+ ucode_addr = (u8 *)ucode->fw->data +
+ le32_to_cpu(vpe_hdr->ctl_ucode_offset);
+ break;
+ case AMDGPU_UCODE_ID_UMSCH_MM_UCODE:
+ ucode->ucode_size = le32_to_cpu(umsch_mm_hdr->umsch_mm_ucode_size_bytes);
+ ucode_addr = (u8 *)ucode->fw->data +
+ le32_to_cpu(umsch_mm_hdr->header.ucode_array_offset_bytes);
+ break;
+ case AMDGPU_UCODE_ID_UMSCH_MM_DATA:
+ ucode->ucode_size = le32_to_cpu(umsch_mm_hdr->umsch_mm_ucode_data_size_bytes);
+ ucode_addr = (u8 *)ucode->fw->data +
+ le32_to_cpu(umsch_mm_hdr->umsch_mm_ucode_data_offset_bytes);
+ break;
default:
ucode->ucode_size = le32_to_cpu(header->ucode_size_bytes);
ucode_addr = (u8 *)ucode->fw->data +
@@ -977,7 +1095,7 @@ static int amdgpu_ucode_patch_jt(struct amdgpu_firmware_info *ucode,
uint8_t *src_addr = NULL;
uint8_t *dst_addr = NULL;
- if (NULL == ucode->fw)
+ if (!ucode->fw)
return 0;
comm_hdr = (const struct common_firmware_header *)ucode->fw->data;
@@ -995,9 +1113,11 @@ static int amdgpu_ucode_patch_jt(struct amdgpu_firmware_info *ucode,
int amdgpu_ucode_create_bo(struct amdgpu_device *adev)
{
- if (adev->firmware.load_type != AMDGPU_FW_LOAD_DIRECT) {
+ if ((adev->firmware.load_type != AMDGPU_FW_LOAD_DIRECT) &&
+ (adev->firmware.load_type != AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO)) {
amdgpu_bo_create_kernel(adev, adev->firmware.fw_size, PAGE_SIZE,
- amdgpu_sriov_vf(adev) ? AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
+ (amdgpu_sriov_vf(adev) || adev->debug_use_vram_fw_buf) ?
+ AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
&adev->firmware.fw_buf,
&adev->firmware.fw_buf_mc,
&adev->firmware.fw_buf_ptr);
@@ -1040,6 +1160,9 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
adev->firmware.max_ucodes = AMDGPU_UCODE_ID_MAXIMUM;
}
+ if (amdgpu_virt_xgmi_migrate_enabled(adev) && adev->firmware.fw_buf)
+ adev->firmware.fw_buf_mc = amdgpu_bo_fb_aper_addr(adev->firmware.fw_buf);
+
for (i = 0; i < adev->firmware.max_ucodes; i++) {
ucode = &adev->firmware.ucode[i];
if (ucode->fw) {
@@ -1048,6 +1171,7 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
if (i == AMDGPU_UCODE_ID_CP_MEC1 &&
adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
const struct gfx_firmware_header_v1_0 *cp_hdr;
+
cp_hdr = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
amdgpu_ucode_patch_jt(ucode, adev->firmware.fw_buf_mc + fw_offset,
adev->firmware.fw_buf_ptr + fw_offset);
@@ -1062,7 +1186,7 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
static const char *amdgpu_ucode_legacy_naming(struct amdgpu_device *adev, int block_type)
{
if (block_type == MP0_HWIP) {
- switch (adev->ip_versions[MP0_HWIP][0]) {
+ switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
case IP_VERSION(9, 0, 0):
switch (adev->asic_type) {
case CHIP_VEGA10:
@@ -1103,6 +1227,7 @@ static const char *amdgpu_ucode_legacy_naming(struct amdgpu_device *adev, int bl
case IP_VERSION(11, 0, 13):
return "beige_goby";
case IP_VERSION(11, 5, 0):
+ case IP_VERSION(11, 5, 2):
return "vangogh";
case IP_VERSION(12, 0, 1):
return "green_sardine";
@@ -1113,7 +1238,7 @@ static const char *amdgpu_ucode_legacy_naming(struct amdgpu_device *adev, int bl
return "yellow_carp";
}
} else if (block_type == MP1_HWIP) {
- switch (adev->ip_versions[MP1_HWIP][0]) {
+ switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
case IP_VERSION(9, 0, 0):
case IP_VERSION(10, 0, 0):
case IP_VERSION(10, 0, 1):
@@ -1139,7 +1264,7 @@ static const char *amdgpu_ucode_legacy_naming(struct amdgpu_device *adev, int bl
return "aldebaran_smc";
}
} else if (block_type == SDMA0_HWIP) {
- switch (adev->ip_versions[SDMA0_HWIP][0]) {
+ switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) {
case IP_VERSION(4, 0, 0):
return "vega10_sdma";
case IP_VERSION(4, 0, 1):
@@ -1183,7 +1308,7 @@ static const char *amdgpu_ucode_legacy_naming(struct amdgpu_device *adev, int bl
return "vangogh_sdma";
}
} else if (block_type == UVD_HWIP) {
- switch (adev->ip_versions[UVD_HWIP][0]) {
+ switch (amdgpu_ip_version(adev, UVD_HWIP, 0)) {
case IP_VERSION(1, 0, 0):
case IP_VERSION(1, 0, 1):
if (adev->apu_flags & AMD_APU_IS_RAVEN2)
@@ -1208,7 +1333,8 @@ static const char *amdgpu_ucode_legacy_naming(struct amdgpu_device *adev, int bl
case IP_VERSION(3, 0, 0):
case IP_VERSION(3, 0, 64):
case IP_VERSION(3, 0, 192):
- if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0))
+ if (amdgpu_ip_version(adev, GC_HWIP, 0) ==
+ IP_VERSION(10, 3, 0))
return "sienna_cichlid_vcn";
return "navy_flounder_vcn";
case IP_VERSION(3, 0, 2):
@@ -1221,7 +1347,7 @@ static const char *amdgpu_ucode_legacy_naming(struct amdgpu_device *adev, int bl
return "yellow_carp_vcn";
}
} else if (block_type == GC_HWIP) {
- switch (adev->ip_versions[GC_HWIP][0]) {
+ switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
case IP_VERSION(9, 0, 1):
return "vega10";
case IP_VERSION(9, 2, 1):
@@ -1269,12 +1395,25 @@ static const char *amdgpu_ucode_legacy_naming(struct amdgpu_device *adev, int bl
return NULL;
}
+bool amdgpu_is_kicker_fw(struct amdgpu_device *adev)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(kicker_device_list); i++) {
+ if (adev->pdev->device == kicker_device_list[i].device &&
+ adev->pdev->revision == kicker_device_list[i].revision)
+ return true;
+ }
+
+ return false;
+}
+
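A minimal sketch of how a firmware-selection path might consult the new helper; the "_kicker" file-name suffix, the pm.fw field and the ucode_prefix variable are illustrative assumptions, not taken from this patch:

	/* hypothetical caller: pick a kicker-specific image when the
	 * PCI device/revision matches kicker_device_list
	 */
	if (amdgpu_is_kicker_fw(adev))
		r = amdgpu_ucode_request(adev, &adev->pm.fw, AMDGPU_UCODE_REQUIRED,
					 "amdgpu/%s_kicker.bin", ucode_prefix);
	else
		r = amdgpu_ucode_request(adev, &adev->pm.fw, AMDGPU_UCODE_REQUIRED,
					 "amdgpu/%s.bin", ucode_prefix);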
void amdgpu_ucode_ip_version_decode(struct amdgpu_device *adev, int block_type, char *ucode_prefix, int len)
{
int maj, min, rev;
char *ip_name;
const char *legacy;
- uint32_t version = adev->ip_versions[block_type][0];
+ uint32_t version = amdgpu_ip_version(adev, block_type, 0);
legacy = amdgpu_ucode_legacy_naming(adev, block_type);
if (legacy) {
@@ -1298,6 +1437,12 @@ void amdgpu_ucode_ip_version_decode(struct amdgpu_device *adev, int block_type,
case UVD_HWIP:
ip_name = "vcn";
break;
+ case VPE_HWIP:
+ ip_name = "vpe";
+ break;
+ case ISP_HWIP:
+ ip_name = "isp";
+ break;
default:
BUG();
}
@@ -1314,24 +1459,49 @@ void amdgpu_ucode_ip_version_decode(struct amdgpu_device *adev, int block_type,
*
* @adev: amdgpu device
* @fw: pointer to load firmware to
- * @fw_name: firmware to load
+ * @required: whether the firmware is required
+ * @fmt: firmware name format string
+ * @...: variable arguments
*
* This is a helper that will use request_firmware and amdgpu_ucode_validate
* to load and run basic validation on firmware. If the load fails, remap
* the error code to -ENODEV, so that early_init functions will fail to load.
*/
int amdgpu_ucode_request(struct amdgpu_device *adev, const struct firmware **fw,
- const char *fw_name)
+ enum amdgpu_ucode_required required, const char *fmt, ...)
{
- int err = request_firmware(fw, fw_name, adev->dev);
+ char fname[AMDGPU_UCODE_NAME_MAX];
+ va_list ap;
+ int r;
+
+ va_start(ap, fmt);
+ r = vsnprintf(fname, sizeof(fname), fmt, ap);
+ va_end(ap);
+ if (r >= sizeof(fname)) {
+ dev_warn(adev->dev, "amdgpu firmware name buffer overflow\n");
+ return -EOVERFLOW;
+ }
- if (err)
+ if (required == AMDGPU_UCODE_REQUIRED)
+ r = request_firmware(fw, fname, adev->dev);
+ else {
+ r = firmware_request_nowarn(fw, fname, adev->dev);
+ if (r)
+ drm_info(&adev->ddev, "Optional firmware \"%s\" was not found\n", fname);
+ }
+ if (r)
return -ENODEV;
- err = amdgpu_ucode_validate(*fw);
- if (err)
- dev_dbg(adev->dev, "\"%s\" failed to validate\n", fw_name);
- return err;
+ r = amdgpu_ucode_validate(*fw);
+ if (r)
+ /*
+ * amdgpu_ucode_request() should be paired with amdgpu_ucode_release()
+ * regardless of success or failure; amdgpu_ucode_release() handles
+ * releasing the firmware, so avoid a redundant release here.
+ */
+ dev_dbg(adev->dev, "\"%s\" failed to validate\n", fname);
+
+ return r;
}
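A hedged usage sketch of the reworked helper: callers now pass a printf-style name plus a required/optional flag, and pair the request with amdgpu_ucode_release() regardless of the outcome (the sos firmware name, the psp.sos_fw field and the ucode_prefix variable are illustrative assumptions):

	int r;

	/* required firmware: a missing file is a hard -ENODEV failure */
	r = amdgpu_ucode_request(adev, &adev->psp.sos_fw, AMDGPU_UCODE_REQUIRED,
				 "amdgpu/%s_sos.bin", ucode_prefix);
	if (r) {
		/* safe to call even when request or validation failed */
		amdgpu_ucode_release(&adev->psp.sos_fw);
		return r;
	}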
/*