Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c')
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c | 112 |
1 file changed, 99 insertions, 13 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
index 0867fd9e15ba..eaddc441c51a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
@@ -28,6 +28,12 @@
 #include "amdgpu.h"
 #include "amdgpu_ucode.h"
 
+#define AMDGPU_UCODE_NAME_MAX (128)
+
+static const struct kicker_device kicker_device_list[] = {
+	{0x744B, 0x00},
+};
+
 static void amdgpu_ucode_print_common_hdr(const struct common_firmware_header *hdr)
 {
 	DRM_DEBUG("size_bytes: %u\n", le32_to_cpu(hdr->size_bytes));
@@ -323,6 +329,12 @@ void amdgpu_ucode_print_sdma_hdr(const struct common_firmware_header *hdr)
 		DRM_DEBUG("ctl_ucode_offset: %u\n", le32_to_cpu(sdma_hdr->ctl_ucode_offset));
 		DRM_DEBUG("ctl_jt_offset: %u\n", le32_to_cpu(sdma_hdr->ctl_jt_offset));
 		DRM_DEBUG("ctl_jt_size: %u\n", le32_to_cpu(sdma_hdr->ctl_jt_size));
+	} else if (version_major == 3) {
+		const struct sdma_firmware_header_v3_0 *sdma_hdr =
+			container_of(hdr, struct sdma_firmware_header_v3_0, header);
+
+		DRM_DEBUG("ucode_feature_version: %u\n",
+			  le32_to_cpu(sdma_hdr->ucode_feature_version));
 	} else {
 		DRM_ERROR("Unknown SDMA ucode version: %u.%u\n",
 			  version_major, version_minor);
@@ -682,6 +694,32 @@ const char *amdgpu_ucode_name(enum AMDGPU_UCODE_ID ucode_id)
 		return "UMSCH_MM_CMD_BUFFER";
 	case AMDGPU_UCODE_ID_JPEG_RAM:
 		return "JPEG";
+	case AMDGPU_UCODE_ID_SDMA_RS64:
+		return "RS64_SDMA";
+	case AMDGPU_UCODE_ID_CP_RS64_PFP:
+		return "RS64_PFP";
+	case AMDGPU_UCODE_ID_CP_RS64_ME:
+		return "RS64_ME";
+	case AMDGPU_UCODE_ID_CP_RS64_MEC:
+		return "RS64_MEC";
+	case AMDGPU_UCODE_ID_CP_RS64_PFP_P0_STACK:
+		return "RS64_PFP_P0_STACK";
+	case AMDGPU_UCODE_ID_CP_RS64_PFP_P1_STACK:
+		return "RS64_PFP_P1_STACK";
+	case AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK:
+		return "RS64_ME_P0_STACK";
+	case AMDGPU_UCODE_ID_CP_RS64_ME_P1_STACK:
+		return "RS64_ME_P1_STACK";
+	case AMDGPU_UCODE_ID_CP_RS64_MEC_P0_STACK:
+		return "RS64_MEC_P0_STACK";
+	case AMDGPU_UCODE_ID_CP_RS64_MEC_P1_STACK:
+		return "RS64_MEC_P1_STACK";
+	case AMDGPU_UCODE_ID_CP_RS64_MEC_P2_STACK:
+		return "RS64_MEC_P2_STACK";
+	case AMDGPU_UCODE_ID_CP_RS64_MEC_P3_STACK:
+		return "RS64_MEC_P3_STACK";
+	case AMDGPU_UCODE_ID_ISP:
+		return "ISP";
 	default:
 		return "UNKNOWN UCODE";
 	}
@@ -731,8 +769,10 @@ FW_VERSION_ATTR(sdma_fw_version, 0444, sdma.instance[0].fw_version);
 FW_VERSION_ATTR(sdma2_fw_version, 0444, sdma.instance[1].fw_version);
 FW_VERSION_ATTR(vcn_fw_version, 0444, vcn.fw_version);
 FW_VERSION_ATTR(dmcu_fw_version, 0444, dm.dmcu_fw_version);
+FW_VERSION_ATTR(dmcub_fw_version, 0444, dm.dmcub_fw_version);
 FW_VERSION_ATTR(mes_fw_version, 0444, mes.sched_version & AMDGPU_MES_VERSION_MASK);
 FW_VERSION_ATTR(mes_kiq_fw_version, 0444, mes.kiq_version & AMDGPU_MES_VERSION_MASK);
+FW_VERSION_ATTR(pldm_fw_version, 0444, firmware.pldm_version);
 
 static struct attribute *fw_attrs[] = {
 	&dev_attr_vce_fw_version.attr, &dev_attr_uvd_fw_version.attr,
@@ -745,8 +785,9 @@ static struct attribute *fw_attrs[] = {
 	&dev_attr_ta_ras_fw_version.attr, &dev_attr_ta_xgmi_fw_version.attr,
 	&dev_attr_smc_fw_version.attr, &dev_attr_sdma_fw_version.attr,
 	&dev_attr_sdma2_fw_version.attr, &dev_attr_vcn_fw_version.attr,
-	&dev_attr_dmcu_fw_version.attr, &dev_attr_imu_fw_version.attr,
-	&dev_attr_mes_fw_version.attr, &dev_attr_mes_kiq_fw_version.attr,
+	&dev_attr_dmcu_fw_version.attr, &dev_attr_dmcub_fw_version.attr,
+	&dev_attr_imu_fw_version.attr, &dev_attr_mes_fw_version.attr,
+	&dev_attr_mes_kiq_fw_version.attr, &dev_attr_pldm_fw_version.attr,
 	NULL
 };
 
@@ -791,6 +832,7 @@ static int amdgpu_ucode_init_single_fw(struct amdgpu_device *adev,
 	const struct dmcub_firmware_header_v1_0 *dmcub_hdr = NULL;
 	const struct mes_firmware_header_v1_0 *mes_hdr = NULL;
 	const struct sdma_firmware_header_v2_0 *sdma_hdr = NULL;
+	const struct sdma_firmware_header_v3_0 *sdmav3_hdr = NULL;
 	const struct imu_firmware_header_v1_0 *imu_hdr = NULL;
 	const struct vpe_firmware_header_v1_0 *vpe_hdr = NULL;
 	const struct umsch_mm_firmware_header_v1_0 *umsch_mm_hdr = NULL;
@@ -812,6 +854,7 @@ static int amdgpu_ucode_init_single_fw(struct amdgpu_device *adev,
 	dmcub_hdr = (const struct dmcub_firmware_header_v1_0 *)ucode->fw->data;
 	mes_hdr = (const struct mes_firmware_header_v1_0 *)ucode->fw->data;
 	sdma_hdr = (const struct sdma_firmware_header_v2_0 *)ucode->fw->data;
+	sdmav3_hdr = (const struct sdma_firmware_header_v3_0 *)ucode->fw->data;
 	imu_hdr = (const struct imu_firmware_header_v1_0 *)ucode->fw->data;
 	vpe_hdr = (const struct vpe_firmware_header_v1_0 *)ucode->fw->data;
 	umsch_mm_hdr = (const struct umsch_mm_firmware_header_v1_0 *)ucode->fw->data;
@@ -828,6 +871,11 @@ static int amdgpu_ucode_init_single_fw(struct amdgpu_device *adev,
 		ucode_addr = (u8 *)ucode->fw->data +
 			le32_to_cpu(sdma_hdr->ctl_ucode_offset);
 		break;
+	case AMDGPU_UCODE_ID_SDMA_RS64:
+		ucode->ucode_size = le32_to_cpu(sdmav3_hdr->ucode_size_bytes);
+		ucode_addr = (u8 *)ucode->fw->data +
+			le32_to_cpu(sdmav3_hdr->header.ucode_array_offset_bytes);
+		break;
 	case AMDGPU_UCODE_ID_CP_MEC1:
 	case AMDGPU_UCODE_ID_CP_MEC2:
 		ucode->ucode_size = le32_to_cpu(header->ucode_size_bytes) -
@@ -1175,6 +1223,7 @@ static const char *amdgpu_ucode_legacy_naming(struct amdgpu_device *adev, int bl
 	case IP_VERSION(11, 0, 13):
 		return "beige_goby";
 	case IP_VERSION(11, 5, 0):
+	case IP_VERSION(11, 5, 2):
 		return "vangogh";
 	case IP_VERSION(12, 0, 1):
 		return "green_sardine";
@@ -1342,6 +1391,19 @@ static const char *amdgpu_ucode_legacy_naming(struct amdgpu_device *adev, int bl
 	return NULL;
 }
 
+bool amdgpu_is_kicker_fw(struct amdgpu_device *adev)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(kicker_device_list); i++) {
+		if (adev->pdev->device == kicker_device_list[i].device &&
+		    adev->pdev->revision == kicker_device_list[i].revision)
+			return true;
+	}
+
+	return false;
+}
+
 void amdgpu_ucode_ip_version_decode(struct amdgpu_device *adev, int block_type, char *ucode_prefix, int len)
 {
 	int maj, min, rev;
@@ -1374,6 +1436,9 @@ void amdgpu_ucode_ip_version_decode(struct amdgpu_device *adev, int block_type,
 	case VPE_HWIP:
 		ip_name = "vpe";
 		break;
+	case ISP_HWIP:
+		ip_name = "isp";
+		break;
 	default:
 		BUG();
 	}
@@ -1390,28 +1455,49 @@ void amdgpu_ucode_ip_version_decode(struct amdgpu_device *adev, int block_type,
  *
  * @adev: amdgpu device
  * @fw: pointer to load firmware to
- * @fw_name: firmware to load
+ * @required: whether the firmware is required
+ * @fmt: firmware name format string
+ * @...: variable arguments
  *
  * This is a helper that will use request_firmware and amdgpu_ucode_validate
 * to load and run basic validation on firmware. If the load fails, remap
 * the error code to -ENODEV, so that early_init functions will fail to load.
 */
 int amdgpu_ucode_request(struct amdgpu_device *adev, const struct firmware **fw,
-			 const char *fw_name)
+			 enum amdgpu_ucode_required required, const char *fmt, ...)
 {
-	int err = request_firmware(fw, fw_name, adev->dev);
+	char fname[AMDGPU_UCODE_NAME_MAX];
+	va_list ap;
+	int r;
+
+	va_start(ap, fmt);
+	r = vsnprintf(fname, sizeof(fname), fmt, ap);
+	va_end(ap);
+	if (r == sizeof(fname)) {
+		dev_warn(adev->dev, "amdgpu firmware name buffer overflow\n");
+		return -EOVERFLOW;
+	}
 
-	if (err)
+	if (required == AMDGPU_UCODE_REQUIRED)
+		r = request_firmware(fw, fname, adev->dev);
+	else {
+		r = firmware_request_nowarn(fw, fname, adev->dev);
+		if (r)
+			drm_info(&adev->ddev, "Optional firmware \"%s\" was not found\n", fname);
+	}
+	if (r)
 		return -ENODEV;
 
-	err = amdgpu_ucode_validate(*fw);
-	if (err) {
-		dev_dbg(adev->dev, "\"%s\" failed to validate\n", fw_name);
-		release_firmware(*fw);
-		*fw = NULL;
-	}
+	r = amdgpu_ucode_validate(*fw);
+	if (r)
+		/*
+		 * amdgpu_ucode_request() should be paired with amdgpu_ucode_release()
+		 * regardless of success or failure; amdgpu_ucode_release() takes care
+		 * of releasing the firmware, so avoid a redundant release here.
+		 */
+		dev_dbg(adev->dev, "\"%s\" failed to validate\n", fname);
 
-	return err;
+	return r;
 }
 
 /*
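For reference, the hunks above change amdgpu_ucode_request() from taking a fixed firmware path to building the name from a format string plus an explicit required/optional flag. The caller-side sketch below is illustrative only and is not part of this patch: the function name, the SDMA0_HWIP block, and the sdma.instance[0].fw target are assumptions chosen for the example, while the amdgpu_ucode_request()/amdgpu_ucode_release() pairing follows the comment in the diff.

/* Hypothetical early_init-style caller; names here are illustrative only. */
static int example_sdma_init_microcode(struct amdgpu_device *adev)
{
	char ucode_prefix[30];
	int r;

	/* Derive the "sdma_x_y_z"-style prefix for the detected SDMA IP version. */
	amdgpu_ucode_ip_version_decode(adev, SDMA0_HWIP, ucode_prefix,
				       sizeof(ucode_prefix));

	/*
	 * The firmware file name is now assembled inside amdgpu_ucode_request()
	 * from the format string. AMDGPU_UCODE_REQUIRED keeps the warn-on-missing
	 * request_firmware() path; the optional variant uses
	 * firmware_request_nowarn() instead.
	 */
	r = amdgpu_ucode_request(adev, &adev->sdma.instance[0].fw,
				 AMDGPU_UCODE_REQUIRED,
				 "amdgpu/%s.bin", ucode_prefix);
	if (r)
		/* Pair the request with a release even on failure. */
		amdgpu_ucode_release(&adev->sdma.instance[0].fw);

	return r;
}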