Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c')
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c | 150
1 file changed, 142 insertions(+), 8 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
index 1d9d187de6ee..174badca27e7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
@@ -158,6 +158,7 @@ static int amdgpu_sdma_init_inst_ctx(struct amdgpu_sdma_instance *sdma_inst)
 	const struct common_firmware_header *header = NULL;
 	const struct sdma_firmware_header_v1_0 *hdr;
 	const struct sdma_firmware_header_v2_0 *hdr_v2;
+	const struct sdma_firmware_header_v3_0 *hdr_v3;
 
 	header = (const struct common_firmware_header *)
 		sdma_inst->fw->data;
@@ -174,6 +175,11 @@ static int amdgpu_sdma_init_inst_ctx(struct amdgpu_sdma_instance *sdma_inst)
 		sdma_inst->fw_version = le32_to_cpu(hdr_v2->header.ucode_version);
 		sdma_inst->feature_version = le32_to_cpu(hdr_v2->ucode_feature_version);
 		break;
+	case 3:
+		hdr_v3 = (const struct sdma_firmware_header_v3_0 *)sdma_inst->fw->data;
+		sdma_inst->fw_version = le32_to_cpu(hdr_v3->header.ucode_version);
+		sdma_inst->feature_version = le32_to_cpu(hdr_v3->ucode_feature_version);
+		break;
 	default:
 		return -EINVAL;
 	}
@@ -206,16 +212,19 @@ int amdgpu_sdma_init_microcode(struct amdgpu_device *adev,
 	const struct common_firmware_header *header = NULL;
 	int err, i;
 	const struct sdma_firmware_header_v2_0 *sdma_hdr;
+	const struct sdma_firmware_header_v3_0 *sdma_hv3;
 	uint16_t version_major;
 	char ucode_prefix[30];
-	char fw_name[52];
 
 	amdgpu_ucode_ip_version_decode(adev, SDMA0_HWIP, ucode_prefix, sizeof(ucode_prefix));
 	if (instance == 0)
-		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s.bin", ucode_prefix);
+		err = amdgpu_ucode_request(adev, &adev->sdma.instance[instance].fw,
+					   AMDGPU_UCODE_REQUIRED,
+					   "amdgpu/%s.bin", ucode_prefix);
 	else
-		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s%d.bin", ucode_prefix, instance);
-	err = amdgpu_ucode_request(adev, &adev->sdma.instance[instance].fw, fw_name);
+		err = amdgpu_ucode_request(adev, &adev->sdma.instance[instance].fw,
+					   AMDGPU_UCODE_REQUIRED,
+					   "amdgpu/%s%d.bin", ucode_prefix, instance);
 	if (err)
 		goto out;
 
@@ -251,11 +260,14 @@ int amdgpu_sdma_init_microcode(struct amdgpu_device *adev,
 			else {
 				/* Use a single copy per SDMA firmware type. PSP uses the same instance for all
 				 * groups of SDMAs */
-				if (amdgpu_ip_version(adev, SDMA0_HWIP,
-						      0) ==
-					    IP_VERSION(4, 4, 2) &&
+				if ((amdgpu_ip_version(adev, SDMA0_HWIP, 0) ==
+				     IP_VERSION(4, 4, 2) ||
+				     amdgpu_ip_version(adev, SDMA0_HWIP, 0) ==
+				     IP_VERSION(4, 4, 4) ||
+				     amdgpu_ip_version(adev, SDMA0_HWIP, 0) ==
+				     IP_VERSION(4, 4, 5)) &&
 				    adev->firmware.load_type ==
-					    AMDGPU_FW_LOAD_PSP &&
+				    AMDGPU_FW_LOAD_PSP &&
 				    adev->sdma.num_inst_per_aid == i) {
 					break;
 				}
@@ -281,6 +293,15 @@ int amdgpu_sdma_init_microcode(struct amdgpu_device *adev,
 			adev->firmware.fw_size +=
 				ALIGN(le32_to_cpu(sdma_hdr->ctl_ucode_size_bytes), PAGE_SIZE);
 			break;
+		case 3:
+			sdma_hv3 = (const struct sdma_firmware_header_v3_0 *)
+				adev->sdma.instance[0].fw->data;
+			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA_RS64];
+			info->ucode_id = AMDGPU_UCODE_ID_SDMA_RS64;
+			info->fw = adev->sdma.instance[0].fw;
+			adev->firmware.fw_size +=
+				ALIGN(le32_to_cpu(sdma_hv3->ucode_size_bytes), PAGE_SIZE);
+			break;
 		default:
 			err = -EINVAL;
 		}
@@ -326,3 +347,116 @@ int amdgpu_sdma_ras_sw_init(struct amdgpu_device *adev)
 
 	return 0;
 }
+
+/*
+ * debugfs for to enable/disable sdma job submission to specific core.
+ */
+#if defined(CONFIG_DEBUG_FS)
+static int amdgpu_debugfs_sdma_sched_mask_set(void *data, u64 val)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)data;
+	u32 i;
+	u64 mask = 0;
+	struct amdgpu_ring *ring;
+
+	if (!adev)
+		return -ENODEV;
+
+	mask = BIT_ULL(adev->sdma.num_instances) - 1;
+	if ((val & mask) == 0)
+		return -EINVAL;
+
+	for (i = 0; i < adev->sdma.num_instances; ++i) {
+		ring = &adev->sdma.instance[i].ring;
+		if (val & BIT_ULL(i))
+			ring->sched.ready = true;
+		else
+			ring->sched.ready = false;
+	}
+	/* publish sched.ready flag update effective immediately across smp */
+	smp_rmb();
+	return 0;
+}
+
+static int amdgpu_debugfs_sdma_sched_mask_get(void *data, u64 *val)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)data;
+	u32 i;
+	u64 mask = 0;
+	struct amdgpu_ring *ring;
+
+	if (!adev)
+		return -ENODEV;
+	for (i = 0; i < adev->sdma.num_instances; ++i) {
+		ring = &adev->sdma.instance[i].ring;
+		if (ring->sched.ready)
+			mask |= 1 << i;
+	}
+
+	*val = mask;
+	return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(amdgpu_debugfs_sdma_sched_mask_fops,
+			 amdgpu_debugfs_sdma_sched_mask_get,
+			 amdgpu_debugfs_sdma_sched_mask_set, "%llx\n");
+
+#endif
+
+void amdgpu_debugfs_sdma_sched_mask_init(struct amdgpu_device *adev)
+{
+#if defined(CONFIG_DEBUG_FS)
+	struct drm_minor *minor = adev_to_drm(adev)->primary;
+	struct dentry *root = minor->debugfs_root;
+	char name[32];
+
+	if (!(adev->sdma.num_instances > 1))
+		return;
+	sprintf(name, "amdgpu_sdma_sched_mask");
+	debugfs_create_file(name, 0600, root, adev,
+			    &amdgpu_debugfs_sdma_sched_mask_fops);
+#endif
+}
+
+static ssize_t amdgpu_get_sdma_reset_mask(struct device *dev,
+					  struct device_attribute *attr,
+					  char *buf)
+{
+	struct drm_device *ddev = dev_get_drvdata(dev);
+	struct amdgpu_device *adev = drm_to_adev(ddev);
+
+	if (!adev)
+		return -ENODEV;
+
+	return amdgpu_show_reset_mask(buf, adev->sdma.supported_reset);
+}
+
+static DEVICE_ATTR(sdma_reset_mask, 0444,
+		   amdgpu_get_sdma_reset_mask, NULL);
+
+int amdgpu_sdma_sysfs_reset_mask_init(struct amdgpu_device *adev)
+{
+	int r = 0;
+
+	if (!amdgpu_gpu_recovery)
+		return r;
+
+	if (adev->sdma.num_instances) {
+		r = device_create_file(adev->dev, &dev_attr_sdma_reset_mask);
+		if (r)
+			return r;
+	}
+
+	return r;
+}
+
+void amdgpu_sdma_sysfs_reset_mask_fini(struct amdgpu_device *adev)
+{
+	if (!amdgpu_gpu_recovery)
+		return;
+
+	if (adev->dev->kobj.sd) {
+		if (adev->sdma.num_instances)
+			device_remove_file(adev->dev, &dev_attr_sdma_reset_mask);
+	}
+}
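
The last hunk adds a per-instance scheduler mask under debugfs (amdgpu_sdma_sched_mask) and an sdma_reset_mask sysfs attribute. Bit i of the debugfs value controls whether the GPU scheduler may submit jobs to SDMA instance i, and a write that clears every instance bit is rejected with -EINVAL. The userspace sketch below shows how that attribute could be exercised; it is not part of the commit, and the path /sys/kernel/debug/dri/0 (debugfs mount point and DRM minor number) is an assumption that depends on the target system.

/*
 * Minimal userspace sketch, not part of the patch: read the current SDMA
 * scheduler mask and optionally write a new one.  Assumes debugfs is
 * mounted at /sys/kernel/debug and the GPU is DRM minor 0.
 */
#include <stdio.h>

#define SDMA_MASK_PATH "/sys/kernel/debug/dri/0/amdgpu_sdma_sched_mask"

int main(int argc, char **argv)
{
	unsigned long long mask;
	FILE *f = fopen(SDMA_MASK_PATH, "r");

	/* the attribute prints in the "%llx\n" format declared by the driver */
	if (!f || fscanf(f, "%llx", &mask) != 1) {
		perror("read " SDMA_MASK_PATH);
		return 1;
	}
	fclose(f);
	printf("current SDMA sched mask: 0x%llx\n", mask);

	if (argc > 1) {
		/* e.g. "0x1" keeps only SDMA instance 0 schedulable; the
		 * driver returns -EINVAL if no instance bit is set. */
		f = fopen(SDMA_MASK_PATH, "w");
		if (!f || fprintf(f, "%s\n", argv[1]) < 0 || fclose(f)) {
			perror("write " SDMA_MASK_PATH);
			return 1;
		}
	}
	return 0;
}

Reads come back in the "%llx\n" format given to DEFINE_DEBUGFS_ATTRIBUTE; writes go through the generic debugfs attribute parser, which should accept hex with a 0x prefix as well as plain decimal.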