Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c')
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c	581
1 file changed, 411 insertions(+), 170 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index ca945055e683..6ee77f431d56 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -43,7 +43,9 @@
 #include "amdgpu_gem.h"
 #include "amdgpu_display.h"
 #include "amdgpu_ras.h"
+#include "amdgpu_reset.h"
 #include "amd_pcie.h"
+#include "amdgpu_userq.h"
 
 void amdgpu_unregister_gpu_instance(struct amdgpu_device *adev)
 {
@@ -89,7 +91,7 @@ void amdgpu_driver_unload_kms(struct drm_device *dev)
 	if (adev->rmmio == NULL)
 		return;
 
-	if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DRV_UNLOAD))
+	if (amdgpu_acpi_smart_shift_update(adev, AMDGPU_SS_DRV_UNLOAD))
 		DRM_WARN("smart shift update failed\n");
 
 	amdgpu_acpi_fini(adev);
@@ -149,38 +151,7 @@ int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags)
 		goto out;
 	}
 
-	adev->pm.rpm_mode = AMDGPU_RUNPM_NONE;
-	if (amdgpu_device_supports_px(dev) &&
-	    (amdgpu_runtime_pm != 0)) { /* enable PX as runtime mode */
-		adev->pm.rpm_mode = AMDGPU_RUNPM_PX;
-		dev_info(adev->dev, "Using ATPX for runtime pm\n");
-	} else if (amdgpu_device_supports_boco(dev) &&
-		   (amdgpu_runtime_pm != 0)) { /* enable boco as runtime mode */
-		adev->pm.rpm_mode = AMDGPU_RUNPM_BOCO;
-		dev_info(adev->dev, "Using BOCO for runtime pm\n");
-	} else if (amdgpu_device_supports_baco(dev) &&
-		   (amdgpu_runtime_pm != 0)) {
-		switch (adev->asic_type) {
-		case CHIP_VEGA20:
-		case CHIP_ARCTURUS:
-			/* enable BACO as runpm mode if runpm=1 */
-			if (amdgpu_runtime_pm > 0)
-				adev->pm.rpm_mode = AMDGPU_RUNPM_BACO;
-			break;
-		case CHIP_VEGA10:
-			/* enable BACO as runpm mode if noretry=0 */
-			if (!adev->gmc.noretry)
-				adev->pm.rpm_mode = AMDGPU_RUNPM_BACO;
-			break;
-		default:
-			/* enable BACO as runpm mode on CI+ */
-			adev->pm.rpm_mode = AMDGPU_RUNPM_BACO;
-			break;
-		}
-
-		if (adev->pm.rpm_mode == AMDGPU_RUNPM_BACO)
-			dev_info(adev->dev, "Using BACO for runtime pm\n");
-	}
+	amdgpu_device_detect_runtime_pm_mode(adev);
 
 	/* Call ACPI methods: require modeset init
 	 * but failure is not fatal
@@ -190,7 +161,7 @@ int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags)
 	if (acpi_status)
 		dev_dbg(dev->dev, "Error during ACPI methods call\n");
 
-	if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DRV_LOAD))
+	if (amdgpu_acpi_smart_shift_update(adev, AMDGPU_SS_DRV_LOAD))
 		DRM_WARN("smart shift update failed\n");
 
 out:
@@ -200,6 +171,44 @@ out:
 	return r;
 }
 
+static enum amd_ip_block_type
+	amdgpu_ip_get_block_type(struct amdgpu_device *adev, uint32_t ip)
+{
+	enum amd_ip_block_type type;
+
+	switch (ip) {
+	case AMDGPU_HW_IP_GFX:
+		type = AMD_IP_BLOCK_TYPE_GFX;
+		break;
+	case AMDGPU_HW_IP_COMPUTE:
+		type = AMD_IP_BLOCK_TYPE_GFX;
+		break;
+	case AMDGPU_HW_IP_DMA:
+		type = AMD_IP_BLOCK_TYPE_SDMA;
+		break;
+	case AMDGPU_HW_IP_UVD:
+	case AMDGPU_HW_IP_UVD_ENC:
+		type = AMD_IP_BLOCK_TYPE_UVD;
+		break;
+	case AMDGPU_HW_IP_VCE:
+		type = AMD_IP_BLOCK_TYPE_VCE;
+		break;
+	case AMDGPU_HW_IP_VCN_DEC:
+	case AMDGPU_HW_IP_VCN_ENC:
+		type = AMD_IP_BLOCK_TYPE_VCN;
+		break;
+	case AMDGPU_HW_IP_VCN_JPEG:
+		type = (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_JPEG)) ?
+			AMD_IP_BLOCK_TYPE_JPEG : AMD_IP_BLOCK_TYPE_VCN;
+		break;
+	default:
+		type = AMD_IP_BLOCK_TYPE_NUM;
+		break;
+	}
+
+	return type;
+}
+
 static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info,
 				struct drm_amdgpu_query_fw *query_fw,
 				struct amdgpu_device *adev)
@@ -352,12 +361,36 @@ static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info,
 		fw_info->ver = adev->gfx.imu_fw_version;
 		fw_info->feature = 0;
 		break;
+	case AMDGPU_INFO_FW_VPE:
+		fw_info->ver = adev->vpe.fw_version;
+		fw_info->feature = adev->vpe.feature_version;
+		break;
 	default:
 		return -EINVAL;
 	}
 	return 0;
 }
 
+static int amdgpu_userq_metadata_info_gfx(struct amdgpu_device *adev,
+					  struct drm_amdgpu_info *info,
+					  struct drm_amdgpu_info_uq_metadata_gfx *meta)
+{
+	int ret = -EOPNOTSUPP;
+
+	if (adev->gfx.funcs->get_gfx_shadow_info) {
+		struct amdgpu_gfx_shadow_info shadow = {};
+
+		adev->gfx.funcs->get_gfx_shadow_info(adev, &shadow, true);
+		meta->shadow_size = shadow.shadow_size;
+		meta->shadow_alignment = shadow.shadow_alignment;
+		meta->csa_size = shadow.csa_size;
+		meta->csa_alignment = shadow.csa_alignment;
+		ret = 0;
+	}
+
+	return ret;
+}
+
 static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
 			     struct drm_amdgpu_info *info,
 			     struct drm_amdgpu_info_hw_ip *result)
@@ -366,6 +399,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
 	uint32_t ib_size_alignment = 0;
 	enum amd_ip_block_type type;
 	unsigned int num_rings = 0;
+	uint32_t num_slots = 0;
 	unsigned int i, j;
 
 	if (info->query_hw_ip.ip_instance >= AMDGPU_HW_IP_INSTANCE_MAX_COUNT)
@@ -375,24 +409,45 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
 	case AMDGPU_HW_IP_GFX:
 		type = AMD_IP_BLOCK_TYPE_GFX;
 		for (i = 0; i < adev->gfx.num_gfx_rings; i++)
-			if (adev->gfx.gfx_ring[i].sched.ready)
+			if (adev->gfx.gfx_ring[i].sched.ready &&
+			    !adev->gfx.gfx_ring[i].no_user_submission)
 				++num_rings;
+
+		if (!adev->gfx.disable_uq) {
+			for (i = 0; i < AMDGPU_MES_MAX_GFX_PIPES; i++)
+				num_slots += hweight32(adev->mes.gfx_hqd_mask[i]);
+		}
+
 		ib_start_alignment = 32;
 		ib_size_alignment = 32;
 		break;
 	case AMDGPU_HW_IP_COMPUTE:
 		type = AMD_IP_BLOCK_TYPE_GFX;
 		for (i = 0; i < adev->gfx.num_compute_rings; i++)
-			if (adev->gfx.compute_ring[i].sched.ready)
+			if (adev->gfx.compute_ring[i].sched.ready &&
+			    !adev->gfx.compute_ring[i].no_user_submission)
 				++num_rings;
+
+		if (!adev->sdma.disable_uq) {
+			for (i = 0; i < AMDGPU_MES_MAX_COMPUTE_PIPES; i++)
+				num_slots += hweight32(adev->mes.compute_hqd_mask[i]);
+		}
+
 		ib_start_alignment = 32;
 		ib_size_alignment = 32;
 		break;
	case AMDGPU_HW_IP_DMA:
 		type = AMD_IP_BLOCK_TYPE_SDMA;
 		for (i = 0; i < adev->sdma.num_instances; i++)
-			if (adev->sdma.instance[i].ring.sched.ready)
+			if (adev->sdma.instance[i].ring.sched.ready &&
+			    !adev->sdma.instance[i].ring.no_user_submission)
 				++num_rings;
+
+		if (!adev->gfx.disable_uq) {
+			for (i = 0; i < AMDGPU_MES_MAX_SDMA_PIPES; i++)
+				num_slots += hweight32(adev->mes.sdma_hqd_mask[i]);
+		}
+
 		ib_start_alignment = 256;
 		ib_size_alignment = 4;
 		break;
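Note: in the hunk above, num_slots counts one user-queue slot per set bit in the corresponding MES hardware-queue-descriptor (HQD) pipe mask; hweight32() is the kernel's 32-bit population count. A minimal userspace-equivalent sketch of the same accounting (the mask array here is illustrative, not real hardware state):

    #include <stdint.h>

    /* One available user-queue slot per set bit in each pipe's HQD mask. */
    static unsigned int count_uq_slots(const uint32_t *hqd_mask, int num_pipes)
    {
        unsigned int slots = 0;

        for (int i = 0; i < num_pipes; i++)
            slots += __builtin_popcount(hqd_mask[i]); /* hweight32() analogue */
        return slots;
    }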
@@ -402,19 +457,21 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
 			if (adev->uvd.harvest_config & (1 << i))
 				continue;
 
-			if (adev->uvd.inst[i].ring.sched.ready)
+			if (adev->uvd.inst[i].ring.sched.ready &&
+			    !adev->uvd.inst[i].ring.no_user_submission)
 				++num_rings;
 		}
-		ib_start_alignment = 64;
+		ib_start_alignment = 256;
 		ib_size_alignment = 64;
 		break;
 	case AMDGPU_HW_IP_VCE:
 		type = AMD_IP_BLOCK_TYPE_VCE;
 		for (i = 0; i < adev->vce.num_rings; i++)
-			if (adev->vce.ring[i].sched.ready)
+			if (adev->vce.ring[i].sched.ready &&
+			    !adev->vce.ring[i].no_user_submission)
 				++num_rings;
-		ib_start_alignment = 4;
-		ib_size_alignment = 1;
+		ib_start_alignment = 256;
+		ib_size_alignment = 4;
 		break;
 	case AMDGPU_HW_IP_UVD_ENC:
 		type = AMD_IP_BLOCK_TYPE_UVD;
@@ -423,11 +480,12 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
 				continue;
 
 			for (j = 0; j < adev->uvd.num_enc_rings; j++)
-				if (adev->uvd.inst[i].ring_enc[j].sched.ready)
+				if (adev->uvd.inst[i].ring_enc[j].sched.ready &&
+				    !adev->uvd.inst[i].ring_enc[j].no_user_submission)
 					++num_rings;
 		}
-		ib_start_alignment = 64;
-		ib_size_alignment = 64;
+		ib_start_alignment = 256;
+		ib_size_alignment = 4;
 		break;
 	case AMDGPU_HW_IP_VCN_DEC:
 		type = AMD_IP_BLOCK_TYPE_VCN;
@@ -435,11 +493,12 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
 			if (adev->vcn.harvest_config & (1 << i))
 				continue;
 
-			if (adev->vcn.inst[i].ring_dec.sched.ready)
+			if (adev->vcn.inst[i].ring_dec.sched.ready &&
+			    !adev->vcn.inst[i].ring_dec.no_user_submission)
 				++num_rings;
 		}
-		ib_start_alignment = 16;
-		ib_size_alignment = 16;
+		ib_start_alignment = 256;
+		ib_size_alignment = 64;
 		break;
 	case AMDGPU_HW_IP_VCN_ENC:
 		type = AMD_IP_BLOCK_TYPE_VCN;
@@ -447,12 +506,13 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
 			if (adev->vcn.harvest_config & (1 << i))
 				continue;
 
-			for (j = 0; j < adev->vcn.num_enc_rings; j++)
-				if (adev->vcn.inst[i].ring_enc[j].sched.ready)
+			for (j = 0; j < adev->vcn.inst[i].num_enc_rings; j++)
+				if (adev->vcn.inst[i].ring_enc[j].sched.ready &&
+				    !adev->vcn.inst[i].ring_enc[j].no_user_submission)
 					++num_rings;
 		}
-		ib_start_alignment = 64;
-		ib_size_alignment = 1;
+		ib_start_alignment = 256;
+		ib_size_alignment = 4;
 		break;
 	case AMDGPU_HW_IP_VCN_JPEG:
 		type = (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_JPEG)) ?
@@ -462,11 +522,21 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
 			if (adev->jpeg.harvest_config & (1 << i))
 				continue;
 
-			if (adev->jpeg.inst[i].ring_dec.sched.ready)
-				++num_rings;
+			for (j = 0; j < adev->jpeg.num_jpeg_rings; j++)
+				if (adev->jpeg.inst[i].ring_dec[j].sched.ready &&
+				    !adev->jpeg.inst[i].ring_dec[j].no_user_submission)
+					++num_rings;
 		}
-		ib_start_alignment = 16;
-		ib_size_alignment = 16;
+		ib_start_alignment = 256;
+		ib_size_alignment = 64;
+		break;
+	case AMDGPU_HW_IP_VPE:
+		type = AMD_IP_BLOCK_TYPE_VPE;
+		if (adev->vpe.ring.sched.ready &&
+		    !adev->vpe.ring.no_user_submission)
+			++num_rings;
+		ib_start_alignment = 256;
+		ib_size_alignment = 4;
 		break;
 	default:
 		return -EINVAL;
@@ -489,18 +559,26 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
 	if (adev->asic_type >= CHIP_VEGA10) {
 		switch (type) {
 		case AMD_IP_BLOCK_TYPE_GFX:
-			result->ip_discovery_version = adev->ip_versions[GC_HWIP][0];
+			result->ip_discovery_version =
+				IP_VERSION_MAJ_MIN_REV(amdgpu_ip_version(adev, GC_HWIP, 0));
 			break;
 		case AMD_IP_BLOCK_TYPE_SDMA:
-			result->ip_discovery_version = adev->ip_versions[SDMA0_HWIP][0];
+			result->ip_discovery_version =
+				IP_VERSION_MAJ_MIN_REV(amdgpu_ip_version(adev, SDMA0_HWIP, 0));
 			break;
 		case AMD_IP_BLOCK_TYPE_UVD:
 		case AMD_IP_BLOCK_TYPE_VCN:
 		case AMD_IP_BLOCK_TYPE_JPEG:
-			result->ip_discovery_version = adev->ip_versions[UVD_HWIP][0];
+			result->ip_discovery_version =
+				IP_VERSION_MAJ_MIN_REV(amdgpu_ip_version(adev, UVD_HWIP, 0));
 			break;
 		case AMD_IP_BLOCK_TYPE_VCE:
-			result->ip_discovery_version = adev->ip_versions[VCE_HWIP][0];
+			result->ip_discovery_version =
+				IP_VERSION_MAJ_MIN_REV(amdgpu_ip_version(adev, VCE_HWIP, 0));
+			break;
+		case AMD_IP_BLOCK_TYPE_VPE:
+			result->ip_discovery_version =
+				IP_VERSION_MAJ_MIN_REV(amdgpu_ip_version(adev, VPE_HWIP, 0));
 			break;
 		default:
 			result->ip_discovery_version = 0;
@@ -511,6 +589,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
 	}
 	result->capabilities_flags = 0;
 	result->available_rings = (1 << num_rings) - 1;
+	result->userq_num_slots = num_slots;
 	result->ib_start_alignment = ib_start_alignment;
 	result->ib_size_alignment = ib_size_alignment;
 	return 0;
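The reworked amdgpu_hw_ip_info() is reached through the AMDGPU_INFO ioctl. A minimal sketch of the userspace side, assuming an already-open render node fd and the matching amdgpu_drm.h update that adds userq_num_slots to struct drm_amdgpu_info_hw_ip (older headers simply lack that field):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <drm/amdgpu_drm.h>

    /* Query ring/queue availability for the GFX IP on an open render node. */
    static int query_gfx_ip(int fd)
    {
        struct drm_amdgpu_info_hw_ip ip;
        struct drm_amdgpu_info request;

        memset(&ip, 0, sizeof(ip));
        memset(&request, 0, sizeof(request));
        request.return_pointer = (uintptr_t)&ip;
        request.return_size = sizeof(ip);
        request.query = AMDGPU_INFO_HW_IP_INFO;
        request.query_hw_ip.type = AMDGPU_HW_IP_GFX;
        request.query_hw_ip.ip_instance = 0;

        if (ioctl(fd, DRM_IOCTL_AMDGPU_INFO, &request))
            return -1;

        /* available_rings is (1 << num_rings) - 1, so popcount recovers the count */
        printf("kernel rings: %d, user-queue slots: %u\n",
               __builtin_popcount(ip.available_rings), ip.userq_num_slots);
        return 0;
    }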
@@ -537,11 +616,16 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 	struct drm_amdgpu_info *info = data;
 	struct amdgpu_mode_info *minfo = &adev->mode_info;
 	void __user *out = (void __user *)(uintptr_t)info->return_pointer;
+	struct amdgpu_fpriv *fpriv;
+	struct amdgpu_ip_block *ip_block;
+	enum amd_ip_block_type type;
+	struct amdgpu_xcp *xcp;
+	u32 count, inst_mask;
 	uint32_t size = info->return_size;
 	struct drm_crtc *crtc;
 	uint32_t ui32 = 0;
 	uint64_t ui64 = 0;
-	int i, found;
+	int i, found, ret;
 	int ui32_size = sizeof(ui32);
 
 	if (!info->return_size || !info->return_pointer)
@@ -556,6 +640,7 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 			crtc = (struct drm_crtc *)minfo->crtcs[i];
 			if (crtc && crtc->base.id == info->mode_crtc.id) {
 				struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+
 				ui32 = amdgpu_crtc->crtc_id;
 				found = 1;
 				break;
@@ -568,56 +653,82 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 		return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
 	case AMDGPU_INFO_HW_IP_INFO: {
 		struct drm_amdgpu_info_hw_ip ip = {};
-		int ret;
 
 		ret = amdgpu_hw_ip_info(adev, info, &ip);
 		if (ret)
 			return ret;
 
-		ret = copy_to_user(out, &ip, min((size_t)size, sizeof(ip)));
+		ret = copy_to_user(out, &ip, min_t(size_t, size, sizeof(ip)));
 		return ret ? -EFAULT : 0;
 	}
 	case AMDGPU_INFO_HW_IP_COUNT: {
-		enum amd_ip_block_type type;
-		uint32_t count = 0;
+		fpriv = (struct amdgpu_fpriv *)filp->driver_priv;
+		type = amdgpu_ip_get_block_type(adev, info->query_hw_ip.type);
+		ip_block = amdgpu_device_ip_get_ip_block(adev, type);
 
-		switch (info->query_hw_ip.type) {
-		case AMDGPU_HW_IP_GFX:
-			type = AMD_IP_BLOCK_TYPE_GFX;
-			break;
-		case AMDGPU_HW_IP_COMPUTE:
-			type = AMD_IP_BLOCK_TYPE_GFX;
-			break;
-		case AMDGPU_HW_IP_DMA:
-			type = AMD_IP_BLOCK_TYPE_SDMA;
-			break;
-		case AMDGPU_HW_IP_UVD:
-			type = AMD_IP_BLOCK_TYPE_UVD;
+		if (!ip_block || !ip_block->status.valid)
+			return -EINVAL;
+
+		if (adev->xcp_mgr && adev->xcp_mgr->num_xcps > 0 &&
+		    fpriv->xcp_id < adev->xcp_mgr->num_xcps) {
+			xcp = &adev->xcp_mgr->xcp[fpriv->xcp_id];
+			switch (type) {
+			case AMD_IP_BLOCK_TYPE_GFX:
+				ret = amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_GFX, &inst_mask);
+				if (ret)
+					return ret;
+				count = hweight32(inst_mask);
+				break;
+			case AMD_IP_BLOCK_TYPE_SDMA:
+				ret = amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_SDMA, &inst_mask);
+				if (ret)
+					return ret;
+				count = hweight32(inst_mask);
+				break;
+			case AMD_IP_BLOCK_TYPE_JPEG:
+				ret = amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_VCN, &inst_mask);
+				if (ret)
+					return ret;
+				count = hweight32(inst_mask) * adev->jpeg.num_jpeg_rings;
+				break;
+			case AMD_IP_BLOCK_TYPE_VCN:
+				ret = amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_VCN, &inst_mask);
+				if (ret)
+					return ret;
+				count = hweight32(inst_mask);
+				break;
+			default:
+				return -EINVAL;
+			}
+
+			return copy_to_user(out, &count, min(size, 4u)) ? -EFAULT : 0;
+		}
+
+		switch (type) {
+		case AMD_IP_BLOCK_TYPE_GFX:
+		case AMD_IP_BLOCK_TYPE_VCE:
+			count = 1;
 			break;
-		case AMDGPU_HW_IP_VCE:
-			type = AMD_IP_BLOCK_TYPE_VCE;
+		case AMD_IP_BLOCK_TYPE_SDMA:
+			count = adev->sdma.num_instances;
 			break;
-		case AMDGPU_HW_IP_UVD_ENC:
-			type = AMD_IP_BLOCK_TYPE_UVD;
+		case AMD_IP_BLOCK_TYPE_JPEG:
+			count = adev->jpeg.num_jpeg_inst * adev->jpeg.num_jpeg_rings;
 			break;
-		case AMDGPU_HW_IP_VCN_DEC:
-		case AMDGPU_HW_IP_VCN_ENC:
-			type = AMD_IP_BLOCK_TYPE_VCN;
+		case AMD_IP_BLOCK_TYPE_VCN:
			count = adev->vcn.num_vcn_inst;
 			break;
-		case AMDGPU_HW_IP_VCN_JPEG:
-			type = (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_JPEG)) ?
-				AMD_IP_BLOCK_TYPE_JPEG : AMD_IP_BLOCK_TYPE_VCN;
+		case AMD_IP_BLOCK_TYPE_UVD:
+			count = adev->uvd.num_uvd_inst;
 			break;
+		/* For all other IP block types not listed in the switch statement
+		 * the ip status is valid here and the instance count is one.
+		 */
 		default:
-			return -EINVAL;
+			count = 1;
+			break;
 		}
 
-		for (i = 0; i < adev->num_ip_blocks; i++)
-			if (adev->ip_blocks[i].version->type == type &&
-			    adev->ip_blocks[i].status.valid &&
-			    count < AMDGPU_HW_IP_INSTANCE_MAX_COUNT)
-				count++;
-
 		return copy_to_user(out, &count, min(size, 4u)) ? -EFAULT : 0;
 	}
 	case AMDGPU_INFO_TIMESTAMP:
@@ -625,7 +736,6 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
 	case AMDGPU_INFO_FW_VERSION: {
 		struct drm_amdgpu_info_firmware fw_info;
-		int ret;
 
 		/* We only support one instance of each IP block right now. */
 		if (info->query_fw.ip_instance != 0)
@@ -648,7 +758,8 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 		ui64 = atomic64_read(&adev->num_vram_cpu_page_faults);
 		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
 	case AMDGPU_INFO_VRAM_USAGE:
-		ui64 = ttm_resource_manager_usage(&adev->mman.vram_mgr.manager);
+		ui64 = ttm_resource_manager_used(&adev->mman.vram_mgr.manager) ?
+			ttm_resource_manager_usage(&adev->mman.vram_mgr.manager) : 0;
 		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
 	case AMDGPU_INFO_VIS_VRAM_USAGE:
 		ui64 = amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr);
@@ -694,8 +805,8 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 		mem.vram.usable_heap_size = adev->gmc.real_vram_size -
 			atomic64_read(&adev->vram_pin_size) -
 			AMDGPU_VM_RESERVED_VRAM;
-		mem.vram.heap_usage =
-			ttm_resource_manager_usage(vram_man);
+		mem.vram.heap_usage = ttm_resource_manager_used(&adev->mman.vram_mgr.manager) ?
+			ttm_resource_manager_usage(vram_man) : 0;
 		mem.vram.max_allocation = mem.vram.usable_heap_size * 3 / 4;
 
 		mem.cpu_accessible_vram.total_heap_size =
@@ -720,32 +831,47 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 				   ? -EFAULT : 0;
 	}
 	case AMDGPU_INFO_READ_MMR_REG: {
-		unsigned n, alloc_size;
+		int ret = 0;
+		unsigned int n, alloc_size;
 		uint32_t *regs;
-		unsigned se_num = (info->read_mmr_reg.instance >>
+		unsigned int se_num = (info->read_mmr_reg.instance >>
 				   AMDGPU_INFO_MMR_SE_INDEX_SHIFT) &
 				  AMDGPU_INFO_MMR_SE_INDEX_MASK;
-		unsigned sh_num = (info->read_mmr_reg.instance >>
+		unsigned int sh_num = (info->read_mmr_reg.instance >>
 				   AMDGPU_INFO_MMR_SH_INDEX_SHIFT) &
 				  AMDGPU_INFO_MMR_SH_INDEX_MASK;
 
+		if (!down_read_trylock(&adev->reset_domain->sem))
+			return -ENOENT;
+
 		/* set full masks if the userspace set all bits
-		 * in the bitfields */
-		if (se_num == AMDGPU_INFO_MMR_SE_INDEX_MASK)
+		 * in the bitfields
+		 */
+		if (se_num == AMDGPU_INFO_MMR_SE_INDEX_MASK) {
 			se_num = 0xffffffff;
-		else if (se_num >= AMDGPU_GFX_MAX_SE)
-			return -EINVAL;
-		if (sh_num == AMDGPU_INFO_MMR_SH_INDEX_MASK)
+		} else if (se_num >= AMDGPU_GFX_MAX_SE) {
+			ret = -EINVAL;
+			goto out;
+		}
+
+		if (sh_num == AMDGPU_INFO_MMR_SH_INDEX_MASK) {
 			sh_num = 0xffffffff;
-		else if (sh_num >= AMDGPU_GFX_MAX_SH_PER_SE)
-			return -EINVAL;
+		} else if (sh_num >= AMDGPU_GFX_MAX_SH_PER_SE) {
+			ret = -EINVAL;
+			goto out;
+		}
 
-		if (info->read_mmr_reg.count > 128)
-			return -EINVAL;
+		if (info->read_mmr_reg.count > 128) {
+			ret = -EINVAL;
+			goto out;
+		}
 
 		regs = kmalloc_array(info->read_mmr_reg.count, sizeof(*regs), GFP_KERNEL);
-		if (!regs)
-			return -ENOMEM;
+		if (!regs) {
+			ret = -ENOMEM;
+			goto out;
+		}
+
 		alloc_size = info->read_mmr_reg.count * sizeof(*regs);
 
 		amdgpu_gfx_off_ctrl(adev, false);
@@ -757,19 +883,22 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 					      info->read_mmr_reg.dword_offset + i);
 				kfree(regs);
 				amdgpu_gfx_off_ctrl(adev, true);
-				return -EFAULT;
+				ret = -EFAULT;
+				goto out;
 			}
 		}
 		amdgpu_gfx_off_ctrl(adev, true);
 		n = copy_to_user(out, regs, min(size, alloc_size));
 		kfree(regs);
-		return n ? -EFAULT : 0;
+		ret = (n ? -EFAULT : 0);
+out:
+		up_read(&adev->reset_domain->sem);
+		return ret;
 	}
 	case AMDGPU_INFO_DEV_INFO: {
 		struct drm_amdgpu_info_device *dev_info;
 		uint64_t vm_size;
-		uint32_t pcie_gen_mask;
-		int ret;
+		uint32_t pcie_gen_mask, pcie_width_mask;
 
 		dev_info = kzalloc(sizeof(*dev_info), GFP_KERNEL);
 		if (!dev_info)
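The AMDGPU_INFO_READ_MMR_REG rework above wraps the whole register-read path in down_read_trylock(&adev->reset_domain->sem), so a query that races a GPU reset now fails fast with -ENOENT instead of touching MMIO mid-reset, and every error path funnels through the new out: label to drop the semaphore. From userspace the only visible change is the extra errno; a sketch of a caller, assuming the read_mmr_reg layout from drm/amdgpu_drm.h:

    #include <stdint.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <drm/amdgpu_drm.h>

    /* Read up to 128 dwords of register space; instance = ~0 selects the
     * broadcast SE/SH masks. With the change above, errno == ENOENT means
     * a reset is in flight and the caller should retry later. */
    static int read_mmr(int fd, uint32_t dword_offset, uint32_t count,
                        uint32_t *regs)
    {
        struct drm_amdgpu_info request;

        memset(&request, 0, sizeof(request));
        request.return_pointer = (uintptr_t)regs;
        request.return_size = count * sizeof(uint32_t);
        request.query = AMDGPU_INFO_READ_MMR_REG;
        request.read_mmr_reg.dword_offset = dword_offset;
        request.read_mmr_reg.count = count;        /* kernel rejects > 128 */
        request.read_mmr_reg.instance = 0xffffffff;

        return ioctl(fd, DRM_IOCTL_AMDGPU_INFO, &request);
    }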
@@ -804,20 +933,35 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 		dev_info->ids_flags = 0;
 		if (adev->flags & AMD_IS_APU)
 			dev_info->ids_flags |= AMDGPU_IDS_FLAGS_FUSION;
-		if (amdgpu_mcbp)
+		if (adev->gfx.mcbp)
 			dev_info->ids_flags |= AMDGPU_IDS_FLAGS_PREEMPTION;
 		if (amdgpu_is_tmz(adev))
 			dev_info->ids_flags |= AMDGPU_IDS_FLAGS_TMZ;
+		if (adev->gfx.config.ta_cntl2_truncate_coord_mode)
+			dev_info->ids_flags |= AMDGPU_IDS_FLAGS_CONFORMANT_TRUNC_COORD;
+
+		/* Gang submit is not supported under SRIOV currently */
+		if (!amdgpu_sriov_vf(adev))
+			dev_info->ids_flags |= AMDGPU_IDS_FLAGS_GANG_SUBMIT;
+
+		if (amdgpu_passthrough(adev))
+			dev_info->ids_flags |= (AMDGPU_IDS_FLAGS_MODE_PT <<
+						AMDGPU_IDS_FLAGS_MODE_SHIFT) &
+						AMDGPU_IDS_FLAGS_MODE_MASK;
+		else if (amdgpu_sriov_vf(adev))
+			dev_info->ids_flags |= (AMDGPU_IDS_FLAGS_MODE_VF <<
+						AMDGPU_IDS_FLAGS_MODE_SHIFT) &
+						AMDGPU_IDS_FLAGS_MODE_MASK;
 
 		vm_size = adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE;
-		vm_size -= AMDGPU_VA_RESERVED_SIZE;
+		vm_size -= AMDGPU_VA_RESERVED_TOP;
 
 		/* Older VCE FW versions are buggy and can handle only 40bits */
 		if (adev->vce.fw_version &&
 		    adev->vce.fw_version < AMDGPU_VCE_FW_53_45)
 			vm_size = min(vm_size, 1ULL << 40);
 
-		dev_info->virtual_address_offset = AMDGPU_VA_RESERVED_SIZE;
+		dev_info->virtual_address_offset = AMDGPU_VA_RESERVED_BOTTOM;
 		dev_info->virtual_address_max =
 			min(vm_size, AMDGPU_GMC_HOLE_START);
 
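The hunk above also encodes the virtualization mode into ids_flags using the AMDGPU_IDS_FLAGS_MODE_* macros that the accompanying amdgpu_drm.h change introduces. A sketch of how a client might decode drm_amdgpu_info_device::ids_flags (field and macro names per that UAPI update):

    #include <stdint.h>
    #include <drm/amdgpu_drm.h>

    /* Classify the device from ids_flags returned by AMDGPU_INFO_DEV_INFO. */
    static const char *virt_mode(uint64_t ids_flags)
    {
        switch ((ids_flags & AMDGPU_IDS_FLAGS_MODE_MASK) >>
                AMDGPU_IDS_FLAGS_MODE_SHIFT) {
        case AMDGPU_IDS_FLAGS_MODE_PT:
            return "passthrough";
        case AMDGPU_IDS_FLAGS_MODE_VF:
            return "SR-IOV virtual function";
        default:
            return "bare metal";
        }
    }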
@@ -834,7 +978,7 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 		memcpy(&dev_info->cu_ao_bitmap[0], &adev->gfx.cu_info.ao_cu_bitmap[0],
 		       sizeof(adev->gfx.cu_info.ao_cu_bitmap));
 		memcpy(&dev_info->cu_bitmap[0], &adev->gfx.cu_info.bitmap[0],
-		       sizeof(adev->gfx.cu_info.bitmap));
+		       sizeof(dev_info->cu_bitmap));
 		dev_info->vram_type = adev->gmc.vram_type;
 		dev_info->vram_bit_width = adev->gmc.vram_width;
 		dev_info->vce_harvest_config = adev->vce.harvest_config;
@@ -855,15 +999,42 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 		dev_info->tcc_disabled_mask = adev->gfx.config.tcc_disabled_mask;
 
 		/* Combine the chip gen mask with the platform (CPU/mobo) mask. */
-		pcie_gen_mask = adev->pm.pcie_gen_mask & (adev->pm.pcie_gen_mask >> 16);
+		pcie_gen_mask = adev->pm.pcie_gen_mask &
+			(adev->pm.pcie_gen_mask >> CAIL_PCIE_LINK_SPEED_SUPPORT_SHIFT);
+		pcie_width_mask = adev->pm.pcie_mlw_mask &
+			(adev->pm.pcie_mlw_mask >> CAIL_PCIE_LINK_WIDTH_SUPPORT_SHIFT);
 		dev_info->pcie_gen = fls(pcie_gen_mask);
 		dev_info->pcie_num_lanes =
-			adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 ? 32 :
-			adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 ? 16 :
-			adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 ? 12 :
-			adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 ? 8 :
-			adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 ? 4 :
-			adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 ? 2 : 1;
+			pcie_width_mask & CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X32 ? 32 :
+			pcie_width_mask & CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X16 ? 16 :
+			pcie_width_mask & CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X12 ? 12 :
+			pcie_width_mask & CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X8 ? 8 :
+			pcie_width_mask & CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X4 ? 4 :
+			pcie_width_mask & CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X2 ? 2 : 1;
+
+		dev_info->tcp_cache_size = adev->gfx.config.gc_tcp_l1_size;
+		dev_info->num_sqc_per_wgp = adev->gfx.config.gc_num_sqc_per_wgp;
+		dev_info->sqc_data_cache_size = adev->gfx.config.gc_l1_data_cache_size_per_sqc;
+		dev_info->sqc_inst_cache_size = adev->gfx.config.gc_l1_instruction_cache_size_per_sqc;
+		dev_info->gl1c_cache_size = adev->gfx.config.gc_gl1c_size_per_instance *
+					    adev->gfx.config.gc_gl1c_per_sa;
+		dev_info->gl2c_cache_size = adev->gfx.config.gc_gl2c_per_gpu;
+		dev_info->mall_size = adev->gmc.mall_size;
+
+
+		if (adev->gfx.funcs->get_gfx_shadow_info) {
+			struct amdgpu_gfx_shadow_info shadow_info;
+
+			ret = amdgpu_gfx_get_gfx_shadow_info(adev, &shadow_info);
+			if (!ret) {
+				dev_info->shadow_size = shadow_info.shadow_size;
+				dev_info->shadow_alignment = shadow_info.shadow_alignment;
+				dev_info->csa_size = shadow_info.csa_size;
+				dev_info->csa_alignment = shadow_info.csa_alignment;
+			}
+		}
+
+		dev_info->userq_ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
 
 		ret = copy_to_user(out, dev_info,
 				   min((size_t)size, sizeof(*dev_info))) ? -EFAULT : 0;
@@ -871,7 +1042,7 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 		return ret;
 	}
 	case AMDGPU_INFO_VCE_CLOCK_TABLE: {
-		unsigned i;
+		unsigned int i;
 		struct drm_amdgpu_info_vce_clock_table vce_clk_table = {};
 		struct amd_vce_state *vce_state;
 
@@ -913,12 +1084,17 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 		struct atom_context *atom_context;
 
 		atom_context = adev->mode_info.atom_context;
-		memcpy(vbios_info.name, atom_context->name, sizeof(atom_context->name));
-		memcpy(vbios_info.vbios_pn, atom_context->vbios_pn, sizeof(atom_context->vbios_pn));
-		vbios_info.version = atom_context->version;
-		memcpy(vbios_info.vbios_ver_str, atom_context->vbios_ver_str,
-					sizeof(atom_context->vbios_ver_str));
-		memcpy(vbios_info.date, atom_context->date, sizeof(atom_context->date));
+		if (atom_context) {
+			memcpy(vbios_info.name, atom_context->name,
+			       sizeof(atom_context->name));
+			memcpy(vbios_info.vbios_pn, atom_context->vbios_pn,
+			       sizeof(atom_context->vbios_pn));
+			vbios_info.version = atom_context->version;
+			memcpy(vbios_info.vbios_ver_str, atom_context->vbios_ver_str,
+			       sizeof(atom_context->vbios_ver_str));
+			memcpy(vbios_info.date, atom_context->date,
+			       sizeof(atom_context->date));
+		}
 
 		return copy_to_user(out, &vbios_info,
 				    min((size_t)size, sizeof(vbios_info))) ? -EFAULT : 0;
@@ -992,7 +1168,21 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 	case AMDGPU_INFO_SENSOR_GPU_AVG_POWER:
 		/* get average GPU power */
 		if (amdgpu_dpm_read_sensor(adev,
-					   AMDGPU_PP_SENSOR_GPU_POWER,
+					   AMDGPU_PP_SENSOR_GPU_AVG_POWER,
+					   (void *)&ui32, &ui32_size)) {
+			/* fall back to input power for backwards compat */
+			if (amdgpu_dpm_read_sensor(adev,
+						   AMDGPU_PP_SENSOR_GPU_INPUT_POWER,
+						   (void *)&ui32, &ui32_size)) {
+				return -EINVAL;
+			}
+		}
+		ui32 >>= 8;
+		break;
+	case AMDGPU_INFO_SENSOR_GPU_INPUT_POWER:
+		/* get input GPU power */
+		if (amdgpu_dpm_read_sensor(adev,
+					   AMDGPU_PP_SENSOR_GPU_INPUT_POWER,
 					   (void *)&ui32, &ui32_size)) {
 			return -EINVAL;
 		}
@@ -1077,6 +1267,9 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 		struct drm_amdgpu_info_video_caps *caps;
 		int r;
 
+		if (!adev->asic_funcs->query_video_codecs)
+			return -EINVAL;
+
 		switch (info->video_cap.type) {
 		case AMDGPU_INFO_VIDEO_CAPS_DECODE:
 			r = amdgpu_asic_query_video_codecs(adev, false, &codecs);
@@ -1129,6 +1322,51 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 		kfree(caps);
 		return r;
 	}
+	case AMDGPU_INFO_MAX_IBS: {
+		uint32_t max_ibs[AMDGPU_HW_IP_NUM];
+
+		for (i = 0; i < AMDGPU_HW_IP_NUM; ++i)
+			max_ibs[i] = amdgpu_ring_max_ibs(i);
+
+		return copy_to_user(out, max_ibs,
+				    min((size_t)size, sizeof(max_ibs))) ? -EFAULT : 0;
+	}
+	case AMDGPU_INFO_GPUVM_FAULT: {
+		struct amdgpu_fpriv *fpriv = filp->driver_priv;
+		struct amdgpu_vm *vm = &fpriv->vm;
+		struct drm_amdgpu_info_gpuvm_fault gpuvm_fault;
+		unsigned long flags;
+
+		if (!vm)
+			return -EINVAL;
+
+		memset(&gpuvm_fault, 0, sizeof(gpuvm_fault));
+
+		xa_lock_irqsave(&adev->vm_manager.pasids, flags);
+		gpuvm_fault.addr = vm->fault_info.addr;
+		gpuvm_fault.status = vm->fault_info.status;
+		gpuvm_fault.vmhub = vm->fault_info.vmhub;
+		xa_unlock_irqrestore(&adev->vm_manager.pasids, flags);
+
+		return copy_to_user(out, &gpuvm_fault,
				    min((size_t)size, sizeof(gpuvm_fault))) ? -EFAULT : 0;
+	}
+	case AMDGPU_INFO_UQ_FW_AREAS: {
+		struct drm_amdgpu_info_uq_metadata meta_info = {};
+
+		switch (info->query_hw_ip.type) {
+		case AMDGPU_HW_IP_GFX:
+			ret = amdgpu_userq_metadata_info_gfx(adev, info, &meta_info.gfx);
+			if (ret)
+				return ret;
+
+			ret = copy_to_user(out, &meta_info,
+					   min((size_t)size, sizeof(meta_info))) ? -EFAULT : 0;
+			return 0;
+		default:
+			return -EINVAL;
+		}
+	}
 	default:
 		DRM_DEBUG_KMS("Invalid request %d\n", info->query);
 		return -EINVAL;
@@ -1136,23 +1374,6 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 	return 0;
 }
 
-
-/*
- * Outdated mess for old drm with Xorg being in charge (void function now).
- */
-/**
- * amdgpu_driver_lastclose_kms - drm callback for last close
- *
- * @dev: drm dev pointer
- *
- * Switch vga_switcheroo state after last close (all asics).
- */
-void amdgpu_driver_lastclose_kms(struct drm_device *dev)
-{
-	drm_fb_helper_lastclose(dev);
-	vga_switcheroo_process_delayed_switch();
-}
-
 /**
  * amdgpu_driver_open_kms - drm callback for open
  *
@@ -1195,13 +1416,15 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
 		pasid = 0;
 	}
 
-	r = amdgpu_vm_init(adev, &fpriv->vm);
+	r = amdgpu_xcp_open_device(adev, fpriv, file_priv);
 	if (r)
 		goto error_pasid;
 
-	r = amdgpu_vm_set_pasid(adev, &fpriv->vm, pasid);
+	amdgpu_debugfs_vm_init(file_priv);
+
+	r = amdgpu_vm_init(adev, &fpriv->vm, fpriv->xcp_id, pasid);
 	if (r)
-		goto error_vm;
+		goto error_pasid;
 
 	fpriv->prt_va = amdgpu_vm_bo_add(adev, &fpriv->vm, NULL);
 	if (!fpriv->prt_va) {
@@ -1209,7 +1432,7 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
 		goto error_vm;
 	}
 
-	if (amdgpu_mcbp) {
+	if (adev->gfx.mcbp) {
 		uint64_t csa_addr = amdgpu_csa_vaddr(adev) & AMDGPU_GMC_HOLE_MASK;
 
 		r = amdgpu_map_static_csa(adev, &fpriv->vm, adev->virt.csa_obj,
@@ -1218,9 +1441,21 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
 			goto error_vm;
 	}
 
+	r = amdgpu_seq64_map(adev, &fpriv->vm, &fpriv->seq64_va);
+	if (r)
+		goto error_vm;
+
 	mutex_init(&fpriv->bo_list_lock);
 	idr_init_base(&fpriv->bo_list_handles, 1);
 
+	r = amdgpu_userq_mgr_init(&fpriv->userq_mgr, file_priv, adev);
+	if (r)
+		DRM_WARN("Can't setup usermode queues, use legacy workload submission only\n");
+
+	r = amdgpu_eviction_fence_init(&fpriv->evf_mgr);
+	if (r)
+		goto error_vm;
+
 	amdgpu_ctx_mgr_init(&fpriv->ctx_mgr, adev);
 
 	file_priv->driver_priv = fpriv;
@@ -1230,15 +1465,12 @@ error_vm:
 	amdgpu_vm_fini(adev, &fpriv->vm);
 
 error_pasid:
-	if (pasid) {
+	if (pasid)
 		amdgpu_pasid_free(pasid);
-		amdgpu_vm_set_pasid(adev, &fpriv->vm, 0);
-	}
 
 	kfree(fpriv);
 
 out_suspend:
-	pm_runtime_mark_last_busy(dev->dev);
 pm_put:
 	pm_runtime_put_autosuspend(dev->dev);
 
@@ -1273,14 +1505,16 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
 	if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_VCE) != NULL)
 		amdgpu_vce_free_handles(adev, file_priv);
 
-	if (amdgpu_mcbp) {
-		/* TODO: how to handle reserve failure */
-		BUG_ON(amdgpu_bo_reserve(adev->virt.csa_obj, true));
-		amdgpu_vm_bo_del(adev, fpriv->csa_va);
+	if (fpriv->csa_va) {
+		uint64_t csa_addr = amdgpu_csa_vaddr(adev) & AMDGPU_GMC_HOLE_MASK;
+
+		WARN_ON(amdgpu_unmap_static_csa(adev, &fpriv->vm, adev->virt.csa_obj,
+						fpriv->csa_va, csa_addr));
 		fpriv->csa_va = NULL;
-		amdgpu_bo_unreserve(adev->virt.csa_obj);
 	}
 
+	amdgpu_seq64_unmap(adev, fpriv);
+
 	pasid = fpriv->vm.pasid;
 	pd = amdgpu_bo_ref(fpriv->vm.root.bo);
 	if (!WARN_ON(amdgpu_bo_reserve(pd, true))) {
@@ -1304,7 +1538,6 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
 	kfree(fpriv);
 	file_priv->driver_priv = NULL;
 
-	pm_runtime_mark_last_busy(dev->dev);
 	pm_runtime_put_autosuspend(dev->dev);
 }
 
@@ -1430,7 +1663,7 @@ void amdgpu_disable_vblank_kms(struct drm_crtc *crtc)
 
 static int amdgpu_debugfs_firmware_info_show(struct seq_file *m, void *unused)
 {
-	struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
+	struct amdgpu_device *adev = m->private;
 	struct drm_amdgpu_info_firmware fw_info;
 	struct drm_amdgpu_query_fw query_fw;
 	struct atom_context *ctx = adev->mode_info.atom_context;
@@ -1438,7 +1671,7 @@ static int amdgpu_debugfs_firmware_info_show(struct seq_file *m, void *unused)
 	int ret, i;
 
 	static const char *ta_fw_name[TA_FW_TYPE_MAX_INDEX] = {
-#define TA_FW_NAME(type) [TA_FW_TYPE_PSP_##type] = #type
+#define TA_FW_NAME(type)[TA_FW_TYPE_PSP_##type] = #type
 		TA_FW_NAME(XGMI),
 		TA_FW_NAME(RAS),
 		TA_FW_NAME(HDCP),
@@ -1537,7 +1770,7 @@ static int amdgpu_debugfs_firmware_info_show(struct seq_file *m, void *unused)
 		   fw_info.feature, fw_info.ver);
 
 	/* RLCV */
-        query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLCV;
+	query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLCV;
 	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
 	if (ret)
 		return ret;
@@ -1681,7 +1914,15 @@ static int amdgpu_debugfs_firmware_info_show(struct seq_file *m, void *unused)
 	seq_printf(m, "MES feature version: %u, firmware version: 0x%08x\n",
 		   fw_info.feature, fw_info.ver);
 
-	seq_printf(m, "VBIOS version: %s\n", ctx->vbios_version);
+	/* VPE */
+	query_fw.fw_type = AMDGPU_INFO_FW_VPE;
+	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+	if (ret)
+		return ret;
+	seq_printf(m, "VPE feature version: %u, firmware version: 0x%08x\n",
+		   fw_info.feature, fw_info.ver);
+
+	seq_printf(m, "VBIOS version: %s\n", ctx->vbios_pn);
 
 	return 0;
 }
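One behavioral note on the sensor hunk: AMDGPU_INFO_SENSOR_GPU_AVG_POWER now falls back to the input-power sensor on SMU firmware that no longer exposes an average-power reading, and the ui32 >>= 8 keeps the existing convention of returning integer watts (the DPM sensor appears to report an 8.8 fixed-point value). A sketch of the query, using the same request pattern as the earlier examples:

    #include <stdint.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <drm/amdgpu_drm.h>

    /* Average board power in watts; the kernel falls back to input power
     * internally, so the same query keeps working across SMU generations. */
    static int gpu_avg_power(int fd, uint32_t *watts)
    {
        struct drm_amdgpu_info request;

        memset(&request, 0, sizeof(request));
        request.return_pointer = (uintptr_t)watts;
        request.return_size = sizeof(*watts);
        request.query = AMDGPU_INFO_SENSOR;
        request.sensor_info.type = AMDGPU_INFO_SENSOR_GPU_AVG_POWER;

        return ioctl(fd, DRM_IOCTL_AMDGPU_INFO, &request);
    }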
