author | Dave Airlie <airlied@redhat.com> | 2025-05-19 09:00:37 +1000 |
---|---|---|
committer | Dave Airlie <airlied@redhat.com> | 2025-05-19 09:02:41 +1000 |
commit | 72dc7c0585cbdb5509bf22eb9d3575ff0f5f7a2a (patch) | |
tree | 20f60e4761668f03cf89ffa4f3ebe40b715dcc65 /drivers | |
parent | 5dca4335ba3316b5433b85f2325311ae144cf68a (diff) | |
parent | 2f0268ca1cac4561b4710882c2d27afa32e42cb3 (diff) | |
Merge tag 'amd-drm-next-6.16-2025-05-16' of https://gitlab.freedesktop.org/agd5f/linux into drm-next
amdgpu:
- Misc code cleanups
- UserQ fixes
- MALL reporting fix
- DP AUX fixes
- DCN 3.5 fixes
- DP MST fixes
- DC DMI quirks cleanup
- RAS fixes
- SR-IOV updates
- GC 9.5 updates
- Misc display fixes
- VCN 4.0.5 powergating race fix
- SMU 13.x updates
- Partitioning fixes
- VCN 5.0.1 SR-IOV updates
- JPEG 5.0.1 SR-IOV updates
amdkfd:
- Fix spurious warning in interrupt code
- XNACK fixes
radeon:
- CIK doorbell cleanup
Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexander.deucher@amd.com>
Link: https://lore.kernel.org/r/20250516204609.2437472-1-alexander.deucher@amd.com
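One of the fixes merged here changes amdgpu_eviction_fence_destroy() to drop its reference through the locally fetched ev_fence pointer rather than through evf_mgr->ev_fence, which can be swapped while the wait is in flight. The sketch below is a minimal, self-contained mock of that pattern; the fence/fence_mgr types and helpers are stand-ins, not the real dma_fence API.

```c
#include <assert.h>

struct fence { int refcount; };
struct fence_mgr { struct fence *ev_fence; };

static void fence_wait(struct fence *f) { (void)f; /* block until signaled */ }
static void fence_put(struct fence *f) { assert(f->refcount > 0); f->refcount--; }

static void mgr_destroy(struct fence_mgr *mgr)
{
	struct fence *ev_fence = mgr->ev_fence;	/* snapshot the pointer once */

	if (!ev_fence)
		return;

	fence_wait(ev_fence);
	/* Put the reference that was waited on; mgr->ev_fence may already
	 * point at a replacement fence, so putting through the manager
	 * field could unbalance the refcount of the wrong object. */
	fence_put(ev_fence);
}

int main(void)
{
	struct fence f = { .refcount = 1 };
	struct fence_mgr mgr = { .ev_fence = &f };

	mgr_destroy(&mgr);
	return f.refcount; /* 0 on success */
}
```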
Diffstat (limited to 'drivers')
112 files changed, 2041 insertions, 1127 deletions
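The UserQ changes in the diff below also reorder doorbell BO setup so the object is reserved (locked) before it is pinned, and queue destroy reserves again before unpinning, with the error path unwinding exactly the steps taken. A compilable stand-in sketch of that lock-then-pin ordering follows; mock_bo and the bo_* helpers are hypothetical, not the real amdgpu_bo API.

```c
#include <stdio.h>

struct mock_bo { int reserved; int pinned; };

static int bo_reserve(struct mock_bo *bo) { bo->reserved = 1; return 0; }
static void bo_unreserve(struct mock_bo *bo) { bo->reserved = 0; }
static void bo_unpin(struct mock_bo *bo) { bo->pinned = 0; }

static int bo_pin(struct mock_bo *bo)
{
	if (!bo->reserved)
		return -1;	/* pinning requires the reservation lock */
	bo->pinned = 1;
	return 0;
}

static int doorbell_setup(struct mock_bo *bo)
{
	int r;

	r = bo_reserve(bo);	/* take the lock first */
	if (r)
		return r;

	r = bo_pin(bo);		/* pin while the reservation is held */
	if (r)
		goto unresv_bo;

	/* ... generate and validate the doorbell index here ... */

	bo_unreserve(bo);	/* keep the pin, drop the lock */
	return 0;

unresv_bo:
	bo_unreserve(bo);
	return r;
}

static void doorbell_teardown(struct mock_bo *bo)
{
	/* mirror of the queue-destroy fix: reserve again before unpinning */
	if (!bo_reserve(bo)) {
		bo_unpin(bo);
		bo_unreserve(bo);
	}
}

int main(void)
{
	struct mock_bo bo = {0};

	printf("setup: %d, pinned=%d\n", doorbell_setup(&bo), bo.pinned);
	doorbell_teardown(&bo);
	printf("teardown done, pinned=%d\n", bo.pinned);
	return 0;
}
```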
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
index eb015bdda8a7..c7d32fb216e4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
@@ -281,6 +281,9 @@ static int convert_atom_mem_type_to_vram_type(struct amdgpu_device *adev,
 	case ATOM_DGPU_VRAM_TYPE_GDDR6:
 		vram_type = AMDGPU_VRAM_TYPE_GDDR6;
 		break;
+	case ATOM_DGPU_VRAM_TYPE_HBM3E:
+		vram_type = AMDGPU_VRAM_TYPE_HBM3E;
+		break;
 	default:
 		vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
 		break;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
index cfdf558b48b6..02138aa55793 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
@@ -109,7 +109,7 @@ int amdgpu_unmap_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 	struct drm_exec exec;
 	int r;
 
-	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
+	drm_exec_init(&exec, 0, 0);
 	drm_exec_until_all_locked(&exec) {
 		r = amdgpu_vm_lock_pd(vm, &exec, 0);
 		if (likely(!r))
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index a1450f13d963..8e626f50b362 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -2105,6 +2105,7 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
 	amdgpu_rap_debugfs_init(adev);
 	amdgpu_securedisplay_debugfs_init(adev);
 	amdgpu_fw_attestation_debugfs_init(adev);
+	amdgpu_psp_debugfs_init(adev);
 
 	debugfs_create_file("amdgpu_evict_vram", 0400, root, adev,
 			    &amdgpu_evict_vram_fops);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_eviction_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_eviction_fence.c
index 1a7469543db5..73b629b5f56f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_eviction_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_eviction_fence.c
@@ -183,7 +183,7 @@ void amdgpu_eviction_fence_destroy(struct amdgpu_eviction_fence_mgr *evf_mgr)
 	dma_fence_wait(&ev_fence->base, false);
 
 	/* Last unref of ev_fence */
-	dma_fence_put(&evf_mgr->ev_fence->base);
+	dma_fence_put(&ev_fence->base);
 }
 
 int amdgpu_eviction_fence_attach(struct amdgpu_eviction_fence_mgr *evf_mgr,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
index bd7fc123b8f9..80fa29c26e9e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
@@ -62,6 +62,9 @@
  */
 #define AMDGPU_GMC_FAULT_TIMEOUT	5000ULL
 
+/* XNACK flags */
+#define AMDGPU_GMC_XNACK_FLAG_CHAIN	BIT(0)
+
 struct firmware;
 
 enum amdgpu_memory_partition {
@@ -301,6 +304,7 @@ struct amdgpu_gmc {
 	struct amdgpu_xgmi xgmi;
 	struct amdgpu_irq_src	ecc_irq;
 	int noretry;
+	uint32_t xnack_flags;
 
 	uint32_t vmid0_page_table_block_size;
 	uint32_t vmid0_page_table_depth;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index 19ce4da285e8..5517451fc75d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -619,6 +619,10 @@ int amdgpu_irq_get(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
 int amdgpu_irq_put(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
 		   unsigned int type)
 {
+	/* When the threshold is reached, the interrupt source may not be enabled; return -EINVAL */
+	if (amdgpu_ras_is_rma(adev))
+		return -EINVAL;
+
 	if (!adev->irq.installed)
 		return -ENOENT;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 8d4a2aed7231..9fbb04aee97b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -1425,16 +1425,16 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
 	mutex_init(&fpriv->bo_list_lock);
 	idr_init_base(&fpriv->bo_list_handles, 1);
 
+	r = amdgpu_userq_mgr_init(&fpriv->userq_mgr, file_priv, adev);
+	if (r)
+		DRM_WARN("Can't setup usermode queues, use legacy workload submission only\n");
+
 	r = amdgpu_eviction_fence_init(&fpriv->evf_mgr);
 	if (r)
 		goto error_vm;
 
 	amdgpu_ctx_mgr_init(&fpriv->ctx_mgr, adev);
 
-	r = amdgpu_userq_mgr_init(&fpriv->userq_mgr, file_priv, adev);
-	if (r)
-		DRM_WARN("Can't setup usermode queues, use legacy workload submission only\n");
-
 	file_priv->driver_priv = fpriv;
 	goto out_suspend;
@@ -1502,10 +1502,11 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
 		amdgpu_bo_unreserve(pd);
 	}
 
-	fpriv->evf_mgr.fd_closing = true;
-	amdgpu_userq_mgr_fini(&fpriv->userq_mgr);
-	amdgpu_eviction_fence_destroy(&fpriv->evf_mgr);
-
+	if (!fpriv->evf_mgr.fd_closing) {
+		fpriv->evf_mgr.fd_closing = true;
+		amdgpu_userq_mgr_fini(&fpriv->userq_mgr);
+		amdgpu_eviction_fence_destroy(&fpriv->evf_mgr);
+	}
 	amdgpu_ctx_mgr_fini(&fpriv->ctx_mgr);
 	amdgpu_vm_fini(adev, &fpriv->vm);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index dbe57996a481..73403744331a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -1044,7 +1044,8 @@ static const char * const amdgpu_vram_names[] = {
 	"GDDR6",
 	"DDR5",
 	"LPDDR4",
-	"LPDDR5"
+	"LPDDR5",
+	"HBM3E"
 };
 
 /**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index bb67d2bbd7f0..e6f0b035e20b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -4186,6 +4186,110 @@ const struct attribute_group amdgpu_flash_attr_group = {
 	.is_visible = amdgpu_flash_attr_is_visible,
 };
 
+#if defined(CONFIG_DEBUG_FS)
+static int psp_read_spirom_debugfs_open(struct inode *inode, struct file *filp)
+{
+	struct amdgpu_device *adev = filp->f_inode->i_private;
+	struct spirom_bo *bo_triplet;
+	int ret;
+
+	/* serialize the open() file calling */
+	if (!mutex_trylock(&adev->psp.mutex))
+		return -EBUSY;
+
+	/*
+	 * Make sure only one userspace process is alive for dumping so that
+	 * only one memory buffer of AMD_VBIOS_FILE_MAX_SIZE * 2 is consumed,
+	 * e.g. when one process tries to open the file while another has
+	 * proceeded to read or release. This also eliminates the need for a
+	 * mutex in the read() and release() callbacks.
+	 */
+	if (adev->psp.spirom_dump_trip) {
+		mutex_unlock(&adev->psp.mutex);
+		return -EBUSY;
+	}
+
+	bo_triplet = kzalloc(sizeof(struct spirom_bo), GFP_KERNEL);
+	if (!bo_triplet) {
+		mutex_unlock(&adev->psp.mutex);
+		return -ENOMEM;
+	}
+
+	ret = amdgpu_bo_create_kernel(adev, AMD_VBIOS_FILE_MAX_SIZE_B * 2,
+				      AMDGPU_GPU_PAGE_SIZE,
+				      AMDGPU_GEM_DOMAIN_GTT,
+				      &bo_triplet->bo,
+				      &bo_triplet->mc_addr,
+				      &bo_triplet->cpu_addr);
+	if (ret)
+		goto rel_trip;
+
+	ret = psp_dump_spirom(&adev->psp, bo_triplet->mc_addr);
+	if (ret)
+		goto rel_bo;
+
+	adev->psp.spirom_dump_trip = bo_triplet;
+	mutex_unlock(&adev->psp.mutex);
+	return 0;
+rel_bo:
+	amdgpu_bo_free_kernel(&bo_triplet->bo, &bo_triplet->mc_addr,
+			      &bo_triplet->cpu_addr);
+rel_trip:
+	kfree(bo_triplet);
+	mutex_unlock(&adev->psp.mutex);
+	dev_err(adev->dev, "IFWI dump failed, err = %d\n", ret);
+	return ret;
+}
+
+static ssize_t psp_read_spirom_debugfs_read(struct file *filp, char __user *buf, size_t size,
+					    loff_t *pos)
+{
+	struct amdgpu_device *adev = filp->f_inode->i_private;
+	struct spirom_bo *bo_triplet = adev->psp.spirom_dump_trip;
+
+	if (!bo_triplet)
+		return -EINVAL;
+
+	return simple_read_from_buffer(buf,
+				       size,
+				       pos, bo_triplet->cpu_addr,
+				       AMD_VBIOS_FILE_MAX_SIZE_B * 2);
+}
+
+static int psp_read_spirom_debugfs_release(struct inode *inode, struct file *filp)
+{
+	struct amdgpu_device *adev = filp->f_inode->i_private;
+	struct spirom_bo *bo_triplet = adev->psp.spirom_dump_trip;
+
+	if (bo_triplet) {
+		amdgpu_bo_free_kernel(&bo_triplet->bo, &bo_triplet->mc_addr,
+				      &bo_triplet->cpu_addr);
+		kfree(bo_triplet);
+	}
+
+	adev->psp.spirom_dump_trip = NULL;
+	return 0;
+}
+
+static const struct file_operations psp_dump_spirom_debugfs_ops = {
+	.owner = THIS_MODULE,
+	.open = psp_read_spirom_debugfs_open,
+	.read = psp_read_spirom_debugfs_read,
+	.release = psp_read_spirom_debugfs_release,
+	.llseek = default_llseek,
+};
+#endif
+
+void amdgpu_psp_debugfs_init(struct amdgpu_device *adev)
+{
+#if defined(CONFIG_DEBUG_FS)
+	struct drm_minor *minor = adev_to_drm(adev)->primary;
+
+	debugfs_create_file_size("psp_spirom_dump", 0444, minor->debugfs_root,
+				 adev, &psp_dump_spirom_debugfs_ops, AMD_VBIOS_FILE_MAX_SIZE_B * 2);
+#endif
+}
+
 const struct amd_ip_funcs psp_ip_funcs = {
 	.name = "psp",
 	.early_init = psp_early_init,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
index dcf5e8e0b9e3..428adc7f741d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
@@ -39,6 +39,18 @@
 #define PSP_TMR_ALIGNMENT	0x100000
 #define PSP_FW_NAME_LEN		0x24
 
+/* VBIOS gfl defines */
+#define MBOX_READY_MASK		0x80000000
+#define MBOX_STATUS_MASK	0x0000FFFF
+#define MBOX_COMMAND_MASK	0x00FF0000
+#define MBOX_READY_FLAG		0x80000000
+#define C2PMSG_CMD_SPI_UPDATE_ROM_IMAGE_ADDR_LO	0x2
+#define C2PMSG_CMD_SPI_UPDATE_ROM_IMAGE_ADDR_HI	0x3
+#define C2PMSG_CMD_SPI_UPDATE_FLASH_IMAGE	0x4
+#define C2PMSG_CMD_SPI_GET_ROM_IMAGE_ADDR_LO	0xf
+#define C2PMSG_CMD_SPI_GET_ROM_IMAGE_ADDR_HI	0x10
+#define C2PMSG_CMD_SPI_GET_FLASH_IMAGE	0x11
+
 extern const struct attribute_group amdgpu_flash_attr_group;
 
 enum psp_shared_mem_size {
@@ -138,6 +150,7 @@ struct psp_funcs {
 	int (*load_usbc_pd_fw)(struct psp_context *psp, uint64_t fw_pri_mc_addr);
 	int (*read_usbc_pd_fw)(struct psp_context *psp, uint32_t *fw_ver);
 	int (*update_spirom)(struct psp_context *psp, uint64_t fw_pri_mc_addr);
+	int (*dump_spirom)(struct psp_context *psp, uint64_t fw_pri_mc_addr);
 	int (*vbflash_stat)(struct psp_context *psp);
 	int (*fatal_error_recovery_quirk)(struct psp_context *psp);
 	bool (*get_ras_capability)(struct psp_context *psp);
@@ -322,6 +335,14 @@ struct psp_runtime_scpm_entry {
 	enum psp_runtime_scpm_authentication scpm_status;
 };
 
+#if defined(CONFIG_DEBUG_FS)
+struct spirom_bo {
+	struct amdgpu_bo *bo;
+	uint64_t mc_addr;
+	void *cpu_addr;
+};
+#endif
+
 struct psp_context {
 	struct amdgpu_device *adev;
 	struct psp_ring km_ring;
@@ -409,6 +430,9 @@ struct psp_context {
 	char *vbflash_tmp_buf;
 	size_t vbflash_image_size;
 	bool vbflash_done;
+#if defined(CONFIG_DEBUG_FS)
+	struct spirom_bo *spirom_dump_trip;
+#endif
 };
 
 struct amdgpu_psp_funcs {
@@ -467,6 +491,10 @@ struct amdgpu_psp_funcs {
 	((psp)->funcs->update_spirom ? \
 	(psp)->funcs->update_spirom((psp), fw_pri_mc_addr) : -EINVAL)
 
+#define psp_dump_spirom(psp, fw_pri_mc_addr) \
+	((psp)->funcs->dump_spirom ? \
+	(psp)->funcs->dump_spirom((psp), fw_pri_mc_addr) : -EINVAL)
+
 #define psp_vbflash_status(psp) \
 	((psp)->funcs->vbflash_stat ? \
 	(psp)->funcs->vbflash_stat((psp)) : -EINVAL)
@@ -578,6 +606,7 @@ int psp_config_sq_perfmon(struct psp_context *psp, uint32_t xcp_id,
 bool amdgpu_psp_tos_reload_needed(struct amdgpu_device *adev);
 int amdgpu_psp_reg_program_no_ring(struct psp_context *psp, uint32_t val,
 				   enum psp_reg_prog_id id);
+void amdgpu_psp_debugfs_init(struct amdgpu_device *adev);
 
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index f40b35f7f679..dc07936d2fcb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -2889,6 +2889,7 @@ static int __amdgpu_ras_convert_rec_from_rom(struct amdgpu_device *adev,
 		if (amdgpu_ras_mca2pa_by_idx(adev, bps, err_data))
 			return -EINVAL;
 	}
+
 	return __amdgpu_ras_restore_bad_pages(adev, err_data->err_addr,
 					      adev->umc.retire_unit);
 }
@@ -2903,7 +2904,7 @@ int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
 		&adev->psp.ras_context.ras->eeprom_control;
 	enum amdgpu_memory_partition nps = AMDGPU_NPS1_PARTITION_MODE;
 	int ret = 0;
-	uint32_t i;
+	uint32_t i = 0;
 
 	if (!con || !con->eh_data || !bps || pages <= 0)
 		return 0;
@@ -2924,34 +2925,36 @@ int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
 	mutex_lock(&con->recovery_lock);
 
 	if (from_rom) {
-		for (i = 0; i < pages; i++) {
-			if (control->ras_num_recs - i >= adev->umc.retire_unit) {
-				if ((bps[i].address == bps[i + 1].address) &&
-				    (bps[i].mem_channel == bps[i + 1].mem_channel)) {
-					//deal with retire_unit records a time
-					ret = __amdgpu_ras_convert_rec_array_from_rom(adev,
-						&bps[i], &err_data, nps);
-					if (ret)
-						goto free;
-					i += (adev->umc.retire_unit - 1);
+		/* there are no PA records in V3, so skip PA record processing */
+		if (control->tbl_hdr.version < RAS_TABLE_VER_V3) {
+			for (i = 0; i < pages; i++) {
+				if (control->ras_num_recs - i >= adev->umc.retire_unit) {
+					if ((bps[i].address == bps[i + 1].address) &&
+					    (bps[i].mem_channel == bps[i + 1].mem_channel)) {
+						/* deal with retire_unit records a time */
+						ret = __amdgpu_ras_convert_rec_array_from_rom(adev,
+							&bps[i], &err_data, nps);
+						if (ret)
+							control->ras_num_bad_pages -= adev->umc.retire_unit;
+						i += (adev->umc.retire_unit - 1);
+					} else {
+						break;
+					}
 				} else {
 					break;
 				}
-			} else {
-				break;
 			}
 		}
 
 		for (; i < pages; i++) {
 			ret = __amdgpu_ras_convert_rec_from_rom(adev,
 				&bps[i], &err_data, nps);
 			if (ret)
-				goto free;
+				control->ras_num_bad_pages -= adev->umc.retire_unit;
 		}
 	} else {
 		ret = __amdgpu_ras_restore_bad_pages(adev, bps, pages);
 	}
 
-free:
 	if (from_rom)
 		kfree(err_data.err_addr);
 	mutex_unlock(&con->recovery_lock);
@@ -3040,21 +3043,28 @@ static int amdgpu_ras_load_bad_pages(struct amdgpu_device *adev)
 		dev_err(adev->dev, "Failed to load EEPROM table records!");
 	} else {
 		if (adev->umc.ras && adev->umc.ras->convert_ras_err_addr) {
-			for (i = 0; i < control->ras_num_recs; i++) {
-				if ((control->ras_num_recs - i) >= adev->umc.retire_unit) {
-					if ((bps[i].address == bps[i + 1].address) &&
-					    (bps[i].mem_channel == bps[i + 1].mem_channel)) {
-						control->ras_num_pa_recs += adev->umc.retire_unit;
-						i += (adev->umc.retire_unit - 1);
+			/* In V3 there are no PA records, and some cases (address == 0)
+			 * may be parsed as PA records, so add a version check to avoid that.
+			 */
+			if (control->tbl_hdr.version < RAS_TABLE_VER_V3) {
+				for (i = 0; i < control->ras_num_recs; i++) {
+					if ((control->ras_num_recs - i) >= adev->umc.retire_unit) {
+						if ((bps[i].address == bps[i + 1].address) &&
+						    (bps[i].mem_channel == bps[i + 1].mem_channel)) {
+							control->ras_num_pa_recs += adev->umc.retire_unit;
+							i += (adev->umc.retire_unit - 1);
+						} else {
+							control->ras_num_mca_recs +=
+								(control->ras_num_recs - i);
+							break;
+						}
 					} else {
-						control->ras_num_mca_recs +=
-							(control->ras_num_recs - i);
+						control->ras_num_mca_recs += (control->ras_num_recs - i);
 						break;
 					}
-				} else {
-					control->ras_num_mca_recs += (control->ras_num_recs - i);
-					break;
 				}
+			} else {
+				control->ras_num_mca_recs = control->ras_num_recs;
 			}
 		}
@@ -3463,6 +3473,10 @@ int amdgpu_ras_init_badpage_info(struct amdgpu_device *adev)
 	if (!adev->umc.ras || !adev->umc.ras->convert_ras_err_addr)
 		control->ras_num_pa_recs = control->ras_num_recs;
 
+	if (adev->umc.ras &&
+	    adev->umc.ras->get_retire_flip_bits)
+		adev->umc.ras->get_retire_flip_bits(adev);
+
 	if (control->ras_num_recs) {
 		ret = amdgpu_ras_load_bad_pages(adev);
 		if (ret)
@@ -4484,8 +4498,11 @@ void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev)
 	enum ras_event_type type = RAS_EVENT_TYPE_FATAL;
 	u64 event_id;
 
-	if (amdgpu_ras_mark_ras_event(adev, type))
+	if (amdgpu_ras_mark_ras_event(adev, type)) {
+		dev_err(adev->dev,
+			"uncorrectable hardware error (ERREVENT_ATHUB_INTERRUPT) detected!\n");
 		return;
+	}
 
 	event_id = amdgpu_ras_acquire_event_id(adev, type);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 53b71e9d8076..9c5df35f05b7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -2081,6 +2081,8 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev)
 	amdgpu_vram_mgr_fini(adev);
 	amdgpu_gtt_mgr_fini(adev);
 	amdgpu_preempt_mgr_fini(adev);
+	amdgpu_doorbell_fini(adev);
+
 	ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GDS);
 	ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GWS);
 	ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_OA);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
index 3d9e9fdc10b4..4a72c2bbd49e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
@@ -767,6 +767,7 @@ FW_VERSION_ATTR(vcn_fw_version, 0444, vcn.fw_version);
 FW_VERSION_ATTR(dmcu_fw_version, 0444, dm.dmcu_fw_version);
 FW_VERSION_ATTR(mes_fw_version, 0444, mes.sched_version & AMDGPU_MES_VERSION_MASK);
 FW_VERSION_ATTR(mes_kiq_fw_version, 0444, mes.kiq_version & AMDGPU_MES_VERSION_MASK);
+FW_VERSION_ATTR(pldm_fw_version, 0444, firmware.pldm_version);
 
 static struct attribute *fw_attrs[] = {
 	&dev_attr_vce_fw_version.attr, &dev_attr_uvd_fw_version.attr,
@@ -781,7 +782,7 @@ static struct attribute *fw_attrs[] = {
&dev_attr_sdma2_fw_version.attr, &dev_attr_vcn_fw_version.attr, &dev_attr_dmcu_fw_version.attr, &dev_attr_imu_fw_version.attr, &dev_attr_mes_fw_version.attr, &dev_attr_mes_kiq_fw_version.attr, - NULL + &dev_attr_pldm_fw_version.attr, NULL }; #define to_dev_attr(x) container_of(x, struct device_attribute, attr) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h index 06fe21e15ed6..9e89c3487be5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h @@ -602,6 +602,7 @@ struct amdgpu_firmware { void *fw_buf_ptr; uint64_t fw_buf_mc; + uint32_t pldm_version; }; void amdgpu_ucode_print_mc_hdr(const struct common_firmware_header *hdr); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c index 0a1ef95b2866..8c6e55b5b967 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c @@ -529,6 +529,7 @@ int amdgpu_umc_lookup_bad_pages_in_a_row(struct amdgpu_device *adev, pfns[i] = err_data.err_addr[i].retired_page; } ret = i; + adev->umc.err_addr_cnt = err_data.err_addr_cnt; out: kfree(err_data.err_addr); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h index 857693bcd8d4..29ce6b1d214a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h @@ -78,6 +78,18 @@ #define UMC_NPS_SHIFT 40 #define UMC_NPS_MASK 0xffULL +/* three column bits and one row bit in MCA address flip + * in bad page retirement + */ +#define RETIRE_FLIP_BITS_NUM 4 + +struct amdgpu_umc_flip_bits { + uint32_t flip_bits_in_pa[RETIRE_FLIP_BITS_NUM]; + uint32_t flip_row_bit; + uint32_t r13_in_pa; + uint32_t bit_num; +}; + typedef int (*umc_func)(struct amdgpu_device *adev, uint32_t node_inst, uint32_t umc_inst, uint32_t ch_inst, void *data); @@ -100,6 +112,7 @@ struct amdgpu_umc_ras { bool dump_addr); uint32_t (*get_die_id_from_pa)(struct amdgpu_device *adev, uint64_t mca_addr, uint64_t retired_page); + void (*get_retire_flip_bits)(struct amdgpu_device *adev); }; struct amdgpu_umc_funcs { @@ -130,6 +143,10 @@ struct amdgpu_umc { /* active mask for umc node instance */ unsigned long active_mask; + + struct amdgpu_umc_flip_bits flip_bits; + + unsigned long err_addr_cnt; }; int amdgpu_umc_ras_sw_init(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c index 394b036be1d9..295e7186e156 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c @@ -240,17 +240,17 @@ amdgpu_userq_get_doorbell_index(struct amdgpu_userq_mgr *uq_mgr, db_obj->obj = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj)); drm_gem_object_put(gobj); - /* Pin the BO before generating the index, unpin in queue destroy */ - r = amdgpu_bo_pin(db_obj->obj, AMDGPU_GEM_DOMAIN_DOORBELL); + r = amdgpu_bo_reserve(db_obj->obj, true); if (r) { drm_file_err(uq_mgr->file, "[Usermode queues] Failed to pin doorbell object\n"); goto unref_bo; } - r = amdgpu_bo_reserve(db_obj->obj, true); + /* Pin the BO before generating the index, unpin in queue destroy */ + r = amdgpu_bo_pin(db_obj->obj, AMDGPU_GEM_DOMAIN_DOORBELL); if (r) { drm_file_err(uq_mgr->file, "[Usermode queues] Failed to pin doorbell object\n"); - goto unpin_bo; + goto unresv_bo; } switch (db_info->queue_type) { @@ -286,7 +286,8 @@ amdgpu_userq_get_doorbell_index(struct amdgpu_userq_mgr *uq_mgr, unpin_bo: amdgpu_bo_unpin(db_obj->obj); - +unresv_bo: + 
amdgpu_bo_unreserve(db_obj->obj); unref_bo: amdgpu_bo_unref(&db_obj->obj); return r; @@ -301,7 +302,7 @@ amdgpu_userq_destroy(struct drm_file *filp, int queue_id) struct amdgpu_usermode_queue *queue; int r = 0; - cancel_delayed_work(&uq_mgr->resume_work); + cancel_delayed_work_sync(&uq_mgr->resume_work); mutex_lock(&uq_mgr->userq_mutex); queue = amdgpu_userq_find(uq_mgr, queue_id); @@ -311,9 +312,13 @@ amdgpu_userq_destroy(struct drm_file *filp, int queue_id) return -EINVAL; } amdgpu_userq_wait_for_last_fence(uq_mgr, queue); - r = amdgpu_userq_unmap_helper(uq_mgr, queue); - amdgpu_bo_unpin(queue->db_obj.obj); + r = amdgpu_bo_reserve(queue->db_obj.obj, true); + if (!r) { + amdgpu_bo_unpin(queue->db_obj.obj); + amdgpu_bo_unreserve(queue->db_obj.obj); + } amdgpu_bo_unref(&queue->db_obj.obj); + r = amdgpu_userq_unmap_helper(uq_mgr, queue); amdgpu_userq_cleanup(uq_mgr, queue, queue_id); mutex_unlock(&uq_mgr->userq_mutex); @@ -389,6 +394,7 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args) * * This will also make sure we have a valid eviction fence ready to be used. */ + mutex_lock(&adev->userq_mutex); amdgpu_userq_ensure_ev_fence(&fpriv->userq_mgr, &fpriv->evf_mgr); uq_funcs = adev->userq_funcs[args->in.ip_type]; @@ -451,7 +457,6 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args) } /* don't map the queue if scheduling is halted */ - mutex_lock(&adev->userq_mutex); if (adev->userq_halt_for_enforce_isolation && ((queue->queue_type == AMDGPU_HW_IP_GFX) || (queue->queue_type == AMDGPU_HW_IP_COMPUTE))) @@ -461,7 +466,6 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args) if (!skip_map_queue) { r = amdgpu_userq_map_helper(uq_mgr, queue); if (r) { - mutex_unlock(&adev->userq_mutex); drm_file_err(uq_mgr->file, "Failed to map Queue\n"); idr_remove(&uq_mgr->userq_idr, qid); amdgpu_userq_fence_driver_free(queue); @@ -470,13 +474,13 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args) goto unlock; } } - mutex_unlock(&adev->userq_mutex); args->out.queue_id = qid; unlock: mutex_unlock(&uq_mgr->userq_mutex); + mutex_unlock(&adev->userq_mutex); return r; } @@ -746,7 +750,7 @@ amdgpu_userq_evict(struct amdgpu_userq_mgr *uq_mgr, amdgpu_eviction_fence_signal(evf_mgr, ev_fence); if (evf_mgr->fd_closing) { - cancel_delayed_work(&uq_mgr->resume_work); + cancel_delayed_work_sync(&uq_mgr->resume_work); return; } @@ -777,24 +781,25 @@ void amdgpu_userq_mgr_fini(struct amdgpu_userq_mgr *userq_mgr) struct amdgpu_userq_mgr *uqm, *tmp; uint32_t queue_id; - cancel_delayed_work(&userq_mgr->resume_work); + cancel_delayed_work_sync(&userq_mgr->resume_work); + mutex_lock(&adev->userq_mutex); mutex_lock(&userq_mgr->userq_mutex); idr_for_each_entry(&userq_mgr->userq_idr, queue, queue_id) { amdgpu_userq_wait_for_last_fence(userq_mgr, queue); amdgpu_userq_unmap_helper(userq_mgr, queue); amdgpu_userq_cleanup(userq_mgr, queue, queue_id); } - mutex_lock(&adev->userq_mutex); + list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) { if (uqm == userq_mgr) { list_del(&uqm->list); break; } } - mutex_unlock(&adev->userq_mutex); idr_destroy(&userq_mgr->userq_idr); mutex_unlock(&userq_mgr->userq_mutex); + mutex_unlock(&adev->userq_mutex); mutex_destroy(&userq_mgr->userq_mutex); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c index 029cb24c28b3..fc4d0d42e223 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c @@ 
-181,7 +181,7 @@ void amdgpu_userq_fence_driver_destroy(struct kref *ref) unsigned long index, flags; struct dma_fence *f; - spin_lock(&fence_drv->fence_list_lock); + spin_lock_irqsave(&fence_drv->fence_list_lock, flags); list_for_each_entry_safe(fence, tmp, &fence_drv->fences, link) { f = &fence->base; @@ -193,7 +193,7 @@ void amdgpu_userq_fence_driver_destroy(struct kref *ref) list_del(&fence->link); dma_fence_put(f); } - spin_unlock(&fence_drv->fence_list_lock); + spin_unlock_irqrestore(&fence_drv->fence_list_lock, flags); xa_lock_irqsave(xa, flags); xa_for_each(xa, index, xa_fence_drv) @@ -859,8 +859,10 @@ int amdgpu_userq_wait_ioctl(struct drm_device *dev, void *data, num_fences = dma_fence_dedup_array(fences, num_fences); waitq = idr_find(&userq_mgr->userq_idr, wait_info->waitq_id); - if (!waitq) + if (!waitq) { + r = -EINVAL; goto free_fences; + } for (i = 0, cnt = 0; i < num_fences; i++) { struct amdgpu_userq_fence_driver *fence_drv; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c index 23b6f7a4aa4a..b03c3895897b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c @@ -709,10 +709,10 @@ void amdgpu_xcp_cfg_sysfs_fini(struct amdgpu_device *adev) struct amdgpu_xcp_cfg *xcp_cfg; int i; - if (!adev->xcp_mgr) + if (!adev->xcp_mgr || !adev->xcp_mgr->xcp_cfg) return; - xcp_cfg = adev->xcp_mgr->xcp_cfg; + xcp_cfg = adev->xcp_mgr->xcp_cfg; for (i = 0; i < xcp_cfg->num_res; i++) { xcp_res = &xcp_cfg->xcp_res[i]; kobject_put(&xcp_res->kobj); diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c index ae071985f26e..1c083304ae77 100644 --- a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c +++ b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c @@ -448,53 +448,71 @@ static int __aqua_vanjaram_get_xcp_ip_info(struct amdgpu_xcp_mgr *xcp_mgr, int x return 0; } -static int aqua_vanjaram_get_xcp_res_info(struct amdgpu_xcp_mgr *xcp_mgr, - int mode, - struct amdgpu_xcp_cfg *xcp_cfg) +static int __aqua_vanjaram_get_px_mode_info(struct amdgpu_xcp_mgr *xcp_mgr, + int px_mode, int *num_xcp, + uint16_t *nps_modes) { struct amdgpu_device *adev = xcp_mgr->adev; - int max_res[AMDGPU_XCP_RES_MAX] = {}; - bool res_lt_xcp; - int num_xcp, i; - u16 nps_modes; - if (!(xcp_mgr->supp_xcp_modes & BIT(mode))) + if (!num_xcp || !nps_modes || !(xcp_mgr->supp_xcp_modes & BIT(px_mode))) return -EINVAL; - max_res[AMDGPU_XCP_RES_XCC] = NUM_XCC(adev->gfx.xcc_mask); - max_res[AMDGPU_XCP_RES_DMA] = adev->sdma.num_instances; - max_res[AMDGPU_XCP_RES_DEC] = adev->vcn.num_vcn_inst; - max_res[AMDGPU_XCP_RES_JPEG] = adev->jpeg.num_jpeg_inst; - - switch (mode) { + switch (px_mode) { case AMDGPU_SPX_PARTITION_MODE: - num_xcp = 1; - nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE); + *num_xcp = 1; + *nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE); break; case AMDGPU_DPX_PARTITION_MODE: - num_xcp = 2; - nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) | - BIT(AMDGPU_NPS2_PARTITION_MODE); + *num_xcp = 2; + *nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) | + BIT(AMDGPU_NPS2_PARTITION_MODE); break; case AMDGPU_TPX_PARTITION_MODE: - num_xcp = 3; - nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) | - BIT(AMDGPU_NPS4_PARTITION_MODE); + *num_xcp = 3; + *nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) | + BIT(AMDGPU_NPS4_PARTITION_MODE); break; case AMDGPU_QPX_PARTITION_MODE: - num_xcp = 4; - nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) | - BIT(AMDGPU_NPS4_PARTITION_MODE); + *num_xcp = 4; + *nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) | + 
BIT(AMDGPU_NPS4_PARTITION_MODE); break; case AMDGPU_CPX_PARTITION_MODE: - num_xcp = NUM_XCC(adev->gfx.xcc_mask); - nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) | - BIT(AMDGPU_NPS4_PARTITION_MODE); + *num_xcp = NUM_XCC(adev->gfx.xcc_mask); + *nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) | + BIT(AMDGPU_NPS4_PARTITION_MODE); + if (amdgpu_sriov_vf(adev)) + *nps_modes |= BIT(AMDGPU_NPS2_PARTITION_MODE); break; default: return -EINVAL; } + return 0; +} + +static int aqua_vanjaram_get_xcp_res_info(struct amdgpu_xcp_mgr *xcp_mgr, + int mode, + struct amdgpu_xcp_cfg *xcp_cfg) +{ + struct amdgpu_device *adev = xcp_mgr->adev; + int max_res[AMDGPU_XCP_RES_MAX] = {}; + bool res_lt_xcp; + int num_xcp, i, r; + u16 nps_modes; + + if (!(xcp_mgr->supp_xcp_modes & BIT(mode))) + return -EINVAL; + + max_res[AMDGPU_XCP_RES_XCC] = NUM_XCC(adev->gfx.xcc_mask); + max_res[AMDGPU_XCP_RES_DMA] = adev->sdma.num_instances; + max_res[AMDGPU_XCP_RES_DEC] = adev->vcn.num_vcn_inst; + max_res[AMDGPU_XCP_RES_JPEG] = adev->jpeg.num_jpeg_inst; + + r = __aqua_vanjaram_get_px_mode_info(xcp_mgr, mode, &num_xcp, &nps_modes); + if (r) + return r; + xcp_cfg->compatible_nps_modes = (adev->gmc.supported_nps_modes & nps_modes); xcp_cfg->num_res = ARRAY_SIZE(max_res); @@ -543,30 +561,31 @@ static bool __aqua_vanjaram_is_valid_mode(struct amdgpu_xcp_mgr *xcp_mgr, enum amdgpu_gfx_partition mode) { struct amdgpu_device *adev = xcp_mgr->adev; - int num_xcc, num_xccs_per_xcp; + int num_xcc, num_xccs_per_xcp, r; + int num_xcp, nps_mode; + u16 supp_nps_modes; + bool comp_mode; + + nps_mode = adev->gmc.gmc_funcs->query_mem_partition_mode(adev); + r = __aqua_vanjaram_get_px_mode_info(xcp_mgr, mode, &num_xcp, + &supp_nps_modes); + if (r) + return false; + comp_mode = !!(BIT(nps_mode) & supp_nps_modes); num_xcc = NUM_XCC(adev->gfx.xcc_mask); switch (mode) { case AMDGPU_SPX_PARTITION_MODE: - return adev->gmc.num_mem_partitions == 1 && num_xcc > 0; + return comp_mode && num_xcc > 0; case AMDGPU_DPX_PARTITION_MODE: - return adev->gmc.num_mem_partitions <= 2 && (num_xcc % 4) == 0; + return comp_mode && (num_xcc % 4) == 0; case AMDGPU_TPX_PARTITION_MODE: - return (adev->gmc.num_mem_partitions == 1 || - adev->gmc.num_mem_partitions == 3) && - ((num_xcc % 3) == 0); + return comp_mode && ((num_xcc % 3) == 0); case AMDGPU_QPX_PARTITION_MODE: num_xccs_per_xcp = num_xcc / 4; - return (adev->gmc.num_mem_partitions == 1 || - adev->gmc.num_mem_partitions == 4) && - (num_xccs_per_xcp >= 2); + return comp_mode && (num_xccs_per_xcp >= 2); case AMDGPU_CPX_PARTITION_MODE: - /* (num_xcc > 1) because 1 XCC is considered SPX, not CPX. 
- * (num_xcc % adev->gmc.num_mem_partitions) == 0 because - * num_compute_partitions can't be less than num_mem_partitions - */ - return ((num_xcc > 1) && - (num_xcc % adev->gmc.num_mem_partitions) == 0); + return comp_mode && (num_xcc > 1); default: return false; } diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c index 9db2bde5c59d..c233edf60569 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c @@ -1153,6 +1153,12 @@ static int gfx_v9_4_3_sw_init(struct amdgpu_ip_block *ip_block) adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_PIPE; } break; + case IP_VERSION(9, 5, 0): + if (adev->gfx.mec_fw_version >= 21) { + adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; + adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_PIPE; + } + break; default: break; } @@ -1267,6 +1273,22 @@ static void gfx_v9_4_3_xcc_init_gds_vmid(struct amdgpu_device *adev, int xcc_id) } } +/* For ASICs that needs xnack chain and MEC version supports, set SG_CONFIG1 + * DISABLE_XNACK_CHECK_IN_RETRY_DISABLE bit and inform KFD to set xnack_chain + * bit in SET_RESOURCES + */ +static void gfx_v9_4_3_xcc_init_sq(struct amdgpu_device *adev, int xcc_id) +{ + uint32_t data; + + if (!(adev->gmc.xnack_flags & AMDGPU_GMC_XNACK_FLAG_CHAIN)) + return; + + data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regSQ_CONFIG1); + data = REG_SET_FIELD(data, SQ_CONFIG1, DISABLE_XNACK_CHECK_IN_RETRY_DISABLE, 1); + WREG32_SOC15(GC, xcc_id, regSQ_CONFIG1, data); +} + static void gfx_v9_4_3_xcc_constants_init(struct amdgpu_device *adev, int xcc_id) { @@ -1311,6 +1333,7 @@ static void gfx_v9_4_3_xcc_constants_init(struct amdgpu_device *adev, gfx_v9_4_3_xcc_init_compute_vmid(adev, xcc_id); gfx_v9_4_3_xcc_init_gds_vmid(adev, xcc_id); + gfx_v9_4_3_xcc_init_sq(adev, xcc_id); } static void gfx_v9_4_3_constants_init(struct amdgpu_device *adev) @@ -1323,6 +1346,20 @@ static void gfx_v9_4_3_constants_init(struct amdgpu_device *adev) adev->gfx.config.db_debug2 = RREG32_SOC15(GC, GET_INST(GC, 0), regDB_DEBUG2); + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { + /* ToDo: GC 9.4.4 */ + case IP_VERSION(9, 4, 3): + if (adev->gfx.mec_fw_version >= 184) + adev->gmc.xnack_flags |= AMDGPU_GMC_XNACK_FLAG_CHAIN; + break; + case IP_VERSION(9, 5, 0): + if (adev->gfx.mec_fw_version >= 23) + adev->gmc.xnack_flags |= AMDGPU_GMC_XNACK_FLAG_CHAIN; + break; + default: + break; + } + for (i = 0; i < num_xcc; i++) gfx_v9_4_3_xcc_constants_init(adev, i); } @@ -3452,9 +3489,7 @@ static int gfx_v9_4_3_unmap_done(struct amdgpu_device *adev, uint32_t me, static bool gfx_v9_4_3_pipe_reset_support(struct amdgpu_device *adev) { - /*TODO: Need check gfx9.4.4 mec fw whether supports pipe reset as well.*/ - if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) && - adev->gfx.mec_fw_version >= 0x0000009b) + if (!!(adev->gfx.compute_supported_reset & AMDGPU_RESET_TYPE_PER_PIPE)) return true; else dev_warn_once(adev->dev, "Please use the latest MEC version to see whether support pipe reset\n"); diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c index 917d894a1316..72211409227b 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c @@ -748,6 +748,18 @@ static int gmc_v11_0_sw_init(struct amdgpu_ip_block *ip_block) adev->gmc.vram_type = vram_type; adev->gmc.vram_vendor = vram_vendor; + /* The mall_size is already calculated as mall_size_per_umc * num_umc. 
+ * However, for gfx1151, which features a 2-to-1 UMC mapping, + * the result must be multiplied by 2 to determine the actual mall size. + */ + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { + case IP_VERSION(11, 5, 1): + adev->gmc.mall_size *= 2; + break; + default: + break; + } + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { case IP_VERSION(11, 0, 0): case IP_VERSION(11, 0, 1): diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 53050176c244..282197f4ffb1 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -1502,7 +1502,6 @@ static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev) adev->umc.umc_inst_num = UMC_V12_0_UMC_INSTANCE_NUM; adev->umc.node_inst_num /= UMC_V12_0_UMC_INSTANCE_NUM; adev->umc.channel_offs = UMC_V12_0_PER_CHANNEL_OFFSET; - adev->umc.retire_unit = UMC_V12_0_BAD_PAGE_NUM_PER_CHANNEL; if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu) adev->umc.ras = &umc_v12_0_ras; break; @@ -2072,6 +2071,9 @@ static void gmc_v9_4_3_init_vram_info(struct amdgpu_device *adev) { adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM; adev->gmc.vram_width = 128 * 64; + + if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0)) + adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM3E; } static int gmc_v9_0_sw_init(struct amdgpu_ip_block *ip_block) diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c index 6f73033d78b5..cb94bd71300f 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c @@ -28,11 +28,13 @@ #include "soc15d.h" #include "jpeg_v4_0_3.h" #include "jpeg_v5_0_1.h" +#include "mmsch_v5_0.h" #include "vcn/vcn_5_0_0_offset.h" #include "vcn/vcn_5_0_0_sh_mask.h" #include "ivsrcid/vcn/irqsrcs_vcn_5_0.h" +static int jpeg_v5_0_1_start_sriov(struct amdgpu_device *adev); static void jpeg_v5_0_1_set_dec_ring_funcs(struct amdgpu_device *adev); static void jpeg_v5_0_1_set_irq_funcs(struct amdgpu_device *adev); static int jpeg_v5_0_1_set_powergating_state(struct amdgpu_ip_block *ip_block, @@ -163,14 +165,9 @@ static int jpeg_v5_0_1_sw_init(struct amdgpu_ip_block *ip_block) (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1 + j + 11 * jpeg_inst; } else { - if (j < 4) - ring->doorbell_index = - (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + - 4 + j + 32 * jpeg_inst; - else - ring->doorbell_index = - (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + - 8 + j + 32 * jpeg_inst; + ring->doorbell_index = + (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + + 2 + j + 32 * jpeg_inst; } sprintf(ring->name, "jpeg_dec_%d.%d", adev->jpeg.inst[i].aid_id, j); r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq, 0, @@ -237,7 +234,10 @@ static int jpeg_v5_0_1_hw_init(struct amdgpu_ip_block *ip_block) int i, j, r, jpeg_inst; if (amdgpu_sriov_vf(adev)) { - /* jpeg_v5_0_1_start_sriov(adev); */ + r = jpeg_v5_0_1_start_sriov(adev); + if (r) + return r; + for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) { ring = &adev->jpeg.inst[i].ring_dec[j]; @@ -291,8 +291,10 @@ static int jpeg_v5_0_1_hw_fini(struct amdgpu_ip_block *ip_block) cancel_delayed_work_sync(&adev->jpeg.idle_work); - if (adev->jpeg.cur_state != AMD_PG_STATE_GATE) - ret = jpeg_v5_0_1_set_powergating_state(ip_block, AMD_PG_STATE_GATE); + if (!amdgpu_sriov_vf(adev)) { + if (adev->jpeg.cur_state != AMD_PG_STATE_GATE) + ret = jpeg_v5_0_1_set_powergating_state(ip_block, AMD_PG_STATE_GATE); + } return ret; } @@ -422,6 +424,119 @@ static 
void jpeg_v5_0_1_init_jrbc(struct amdgpu_ring *ring) reg_offset); } +static int jpeg_v5_0_1_start_sriov(struct amdgpu_device *adev) +{ + struct amdgpu_ring *ring; + uint64_t ctx_addr; + uint32_t param, resp, expected; + uint32_t tmp, timeout; + + struct amdgpu_mm_table *table = &adev->virt.mm_table; + uint32_t *table_loc; + uint32_t table_size; + uint32_t size, size_dw, item_offset; + uint32_t init_status; + int i, j, jpeg_inst; + + struct mmsch_v5_0_cmd_direct_write + direct_wt = { {0} }; + struct mmsch_v5_0_cmd_end end = { {0} }; + struct mmsch_v5_0_init_header header; + + direct_wt.cmd_header.command_type = + MMSCH_COMMAND__DIRECT_REG_WRITE; + end.cmd_header.command_type = + MMSCH_COMMAND__END; + + for (i = 0; i < adev->jpeg.num_jpeg_inst; i++) { + jpeg_inst = GET_INST(JPEG, i); + + memset(&header, 0, sizeof(struct mmsch_v5_0_init_header)); + header.version = MMSCH_VERSION; + header.total_size = sizeof(struct mmsch_v5_0_init_header) >> 2; + + table_loc = (uint32_t *)table->cpu_addr; + table_loc += header.total_size; + + item_offset = header.total_size; + + for (j = 0; j < adev->jpeg.num_jpeg_rings; j++) { + ring = &adev->jpeg.inst[i].ring_dec[j]; + table_size = 0; + + tmp = SOC15_REG_OFFSET(JPEG, 0, regUVD_LMI_JRBC_RB_64BIT_BAR_LOW); + MMSCH_V5_0_INSERT_DIRECT_WT(tmp, lower_32_bits(ring->gpu_addr)); + tmp = SOC15_REG_OFFSET(JPEG, 0, regUVD_LMI_JRBC_RB_64BIT_BAR_HIGH); + MMSCH_V5_0_INSERT_DIRECT_WT(tmp, upper_32_bits(ring->gpu_addr)); + tmp = SOC15_REG_OFFSET(JPEG, 0, regUVD_JRBC_RB_SIZE); + MMSCH_V5_0_INSERT_DIRECT_WT(tmp, ring->ring_size / 4); + + if (j < 5) { + header.mjpegdec0[j].table_offset = item_offset; + header.mjpegdec0[j].init_status = 0; + header.mjpegdec0[j].table_size = table_size; + } else { + header.mjpegdec1[j - 5].table_offset = item_offset; + header.mjpegdec1[j - 5].init_status = 0; + header.mjpegdec1[j - 5].table_size = table_size; + } + header.total_size += table_size; + item_offset += table_size; + } + + MMSCH_V5_0_INSERT_END(); + + /* send init table to MMSCH */ + size = sizeof(struct mmsch_v5_0_init_header); + table_loc = (uint32_t *)table->cpu_addr; + memcpy((void *)table_loc, &header, size); + + ctx_addr = table->gpu_addr; + WREG32_SOC15(VCN, jpeg_inst, regMMSCH_VF_CTX_ADDR_LO, lower_32_bits(ctx_addr)); + WREG32_SOC15(VCN, jpeg_inst, regMMSCH_VF_CTX_ADDR_HI, upper_32_bits(ctx_addr)); + + tmp = RREG32_SOC15(VCN, jpeg_inst, regMMSCH_VF_VMID); + tmp &= ~MMSCH_VF_VMID__VF_CTX_VMID_MASK; + tmp |= (0 << MMSCH_VF_VMID__VF_CTX_VMID__SHIFT); + WREG32_SOC15(VCN, jpeg_inst, regMMSCH_VF_VMID, tmp); + + size = header.total_size; + WREG32_SOC15(VCN, jpeg_inst, regMMSCH_VF_CTX_SIZE, size); + + WREG32_SOC15(VCN, jpeg_inst, regMMSCH_VF_MAILBOX_RESP, 0); + + param = 0x00000001; + WREG32_SOC15(VCN, jpeg_inst, regMMSCH_VF_MAILBOX_HOST, param); + tmp = 0; + timeout = 1000; + resp = 0; + expected = MMSCH_VF_MAILBOX_RESP__OK; + init_status = + ((struct mmsch_v5_0_init_header *)(table_loc))->mjpegdec0[i].init_status; + while (resp != expected) { + resp = RREG32_SOC15(VCN, jpeg_inst, regMMSCH_VF_MAILBOX_RESP); + + if (resp != 0) + break; + udelay(10); + tmp = tmp + 10; + if (tmp >= timeout) { + DRM_ERROR("failed to init MMSCH. TIME-OUT after %d usec"\ + " waiting for regMMSCH_VF_MAILBOX_RESP "\ + "(expected=0x%08x, readback=0x%08x)\n", + tmp, expected, resp); + return -EBUSY; + } + } + if (resp != expected && resp != MMSCH_VF_MAILBOX_RESP__INCOMPLETE && + init_status != MMSCH_VF_ENGINE_STATUS__PASS) + DRM_ERROR("MMSCH init status is incorrect! 
readback=0x%08x, header init status for jpeg: %x\n", + resp, init_status); + + } + return 0; +} + /** * jpeg_v5_0_1_start - start JPEG block * @@ -581,6 +696,11 @@ static int jpeg_v5_0_1_set_powergating_state(struct amdgpu_ip_block *ip_block, struct amdgpu_device *adev = ip_block->adev; int ret; + if (amdgpu_sriov_vf(adev)) { + adev->jpeg.cur_state = AMD_PG_STATE_UNGATE; + return 0; + } + if (state == adev->jpeg.cur_state) return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/mmsch_v5_0.h b/drivers/gpu/drm/amd/amdgpu/mmsch_v5_0.h new file mode 100644 index 000000000000..6f749814929f --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/mmsch_v5_0.h @@ -0,0 +1,144 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#ifndef __MMSCH_V5_0_H__ +#define __MMSCH_V5_0_H__ + +#include "amdgpu_vcn.h" + +#define MMSCH_VERSION_MAJOR 5 +#define MMSCH_VERSION_MINOR 0 +#define MMSCH_VERSION (MMSCH_VERSION_MAJOR << 16 | MMSCH_VERSION_MINOR) + +#define RB_ENABLED (1 << 0) +#define RB4_ENABLED (1 << 1) + +#define MMSCH_VF_ENGINE_STATUS__PASS 0x1 + +#define MMSCH_VF_MAILBOX_RESP__OK 0x1 +#define MMSCH_VF_MAILBOX_RESP__INCOMPLETE 0x2 +#define MMSCH_VF_MAILBOX_RESP__FAILED 0x3 +#define MMSCH_VF_MAILBOX_RESP__FAILED_SMALL_CTX_SIZE 0x4 +#define MMSCH_VF_MAILBOX_RESP__UNKNOWN_CMD 0x5 + +enum mmsch_v5_0_command_type { + MMSCH_COMMAND__DIRECT_REG_WRITE = 0, + MMSCH_COMMAND__DIRECT_REG_POLLING = 2, + MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE = 3, + MMSCH_COMMAND__INDIRECT_REG_WRITE = 8, + MMSCH_COMMAND__END = 0xf +}; + +struct mmsch_v5_0_table_info { + uint32_t init_status; + uint32_t table_offset; + uint32_t table_size; +}; + +struct mmsch_v5_0_init_header { + uint32_t version; + uint32_t total_size; + struct mmsch_v5_0_table_info vcn0; + struct mmsch_v5_0_table_info mjpegdec0[5]; + struct mmsch_v5_0_table_info mjpegdec1[5]; +}; + +struct mmsch_v5_0_cmd_direct_reg_header { + uint32_t reg_offset : 28; + uint32_t command_type : 4; +}; + +struct mmsch_v5_0_cmd_indirect_reg_header { + uint32_t reg_offset : 20; + uint32_t reg_idx_space : 8; + uint32_t command_type : 4; +}; + +struct mmsch_v5_0_cmd_direct_write { + struct mmsch_v5_0_cmd_direct_reg_header cmd_header; + uint32_t reg_value; +}; + +struct mmsch_v5_0_cmd_direct_read_modify_write { + struct mmsch_v5_0_cmd_direct_reg_header cmd_header; + uint32_t write_data; + uint32_t mask_value; +}; + +struct mmsch_v5_0_cmd_direct_polling { + struct mmsch_v5_0_cmd_direct_reg_header cmd_header; + uint32_t mask_value; + uint32_t wait_value; +}; + +struct mmsch_v5_0_cmd_end { + struct mmsch_v5_0_cmd_direct_reg_header cmd_header; +}; + +struct mmsch_v5_0_cmd_indirect_write { + struct mmsch_v5_0_cmd_indirect_reg_header cmd_header; + uint32_t reg_value; +}; + +#define MMSCH_V5_0_INSERT_DIRECT_RD_MOD_WT(reg, mask, data) { \ + size = sizeof(struct mmsch_v5_0_cmd_direct_read_modify_write); \ + size_dw = size / 4; \ + direct_rd_mod_wt.cmd_header.reg_offset = reg; \ + direct_rd_mod_wt.mask_value = mask; \ + direct_rd_mod_wt.write_data = data; \ + memcpy((void *)table_loc, &direct_rd_mod_wt, size); \ + table_loc += size_dw; \ + table_size += size_dw; \ +} + +#define MMSCH_V5_0_INSERT_DIRECT_WT(reg, value) { \ + size = sizeof(struct mmsch_v5_0_cmd_direct_write); \ + size_dw = size / 4; \ + direct_wt.cmd_header.reg_offset = reg; \ + direct_wt.reg_value = value; \ + memcpy((void *)table_loc, &direct_wt, size); \ + table_loc += size_dw; \ + table_size += size_dw; \ +} + +#define MMSCH_V5_0_INSERT_DIRECT_POLL(reg, mask, wait) { \ + size = sizeof(struct mmsch_v5_0_cmd_direct_polling); \ + size_dw = size / 4; \ + direct_poll.cmd_header.reg_offset = reg; \ + direct_poll.mask_value = mask; \ + direct_poll.wait_value = wait; \ + memcpy((void *)table_loc, &direct_poll, size); \ + table_loc += size_dw; \ + table_size += size_dw; \ +} + +#define MMSCH_V5_0_INSERT_END() { \ + size = sizeof(struct mmsch_v5_0_cmd_end); \ + size_dw = size / 4; \ + memcpy((void *)table_loc, &end, size); \ + table_loc += size_dw; \ + table_size += size_dw; \ +} + +#endif + diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c index f8af2cc63446..df612fd9cc50 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c @@ -71,15 +71,6 @@ 
MODULE_FIRMWARE("amdgpu/psp_14_0_4_ta.bin"); /* Retry times for vmbx ready wait */ #define PSP_VMBX_POLLING_LIMIT 3000 -/* VBIOS gfl defines */ -#define MBOX_READY_MASK 0x80000000 -#define MBOX_STATUS_MASK 0x0000FFFF -#define MBOX_COMMAND_MASK 0x00FF0000 -#define MBOX_READY_FLAG 0x80000000 -#define C2PMSG_CMD_SPI_UPDATE_ROM_IMAGE_ADDR_LO 0x2 -#define C2PMSG_CMD_SPI_UPDATE_ROM_IMAGE_ADDR_HI 0x3 -#define C2PMSG_CMD_SPI_UPDATE_FLASH_IMAGE 0x4 - /* memory training timeout define */ #define MEM_TRAIN_SEND_MSG_TIMEOUT_US 3000000 @@ -741,7 +732,8 @@ static int psp_v13_0_exec_spi_cmd(struct psp_context *psp, int cmd) /* Ring the doorbell */ WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_73, 1); - if (cmd == C2PMSG_CMD_SPI_UPDATE_FLASH_IMAGE) + if (cmd == C2PMSG_CMD_SPI_UPDATE_FLASH_IMAGE || + cmd == C2PMSG_CMD_SPI_GET_FLASH_IMAGE) ret = psp_wait_for_spirom_update(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_115), MBOX_READY_FLAG, MBOX_READY_MASK, PSP_SPIROM_UPDATE_TIMEOUT); else @@ -797,6 +789,37 @@ static int psp_v13_0_update_spirom(struct psp_context *psp, return 0; } +static int psp_v13_0_dump_spirom(struct psp_context *psp, + uint64_t fw_pri_mc_addr) +{ + struct amdgpu_device *adev = psp->adev; + int ret; + + /* Confirm PSP is ready to start */ + ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_115), + MBOX_READY_FLAG, MBOX_READY_MASK, false); + if (ret) { + dev_err(adev->dev, "PSP Not ready to start processing, ret = %d", ret); + return ret; + } + + WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_116, lower_32_bits(fw_pri_mc_addr)); + + ret = psp_v13_0_exec_spi_cmd(psp, C2PMSG_CMD_SPI_GET_ROM_IMAGE_ADDR_LO); + if (ret) + return ret; + + WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_116, upper_32_bits(fw_pri_mc_addr)); + + ret = psp_v13_0_exec_spi_cmd(psp, C2PMSG_CMD_SPI_GET_ROM_IMAGE_ADDR_HI); + if (ret) + return ret; + + ret = psp_v13_0_exec_spi_cmd(psp, C2PMSG_CMD_SPI_GET_FLASH_IMAGE); + + return ret; +} + static int psp_v13_0_vbflash_status(struct psp_context *psp) { struct amdgpu_device *adev = psp->adev; @@ -929,6 +952,7 @@ static const struct psp_funcs psp_v13_0_funcs = { .load_usbc_pd_fw = psp_v13_0_load_usbc_pd_fw, .read_usbc_pd_fw = psp_v13_0_read_usbc_pd_fw, .update_spirom = psp_v13_0_update_spirom, + .dump_spirom = psp_v13_0_dump_spirom, .vbflash_stat = psp_v13_0_vbflash_status, .fatal_error_recovery_quirk = psp_v13_0_fatal_error_recovery_quirk, .get_ras_capability = psp_v13_0_get_ras_capability, diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c index da00d6b3b6a3..e590cbdd8de9 100644 --- a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c +++ b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c @@ -174,19 +174,76 @@ static void umc_v12_0_query_ras_error_count(struct amdgpu_device *adev, umc_v12_0_reset_error_count(adev); } +static void umc_v12_0_get_retire_flip_bits(struct amdgpu_device *adev) +{ + enum amdgpu_memory_partition nps = AMDGPU_NPS1_PARTITION_MODE; + uint32_t vram_type = adev->gmc.vram_type; + struct amdgpu_umc_flip_bits *flip_bits = &(adev->umc.flip_bits); + + if (adev->gmc.gmc_funcs->query_mem_partition_mode) + nps = adev->gmc.gmc_funcs->query_mem_partition_mode(adev); + + /* default setting */ + flip_bits->flip_bits_in_pa[0] = UMC_V12_0_PA_C2_BIT; + flip_bits->flip_bits_in_pa[1] = UMC_V12_0_PA_C3_BIT; + flip_bits->flip_bits_in_pa[2] = UMC_V12_0_PA_C4_BIT; + flip_bits->flip_bits_in_pa[3] = UMC_V12_0_PA_R13_BIT; + flip_bits->flip_row_bit = 13; + flip_bits->bit_num = 4; + flip_bits->r13_in_pa = UMC_V12_0_PA_R13_BIT; + + if (nps == 
AMDGPU_NPS2_PARTITION_MODE) { + flip_bits->flip_bits_in_pa[0] = UMC_V12_0_PA_CH5_BIT; + flip_bits->flip_bits_in_pa[1] = UMC_V12_0_PA_C2_BIT; + flip_bits->flip_bits_in_pa[2] = UMC_V12_0_PA_B1_BIT; + flip_bits->r13_in_pa = UMC_V12_0_PA_R12_BIT; + } else if (nps == AMDGPU_NPS4_PARTITION_MODE) { + flip_bits->flip_bits_in_pa[0] = UMC_V12_0_PA_CH4_BIT; + flip_bits->flip_bits_in_pa[1] = UMC_V12_0_PA_CH5_BIT; + flip_bits->flip_bits_in_pa[2] = UMC_V12_0_PA_B0_BIT; + flip_bits->r13_in_pa = UMC_V12_0_PA_R11_BIT; + } + + switch (vram_type) { + case AMDGPU_VRAM_TYPE_HBM: + /* other nps modes are taken as nps1 */ + if (nps == AMDGPU_NPS2_PARTITION_MODE) + flip_bits->flip_bits_in_pa[3] = UMC_V12_0_PA_R12_BIT; + else if (nps == AMDGPU_NPS4_PARTITION_MODE) + flip_bits->flip_bits_in_pa[3] = UMC_V12_0_PA_R11_BIT; + + break; + case AMDGPU_VRAM_TYPE_HBM3E: + flip_bits->flip_bits_in_pa[3] = UMC_V12_0_PA_R12_BIT; + flip_bits->flip_row_bit = 12; + + if (nps == AMDGPU_NPS2_PARTITION_MODE) + flip_bits->flip_bits_in_pa[3] = UMC_V12_0_PA_R11_BIT; + else if (nps == AMDGPU_NPS4_PARTITION_MODE) + flip_bits->flip_bits_in_pa[3] = UMC_V12_0_PA_R10_BIT; + + break; + default: + dev_warn(adev->dev, + "Unknown HBM type, set RAS retire flip bits to the value in NPS1 mode.\n"); + break; + } + + adev->umc.retire_unit = 0x1 << flip_bits->bit_num; +} + static int umc_v12_0_convert_error_address(struct amdgpu_device *adev, struct ras_err_data *err_data, struct ta_ras_query_address_input *addr_in, struct ta_ras_query_address_output *addr_out, bool dump_addr) { - uint32_t col, col_lower, row, row_lower, bank; + uint32_t col, col_lower, row, row_lower, row_high, bank; uint32_t channel_index = 0, umc_inst = 0; - uint32_t i, loop_bits[UMC_V12_0_RETIRE_LOOP_BITS]; + uint32_t i, bit_num, retire_unit, *flip_bits; uint64_t soc_pa, column, err_addr; struct ta_ras_query_address_output addr_out_tmp; struct ta_ras_query_address_output *paddr_out; - enum amdgpu_memory_partition nps = AMDGPU_NPS1_PARTITION_MODE; int ret = 0; if (!addr_out) @@ -211,53 +268,46 @@ static int umc_v12_0_convert_error_address(struct amdgpu_device *adev, umc_inst = addr_in->ma.umc_inst; } - loop_bits[0] = UMC_V12_0_PA_C2_BIT; - loop_bits[1] = UMC_V12_0_PA_C3_BIT; - loop_bits[2] = UMC_V12_0_PA_C4_BIT; - loop_bits[3] = UMC_V12_0_PA_R13_BIT; - - if (adev->gmc.gmc_funcs->query_mem_partition_mode) - nps = adev->gmc.gmc_funcs->query_mem_partition_mode(adev); - - /* other nps modes are taken as nps1 */ - if (nps == AMDGPU_NPS2_PARTITION_MODE) { - loop_bits[0] = UMC_V12_0_PA_CH5_BIT; - loop_bits[1] = UMC_V12_0_PA_C2_BIT; - loop_bits[2] = UMC_V12_0_PA_B1_BIT; - loop_bits[3] = UMC_V12_0_PA_R12_BIT; - } - - if (nps == AMDGPU_NPS4_PARTITION_MODE) { - loop_bits[0] = UMC_V12_0_PA_CH4_BIT; - loop_bits[1] = UMC_V12_0_PA_CH5_BIT; - loop_bits[2] = UMC_V12_0_PA_B0_BIT; - loop_bits[3] = UMC_V12_0_PA_R11_BIT; - } + flip_bits = adev->umc.flip_bits.flip_bits_in_pa; + bit_num = adev->umc.flip_bits.bit_num; + retire_unit = adev->umc.retire_unit; soc_pa = paddr_out->pa.pa; channel_index = paddr_out->pa.channel_idx; /* clear loop bits in soc physical address */ - for (i = 0; i < UMC_V12_0_RETIRE_LOOP_BITS; i++) - soc_pa &= ~BIT_ULL(loop_bits[i]); + for (i = 0; i < bit_num; i++) + soc_pa &= ~BIT_ULL(flip_bits[i]); paddr_out->pa.pa = soc_pa; /* get column bit 0 and 1 in mca address */ col_lower = (err_addr >> 1) & 0x3ULL; - /* MA_R13_BIT will be handled later */ + /* extra row bit will be handled later */ row_lower = (err_addr >> UMC_V12_0_MA_R0_BIT) & 0x1fffULL; + row_lower &= 
~BIT_ULL(adev->umc.flip_bits.flip_row_bit); + + if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(9, 5, 0)) { + row_high = (soc_pa >> adev->umc.flip_bits.r13_in_pa) & 0x3ULL; + /* it's 2.25GB in each channel, from MCA address to PA + * [R14 R13] is converted if the two bits value are 0x3, + * get them from PA instead of MCA address. + */ + row_lower |= (row_high << 13); + } if (!err_data && !dump_addr) goto out; /* loop for all possibilities of retired bits */ - for (column = 0; column < UMC_V12_0_BAD_PAGE_NUM_PER_CHANNEL; column++) { + for (column = 0; column < retire_unit; column++) { soc_pa = paddr_out->pa.pa; - for (i = 0; i < UMC_V12_0_RETIRE_LOOP_BITS; i++) - soc_pa |= (((column >> i) & 0x1ULL) << loop_bits[i]); + for (i = 0; i < bit_num; i++) + soc_pa |= (((column >> i) & 0x1ULL) << flip_bits[i]); col = ((column & 0x7) << 2) | col_lower; - /* add row bit 13 */ - row = ((column >> 3) << 13) | row_lower; + /* handle extra row bit */ + if (bit_num == RETIRE_FLIP_BITS_NUM) + row = ((column >> 3) << adev->umc.flip_bits.flip_row_bit) | + row_lower; if (dump_addr) dev_info(adev->dev, @@ -435,8 +485,12 @@ static int umc_v12_0_aca_bank_parser(struct aca_handle *handle, struct aca_bank bank->regs[ACA_REG_IDX_ADDR]); ext_error_code = ACA_REG__STATUS__ERRORCODEEXT(status); - count = ext_error_code == 0 ? - ACA_REG__MISC0__ERRCNT(bank->regs[ACA_REG_IDX_MISC0]) : 1ULL; + if (umc_v12_0_is_deferred_error(adev, status)) + count = ext_error_code == 0 ? + adev->umc.err_addr_cnt / adev->umc.retire_unit : 1ULL; + else + count = ext_error_code == 0 ? + ACA_REG__MISC0__ERRCNT(bank->regs[ACA_REG_IDX_MISC0]) : 1ULL; return aca_error_cache_log_bank_error(handle, &info, err_type, count); } @@ -476,8 +530,7 @@ static int umc_v12_0_update_ecc_status(struct amdgpu_device *adev, uint64_t err_addr, pa_addr = 0; struct ras_ecc_err *ecc_err; struct ta_ras_query_address_output addr_out; - enum amdgpu_memory_partition nps = AMDGPU_NPS1_PARTITION_MODE; - uint32_t shift_bit = UMC_V12_0_PA_C4_BIT; + uint32_t shift_bit = adev->umc.flip_bits.flip_bits_in_pa[2]; int count, ret, i; hwid = REG_GET_FIELD(ipid, MCMP1_IPIDT0, HardwareID); @@ -522,14 +575,6 @@ static int umc_v12_0_update_ecc_status(struct amdgpu_device *adev, ecc_err->pa_pfn = pa_addr >> AMDGPU_GPU_PAGE_SHIFT; ecc_err->channel_idx = addr_out.pa.channel_idx; - if (adev->gmc.gmc_funcs->query_mem_partition_mode) - nps = adev->gmc.gmc_funcs->query_mem_partition_mode(adev); - - if (nps == AMDGPU_NPS2_PARTITION_MODE) - shift_bit = UMC_V12_0_PA_B1_BIT; - if (nps == AMDGPU_NPS4_PARTITION_MODE) - shift_bit = UMC_V12_0_PA_B0_BIT; - /* If converted pa_pfn is 0, use pa C4 pfn. 
*/ if (!ecc_err->pa_pfn) ecc_err->pa_pfn = BIT_ULL(shift_bit) >> AMDGPU_GPU_PAGE_SHIFT; @@ -675,5 +720,6 @@ struct amdgpu_umc_ras umc_v12_0_ras = { .update_ecc_status = umc_v12_0_update_ecc_status, .convert_ras_err_addr = umc_v12_0_convert_error_address, .get_die_id_from_pa = umc_v12_0_get_die_id, + .get_retire_flip_bits = umc_v12_0_get_retire_flip_bits, }; diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.h b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.h index 056bbc038312..63b7e7254526 100644 --- a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.h +++ b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.h @@ -55,8 +55,6 @@ #define UMC_V12_0_NA_MAP_PA_NUM 8 /* R13 bit shift should be considered, double the number */ #define UMC_V12_0_BAD_PAGE_NUM_PER_CHANNEL (UMC_V12_0_NA_MAP_PA_NUM * 2) -/* C2, C3, C4, R13, four bits in MCA address are looped in retirement */ -#define UMC_V12_0_RETIRE_LOOP_BITS 4 /* column bits in SOC physical address */ #define UMC_V12_0_PA_C2_BIT 15 @@ -64,6 +62,7 @@ #define UMC_V12_0_PA_C4_BIT 21 /* row bits in SOC physical address */ #define UMC_V12_0_PA_R0_BIT 22 +#define UMC_V12_0_PA_R10_BIT 32 #define UMC_V12_0_PA_R11_BIT 33 #define UMC_V12_0_PA_R12_BIT 34 #define UMC_V12_0_PA_R13_BIT 35 diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c index ed00d35039c1..a09f9a2dd471 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c @@ -1034,6 +1034,10 @@ static int vcn_v4_0_5_start_dpg_mode(struct amdgpu_vcn_inst *vinst, ring->doorbell_index << VCN_RB1_DB_CTRL__OFFSET__SHIFT | VCN_RB1_DB_CTRL__EN_MASK); + /* Keeping one read-back to ensure all register writes are done, otherwise + * it may introduce race conditions */ + RREG32_SOC15(VCN, inst_idx, regVCN_RB1_DB_CTRL); + return 0; } @@ -1216,6 +1220,10 @@ static int vcn_v4_0_5_start(struct amdgpu_vcn_inst *vinst) WREG32_SOC15(VCN, i, regVCN_RB_ENABLE, tmp); fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF); + /* Keeping one read-back to ensure all register writes are done, otherwise + * it may introduce race conditions */ + RREG32_SOC15(VCN, i, regVCN_RB_ENABLE); + return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c index 60ee6e02e6ac..8e843011703c 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c @@ -30,6 +30,7 @@ #include "soc15_hw_ip.h" #include "vcn_v2_0.h" #include "vcn_v4_0_3.h" +#include "mmsch_v5_0.h" #include "vcn/vcn_5_0_0_offset.h" #include "vcn/vcn_5_0_0_sh_mask.h" @@ -39,6 +40,7 @@ #include <drm/drm_drv.h> +static int vcn_v5_0_1_start_sriov(struct amdgpu_device *adev); static void vcn_v5_0_1_set_unified_ring_funcs(struct amdgpu_device *adev); static void vcn_v5_0_1_set_irq_funcs(struct amdgpu_device *adev); static int vcn_v5_0_1_set_pg_state(struct amdgpu_vcn_inst *vinst, @@ -126,7 +128,14 @@ static int vcn_v5_0_1_sw_init(struct amdgpu_ip_block *ip_block) ring = &adev->vcn.inst[i].ring_enc[0]; ring->use_doorbell = true; - ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 11 * vcn_inst; + if (!amdgpu_sriov_vf(adev)) + ring->doorbell_index = + (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + + 11 * vcn_inst; + else + ring->doorbell_index = + (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + + 32 * vcn_inst; ring->vm_hub = AMDGPU_MMHUB0(adev->vcn.inst[i].aid_id); sprintf(ring->name, "vcn_unified_%d", adev->vcn.inst[i].aid_id); @@ -143,6 +152,12 @@ static int vcn_v5_0_1_sw_init(struct amdgpu_ip_block *ip_block) 
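The read-backs added in the two VCN 4.0.5 hunks above are the standard cure for posted MMIO writes: the last write may still be buffered on the interconnect when the function returns, and reading back a register on the same path forces every prior write to land before anything else can race with it. A minimal sketch of the idiom, assuming a hypothetical doorbell register and raw writel()/readl() in place of the driver's WREG32/RREG32-style wrappers:

#include <linux/bits.h>
#include <linux/io.h>

#define DB_CTRL     0x100   /* hypothetical doorbell control offset */
#define DB_CTRL_EN  BIT(0)  /* hypothetical enable bit */

/* Enable a doorbell, then flush the posted write with a read-back so
 * no other agent can start consuming state before the write is visible.
 */
static void doorbell_enable_flushed(void __iomem *mmio)
{
	writel(DB_CTRL_EN, mmio + DB_CTRL);  /* may be posted by the bus */
	(void)readl(mmio + DB_CTRL);         /* read-back: write has now landed */
}

vcn_v4_0_5_start_dpg_mode() and vcn_v4_0_5_start() above apply exactly this shape, reading back regVCN_RB1_DB_CTRL and regVCN_RB_ENABLE respectively after the final write.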
adev->vcn.supported_reset = amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]); + if (amdgpu_sriov_vf(adev)) { + r = amdgpu_virt_alloc_mm_table(adev); + if (r) + return r; + } + vcn_v5_0_0_alloc_ip_dump(adev); return amdgpu_vcn_sysfs_reset_mask_init(adev); @@ -172,6 +187,9 @@ static int vcn_v5_0_1_sw_fini(struct amdgpu_ip_block *ip_block) drm_dev_exit(idx); } + if (amdgpu_sriov_vf(adev)) + amdgpu_virt_free_mm_table(adev); + for (i = 0; i < adev->vcn.num_vcn_inst; i++) { r = amdgpu_vcn_suspend(adev, i); if (r) @@ -204,24 +222,38 @@ static int vcn_v5_0_1_hw_init(struct amdgpu_ip_block *ip_block) struct amdgpu_ring *ring; int i, r, vcn_inst; - if (RREG32_SOC15(VCN, GET_INST(VCN, 0), regVCN_RRMT_CNTL) & 0x100) - adev->vcn.caps |= AMDGPU_VCN_CAPS(RRMT_ENABLED); - for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { - vcn_inst = GET_INST(VCN, i); - ring = &adev->vcn.inst[i].ring_enc[0]; - - if (ring->use_doorbell) - adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell, - ((adev->doorbell_index.vcn.vcn_ring0_1 << 1) + - 11 * vcn_inst), - adev->vcn.inst[i].aid_id); - - /* Re-init fw_shared, if required */ - vcn_v5_0_1_fw_shared_init(adev, i); - - r = amdgpu_ring_test_helper(ring); + if (amdgpu_sriov_vf(adev)) { + r = vcn_v5_0_1_start_sriov(adev); if (r) return r; + + for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { + ring = &adev->vcn.inst[i].ring_enc[0]; + ring->wptr = 0; + ring->wptr_old = 0; + vcn_v5_0_1_unified_ring_set_wptr(ring); + ring->sched.ready = true; + } + } else { + if (RREG32_SOC15(VCN, GET_INST(VCN, 0), regVCN_RRMT_CNTL) & 0x100) + adev->vcn.caps |= AMDGPU_VCN_CAPS(RRMT_ENABLED); + for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { + vcn_inst = GET_INST(VCN, i); + ring = &adev->vcn.inst[i].ring_enc[0]; + + if (ring->use_doorbell) + adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell, + ((adev->doorbell_index.vcn.vcn_ring0_1 << 1) + + 11 * vcn_inst), + adev->vcn.inst[i].aid_id); + + /* Re-init fw_shared, if required */ + vcn_v5_0_1_fw_shared_init(adev, i); + + r = amdgpu_ring_test_helper(ring); + if (r) + return r; + } } return 0; @@ -663,6 +695,195 @@ static int vcn_v5_0_1_start_dpg_mode(struct amdgpu_vcn_inst *vinst, return 0; } +static int vcn_v5_0_1_start_sriov(struct amdgpu_device *adev) +{ + int i, vcn_inst; + struct amdgpu_ring *ring_enc; + uint64_t cache_addr; + uint64_t rb_enc_addr; + uint64_t ctx_addr; + uint32_t param, resp, expected; + uint32_t offset, cache_size; + uint32_t tmp, timeout; + + struct amdgpu_mm_table *table = &adev->virt.mm_table; + uint32_t *table_loc; + uint32_t table_size; + uint32_t size, size_dw; + uint32_t init_status; + uint32_t enabled_vcn; + + struct mmsch_v5_0_cmd_direct_write + direct_wt = { {0} }; + struct mmsch_v5_0_cmd_direct_read_modify_write + direct_rd_mod_wt = { {0} }; + struct mmsch_v5_0_cmd_end end = { {0} }; + struct mmsch_v5_0_init_header header; + + volatile struct amdgpu_vcn5_fw_shared *fw_shared; + volatile struct amdgpu_fw_shared_rb_setup *rb_setup; + + direct_wt.cmd_header.command_type = + MMSCH_COMMAND__DIRECT_REG_WRITE; + direct_rd_mod_wt.cmd_header.command_type = + MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE; + end.cmd_header.command_type = MMSCH_COMMAND__END; + + for (i = 0; i < adev->vcn.num_vcn_inst; i++) { + vcn_inst = GET_INST(VCN, i); + + vcn_v5_0_1_fw_shared_init(adev, vcn_inst); + + memset(&header, 0, sizeof(struct mmsch_v5_0_init_header)); + header.version = MMSCH_VERSION; + header.total_size = sizeof(struct mmsch_v5_0_init_header) >> 2; + + table_loc = (uint32_t *)table->cpu_addr; + table_loc += 
header.total_size; + + table_size = 0; + + MMSCH_V5_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCN, 0, regUVD_STATUS), + ~UVD_STATUS__UVD_BUSY, UVD_STATUS__UVD_BUSY); + + cache_size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.inst[i].fw->size + 4); + + if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { + MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0, + regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), + adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_lo); + + MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0, + regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), + adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_hi); + + offset = 0; + MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0, + regUVD_VCPU_CACHE_OFFSET0), 0); + } else { + MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0, + regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), + lower_32_bits(adev->vcn.inst[i].gpu_addr)); + MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0, + regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), + upper_32_bits(adev->vcn.inst[i].gpu_addr)); + offset = cache_size; + MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0, + regUVD_VCPU_CACHE_OFFSET0), + AMDGPU_UVD_FIRMWARE_OFFSET >> 3); + } + + MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0, + regUVD_VCPU_CACHE_SIZE0), + cache_size); + + cache_addr = adev->vcn.inst[vcn_inst].gpu_addr + offset; + MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0, + regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), lower_32_bits(cache_addr)); + MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0, + regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), upper_32_bits(cache_addr)); + MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0, + regUVD_VCPU_CACHE_OFFSET1), 0); + MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0, + regUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE); + + cache_addr = adev->vcn.inst[vcn_inst].gpu_addr + offset + + AMDGPU_VCN_STACK_SIZE; + + MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0, + regUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW), lower_32_bits(cache_addr)); + + MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0, + regUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH), upper_32_bits(cache_addr)); + + MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0, + regUVD_VCPU_CACHE_OFFSET2), 0); + + MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0, + regUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE); + + fw_shared = adev->vcn.inst[vcn_inst].fw_shared.cpu_addr; + rb_setup = &fw_shared->rb_setup; + + ring_enc = &adev->vcn.inst[vcn_inst].ring_enc[0]; + ring_enc->wptr = 0; + rb_enc_addr = ring_enc->gpu_addr; + + rb_setup->is_rb_enabled_flags |= RB_ENABLED; + rb_setup->rb_addr_lo = lower_32_bits(rb_enc_addr); + rb_setup->rb_addr_hi = upper_32_bits(rb_enc_addr); + rb_setup->rb_size = ring_enc->ring_size / 4; + fw_shared->present_flag_0 |= cpu_to_le32(AMDGPU_VCN_VF_RB_SETUP_FLAG); + + MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0, + regUVD_LMI_VCPU_NC0_64BIT_BAR_LOW), + lower_32_bits(adev->vcn.inst[vcn_inst].fw_shared.gpu_addr)); + MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0, + regUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH), + upper_32_bits(adev->vcn.inst[vcn_inst].fw_shared.gpu_addr)); + MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0, + regUVD_VCPU_NONCACHE_SIZE0), + AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn4_fw_shared))); + MMSCH_V5_0_INSERT_END(); + + header.vcn0.init_status = 0; + header.vcn0.table_offset = header.total_size; + header.vcn0.table_size = table_size; + header.total_size += table_size; + + /* Send init table to mmsch */ + size = sizeof(struct mmsch_v5_0_init_header); + table_loc = (uint32_t 
*)table->cpu_addr; + memcpy((void *)table_loc, &header, size); + + ctx_addr = table->gpu_addr; + WREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_CTX_ADDR_LO, lower_32_bits(ctx_addr)); + WREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_CTX_ADDR_HI, upper_32_bits(ctx_addr)); + + tmp = RREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_VMID); + tmp &= ~MMSCH_VF_VMID__VF_CTX_VMID_MASK; + tmp |= (0 << MMSCH_VF_VMID__VF_CTX_VMID__SHIFT); + WREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_VMID, tmp); + + size = header.total_size; + WREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_CTX_SIZE, size); + + WREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_MAILBOX_RESP, 0); + + param = 0x00000001; + WREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_MAILBOX_HOST, param); + tmp = 0; + timeout = 1000; + resp = 0; + expected = MMSCH_VF_MAILBOX_RESP__OK; + while (resp != expected) { + resp = RREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_MAILBOX_RESP); + if (resp != 0) + break; + + udelay(10); + tmp = tmp + 10; + if (tmp >= timeout) { + DRM_ERROR("failed to init MMSCH. TIME-OUT after %d usec"\ + " waiting for regMMSCH_VF_MAILBOX_RESP "\ + "(expected=0x%08x, readback=0x%08x)\n", + tmp, expected, resp); + return -EBUSY; + } + } + + enabled_vcn = amdgpu_vcn_is_disabled_vcn(adev, VCN_DECODE_RING, 0) ? 1 : 0; + init_status = ((struct mmsch_v5_0_init_header *)(table_loc))->vcn0.init_status; + if (resp != expected && resp != MMSCH_VF_MAILBOX_RESP__INCOMPLETE + && init_status != MMSCH_VF_ENGINE_STATUS__PASS) { + DRM_ERROR("MMSCH init status is incorrect! readback=0x%08x, header init "\ + "status for VCN%x: 0x%x\n", resp, enabled_vcn, init_status); + } + } + + return 0; +} + /** * vcn_v5_0_1_start - VCN start * @@ -1103,8 +1324,18 @@ static int vcn_v5_0_1_set_clockgating_state(struct amdgpu_ip_block *ip_block, static int vcn_v5_0_1_set_pg_state(struct amdgpu_vcn_inst *vinst, enum amd_powergating_state state) { + struct amdgpu_device *adev = vinst->adev; int ret = 0; + /* for SRIOV, guest should not control VCN Power-gating + * MMSCH FW should control Power-gating and clock-gating + * guest should avoid touching CGC and PG + */ + if (amdgpu_sriov_vf(adev)) { + vinst->cur_state = AMD_PG_STATE_UNGATE; + return 0; + } + if (state == vinst->cur_state) return 0; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c index 54870b4c5000..3e1ad8974797 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c @@ -175,8 +175,7 @@ static bool event_interrupt_isr_v10(struct kfd_node *dev, data[0], data[1], data[2], data[3], data[4], data[5], data[6], data[7]); - /* If there is no valid PASID, it's likely a bug */ - if (WARN_ONCE(pasid == 0, "Bug: No PASID in KFD interrupt")) + if (pasid == 0) return 0; /* Interrupt types we care about: various signals and faults. diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c index eb88ba8d8b01..2788a52714d1 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c @@ -287,8 +287,7 @@ static bool event_interrupt_isr_v11(struct kfd_node *dev, data[0], data[1], data[2], data[3], data[4], data[5], data[6], data[7]); - /* If there is no valid PASID, it's likely a bug */ - if (WARN_ONCE(pasid == 0, "Bug: No PASID in KFD interrupt")) + if (pasid == 0) return false; /* Interrupt types we care about: various signals and faults. 
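The vcn_v5_0_1_start_sriov() handshake above boils down to a poll-with-timeout loop: clear the response register, kick the host mailbox, then poll every 10 microseconds until the MMSCH answers or roughly 1000 microseconds elapse. A condensed sketch, assuming hypothetical mbox_rd()/mbox_wr() accessors and register offsets in place of the RREG32_SOC15/WREG32_SOC15 macros and regMMSCH_VF_MAILBOX_* registers:

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/types.h>

/* Hypothetical mailbox offsets and accessors (stand-ins only). */
#define MBOX_HOST  0x0
#define MBOX_RESP  0x4
static void mbox_wr(u32 reg, u32 val);
static u32 mbox_rd(u32 reg);

/* Kick a request, then poll for a reply: 10us per step, ~1000us budget. */
static int mmsch_style_handshake(u32 expected)
{
	u32 resp, waited_us = 0;

	mbox_wr(MBOX_RESP, 0);           /* clear any stale response */
	mbox_wr(MBOX_HOST, 0x00000001);  /* submit the request */

	for (;;) {
		resp = mbox_rd(MBOX_RESP);
		if (resp)                /* nonzero: the engine replied */
			break;
		udelay(10);
		waited_us += 10;
		if (waited_us >= 1000)
			return -EBUSY;   /* engine never answered */
	}

	return resp == expected ? 0 : -EIO;
}

The real function is more forgiving: it also tolerates MMSCH_VF_MAILBOX_RESP__INCOMPLETE and cross-checks the per-engine init_status that the MMSCH writes back into the table header.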
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c index 271c567242ab..b1a6eb349bb3 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c @@ -31,6 +31,7 @@ #define OVER_SUBSCRIPTION_PROCESS_COUNT (1 << 0) #define OVER_SUBSCRIPTION_COMPUTE_QUEUE_COUNT (1 << 1) #define OVER_SUBSCRIPTION_GWS_QUEUE_COUNT (1 << 2) +#define OVER_SUBSCRIPTION_XNACK_CONFLICT (1 << 3) static inline void inc_wptr(unsigned int *wptr, unsigned int increment_bytes, unsigned int buffer_size_bytes) @@ -44,7 +45,8 @@ static inline void inc_wptr(unsigned int *wptr, unsigned int increment_bytes, static void pm_calc_rlib_size(struct packet_manager *pm, unsigned int *rlib_size, - int *over_subscription) + int *over_subscription, + int xnack_conflict) { unsigned int process_count, queue_count, compute_queue_count, gws_queue_count; unsigned int map_queue_size; @@ -73,6 +75,8 @@ static void pm_calc_rlib_size(struct packet_manager *pm, *over_subscription |= OVER_SUBSCRIPTION_COMPUTE_QUEUE_COUNT; if (gws_queue_count > 1) *over_subscription |= OVER_SUBSCRIPTION_GWS_QUEUE_COUNT; + if (xnack_conflict && (node->adev->gmc.xnack_flags & AMDGPU_GMC_XNACK_FLAG_CHAIN)) + *over_subscription |= OVER_SUBSCRIPTION_XNACK_CONFLICT; if (*over_subscription) dev_dbg(dev, "Over subscribed runlist\n"); @@ -96,7 +100,8 @@ static int pm_allocate_runlist_ib(struct packet_manager *pm, unsigned int **rl_buffer, uint64_t *rl_gpu_buffer, unsigned int *rl_buffer_size, - int *is_over_subscription) + int *is_over_subscription, + int xnack_conflict) { struct kfd_node *node = pm->dqm->dev; struct device *dev = node->adev->dev; @@ -105,7 +110,8 @@ static int pm_allocate_runlist_ib(struct packet_manager *pm, if (WARN_ON(pm->allocated)) return -EINVAL; - pm_calc_rlib_size(pm, rl_buffer_size, is_over_subscription); + pm_calc_rlib_size(pm, rl_buffer_size, is_over_subscription, + xnack_conflict); mutex_lock(&pm->lock); @@ -142,11 +148,27 @@ static int pm_create_runlist_ib(struct packet_manager *pm, struct queue *q; struct kernel_queue *kq; int is_over_subscription; + int xnack_enabled = -1; + bool xnack_conflict = 0; rl_wptr = retval = processes_mapped = 0; + /* Check if processes set different xnack modes */ + list_for_each_entry(cur, queues, list) { + qpd = cur->qpd; + if (xnack_enabled < 0) + /* First process */ + xnack_enabled = qpd->pqm->process->xnack_enabled; + else if (qpd->pqm->process->xnack_enabled != xnack_enabled) { + /* Found a process with a different xnack mode */ + xnack_conflict = 1; + break; + } + } + retval = pm_allocate_runlist_ib(pm, &rl_buffer, rl_gpu_addr, - &alloc_size_bytes, &is_over_subscription); + &alloc_size_bytes, &is_over_subscription, + xnack_conflict); if (retval) return retval; @@ -156,9 +178,13 @@ static int pm_create_runlist_ib(struct packet_manager *pm, dev_dbg(dev, "Building runlist ib process count: %d queues count %d\n", pm->dqm->processes_count, pm->dqm->active_queue_count); +build_runlist_ib: /* build the run list ib packet */ list_for_each_entry(cur, queues, list) { qpd = cur->qpd; + /* group processes with the same xnack mode together */ + if (qpd->pqm->process->xnack_enabled != xnack_enabled) + continue; /* build map process packet */ if (processes_mapped >= pm->dqm->processes_count) { dev_dbg(dev, "Not enough space left in runlist IB\n"); @@ -215,18 +241,26 @@ static int pm_create_runlist_ib(struct packet_manager *pm, alloc_size_bytes); } } + if (xnack_conflict) { + /* pick up processes with the other 
xnack mode */ + xnack_enabled = !xnack_enabled; + xnack_conflict = 0; + goto build_runlist_ib; + } dev_dbg(dev, "Finished map process and queues to runlist\n"); if (is_over_subscription) { if (!pm->is_over_subscription) - dev_warn(dev, "Runlist is getting oversubscribed due to%s%s%s. Expect reduced ROCm performance.\n", - is_over_subscription & OVER_SUBSCRIPTION_PROCESS_COUNT ? - " too many processes." : "", - is_over_subscription & OVER_SUBSCRIPTION_COMPUTE_QUEUE_COUNT ? - " too many queues." : "", - is_over_subscription & OVER_SUBSCRIPTION_GWS_QUEUE_COUNT ? - " multiple processes using cooperative launch." : ""); + dev_warn(dev, "Runlist is getting oversubscribed due to%s%s%s%s. Expect reduced ROCm performance.\n", + is_over_subscription & OVER_SUBSCRIPTION_PROCESS_COUNT ? + " too many processes" : "", + is_over_subscription & OVER_SUBSCRIPTION_COMPUTE_QUEUE_COUNT ? + " too many queues" : "", + is_over_subscription & OVER_SUBSCRIPTION_GWS_QUEUE_COUNT ? + " multiple processes using cooperative launch" : "", + is_over_subscription & OVER_SUBSCRIPTION_XNACK_CONFLICT ? + " xnack on/off processes mixed on gfx9" : ""); retval = pm->pmf->runlist(pm, &rl_buffer[rl_wptr], *rl_gpu_addr, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c index fa28c57692b8..8fa6489b6f5d 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c @@ -203,6 +203,8 @@ static int pm_set_resources_v9(struct packet_manager *pm, uint32_t *buffer, queue_type__mes_set_resources__hsa_interface_queue_hiq; packet->bitfields2.vmid_mask = res->vmid_mask; packet->bitfields2.unmap_latency = KFD_UNMAP_LATENCY_MS / 100; + if (pm->dqm->dev->adev->gmc.xnack_flags & AMDGPU_GMC_XNACK_FLAG_CHAIN) + packet->bitfields2.enb_xnack_retry_disable_check = 1; packet->bitfields7.oac_mask = res->oac_mask; packet->bitfields8.gds_heap_base = res->gds_heap_base; packet->bitfields8.gds_heap_size = res->gds_heap_size; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_ai.h b/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_ai.h index cd8611401a66..e356a207d03c 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_ai.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_ai.h @@ -63,7 +63,8 @@ struct pm4_mes_set_resources { struct { uint32_t vmid_mask:16; uint32_t unmap_latency:8; - uint32_t reserved1:5; + uint32_t reserved1:4; + uint32_t enb_xnack_retry_disable_check:1; enum mes_set_resources_queue_type_enum queue_type:3; } bitfields2; uint32_t ordinal2; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile b/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile index ab2a97e354da..7329b8cc2576 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile @@ -38,6 +38,7 @@ AMDGPUDM = \ amdgpu_dm_pp_smu.o \ amdgpu_dm_psr.o \ amdgpu_dm_replay.o \ + amdgpu_dm_quirks.o \ amdgpu_dm_wb.o ifdef CONFIG_DRM_AMD_DC_FP diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 8984e211dd1c..4b4e9241619f 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -80,7 +80,6 @@ #include <linux/power_supply.h> #include <linux/firmware.h> #include <linux/component.h> -#include <linux/dmi.h> #include <linux/sort.h> #include <drm/display/drm_dp_mst_helper.h> @@ -374,6 +373,8 @@ get_crtc_by_otg_inst(struct amdgpu_device *adev, static inline bool 
is_dc_timing_adjust_needed(struct dm_crtc_state *old_state, struct dm_crtc_state *new_state) { + if (new_state->stream->adjust.timing_adjust_pending) + return true; if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED) return true; else if (amdgpu_dm_crtc_vrr_active(old_state) != amdgpu_dm_crtc_vrr_active(new_state)) @@ -866,7 +867,7 @@ static void dmub_hpd_callback(struct amdgpu_device *adev, static void dmub_hpd_sense_callback(struct amdgpu_device *adev, struct dmub_notification *notify) { - DRM_DEBUG_DRIVER("DMUB HPD SENSE callback.\n"); + drm_dbg_driver(adev_to_drm(adev), "DMUB HPD SENSE callback.\n"); } /** @@ -963,7 +964,7 @@ static void dm_dmub_outbox1_low_irq(void *interrupt_params) trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count, entry.param0, entry.param1); - DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n", + drm_dbg_driver(adev_to_drm(adev), "trace_code:%u, tick_count:%u, param0:%u, param1:%u\n", entry.trace_code, entry.tick_count, entry.param0, entry.param1); } else break; @@ -973,7 +974,7 @@ static void dm_dmub_outbox1_low_irq(void *interrupt_params) } while (count <= DMUB_TRACE_MAX_READ); if (count > DMUB_TRACE_MAX_READ) - DRM_DEBUG_DRIVER("Warning : count > DMUB_TRACE_MAX_READ"); + drm_dbg_driver(adev_to_drm(adev), "Warning : count > DMUB_TRACE_MAX_READ"); if (dc_enable_dmub_notifications(adev->dm.dc) && irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) { @@ -1677,153 +1678,6 @@ static bool dm_should_disable_stutter(struct pci_dev *pdev) return false; } -struct amdgpu_dm_quirks { - bool aux_hpd_discon; - bool support_edp0_on_dp1; -}; - -static struct amdgpu_dm_quirks quirk_entries = { - .aux_hpd_discon = false, - .support_edp0_on_dp1 = false -}; - -static int edp0_on_dp1_callback(const struct dmi_system_id *id) -{ - quirk_entries.support_edp0_on_dp1 = true; - return 0; -} - -static int aux_hpd_discon_callback(const struct dmi_system_id *id) -{ - quirk_entries.aux_hpd_discon = true; - return 0; -} - -static const struct dmi_system_id dmi_quirk_table[] = { - { - .callback = aux_hpd_discon_callback, - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3660"), - }, - }, - { - .callback = aux_hpd_discon_callback, - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3260"), - }, - }, - { - .callback = aux_hpd_discon_callback, - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3460"), - }, - }, - { - .callback = aux_hpd_discon_callback, - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Tower Plus 7010"), - }, - }, - { - .callback = aux_hpd_discon_callback, - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Tower 7010"), - }, - }, - { - .callback = aux_hpd_discon_callback, - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex SFF Plus 7010"), - }, - }, - { - .callback = aux_hpd_discon_callback, - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex SFF 7010"), - }, - }, - { - .callback = aux_hpd_discon_callback, - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Micro Plus 7010"), - }, - }, - { - .callback = aux_hpd_discon_callback, - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Micro 7010"), - }, - }, - { - .callback = 
edp0_on_dp1_callback, - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "HP"), - DMI_MATCH(DMI_PRODUCT_NAME, "HP Elite mt645 G8 Mobile Thin Client"), - }, - }, - { - .callback = edp0_on_dp1_callback, - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "HP"), - DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook 645 14 inch G11 Notebook PC"), - }, - }, - { - .callback = edp0_on_dp1_callback, - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "HP"), - DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook 665 16 inch G11 Notebook PC"), - }, - }, - { - .callback = edp0_on_dp1_callback, - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "HP"), - DMI_MATCH(DMI_PRODUCT_NAME, "HP ProBook 445 14 inch G11 Notebook PC"), - }, - }, - { - .callback = edp0_on_dp1_callback, - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "HP"), - DMI_MATCH(DMI_PRODUCT_NAME, "HP ProBook 465 16 inch G11 Notebook PC"), - }, - }, - {} - /* TODO: refactor this from a fixed table to a dynamic option */ -}; - -static void retrieve_dmi_info(struct amdgpu_display_manager *dm, struct dc_init_data *init_data) -{ - int dmi_id; - struct drm_device *dev = dm->ddev; - - dm->aux_hpd_discon_quirk = false; - init_data->flags.support_edp0_on_dp1 = false; - - dmi_id = dmi_check_system(dmi_quirk_table); - - if (!dmi_id) - return; - - if (quirk_entries.aux_hpd_discon) { - dm->aux_hpd_discon_quirk = true; - drm_info(dev, "aux_hpd_discon_quirk attached\n"); - } - if (quirk_entries.support_edp0_on_dp1) { - init_data->flags.support_edp0_on_dp1 = true; - drm_info(dev, "support_edp0_on_dp1 attached\n"); - } -} void* dm_allocate_gpu_mem( @@ -2110,7 +1964,9 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) if (amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(3, 0, 0)) init_data.num_virtual_links = 1; - retrieve_dmi_info(&adev->dm, &init_data); + retrieve_dmi_info(&adev->dm); + if (adev->dm.edp0_on_dp1_quirk) + init_data.flags.support_edp0_on_dp1 = true; if (adev->dm.bb_from_dmub) init_data.bb_from_dmub = adev->dm.bb_from_dmub; @@ -2200,7 +2056,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) drm_err(adev_to_drm(adev), "amdgpu: failed to initialize freesync_module.\n"); } else - DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n", + drm_dbg_driver(adev_to_drm(adev), "amdgpu: freesync_module init done %p.\n", adev->dm.freesync_module); amdgpu_dm_init_color_mod(); @@ -2222,7 +2078,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) if (!adev->dm.hdcp_workqueue) drm_err(adev_to_drm(adev), "amdgpu: failed to initialize hdcp_workqueue.\n"); else - DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue); + drm_dbg_driver(adev_to_drm(adev), "amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue); dc_init_callbacks(adev->dm.dc, &init_params); } @@ -2299,7 +2155,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) #endif - DRM_DEBUG_DRIVER("KMS initialized.\n"); + drm_dbg_driver(adev_to_drm(adev), "KMS initialized.\n"); return 0; error: @@ -5097,7 +4953,7 @@ amdgpu_dm_register_backlight_device(struct amdgpu_dm_connector *aconnector) drm_err(drm, "DM: Backlight registration failed!\n"); dm->backlight_dev[aconnector->bl_idx] = NULL; } else - DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name); + drm_dbg_driver(drm, "DM: Registered Backlight device: %s\n", bl_name); } static int initialize_plane(struct amdgpu_display_manager *dm, @@ -6749,7 +6605,7 @@ get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector, m_pref = list_first_entry_or_null( &aconnector->base.modes, struct drm_display_mode, head); if (!m_pref) { - 
DRM_DEBUG_DRIVER("No preferred mode found in EDID\n"); + drm_dbg_driver(aconnector->base.dev, "No preferred mode found in EDID\n"); return NULL; } } @@ -6924,7 +6780,7 @@ static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector, dc_link_get_highest_encoding_format(aconnector->dc_link), &stream->timing.dsc_cfg)) { stream->timing.flags.DSC = 1; - DRM_DEBUG_DRIVER("%s: SST_DSC [%s] DSC is selected from SST RX\n", + drm_dbg_driver(drm_connector->dev, "%s: SST_DSC [%s] DSC is selected from SST RX\n", __func__, drm_connector->name); } } else if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) { @@ -6944,7 +6800,7 @@ static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector, dc_link_get_highest_encoding_format(aconnector->dc_link), &stream->timing.dsc_cfg)) { stream->timing.flags.DSC = 1; - DRM_DEBUG_DRIVER("%s: SST_DSC [%s] DSC is selected from DP-HDMI PCON\n", + drm_dbg_driver(drm_connector->dev, "%s: SST_DSC [%s] DSC is selected from DP-HDMI PCON\n", __func__, drm_connector->name); } } @@ -7053,7 +6909,7 @@ create_stream_for_sink(struct drm_connector *connector, * case, we call set mode ourselves to restore the previous mode * and the modelist may not be filled in time. */ - DRM_DEBUG_DRIVER("No preferred mode found\n"); + drm_dbg_driver(dev, "No preferred mode found\n"); } else if (aconnector) { recalculate_timing = amdgpu_freesync_vid_mode && is_freesync_video_mode(&mode, aconnector); @@ -9201,7 +9057,7 @@ static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state, */ WARN_ON(amdgpu_dm_crtc_set_vupdate_irq(new_state->base.crtc, true) != 0); WARN_ON(drm_crtc_vblank_get(new_state->base.crtc) != 0); - DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n", + drm_dbg_driver(new_state->base.crtc->dev, "%s: crtc=%u VRR off->on: Get vblank ref\n", __func__, new_state->base.crtc->base.id); } else if (old_vrr_active && !new_vrr_active) { /* Transition VRR active -> inactive: @@ -9209,7 +9065,7 @@ static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state, */ WARN_ON(amdgpu_dm_crtc_set_vupdate_irq(new_state->base.crtc, false) != 0); drm_crtc_vblank_put(new_state->base.crtc); - DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n", + drm_dbg_driver(new_state->base.crtc->dev, "%s: crtc=%u VRR on->off: Drop vblank ref\n", __func__, new_state->base.crtc->base.id); } } @@ -10836,6 +10692,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm, struct dm_atomic_state *dm_state = NULL; struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state; struct dc_stream_state *new_stream; + struct amdgpu_device *adev = dm->adev; int ret = 0; /* @@ -10889,7 +10746,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm, */ if (!new_stream) { - DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n", + drm_dbg_driver(adev_to_drm(adev), "%s: Failed to create new stream for crtc %d\n", __func__, acrtc->base.base.id); ret = -ENOMEM; goto fail; @@ -10927,7 +10784,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm, dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) && dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) { new_crtc_state->mode_changed = false; - DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d", + drm_dbg_driver(adev_to_drm(adev), "Mode change not required, setting mode_changed to %d", new_crtc_state->mode_changed); } } @@ -10965,7 +10822,7 @@ static int dm_update_crtc_state(struct 
amdgpu_display_manager *dm, is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state)) { new_crtc_state->mode_changed = false; - DRM_DEBUG_DRIVER( + drm_dbg_driver(adev_to_drm(adev), "Mode change not required for front porch change, setting mode_changed to %d", new_crtc_state->mode_changed); @@ -10986,7 +10843,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm, if (ret) goto fail; - DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n", + drm_dbg_driver(adev_to_drm(adev), "Disabling DRM crtc: %d\n", crtc->base.id); /* i.e. reset mode */ @@ -12844,7 +12701,7 @@ int amdgpu_dm_process_dmub_aux_transfer_sync( payload->address, payload->length, p_notify->result); } - *operation_result = AUX_RET_ERROR_INVALID_REPLY; + *operation_result = p_notify->result; goto out; } @@ -12853,7 +12710,8 @@ int amdgpu_dm_process_dmub_aux_transfer_sync( /* The reply is stored in the top nibble of the command. */ payload->reply[0] = (adev->dm.dmub_notify->aux_reply.command >> 4) & 0xF; - if (!payload->write && p_notify->aux_reply.length) + /*write req may receive a byte indicating partially written number as well*/ + if (p_notify->aux_reply.length) memcpy(payload->data, p_notify->aux_reply.data, p_notify->aux_reply.length); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h index 4b615071e93b..d7d92f9911e4 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h @@ -619,6 +619,13 @@ struct amdgpu_display_manager { bool aux_hpd_discon_quirk; /** + * @edp0_on_dp1_quirk: + * + * quirk for platforms that put edp0 on DP1. + */ + bool edp0_on_dp1_quirk; + + /** * @dpia_aux_lock: * * Guards access to DPIA AUX @@ -1068,4 +1075,6 @@ void hdmi_cec_set_edid(struct amdgpu_dm_connector *aconnector); void hdmi_cec_unset_edid(struct amdgpu_dm_connector *aconnector); int amdgpu_dm_initialize_hdmi_connector(struct amdgpu_dm_connector *aconnector); +void retrieve_dmi_info(struct amdgpu_display_manager *dm); + #endif /* __AMDGPU_DM_H__ */ diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c index d19aea595722..25e8befbcc47 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c @@ -62,6 +62,7 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux, enum aux_return_code_type operation_result; struct amdgpu_device *adev; struct ddc_service *ddc; + uint8_t copy[16]; if (WARN_ON(msg->size > 16)) return -E2BIG; @@ -77,6 +78,11 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux, (msg->request & DP_AUX_I2C_WRITE_STATUS_UPDATE) != 0; payload.defer_delay = 0; + if (payload.write) { + memcpy(copy, msg->buffer, msg->size); + payload.data = copy; + } + result = dc_link_aux_transfer_raw(TO_DM_AUX(aux)->ddc_service, &payload, &operation_result); @@ -100,9 +106,9 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux, */ if (payload.write && result >= 0) { if (result) { - /*one byte indicating partially written bytes. 
Force 0 to retry*/ - drm_info(adev_to_drm(adev), "amdgpu: AUX partially written\n"); - result = 0; + /*one byte indicating partially written bytes*/ + drm_dbg_dp(adev_to_drm(adev), "amdgpu: AUX partially written\n"); + result = payload.data[0]; } else if (!payload.reply[0]) /*I2C_ACK|AUX_ACK*/ result = msg->size; @@ -127,11 +133,11 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux, break; } - drm_info(adev_to_drm(adev), "amdgpu: DP AUX transfer fail:%d\n", operation_result); + drm_dbg_dp(adev_to_drm(adev), "amdgpu: DP AUX transfer fail:%d\n", operation_result); } if (payload.reply[0]) - drm_info(adev_to_drm(adev), "amdgpu: AUX reply command not ACK: 0x%02x.", + drm_dbg_dp(adev_to_drm(adev), "amdgpu: AUX reply command not ACK: 0x%02x.", payload.reply[0]); return result; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_quirks.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_quirks.c new file mode 100644 index 000000000000..1da07ebf9217 --- /dev/null +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_quirks.c @@ -0,0 +1,178 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright 2025 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#include <linux/dmi.h> + +#include "amdgpu.h" +#include "amdgpu_dm.h" + +struct amdgpu_dm_quirks { + bool aux_hpd_discon; + bool support_edp0_on_dp1; +}; + +static struct amdgpu_dm_quirks quirk_entries = { + .aux_hpd_discon = false, + .support_edp0_on_dp1 = false +}; + +static int edp0_on_dp1_callback(const struct dmi_system_id *id) +{ + quirk_entries.support_edp0_on_dp1 = true; + return 0; +} + +static int aux_hpd_discon_callback(const struct dmi_system_id *id) +{ + quirk_entries.aux_hpd_discon = true; + return 0; +} + +static const struct dmi_system_id dmi_quirk_table[] = { + { + .callback = aux_hpd_discon_callback, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3660"), + }, + }, + { + .callback = aux_hpd_discon_callback, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3260"), + }, + }, + { + .callback = aux_hpd_discon_callback, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3460"), + }, + }, + { + .callback = aux_hpd_discon_callback, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Tower Plus 7010"), + }, + }, + { + .callback = aux_hpd_discon_callback, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Tower 7010"), + }, + }, + { + .callback = aux_hpd_discon_callback, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex SFF Plus 7010"), + }, + }, + { + .callback = aux_hpd_discon_callback, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex SFF 7010"), + }, + }, + { + .callback = aux_hpd_discon_callback, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Micro Plus 7010"), + }, + }, + { + .callback = aux_hpd_discon_callback, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Micro 7010"), + }, + }, + { + .callback = edp0_on_dp1_callback, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "HP"), + DMI_MATCH(DMI_PRODUCT_NAME, "HP Elite mt645 G8 Mobile Thin Client"), + }, + }, + { + .callback = edp0_on_dp1_callback, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "HP"), + DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook 645 14 inch G11 Notebook PC"), + }, + }, + { + .callback = edp0_on_dp1_callback, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "HP"), + DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook 665 16 inch G11 Notebook PC"), + }, + }, + { + .callback = edp0_on_dp1_callback, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "HP"), + DMI_MATCH(DMI_PRODUCT_NAME, "HP ProBook 445 14 inch G11 Notebook PC"), + }, + }, + { + .callback = edp0_on_dp1_callback, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "HP"), + DMI_MATCH(DMI_PRODUCT_NAME, "HP ProBook 465 16 inch G11 Notebook PC"), + }, + }, + {} + /* TODO: refactor this from a fixed table to a dynamic option */ +}; + +void retrieve_dmi_info(struct amdgpu_display_manager *dm) +{ + struct drm_device *dev = dm->ddev; + int dmi_id; + + dm->aux_hpd_discon_quirk = false; + dm->edp0_on_dp1_quirk = false; + + dmi_id = dmi_check_system(dmi_quirk_table); + + if (!dmi_id) + return; + + if (quirk_entries.aux_hpd_discon) { + dm->aux_hpd_discon_quirk = true; + drm_info(dev, "aux_hpd_discon_quirk attached\n"); + } + if (quirk_entries.support_edp0_on_dp1) { + dm->edp0_on_dp1_quirk = true; + drm_info(dev, "support_edp0_on_dp1 attached\n"); + } 
+} diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index afd298e69d4e..56d011a1323c 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -36,6 +36,7 @@ #include "resource.h" #include "dc_state.h" #include "dc_state_priv.h" +#include "dc_plane.h" #include "dc_plane_priv.h" #include "dc_stream_priv.h" @@ -440,9 +441,12 @@ bool dc_stream_adjust_vmin_vmax(struct dc *dc, * Don't adjust DRR while there's bandwidth optimizations pending to * avoid conflicting with firmware updates. */ - if (dc->ctx->dce_version > DCE_VERSION_MAX) - if (dc->optimized_required || dc->wm_optimized_required) + if (dc->ctx->dce_version > DCE_VERSION_MAX) { + if (dc->optimized_required || dc->wm_optimized_required) { + stream->adjust.timing_adjust_pending = true; return false; + } + } dc_exit_ips_for_hw_access(dc); @@ -2330,11 +2334,15 @@ enum dc_status dc_commit_streams(struct dc *dc, struct dc_commit_streams_params for (i = 0; i < params->stream_count; i++) { struct dc_stream_state *stream = params->streams[i]; struct dc_stream_status *status = dc_stream_get_status(stream); + struct dc_sink *sink = stream->sink; /* revalidate streams */ - res = dc_validate_stream(dc, stream); - if (res != DC_OK) - return res; + if (!dc_is_virtual_signal(sink->sink_signal)) { + res = dc_validate_stream(dc, stream); + if (res != DC_OK) + return res; + } + dc_stream_log(dc, stream); @@ -3240,7 +3248,8 @@ static void copy_stream_update_to_stream(struct dc *dc, if (update->crtc_timing_adjust) { if (stream->adjust.v_total_min != update->crtc_timing_adjust->v_total_min || - stream->adjust.v_total_max != update->crtc_timing_adjust->v_total_max) + stream->adjust.v_total_max != update->crtc_timing_adjust->v_total_max || + stream->adjust.timing_adjust_pending) update->crtc_timing_adjust->timing_adjust_pending = true; stream->adjust = *update->crtc_timing_adjust; update->crtc_timing_adjust->timing_adjust_pending = false; @@ -3320,7 +3329,7 @@ static void backup_planes_and_stream_state( return; for (i = 0; i < status->plane_count; i++) { - scratch->plane_states[i] = *status->plane_states[i]; + dc_plane_copy_config(&scratch->plane_states[i], status->plane_states[i]); } scratch->stream_state = *stream; } @@ -3336,10 +3345,7 @@ static void restore_planes_and_stream_state( return; for (i = 0; i < status->plane_count; i++) { - /* refcount will always be valid, restore everything else */ - struct kref refcount = status->plane_states[i]->refcount; - *status->plane_states[i] = scratch->plane_states[i]; - status->plane_states[i]->refcount = refcount; + dc_plane_copy_config(status->plane_states[i], &scratch->plane_states[i]); } *stream = scratch->stream_state; } @@ -4244,12 +4250,6 @@ static void commit_planes_for_stream(struct dc *dc, if (update_type == UPDATE_TYPE_FAST) continue; - ASSERT(!pipe_ctx->plane_state->triplebuffer_flips); - if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) { - /*turn off triple buffer for full update*/ - dc->hwss.program_triplebuffer( - dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips); - } stream_status = stream_get_status(context, pipe_ctx->stream); @@ -4258,6 +4258,25 @@ static void commit_planes_for_stream(struct dc *dc, dc, pipe_ctx->stream, stream_status->plane_count, context); } } + + for (j = 0; j < dc->res_pool->pipe_count; j++) { + struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; + + if (!pipe_ctx->plane_state) + continue; + + /* Full fe update*/ + if (update_type == 
UPDATE_TYPE_FAST) + continue; + + ASSERT(!pipe_ctx->plane_state->triplebuffer_flips); + if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) { + /*turn off triple buffer for full update*/ + dc->hwss.program_triplebuffer( + dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips); + } + } + if (dc->hwss.program_front_end_for_ctx && update_type != UPDATE_TYPE_FAST) { dc->hwss.program_front_end_for_ctx(dc, context); diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index 13559fe56062..3da25bd8b578 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -3911,6 +3911,10 @@ enum dc_status resource_map_pool_resources( if (!dc->link_srv->dp_decide_link_settings(stream, &pipe_ctx->link_config.dp_link_settings)) return DC_FAIL_DP_LINK_BANDWIDTH; + + dc->link_srv->dp_decide_tunnel_settings(stream, + &pipe_ctx->link_config.dp_tunnel_settings); + if (dc->link_srv->dp_get_encoding_format( &pipe_ctx->link_config.dp_link_settings) == DP_128b_132b_ENCODING) { pipe_ctx->stream_res.hpo_dp_stream_enc = @@ -5521,6 +5525,14 @@ struct dscl_prog_data *resource_get_dscl_prog_data(struct pipe_ctx *pipe_ctx) return &pipe_ctx->plane_res.scl_data.dscl_prog_data; } +static bool resource_allocate_mcache(struct dc_state *context, const struct dc_mcache_params *mcache_params) +{ + if (context->clk_mgr->ctx->dc->res_pool->funcs->program_mcache_pipe_config) + context->clk_mgr->ctx->dc->res_pool->funcs->program_mcache_pipe_config(context, mcache_params); + + return true; +} + void resource_init_common_dml2_callbacks(struct dc *dc, struct dml2_configuration_options *dml2_options) { dml2_options->callbacks.dc = dc; @@ -5540,6 +5552,7 @@ void resource_init_common_dml2_callbacks(struct dc *dc, struct dml2_configuratio dml2_options->callbacks.get_stream_status = &dc_state_get_stream_status; dml2_options->callbacks.get_stream_from_id = &dc_state_get_stream_from_id; dml2_options->callbacks.get_max_flickerless_instant_vtotal_increase = &dc_stream_get_max_flickerless_instant_vtotal_increase; + dml2_options->callbacks.allocate_mcache = &resource_allocate_mcache; dml2_options->svp_pstate.callbacks.dc = dc; dml2_options->svp_pstate.callbacks.add_phantom_plane = &dc_state_add_phantom_plane; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c index e6fcc21bb9bc..922f23557f5d 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c @@ -109,7 +109,8 @@ struct dc_plane_state *dc_create_plane_state(const struct dc *dc) ***************************************************************************** */ const struct dc_plane_status *dc_plane_get_status( - const struct dc_plane_state *plane_state) + const struct dc_plane_state *plane_state, + union dc_plane_status_update_flags flags) { const struct dc_plane_status *plane_status; struct dc *dc; @@ -136,7 +137,7 @@ const struct dc_plane_status *dc_plane_get_status( if (pipe_ctx->plane_state != plane_state) continue; - if (pipe_ctx->plane_state) + if (pipe_ctx->plane_state && flags.bits.address) pipe_ctx->plane_state->status.is_flip_pending = false; break; @@ -151,7 +152,8 @@ const struct dc_plane_status *dc_plane_get_status( if (pipe_ctx->plane_state != plane_state) continue; - dc->hwss.update_pending_status(pipe_ctx); + if (flags.bits.address) + dc->hwss.update_pending_status(pipe_ctx); } return plane_status; @@ -294,3 +296,17 @@ 
void dc_plane_force_dcc_and_tiling_disable(struct dc_plane_state *plane_state, dc->hwss.clear_surface_dcc_and_tiling(pipe_ctx, plane_state, clear_tiling); } } + +void dc_plane_copy_config(struct dc_plane_state *dst, const struct dc_plane_state *src) +{ + struct kref temp_refcount; + + /* backup persistent info */ + memcpy(&temp_refcount, &dst->refcount, sizeof(struct kref)); + + /* copy all configuration information */ + memcpy(dst, src, sizeof(struct dc_plane_state)); + + /* restore persistent info */ + memcpy(&dst->refcount, &temp_refcount, sizeof(struct kref)); +} diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 2230e36c4f12..1d917be36fc4 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -53,7 +53,7 @@ struct aux_payload; struct set_config_cmd_payload; struct dmub_notification; -#define DC_VER "3.2.331" +#define DC_VER "3.2.334" /** * MAX_SURFACES - representative of the upper bound of surfaces that can be piped to a single CRTC diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c index 6115b5364394..afbcf866520e 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c +++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c @@ -39,6 +39,7 @@ #define CTX dc_dmub_srv->ctx #define DC_LOGGER CTX->logger +#define GPINT_RETRY_NUM 20 static void dc_dmub_srv_construct(struct dc_dmub_srv *dc_srv, struct dc *dc, struct dmub_srv *dmub) @@ -207,7 +208,7 @@ static bool dc_dmub_srv_fb_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_sr return false; do { - status = dmub_srv_wait_for_inbox_free(dmub, 100000, count - i); + status = dmub_srv_wait_for_inbox_free(dmub, 100000, count - i); } while (dc_dmub_srv->ctx->dc->debug.disable_timeout && status != DMUB_STATUS_OK); /* Requeue the command. 
*/ @@ -247,6 +248,9 @@ bool dc_dmub_srv_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_srv, } else { res = dc_dmub_srv_fb_cmd_list_queue_execute(dc_dmub_srv, count, cmd_list); } + + if (res) + res = dmub_srv_update_inbox_status(dc_dmub_srv->dmub) == DMUB_STATUS_OK; } return res; @@ -1885,11 +1889,14 @@ void dc_dmub_srv_ips_query_residency_info(struct dc_dmub_srv *dc_dmub_srv, struc if (command_code == DMUB_GPINT__INVALID_COMMAND) return; - // send gpint commands and wait for ack - if (!dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__GET_IPS_RESIDENCY_PERCENT, - (uint16_t)(output->ips_mode), - &output->residency_percent, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) - output->residency_percent = 0; + for (i = 0; i < GPINT_RETRY_NUM; i++) { + // false could mean GPINT timeout, in which case we should retry + if (dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__GET_IPS_RESIDENCY_PERCENT, + (uint16_t)(output->ips_mode), &output->residency_percent, + DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) + break; + udelay(100); + } if (!dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__GET_IPS_RESIDENCY_ENTRY_COUNTER, (uint16_t)(output->ips_mode), diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h index 77c87ad57220..0bad8304ccf6 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h @@ -159,6 +159,11 @@ struct dc_link_settings { uint8_t link_rate_set; }; +struct dc_tunnel_settings { + bool should_enable_dp_tunneling; + bool should_use_dp_bw_allocation; +}; + union dc_dp_ffe_preset { struct { uint8_t level : 4; @@ -943,10 +948,20 @@ union dpia_info { uint8_t raw; }; +/* DPCD[0xE0020] USB4_DRIVER_BW_CAPABILITY register. */ +union usb4_driver_bw_cap { + struct { + uint8_t rsvd :7; + uint8_t driver_bw_alloc_support :1; + } bits; + uint8_t raw; +}; + /* DP Tunneling over USB4 */ struct dpcd_usb4_dp_tunneling_info { union dp_tun_cap_support dp_tun_cap; union dpia_info dpia_info; + union usb4_driver_bw_cap driver_bw_cap; uint8_t usb4_driver_id; uint8_t usb4_topology_id[DPCD_USB4_TOPOLOGY_ID_LEN]; }; @@ -1486,5 +1501,11 @@ struct dp_trace { # ifndef DP_TUNNELING_BW_ALLOC_CAP_CHANGED # define DP_TUNNELING_BW_ALLOC_CAP_CHANGED (1 << 3) # endif +# ifndef DPTX_BW_ALLOC_UNMASK_IRQ +# define DPTX_BW_ALLOC_UNMASK_IRQ (1 << 6) +# endif +# ifndef DPTX_BW_ALLOC_MODE_ENABLE +# define DPTX_BW_ALLOC_MODE_ENABLE (1 << 7) +# endif #endif /* DC_DP_TYPES_H */ diff --git a/drivers/gpu/drm/amd/display/dc/dc_plane.h b/drivers/gpu/drm/amd/display/dc/dc_plane.h index e9413685ed4f..14feb843e694 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_plane.h +++ b/drivers/gpu/drm/amd/display/dc/dc_plane.h @@ -28,13 +28,24 @@ #include "dc_hw_types.h" +union dc_plane_status_update_flags { + struct { + uint32_t address : 1; + } bits; + uint32_t raw; +}; + struct dc_plane_state *dc_create_plane_state(const struct dc *dc); const struct dc_plane_status *dc_plane_get_status( - const struct dc_plane_state *plane_state); + const struct dc_plane_state *plane_state, + union dc_plane_status_update_flags flags); void dc_plane_state_retain(struct dc_plane_state *plane_state); void dc_plane_state_release(struct dc_plane_state *plane_state); void dc_plane_force_dcc_and_tiling_disable(struct dc_plane_state *plane_state, bool clear_tiling); + +void dc_plane_copy_config(struct dc_plane_state *dst, const struct dc_plane_state *src); + #endif /* _DC_PLANE_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c 
b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c index d9159ca55412..92f0a099d089 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c @@ -195,9 +195,9 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_5_soc = { .dcn_downspread_percent = 0.5, .gpuvm_min_page_size_bytes = 4096, .hostvm_min_page_size_bytes = 4096, - .do_urgent_latency_adjustment = 1, + .do_urgent_latency_adjustment = 0, .urgent_latency_adjustment_fabric_clock_component_us = 0, - .urgent_latency_adjustment_fabric_clock_reference_mhz = 3000, + .urgent_latency_adjustment_fabric_clock_reference_mhz = 0, }; void dcn35_build_wm_range_table_fpu(struct clk_mgr *clk_mgr) diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c index ed75319a07d5..d47cacfdb695 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c @@ -916,7 +916,7 @@ static void populate_dml21_plane_config_from_plane_state(struct dml2_context *dm } //TODO : Could be possibly moved to a common helper layer. -static bool dml21_wrapper_get_plane_id(const struct dc_state *context, const struct dc_plane_state *plane, unsigned int *plane_id) +static bool dml21_wrapper_get_plane_id(const struct dc_state *context, unsigned int stream_id, const struct dc_plane_state *plane, unsigned int *plane_id) { int i, j; @@ -924,10 +924,12 @@ static bool dml21_wrapper_get_plane_id(const struct dc_state *context, const str return false; for (i = 0; i < context->stream_count; i++) { - for (j = 0; j < context->stream_status[i].plane_count; j++) { - if (context->stream_status[i].plane_states[j] == plane) { - *plane_id = (i << 16) | j; - return true; + if (context->streams[i]->stream_id == stream_id) { + for (j = 0; j < context->stream_status[i].plane_count; j++) { + if (context->stream_status[i].plane_states[j] == plane) { + *plane_id = (i << 16) | j; + return true; + } } } } @@ -950,14 +952,14 @@ static unsigned int map_stream_to_dml21_display_cfg(const struct dml2_context *d return location; } -static unsigned int map_plane_to_dml21_display_cfg(const struct dml2_context *dml_ctx, +unsigned int map_plane_to_dml21_display_cfg(const struct dml2_context *dml_ctx, unsigned int stream_id, const struct dc_plane_state *plane, const struct dc_state *context) { unsigned int plane_id; int i = 0; int location = -1; - if (!dml21_wrapper_get_plane_id(context, plane, &plane_id)) { + if (!dml21_wrapper_get_plane_id(context, stream_id, plane, &plane_id)) { ASSERT(false); return -1; } @@ -1043,7 +1045,7 @@ bool dml21_map_dc_state_into_dml_display_cfg(const struct dc *in_dc, struct dc_s dml_dispcfg->plane_descriptors[disp_cfg_plane_location].stream_index = disp_cfg_stream_location; } else { for (plane_index = 0; plane_index < context->stream_status[stream_index].plane_count; plane_index++) { - disp_cfg_plane_location = map_plane_to_dml21_display_cfg(dml_ctx, context->stream_status[stream_index].plane_states[plane_index], context); + disp_cfg_plane_location = map_plane_to_dml21_display_cfg(dml_ctx, context->streams[stream_index]->stream_id, context->stream_status[stream_index].plane_states[plane_index], context); if (disp_cfg_plane_location < 0) disp_cfg_plane_location = dml_dispcfg->num_planes++; @@ -1054,7 +1056,7 @@ bool dml21_map_dc_state_into_dml_display_cfg(const struct dc *in_dc, struct dc_s 
populate_dml21_plane_config_from_plane_state(dml_ctx, &dml_dispcfg->plane_descriptors[disp_cfg_plane_location], context->stream_status[stream_index].plane_states[plane_index], context, stream_index); dml_dispcfg->plane_descriptors[disp_cfg_plane_location].stream_index = disp_cfg_stream_location; - if (dml21_wrapper_get_plane_id(context, context->stream_status[stream_index].plane_states[plane_index], &dml_ctx->v21.dml_to_dc_pipe_mapping.disp_cfg_to_plane_id[disp_cfg_plane_location])) + if (dml21_wrapper_get_plane_id(context, context->streams[stream_index]->stream_id, context->stream_status[stream_index].plane_states[plane_index], &dml_ctx->v21.dml_to_dc_pipe_mapping.disp_cfg_to_plane_id[disp_cfg_plane_location])) dml_ctx->v21.dml_to_dc_pipe_mapping.disp_cfg_to_plane_id_valid[disp_cfg_plane_location] = true; /* apply forced pstate policy */ diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.h index 069b939c672a..73a013be1e48 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.h @@ -11,6 +11,7 @@ struct dc_state; struct dcn_watermarks; union dcn_watermark_set; struct pipe_ctx; +struct dc_plane_state; struct dml2_context; struct dml2_configuration_options; @@ -25,4 +26,5 @@ void dml21_extract_watermark_sets(const struct dc *in_dc, union dcn_watermark_se void dml21_map_hw_resources(struct dml2_context *dml_ctx); void dml21_get_pipe_mcache_config(struct dc_state *context, struct pipe_ctx *pipe_ctx, struct dml2_per_plane_programming *pln_prog, struct dml2_pipe_configuration_descriptor *mcache_pipe_config); void dml21_set_dc_p_state_type(struct pipe_ctx *pipe_ctx, struct dml2_per_stream_programming *stream_programming, bool sub_vp_enabled); +unsigned int map_plane_to_dml21_display_cfg(const struct dml2_context *dml_ctx, unsigned int stream_id, const struct dc_plane_state *plane, const struct dc_state *context); #endif diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c index ed6584535e89..208d3651b6ba 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c @@ -12,6 +12,8 @@ #include "dml21_translation_helper.h" #include "dml2_dc_resource_mgmt.h" +#define INVALID -1 + static bool dml21_allocate_memory(struct dml2_context **dml_ctx) { *dml_ctx = vzalloc(sizeof(struct dml2_context)); @@ -208,10 +210,40 @@ static void dml21_calculate_rq_and_dlg_params(const struct dc *dc, struct dc_sta } } +static void dml21_prepare_mcache_params(struct dml2_context *dml_ctx, struct dc_state *context, struct dc_mcache_params *mcache_params) +{ + int dc_plane_idx = 0; + int dml_prog_idx, stream_idx, plane_idx; + struct dml2_per_plane_programming *pln_prog = NULL; + + for (stream_idx = 0; stream_idx < context->stream_count; stream_idx++) { + for (plane_idx = 0; plane_idx < context->stream_status[stream_idx].plane_count; plane_idx++) { + dml_prog_idx = map_plane_to_dml21_display_cfg(dml_ctx, context->streams[stream_idx]->stream_id, context->stream_status[stream_idx].plane_states[plane_idx], context); + if (dml_prog_idx == INVALID) { + continue; + } + pln_prog = &dml_ctx->v21.mode_programming.programming->plane_programming[dml_prog_idx]; + mcache_params[dc_plane_idx].valid = pln_prog->mcache_allocation.valid; + mcache_params[dc_plane_idx].num_mcaches_plane0 = 
pln_prog->mcache_allocation.num_mcaches_plane0; + mcache_params[dc_plane_idx].num_mcaches_plane1 = pln_prog->mcache_allocation.num_mcaches_plane1; + mcache_params[dc_plane_idx].requires_dedicated_mall_mcache = pln_prog->mcache_allocation.requires_dedicated_mall_mcache; + mcache_params[dc_plane_idx].last_slice_sharing.plane0_plane1 = pln_prog->mcache_allocation.last_slice_sharing.plane0_plane1; + memcpy(mcache_params[dc_plane_idx].mcache_x_offsets_plane0, + pln_prog->mcache_allocation.mcache_x_offsets_plane0, + sizeof(int) * (DML2_MAX_MCACHES + 1)); + memcpy(mcache_params[dc_plane_idx].mcache_x_offsets_plane1, + pln_prog->mcache_allocation.mcache_x_offsets_plane1, + sizeof(int) * (DML2_MAX_MCACHES + 1)); + dc_plane_idx++; + } + } +} + static bool dml21_mode_check_and_programming(const struct dc *in_dc, struct dc_state *context, struct dml2_context *dml_ctx) { bool result = false; struct dml2_build_mode_programming_in_out *mode_programming = &dml_ctx->v21.mode_programming; + struct dc_mcache_params mcache_params[MAX_PLANES] = {0}; memset(&dml_ctx->v21.display_config, 0, sizeof(struct dml2_display_cfg)); memset(&dml_ctx->v21.dml_to_dc_pipe_mapping, 0, sizeof(struct dml2_dml_to_dc_pipe_mapping)); @@ -246,6 +278,14 @@ static bool dml21_mode_check_and_programming(const struct dc *in_dc, struct dc_s dml2_map_dc_pipes(dml_ctx, context, NULL, &dml_ctx->v21.dml_to_dc_pipe_mapping, in_dc->current_state); /* if subvp phantoms are present, expand them into dc context */ dml21_handle_phantom_streams_planes(in_dc, context, dml_ctx); + + if (in_dc->res_pool->funcs->program_mcache_pipe_config) { + //Prepare mcache params for each plane based on mcache output from DML + dml21_prepare_mcache_params(dml_ctx, context, mcache_params); + + //Populate mcache regs for each pipe + dml_ctx->config.callbacks.allocate_mcache(context, mcache_params); + } } /* Copy DML CLK, WM and REG outputs to bandwidth context */ diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.h index b2075b8c363b..42e715024bc9 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.h @@ -8,6 +8,7 @@ #include "os_types.h" #include "dml_top_soc_parameter_types.h" +#include "dml_top_display_cfg_types.h" struct dc; struct dc_state; @@ -65,4 +66,67 @@ struct socbb_ip_params_external { struct dml2_ip_capabilities ip_params; struct dml2_soc_bb soc_bb; }; + +/* mcache parameters decided by DML */ +struct dc_mcache_params { + bool valid; + /* + * For iMALL, dedicated mall mcaches are required (sharing of the last + * slice is possible); for legacy phantom or phantom without return, + * only the mall mcaches need to be valid. + */ + bool requires_dedicated_mall_mcache; + unsigned int num_mcaches_plane0; + unsigned int num_mcaches_plane1; + /* + * Generally, plane0/1 slices must use a disjoint set of caches, + * but in some cases the final segment of the two planes can + * use the same cache. If plane0_plane1 is set, then this is + * allowed. + * + * Similarly, the caches allocated to the MALL prefetcher are generally + * disjoint, but if mall_prefetch is set, then the final segment + * between the main and the mall pixel requestor can use the same + * cache. + * + * Note that both bits may be set at the same time.
+ */ + struct { + bool mall_comb_mcache_p0; + bool mall_comb_mcache_p1; + bool plane0_plane1; + } last_slice_sharing; + /* + * A plane is divided into vertical slices of mcaches, + * which wrap on the surface width. + * + * For example, if the surface width is 7680, and split into + * three slices of equal width, the boundary array would contain + * [2560, 5120, 7680] + * + * The assignments are: + * 0 = [0 .. 2559] + * 1 = [2560 .. 5119] + * 2 = [5120 .. 7679] + * 0 = [7680 .. INF] + * The final element is implicitly the same as the first, and + * at first seems invalid since it is never referenced (it lies + * outside the surface). However, it is useful when shifting + * (see below). + * + * For any given valid mcache assignment, a shifted version, wrapped + * on the surface width boundary, is also assumed to be valid. + * + * For example, shifting [2560, 5120, 7680] by -50 results in + * [2510, 5070, 7630]. + * + * The assignments are now: + * 0 = [0 .. 2509] + * 1 = [2510 .. 5069] + * 2 = [5070 .. 7629] + * 0 = [7630 .. INF] + */ + int mcache_x_offsets_plane0[DML2_MAX_MCACHES + 1]; + int mcache_x_offsets_plane1[DML2_MAX_MCACHES + 1]; +}; #endif diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h index 785226945699..5100f269368e 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h @@ -40,6 +40,7 @@ struct dc_sink; struct dc_stream_state; struct resource_context; struct display_stream_compressor; +struct dc_mcache_params; // Configuration of the MALL on the SoC struct dml2_soc_mall_info { @@ -107,6 +108,7 @@ struct dml2_dc_callbacks { unsigned int (*get_max_flickerless_instant_vtotal_increase)( struct dc_stream_state *stream, bool is_gaming); + bool (*allocate_mcache)(struct dc_state *context, const struct dc_mcache_params *mcache_params); }; struct dml2_dc_svp_callbacks { diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c index 1236e0f9a256..712aff7e17f7 100644 --- a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c @@ -120,10 +120,11 @@ void dpp401_set_cursor_attributes( enum dc_cursor_color_format color_format = cursor_attributes->color_format; int cur_rom_en = 0; - // DCN4 should always do Cursor degamma for Cursor Color modes if (color_format == CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA || color_format == CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA) { - cur_rom_en = 1; + if (cursor_attributes->attribute_flags.bits.ENABLE_CURSOR_DEGAMMA) { + cur_rom_en = 1; + } } REG_UPDATE_3(CURSOR0_CONTROL, diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c index 5ed195377a6c..baed31611477 100644 --- a/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c @@ -1032,7 +1032,7 @@ static struct hubp_funcs dcn401_hubp_funcs = { .hubp_program_3dlut_fl_tmz_protected = hubp401_program_3dlut_fl_tmz_protected, .hubp_program_3dlut_fl_crossbar = hubp401_program_3dlut_fl_crossbar, .hubp_get_3dlut_fl_done = hubp401_get_3dlut_fl_done, - .hubp_clear_tiling = hubp2_clear_tiling, + .hubp_clear_tiling = hubp401_clear_tiling, }; bool hubp401_construct( diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c index
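/*
 * Standalone sketch (not from the patch) of the slice assignment documented in
 * the dc_mcache_params comment above: mcache_x_offsets_* holds ascending slice
 * end boundaries, the last entry equals the surface width, and any x at or
 * beyond the last boundary wraps back to slice 0.
 */
static int example_mcache_slice_for_x(const int *offsets, int num_offsets, int x)
{
	int i;

	for (i = 0; i < num_offsets; i++) {
		if (x < offsets[i])
			return i;
	}
	return 0; /* wrap region past the final boundary */
}

/*
 * With offsets [2560, 5120, 7680]: x = 100 -> slice 0, x = 5119 -> slice 1,
 * x = 7700 -> slice 0. With the -50 shifted array [2510, 5070, 7630]:
 * x = 2509 -> slice 0, x = 2510 -> slice 1, x = 7630 -> slice 0.
 */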
5e78b553adbd..858288c3b1ac 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c @@ -2053,7 +2053,7 @@ void dcn20_program_front_end_for_ctx( for (i = 0; i < dc->res_pool->pipe_count; i++) { pipe = &context->res_ctx.pipe_ctx[i]; - if (!pipe->top_pipe && !pipe->prev_odm_pipe && pipe->plane_state) { + if (pipe->plane_state) { ASSERT(!pipe->plane_state->triplebuffer_flips); /*turn off triple buffer for full update*/ dc->hwss.program_triplebuffer( diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c index 63077c1fad85..c814d957305a 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c @@ -1550,7 +1550,7 @@ static bool should_avoid_empty_tu(struct pipe_ctx *pipe_ctx) struct dc_link_settings *link_settings = &pipe_ctx->link_config.dp_link_settings; const struct dc *dc = pipe_ctx->stream->link->dc; - if (pipe_ctx->stream->link->ep_type != DISPLAY_ENDPOINT_USB4_DPIA) + if (pipe_ctx->link_config.dp_tunnel_settings.should_enable_dp_tunneling == false) return false; // Not necessary for MST configurations diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c index ae7194da5987..c4177a9a662f 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c @@ -525,11 +525,11 @@ bool dcn401_program_rmcm_luts( enum MCM_LUT_XABLE shaper_xable, lut3d_xable = MCM_LUT_DISABLE, lut1d_xable; enum hubp_3dlut_fl_mode mode; enum hubp_3dlut_fl_addressing_mode addr_mode; - enum hubp_3dlut_fl_format format; - enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_y_g; - enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_cb_b; - enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_cr_r; - enum hubp_3dlut_fl_width width; + enum hubp_3dlut_fl_format format = 0; + enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_y_g = 0; + enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_cb_b = 0; + enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_cr_r = 0; + enum hubp_3dlut_fl_width width = 0; struct dc *dc = hubp->ctx->dc; bool bypass_rmcm_3dlut = false; @@ -654,9 +654,9 @@ void dcn401_populate_mcm_luts(struct dc *dc, enum hubp_3dlut_fl_mode mode; enum hubp_3dlut_fl_width width = 0; enum hubp_3dlut_fl_addressing_mode addr_mode; - enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_y_g; - enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_cb_b; - enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_cr_r; + enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_y_g = 0; + enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_cb_b = 0; + enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_cr_r = 0; enum MCM_LUT_XABLE shaper_xable = MCM_LUT_DISABLE; enum MCM_LUT_XABLE lut3d_xable = MCM_LUT_DISABLE; enum MCM_LUT_XABLE lut1d_xable = MCM_LUT_DISABLE; @@ -2260,9 +2260,9 @@ void dcn401_program_pipe( dc->res_pool->hubbub, pipe_ctx->plane_res.hubp->inst, pipe_ctx->hubp_regs.det_size); } - if (pipe_ctx->update_flags.raw || - (pipe_ctx->plane_state && pipe_ctx->plane_state->update_flags.raw) || - pipe_ctx->stream->update_flags.raw) + if (pipe_ctx->plane_state && (pipe_ctx->update_flags.raw || + pipe_ctx->plane_state->update_flags.raw || + pipe_ctx->stream->update_flags.raw)) dc->hwss.update_dchubp_dpp(dc, pipe_ctx, 
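/*
 * Simplified sketch (stand-in types, not the dc structs) of the flag gating
 * rearranged in dcn401_program_pipe() above: the union-with-raw pattern used
 * by pipe_ctx->update_flags lets one compare test "any update bit set", and
 * the reordered condition requires a plane before any flags are consulted.
 */
#include <stdbool.h>
#include <stdint.h>

union example_update_flags {
	struct {
		uint32_t enable : 1;
		uint32_t det_size : 1;
		/* ...more bits... */
	} bits;
	uint32_t raw;
};

static bool example_needs_update_dchubp_dpp(bool has_plane_state,
					    union example_update_flags pipe_flags,
					    union example_update_flags plane_flags,
					    union example_update_flags stream_flags)
{
	return has_plane_state && (pipe_flags.raw || plane_flags.raw || stream_flags.raw);
}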
context); if (pipe_ctx->plane_state && (pipe_ctx->update_flags.bits.enable || @@ -2361,7 +2361,7 @@ void dcn401_program_front_end_for_ctx( for (i = 0; i < dc->res_pool->pipe_count; i++) { pipe = &context->res_ctx.pipe_ctx[i]; - if (!pipe->top_pipe && !pipe->prev_odm_pipe && pipe->plane_state) { + if (pipe->plane_state) { if (pipe->plane_state->triplebuffer_flips) BREAK_TO_DEBUGGER(); diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h index 051bd83680d1..0cf349cafb3e 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h +++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h @@ -65,6 +65,7 @@ struct resource_pool; struct dc_state; struct resource_context; struct clk_bw_params; +struct dc_mcache_params; struct resource_funcs { enum engine_id (*get_preferred_eng_id_dpia)(unsigned int dpia_index); @@ -220,6 +221,8 @@ struct resource_funcs { unsigned int (*get_max_hw_cursor_size)(const struct dc *dc, struct dc_state *state, const struct dc_stream_state *stream); + bool (*program_mcache_pipe_config)(struct dc_state *context, + const struct dc_mcache_params *mcache_params); }; struct audio_support{ @@ -384,7 +387,9 @@ struct link_resource { struct link_config { struct dc_link_settings dp_link_settings; + struct dc_tunnel_settings dp_tunnel_settings; }; + union pipe_update_flags { struct { uint32_t enable : 1; diff --git a/drivers/gpu/drm/amd/display/dc/inc/link.h b/drivers/gpu/drm/amd/display/dc/inc/link.h index 2948a696ee12..7d16351bba99 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/link.h +++ b/drivers/gpu/drm/amd/display/dc/inc/link.h @@ -207,6 +207,9 @@ struct link_service { bool (*dp_decide_link_settings)( struct dc_stream_state *stream, struct dc_link_settings *link_setting); + void (*dp_decide_tunnel_settings)( + struct dc_stream_state *stream, + struct dc_tunnel_settings *dp_tunnel_setting); enum dp_link_encoding (*mst_decide_link_encoding_format)( const struct dc_link *link); bool (*edp_decide_link_settings)(struct dc_link *link, diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h index 7a87a7c07c1b..a890f581f4e8 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/resource.h +++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h @@ -32,6 +32,7 @@ #define MEMORY_TYPE_MULTIPLIER_CZ 4 #define MEMORY_TYPE_HBM 2 +#define MAX_MCACHES 8 #define IS_PIPE_SYNCD_VALID(pipe) ((((pipe)->pipe_idx_syncd) & 0x80)?1:0) @@ -65,6 +66,13 @@ struct resource_straps { uint32_t audio_stream_number; }; +struct dc_mcache_allocations { + int global_mcache_ids_plane0[MAX_MCACHES + 1]; + int global_mcache_ids_plane1[MAX_MCACHES + 1]; + int global_mcache_ids_mall_plane0[MAX_MCACHES + 1]; + int global_mcache_ids_mall_plane1[MAX_MCACHES + 1]; +}; + struct resource_create_funcs { void (*read_dce_straps)( struct dc_context *ctx, struct resource_straps *straps); diff --git a/drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c b/drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c index 953f4a4dacad..33ce470e4c88 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c +++ b/drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c @@ -37,36 +37,9 @@ #include "ivsrcid/ivsrcid_vislands30.h" -static bool hpd_ack( - struct irq_service *irq_service, - const struct irq_source_info *info) -{ - uint32_t addr = info->status_reg; - uint32_t value = dm_read_reg(irq_service->ctx, addr); - uint32_t current_status = - get_reg_field_value( - value, - HPD0_DC_HPD_INT_STATUS, - 
DC_HPD_SENSE_DELAYED); - - dal_irq_service_ack_generic(irq_service, info); - - value = dm_read_reg(irq_service->ctx, info->enable_reg); - - set_reg_field_value( - value, - current_status ? 0 : 1, - HPD0_DC_HPD_INT_CONTROL, - DC_HPD_INT_POLARITY); - - dm_write_reg(irq_service->ctx, info->enable_reg, value); - - return true; -} - static struct irq_source_info_funcs hpd_irq_info_funcs = { .set = NULL, - .ack = hpd_ack + .ack = hpd0_ack }; static struct irq_source_info_funcs hpd_rx_irq_info_funcs = { diff --git a/drivers/gpu/drm/amd/display/dc/irq/dce60/irq_service_dce60.c b/drivers/gpu/drm/amd/display/dc/irq/dce60/irq_service_dce60.c index 2c72074310c7..d777b85e70da 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dce60/irq_service_dce60.c +++ b/drivers/gpu/drm/amd/display/dc/irq/dce60/irq_service_dce60.c @@ -46,36 +46,9 @@ #include "dc_types.h" -static bool hpd_ack( - struct irq_service *irq_service, - const struct irq_source_info *info) -{ - uint32_t addr = info->status_reg; - uint32_t value = dm_read_reg(irq_service->ctx, addr); - uint32_t current_status = - get_reg_field_value( - value, - DC_HPD1_INT_STATUS, - DC_HPD1_SENSE_DELAYED); - - dal_irq_service_ack_generic(irq_service, info); - - value = dm_read_reg(irq_service->ctx, info->enable_reg); - - set_reg_field_value( - value, - current_status ? 0 : 1, - DC_HPD1_INT_CONTROL, - DC_HPD1_INT_POLARITY); - - dm_write_reg(irq_service->ctx, info->enable_reg, value); - - return true; -} - static struct irq_source_info_funcs hpd_irq_info_funcs = { .set = NULL, - .ack = hpd_ack + .ack = hpd1_ack }; static struct irq_source_info_funcs hpd_rx_irq_info_funcs = { @@ -391,5 +364,3 @@ struct irq_service *dal_irq_service_dce60_create( dce60_irq_construct(irq_service, init_data); return irq_service; } - - diff --git a/drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.c b/drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.c index 49317934ef4f..3a9163acb49b 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.c +++ b/drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.c @@ -37,36 +37,9 @@ #include "dc_types.h" -static bool hpd_ack( - struct irq_service *irq_service, - const struct irq_source_info *info) -{ - uint32_t addr = info->status_reg; - uint32_t value = dm_read_reg(irq_service->ctx, addr); - uint32_t current_status = - get_reg_field_value( - value, - DC_HPD1_INT_STATUS, - DC_HPD1_SENSE_DELAYED); - - dal_irq_service_ack_generic(irq_service, info); - - value = dm_read_reg(irq_service->ctx, info->enable_reg); - - set_reg_field_value( - value, - current_status ? 
0 : 1, - DC_HPD1_INT_CONTROL, - DC_HPD1_INT_POLARITY); - - dm_write_reg(irq_service->ctx, info->enable_reg, value); - - return true; -} - static struct irq_source_info_funcs hpd_irq_info_funcs = { .set = NULL, - .ack = hpd_ack + .ack = hpd1_ack }; static struct irq_source_info_funcs hpd_rx_irq_info_funcs = { @@ -303,5 +276,3 @@ struct irq_service *dal_irq_service_dce80_create( dce80_irq_construct(irq_service, init_data); return irq_service; } - - diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c b/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c index 9ca28565a9d1..4ce9edd16344 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c +++ b/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c @@ -129,36 +129,9 @@ static enum dc_irq_source to_dal_irq_source_dcn10(struct irq_service *irq_servic } } -static bool hpd_ack( - struct irq_service *irq_service, - const struct irq_source_info *info) -{ - uint32_t addr = info->status_reg; - uint32_t value = dm_read_reg(irq_service->ctx, addr); - uint32_t current_status = - get_reg_field_value( - value, - HPD0_DC_HPD_INT_STATUS, - DC_HPD_SENSE_DELAYED); - - dal_irq_service_ack_generic(irq_service, info); - - value = dm_read_reg(irq_service->ctx, info->enable_reg); - - set_reg_field_value( - value, - current_status ? 0 : 1, - HPD0_DC_HPD_INT_CONTROL, - DC_HPD_INT_POLARITY); - - dm_write_reg(irq_service->ctx, info->enable_reg, value); - - return true; -} - static struct irq_source_info_funcs hpd_irq_info_funcs = { .set = NULL, - .ack = hpd_ack + .ack = hpd0_ack }; static struct irq_source_info_funcs hpd_rx_irq_info_funcs = { diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c b/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c index 916f0c974637..5847af0e66cb 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c +++ b/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c @@ -130,36 +130,9 @@ static enum dc_irq_source to_dal_irq_source_dcn20( } } -static bool hpd_ack( - struct irq_service *irq_service, - const struct irq_source_info *info) -{ - uint32_t addr = info->status_reg; - uint32_t value = dm_read_reg(irq_service->ctx, addr); - uint32_t current_status = - get_reg_field_value( - value, - HPD0_DC_HPD_INT_STATUS, - DC_HPD_SENSE_DELAYED); - - dal_irq_service_ack_generic(irq_service, info); - - value = dm_read_reg(irq_service->ctx, info->enable_reg); - - set_reg_field_value( - value, - current_status ? 
0 : 1, - HPD0_DC_HPD_INT_CONTROL, - DC_HPD_INT_POLARITY); - - dm_write_reg(irq_service->ctx, info->enable_reg, value); - - return true; -} - static struct irq_source_info_funcs hpd_irq_info_funcs = { .set = NULL, - .ack = hpd_ack + .ack = hpd0_ack }; static struct irq_source_info_funcs hpd_rx_irq_info_funcs = { diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.c b/drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.c index 1d61d475d36f..6417011d2246 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.c +++ b/drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.c @@ -80,36 +80,9 @@ static enum dc_irq_source to_dal_irq_source_dcn201( } } -static bool hpd_ack( - struct irq_service *irq_service, - const struct irq_source_info *info) -{ - uint32_t addr = info->status_reg; - uint32_t value = dm_read_reg(irq_service->ctx, addr); - uint32_t current_status = - get_reg_field_value( - value, - HPD0_DC_HPD_INT_STATUS, - DC_HPD_SENSE_DELAYED); - - dal_irq_service_ack_generic(irq_service, info); - - value = dm_read_reg(irq_service->ctx, info->enable_reg); - - set_reg_field_value( - value, - current_status ? 0 : 1, - HPD0_DC_HPD_INT_CONTROL, - DC_HPD_INT_POLARITY); - - dm_write_reg(irq_service->ctx, info->enable_reg, value); - - return true; -} - static struct irq_source_info_funcs hpd_irq_info_funcs = { .set = NULL, - .ack = hpd_ack + .ack = hpd0_ack }; static struct irq_source_info_funcs hpd_rx_irq_info_funcs = { diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c b/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c index 42cdfe6c3538..71d2f065140b 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c +++ b/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c @@ -132,36 +132,9 @@ static enum dc_irq_source to_dal_irq_source_dcn21(struct irq_service *irq_servic return DC_IRQ_SOURCE_INVALID; } -static bool hpd_ack( - struct irq_service *irq_service, - const struct irq_source_info *info) -{ - uint32_t addr = info->status_reg; - uint32_t value = dm_read_reg(irq_service->ctx, addr); - uint32_t current_status = - get_reg_field_value( - value, - HPD0_DC_HPD_INT_STATUS, - DC_HPD_SENSE_DELAYED); - - dal_irq_service_ack_generic(irq_service, info); - - value = dm_read_reg(irq_service->ctx, info->enable_reg); - - set_reg_field_value( - value, - current_status ? 
0 : 1, - HPD0_DC_HPD_INT_CONTROL, - DC_HPD_INT_POLARITY); - - dm_write_reg(irq_service->ctx, info->enable_reg, value); - - return true; -} - static struct irq_source_info_funcs hpd_irq_info_funcs = { .set = NULL, - .ack = hpd_ack + .ack = hpd0_ack }; static struct irq_source_info_funcs hpd_rx_irq_info_funcs = { diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn30/irq_service_dcn30.c b/drivers/gpu/drm/amd/display/dc/irq/dcn30/irq_service_dcn30.c index a443a8abb1ea..2a4080bdcf6b 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dcn30/irq_service_dcn30.c +++ b/drivers/gpu/drm/amd/display/dc/irq/dcn30/irq_service_dcn30.c @@ -139,36 +139,9 @@ static enum dc_irq_source to_dal_irq_source_dcn30( } } -static bool hpd_ack( - struct irq_service *irq_service, - const struct irq_source_info *info) -{ - uint32_t addr = info->status_reg; - uint32_t value = dm_read_reg(irq_service->ctx, addr); - uint32_t current_status = - get_reg_field_value( - value, - HPD0_DC_HPD_INT_STATUS, - DC_HPD_SENSE_DELAYED); - - dal_irq_service_ack_generic(irq_service, info); - - value = dm_read_reg(irq_service->ctx, info->enable_reg); - - set_reg_field_value( - value, - current_status ? 0 : 1, - HPD0_DC_HPD_INT_CONTROL, - DC_HPD_INT_POLARITY); - - dm_write_reg(irq_service->ctx, info->enable_reg, value); - - return true; -} - static struct irq_source_info_funcs hpd_irq_info_funcs = { .set = NULL, - .ack = hpd_ack + .ack = hpd0_ack }; static struct irq_source_info_funcs hpd_rx_irq_info_funcs = { @@ -447,4 +420,3 @@ struct irq_service *dal_irq_service_dcn30_create( dcn30_irq_construct(irq_service, init_data); return irq_service; } - diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn302/irq_service_dcn302.c b/drivers/gpu/drm/amd/display/dc/irq/dcn302/irq_service_dcn302.c index 8ffc7e2c681a..624f1ac309f8 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dcn302/irq_service_dcn302.c +++ b/drivers/gpu/drm/amd/display/dc/irq/dcn302/irq_service_dcn302.c @@ -126,26 +126,9 @@ static enum dc_irq_source to_dal_irq_source_dcn302(struct irq_service *irq_servi } } -static bool hpd_ack(struct irq_service *irq_service, const struct irq_source_info *info) -{ - uint32_t addr = info->status_reg; - uint32_t value = dm_read_reg(irq_service->ctx, addr); - uint32_t current_status = get_reg_field_value(value, HPD0_DC_HPD_INT_STATUS, DC_HPD_SENSE_DELAYED); - - dal_irq_service_ack_generic(irq_service, info); - - value = dm_read_reg(irq_service->ctx, info->enable_reg); - - set_reg_field_value(value, current_status ? 
0 : 1, HPD0_DC_HPD_INT_CONTROL, DC_HPD_INT_POLARITY); - - dm_write_reg(irq_service->ctx, info->enable_reg, value); - - return true; -} - static struct irq_source_info_funcs hpd_irq_info_funcs = { .set = NULL, - .ack = hpd_ack + .ack = hpd0_ack }; static struct irq_source_info_funcs hpd_rx_irq_info_funcs = { diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn303/irq_service_dcn303.c b/drivers/gpu/drm/amd/display/dc/irq/dcn303/irq_service_dcn303.c index 262bb8b74b15..137caffae916 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dcn303/irq_service_dcn303.c +++ b/drivers/gpu/drm/amd/display/dc/irq/dcn303/irq_service_dcn303.c @@ -77,26 +77,9 @@ static enum dc_irq_source to_dal_irq_source_dcn303(struct irq_service *irq_servi } } -static bool hpd_ack(struct irq_service *irq_service, const struct irq_source_info *info) -{ - uint32_t addr = info->status_reg; - uint32_t value = dm_read_reg(irq_service->ctx, addr); - uint32_t current_status = get_reg_field_value(value, HPD0_DC_HPD_INT_STATUS, DC_HPD_SENSE_DELAYED); - - dal_irq_service_ack_generic(irq_service, info); - - value = dm_read_reg(irq_service->ctx, info->enable_reg); - - set_reg_field_value(value, current_status ? 0 : 1, HPD0_DC_HPD_INT_CONTROL, DC_HPD_INT_POLARITY); - - dm_write_reg(irq_service->ctx, info->enable_reg, value); - - return true; -} - static struct irq_source_info_funcs hpd_irq_info_funcs = { .set = NULL, - .ack = hpd_ack + .ack = hpd0_ack }; static struct irq_source_info_funcs hpd_rx_irq_info_funcs = { diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn31/irq_service_dcn31.c b/drivers/gpu/drm/amd/display/dc/irq/dcn31/irq_service_dcn31.c index 53e78ae7eecf..921cb167d920 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dcn31/irq_service_dcn31.c +++ b/drivers/gpu/drm/amd/display/dc/irq/dcn31/irq_service_dcn31.c @@ -128,36 +128,9 @@ static enum dc_irq_source to_dal_irq_source_dcn31(struct irq_service *irq_servic } } -static bool hpd_ack( - struct irq_service *irq_service, - const struct irq_source_info *info) -{ - uint32_t addr = info->status_reg; - uint32_t value = dm_read_reg(irq_service->ctx, addr); - uint32_t current_status = - get_reg_field_value( - value, - HPD0_DC_HPD_INT_STATUS, - DC_HPD_SENSE_DELAYED); - - dal_irq_service_ack_generic(irq_service, info); - - value = dm_read_reg(irq_service->ctx, info->enable_reg); - - set_reg_field_value( - value, - current_status ? 
0 : 1, - HPD0_DC_HPD_INT_CONTROL, - DC_HPD_INT_POLARITY); - - dm_write_reg(irq_service->ctx, info->enable_reg, value); - - return true; -} - static struct irq_source_info_funcs hpd_irq_info_funcs = { .set = NULL, - .ack = hpd_ack + .ack = hpd0_ack }; static struct irq_source_info_funcs hpd_rx_irq_info_funcs = { diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn314/irq_service_dcn314.c b/drivers/gpu/drm/amd/display/dc/irq/dcn314/irq_service_dcn314.c index e0563e880432..0118fd6e5db0 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dcn314/irq_service_dcn314.c +++ b/drivers/gpu/drm/amd/display/dc/irq/dcn314/irq_service_dcn314.c @@ -130,36 +130,9 @@ static enum dc_irq_source to_dal_irq_source_dcn314(struct irq_service *irq_servi } } -static bool hpd_ack( - struct irq_service *irq_service, - const struct irq_source_info *info) -{ - uint32_t addr = info->status_reg; - uint32_t value = dm_read_reg(irq_service->ctx, addr); - uint32_t current_status = - get_reg_field_value( - value, - HPD0_DC_HPD_INT_STATUS, - DC_HPD_SENSE_DELAYED); - - dal_irq_service_ack_generic(irq_service, info); - - value = dm_read_reg(irq_service->ctx, info->enable_reg); - - set_reg_field_value( - value, - current_status ? 0 : 1, - HPD0_DC_HPD_INT_CONTROL, - DC_HPD_INT_POLARITY); - - dm_write_reg(irq_service->ctx, info->enable_reg, value); - - return true; -} - static struct irq_source_info_funcs hpd_irq_info_funcs = { .set = NULL, - .ack = hpd_ack + .ack = hpd0_ack }; static struct irq_source_info_funcs hpd_rx_irq_info_funcs = { diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn315/irq_service_dcn315.c b/drivers/gpu/drm/amd/display/dc/irq/dcn315/irq_service_dcn315.c index 2ef22299101a..adebfc888618 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dcn315/irq_service_dcn315.c +++ b/drivers/gpu/drm/amd/display/dc/irq/dcn315/irq_service_dcn315.c @@ -135,36 +135,9 @@ static enum dc_irq_source to_dal_irq_source_dcn315( } } -static bool hpd_ack( - struct irq_service *irq_service, - const struct irq_source_info *info) -{ - uint32_t addr = info->status_reg; - uint32_t value = dm_read_reg(irq_service->ctx, addr); - uint32_t current_status = - get_reg_field_value( - value, - HPD0_DC_HPD_INT_STATUS, - DC_HPD_SENSE_DELAYED); - - dal_irq_service_ack_generic(irq_service, info); - - value = dm_read_reg(irq_service->ctx, info->enable_reg); - - set_reg_field_value( - value, - current_status ? 0 : 1, - HPD0_DC_HPD_INT_CONTROL, - DC_HPD_INT_POLARITY); - - dm_write_reg(irq_service->ctx, info->enable_reg, value); - - return true; -} - static struct irq_source_info_funcs hpd_irq_info_funcs = { .set = NULL, - .ack = hpd_ack + .ack = hpd0_ack }; static struct irq_source_info_funcs hpd_rx_irq_info_funcs = { diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn32/irq_service_dcn32.c b/drivers/gpu/drm/amd/display/dc/irq/dcn32/irq_service_dcn32.c index f839afacd5a5..e9e315c75d76 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dcn32/irq_service_dcn32.c +++ b/drivers/gpu/drm/amd/display/dc/irq/dcn32/irq_service_dcn32.c @@ -129,36 +129,9 @@ static enum dc_irq_source to_dal_irq_source_dcn32( } } -static bool hpd_ack( - struct irq_service *irq_service, - const struct irq_source_info *info) -{ - uint32_t addr = info->status_reg; - uint32_t value = dm_read_reg(irq_service->ctx, addr); - uint32_t current_status = - get_reg_field_value( - value, - HPD0_DC_HPD_INT_STATUS, - DC_HPD_SENSE_DELAYED); - - dal_irq_service_ack_generic(irq_service, info); - - value = dm_read_reg(irq_service->ctx, info->enable_reg); - - set_reg_field_value( - value, - current_status ? 
0 : 1, - HPD0_DC_HPD_INT_CONTROL, - DC_HPD_INT_POLARITY); - - dm_write_reg(irq_service->ctx, info->enable_reg, value); - - return true; -} - static struct irq_source_info_funcs hpd_irq_info_funcs = { .set = NULL, - .ack = hpd_ack + .ack = hpd0_ack }; static struct irq_source_info_funcs hpd_rx_irq_info_funcs = { diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn35/irq_service_dcn35.c b/drivers/gpu/drm/amd/display/dc/irq/dcn35/irq_service_dcn35.c index ea8c271171bc..79e5e8c137ca 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dcn35/irq_service_dcn35.c +++ b/drivers/gpu/drm/amd/display/dc/irq/dcn35/irq_service_dcn35.c @@ -127,36 +127,9 @@ static enum dc_irq_source to_dal_irq_source_dcn35( } } -static bool hpd_ack( - struct irq_service *irq_service, - const struct irq_source_info *info) -{ - uint32_t addr = info->status_reg; - uint32_t value = dm_read_reg(irq_service->ctx, addr); - uint32_t current_status = - get_reg_field_value( - value, - HPD0_DC_HPD_INT_STATUS, - DC_HPD_SENSE_DELAYED); - - dal_irq_service_ack_generic(irq_service, info); - - value = dm_read_reg(irq_service->ctx, info->enable_reg); - - set_reg_field_value( - value, - current_status ? 0 : 1, - HPD0_DC_HPD_INT_CONTROL, - DC_HPD_INT_POLARITY); - - dm_write_reg(irq_service->ctx, info->enable_reg, value); - - return true; -} - static struct irq_source_info_funcs hpd_irq_info_funcs = { .set = NULL, - .ack = hpd_ack + .ack = hpd0_ack }; static struct irq_source_info_funcs hpd_rx_irq_info_funcs = { diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn351/irq_service_dcn351.c b/drivers/gpu/drm/amd/display/dc/irq/dcn351/irq_service_dcn351.c index 7ec8e0de2f01..163b8ee9ebf7 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dcn351/irq_service_dcn351.c +++ b/drivers/gpu/drm/amd/display/dc/irq/dcn351/irq_service_dcn351.c @@ -106,36 +106,9 @@ static enum dc_irq_source to_dal_irq_source_dcn351( } } -static bool hpd_ack( - struct irq_service *irq_service, - const struct irq_source_info *info) -{ - uint32_t addr = info->status_reg; - uint32_t value = dm_read_reg(irq_service->ctx, addr); - uint32_t current_status = - get_reg_field_value( - value, - HPD0_DC_HPD_INT_STATUS, - DC_HPD_SENSE_DELAYED); - - dal_irq_service_ack_generic(irq_service, info); - - value = dm_read_reg(irq_service->ctx, info->enable_reg); - - set_reg_field_value( - value, - current_status ? 0 : 1, - HPD0_DC_HPD_INT_CONTROL, - DC_HPD_INT_POLARITY); - - dm_write_reg(irq_service->ctx, info->enable_reg, value); - - return true; -} - static struct irq_source_info_funcs hpd_irq_info_funcs = { .set = NULL, - .ack = hpd_ack + .ack = hpd0_ack }; static struct irq_source_info_funcs hpd_rx_irq_info_funcs = { diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn36/irq_service_dcn36.c b/drivers/gpu/drm/amd/display/dc/irq/dcn36/irq_service_dcn36.c index ea958628f8b8..f716ab0fd30e 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dcn36/irq_service_dcn36.c +++ b/drivers/gpu/drm/amd/display/dc/irq/dcn36/irq_service_dcn36.c @@ -105,36 +105,9 @@ static enum dc_irq_source to_dal_irq_source_dcn36( } } -static bool hpd_ack( - struct irq_service *irq_service, - const struct irq_source_info *info) -{ - uint32_t addr = info->status_reg; - uint32_t value = dm_read_reg(irq_service->ctx, addr); - uint32_t current_status = - get_reg_field_value( - value, - HPD0_DC_HPD_INT_STATUS, - DC_HPD_SENSE_DELAYED); - - dal_irq_service_ack_generic(irq_service, info); - - value = dm_read_reg(irq_service->ctx, info->enable_reg); - - set_reg_field_value( - value, - current_status ? 
0 : 1, - HPD0_DC_HPD_INT_CONTROL, - DC_HPD_INT_POLARITY); - - dm_write_reg(irq_service->ctx, info->enable_reg, value); - - return true; -} - static struct irq_source_info_funcs hpd_irq_info_funcs = { .set = NULL, - .ack = hpd_ack + .ack = hpd0_ack }; static struct irq_source_info_funcs hpd_rx_irq_info_funcs = { diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn401/irq_service_dcn401.c b/drivers/gpu/drm/amd/display/dc/irq/dcn401/irq_service_dcn401.c index 8499e505cf3e..fd9bb1950c20 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dcn401/irq_service_dcn401.c +++ b/drivers/gpu/drm/amd/display/dc/irq/dcn401/irq_service_dcn401.c @@ -109,36 +109,9 @@ static enum dc_irq_source to_dal_irq_source_dcn401( } } -static bool hpd_ack( - struct irq_service *irq_service, - const struct irq_source_info *info) -{ - uint32_t addr = info->status_reg; - uint32_t value = dm_read_reg(irq_service->ctx, addr); - uint32_t current_status = - get_reg_field_value( - value, - HPD0_DC_HPD_INT_STATUS, - DC_HPD_SENSE_DELAYED); - - dal_irq_service_ack_generic(irq_service, info); - - value = dm_read_reg(irq_service->ctx, info->enable_reg); - - set_reg_field_value( - value, - current_status ? 0 : 1, - HPD0_DC_HPD_INT_CONTROL, - DC_HPD_INT_POLARITY); - - dm_write_reg(irq_service->ctx, info->enable_reg, value); - - return true; -} - static struct irq_source_info_funcs hpd_irq_info_funcs = { .set = NULL, - .ack = hpd_ack + .ack = hpd0_ack }; static struct irq_source_info_funcs hpd_rx_irq_info_funcs = { diff --git a/drivers/gpu/drm/amd/display/dc/irq/irq_service.c b/drivers/gpu/drm/amd/display/dc/irq/irq_service.c index eca3d7ee7e4e..b595a11c5eaf 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/irq_service.c +++ b/drivers/gpu/drm/amd/display/dc/irq/irq_service.c @@ -41,6 +41,16 @@ #include "reg_helper.h" #include "irq_service.h" +//HPD0_DC_HPD_INT_STATUS +#define HPD0_DC_HPD_INT_STATUS__DC_HPD_SENSE_DELAYED_MASK 0x00000010L +#define HPD0_DC_HPD_INT_CONTROL__DC_HPD_INT_POLARITY_MASK 0x00000100L +#define HPD0_DC_HPD_INT_STATUS__DC_HPD_SENSE_DELAYED__SHIFT 0x4 +#define HPD0_DC_HPD_INT_CONTROL__DC_HPD_INT_POLARITY__SHIFT 0x8 +//HPD1_DC_HPD_INT_STATUS +#define DC_HPD1_INT_STATUS__DC_HPD1_SENSE_DELAYED_MASK 0x10 +#define DC_HPD1_INT_STATUS__DC_HPD1_SENSE_DELAYED__SHIFT 0x4 +#define DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK 0x100 +#define DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY__SHIFT 0x8 #define CTX \ @@ -177,3 +187,57 @@ enum dc_irq_source dal_irq_service_to_irq_source( src_id, ext_id); } + +bool hpd0_ack( + struct irq_service *irq_service, + const struct irq_source_info *info) +{ + uint32_t addr = info->status_reg; + uint32_t value = dm_read_reg(irq_service->ctx, addr); + uint32_t current_status = + get_reg_field_value( + value, + HPD0_DC_HPD_INT_STATUS, + DC_HPD_SENSE_DELAYED); + + dal_irq_service_ack_generic(irq_service, info); + + value = dm_read_reg(irq_service->ctx, info->enable_reg); + + set_reg_field_value( + value, + current_status ? 
0 : 1, + HPD0_DC_HPD_INT_CONTROL, + DC_HPD_INT_POLARITY); + + dm_write_reg(irq_service->ctx, info->enable_reg, value); + + return true; +} + +bool hpd1_ack( + struct irq_service *irq_service, + const struct irq_source_info *info) +{ + uint32_t addr = info->status_reg; + uint32_t value = dm_read_reg(irq_service->ctx, addr); + uint32_t current_status = + get_reg_field_value( + value, + DC_HPD1_INT_STATUS, + DC_HPD1_SENSE_DELAYED); + + dal_irq_service_ack_generic(irq_service, info); + + value = dm_read_reg(irq_service->ctx, info->enable_reg); + + set_reg_field_value( + value, + current_status ? 0 : 1, + DC_HPD1_INT_CONTROL, + DC_HPD1_INT_POLARITY); + + dm_write_reg(irq_service->ctx, info->enable_reg, value); + + return true; +} diff --git a/drivers/gpu/drm/amd/display/dc/irq/irq_service.h b/drivers/gpu/drm/amd/display/dc/irq/irq_service.h index b178f85944cd..bbcef3d2fe33 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/irq_service.h +++ b/drivers/gpu/drm/amd/display/dc/irq/irq_service.h @@ -82,4 +82,12 @@ void dal_irq_service_set_generic( const struct irq_source_info *info, bool enable); +bool hpd0_ack( + struct irq_service *irq_service, + const struct irq_source_info *info); + +bool hpd1_ack( + struct irq_service *irq_service, + const struct irq_source_info *info); + #endif diff --git a/drivers/gpu/drm/amd/display/dc/link/link_detection.c b/drivers/gpu/drm/amd/display/dc/link/link_detection.c index cc9191a5c9e6..9655e6fa53a4 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_detection.c +++ b/drivers/gpu/drm/amd/display/dc/link/link_detection.c @@ -611,6 +611,7 @@ static bool detect_dp(struct dc_link *link, link->dpcd_caps.dongle_type = sink_caps->dongle_type; link->dpcd_caps.is_dongle_type_one = sink_caps->is_dongle_type_one; link->dpcd_caps.dpcd_rev.raw = 0; + link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.raw = 0; } return true; @@ -1007,21 +1008,11 @@ static bool detect_link_and_local_sink(struct dc_link *link, link->reported_link_cap.link_rate > LINK_RATE_HIGH3) link->reported_link_cap.link_rate = LINK_RATE_HIGH3; - /* - * If this is DP over USB4 link then we need to: - * - Enable BW ALLOC support on DPtx if applicable - */ - if (dc->config.usb4_bw_alloc_support) { - if (link_dp_dpia_set_dptx_usb4_bw_alloc_support(link)) { - /* update with non reduced link cap if bw allocation mode is supported */ - if (link->dpia_bw_alloc_config.nrd_max_link_rate && - link->dpia_bw_alloc_config.nrd_max_lane_count) { - link->reported_link_cap.link_rate = - link->dpia_bw_alloc_config.nrd_max_link_rate; - link->reported_link_cap.lane_count = - link->dpia_bw_alloc_config.nrd_max_lane_count; - } - } + if (link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dp_tunneling + && link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dpia_bw_alloc + && link->dpcd_caps.usb4_dp_tun_info.driver_bw_cap.bits.driver_bw_alloc_support) { + if (link_dpia_enable_usb4_dp_bw_alloc_mode(link) == false) + link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dpia_bw_alloc = false; } break; } diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c index 268626e73c54..273a3be6d593 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c +++ b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c @@ -148,6 +148,7 @@ void link_blank_dp_stream(struct dc_link *link, bool hw_init) void link_set_all_streams_dpms_off_for_link(struct dc_link *link) { struct pipe_ctx *pipes[MAX_PIPES]; + struct dc_stream_state *streams[MAX_PIPES]; struct dc_state *state = link->dc->current_state; uint8_t count; int 
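/*
 * Standalone sketch (plain variables, no real register I/O) of the ack pattern
 * now shared through hpd0_ack()/hpd1_ack() above: read the delayed sense bit,
 * ack the interrupt, then program the polarity to the opposite level so the
 * next plug/unplug edge raises a fresh interrupt. Bit positions follow the
 * SENSE_DELAYED (shift 4) and INT_POLARITY (shift 8) masks added above.
 */
#include <stdint.h>

struct example_hpd_regs {
	uint32_t status; /* bit 4: delayed sense */
	uint32_t enable; /* bit 8: interrupt polarity */
};

static void example_hpd_ack(struct example_hpd_regs *regs)
{
	uint32_t sense = (regs->status >> 4) & 1;

	/* ...generic ack of the latched interrupt would happen here... */

	if (sense)
		regs->enable &= ~(1u << 8); /* sense high: arm for the falling edge */
	else
		regs->enable |= (1u << 8); /* sense low: arm for the rising edge */
}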
i; @@ -160,10 +161,18 @@ void link_set_all_streams_dpms_off_for_link(struct dc_link *link) link_get_master_pipes_with_dpms_on(link, state, &count, pipes); + /* The subsequent call to dc_commit_updates_for_stream for a full update + * will release the current state and swap to a new state. Releasing the + * current state results in the stream pointers in the pipe_ctx structs + * being zeroed. Hence, cache all streams prior to dc_commit_updates_for_stream. + */ + for (i = 0; i < count; i++) + streams[i] = pipes[i]->stream; + for (i = 0; i < count; i++) { - stream_update.stream = pipes[i]->stream; + stream_update.stream = streams[i]; dc_commit_updates_for_stream(link->ctx->dc, NULL, 0, - pipes[i]->stream, &stream_update, + streams[i], &stream_update, state); } @@ -2365,7 +2374,7 @@ void link_set_dpms_off(struct pipe_ctx *pipe_ctx) update_psp_stream_config(pipe_ctx, true); dc->hwss.blank_stream(pipe_ctx); - if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) + if (pipe_ctx->link_config.dp_tunnel_settings.should_use_dp_bw_allocation) deallocate_usb4_bandwidth(pipe_ctx->stream); if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) @@ -2433,7 +2442,7 @@ void link_set_dpms_off(struct pipe_ctx *pipe_ctx) if (link->connector_signal == SIGNAL_TYPE_EDP && dc->debug.psp_disabled_wa) { /* reset internal save state to default since eDP is off */ enum dp_panel_mode panel_mode = dp_get_panel_mode(pipe_ctx->stream->link); - /* since current psp not loaded, we need to reset it to default*/ + /* since current psp not loaded, we need to reset it to default */ link->panel_mode = panel_mode; } } @@ -2611,7 +2620,7 @@ void link_set_dpms_on( if (dc_is_dp_signal(pipe_ctx->stream->signal)) dp_set_hblank_reduction_on_rx(pipe_ctx); - if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) + if (pipe_ctx->link_config.dp_tunnel_settings.should_use_dp_bw_allocation) allocate_usb4_bandwidth(pipe_ctx->stream); if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) diff --git a/drivers/gpu/drm/amd/display/dc/link/link_factory.c b/drivers/gpu/drm/amd/display/dc/link/link_factory.c index 0125f2cfc114..1a04f4b74585 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_factory.c +++ b/drivers/gpu/drm/amd/display/dc/link/link_factory.c @@ -156,6 +156,7 @@ static void construct_link_service_dp_capability(struct link_service *link_srv) link_srv->dp_get_encoding_format = link_dp_get_encoding_format; link_srv->dp_should_enable_fec = dp_should_enable_fec; link_srv->dp_decide_link_settings = link_decide_link_settings; + link_srv->dp_decide_tunnel_settings = link_decide_dp_tunnel_settings; link_srv->mst_decide_link_encoding_format = mst_decide_link_encoding_format; link_srv->edp_decide_link_settings = edp_decide_link_settings; diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c index 21ee0d96c9d4..8f79881ad9f1 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c @@ -158,6 +158,14 @@ uint8_t dp_parse_lttpr_repeater_count(uint8_t lttpr_repeater_count) return 0; // invalid value } +uint32_t dp_get_closest_lttpr_offset(uint8_t lttpr_count) +{ + /* Calculate the offset for the LTTPR closest to the DPTX, which is the highest in the chain. + * The offset is 0 for single-LTTPR cases, as the base LTTPR DPCD addresses target LTTPR 1. + */ + return DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE * (lttpr_count - 1); +} + uint32_t
link_bw_kbps_from_raw_frl_link_rate_data(uint8_t bw) { switch (bw) { @@ -2013,11 +2021,9 @@ static bool retrieve_link_cap(struct dc_link *link) sizeof(link->dpcd_caps.max_uncompressed_pixel_rate_cap.raw)); /* Read DP tunneling information. */ - if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) { - status = dpcd_get_tunneling_device_data(link); - if (status != DC_OK) - dm_error("%s: Read DP tunneling device data failed.\n", __func__); - } + status = dpcd_get_tunneling_device_data(link); + if (status != DC_OK) + dm_error("%s: Read DP tunneling device data failed.\n", __func__); retrieve_cable_id(link); dpcd_write_cable_id_to_dprx(link); diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.h index 0ce0af3ddbeb..940b147cc5d4 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.h +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.h @@ -48,6 +48,9 @@ enum dc_status dp_retrieve_lttpr_cap(struct dc_link *link); /* Convert PHY repeater count read from DPCD uint8_t. */ uint8_t dp_parse_lttpr_repeater_count(uint8_t lttpr_repeater_count); +/* Calculate embedded LTTPR address offset for vendor-specific behaviour */ +uint32_t dp_get_closest_lttpr_offset(uint8_t lttpr_count); + bool dp_is_sink_present(struct dc_link *link); bool dp_is_lttpr_present(struct dc_link *link); diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c index c149210096ac..22bfdced64ab 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c @@ -62,6 +62,36 @@ enum dc_status dpcd_get_tunneling_device_data(struct dc_link *link) if (status != DC_OK) goto err; + link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.raw = + dpcd_dp_tun_data[DP_TUNNELING_CAPABILITIES_SUPPORT - DP_TUNNELING_CAPABILITIES_SUPPORT]; + + if (link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dp_tunneling == false) + goto err; + + link->dpcd_caps.usb4_dp_tun_info.dpia_info.raw = + dpcd_dp_tun_data[DP_IN_ADAPTER_INFO - DP_TUNNELING_CAPABILITIES_SUPPORT]; + link->dpcd_caps.usb4_dp_tun_info.usb4_driver_id = + dpcd_dp_tun_data[DP_USB4_DRIVER_ID - DP_TUNNELING_CAPABILITIES_SUPPORT]; + + if (link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dpia_bw_alloc) { + status = core_link_read_dpcd(link, USB4_DRIVER_BW_CAPABILITY, + dpcd_dp_tun_data, 1); + + if (status != DC_OK) + goto err; + + link->dpcd_caps.usb4_dp_tun_info.driver_bw_cap.raw = dpcd_dp_tun_data[0]; + } + + DC_LOG_DEBUG("%s: Link[%d] DP tunneling support (RouterId=%d AdapterId=%d) " + "DPIA_BW_Alloc_support=%d " + "CM_BW_Alloc_support=%d ", + __func__, link->link_index, + link->dpcd_caps.usb4_dp_tun_info.usb4_driver_id, + link->dpcd_caps.usb4_dp_tun_info.dpia_info.bits.dpia_num, + link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dpia_bw_alloc, + link->dpcd_caps.usb4_dp_tun_info.driver_bw_cap.bits.driver_bw_alloc_support); + status = core_link_read_dpcd( link, DP_USB4_ROUTER_TOPOLOGY_ID, @@ -71,13 +101,6 @@ enum dc_status dpcd_get_tunneling_device_data(struct dc_link *link) if (status != DC_OK) goto err; - link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.raw = - dpcd_dp_tun_data[DP_TUNNELING_CAPABILITIES_SUPPORT - DP_TUNNELING_CAPABILITIES_SUPPORT]; - link->dpcd_caps.usb4_dp_tun_info.dpia_info.raw = - dpcd_dp_tun_data[DP_IN_ADAPTER_INFO - DP_TUNNELING_CAPABILITIES_SUPPORT]; - link->dpcd_caps.usb4_dp_tun_info.usb4_driver_id = - 
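/*
 * Sketch (assumed register values, hypothetical names) of the indexing
 * convention in dpcd_get_tunneling_device_data() above: one contiguous DPCD
 * read starts at DP_TUNNELING_CAPABILITIES_SUPPORT, so each register lands in
 * the buffer at (register address - block base), with the base register itself
 * at index 0.
 */
#include <stdint.h>

#define EX_TUNNELING_BLOCK_BASE 0xe000d /* assumed: DP_TUNNELING_CAPABILITIES_SUPPORT */
#define EX_IN_ADAPTER_INFO      0xe000e /* assumed: DP_IN_ADAPTER_INFO */

static uint8_t example_reg_from_block(const uint8_t *buf, uint32_t reg)
{
	return buf[reg - EX_TUNNELING_BLOCK_BASE]; /* e.g. EX_IN_ADAPTER_INFO -> buf[1] */
}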
dpcd_dp_tun_data[DP_USB4_DRIVER_ID - DP_TUNNELING_CAPABILITIES_SUPPORT]; - for (i = 0; i < DPCD_USB4_TOPOLOGY_ID_LEN; i++) link->dpcd_caps.usb4_dp_tun_info.usb4_topology_id[i] = dpcd_topology_data[i]; @@ -120,3 +143,20 @@ bool dpia_query_hpd_status(struct dc_link *link) return link->hpd_status; } +void link_decide_dp_tunnel_settings(struct dc_stream_state *stream, + struct dc_tunnel_settings *dp_tunnel_setting) +{ + struct dc_link *link = stream->link; + + memset(dp_tunnel_setting, 0, sizeof(*dp_tunnel_setting)); + + if ((stream->signal == SIGNAL_TYPE_DISPLAY_PORT) || (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)) { + dp_tunnel_setting->should_enable_dp_tunneling = + link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dp_tunneling; + + if (link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dpia_bw_alloc + && link->dpcd_caps.usb4_dp_tun_info.driver_bw_cap.bits.driver_bw_alloc_support) + dp_tunnel_setting->should_use_dp_bw_allocation = true; + } +} + diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.h index 363f45a1a964..a61edfc9ca7a 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.h +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.h @@ -38,4 +38,10 @@ enum dc_status dpcd_get_tunneling_device_data(struct dc_link *link); * Returns true if HPD high. */ bool dpia_query_hpd_status(struct dc_link *link); + +/* Decide the DP tunneling settings based on the DPCD capabilities + */ +void link_decide_dp_tunnel_settings(struct dc_stream_state *stream, + struct dc_tunnel_settings *dp_tunnel_setting); + #endif /* __DC_LINK_DPIA_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c index a254ead2f7e8..3af7564a84f1 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c @@ -46,9 +46,10 @@ */ static bool link_dp_is_bw_alloc_available(struct dc_link *link) { - return (link && link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA - && link->hpd_status - && link->dpia_bw_alloc_config.bw_alloc_enabled); + return (link && link->hpd_status + && link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dp_tunneling + && link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dpia_bw_alloc + && link->dpcd_caps.usb4_dp_tun_info.driver_bw_cap.bits.driver_bw_alloc_support); } static void reset_bw_alloc_struct(struct dc_link *link) @@ -141,7 +142,7 @@ static int get_non_reduced_max_lane_count(struct dc_link *link) * granuality, Driver_ID, CM_Group, & populate the BW allocation structs * for host router and dpia */ -static void init_usb4_bw_struct(struct dc_link *link) +static void retrieve_usb4_dp_bw_allocation_info(struct dc_link *link) { reset_bw_alloc_struct(link); @@ -282,49 +283,26 @@ static void link_dpia_send_bw_alloc_request(struct dc_link *link, int req_bw) // ------------------------------------------------------------------ // PUBLIC FUNCTIONS // ------------------------------------------------------------------ -bool link_dp_dpia_set_dptx_usb4_bw_alloc_support(struct dc_link *link) +bool link_dpia_enable_usb4_dp_bw_alloc_mode(struct dc_link *link) { bool ret = false; - uint8_t response = 0, - bw_support_dpia = 0, - bw_support_cm = 0; + uint8_t val; - if (!(link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA && link->hpd_status)) - goto out; + if (link->hpd_status) { + val = DPTX_BW_ALLOC_MODE_ENABLE | DPTX_BW_ALLOC_UNMASK_IRQ; - if 
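/*
 * Reduced sketch (hypothetical types, not the dc structs) of the decision in
 * link_decide_dp_tunnel_settings() above: for DP/DP-MST signals, tunneling
 * follows the DPCD dp_tunneling capability bit, and BW allocation additionally
 * requires both the DPIA and the driver BW-allocation capability bits.
 */
#include <stdbool.h>

struct example_tun_caps {
	bool dp_tunneling;
	bool dpia_bw_alloc;
	bool driver_bw_alloc_support;
};

struct example_tun_settings {
	bool should_enable_dp_tunneling;
	bool should_use_dp_bw_allocation;
};

static struct example_tun_settings example_decide_tunnel(const struct example_tun_caps *caps, bool is_dp_signal)
{
	struct example_tun_settings s = { false, false };

	if (is_dp_signal) {
		s.should_enable_dp_tunneling = caps->dp_tunneling;
		s.should_use_dp_bw_allocation = caps->dpia_bw_alloc && caps->driver_bw_alloc_support;
	}
	return s;
}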
(core_link_read_dpcd( - link, - DP_TUNNELING_CAPABILITIES, - &response, - sizeof(uint8_t)) == DC_OK) - bw_support_dpia = (response >> 7) & 1; - - if (core_link_read_dpcd( - link, - USB4_DRIVER_BW_CAPABILITY, - &response, - sizeof(uint8_t)) == DC_OK) - bw_support_cm = (response >> 7) & 1; - - /* Send request acknowledgment to Turn ON DPTX support */ - if (bw_support_cm && bw_support_dpia) { - - response = 0x80; - if (core_link_write_dpcd( - link, - DPTX_BW_ALLOCATION_MODE_CONTROL, - &response, - sizeof(uint8_t)) != DC_OK) { - DC_LOG_DEBUG("%s: FAILURE Enabling DPtx BW Allocation Mode Support for link(%d)\n", - __func__, link->link_index); - } else { - // SUCCESS Enabled DPtx BW Allocation Mode Support - DC_LOG_DEBUG("%s: SUCCESS Enabling DPtx BW Allocation Mode Support for link(%d)\n", - __func__, link->link_index); + if (core_link_write_dpcd(link, DPTX_BW_ALLOCATION_MODE_CONTROL, &val, sizeof(uint8_t)) == DC_OK) { + DC_LOG_DEBUG("%s: link[%d] DPTX BW allocation mode enabled", __func__, link->link_index); + + retrieve_usb4_dp_bw_allocation_info(link); + + if (link->dpia_bw_alloc_config.nrd_max_link_rate && link->dpia_bw_alloc_config.nrd_max_lane_count) { + link->reported_link_cap.link_rate = link->dpia_bw_alloc_config.nrd_max_link_rate; + link->reported_link_cap.lane_count = link->dpia_bw_alloc_config.nrd_max_lane_count; + } - ret = true; - init_usb4_bw_struct(link); link->dpia_bw_alloc_config.bw_alloc_enabled = true; + ret = true; /* * During DP tunnel creation, CM preallocates BW and reduces estimated BW of other @@ -332,11 +310,12 @@ bool link_dp_dpia_set_dptx_usb4_bw_alloc_support(struct dc_link *link) * to make the CM to release preallocation and update estimated BW correctly for * all DPIAs per host router */ + // TODO: Zero allocation can be removed once the MSFT CM fix has been released link_dp_dpia_allocate_usb4_bandwidth_for_stream(link, 0); - } + } else + DC_LOG_DEBUG("%s: link[%d] failed to enable DPTX BW allocation mode", __func__, link->link_index); } -out: return ret; } @@ -378,7 +357,8 @@ void link_dp_dpia_handle_bw_alloc_status(struct dc_link *link, uint8_t status) */ void dpia_handle_usb4_bandwidth_allocation_for_link(struct dc_link *link, int peak_bw) { - if (link && link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA && link->dpia_bw_alloc_config.bw_alloc_enabled) { + if (link && link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dp_tunneling + && link->dpia_bw_alloc_config.bw_alloc_enabled) { //1. 
Hot Plug if (link->hpd_status && peak_bw > 0) { // If DP over USB4 then we need to check BW allocation @@ -401,7 +381,7 @@ void link_dp_dpia_allocate_usb4_bandwidth_for_stream(struct dc_link *link, int r if (link_dp_is_bw_alloc_available(link)) link_dpia_send_bw_alloc_request(link, req_bw); else - DC_LOG_DEBUG("%s: Not able to send the BW Allocation request", __func__); + DC_LOG_DEBUG("%s: BW Allocation mode not available", __func__); } bool dpia_validate_usb4_bw(struct dc_link **link, int *bw_needed_per_dpia, const unsigned int num_dpias) diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h index 6df9b946b00f..801965b5f9a4 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h @@ -43,13 +43,13 @@ enum bw_type { }; /* - * Enable BW Allocation Mode Support from the DP-Tx side + * Enable USB4 DP BW allocation mode * * @link: pointer to the dc_link struct instance * * return: SUCCESS or FAILURE */ -bool link_dp_dpia_set_dptx_usb4_bw_alloc_support(struct dc_link *link); +bool link_dpia_enable_usb4_dp_bw_alloc_mode(struct dc_link *link); /* * Allocates only what the stream needs for bw, so if: diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c index 991b8ad4984b..693477413347 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c @@ -352,7 +352,7 @@ enum dc_status dp_read_hpd_rx_irq_data( irq_data->raw, DP_SINK_STATUS - DP_SINK_COUNT + 1); - if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) { + if (link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dp_tunneling) { retval = core_link_read_dpcd( link, DP_LINK_SERVICE_IRQ_VECTOR_ESI0, &irq_data->bytes.link_service_irq_esi0.raw, 1); @@ -521,7 +521,7 @@ bool dp_handle_hpd_rx_irq(struct dc_link *link, dp_trace_link_loss_increment(link); } - if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) { + if (link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dp_tunneling) { if (hpd_irq_dpcd_data.bytes.link_service_irq_esi0.bits.DP_LINK_TUNNELING_IRQ) dp_handle_tunneling_irq(link); } diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c index ef358afdfb65..2dc1a660e504 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c @@ -785,7 +785,6 @@ void override_training_settings( lt_settings->lttpr_mode = LTTPR_MODE_NON_LTTPR; dp_get_lttpr_mode_override(link, <_settings->lttpr_mode); - } enum dc_dp_training_pattern decide_cr_training_pattern( diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.c index 5a5d48fadbf2..66d0fb1b9b9d 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.c @@ -142,6 +142,14 @@ void decide_8b_10b_training_settings( lt_settings->lttpr_mode = dp_decide_8b_10b_lttpr_mode(link); lt_settings->cr_pattern_time = get_cr_training_aux_rd_interval(link, link_setting, lt_settings->lttpr_mode); dp_hw_to_dpcd_lane_settings(lt_settings, lt_settings->hw_lane_settings, 
lt_settings->dpcd_lane_settings); + + /* Some embedded LTTPRs rely on receiving TPS2 before LT to interop reliably with sensitive VGA dongles. + * This allows these LTTPRs to minimize freq/phase and skew variation during lock and deskew sequences. + */ + if ((link->chip_caps & AMD_EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK) == + AMD_EXT_DISPLAY_PATH_CAPS__DP_EARLY_8B10B_TPS2) { + lt_settings->lttpr_early_tps2 = true; + } } enum lttpr_mode dp_decide_8b_10b_lttpr_mode(struct dc_link *link) @@ -173,6 +181,42 @@ enum lttpr_mode dp_decide_8b_10b_lttpr_mode(struct dc_link *link) return LTTPR_MODE_NON_LTTPR; } +static void set_link_settings_and_perform_early_tps2_retimer_pre_lt_sequence(struct dc_link *link, + const struct link_resource *link_res, + struct link_training_settings *lt_settings, + uint32_t lttpr_count) +{ + /* Vendor-specific LTTPR early TPS2 sequence: + * 1. Output TPS2 + * 2. Wait 400us + * 3. Set link settings as usual + * 4. Write TPS1 to DP_TRAINING_PATTERN_SET_PHY_REPEATERx, targeting the LTTPR closest to the host + * 5. Wait 1ms + * 6. Begin link training as usual + */ + + uint32_t closest_lttpr_address_offset = dp_get_closest_lttpr_offset(lttpr_count); + + union dpcd_training_pattern dpcd_pattern = {0}; + + dpcd_pattern.v1_4.TRAINING_PATTERN_SET = 1; + dpcd_pattern.v1_4.SCRAMBLING_DISABLE = 1; + + DC_LOG_HW_LINK_TRAINING("%s\n GPU sends TPS2. Wait 400us.\n", __func__); + + dp_set_hw_training_pattern(link, link_res, DP_TRAINING_PATTERN_SEQUENCE_2, DPRX); + + dp_set_hw_lane_settings(link, link_res, lt_settings, DPRX); + + udelay(400); + + dpcd_set_link_settings(link, lt_settings); + + core_link_write_dpcd(link, DP_TRAINING_PATTERN_SET_PHY_REPEATER1 + closest_lttpr_address_offset, &dpcd_pattern.raw, 1); + + udelay(1000); +} + enum link_training_result perform_8b_10b_clock_recovery_sequence( struct dc_link *link, const struct link_resource *link_res, @@ -383,7 +427,7 @@ enum link_training_result dp_perform_8b_10b_link_training( { enum link_training_result status = LINK_TRAINING_SUCCESS; - uint8_t repeater_cnt; + uint8_t repeater_cnt = dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt); uint8_t repeater_id; uint8_t lane = 0; @@ -391,14 +435,16 @@ enum link_training_result dp_perform_8b_10b_link_training( start_clock_recovery_pattern_early(link, link_res, lt_settings, DPRX); /* 1. set link rate, lane count and spread. */ - dpcd_set_link_settings(link, lt_settings); + if (lt_settings->lttpr_early_tps2) + set_link_settings_and_perform_early_tps2_retimer_pre_lt_sequence(link, link_res, lt_settings, repeater_cnt); + else + dpcd_set_link_settings(link, lt_settings); if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) { /* 2.
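/*
 * Worked illustration (register values assumed, names hypothetical) of step 4
 * in the early-TPS2 sequence above: the TPS1 write targets the LTTPR closest
 * to the host, i.e. the highest-numbered repeater, so its register address is
 * the PHY_REPEATER1 base plus the per-repeater bank offset computed like
 * dp_get_closest_lttpr_offset().
 */
#include <stdint.h>

#define EX_TRAINING_PATTERN_SET_PHY_REPEATER1 0xf0010 /* assumed base address */
#define EX_REPEATER_BANK_STRIDE               0x50    /* assumed per-LTTPR bank size */

static uint32_t example_tps1_target_address(uint8_t lttpr_count)
{
	return EX_TRAINING_PATTERN_SET_PHY_REPEATER1 +
	       EX_REPEATER_BANK_STRIDE * (uint32_t)(lttpr_count - 1);
}
/* lttpr_count = 1 -> 0xf0010 (base already targets it); 3 -> 0xf0010 + 0xa0 = 0xf00b0 */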
perform link training (set link training done * to false is done as well) */ - repeater_cnt = dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt); for (repeater_id = repeater_cnt; (repeater_id > 0 && status == LINK_TRAINING_SUCCESS); repeater_id--) { diff --git a/drivers/gpu/drm/amd/display/dc/sspl/dc_spl.c b/drivers/gpu/drm/amd/display/dc/sspl/dc_spl.c index 002eb926cca4..e0008c5f08ad 100644 --- a/drivers/gpu/drm/amd/display/dc/sspl/dc_spl.c +++ b/drivers/gpu/drm/amd/display/dc/sspl/dc_spl.c @@ -1299,7 +1299,7 @@ static void spl_set_easf_data(struct spl_scratch *spl_scratch, struct spl_out *s if (enable_easf_v) { dscl_prog_data->easf_v_en = true; dscl_prog_data->easf_v_ring = 0; - dscl_prog_data->easf_v_sharp_factor = 0; + dscl_prog_data->easf_v_sharp_factor = 1; dscl_prog_data->easf_v_bf1_en = 1; // 1-bit, BF1 calculation enable, 0=disable, 1=enable dscl_prog_data->easf_v_bf2_mode = 0xF; // 4-bit, BF2 calculation mode /* 2-bit, BF3 chroma mode correction calculation mode */ @@ -1463,7 +1463,7 @@ static void spl_set_easf_data(struct spl_scratch *spl_scratch, struct spl_out *s if (enable_easf_h) { dscl_prog_data->easf_h_en = true; dscl_prog_data->easf_h_ring = 0; - dscl_prog_data->easf_h_sharp_factor = 0; + dscl_prog_data->easf_h_sharp_factor = 1; dscl_prog_data->easf_h_bf1_en = 1; // 1-bit, BF1 calculation enable, 0=disable, 1=enable dscl_prog_data->easf_h_bf2_mode = diff --git a/drivers/gpu/drm/amd/display/dc/sspl/dc_spl_types.h b/drivers/gpu/drm/amd/display/dc/sspl/dc_spl_types.h index 1c3949b24611..36a284305a70 100644 --- a/drivers/gpu/drm/amd/display/dc/sspl/dc_spl_types.h +++ b/drivers/gpu/drm/amd/display/dc/sspl/dc_spl_types.h @@ -480,6 +480,10 @@ enum sharpness_setting { SHARPNESS_ZERO, SHARPNESS_CUSTOM }; +enum sharpness_range_source { + SHARPNESS_RANGE_DCN = 0, + SHARPNESS_RANGE_DCN_OVERRIDE +}; struct spl_sharpness_range { int sdr_rgb_min; int sdr_rgb_max; diff --git a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h index e759ce6ca700..3f3fa1b6a69e 100644 --- a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h +++ b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h @@ -445,6 +445,8 @@ struct dmub_srv_hw_funcs { uint32_t (*emul_get_inbox1_rptr)(struct dmub_srv *dmub); + uint32_t (*emul_get_inbox1_wptr)(struct dmub_srv *dmub); + void (*emul_set_inbox1_wptr)(struct dmub_srv *dmub, uint32_t wptr_offset); bool (*is_supported)(struct dmub_srv *dmub); @@ -1053,4 +1055,16 @@ enum dmub_status dmub_srv_wait_for_inbox_free(struct dmub_srv *dmub, uint32_t timeout_us, uint32_t num_free_required); +/** + * dmub_srv_update_inbox_status() - Updates pending status for inbox & reg inbox0 + * @dmub: the dmub service + * + * Return: + * DMUB_STATUS_OK - success + * DMUB_STATUS_TIMEOUT - wait for buffer to flush timed out + * DMUB_STATUS_HW_FAILURE - issue with HW programming + * DMUB_STATUS_INVALID - unspecified error + */ +enum dmub_status dmub_srv_update_inbox_status(struct dmub_srv *dmub); + #endif /* _DMUB_SRV_H_ */ diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h index 76e9dcc15466..57fa05bddb45 100644 --- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h +++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h @@ -550,6 +550,11 @@ union replay_hw_flags { * @is_alpm_initialized: Indicates whether ALPM is initialized */ uint32_t is_alpm_initialized : 1; + + /** + * @alpm_mode: Indicates ALPM mode selected + */ + uint32_t alpm_mode : 2; } bitfields; uint32_t u32All; @@ -742,6 +747,14 
@@ enum dmub_ips_disable_type { DMUB_IPS_DISABLE_IPS2_Z10 = 4, DMUB_IPS_DISABLE_DYNAMIC = 5, DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF = 6, + DMUB_IPS_DISABLE_Z8_RETENTION = 7, +}; + +enum dmub_ips_rcg_disable_type { + DMUB_IPS_RCG_ENABLE = 0, + DMUB_IPS0_RCG_DISABLE = 1, + DMUB_IPS1_RCG_DISABLE = 2, + DMUB_IPS_RCG_DISABLE = 3 }; #define DMUB_IPS1_ALLOW_MASK 0x00000001 @@ -820,11 +833,12 @@ enum dmub_shared_state_feature_id { */ union dmub_shared_state_ips_fw_signals { struct { - uint32_t ips1_commit : 1; /**< 1 if in IPS1 */ + uint32_t ips1_commit : 1; /**< 1 if in IPS1 or IPS0 RCG */ uint32_t ips2_commit : 1; /**< 1 if in IPS2 */ uint32_t in_idle : 1; /**< 1 if DMCUB is in idle */ uint32_t detection_required : 1; /**< 1 if detection is required */ - uint32_t reserved_bits : 28; /**< Reversed */ + uint32_t ips1z8_commit : 1; /**< 1 if in IPS1 Z8 Retention */ + uint32_t reserved_bits : 27; /**< Reserved */ } bits; uint32_t all; }; @@ -839,7 +853,10 @@ union dmub_shared_state_ips_driver_signals { struct { uint32_t allow_ips2 : 1; /**< 1 is IPS1 is allowed */ uint32_t allow_z10 : 1; /**< 1 if Z10 is allowed */ uint32_t allow_idle: 1; /**< 1 if driver is allowing idle */ - uint32_t reserved_bits : 27; /**< Reversed bits */ + uint32_t allow_ips0_rcg : 1; /**< 1 if IPS0 RCG is allowed */ + uint32_t allow_ips1_rcg : 1; /**< 1 if IPS1 RCG is allowed */ + uint32_t allow_ips1z8 : 1; /**< 1 if IPS1 Z8 Retention is allowed */ + uint32_t reserved_bits : 24; /**< Reserved bits */ } bits; uint32_t all; }; @@ -868,7 +885,9 @@ struct dmub_shared_state_ips_fw { uint32_t ips1_exit_count; /**< Exit counter for IPS1 */ uint32_t ips2_entry_count; /**< Entry counter for IPS2 */ uint32_t ips2_exit_count; /**< Exit counter for IPS2 */ - uint32_t reserved[55]; /**< Reversed, to be updated when adding new fields. */ + uint32_t ips1_z8ret_entry_count; /**< Entry counter for IPS1 Z8 Retention */ + uint32_t ips1_z8ret_exit_count; /**< Exit counter for IPS1 Z8 Retention */ + uint32_t reserved[53]; /**< Reserved, to be updated when adding new fields. */ }; /* 248-bytes, fixed */ /** @@ -1256,6 +1275,10 @@ enum dmub_gpint_command { * DESC: Setup debug configs. */ DMUB_GPINT__SETUP_DEBUG_MODE = 136, + /** + * DESC: Initiates IPS wake sequence. + */ + DMUB_GPINT__IPS_DEBUG_WAKE = 137, }; /** @@ -2116,6 +2139,11 @@ union dmub_cmd_fams2_config { } stream_v1; //v1 }; +struct dmub_fams2_config_v2 { + struct dmub_cmd_fams2_global_config global; + struct dmub_fams2_stream_static_state_v1 stream_v1[DMUB_MAX_STREAMS]; //v1 +}; + /** * DMUB rb command definition for FAMS2 (merged SubVP, FPO, Legacy) */ @@ -2125,6 +2153,22 @@ struct dmub_rb_cmd_fams2 { }; /** + * Indirect buffer descriptor + */ +struct dmub_ib_data { + union dmub_addr src; // location of indirect buffer in memory + uint16_t size; // indirect buffer size in bytes +}; + +/** + * DMUB rb command definition for commands passed over indirect buffer + */ +struct dmub_rb_cmd_ib { + struct dmub_cmd_header header; + struct dmub_ib_data ib_data; +}; + +/** * enum dmub_cmd_idle_opt_type - Idle optimization command type. */ enum dmub_cmd_idle_opt_type { @@ -2147,6 +2191,11 @@ enum dmub_cmd_idle_opt_type { * DCN hardware notify power state. */ DMUB_CMD__IDLE_OPT_SET_DC_POWER_STATE = 3, + + /** + * DCN notify to release HW.
+ */ + DMUB_CMD__IDLE_OPT_RELEASE_HW = 4, }; /** @@ -2908,8 +2957,9 @@ enum dmub_cmd_fams_type { */ DMUB_CMD__FAMS_SET_MANUAL_TRIGGER = 3, DMUB_CMD__FAMS2_CONFIG = 4, - DMUB_CMD__FAMS2_DRR_UPDATE = 5, - DMUB_CMD__FAMS2_FLIP = 6, + DMUB_CMD__FAMS2_IB_CONFIG = 5, + DMUB_CMD__FAMS2_DRR_UPDATE = 6, + DMUB_CMD__FAMS2_FLIP = 7, }; /** @@ -3616,6 +3666,12 @@ struct dmub_rb_cmd_psr_set_power_opt { struct dmub_cmd_psr_set_power_opt_data psr_set_power_opt_data; }; +enum dmub_alpm_mode { + ALPM_AUXWAKE = 0, + ALPM_AUXLESS = 1, + ALPM_UNSUPPORTED = 2, +}; + /** * Definition of Replay Residency GPINT command. * Bit[0] - Residency mode for Revision 0 @@ -3749,6 +3805,15 @@ enum dmub_cmd_replay_general_subtype { REPLAY_GENERAL_CMD_SET_LOW_RR_ACTIVATE, }; +struct dmub_alpm_auxless_data { + uint16_t lfps_setup_ns; + uint16_t lfps_period_ns; + uint16_t lfps_silence_ns; + uint16_t lfps_t1_t2_override_us; + short lfps_t1_t2_offset_us; + uint8_t lttpr_count; +}; + /** * Data passed from driver to FW in a DMUB_CMD__REPLAY_COPY_SETTINGS command. */ @@ -3819,6 +3884,10 @@ struct dmub_cmd_replay_copy_settings_data { * Use FSM state for Replay power up/down */ uint8_t use_phy_fsm; + /** + * Use for AUX-less ALPM LFPS wake operation + */ + struct dmub_alpm_auxless_data auxless_alpm_data; }; /** @@ -5884,8 +5953,11 @@ union dmub_rb_cmd { * Definition of a DMUB_CMD__PSP_ASSR_ENABLE command. */ struct dmub_rb_cmd_assr_enable assr_enable; + struct dmub_rb_cmd_fams2 fams2_config; + struct dmub_rb_cmd_ib ib_fams2_config; + struct dmub_rb_cmd_fams2_drr_update fams2_drr_update; struct dmub_rb_cmd_fams2_flip fams2_flip; diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c index 731ca9b6a6cf..2575dbc448f7 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c @@ -66,24 +66,20 @@ void dmub_dcn401_reset(struct dmub_srv *dmub) const uint32_t timeout_us = 1 * 1000 * 1000; //1s const uint32_t poll_delay_us = 1; //1us uint32_t i = 0; - uint32_t in_reset, scratch, pwait_mode; + uint32_t enabled, in_reset, scratch, pwait_mode; - REG_GET(DMCUB_CNTL2, DMCUB_SOFT_RESET, &in_reset); + REG_GET(DMCUB_CNTL, + DMCUB_ENABLE, &enabled); + REG_GET(DMCUB_CNTL2, + DMCUB_SOFT_RESET, &in_reset); - if (in_reset == 0) { + if (enabled && in_reset == 0) { cmd.bits.status = 1; cmd.bits.command_code = DMUB_GPINT__STOP_FW; cmd.bits.param = 0; dmub->hw_funcs.set_gpint(dmub, cmd); - for (i = 0; i < timeout_us; i++) { - if (dmub->hw_funcs.is_gpint_acked(dmub, cmd)) - break; - - udelay(poll_delay_us); - } - for (; i < timeout_us; i++) { scratch = dmub->hw_funcs.get_gpint_response(dmub); if (scratch == DMUB_GPINT__STOP_FW_RESPONSE) diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c index c917a70b3c19..acca7943a8c8 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c @@ -952,10 +952,8 @@ enum dmub_status dmub_srv_wait_for_pending(struct dmub_srv *dmub, !dmub->hw_funcs.get_inbox1_wptr) return DMUB_STATUS_INVALID; - /* take a snapshot of the required mailbox state */ - scratch_inbox1.rb.wrpt = dmub->hw_funcs.get_inbox1_wptr(dmub); - for (i = 0; i <= timeout_us; i += polling_interval_us) { + scratch_inbox1.rb.wrpt = dmub->hw_funcs.get_inbox1_wptr(dmub); scratch_inbox1.rb.rptr = dmub->hw_funcs.get_inbox1_rptr(dmub); scratch_reg_inbox0.is_pending = scratch_reg_inbox0.is_pending && @@ -978,30 +976,6 @@ enum dmub_status 
dmub_srv_wait_for_pending(struct dmub_srv *dmub, return DMUB_STATUS_TIMEOUT; } -static enum dmub_status dmub_srv_update_inbox_status(struct dmub_srv *dmub) -{ - uint32_t rptr; - - /* update inbox1 state */ - rptr = dmub->hw_funcs.get_inbox1_rptr(dmub); - - if (rptr > dmub->inbox1.rb.capacity) - return DMUB_STATUS_HW_FAILURE; - - if (dmub->inbox1.rb.rptr > rptr) { - /* rb wrapped */ - dmub->inbox1.num_reported += (rptr + dmub->inbox1.rb.capacity - dmub->inbox1.rb.rptr) / DMUB_RB_CMD_SIZE; - } else { - dmub->inbox1.num_reported += (rptr - dmub->inbox1.rb.rptr) / DMUB_RB_CMD_SIZE; - } - dmub->inbox1.rb.rptr = rptr; - - /* update reg_inbox0 */ - dmub_srv_update_reg_inbox0_status(dmub); - - return DMUB_STATUS_OK; -} - enum dmub_status dmub_srv_wait_for_idle(struct dmub_srv *dmub, uint32_t timeout_us) { @@ -1353,3 +1327,33 @@ enum dmub_status dmub_srv_wait_for_inbox_free(struct dmub_srv *dmub, return DMUB_STATUS_TIMEOUT; } + +enum dmub_status dmub_srv_update_inbox_status(struct dmub_srv *dmub) +{ + uint32_t rptr; + + if (!dmub->hw_init) + return DMUB_STATUS_INVALID; + + if (dmub->power_state != DMUB_POWER_STATE_D0) + return DMUB_STATUS_POWER_STATE_D3; + + /* update inbox1 state */ + rptr = dmub->hw_funcs.get_inbox1_rptr(dmub); + + if (rptr > dmub->inbox1.rb.capacity) + return DMUB_STATUS_HW_FAILURE; + + if (dmub->inbox1.rb.rptr > rptr) { + /* rb wrapped */ + dmub->inbox1.num_reported += (rptr + dmub->inbox1.rb.capacity - dmub->inbox1.rb.rptr) / DMUB_RB_CMD_SIZE; + } else { + dmub->inbox1.num_reported += (rptr - dmub->inbox1.rb.rptr) / DMUB_RB_CMD_SIZE; + } + dmub->inbox1.rb.rptr = rptr; + + /* update reg_inbox0 */ + dmub_srv_update_reg_inbox0_status(dmub); + + return DMUB_STATUS_OK; +} diff --git a/drivers/gpu/drm/amd/display/include/link_service_types.h b/drivers/gpu/drm/amd/display/include/link_service_types.h index 1867aac57cf2..da74ed66c8f9 100644 --- a/drivers/gpu/drm/amd/display/include/link_service_types.h +++ b/drivers/gpu/drm/amd/display/include/link_service_types.h @@ -89,6 +89,8 @@ struct link_training_settings { bool enhanced_framing; enum lttpr_mode lttpr_mode; + bool lttpr_early_tps2; + /* disallow different lanes to have different lane settings */ bool disallow_per_lane_settings; /* dpcd lane settings will always use the same hw lane settings diff --git a/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_5_0_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_5_0_0_offset.h index 14574112c469..c4aaa86a95e2 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_5_0_0_offset.h +++ b/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_5_0_0_offset.h @@ -1147,6 +1147,22 @@ #define regUVD_DPG_LMA_CTL2_BASE_IDX 1 +// addressBlock: uvd_mmsch_dec +// base address: 0x20d2c +#define regMMSCH_VF_VMID 0x054b +#define regMMSCH_VF_VMID_BASE_IDX 1 +#define regMMSCH_VF_CTX_ADDR_LO 0x054c +#define regMMSCH_VF_CTX_ADDR_LO_BASE_IDX 1 +#define regMMSCH_VF_CTX_ADDR_HI 0x054d +#define regMMSCH_VF_CTX_ADDR_HI_BASE_IDX 1 +#define regMMSCH_VF_CTX_SIZE 0x054e +#define regMMSCH_VF_CTX_SIZE_BASE_IDX 1 +#define regMMSCH_VF_MAILBOX_HOST 0x0552 +#define regMMSCH_VF_MAILBOX_HOST_BASE_IDX 1 +#define regMMSCH_VF_MAILBOX_RESP 0x0553 +#define regMMSCH_VF_MAILBOX_RESP_BASE_IDX 1 + + // addressBlock: uvd_vcn_umsch_dec // base address: 0x21500 #define regVCN_UMSCH_MES_CNTL 0x0740 diff --git a/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_5_0_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_5_0_0_sh_mask.h index 5c119a6b87fb..bd7242e4e9c6 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_5_0_0_sh_mask.h 
+++ b/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_5_0_0_sh_mask.h @@ -5929,6 +5929,29 @@ #define UVD_DPG_LMA_CTL2__JPEG_WRITE_PTR_MASK 0x0000FE00L +// addressBlock: uvd_mmsch_dec +//MMSCH_VF_VMID +#define MMSCH_VF_VMID__VF_CTX_VMID__SHIFT 0x0 +#define MMSCH_VF_VMID__VF_GPCOM_VMID__SHIFT 0x5 +#define MMSCH_VF_VMID__VF_CTX_VMID_MASK 0x0000001FL +#define MMSCH_VF_VMID__VF_GPCOM_VMID_MASK 0x000003E0L +//MMSCH_VF_CTX_ADDR_LO +#define MMSCH_VF_CTX_ADDR_LO__VF_CTX_ADDR_LO__SHIFT 0x6 +#define MMSCH_VF_CTX_ADDR_LO__VF_CTX_ADDR_LO_MASK 0xFFFFFFC0L +//MMSCH_VF_CTX_ADDR_HI +#define MMSCH_VF_CTX_ADDR_HI__VF_CTX_ADDR_HI__SHIFT 0x0 +#define MMSCH_VF_CTX_ADDR_HI__VF_CTX_ADDR_HI_MASK 0xFFFFFFFFL +//MMSCH_VF_CTX_SIZE +#define MMSCH_VF_CTX_SIZE__VF_CTX_SIZE__SHIFT 0x0 +#define MMSCH_VF_CTX_SIZE__VF_CTX_SIZE_MASK 0xFFFFFFFFL +//MMSCH_VF_MAILBOX_HOST +#define MMSCH_VF_MAILBOX_HOST__DATA__SHIFT 0x0 +#define MMSCH_VF_MAILBOX_HOST__DATA_MASK 0xFFFFFFFFL +//MMSCH_VF_MAILBOX_RESP +#define MMSCH_VF_MAILBOX_RESP__RESP__SHIFT 0x0 +#define MMSCH_VF_MAILBOX_RESP__RESP_MASK 0xFFFFFFFFL + + // addressBlock: uvd_vcn_umsch_dec //VCN_UMSCH_MES_CNTL #define VCN_UMSCH_MES_CNTL__PIPE_ID__SHIFT 0x0 diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h index 0160d65f3f5e..2d1135bdc4b9 100644 --- a/drivers/gpu/drm/amd/include/atomfirmware.h +++ b/drivers/gpu/drm/amd/include/atomfirmware.h @@ -183,6 +183,7 @@ enum atom_dgpu_vram_type { ATOM_DGPU_VRAM_TYPE_HBM2E = 0x61, ATOM_DGPU_VRAM_TYPE_GDDR6 = 0x70, ATOM_DGPU_VRAM_TYPE_HBM3 = 0x80, + ATOM_DGPU_VRAM_TYPE_HBM3E = 0x81, }; enum atom_dp_vs_preemph_def{ diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c index f345c233bc47..f24a1d8c77db 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c @@ -3432,19 +3432,6 @@ bool smu_mode1_reset_is_support(struct smu_context *smu) return ret; } -bool smu_mode2_reset_is_support(struct smu_context *smu) -{ - bool ret = false; - - if (!smu->pm_enabled) - return false; - - if (smu->ppt_funcs && smu->ppt_funcs->mode2_reset_is_support) - ret = smu->ppt_funcs->mode2_reset_is_support(smu); - - return ret; -} - bool smu_link_reset_is_support(struct smu_context *smu) { bool ret = false; diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h index a95a3dd5a895..d47e32ae4671 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h @@ -1233,11 +1233,6 @@ struct pptable_funcs { bool (*mode1_reset_is_support)(struct smu_context *smu); /** - * @mode2_reset_is_support: Check if GPU supports mode2 reset. - */ - bool (*mode2_reset_is_support)(struct smu_context *smu); - - /** * @link_reset_is_support: Check if GPU supports link reset. 
*/ bool (*link_reset_is_support)(struct smu_context *smu); @@ -1621,7 +1616,6 @@ int smu_get_power_limit(void *handle, enum pp_power_type pp_power_type); bool smu_mode1_reset_is_support(struct smu_context *smu); -bool smu_mode2_reset_is_support(struct smu_context *smu); bool smu_link_reset_is_support(struct smu_context *smu); int smu_mode1_reset(struct smu_context *smu); int smu_link_reset(struct smu_context *smu); diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h index c22ed28c0cdb..4263798d716b 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h @@ -163,8 +163,6 @@ int smu_v13_0_notify_memory_pool_location(struct smu_context *smu); int smu_v13_0_system_features_control(struct smu_context *smu, bool en); -int smu_v13_0_init_display_count(struct smu_context *smu, uint32_t count); - int smu_v13_0_set_allowed_mask(struct smu_context *smu); int smu_v13_0_notify_display_change(struct smu_context *smu); @@ -300,14 +298,5 @@ int smu_v13_0_get_boot_freq_by_index(struct smu_context *smu, void smu_v13_0_interrupt_work(struct smu_context *smu); void smu_v13_0_reset_custom_level(struct smu_context *smu); -bool smu_v13_0_12_is_dpm_running(struct smu_context *smu); -int smu_v13_0_12_get_max_metrics_size(void); -int smu_v13_0_12_setup_driver_pptable(struct smu_context *smu); -int smu_v13_0_12_get_smu_metrics_data(struct smu_context *smu, - MetricsMember_t member, - uint32_t *value); -ssize_t smu_v13_0_12_get_gpu_metrics(struct smu_context *smu, void **table); -extern const struct cmn2asic_mapping smu_v13_0_12_feature_mask_map[]; -extern const struct cmn2asic_msg_mapping smu_v13_0_12_message_map[]; #endif #endif diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c index 19a25fdc2f5b..115e3fa456bc 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c @@ -3089,11 +3089,6 @@ static int sienna_cichlid_stb_get_data_direct(struct smu_context *smu, return 0; } -static bool sienna_cichlid_is_mode2_reset_supported(struct smu_context *smu) -{ - return true; -} - static int sienna_cichlid_mode2_reset(struct smu_context *smu) { int ret = 0, index; @@ -3229,7 +3224,6 @@ static const struct pptable_funcs sienna_cichlid_ppt_funcs = { .get_default_config_table_settings = sienna_cichlid_get_default_config_table_settings, .set_config_table = sienna_cichlid_set_config_table, .get_unique_id = sienna_cichlid_get_unique_id, - .mode2_reset_is_support = sienna_cichlid_is_mode2_reset_supported, .mode2_reset = sienna_cichlid_mode2_reset, }; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c index 5cb3b9bb6089..6de653d2ed62 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c @@ -1985,11 +1985,6 @@ static bool aldebaran_is_mode1_reset_supported(struct smu_context *smu) return true; } -static bool aldebaran_is_mode2_reset_supported(struct smu_context *smu) -{ - return true; -} - static int aldebaran_set_mp1_state(struct smu_context *smu, enum pp_mp1_state mp1_state) { @@ -2095,7 +2090,6 @@ static const struct pptable_funcs aldebaran_ppt_funcs = { .set_pp_feature_mask = smu_cmn_set_pp_feature_mask, .get_gpu_metrics = aldebaran_get_gpu_metrics, .mode1_reset_is_support = aldebaran_is_mode1_reset_supported, - .mode2_reset_is_support = 
aldebaran_is_mode2_reset_supported, .smu_handle_passthrough_sbr = aldebaran_smu_handle_passthrough_sbr, .mode1_reset = aldebaran_mode1_reset, .set_mp1_state = aldebaran_set_mp1_state, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c index 45b5ab423844..a7167668d189 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c @@ -749,18 +749,6 @@ int smu_v13_0_set_tool_table_location(struct smu_context *smu) return ret; } -int smu_v13_0_init_display_count(struct smu_context *smu, uint32_t count) -{ - int ret = 0; - - if (!smu->pm_enabled) - return ret; - - ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, count, NULL); - - return ret; -} - int smu_v13_0_set_allowed_mask(struct smu_context *smu) { struct smu_feature *feature = &smu->smu_feature; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c index b6e5da7b06ef..7d4ff09be7e8 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c @@ -2919,11 +2919,6 @@ static bool smu_v13_0_6_is_mode1_reset_supported(struct smu_context *smu) return true; } -static bool smu_v13_0_6_is_mode2_reset_supported(struct smu_context *smu) -{ - return true; -} - static inline bool smu_v13_0_6_is_link_reset_supported(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; @@ -3680,7 +3675,6 @@ static const struct pptable_funcs smu_v13_0_6_ppt_funcs = { .get_pm_metrics = smu_v13_0_6_get_pm_metrics, .get_thermal_temperature_range = smu_v13_0_6_get_thermal_temperature_range, .mode1_reset_is_support = smu_v13_0_6_is_mode1_reset_supported, - .mode2_reset_is_support = smu_v13_0_6_is_mode2_reset_supported, .link_reset_is_support = smu_v13_0_6_is_link_reset_supported, .mode1_reset = smu_v13_0_6_mode1_reset, .mode2_reset = smu_v13_0_6_mode2_reset, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h index 5313206ae4bb..d151bcd0cca7 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h @@ -74,4 +74,12 @@ extern void smu_v13_0_6_set_ppt_funcs(struct smu_context *smu); bool smu_v13_0_6_cap_supported(struct smu_context *smu, enum smu_v13_0_6_caps cap); int smu_v13_0_6_get_static_metrics_table(struct smu_context *smu); +bool smu_v13_0_12_is_dpm_running(struct smu_context *smu); +int smu_v13_0_12_get_max_metrics_size(void); +int smu_v13_0_12_setup_driver_pptable(struct smu_context *smu); +int smu_v13_0_12_get_smu_metrics_data(struct smu_context *smu, + MetricsMember_t member, uint32_t *value); +ssize_t smu_v13_0_12_get_gpu_metrics(struct smu_context *smu, void **table); +extern const struct cmn2asic_mapping smu_v13_0_12_feature_mask_map[]; +extern const struct cmn2asic_msg_mapping smu_v13_0_12_message_map[]; #endif diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c index 11a492f21157..51a3e0fc2f56 100644 --- a/drivers/gpu/drm/radeon/cik.c +++ b/drivers/gpu/drm/radeon/cik.c @@ -8548,7 +8548,7 @@ int cik_suspend(struct radeon_device *rdev) */ int cik_init(struct radeon_device *rdev) { - struct radeon_ring *ring; + struct radeon_ring *ring, *ring_cp1, *ring_cp2; int r; /* Read BIOS */ @@ -8623,19 +8623,22 @@ int cik_init(struct radeon_device *rdev) ring->ring_obj = NULL; r600_ring_init(rdev, ring, 1024 * 1024); - ring = 
&rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX]; - ring->ring_obj = NULL; - r600_ring_init(rdev, ring, 1024 * 1024); - r = radeon_doorbell_get(rdev, &ring->doorbell_index); + ring_cp1 = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX]; + ring_cp2 = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX]; + ring_cp1->ring_obj = NULL; + ring_cp2->ring_obj = NULL; + ring_cp1->doorbell_index = RADEON_MAX_DOORBELLS; + ring_cp2->doorbell_index = RADEON_MAX_DOORBELLS; + + r600_ring_init(rdev, ring_cp1, 1024 * 1024); + r = radeon_doorbell_get(rdev, &ring_cp1->doorbell_index); if (r) return r; - ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX]; - ring->ring_obj = NULL; - r600_ring_init(rdev, ring, 1024 * 1024); - r = radeon_doorbell_get(rdev, &ring->doorbell_index); + r600_ring_init(rdev, ring_cp2, 1024 * 1024); + r = radeon_doorbell_get(rdev, &ring_cp2->doorbell_index); if (r) - return r; + goto out; ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX]; ring->ring_obj = NULL; @@ -8653,12 +8656,16 @@ int cik_init(struct radeon_device *rdev) r = r600_pcie_gart_init(rdev); if (r) - return r; + goto out; rdev->accel_working = true; r = cik_startup(rdev); if (r) { dev_err(rdev->dev, "disabling GPU acceleration\n"); + radeon_doorbell_free(rdev, ring_cp1->doorbell_index); + radeon_doorbell_free(rdev, ring_cp2->doorbell_index); + ring_cp1->doorbell_index = RADEON_MAX_DOORBELLS; + ring_cp2->doorbell_index = RADEON_MAX_DOORBELLS; cik_cp_fini(rdev); cik_sdma_fini(rdev); cik_irq_fini(rdev); @@ -8678,10 +8685,16 @@ int cik_init(struct radeon_device *rdev) */ if (!rdev->mc_fw && !(rdev->flags & RADEON_IS_IGP)) { DRM_ERROR("radeon: MC ucode required for NI+.\n"); - return -EINVAL; + r = -EINVAL; + goto out; } return 0; + +out: + radeon_doorbell_free(rdev, ring_cp1->doorbell_index); + radeon_doorbell_free(rdev, ring_cp2->doorbell_index); + return r; } /** @@ -8695,6 +8708,7 @@ int cik_init(struct radeon_device *rdev) */ void cik_fini(struct radeon_device *rdev) { + struct radeon_ring *ring; radeon_pm_fini(rdev); cik_cp_fini(rdev); cik_sdma_fini(rdev); @@ -8708,6 +8722,10 @@ void cik_fini(struct radeon_device *rdev) radeon_ib_pool_fini(rdev); radeon_irq_kms_fini(rdev); uvd_v1_0_fini(rdev); + ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX]; + radeon_doorbell_free(rdev, ring->doorbell_index); + ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX]; + radeon_doorbell_free(rdev, ring->doorbell_index); radeon_uvd_fini(rdev); radeon_vce_fini(rdev); cik_pcie_gart_fini(rdev); |
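For reference, the dmub_srv_update_inbox_status() helper exported above reduces to ring-buffer read-pointer accounting with wrap-around. The stand-alone C sketch below illustrates only that arithmetic; struct inbox_state, update_inbox_status() and the DMUB_RB_CMD_SIZE value are simplified stand-ins invented for illustration, not the driver's real types or API.

#include <stdint.h>
#include <stdio.h>

#define DMUB_RB_CMD_SIZE 64 /* one ring-buffer command slot, in bytes (illustrative) */

/* Simplified stand-in for the driver's inbox1 bookkeeping. */
struct inbox_state {
	uint32_t capacity;     /* ring size in bytes */
	uint32_t rptr;         /* last read pointer we accounted for */
	uint64_t num_reported; /* total commands the firmware has consumed */
};

/*
 * Fold a newly sampled hardware read pointer into the running count of
 * consumed commands, handling the case where the ring wrapped since the
 * last sample. Returns -1 for an out-of-range pointer, mirroring the
 * DMUB_STATUS_HW_FAILURE case.
 */
static int update_inbox_status(struct inbox_state *ib, uint32_t hw_rptr)
{
	if (hw_rptr > ib->capacity)
		return -1;

	if (ib->rptr > hw_rptr) /* ring wrapped since last sample */
		ib->num_reported += (hw_rptr + ib->capacity - ib->rptr) / DMUB_RB_CMD_SIZE;
	else
		ib->num_reported += (hw_rptr - ib->rptr) / DMUB_RB_CMD_SIZE;

	ib->rptr = hw_rptr;
	return 0;
}

int main(void)
{
	struct inbox_state ib = { .capacity = 1024, .rptr = 960 };

	/* HW consumed two commands: 960 -> 1024 wraps to 0, then 0 -> 64. */
	update_inbox_status(&ib, 64);
	printf("commands consumed: %llu\n",
	       (unsigned long long)ib.num_reported);
	return 0;
}

With a 1024-byte ring and a previous read pointer of 960, a new hardware read pointer of 64 wraps once and accounts for two 64-byte commands, which is exactly the branch the rptr comparison selects.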
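The cik_init()/cik_fini() changes above pair every radeon_doorbell_get() with a radeon_doorbell_free() on the failure paths via a single goto-based unwind, pre-seeding the index with the RADEON_MAX_DOORBELLS sentinel so freeing an unacquired slot stays harmless. A condensed sketch of that idiom, with hypothetical acquire_slot()/release_slot()/start_hw() helpers standing in for the radeon calls:

#include <stdio.h>

#define INVALID_SLOT (-1) /* stand-in for the RADEON_MAX_DOORBELLS sentinel */

/* Hypothetical helpers standing in for radeon_doorbell_get()/_free(). */
static int acquire_slot(int *slot) { *slot = 42; return 0; }
static void release_slot(int slot) { (void)slot; /* must be a no-op for the sentinel */ }
static int start_hw(void) { return -1; /* force the failure path */ }

static int device_init(void)
{
	int slot1 = INVALID_SLOT, slot2 = INVALID_SLOT;
	int r;

	r = acquire_slot(&slot1);
	if (r)
		return r; /* nothing to unwind yet */

	r = acquire_slot(&slot2);
	if (r)
		goto out;

	r = start_hw();
	if (r)
		goto out;

	return 0;

out:
	/* Free both slots unconditionally; the sentinel makes this safe,
	 * which is why the driver pre-seeds the doorbell indices. */
	release_slot(slot1);
	release_slot(slot2);
	return r;
}

int main(void)
{
	printf("init: %d\n", device_init()); /* prints init: -1 */
	return 0;
}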