Diffstat (limited to 'drivers/gpu/drm/amd')
183 files changed, 2316 insertions, 1283 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index d58e74ae8ade..2afecc55090f 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -104,7 +104,8 @@ amdgpu-y += \
 amdgpu-y += \
 	df_v1_7.o \
 	df_v3_6.o \
-	df_v4_3.o
+	df_v4_3.o \
+	df_v4_6_2.o
 
 # add GMC block
 amdgpu-y += \
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 8df702eaa2ad..8b4ca2576a3d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -363,9 +363,6 @@ struct amdgpu_ip_block_version {
 	const struct amd_ip_funcs *funcs;
 };
 
-#define HW_REV(_Major, _Minor, _Rev) \
-	((((uint32_t) (_Major)) << 16) | ((uint32_t) (_Minor) << 8) | ((uint32_t) (_Rev)))
-
 struct amdgpu_ip_block {
 	struct amdgpu_ip_block_status status;
 	const struct amdgpu_ip_block_version *version;
@@ -1119,6 +1116,13 @@ static inline uint32_t amdgpu_ip_version(const struct amdgpu_device *adev,
 	return adev->ip_versions[ip][inst] & ~0xFFU;
 }
 
+static inline uint32_t amdgpu_ip_version_full(const struct amdgpu_device *adev,
+					      uint8_t ip, uint8_t inst)
+{
+	/* This returns full version - major/minor/rev/variant/subrevision */
+	return adev->ip_versions[ip][inst];
+}
+
 static inline struct amdgpu_device *drm_to_adev(struct drm_device *ddev)
 {
 	return container_of(ddev, struct amdgpu_device, ddev);
@@ -1333,9 +1337,7 @@ void amdgpu_device_pci_config_reset(struct amdgpu_device *adev);
 int amdgpu_device_pci_reset(struct amdgpu_device *adev);
 bool amdgpu_device_need_post(struct amdgpu_device *adev);
 bool amdgpu_device_seamless_boot_supported(struct amdgpu_device *adev);
-bool amdgpu_device_pcie_dynamic_switching_supported(void);
 bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev);
-bool amdgpu_device_aspm_support_quirk(void);
 
 void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes,
 				  u64 num_vis_bytes);
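The new amdgpu_ip_version_full() above returns the raw packed value, while the existing amdgpu_ip_version() masks off the low byte before comparing against IP_VERSION(). A minimal sketch of the packing this implies; the exact field widths are an assumption inferred from the `& ~0xFFU` mask, not shown in this hunk:

    /* assumed layout: major/minor/rev in the upper 24 bits, variant and
     * sub-revision sharing the low byte that amdgpu_ip_version() masks off
     */
    #define IP_VERSION_FULL(mj, mn, rv, var, srev) \
    	(((mj) << 24) | ((mn) << 16) | ((rv) << 8) | ((var) << 4) | (srev))
    #define IP_VERSION(mj, mn, rv) IP_VERSION_FULL(mj, mn, rv, 0, 0)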
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
index 2bca37044ad0..d62e49758635 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
@@ -68,7 +68,7 @@ struct amdgpu_acpi_xcc_info {
 struct amdgpu_acpi_dev_info {
 	struct list_head list;
 	struct list_head xcc_list;
-	uint16_t bdf;
+	uint32_t sbdf;
 	uint16_t supp_xcp_mode;
 	uint16_t xcp_mode;
 	uint16_t mem_mode;
@@ -927,7 +927,7 @@ static acpi_status amdgpu_acpi_get_node_id(acpi_handle handle,
 #endif
 }
 
-static struct amdgpu_acpi_dev_info *amdgpu_acpi_get_dev(u16 bdf)
+static struct amdgpu_acpi_dev_info *amdgpu_acpi_get_dev(u32 sbdf)
 {
 	struct amdgpu_acpi_dev_info *acpi_dev;
 
@@ -935,14 +935,14 @@ static struct amdgpu_acpi_dev_info *amdgpu_acpi_get_dev(u16 bdf)
 		return NULL;
 
 	list_for_each_entry(acpi_dev, &amdgpu_acpi_dev_list, list)
-		if (acpi_dev->bdf == bdf)
+		if (acpi_dev->sbdf == sbdf)
 			return acpi_dev;
 
 	return NULL;
 }
 
 static int amdgpu_acpi_dev_init(struct amdgpu_acpi_dev_info **dev_info,
-				struct amdgpu_acpi_xcc_info *xcc_info, u16 bdf)
+				struct amdgpu_acpi_xcc_info *xcc_info, u32 sbdf)
 {
 	struct amdgpu_acpi_dev_info *tmp;
 	union acpi_object *obj;
@@ -955,7 +955,7 @@ static int amdgpu_acpi_dev_init(struct amdgpu_acpi_dev_info **dev_info,
 
 	INIT_LIST_HEAD(&tmp->xcc_list);
 	INIT_LIST_HEAD(&tmp->list);
-	tmp->bdf = bdf;
+	tmp->sbdf = sbdf;
 
 	obj = acpi_evaluate_dsm_typed(xcc_info->handle, &amd_xcc_dsm_guid, 0,
 				      AMD_XCC_DSM_GET_SUPP_MODE, NULL,
@@ -1007,7 +1007,7 @@ static int amdgpu_acpi_dev_init(struct amdgpu_acpi_dev_info **dev_info,
 
 	DRM_DEBUG_DRIVER(
 		"New dev(%x): Supported xcp mode: %x curr xcp_mode : %x mem mode : %x, tmr base: %llx tmr size: %llx ",
-		tmp->bdf, tmp->supp_xcp_mode, tmp->xcp_mode, tmp->mem_mode,
+		tmp->sbdf, tmp->supp_xcp_mode, tmp->xcp_mode, tmp->mem_mode,
 		tmp->tmr_base, tmp->tmr_size);
 	list_add_tail(&tmp->list, &amdgpu_acpi_dev_list);
 	*dev_info = tmp;
@@ -1023,7 +1023,7 @@ out:
 }
 
 static int amdgpu_acpi_get_xcc_info(struct amdgpu_acpi_xcc_info *xcc_info,
-				    u16 *bdf)
+				    u32 *sbdf)
 {
 	union acpi_object *obj;
 	acpi_status status;
@@ -1054,8 +1054,10 @@ static int amdgpu_acpi_get_xcc_info(struct amdgpu_acpi_xcc_info *xcc_info,
 	xcc_info->phy_id = (obj->integer.value >> 32) & 0xFF;
 	/* xcp node of this xcc [47:40] */
 	xcc_info->xcp_node = (obj->integer.value >> 40) & 0xFF;
+	/* PF domain of this xcc [31:16] */
+	*sbdf = (obj->integer.value) & 0xFFFF0000;
 	/* PF bus/dev/fn of this xcc [63:48] */
-	*bdf = (obj->integer.value >> 48) & 0xFFFF;
+	*sbdf |= (obj->integer.value >> 48) & 0xFFFF;
 	ACPI_FREE(obj);
 	obj = NULL;
 
@@ -1079,7 +1081,7 @@ static int amdgpu_acpi_enumerate_xcc(void)
 	struct acpi_device *acpi_dev;
 	char hid[ACPI_ID_LEN];
 	int ret, id;
-	u16 bdf;
+	u32 sbdf;
 
 	INIT_LIST_HEAD(&amdgpu_acpi_dev_list);
 	xa_init(&numa_info_xa);
@@ -1107,16 +1109,16 @@ static int amdgpu_acpi_enumerate_xcc(void)
 		xcc_info->handle = acpi_device_handle(acpi_dev);
 		acpi_dev_put(acpi_dev);
 
-		ret = amdgpu_acpi_get_xcc_info(xcc_info, &bdf);
+		ret = amdgpu_acpi_get_xcc_info(xcc_info, &sbdf);
 		if (ret) {
 			kfree(xcc_info);
 			continue;
 		}
 
-		dev_info = amdgpu_acpi_get_dev(bdf);
+		dev_info = amdgpu_acpi_get_dev(sbdf);
 
 		if (!dev_info)
-			ret = amdgpu_acpi_dev_init(&dev_info, xcc_info, bdf);
+			ret = amdgpu_acpi_dev_init(&dev_info, xcc_info, sbdf);
 
 		if (ret == -ENOMEM)
 			return ret;
@@ -1136,13 +1138,14 @@ int amdgpu_acpi_get_tmr_info(struct amdgpu_device *adev, u64 *tmr_offset,
 			     u64 *tmr_size)
 {
 	struct amdgpu_acpi_dev_info *dev_info;
-	u16 bdf;
+	u32 sbdf;
 
 	if (!tmr_offset || !tmr_size)
 		return -EINVAL;
 
-	bdf = pci_dev_id(adev->pdev);
-	dev_info = amdgpu_acpi_get_dev(bdf);
+	sbdf = (pci_domain_nr(adev->pdev->bus) << 16);
+	sbdf |= pci_dev_id(adev->pdev);
+	dev_info = amdgpu_acpi_get_dev(sbdf);
 	if (!dev_info)
 		return -ENOENT;
 
@@ -1157,13 +1160,14 @@ int amdgpu_acpi_get_mem_info(struct amdgpu_device *adev, int xcc_id,
 {
 	struct amdgpu_acpi_dev_info *dev_info;
 	struct amdgpu_acpi_xcc_info *xcc_info;
-	u16 bdf;
+	u32 sbdf;
 
 	if (!numa_info)
 		return -EINVAL;
 
-	bdf = pci_dev_id(adev->pdev);
-	dev_info = amdgpu_acpi_get_dev(bdf);
+	sbdf = (pci_domain_nr(adev->pdev->bus) << 16);
+	sbdf |= pci_dev_id(adev->pdev);
+	dev_info = amdgpu_acpi_get_dev(sbdf);
 	if (!dev_info)
 		return -ENOENT;
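The bdf-to-sbdf conversion above widens the lookup key from 16 to 32 bits so the PCI segment (domain) disambiguates otherwise identical bus/device/function numbers on multi-segment systems. A sketch of the packing, using only kernel PCI helpers that appear in the hunk; note pci_dev_id() itself returns (bus << 8) | devfn:

    /* SBDF layout: segment in bits 31:16, bus in 15:8, dev/fn in 7:0 */
    static u32 sbdf_from_pdev(struct pci_dev *pdev)
    {
    	return (pci_domain_nr(pdev->bus) << 16) | pci_dev_id(pdev);
    }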
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index bd1d4b2b6b16..23448359838b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -425,6 +425,32 @@ validate_fail:
 	return ret;
 }
 
+static int amdgpu_amdkfd_bo_validate_and_fence(struct amdgpu_bo *bo,
+					       uint32_t domain,
+					       struct dma_fence *fence)
+{
+	int ret = amdgpu_bo_reserve(bo, false);
+
+	if (ret)
+		return ret;
+
+	ret = amdgpu_amdkfd_bo_validate(bo, domain, true);
+	if (ret)
+		goto unreserve_out;
+
+	ret = dma_resv_reserve_fences(bo->tbo.base.resv, 1);
+	if (ret)
+		goto unreserve_out;
+
+	dma_resv_add_fence(bo->tbo.base.resv, fence,
+			   DMA_RESV_USAGE_BOOKKEEP);
+
+unreserve_out:
+	amdgpu_bo_unreserve(bo);
+
+	return ret;
+}
+
 static int amdgpu_amdkfd_validate_vm_bo(void *_unused, struct amdgpu_bo *bo)
 {
 	return amdgpu_amdkfd_bo_validate(bo, bo->allowed_domains, false);
@@ -1784,6 +1810,15 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
 		}
 		bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
 		bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
+	} else {
+		mutex_lock(&avm->process_info->lock);
+		if (avm->process_info->eviction_fence &&
+		    !dma_fence_is_signaled(&avm->process_info->eviction_fence->base))
+			ret = amdgpu_amdkfd_bo_validate_and_fence(bo, domain,
+				&avm->process_info->eviction_fence->base);
+		mutex_unlock(&avm->process_info->lock);
+		if (ret)
+			goto err_validate_bo;
 	}
 
 	if (offset)
@@ -1793,6 +1828,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
 
 allocate_init_user_pages_failed:
 err_pin_bo:
+err_validate_bo:
 	remove_kgd_mem_from_kfd_bo_list(*mem, avm->process_info);
 	drm_vma_node_revoke(&gobj->vma_node, drm_priv);
 err_node_allow:
@@ -1866,10 +1902,6 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
 	if (unlikely(ret))
 		return ret;
 
-	/* The eviction fence should be removed by the last unmap.
-	 * TODO: Log an error condition if the bo still has the eviction fence
-	 * attached
-	 */
 	amdgpu_amdkfd_remove_eviction_fence(mem->bo,
 					process_info->eviction_fence);
 	pr_debug("Release VA 0x%llx - 0x%llx\n", mem->va,
@@ -1998,19 +2030,6 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
 	if (unlikely(ret))
 		goto out_unreserve;
 
-	if (mem->mapped_to_gpu_memory == 0 &&
-	    !amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
-		/* Validate BO only once. The eviction fence gets added to BO
-		 * the first time it is mapped. Validate will wait for all
-		 * background evictions to complete.
-		 */
-		ret = amdgpu_amdkfd_bo_validate(bo, domain, true);
-		if (ret) {
-			pr_debug("Validate failed\n");
-			goto out_unreserve;
-		}
-	}
-
 	list_for_each_entry(entry, &mem->attachments, list) {
 		if (entry->bo_va->base.vm != avm || entry->is_mapped)
 			continue;
@@ -2037,10 +2056,6 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
 			 mem->mapped_to_gpu_memory);
 	}
 
-	if (!amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) && !bo->tbo.pin_count)
-		dma_resv_add_fence(bo->tbo.base.resv,
-				   &avm->process_info->eviction_fence->base,
-				   DMA_RESV_USAGE_BOOKKEEP);
 	ret = unreserve_bo_and_vms(&ctx, false, false);
 
 	goto out;
@@ -2074,7 +2089,6 @@ int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
 		struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv)
 {
 	struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
-	struct amdkfd_process_info *process_info = avm->process_info;
 	unsigned long bo_size = mem->bo->tbo.base.size;
 	struct kfd_mem_attachment *entry;
 	struct bo_vm_reservation_context ctx;
@@ -2115,15 +2129,6 @@ int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
 			 mem->mapped_to_gpu_memory);
 	}
 
-	/* If BO is unmapped from all VMs, unfence it. It can be evicted if
-	 * required.
-	 */
-	if (mem->mapped_to_gpu_memory == 0 &&
-	    !amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm) &&
-	    !mem->bo->tbo.pin_count)
-		amdgpu_amdkfd_remove_eviction_fence(mem->bo,
-						process_info->eviction_fence);
-
 unreserve_out:
 	unreserve_bo_and_vms(&ctx, false, false);
 out:
@@ -2351,8 +2356,20 @@ int amdgpu_amdkfd_gpuvm_import_dmabuf(struct amdgpu_device *adev,
 	amdgpu_sync_create(&(*mem)->sync);
 	(*mem)->is_imported = true;
 
+	mutex_lock(&avm->process_info->lock);
+	if (avm->process_info->eviction_fence &&
+	    !dma_fence_is_signaled(&avm->process_info->eviction_fence->base))
+		ret = amdgpu_amdkfd_bo_validate_and_fence(bo, (*mem)->domain,
+				&avm->process_info->eviction_fence->base);
+	mutex_unlock(&avm->process_info->lock);
+	if (ret)
+		goto err_remove_mem;
+
 	return 0;
 
+err_remove_mem:
+	remove_kgd_mem_from_kfd_bo_list(*mem, avm->process_info);
+	drm_vma_node_revoke(&obj->vma_node, drm_priv);
 err_free_mem:
 	kfree(*mem);
 err_put_obj:
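The new helper above moves eviction-fence attachment from first-map time to allocation/import time, and it follows the standard dma_resv rule that a fence slot must be reserved before dma_resv_add_fence() while the reservation lock is held. The generic pattern, shown with hypothetical resv/fence objects:

    /* generic dma_resv fencing pattern mirrored by the helper above */
    dma_resv_lock(resv, NULL);               /* or amdgpu_bo_reserve() */
    ret = dma_resv_reserve_fences(resv, 1);  /* may allocate, can fail */
    if (!ret)
    	dma_resv_add_fence(resv, fence, DMA_RESV_USAGE_BOOKKEEP);
    dma_resv_unlock(resv);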
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
index 38ccec913f00..f3a09ecb7699 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
@@ -29,6 +29,7 @@
 #include "amdgpu.h"
 #include "atom.h"
 
+#include <linux/device.h>
 #include <linux/pci.h>
 #include <linux/slab.h>
 #include <linux/acpi.h>
@@ -287,6 +288,10 @@ static bool amdgpu_atrm_get_bios(struct amdgpu_device *adev)
 	if (adev->flags & AMD_IS_APU)
 		return false;
 
+	/* ATRM is for on-platform devices only */
+	if (dev_is_removable(&adev->pdev->dev))
+		return false;
+
 	while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
 		dhandle = ACPI_HANDLE(&pdev->dev);
 		if (!dhandle)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 122472d88756..0245de81cabd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -1116,6 +1116,11 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
 			return r;
 	}
 
+	/* FIXME: In theory this loop shouldn't be needed any more when
+	 * amdgpu_vm_handle_moved handles all moved BOs that are reserved
+	 * with p->ticket. But removing it caused test regressions, so I'm
+	 * leaving it here for now.
+	 */
 	amdgpu_bo_list_for_each_entry(e, p->bo_list) {
 		bo_va = e->bo_va;
 		if (bo_va == NULL)
@@ -1130,7 +1135,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
 			return r;
 	}
 
-	r = amdgpu_vm_handle_moved(adev, vm);
+	r = amdgpu_vm_handle_moved(adev, vm, &p->exec.ticket);
 	if (r)
 		return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index 3136a0774dd9..a53f436fa9f1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -748,6 +748,9 @@ static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
 	ssize_t result = 0;
 	int r;
 
+	if (!adev->smc_rreg)
+		return -EPERM;
+
 	if (size & 0x3 || *pos & 0x3)
 		return -EINVAL;
 
@@ -804,6 +807,9 @@ static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *
 	ssize_t result = 0;
 	int r;
 
+	if (!adev->smc_wreg)
+		return -EPERM;
+
 	if (size & 0x3 || *pos & 0x3)
 		return -EINVAL;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 9340e8dc0413..fd8cd8e2d3f2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -41,6 +41,7 @@
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_probe_helper.h>
 #include <drm/amdgpu_drm.h>
+#include <linux/device.h>
 #include <linux/vgaarb.h>
 #include <linux/vga_switcheroo.h>
 #include <linux/efi.h>
@@ -1073,6 +1074,8 @@ static int amdgpu_device_asic_init(struct amdgpu_device *adev)
 	    amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 0)) {
 		amdgpu_psp_wait_for_bootloader(adev);
 		ret = amdgpu_atomfirmware_asic_init(adev, true);
+		/* TODO: check the return val and stop device initialization if boot fails */
+		amdgpu_psp_query_boot_status(adev);
 		return ret;
 	} else {
 		return amdgpu_atom_asic_init(adev->mode_info.atom_context);
@@ -1456,14 +1459,14 @@ bool amdgpu_device_seamless_boot_supported(struct amdgpu_device *adev)
 }
 
 /*
- * Intel hosts such as Raptor Lake and Sapphire Rapids don't support dynamic
- * speed switching. Until we have confirmation from Intel that a specific host
- * supports it, it's safer that we keep it disabled for all.
+ * Intel hosts such as Rocket Lake, Alder Lake, Raptor Lake and Sapphire Rapids
+ * don't support dynamic speed switching. Until we have confirmation from Intel
+ * that a specific host supports it, it's safer that we keep it disabled for all.
  *
  * https://edc.intel.com/content/www/us/en/design/products/platforms/details/raptor-lake-s/13th-generation-core-processors-datasheet-volume-1-of-2/005/pci-express-support/
  * https://gitlab.freedesktop.org/drm/amd/-/issues/2663
  */
-bool amdgpu_device_pcie_dynamic_switching_supported(void)
+static bool amdgpu_device_pcie_dynamic_switching_supported(void)
 {
 #if IS_ENABLED(CONFIG_X86)
 	struct cpuinfo_x86 *c = &cpu_data(0);
@@ -1496,20 +1499,13 @@ bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev)
 	default:
 		return false;
 	}
+	if (adev->flags & AMD_IS_APU)
+		return false;
+	if (!(adev->pm.pp_feature & PP_PCIE_DPM_MASK))
+		return false;
 	return pcie_aspm_enabled(adev->pdev);
 }
 
-bool amdgpu_device_aspm_support_quirk(void)
-{
-#if IS_ENABLED(CONFIG_X86)
-	struct cpuinfo_x86 *c = &cpu_data(0);
-
-	return !(c->x86 == 6 && c->x86_model == INTEL_FAM6_ALDERLAKE);
-#else
-	return true;
-#endif
-}
-
 /* if we get transitioned to only one device, take VGA back */
 /**
  * amdgpu_device_vga_set_decode - enable/disable vga decode
@@ -2230,7 +2226,6 @@ out:
  */
 static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
 {
-	struct drm_device *dev = adev_to_drm(adev);
 	struct pci_dev *parent;
 	int i, r;
 	bool total;
@@ -2301,7 +2296,7 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
 		    (amdgpu_is_atpx_hybrid() ||
 		     amdgpu_has_atpx_dgpu_power_cntl()) &&
 		    ((adev->flags & AMD_IS_APU) == 0) &&
-		    !pci_is_thunderbolt_attached(to_pci_dev(dev->dev)))
+		    !dev_is_removable(&adev->pdev->dev))
 			adev->flags |= AMD_IS_PX;
 
 		if (!(adev->flags & AMD_IS_APU)) {
@@ -2315,6 +2310,8 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
 		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
 	if (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_SIENNA_CICHLID)
 		adev->pm.pp_feature &= ~PP_OVERDRIVE_MASK;
+	if (!amdgpu_device_pcie_dynamic_switching_supported())
+		adev->pm.pp_feature &= ~PP_PCIE_DPM_MASK;
 
 	total = true;
 	for (i = 0; i < adev->num_ip_blocks; i++) {
@@ -2492,6 +2489,7 @@ static int amdgpu_device_init_schedulers(struct amdgpu_device *adev)
 		}
 
 		r = drm_sched_init(&ring->sched, &amdgpu_sched_ops,
+				   DRM_SCHED_PRIORITY_COUNT,
 				   ring->num_hw_submission, 0,
 				   timeout, adev->reset_domain->wq,
 				   ring->sched_score, ring->name,
@@ -2661,6 +2659,9 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
 	if (r)
 		goto init_failed;
 
+	if (adev->mman.buffer_funcs_ring->sched.ready)
+		amdgpu_ttm_set_buffer_funcs_status(adev, true);
+
 	/* Don't init kfd if whole hive need to be reset during init */
 	if (!adev->gmc.xgmi.pending_reset) {
 		kgd2kfd_init_zone_device(adev);
@@ -3258,6 +3259,8 @@ int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
 		amdgpu_virt_request_full_gpu(adev, false);
 	}
 
+	amdgpu_ttm_set_buffer_funcs_status(adev, false);
+
 	r = amdgpu_device_ip_suspend_phase1(adev);
 	if (r)
 		return r;
@@ -3447,6 +3450,9 @@ static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
 
 	r = amdgpu_device_ip_resume_phase2(adev);
 
+	if (adev->mman.buffer_funcs_ring->sched.ready)
+		amdgpu_ttm_set_buffer_funcs_status(adev, true);
+
 	return r;
 }
 
@@ -3958,13 +3964,23 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 				}
 			}
 		} else {
-			tmp = amdgpu_reset_method;
-			/* It should do a default reset when loading or reloading the driver,
-			 * regardless of the module parameter reset_method.
-			 */
-			amdgpu_reset_method = AMD_RESET_METHOD_NONE;
-			r = amdgpu_asic_reset(adev);
-			amdgpu_reset_method = tmp;
+			switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
+			case IP_VERSION(13, 0, 0):
+			case IP_VERSION(13, 0, 7):
+			case IP_VERSION(13, 0, 10):
+				r = psp_gpu_reset(adev);
+				break;
+			default:
+				tmp = amdgpu_reset_method;
+				/* It should do a default reset when loading or reloading the driver,
+				 * regardless of the module parameter reset_method.
+				 */
+				amdgpu_reset_method = AMD_RESET_METHOD_NONE;
+				r = amdgpu_asic_reset(adev);
+				amdgpu_reset_method = tmp;
+				break;
+			}
+
 			if (r) {
 				dev_err(adev->dev, "asic reset on init failed\n");
 				goto failed;
@@ -4128,7 +4144,7 @@ fence_driver_init:
 
 	px = amdgpu_device_supports_px(ddev);
 
-	if (px || (!pci_is_thunderbolt_attached(adev->pdev) &&
+	if (px || (!dev_is_removable(&adev->pdev->dev) &&
 				apple_gmux_detect(NULL, NULL)))
 		vga_switcheroo_register_client(adev->pdev,
 					       &amdgpu_switcheroo_ops, px);
@@ -4234,6 +4250,8 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev)
 	/* disable ras feature must before hw fini */
 	amdgpu_ras_pre_fini(adev);
 
+	amdgpu_ttm_set_buffer_funcs_status(adev, false);
+
 	amdgpu_device_ip_fini_early(adev);
 
 	amdgpu_irq_fini_hw(adev);
@@ -4276,7 +4294,7 @@ void amdgpu_device_fini_sw(struct amdgpu_device *adev)
 
 	px = amdgpu_device_supports_px(adev_to_drm(adev));
 
-	if (px || (!pci_is_thunderbolt_attached(adev->pdev) &&
+	if (px || (!dev_is_removable(&adev->pdev->dev) &&
 				apple_gmux_detect(NULL, NULL)))
 		vga_switcheroo_unregister_client(adev->pdev);
 
@@ -4405,6 +4423,8 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
 
 	amdgpu_ras_suspend(adev);
 
+	amdgpu_ttm_set_buffer_funcs_status(adev, false);
+
 	amdgpu_device_ip_suspend_phase1(adev);
 
 	if (!adev->in_s0ix)
@@ -5176,6 +5196,9 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle,
 				if (r)
 					goto out;
 
+				if (tmp_adev->mman.buffer_funcs_ring->sched.ready)
+					amdgpu_ttm_set_buffer_funcs_status(tmp_adev, true);
+
 				if (vram_lost)
 					amdgpu_device_fill_reset_magic(tmp_adev);
 
@@ -5555,10 +5578,6 @@ skip_hw_reset:
 			drm_sched_start(&ring->sched, true);
 		}
 
-		if (adev->enable_mes &&
-		    amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(11, 0, 3))
-			amdgpu_mes_self_test(tmp_adev);
-
 		if (!drm_drv_uses_atomic_modeset(adev_to_drm(tmp_adev)) && !job_signaled)
 			drm_helper_resume_force_mode(adev_to_drm(tmp_adev));
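Only the change to static is visible for amdgpu_device_pcie_dynamic_switching_supported() above; its body is unchanged by this diff. To the best of my knowledge it is a blanket Intel-host check along these lines (a sketch, not taken from this diff):

    static bool amdgpu_device_pcie_dynamic_switching_supported(void)
    {
    #if IS_ENABLED(CONFIG_X86)
    	struct cpuinfo_x86 *c = &cpu_data(0);

    	/* assumed body: disable for all Intel hosts, per the comment above */
    	if (c->x86_vendor == X86_VENDOR_INTEL)
    		return false;
    #endif
    	return true;
    }

With the function now static, callers outside this file instead observe the result through the PP_PCIE_DPM_MASK bit that the early-init hunk clears.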
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
index b6cddcad122f..0431eafa86b5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
@@ -35,6 +35,7 @@
 #include "df_v1_7.h"
 #include "df_v3_6.h"
 #include "df_v4_3.h"
+#include "df_v4_6_2.h"
 #include "nbio_v6_1.h"
 #include "nbio_v7_0.h"
 #include "nbio_v7_4.h"
@@ -98,6 +99,7 @@ MODULE_FIRMWARE(FIRMWARE_IP_DISCOVERY);
 
 #define mmRCC_CONFIG_MEMSIZE	0xde3
 
+#define mmMP0_SMN_C2PMSG_33	0x16061
 #define mmMM_INDEX		0x0
 #define mmMM_INDEX_HI		0x6
 #define mmMM_DATA		0x1
@@ -238,8 +240,26 @@ static int amdgpu_discovery_read_binary_from_sysmem(struct amdgpu_device *adev,
 static int amdgpu_discovery_read_binary_from_mem(struct amdgpu_device *adev,
 						 uint8_t *binary)
 {
-	uint64_t vram_size = (uint64_t)RREG32(mmRCC_CONFIG_MEMSIZE) << 20;
-	int ret = 0;
+	uint64_t vram_size;
+	u32 msg;
+	int i, ret = 0;
+
+	/* It can take up to a second for IFWI init to complete on some dGPUs,
+	 * but generally it should be in the 60-100ms range.  Normally this starts
+	 * as soon as the device gets power so by the time the OS loads this has long
+	 * completed.  However, when a card is hotplugged via e.g., USB4, we need to
+	 * wait for this to complete.  Once the C2PMSG is updated, we can
+	 * continue.
+	 */
+	if (dev_is_removable(&adev->pdev->dev)) {
+		for (i = 0; i < 1000; i++) {
+			msg = RREG32(mmMP0_SMN_C2PMSG_33);
+			if (msg & 0x80000000)
+				break;
+			msleep(1);
+		}
+	}
+	vram_size = (uint64_t)RREG32(mmRCC_CONFIG_MEMSIZE) << 20;
 
 	if (vram_size) {
 		uint64_t pos = vram_size - DISCOVERY_TMR_OFFSET;
@@ -1487,7 +1507,7 @@ static int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev)
 		adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->v2.gc_num_sc_per_se) /
 			le32_to_cpu(gc_info->v2.gc_num_sh_per_se);
 		adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->v2.gc_num_packer_per_sc);
-		if (le16_to_cpu(gc_info->v2.header.version_minor == 1)) {
+		if (le16_to_cpu(gc_info->v2.header.version_minor) == 1) {
 			adev->gfx.config.gc_num_tcp_per_sa = le32_to_cpu(gc_info->v2_1.gc_num_tcp_per_sh);
 			adev->gfx.config.gc_tcp_size_per_cu = le32_to_cpu(gc_info->v2_1.gc_tcp_size_per_cu);
 			adev->gfx.config.gc_num_sdp_interface = le32_to_cpu(gc_info->v2_1.gc_num_sdp_interface); /* per XCD */
@@ -2448,6 +2468,9 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
 	if (amdgpu_ip_version(adev, XGMI_HWIP, 0) == IP_VERSION(4, 8, 0))
 		adev->gmc.xgmi.supported = true;
 
+	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3))
+		adev->ip_versions[XGMI_HWIP][0] = IP_VERSION(6, 4, 0);
+
 	/* set NBIO version */
 	switch (amdgpu_ip_version(adev, NBIO_HWIP, 0)) {
 	case IP_VERSION(6, 1, 0):
@@ -2559,6 +2582,9 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
 	case IP_VERSION(4, 3, 0):
 		adev->df.funcs = &df_v4_3_funcs;
 		break;
+	case IP_VERSION(4, 6, 2):
+		adev->df.funcs = &df_v4_6_2_funcs;
+		break;
 	default:
 		break;
 	}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
index b5e28fa3f414..e7e87a3b2601 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
@@ -409,7 +409,7 @@ amdgpu_dma_buf_move_notify(struct dma_buf_attachment *attach)
 		if (!r)
 			r = amdgpu_vm_clear_freed(adev, vm, NULL);
 		if (!r)
-			r = amdgpu_vm_handle_moved(adev, vm);
+			r = amdgpu_vm_handle_moved(adev, vm, ticket);
 
 		if (r && r != -EBUSY)
 			DRM_ERROR("Failed to invalidate VM page tables (%d))\n",
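The gc_info fix in the discovery hunk above corrects a misplaced parenthesis: the old code compared the raw little-endian field against 1 and then byte-swapped the boolean result, so the check misfires on big-endian hosts, where the stored __le16 value 1 reads back as 0x0100. A two-line demonstration of the difference:

    __le16 v = cpu_to_le16(1);
    if (le16_to_cpu(v == 1))  /* wrong: compares the raw LE value, swaps the bool */
    	;
    if (le16_to_cpu(v) == 1)  /* right: swap the field first, then compare */
    	;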
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 6cc6e3991410..3095a3a864af 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -2041,6 +2041,14 @@ static const struct pci_device_id pciidlist[] = {
 
 MODULE_DEVICE_TABLE(pci, pciidlist);
 
+static const struct amdgpu_asic_type_quirk asic_type_quirks[] = {
+	/* differentiate between P10 and P11 asics with the same DID */
+	{0x67FF, 0xE3, CHIP_POLARIS10},
+	{0x67FF, 0xE7, CHIP_POLARIS10},
+	{0x67FF, 0xF3, CHIP_POLARIS10},
+	{0x67FF, 0xF7, CHIP_POLARIS10},
+};
+
 static const struct drm_driver amdgpu_kms_driver;
 
 static void amdgpu_get_secondary_funcs(struct amdgpu_device *adev)
@@ -2083,6 +2091,22 @@ static void amdgpu_init_debug_options(struct amdgpu_device *adev)
 	}
 }
 
+static unsigned long amdgpu_fix_asic_type(struct pci_dev *pdev, unsigned long flags)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(asic_type_quirks); i++) {
+		if (pdev->device == asic_type_quirks[i].device &&
+		    pdev->revision == asic_type_quirks[i].revision) {
+			flags &= ~AMD_ASIC_MASK;
+			flags |= asic_type_quirks[i].type;
+			break;
+		}
+	}
+
+	return flags;
+}
+
 static int amdgpu_pci_probe(struct pci_dev *pdev,
 			    const struct pci_device_id *ent)
 {
@@ -2110,15 +2134,8 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
 			 "See modparam exp_hw_support\n");
 		return -ENODEV;
 	}
-	/* differentiate between P10 and P11 asics with the same DID */
-	if (pdev->device == 0x67FF &&
-	    (pdev->revision == 0xE3 ||
-	     pdev->revision == 0xE7 ||
-	     pdev->revision == 0xF3 ||
-	     pdev->revision == 0xF7)) {
-		flags &= ~AMD_ASIC_MASK;
-		flags |= CHIP_POLARIS10;
-	}
+
+	flags = amdgpu_fix_asic_type(pdev, flags);
 
 	/* Due to hardware bugs, S/G Display on raven requires a 1:1 IOMMU mapping,
 	 * however, SME requires an indirect IOMMU mapping because the encryption
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c
index cba7e6cdc7cc..5706b282a0c7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c
@@ -56,21 +56,15 @@ static const char *amdgpu_ip_name[AMDGPU_HW_IP_NUM] = {
 
 void amdgpu_show_fdinfo(struct drm_printer *p, struct drm_file *file)
 {
-	struct amdgpu_device *adev = drm_to_adev(file->minor->dev);
 	struct amdgpu_fpriv *fpriv = file->driver_priv;
 	struct amdgpu_vm *vm = &fpriv->vm;
 
 	struct amdgpu_mem_stats stats;
 	ktime_t usage[AMDGPU_HW_IP_NUM];
-	uint32_t bus, dev, fn, domain;
 	unsigned int hw_ip;
 	int ret;
 
 	memset(&stats, 0, sizeof(stats));
-	bus = adev->pdev->bus->number;
-	domain = pci_domain_nr(adev->pdev->bus);
-	dev = PCI_SLOT(adev->pdev->devfn);
-	fn = PCI_FUNC(adev->pdev->devfn);
 
 	ret = amdgpu_bo_reserve(vm->root.bo, false);
 	if (ret)
@@ -88,9 +82,6 @@ void amdgpu_show_fdinfo(struct drm_printer *p, struct drm_file *file)
 	 */
 
 	drm_printf(p, "pasid:\t%u\n", fpriv->vm.pasid);
-	drm_printf(p, "drm-driver:\t%s\n", file->minor->dev->driver->name);
-	drm_printf(p, "drm-pdev:\t%04x:%02x:%02x.%d\n", domain, bus, dev, fn);
-	drm_printf(p, "drm-client-id:\t%llu\n", vm->immediate.fence_context);
 	drm_printf(p, "drm-memory-vram:\t%llu KiB\n", stats.vram/1024UL);
 	drm_printf(p, "drm-memory-gtt: \t%llu KiB\n", stats.gtt/1024UL);
 	drm_printf(p, "drm-memory-cpu: \t%llu KiB\n", stats.cpu/1024UL);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index c92e0aba69e1..a2a29dcb2422 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -385,9 +385,11 @@ int amdgpu_gfx_mqd_sw_init(struct amdgpu_device *adev,
 	struct amdgpu_ring *ring = &kiq->ring;
 	u32 domain = AMDGPU_GEM_DOMAIN_GTT;
 
+#if !defined(CONFIG_ARM) && !defined(CONFIG_ARM64)
 	/* Only enable on gfx10 and 11 for now to avoid changing behavior on older chips */
 	if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(10, 0, 0))
 		domain |= AMDGPU_GEM_DOMAIN_VRAM;
+#endif
 
 	/* create MQD for KIQ */
 	if (!adev->enable_mes_kiq && !ring->mqd_obj) {
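The quirk-table refactor in amdgpu_drv.c relies on struct amdgpu_asic_type_quirk, which is not defined anywhere in this diff. A definition consistent with the {device, revision, type} initializers and the lookup above would be (an assumption, not quoted from the patch):

    /* assumed shape, matching the asic_type_quirks[] initializers */
    struct amdgpu_asic_type_quirk {
    	unsigned short device;	/* PCI device ID */
    	u8 revision;		/* PCI revision ID */
    	unsigned short type;	/* replacement CHIP_* asic type */
    };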
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
index a02992bff6af..2dce338b0f1e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
@@ -786,6 +786,7 @@ void amdgpu_gmc_tmz_set(struct amdgpu_device *adev)
 	/* YELLOW_CARP*/
 	case IP_VERSION(10, 3, 3):
 	case IP_VERSION(11, 0, 4):
+	case IP_VERSION(11, 5, 0):
 		/* Don't enable it by default yet.
 		 */
 		if (amdgpu_tmz < 1) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 78476bc75b4e..1f357198533f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -325,8 +325,8 @@ void amdgpu_job_stop_all_jobs_on_sched(struct drm_gpu_scheduler *sched)
 	int i;
 
 	/* Signal all jobs not yet scheduled */
-	for (i = DRM_SCHED_PRIORITY_COUNT - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
-		struct drm_sched_rq *rq = &sched->sched_rq[i];
+	for (i = sched->num_rqs - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
+		struct drm_sched_rq *rq = sched->sched_rq[i];
 		spin_lock(&rq->lock);
 		list_for_each_entry(s_entity, &rq->entities, list) {
 			while ((s_job = to_drm_sched_job(spsc_queue_pop(&s_entity->job_queue)))) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
index 59f10b353b3a..9ddbf1494326 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
@@ -557,8 +557,20 @@ static void amdgpu_mes_queue_init_mqd(struct amdgpu_device *adev,
 	mqd_prop.hqd_queue_priority = p->hqd_queue_priority;
 	mqd_prop.hqd_active = false;
 
+	if (p->queue_type == AMDGPU_RING_TYPE_GFX ||
+	    p->queue_type == AMDGPU_RING_TYPE_COMPUTE) {
+		mutex_lock(&adev->srbm_mutex);
+		amdgpu_gfx_select_me_pipe_q(adev, p->ring->me, p->ring->pipe, 0, 0, 0);
+	}
+
 	mqd_mgr->init_mqd(adev, q->mqd_cpu_ptr, &mqd_prop);
 
+	if (p->queue_type == AMDGPU_RING_TYPE_GFX ||
+	    p->queue_type == AMDGPU_RING_TYPE_COMPUTE) {
+		amdgpu_gfx_select_me_pipe_q(adev, 0, 0, 0, 0, 0);
+		mutex_unlock(&adev->srbm_mutex);
+	}
+
 	amdgpu_bo_unreserve(q->mqd_obj);
 }
 
@@ -994,9 +1006,13 @@ int amdgpu_mes_add_ring(struct amdgpu_device *adev, int gang_id,
 	switch (queue_type) {
 	case AMDGPU_RING_TYPE_GFX:
 		ring->funcs = adev->gfx.gfx_ring[0].funcs;
+		ring->me = adev->gfx.gfx_ring[0].me;
+		ring->pipe = adev->gfx.gfx_ring[0].pipe;
 		break;
 	case AMDGPU_RING_TYPE_COMPUTE:
 		ring->funcs = adev->gfx.compute_ring[0].funcs;
+		ring->me = adev->gfx.compute_ring[0].me;
+		ring->pipe = adev->gfx.compute_ring[0].pipe;
 		break;
 	case AMDGPU_RING_TYPE_SDMA:
 		ring->funcs = adev->sdma.instance[0].ring.funcs;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 648bd5e12830..32b701cc0376 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -2120,6 +2120,21 @@ int amdgpu_psp_wait_for_bootloader(struct amdgpu_device *adev)
 	return ret;
 }
 
+int amdgpu_psp_query_boot_status(struct amdgpu_device *adev)
+{
+	struct psp_context *psp = &adev->psp;
+	int ret = 0;
+
+	if (amdgpu_sriov_vf(adev) || (adev->flags & AMD_IS_APU))
+		return 0;
+
+	if (psp->funcs &&
+	    psp->funcs->query_boot_status)
+		ret = psp->funcs->query_boot_status(psp);
+
+	return ret;
+}
+
 static int psp_hw_start(struct psp_context *psp)
 {
 	struct amdgpu_device *adev = psp->adev;
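The amdgpu_job.c hunk above pairs with the drm_sched_init() change in amdgpu_device.c: the scheduler now allocates its run-queue array dynamically (sched_rq became an array of pointers) and records the count in num_rqs, so drivers iterate over what was actually allocated rather than the compile-time DRM_SCHED_PRIORITY_COUNT. A sketch of the call, assuming the 6.7-era signature; the trailing adev->dev argument is an assumption since the hunk is truncated there:

    /* ask for one run-queue per priority level; a driver using a single
     * priority could pass 1 instead (num_rqs is the new third parameter)
     */
    r = drm_sched_init(&ring->sched, &amdgpu_sched_ops,
    		   DRM_SCHED_PRIORITY_COUNT, ring->num_hw_submission, 0,
    		   timeout, adev->reset_domain->wq, ring->sched_score,
    		   ring->name, adev->dev);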
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
index 7111dd32e66f..5d36ad3f48c7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
@@ -134,6 +134,7 @@ struct psp_funcs {
 	int (*update_spirom)(struct psp_context *psp, uint64_t fw_pri_mc_addr);
 	int (*vbflash_stat)(struct psp_context *psp);
 	int (*fatal_error_recovery_quirk)(struct psp_context *psp);
+	int (*query_boot_status)(struct psp_context *psp);
 };
 
 struct ta_funcs {
@@ -537,4 +538,6 @@ int is_psp_fw_valid(struct psp_bin_desc bin);
 
 int amdgpu_psp_wait_for_bootloader(struct amdgpu_device *adev);
 
+int amdgpu_psp_query_boot_status(struct amdgpu_device *adev);
+
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index 3c83a2b8fb2c..b7fe5951b166 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -635,8 +635,11 @@ static ssize_t amdgpu_ras_sysfs_read(struct device *dev,
 
 static inline void put_obj(struct ras_manager *obj)
 {
-	if (obj && (--obj->use == 0))
+	if (obj && (--obj->use == 0)) {
 		list_del(&obj->node);
+		amdgpu_ras_error_data_fini(&obj->err_data);
+	}
+
 	if (obj && (obj->use < 0))
 		DRM_ERROR("RAS ERROR: Unbalance obj(%s) use\n", get_ras_block_str(&obj->head));
 }
@@ -666,6 +669,9 @@ static struct ras_manager *amdgpu_ras_create_obj(struct amdgpu_device *adev,
 	if (alive_obj(obj))
 		return NULL;
 
+	if (amdgpu_ras_error_data_init(&obj->err_data))
+		return NULL;
+
 	obj->head = *head;
 	obj->adev = adev;
 	list_add(&obj->node, &con->head);
@@ -1023,44 +1029,68 @@ static void amdgpu_ras_get_ecc_info(struct amdgpu_device *adev, struct ras_err_d
 }
 
 static void amdgpu_ras_error_print_error_data(struct amdgpu_device *adev,
-					      struct ras_query_if *query_if,
+					      struct ras_manager *ras_mgr,
 					      struct ras_err_data *err_data,
+					      const char *blk_name,
 					      bool is_ue)
 {
-	struct ras_manager *ras_mgr = amdgpu_ras_find_obj(adev, &query_if->head);
-	const char *blk_name = get_ras_block_str(&query_if->head);
 	struct amdgpu_smuio_mcm_config_info *mcm_info;
 	struct ras_err_node *err_node;
 	struct ras_err_info *err_info;
 
-	if (is_ue)
-		dev_info(adev->dev, "%ld uncorrectable hardware errors detected in %s block\n",
-			 ras_mgr->err_data.ue_count, blk_name);
-	else
-		dev_info(adev->dev, "%ld correctable hardware errors detected in %s block\n",
-			 ras_mgr->err_data.ce_count, blk_name);
+	if (is_ue) {
+		for_each_ras_error(err_node, err_data) {
+			err_info = &err_node->err_info;
+			mcm_info = &err_info->mcm_info;
+			if (err_info->ue_count) {
+				dev_info(adev->dev, "socket: %d, die: %d, "
+					 "%lld new uncorrectable hardware errors detected in %s block\n",
+					 mcm_info->socket_id,
+					 mcm_info->die_id,
+					 err_info->ue_count,
+					 blk_name);
+			}
+		}
 
-	for_each_ras_error(err_node, err_data) {
-		err_info = &err_node->err_info;
-		mcm_info = &err_info->mcm_info;
-		if (is_ue && err_info->ue_count) {
-			dev_info(adev->dev, "socket: %d, die: %d "
-				 "%lld uncorrectable hardware errors detected in %s block\n",
-				 mcm_info->socket_id,
-				 mcm_info->die_id,
-				 err_info->ue_count,
-				 blk_name);
-		} else if (!is_ue && err_info->ce_count) {
-			dev_info(adev->dev, "socket: %d, die: %d "
-				 "%lld correctable hardware errors detected in %s block\n",
-				 mcm_info->socket_id,
-				 mcm_info->die_id,
-				 err_info->ce_count,
-				 blk_name);
+		for_each_ras_error(err_node, &ras_mgr->err_data) {
+			err_info = &err_node->err_info;
+			mcm_info = &err_info->mcm_info;
+			dev_info(adev->dev, "socket: %d, die: %d, "
+				 "%lld uncorrectable hardware errors detected in total in %s block\n",
+				 mcm_info->socket_id, mcm_info->die_id, err_info->ue_count, blk_name);
+		}
+
+	} else {
+		for_each_ras_error(err_node, err_data) {
+			err_info = &err_node->err_info;
+			mcm_info = &err_info->mcm_info;
+			if (err_info->ce_count) {
+				dev_info(adev->dev, "socket: %d, die: %d, "
+					 "%lld new correctable hardware errors detected in %s block, "
+					 "no user action is needed\n",
+					 mcm_info->socket_id,
+					 mcm_info->die_id,
+					 err_info->ce_count,
+					 blk_name);
+			}
+		}
+
+		for_each_ras_error(err_node, &ras_mgr->err_data) {
+			err_info = &err_node->err_info;
+			mcm_info = &err_info->mcm_info;
+			dev_info(adev->dev, "socket: %d, die: %d, "
+				 "%lld correctable hardware errors detected in total in %s block, "
+				 "no user action is needed\n",
+				 mcm_info->socket_id, mcm_info->die_id, err_info->ce_count, blk_name);
 		}
 	}
 }
 
+static inline bool err_data_has_source_info(struct ras_err_data *data)
+{
+	return !list_empty(&data->err_node_list);
+}
+
 static void amdgpu_ras_error_generate_report(struct amdgpu_device *adev,
 					     struct ras_query_if *query_if,
 					     struct ras_err_data *err_data)
@@ -1069,9 +1099,8 @@ static void amdgpu_ras_error_generate_report(struct amdgpu_device *adev,
 	const char *blk_name = get_ras_block_str(&query_if->head);
 
 	if (err_data->ce_count) {
-		if (!list_empty(&err_data->err_node_list)) {
-			amdgpu_ras_error_print_error_data(adev, query_if,
-							  err_data, false);
+		if (err_data_has_source_info(err_data)) {
+			amdgpu_ras_error_print_error_data(adev, ras_mgr, err_data, blk_name, false);
 		} else if (!adev->aid_mask &&
 			   adev->smuio.funcs &&
 			   adev->smuio.funcs->get_socket_id &&
@@ -1094,9 +1123,8 @@ static void amdgpu_ras_error_generate_report(struct amdgpu_device *adev,
 	}
 
 	if (err_data->ue_count) {
-		if (!list_empty(&err_data->err_node_list)) {
-			amdgpu_ras_error_print_error_data(adev, query_if,
-							  err_data, true);
+		if (err_data_has_source_info(err_data)) {
+			amdgpu_ras_error_print_error_data(adev, ras_mgr, err_data, blk_name, true);
 		} else if (!adev->aid_mask &&
 			   adev->smuio.funcs &&
 			   adev->smuio.funcs->get_socket_id &&
@@ -1118,6 +1146,25 @@ static void amdgpu_ras_error_generate_report(struct amdgpu_device *adev,
 
 }
 
+static void amdgpu_rasmgr_error_data_statistic_update(struct ras_manager *obj, struct ras_err_data *err_data)
+{
+	struct ras_err_node *err_node;
+	struct ras_err_info *err_info;
+
+	if (err_data_has_source_info(err_data)) {
+		for_each_ras_error(err_node, err_data) {
+			err_info = &err_node->err_info;
+
+			amdgpu_ras_error_statistic_ce_count(&obj->err_data, &err_info->mcm_info, err_info->ce_count);
+			amdgpu_ras_error_statistic_ue_count(&obj->err_data, &err_info->mcm_info, err_info->ue_count);
+		}
+	} else {
+		/* for legacy asic path which doesn't has error source info */
+		obj->err_data.ue_count += err_data->ue_count;
+		obj->err_data.ce_count += err_data->ce_count;
+	}
+}
+
 /* query/inject/cure begin */
 int amdgpu_ras_query_error_status(struct amdgpu_device *adev,
 				  struct ras_query_if *info)
@@ -1156,8 +1203,7 @@ int amdgpu_ras_query_error_status(struct amdgpu_device *adev,
 		}
 	}
 
-	obj->err_data.ue_count += err_data.ue_count;
-	obj->err_data.ce_count += err_data.ce_count;
+	amdgpu_rasmgr_error_data_statistic_update(obj, &err_data);
 
 	info->ue_count = obj->err_data.ue_count;
 	info->ce_count = obj->err_data.ce_count;
@@ -1174,6 +1220,10 @@ int amdgpu_ras_reset_error_count(struct amdgpu_device *adev,
 		enum amdgpu_ras_block block)
 {
 	struct amdgpu_ras_block_object *block_obj = amdgpu_ras_get_ras_block(adev, block, 0);
+	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+	const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs;
+	struct amdgpu_hive_info *hive;
+	int hive_ras_recovery = 0;
 
 	if (!block_obj || !block_obj->hw_ops) {
 		dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
@@ -1181,7 +1231,20 @@ int amdgpu_ras_reset_error_count(struct amdgpu_device *adev,
 		return -EOPNOTSUPP;
 	}
 
-	if (!amdgpu_ras_is_supported(adev, block))
+	if (!amdgpu_ras_is_supported(adev, block) ||
+	    !amdgpu_ras_get_mca_debug_mode(adev))
+		return -EOPNOTSUPP;
+
+	hive = amdgpu_get_xgmi_hive(adev);
+	if (hive) {
+		hive_ras_recovery = atomic_read(&hive->ras_recovery);
+		amdgpu_put_xgmi_hive(hive);
+	}
+
+	/* skip ras error reset in gpu reset */
+	if ((amdgpu_in_reset(adev) || atomic_read(&ras->in_recovery) ||
+	    hive_ras_recovery) &&
+	    mca_funcs && mca_funcs->mca_set_debug_mode)
 		return -EOPNOTSUPP;
 
 	if (block_obj->hw_ops->reset_ras_error_count)
@@ -2692,7 +2755,8 @@ static void amdgpu_ras_query_poison_mode(struct amdgpu_device *adev)
 		return;
 
 	/* Init poison supported flag, the default value is false */
-	if (adev->gmc.xgmi.connected_to_cpu) {
+	if (adev->gmc.xgmi.connected_to_cpu ||
+	    adev->gmc.is_app_apu) {
 		/* enabled by default when GPU is connected to CPU */
 		con->poison_supported = true;
 	} else if (adev->df.funcs &&
@@ -3529,11 +3593,10 @@ static struct ras_err_node *amdgpu_ras_error_find_node_by_id(struct ras_err_data
 
 	for_each_ras_error(err_node, err_data) {
 		ref_id = &err_node->err_info.mcm_info;
-		if ((mcm_info->socket_id >= 0 && mcm_info->socket_id != ref_id->socket_id) ||
-		    (mcm_info->die_id >= 0 && mcm_info->die_id != ref_id->die_id))
-			continue;
 
-		return err_node;
+		if (mcm_info->socket_id == ref_id->socket_id &&
+		    mcm_info->die_id == ref_id->die_id)
+			return err_node;
 	}
 
 	return NULL;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
index 2fdfef62ee27..665414c22ca9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
@@ -515,10 +515,7 @@ struct ras_manager {
 	/* IH data */
 	struct ras_ih_data ih_data;
 
-	struct {
-		unsigned long ue_count;
-		unsigned long ce_count;
-	} err_data;
+	struct ras_err_data err_data;
 };
 
 struct ras_badpage {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
index e8cbc4142d80..1d9d187de6ee 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
@@ -292,27 +292,6 @@ out:
 	return err;
 }
 
-void amdgpu_sdma_unset_buffer_funcs_helper(struct amdgpu_device *adev)
-{
-	struct amdgpu_ring *sdma;
-	int i;
-
-	for (i = 0; i < adev->sdma.num_instances; i++) {
-		if (adev->sdma.has_page_queue) {
-			sdma = &adev->sdma.instance[i].page;
-			if (adev->mman.buffer_funcs_ring == sdma) {
-				amdgpu_ttm_set_buffer_funcs_status(adev, false);
-				break;
-			}
-		}
-		sdma = &adev->sdma.instance[i].ring;
-		if (adev->mman.buffer_funcs_ring == sdma) {
-			amdgpu_ttm_set_buffer_funcs_status(adev, false);
-			break;
-		}
-	}
-}
-
 int amdgpu_sdma_ras_sw_init(struct amdgpu_device *adev)
 {
 	int err = 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
index 513ac22120c1..173a2a308078 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
@@ -169,7 +169,6 @@ int amdgpu_sdma_init_microcode(struct amdgpu_device *adev, u32 instance,
 			       bool duplicate);
 void amdgpu_sdma_destroy_inst_ctx(struct amdgpu_device *adev,
 				  bool duplicate);
-void amdgpu_sdma_unset_buffer_funcs_helper(struct amdgpu_device *adev);
 int amdgpu_sdma_ras_sw_init(struct amdgpu_device *adev);
 
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
index f74347cc087a..d65e21914d8c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
@@ -166,8 +166,12 @@ static int amdgpu_umc_do_page_retirement(struct amdgpu_device *adev,
 			}
 		}
 
-		if (reset)
+		if (reset) {
+			/* use mode-2 reset for poison consumption */
+			if (!entry)
+				con->gpu_reset_flags |= AMDGPU_RAS_GPU_RESET_MODE2_RESET;
 			amdgpu_ras_reset_gpu(adev);
+		}
 	}
 
 	kfree(err_data->err_addr);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index f3c9f93d8899..904252456d25 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -844,6 +844,7 @@ static void amdgpu_vm_tlb_seq_cb(struct dma_fence *fence,
  * @immediate: immediate submission in a page fault
  * @unlocked: unlocked invalidation during MM callback
  * @flush_tlb: trigger tlb invalidation after update completed
+ * @allow_override: change MTYPE for local NUMA nodes
  * @resv: fences we need to sync to
  * @start: start of mapped range
  * @last: last mapped entry
@@ -860,7 +861,7 @@ static void amdgpu_vm_tlb_seq_cb(struct dma_fence *fence,
  * 0 for success, negative erro code for failure.
  */
 int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm,
-			   bool immediate, bool unlocked, bool flush_tlb,
+			   bool immediate, bool unlocked, bool flush_tlb, bool allow_override,
 			   struct dma_resv *resv, uint64_t start, uint64_t last,
 			   uint64_t flags, uint64_t offset, uint64_t vram_base,
 			   struct ttm_resource *res, dma_addr_t *pages_addr,
@@ -898,6 +899,7 @@ int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 	params.immediate = immediate;
 	params.pages_addr = pages_addr;
 	params.unlocked = unlocked;
+	params.allow_override = allow_override;
 
 	/* Implicitly sync to command submissions in the same VM before
 	 * unmapping. Sync to moving fences before mapping.
@@ -1073,6 +1075,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
 	struct ttm_resource *mem;
 	struct dma_fence **last_update;
 	bool flush_tlb = clear;
+	bool uncached;
 	struct dma_resv *resv;
 	uint64_t vram_base;
 	uint64_t flags;
@@ -1110,9 +1113,11 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
 
 		bo_adev = amdgpu_ttm_adev(bo->tbo.bdev);
 		vram_base = bo_adev->vm_manager.vram_base_offset;
+		uncached = (bo->flags & AMDGPU_GEM_CREATE_UNCACHED) != 0;
 	} else {
 		flags = 0x0;
 		vram_base = 0;
+		uncached = false;
 	}
 
 	if (clear || (bo && bo->tbo.base.resv ==
@@ -1146,7 +1151,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
 		trace_amdgpu_vm_bo_update(mapping);
 
 		r = amdgpu_vm_update_range(adev, vm, false, false, flush_tlb,
-					   resv, mapping->start, mapping->last,
+					   !uncached, resv, mapping->start, mapping->last,
 					   update_flags, mapping->offset,
 					   vram_base, mem, pages_addr,
 					   last_update);
@@ -1341,8 +1346,8 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
 		    mapping->start < AMDGPU_GMC_HOLE_START)
 			init_pte_value = AMDGPU_PTE_DEFAULT_ATC;
 
-		r = amdgpu_vm_update_range(adev, vm, false, false, true, resv,
-					   mapping->start, mapping->last,
+		r = amdgpu_vm_update_range(adev, vm, false, false, true, false,
+					   resv, mapping->start, mapping->last,
 					   init_pte_value, 0, 0, NULL, NULL,
 					   &f);
 		amdgpu_vm_free_mapping(adev, vm, mapping, f);
@@ -1368,6 +1373,7 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
  *
  * @adev: amdgpu_device pointer
  * @vm: requested vm
+ * @ticket: optional reservation ticket used to reserve the VM
  *
  * Make sure all BOs which are moved are updated in the PTs.
 *
@@ -1377,11 +1383,12 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
 * PTs have to be reserved!
 */
 int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
-			   struct amdgpu_vm *vm)
+			   struct amdgpu_vm *vm,
+			   struct ww_acquire_ctx *ticket)
 {
 	struct amdgpu_bo_va *bo_va;
 	struct dma_resv *resv;
-	bool clear;
+	bool clear, unlock;
 	int r;
 
 	spin_lock(&vm->status_lock);
@@ -1404,17 +1411,24 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
 		spin_unlock(&vm->status_lock);
 
 		/* Try to reserve the BO to avoid clearing its ptes */
-		if (!adev->debug_vm && dma_resv_trylock(resv))
+		if (!adev->debug_vm && dma_resv_trylock(resv)) {
 			clear = false;
+			unlock = true;
+		/* The caller is already holding the reservation lock */
+		} else if (ticket && dma_resv_locking_ctx(resv) == ticket) {
+			clear = false;
+			unlock = false;
 		/* Somebody else is using the BO right now */
-		else
+		} else {
 			clear = true;
+			unlock = false;
+		}
 
 		r = amdgpu_vm_bo_update(adev, bo_va, clear);
 		if (r)
 			return r;
 
-		if (!clear)
+		if (unlock)
 			dma_resv_unlock(resv);
 
 		spin_lock(&vm->status_lock);
 	}
@@ -2618,8 +2632,8 @@ bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
 		goto error_unlock;
 	}
 
-	r = amdgpu_vm_update_range(adev, vm, true, false, false, NULL, addr,
-				   addr, flags, value, 0, NULL, NULL, NULL);
+	r = amdgpu_vm_update_range(adev, vm, true, false, false, false,
+				   NULL, addr, addr, flags, value, 0, NULL, NULL, NULL);
 	if (r)
 		goto error_unlock;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index 411d42fecfb6..2cd86d2bf73f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -246,6 +246,12 @@ struct amdgpu_vm_update_params {
 	 * @table_freed: return true if page table is freed when updating
 	 */
 	bool table_freed;
+
+	/**
+	 * @allow_override: true for memory that is not uncached: allows MTYPE
+	 * to be overridden for NUMA local memory.
+	 */
+	bool allow_override;
 };
 
 struct amdgpu_vm_update_funcs {
@@ -437,11 +443,12 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
 			  struct amdgpu_vm *vm,
 			  struct dma_fence **fence);
 int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
-			   struct amdgpu_vm *vm);
+			   struct amdgpu_vm *vm,
+			   struct ww_acquire_ctx *ticket);
 void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
 			    struct amdgpu_vm *vm, struct amdgpu_bo *bo);
 int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm,
-			   bool immediate, bool unlocked, bool flush_tlb,
+			   bool immediate, bool unlocked, bool flush_tlb, bool allow_override,
 			   struct dma_resv *resv, uint64_t start, uint64_t last,
 			   uint64_t flags, uint64_t offset, uint64_t vram_base,
 			   struct ttm_resource *res, dma_addr_t *pages_addr,
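The new ticket parameter gives amdgpu_vm_handle_moved() three cases instead of two. The same logic as the hunk above, summarized as a decision table:

    /* trylock succeeded           -> clear = false, unlock = true
     * caller already holds resv   -> clear = false, unlock = false
     *   (ticket == dma_resv_locking_ctx(resv))
     * somebody else holds it      -> clear = true,  unlock = false
     *   (the PTEs are cleared instead of updated; the mapping is
     *    re-validated on next use)
     */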
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
index 9b025fd17b84..a2287bb25223 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
@@ -843,7 +843,7 @@ static void amdgpu_vm_pte_update_flags(struct amdgpu_vm_update_params *params,
 	 */
 	if ((flags & AMDGPU_PTE_SYSTEM) && (adev->flags & AMD_IS_APU) &&
 	    adev->gmc.gmc_funcs->override_vm_pte_flags &&
-	    num_possible_nodes() > 1 && !params->pages_addr)
+	    num_possible_nodes() > 1 && !params->pages_addr && params->allow_override)
 		amdgpu_gmc_override_vm_pte_flags(adev, params->vm, addr, &flags);
 
 	params->vm->update_funcs->update(params, pt, pe, addr, count, incr,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
index 18f58efc9dc7..08916538a615 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -77,7 +77,16 @@ static inline bool amdgpu_is_vram_mgr_blocks_contiguous(struct list_head *head)
 	return true;
 }
 
+static inline u64 amdgpu_vram_mgr_blocks_size(struct list_head *head)
+{
+	struct drm_buddy_block *block;
+	u64 size = 0;
+	list_for_each_entry(block, head, link)
+		size += amdgpu_vram_mgr_block_size(block);
+
+	return size;
+}
 
 /**
  * DOC: mem_info_vram_total
 *
@@ -516,6 +525,8 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
 	mutex_unlock(&mgr->lock);
 
 	vres->base.start = 0;
+	size = max_t(u64, amdgpu_vram_mgr_blocks_size(&vres->blocks),
+		     vres->base.size);
 	list_for_each_entry(block, &vres->blocks, link) {
 		unsigned long start;
 
@@ -523,8 +534,8 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
 			amdgpu_vram_mgr_block_size(block);
 		start >>= PAGE_SHIFT;
 
-		if (start > PFN_UP(vres->base.size))
-			start -= PFN_UP(vres->base.size);
+		if (start > PFN_UP(size))
+			start -= PFN_UP(size);
 		else
 			start = 0;
 		vres->base.start = max(vres->base.start, start);
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
index e63abdf52b6c..4dfaa017cf7f 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik.c
@@ -1709,10 +1709,6 @@ static void cik_program_aspm(struct amdgpu_device *adev)
 	if (pci_is_root_bus(adev->pdev->bus))
 		return;
 
-	/* XXX double check APUs */
-	if (adev->flags & AMD_IS_APU)
-		return;
-
 	orig = data = RREG32_PCIE(ixPCIE_LC_N_FTS_CNTL);
 	data &= ~PCIE_LC_N_FTS_CNTL__LC_XMIT_N_FTS_MASK;
 	data |= (0x24 << PCIE_LC_N_FTS_CNTL__LC_XMIT_N_FTS__SHIFT) |
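The vram_mgr change accounts for buddy-allocator rounding: the blocks actually handed back can sum to more than the requested resource size, and the per-block start computation (block end minus resource footprint) should use the real footprint. A worked example with assumed numbers:

    /* hypothetical: request 300 MiB; buddy returns 256 MiB + 64 MiB = 320 MiB.
     * For a block ending at PFN_UP(1024 MiB):
     *   before: start = PFN(1024 MiB) - PFN_UP(300 MiB)
     *   after:  size  = max(320 MiB, 300 MiB) = 320 MiB
     *           start = PFN(1024 MiB) - PFN_UP(320 MiB)
     * so base.start now reflects the allocation's actual extent.
     */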
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index ee5dce6f6043..a3fccc4c1f43 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -308,8 +308,6 @@ static void cik_sdma_gfx_stop(struct amdgpu_device *adev)
 	u32 rb_cntl;
 	int i;
 
-	amdgpu_sdma_unset_buffer_funcs_helper(adev);
-
 	for (i = 0; i < adev->sdma.num_instances; i++) {
 		rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
 		rb_cntl &= ~SDMA0_GFX_RB_CNTL__RB_ENABLE_MASK;
@@ -498,9 +496,6 @@ static int cik_sdma_gfx_resume(struct amdgpu_device *adev)
 		r = amdgpu_ring_test_helper(ring);
 		if (r)
 			return r;
-
-		if (adev->mman.buffer_funcs_ring == ring)
-			amdgpu_ttm_set_buffer_funcs_status(adev, true);
 	}
 
 	return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/df_v4_6_2.c b/drivers/gpu/drm/amd/amdgpu/df_v4_6_2.c
new file mode 100644
index 000000000000..a47960a0babd
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/df_v4_6_2.c
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu.h"
+#include "df_v4_6_2.h"
+
+static bool df_v4_6_2_query_ras_poison_mode(struct amdgpu_device *adev)
+{
+	/* return true since related regs are inaccessible */
+	return true;
+}
+
+const struct amdgpu_df_funcs df_v4_6_2_funcs = {
+	.query_ras_poison_mode = df_v4_6_2_query_ras_poison_mode,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/df_v4_6_2.h b/drivers/gpu/drm/amd/amdgpu/df_v4_6_2.h
new file mode 100644
index 000000000000..3bc3e6d216e2
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/df_v4_6_2.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __DF_V4_6_2_H__
+#define __DF_V4_6_2_H__
+
+#include "soc15_common.h"
+
+extern const struct amdgpu_df_funcs df_v4_6_2_funcs;
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index d9ccacd06fba..c8a3bf01743f 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -3498,6 +3498,8 @@ static void gfx_v10_0_ring_invalidate_tlbs(struct amdgpu_ring *ring,
 static void gfx_v10_0_update_spm_vmid_internal(struct amdgpu_device *adev,
 					       unsigned int vmid);
 
+static int gfx_v10_0_set_powergating_state(void *handle,
+					   enum amd_powergating_state state);
 static void gfx10_kiq_set_resources(struct amdgpu_ring *kiq_ring, uint64_t queue_mask)
 {
 	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6));
@@ -6465,11 +6467,18 @@ static int gfx_v10_0_gfx_init_queue(struct amdgpu_ring *ring)
 		nv_grbm_select(adev, 0, 0, 0, 0);
 		mutex_unlock(&adev->srbm_mutex);
 		if (adev->gfx.me.mqd_backup[mqd_idx])
-			memcpy(adev->gfx.me.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
+			memcpy_fromio(adev->gfx.me.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
 	} else {
+		mutex_lock(&adev->srbm_mutex);
+		nv_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
+		if (ring->doorbell_index == adev->doorbell_index.gfx_ring0 << 1)
+			gfx_v10_0_cp_gfx_set_doorbell(adev, ring);
+
+		nv_grbm_select(adev, 0, 0, 0, 0);
+		mutex_unlock(&adev->srbm_mutex);
 		/* restore mqd with the backup copy */
 		if (adev->gfx.me.mqd_backup[mqd_idx])
-			memcpy(mqd, adev->gfx.me.mqd_backup[mqd_idx], sizeof(*mqd));
+			memcpy_toio(mqd, adev->gfx.me.mqd_backup[mqd_idx], sizeof(*mqd));
 		/* reset the ring */
 		ring->wptr = 0;
 		*ring->wptr_cpu_addr = 0;
@@ -6743,7 +6752,7 @@ static int gfx_v10_0_kiq_init_queue(struct amdgpu_ring *ring)
 	if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */
 		/* reset MQD to a clean status */
 		if (adev->gfx.kiq[0].mqd_backup)
-			memcpy(mqd, adev->gfx.kiq[0].mqd_backup, sizeof(*mqd));
+			memcpy_toio(mqd, adev->gfx.kiq[0].mqd_backup, sizeof(*mqd));
 
 		/* reset ring buffer */
 		ring->wptr = 0;
@@ -6766,7 +6775,7 @@ static int gfx_v10_0_kiq_init_queue(struct amdgpu_ring *ring)
 		mutex_unlock(&adev->srbm_mutex);
 
 		if (adev->gfx.kiq[0].mqd_backup)
-			memcpy(adev->gfx.kiq[0].mqd_backup, mqd, sizeof(*mqd));
+			memcpy_fromio(adev->gfx.kiq[0].mqd_backup, mqd, sizeof(*mqd));
 	}
 
 	return 0;
@@ -6787,11 +6796,11 @@ static int gfx_v10_0_kcq_init_queue(struct amdgpu_ring *ring)
 		mutex_unlock(&adev->srbm_mutex);
 
 		if (adev->gfx.mec.mqd_backup[mqd_idx])
-			memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
+			memcpy_fromio(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
 	} else {
 		/* restore MQD to a clean status */
 		if (adev->gfx.mec.mqd_backup[mqd_idx])
-			memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd));
+			memcpy_toio(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd));
 		/* reset ring buffer */
 		ring->wptr = 0;
 		atomic64_set((atomic64_t *)ring->wptr_cpu_addr, 0);
@@ -7172,6 +7181,13 @@ static int gfx_v10_0_hw_fini(void *handle)
 	amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
 	amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
 
+	/* WA added for Vangogh asic fixing the SMU suspend failure
+	 * It needs to set power gating again during gfxoff control
+	 * otherwise the gfxoff disallowing will be failed to set.
+	 */
+	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(10, 3, 1))
+		gfx_v10_0_set_powergating_state(handle, AMD_PG_STATE_UNGATE);
+
 	if (!adev->no_hw_access) {
 		if (amdgpu_async_gfx_ring) {
 			if (amdgpu_gfx_disable_kgq(adev, 0))
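The memcpy-to-memcpy_fromio/memcpy_toio conversions in the queue-init paths above matter because the MQD may now be placed in VRAM (see the amdgpu_gfx.c hunk), which is IO memory: plain dereference-style copies are undefined there on some architectures, hence also the ARM/ARM64 exclusion. The accessor pattern, with hypothetical names for the pointers and size:

    /* mqd points at IO memory when the MQD BO lives in VRAM */
    void __iomem *mqd = mqd_ptr;               /* hypothetical */
    memcpy_fromio(backup, mqd, mqd_size);      /* save:    IO -> system RAM */
    memcpy_toio(mqd, backup, mqd_size);        /* restore: system RAM -> IO */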
disallowing will be failed to set. + */ + if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(10, 3, 1)) + gfx_v10_0_set_powergating_state(handle, AMD_PG_STATE_UNGATE); + if (!adev->no_hw_access) { if (amdgpu_async_gfx_ring) { if (amdgpu_gfx_disable_kgq(adev, 0)) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c index fd22943685f7..0c6133cc5e57 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c @@ -155,6 +155,7 @@ static void gfx11_kiq_set_resources(struct amdgpu_ring *kiq_ring, uint64_t queue { amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6)); amdgpu_ring_write(kiq_ring, PACKET3_SET_RESOURCES_VMID_MASK(0) | + PACKET3_SET_RESOURCES_UNMAP_LATENTY(0xa) | /* unmap_latency: 0xa (~ 1s) */ PACKET3_SET_RESOURCES_QUEUE_TYPE(0)); /* vmid_mask:0 queue_type:0 (KIQ) */ amdgpu_ring_write(kiq_ring, lower_32_bits(queue_mask)); /* queue mask lo */ amdgpu_ring_write(kiq_ring, upper_32_bits(queue_mask)); /* queue mask hi */ @@ -3714,11 +3715,11 @@ static int gfx_v11_0_gfx_init_queue(struct amdgpu_ring *ring) soc21_grbm_select(adev, 0, 0, 0, 0); mutex_unlock(&adev->srbm_mutex); if (adev->gfx.me.mqd_backup[mqd_idx]) - memcpy(adev->gfx.me.mqd_backup[mqd_idx], mqd, sizeof(*mqd)); + memcpy_fromio(adev->gfx.me.mqd_backup[mqd_idx], mqd, sizeof(*mqd)); } else { /* restore mqd with the backup copy */ if (adev->gfx.me.mqd_backup[mqd_idx]) - memcpy(mqd, adev->gfx.me.mqd_backup[mqd_idx], sizeof(*mqd)); + memcpy_toio(mqd, adev->gfx.me.mqd_backup[mqd_idx], sizeof(*mqd)); /* reset the ring */ ring->wptr = 0; *ring->wptr_cpu_addr = 0; @@ -4007,7 +4008,7 @@ static int gfx_v11_0_kiq_init_queue(struct amdgpu_ring *ring) if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */ /* reset MQD to a clean status */ if (adev->gfx.kiq[0].mqd_backup) - memcpy(mqd, adev->gfx.kiq[0].mqd_backup, sizeof(*mqd)); + memcpy_toio(mqd, adev->gfx.kiq[0].mqd_backup, sizeof(*mqd)); /* reset ring buffer */ ring->wptr = 0; @@ -4030,7 +4031,7 @@ static int gfx_v11_0_kiq_init_queue(struct amdgpu_ring *ring) mutex_unlock(&adev->srbm_mutex); if (adev->gfx.kiq[0].mqd_backup) - memcpy(adev->gfx.kiq[0].mqd_backup, mqd, sizeof(*mqd)); + memcpy_fromio(adev->gfx.kiq[0].mqd_backup, mqd, sizeof(*mqd)); } return 0; @@ -4051,11 +4052,11 @@ static int gfx_v11_0_kcq_init_queue(struct amdgpu_ring *ring) mutex_unlock(&adev->srbm_mutex); if (adev->gfx.mec.mqd_backup[mqd_idx]) - memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd)); + memcpy_fromio(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd)); } else { /* restore MQD to a clean status */ if (adev->gfx.mec.mqd_backup[mqd_idx]) - memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd)); + memcpy_toio(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd)); /* reset ring buffer */ ring->wptr = 0; atomic64_set((atomic64_t *)ring->wptr_cpu_addr, 0); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c index 362bf51ab1d2..41bbabd9ad4d 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c @@ -256,6 +256,7 @@ static int gfx_v9_4_3_ring_test_ring(struct amdgpu_ring *ring) xcc_offset = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG0); scratch_reg0_offset = SOC15_REG_OFFSET(GC, GET_INST(GC, ring->xcc_id), regSCRATCH_REG0); WREG32(scratch_reg0_offset, 0xCAFEDEAD); + tmp = RREG32(scratch_reg0_offset); r = amdgpu_ring_alloc(ring, 3); if (r) diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c index 
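Note: the memcpy() to memcpy_fromio()/memcpy_toio() conversions in gfx_v10_0 and gfx_v11_0 matter because the live MQD sits in a BO that may be mapped as device memory, where a plain memcpy() is not guaranteed safe on all architectures; the backup buffer is ordinary kernel RAM. A short kernel-style sketch of the convention, with "struct mqd" standing in for the real v10/v11 MQD types:

#include <linux/io.h>
#include <linux/types.h>

struct mqd { u32 words[128]; };	/* placeholder for the real MQD layout */

/* save: device (__iomem) -> system memory */
static void mqd_backup(struct mqd *backup, const void __iomem *live)
{
	memcpy_fromio(backup, live, sizeof(*backup));
}

/* restore: system memory -> device (__iomem) */
static void mqd_restore(void __iomem *live, const struct mqd *backup)
{
	memcpy_toio(live, backup, sizeof(*backup));
}

The gfx_v10_0 restore path additionally re-selects the ring in GRBM and re-programs the gfx ring doorbell before copying the backup back, so the doorbell is valid again by the time the restored MQD is consumed.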
19eaada35ede..4713a62ad586 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c @@ -73,7 +73,8 @@ gmc_v11_0_vm_fault_interrupt_state(struct amdgpu_device *adev, * fini/suspend, so the overall state doesn't * change over the course of suspend/resume. */ - if (!adev->in_s0ix) + if (!adev->in_s0ix && (adev->in_runpm || adev->in_suspend || + amdgpu_in_reset(adev))) amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB(0), false); break; case AMDGPU_IRQ_STATE_ENABLE: diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 5fed01e34928..b66c5f7e1c56 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -1251,12 +1251,15 @@ static void gmc_v9_0_override_vm_pte_flags(struct amdgpu_device *adev, return; } - /* Only override mappings with MTYPE_NC, which is the safe default for - * cacheable memory. + /* MTYPE_NC is the same default and can be overridden. + * MTYPE_UC will be present if the memory is extended-coherent + * and can also be overridden. */ if ((*flags & AMDGPU_PTE_MTYPE_VG10_MASK) != - AMDGPU_PTE_MTYPE_VG10(MTYPE_NC)) { - dev_dbg_ratelimited(adev->dev, "MTYPE is not NC\n"); + AMDGPU_PTE_MTYPE_VG10(MTYPE_NC) && + (*flags & AMDGPU_PTE_MTYPE_VG10_MASK) != + AMDGPU_PTE_MTYPE_VG10(MTYPE_UC)) { + dev_dbg_ratelimited(adev->dev, "MTYPE is not NC or UC\n"); return; } @@ -1283,15 +1286,23 @@ static void gmc_v9_0_override_vm_pte_flags(struct amdgpu_device *adev, vm->mem_id, local_node, nid); if (nid == local_node) { uint64_t old_flags = *flags; - unsigned int mtype_local = MTYPE_RW; + if ((*flags & AMDGPU_PTE_MTYPE_VG10_MASK) == + AMDGPU_PTE_MTYPE_VG10(MTYPE_NC)) { + unsigned int mtype_local = MTYPE_RW; - if (amdgpu_mtype_local == 1) - mtype_local = MTYPE_NC; - else if (amdgpu_mtype_local == 2) - mtype_local = MTYPE_CC; + if (amdgpu_mtype_local == 1) + mtype_local = MTYPE_NC; + else if (amdgpu_mtype_local == 2) + mtype_local = MTYPE_CC; + + *flags = (*flags & ~AMDGPU_PTE_MTYPE_VG10_MASK) | + AMDGPU_PTE_MTYPE_VG10(mtype_local); + } else { + /* MTYPE_UC case */ + *flags = (*flags & ~AMDGPU_PTE_MTYPE_VG10_MASK) | + AMDGPU_PTE_MTYPE_VG10(MTYPE_CC); + } - *flags = (*flags & ~AMDGPU_PTE_MTYPE_VG10_MASK) | - AMDGPU_PTE_MTYPE_VG10(mtype_local); dev_dbg_ratelimited(adev->dev, "flags updated from %llx to %llx\n", old_flags, *flags); } @@ -2018,11 +2029,8 @@ static int gmc_v9_0_sw_init(void *handle) * vm size is 256TB (48bit), maximum size of Vega10, * block size 512 (9bit) */ - /* sriov restrict max_pfn below AMDGPU_GMC_HOLE */ - if (amdgpu_sriov_vf(adev)) - amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 47); - else - amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48); + + amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48); if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2)) adev->gmc.translate_further = adev->vm_manager.num_level > 1; break; diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c index e523627cfe25..df218d5ca775 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c @@ -28,6 +28,7 @@ #include "nbio/nbio_2_3_offset.h" #include "nbio/nbio_2_3_sh_mask.h" #include <uapi/linux/kfd_ioctl.h> +#include <linux/device.h> #include <linux/pci.h> #define smnPCIE_CONFIG_CNTL 0x11180044 @@ -361,7 +362,7 @@ static void nbio_v2_3_enable_aspm(struct amdgpu_device *adev, data |= NAVI10_PCIE__LC_L0S_INACTIVITY_DEFAULT << PCIE_LC_CNTL__LC_L0S_INACTIVITY__SHIFT; - if 
(pci_is_thunderbolt_attached(adev->pdev)) + if (dev_is_removable(&adev->pdev->dev)) data |= NAVI10_PCIE__LC_L1_INACTIVITY_TBT_DEFAULT << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT; else data |= NAVI10_PCIE__LC_L1_INACTIVITY_DEFAULT << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT; @@ -480,7 +481,7 @@ static void nbio_v2_3_program_aspm(struct amdgpu_device *adev) def = data = RREG32_PCIE(smnPCIE_LC_CNTL); data |= NAVI10_PCIE__LC_L0S_INACTIVITY_DEFAULT << PCIE_LC_CNTL__LC_L0S_INACTIVITY__SHIFT; - if (pci_is_thunderbolt_attached(adev->pdev)) + if (dev_is_removable(&adev->pdev->dev)) data |= NAVI10_PCIE__LC_L1_INACTIVITY_TBT_DEFAULT << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT; else data |= NAVI10_PCIE__LC_L1_INACTIVITY_DEFAULT << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT; diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_7.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_7.c index def89379b51a..4df1055e640a 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_7.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_7.c @@ -254,7 +254,7 @@ static void nbio_v7_7_update_medium_grain_clock_gating(struct amdgpu_device *ade { uint32_t def, data; - if (enable && !(adev->cg_flags & AMD_CG_SUPPORT_BIF_MGCG)) + if (!(adev->cg_flags & AMD_CG_SUPPORT_BIF_MGCG)) return; def = data = RREG32_SOC15(NBIO, 0, regBIF0_CPM_CONTROL); @@ -283,7 +283,7 @@ static void nbio_v7_7_update_medium_grain_light_sleep(struct amdgpu_device *adev { uint32_t def, data; - if (enable && !(adev->cg_flags & AMD_CG_SUPPORT_BIF_LS)) + if (!(adev->cg_flags & AMD_CG_SUPPORT_BIF_LS)) return; def = data = RREG32_SOC15(NBIO, 0, regBIF0_PCIE_CNTL2); diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c index eccb006e78aa..23f26f8caad4 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c @@ -56,8 +56,15 @@ static u32 nbio_v7_9_get_rev_id(struct amdgpu_device *adev) { u32 tmp; + tmp = IP_VERSION_SUBREV(amdgpu_ip_version_full(adev, NBIO_HWIP, 0)); + /* If it is VF or subrevision holds a non-zero value, that should be used */ + if (tmp || amdgpu_sriov_vf(adev)) + return tmp; + + /* If discovery subrev is not updated, use register version */ tmp = RREG32_SOC15(NBIO, 0, regRCC_STRAP0_RCC_DEV0_EPF0_STRAP0); - tmp = REG_GET_FIELD(tmp, RCC_STRAP0_RCC_DEV0_EPF0_STRAP0, STRAP_ATI_REV_ID_DEV0_F0); + tmp = REG_GET_FIELD(tmp, RCC_STRAP0_RCC_DEV0_EPF0_STRAP0, + STRAP_ATI_REV_ID_DEV0_F0); return tmp; } diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c index 1995c7459f20..4d7976b77767 100644 --- a/drivers/gpu/drm/amd/amdgpu/nv.c +++ b/drivers/gpu/drm/amd/amdgpu/nv.c @@ -513,11 +513,10 @@ static int nv_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk) static void nv_program_aspm(struct amdgpu_device *adev) { - if (!amdgpu_device_should_use_aspm(adev) || !amdgpu_device_aspm_support_quirk()) + if (!amdgpu_device_should_use_aspm(adev)) return; - if (!(adev->flags & AMD_IS_APU) && - (adev->nbio.funcs->program_aspm)) + if (adev->nbio.funcs->program_aspm) adev->nbio.funcs->program_aspm(adev); } @@ -609,9 +608,8 @@ static int nv_update_umd_stable_pstate(struct amdgpu_device *adev, if (adev->gfx.funcs->update_perfmon_mgcg) adev->gfx.funcs->update_perfmon_mgcg(adev, !enter); - if (!(adev->flags & AMD_IS_APU) && - (adev->nbio.funcs->enable_aspm) && - amdgpu_device_should_use_aspm(adev)) + if (adev->nbio.funcs->enable_aspm && + amdgpu_device_should_use_aspm(adev)) adev->nbio.funcs->enable_aspm(adev, !enter); return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c 
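Note: nbio_v7_9_get_rev_id() now prefers the sub-revision carried in the full discovery version word, and uses it unconditionally on a VF; the RCC_STRAP0 STRAP_ATI_REV_ID field is only a fallback when discovery reports zero. A compilable sketch of the version-word packing this assumes (8 bits each for major/minor/rev, then 4-bit variant and sub-revision; the exact layout is an assumption here, only the low-nibble sub-revision lookup is taken from the hunk above):

#include <stdint.h>
#include <stdio.h>

#define IP_VER_FULL(mj, mn, rv, var, sr)				\
	(((uint32_t)(mj) << 24) | ((uint32_t)(mn) << 16) |		\
	 ((uint32_t)(rv) << 8) | ((uint32_t)(var) << 4) | (uint32_t)(sr))
#define IP_VER_SUBREV(v)	((v) & 0xFu)
#define IP_VER_NO_VARIANT(v)	((v) & ~0xFFu)	/* what plain version checks compare */

int main(void)
{
	uint32_t v = IP_VER_FULL(7, 9, 0, 0, 1);	/* NBIO 7.9.0, sub-rev 1 */

	printf("subrev %u, base version 0x%08x\n",
	       IP_VER_SUBREV(v), IP_VER_NO_VARIANT(v));
	return 0;
}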
b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c index 4142e2fcd866..3cf4684d0d3f 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c @@ -759,6 +759,83 @@ static int psp_v13_0_fatal_error_recovery_quirk(struct psp_context *psp) return 0; } + +static void psp_v13_0_boot_error_reporting(struct amdgpu_device *adev, + uint32_t inst, + uint32_t boot_error) +{ + uint32_t socket_id; + uint32_t aid_id; + uint32_t hbm_id; + uint32_t reg_data; + + socket_id = REG_GET_FIELD(boot_error, MP0_SMN_C2PMSG_126, SOCKET_ID); + aid_id = REG_GET_FIELD(boot_error, MP0_SMN_C2PMSG_126, AID_ID); + hbm_id = REG_GET_FIELD(boot_error, MP0_SMN_C2PMSG_126, HBM_ID); + + reg_data = RREG32_SOC15(MP0, inst, regMP0_SMN_C2PMSG_109); + dev_info(adev->dev, "socket: %d, aid: %d, firmware boot failed, fw status is 0x%x\n", + socket_id, aid_id, reg_data); + + if (REG_GET_FIELD(boot_error, MP0_SMN_C2PMSG_126, GPU_ERR_MEM_TRAINING)) + dev_info(adev->dev, "socket: %d, aid: %d, hbm: %d, memory training failed\n", + socket_id, aid_id, hbm_id); + + if (REG_GET_FIELD(boot_error, MP0_SMN_C2PMSG_126, GPU_ERR_FW_LOAD)) + dev_info(adev->dev, "socket: %d, aid: %d, firmware load failed at boot time\n", + socket_id, aid_id); + + if (REG_GET_FIELD(boot_error, MP0_SMN_C2PMSG_126, GPU_ERR_WAFL_LINK_TRAINING)) + dev_info(adev->dev, "socket: %d, aid: %d, wafl link training failed\n", + socket_id, aid_id); + + if (REG_GET_FIELD(boot_error, MP0_SMN_C2PMSG_126, GPU_ERR_XGMI_LINK_TRAINING)) + dev_info(adev->dev, "socket: %d, aid: %d, xgmi link training failed\n", + socket_id, aid_id); + + if (REG_GET_FIELD(boot_error, MP0_SMN_C2PMSG_126, GPU_ERR_USR_CP_LINK_TRAINING)) + dev_info(adev->dev, "socket: %d, aid: %d, usr cp link training failed\n", + socket_id, aid_id); + + if (REG_GET_FIELD(boot_error, MP0_SMN_C2PMSG_126, GPU_ERR_USR_DP_LINK_TRAINING)) + dev_info(adev->dev, "socket: %d, aid: %d, usr dp link training failed\n", + socket_id, aid_id); + + if (REG_GET_FIELD(boot_error, MP0_SMN_C2PMSG_126, GPU_ERR_HBM_MEM_TEST)) + dev_info(adev->dev, "socket: %d, aid: %d, hbm: %d, hbm memory test failed\n", + socket_id, aid_id, hbm_id); + + if (REG_GET_FIELD(boot_error, MP0_SMN_C2PMSG_126, GPU_ERR_HBM_BIST_TEST)) + dev_info(adev->dev, "socket: %d, aid: %d, hbm: %d, hbm bist test failed\n", + socket_id, aid_id, hbm_id); +} + +static int psp_v13_0_query_boot_status(struct psp_context *psp) +{ + struct amdgpu_device *adev = psp->adev; + int inst_mask = adev->aid_mask; + uint32_t reg_data; + uint32_t i; + int ret = 0; + + if (amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 6)) + return 0; + + if (RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_59) < 0x00a10007) + return 0; + + for_each_inst(i, inst_mask) { + reg_data = RREG32_SOC15(MP0, i, regMP0_SMN_C2PMSG_126); + if (!REG_GET_FIELD(reg_data, MP0_SMN_C2PMSG_126, BOOT_STATUS)) { + psp_v13_0_boot_error_reporting(adev, i, reg_data); + ret = -EINVAL; + break; + } + } + + return ret; +} + static const struct psp_funcs psp_v13_0_funcs = { .init_microcode = psp_v13_0_init_microcode, .wait_for_bootloader = psp_v13_0_wait_for_bootloader_steady_state, @@ -781,6 +858,7 @@ static const struct psp_funcs psp_v13_0_funcs = { .update_spirom = psp_v13_0_update_spirom, .vbflash_stat = psp_v13_0_vbflash_status, .fatal_error_recovery_quirk = psp_v13_0_fatal_error_recovery_quirk, + .query_boot_status = psp_v13_0_query_boot_status, }; void psp_v13_0_set_psp_funcs(struct psp_context *psp) diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c index 
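Note: psp_v13_0_query_boot_status() above only runs on MP0 13.0.6 with firmware reporting at least 0x00a10007 in C2PMSG_59; it walks every AID, and the first instance whose BOOT_STATUS field in C2PMSG_126 is clear has its failure causes decoded before the function fails with -EINVAL. A userspace sketch of that decode; the shifts and widths below are invented placeholders, the driver pulls the real ones from the MP0_SMN_C2PMSG_126 field definitions through REG_GET_FIELD():

#include <stdint.h>
#include <stdio.h>

#define FIELD(val, shift, mask)	(((val) >> (shift)) & (mask))

static void report_boot_error(uint32_t status)
{
	/* placeholder layout, for illustration only */
	uint32_t socket = FIELD(status, 0, 0xF);
	uint32_t aid    = FIELD(status, 4, 0x3);
	uint32_t hbm    = FIELD(status, 6, 0x1);

	if (FIELD(status, 8, 0x1))
		printf("socket %u, aid %u, hbm %u: memory training failed\n",
		       socket, aid, hbm);
	if (FIELD(status, 9, 0x1))
		printf("socket %u, aid %u: firmware load failed\n",
		       socket, aid);
	/* ...one check per GPU_ERR_* cause, as in the hunk above */
}

int main(void)
{
	report_boot_error(1u << 8);	/* fabricated example status word */
	return 0;
}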
b58a13bd75db..45377a175250 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c @@ -339,8 +339,6 @@ static void sdma_v2_4_gfx_stop(struct amdgpu_device *adev) u32 rb_cntl, ib_cntl; int i; - amdgpu_sdma_unset_buffer_funcs_helper(adev); - for (i = 0; i < adev->sdma.num_instances; i++) { rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]); rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0); @@ -474,9 +472,6 @@ static int sdma_v2_4_gfx_resume(struct amdgpu_device *adev) r = amdgpu_ring_test_helper(ring); if (r) return r; - - if (adev->mman.buffer_funcs_ring == ring) - amdgpu_ttm_set_buffer_funcs_status(adev, true); } return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c index c5ea32687eb5..2ad615be4bb3 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c @@ -513,8 +513,6 @@ static void sdma_v3_0_gfx_stop(struct amdgpu_device *adev) u32 rb_cntl, ib_cntl; int i; - amdgpu_sdma_unset_buffer_funcs_helper(adev); - for (i = 0; i < adev->sdma.num_instances; i++) { rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]); rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0); @@ -746,9 +744,6 @@ static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev) r = amdgpu_ring_test_helper(ring); if (r) return r; - - if (adev->mman.buffer_funcs_ring == ring) - amdgpu_ttm_set_buffer_funcs_status(adev, true); } return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index 683d51ae4bf1..3d68dd5523c6 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -877,8 +877,6 @@ static void sdma_v4_0_gfx_enable(struct amdgpu_device *adev, bool enable) u32 rb_cntl, ib_cntl; int i; - amdgpu_sdma_unset_buffer_funcs_helper(adev); - for (i = 0; i < adev->sdma.num_instances; i++) { rb_cntl = RREG32_SDMA(i, mmSDMA0_GFX_RB_CNTL); rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, enable ? 
1 : 0); @@ -913,8 +911,6 @@ static void sdma_v4_0_page_stop(struct amdgpu_device *adev) u32 rb_cntl, ib_cntl; int i; - amdgpu_sdma_unset_buffer_funcs_helper(adev); - for (i = 0; i < adev->sdma.num_instances; i++) { rb_cntl = RREG32_SDMA(i, mmSDMA0_PAGE_RB_CNTL); rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_PAGE_RB_CNTL, @@ -1402,13 +1398,7 @@ static int sdma_v4_0_start(struct amdgpu_device *adev) r = amdgpu_ring_test_helper(page); if (r) return r; - - if (adev->mman.buffer_funcs_ring == page) - amdgpu_ttm_set_buffer_funcs_status(adev, true); } - - if (adev->mman.buffer_funcs_ring == ring) - amdgpu_ttm_set_buffer_funcs_status(adev, true); } return r; @@ -1921,11 +1911,8 @@ static int sdma_v4_0_hw_fini(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; int i; - if (amdgpu_sriov_vf(adev)) { - /* disable the scheduler for SDMA */ - amdgpu_sdma_unset_buffer_funcs_helper(adev); + if (amdgpu_sriov_vf(adev)) return 0; - } if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__SDMA)) { for (i = 0; i < adev->sdma.num_instances; i++) { @@ -1964,7 +1951,6 @@ static int sdma_v4_0_resume(void *handle) if (adev->in_s0ix) { sdma_v4_0_enable(adev, true); sdma_v4_0_gfx_enable(adev, true); - amdgpu_ttm_set_buffer_funcs_status(adev, true); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c index c1ff5eda8961..3c485e5a531a 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c @@ -559,8 +559,6 @@ static void sdma_v5_0_gfx_stop(struct amdgpu_device *adev) u32 rb_cntl, ib_cntl; int i; - amdgpu_sdma_unset_buffer_funcs_helper(adev); - for (i = 0; i < adev->sdma.num_instances; i++) { rb_cntl = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL)); rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0); @@ -825,9 +823,6 @@ static int sdma_v5_0_gfx_resume(struct amdgpu_device *adev) r = amdgpu_ring_test_helper(ring); if (r) return r; - - if (adev->mman.buffer_funcs_ring == ring) - amdgpu_ttm_set_buffer_funcs_status(adev, true); } return 0; @@ -1426,11 +1421,8 @@ static int sdma_v5_0_hw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - if (amdgpu_sriov_vf(adev)) { - /* disable the scheduler for SDMA */ - amdgpu_sdma_unset_buffer_funcs_helper(adev); + if (amdgpu_sriov_vf(adev)) return 0; - } sdma_v5_0_ctx_switch_enable(adev, false); sdma_v5_0_enable(adev, false); diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c index 7d1e57189c8c..83c240f741b5 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c @@ -364,8 +364,6 @@ static void sdma_v5_2_gfx_stop(struct amdgpu_device *adev) u32 rb_cntl, ib_cntl; int i; - amdgpu_sdma_unset_buffer_funcs_helper(adev); - for (i = 0; i < adev->sdma.num_instances; i++) { rb_cntl = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL)); rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0); @@ -625,9 +623,6 @@ static int sdma_v5_2_gfx_resume(struct amdgpu_device *adev) r = amdgpu_ring_test_helper(ring); if (r) return r; - - if (adev->mman.buffer_funcs_ring == ring) - amdgpu_ttm_set_buffer_funcs_status(adev, true); } return 0; @@ -1284,11 +1279,8 @@ static int sdma_v5_2_hw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - if (amdgpu_sriov_vf(adev)) { - /* disable the scheduler for SDMA */ - amdgpu_sdma_unset_buffer_funcs_helper(adev); + if (amdgpu_sriov_vf(adev)) 
return 0; - } sdma_v5_2_ctx_switch_enable(adev, false); sdma_v5_2_enable(adev, false); diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c index 7e4d5188cbfa..3c7ddd219de8 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c @@ -348,8 +348,6 @@ static void sdma_v6_0_gfx_stop(struct amdgpu_device *adev) u32 rb_cntl, ib_cntl; int i; - amdgpu_sdma_unset_buffer_funcs_helper(adev); - for (i = 0; i < adev->sdma.num_instances; i++) { rb_cntl = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_CNTL)); rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RB_ENABLE, 0); @@ -561,9 +559,6 @@ static int sdma_v6_0_gfx_resume(struct amdgpu_device *adev) r = amdgpu_ring_test_helper(ring); if (r) return r; - - if (adev->mman.buffer_funcs_ring == ring) - amdgpu_ttm_set_buffer_funcs_status(adev, true); } return 0; @@ -1308,11 +1303,8 @@ static int sdma_v6_0_hw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - if (amdgpu_sriov_vf(adev)) { - /* disable the scheduler for SDMA */ - amdgpu_sdma_unset_buffer_funcs_helper(adev); + if (amdgpu_sriov_vf(adev)) return 0; - } sdma_v6_0_ctxempty_int_enable(adev, false); sdma_v6_0_enable(adev, false); diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c index 4b81f29e5fd5..a757526153e5 100644 --- a/drivers/gpu/drm/amd/amdgpu/si.c +++ b/drivers/gpu/drm/amd/amdgpu/si.c @@ -2440,8 +2440,6 @@ static void si_program_aspm(struct amdgpu_device *adev) if (!amdgpu_device_should_use_aspm(adev)) return; - if (adev->flags & AMD_IS_APU) - return; orig = data = RREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL); data &= ~LC_XMIT_N_FTS_MASK; data |= LC_XMIT_N_FTS(0x24) | LC_XMIT_N_FTS_OVERRIDE_EN; diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.c b/drivers/gpu/drm/amd/amdgpu/si_dma.c index 42c4547f32ec..9aa0e11ee673 100644 --- a/drivers/gpu/drm/amd/amdgpu/si_dma.c +++ b/drivers/gpu/drm/amd/amdgpu/si_dma.c @@ -115,8 +115,6 @@ static void si_dma_stop(struct amdgpu_device *adev) u32 rb_cntl; unsigned i; - amdgpu_sdma_unset_buffer_funcs_helper(adev); - for (i = 0; i < adev->sdma.num_instances; i++) { /* dma0 */ rb_cntl = RREG32(DMA_RB_CNTL + sdma_offsets[i]); @@ -177,9 +175,6 @@ static int si_dma_start(struct amdgpu_device *adev) r = amdgpu_ring_test_helper(ring); if (r) return r; - - if (adev->mman.buffer_funcs_ring == ring) - amdgpu_ttm_set_buffer_funcs_status(adev, true); } return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index 66ed28136bc8..d4b8d62f4294 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -646,8 +646,7 @@ static void soc15_program_aspm(struct amdgpu_device *adev) if (!amdgpu_device_should_use_aspm(adev)) return; - if (!(adev->flags & AMD_IS_APU) && - (adev->nbio.funcs->program_aspm)) + if (adev->nbio.funcs->program_aspm) adev->nbio.funcs->program_aspm(adev); } diff --git a/drivers/gpu/drm/amd/amdgpu/soc21.c b/drivers/gpu/drm/amd/amdgpu/soc21.c index 8c6cab641a1c..d5083c549330 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc21.c +++ b/drivers/gpu/drm/amd/amdgpu/soc21.c @@ -433,8 +433,7 @@ static void soc21_program_aspm(struct amdgpu_device *adev) if (!amdgpu_device_should_use_aspm(adev)) return; - if (!(adev->flags & AMD_IS_APU) && - (adev->nbio.funcs->program_aspm)) + if (adev->nbio.funcs->program_aspm) adev->nbio.funcs->program_aspm(adev); } diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c index 
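Note: the umc_v12_0 hunk just below factors the MCA status decode into two predicates instead of open-coded field tests. Two behavior details are easy to miss: the uncorrectable check no longer looks at the Deferred bit, and the correctable check newly counts replay-mode data parity errors (ErrorCodeExt 0x5 or 0xb) as CEs when they do not also qualify as UEs. A standalone sketch of the resulting decision logic, with placeholder bit positions standing in for the real MCA_UMC_UMC0_MCUMC_STATUST0 fields:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* placeholder field extractors, not the real register layout */
#define MCA_VAL(s)		(((s) >> 63) & 1)
#define MCA_UC(s)		(((s) >> 61) & 1)
#define MCA_PCC(s)		(((s) >> 57) & 1)
#define MCA_TCC(s)		(((s) >> 55) & 1)
#define MCA_CECC(s)		(((s) >> 46) & 1)
#define MCA_UECC(s)		(((s) >> 45) & 1)
#define MCA_ERRCODE_EXT(s)	(((s) >> 16) & 0x3F)

static bool is_uncorrectable(uint64_t s)
{
	return MCA_VAL(s) && (MCA_PCC(s) || MCA_UC(s) || MCA_TCC(s));
}

static bool is_correctable(uint64_t s)
{
	return MCA_VAL(s) &&
	       (MCA_CECC(s) ||
		(MCA_UECC(s) && !MCA_UC(s)) ||
		/* replay-mode data parity error that is not a UE */
		((MCA_ERRCODE_EXT(s) == 0x5 || MCA_ERRCODE_EXT(s) == 0xb) &&
		 !is_uncorrectable(s)));
}

int main(void)
{
	uint64_t s = (1ULL << 63) | (1ULL << 46);	/* Val + CECC */

	printf("CE=%d UE=%d\n", is_correctable(s), is_uncorrectable(s));
	return 0;
}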
025e6aeb058d..770b4b4e3138 100644 --- a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c +++ b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c @@ -88,6 +88,26 @@ static void umc_v12_0_reset_error_count(struct amdgpu_device *adev) umc_v12_0_reset_error_count_per_channel, NULL); } +static bool umc_v12_0_is_uncorrectable_error(uint64_t mc_umc_status) +{ + return ((REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) && + (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, PCC) == 1 || + REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UC) == 1 || + REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, TCC) == 1)); +} + +static bool umc_v12_0_is_correctable_error(uint64_t mc_umc_status) +{ + return (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 && + (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1 || + (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 && + REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UC) == 0) || + /* Identify data parity error in replay mode */ + ((REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, ErrorCodeExt) == 0x5 || + REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, ErrorCodeExt) == 0xb) && + !(umc_v12_0_is_uncorrectable_error(mc_umc_status))))); +} + static void umc_v12_0_query_correctable_error_count(struct amdgpu_device *adev, uint64_t umc_reg_offset, unsigned long *error_count) @@ -104,10 +124,7 @@ static void umc_v12_0_query_correctable_error_count(struct amdgpu_device *adev, mc_umc_status = RREG64_PCIE_EXT((mc_umc_status_addr + umc_reg_offset) * 4); - if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 && - (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1 || - (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 && - REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UC) == 0))) + if (umc_v12_0_is_correctable_error(mc_umc_status)) *error_count += 1; } @@ -125,11 +142,7 @@ static void umc_v12_0_query_uncorrectable_error_count(struct amdgpu_device *adev mc_umc_status = RREG64_PCIE_EXT((mc_umc_status_addr + umc_reg_offset) * 4); - if ((REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) && - (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1 || - REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, PCC) == 1 || - REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UC) == 1 || - REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, TCC) == 1)) + if (umc_v12_0_is_uncorrectable_error(mc_umc_status)) *error_count += 1; } diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v8_10.c b/drivers/gpu/drm/amd/amdgpu/umc_v8_10.c index 46bfdee79bfd..c4c77257710c 100644 --- a/drivers/gpu/drm/amd/amdgpu/umc_v8_10.c +++ b/drivers/gpu/drm/amd/amdgpu/umc_v8_10.c @@ -336,7 +336,7 @@ static void umc_v8_10_ecc_info_query_correctable_error_count(struct amdgpu_devic uint32_t node_inst, uint32_t umc_inst, uint32_t ch_inst, unsigned long *error_count) { - uint64_t mc_umc_status; + uint16_t ecc_ce_cnt; uint32_t eccinfo_table_idx; struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); @@ -345,12 +345,10 @@ static void umc_v8_10_ecc_info_query_correctable_error_count(struct amdgpu_devic umc_inst * adev->umc.channel_inst_num + ch_inst; - /* check the MCUMC_STATUS */ - mc_umc_status = ras->umc_ecc.ecc[eccinfo_table_idx].mca_umc_status; - if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 && - REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 
1) { - *error_count += 1; - } + /* Retrieve CE count */ + ecc_ce_cnt = ras->umc_ecc.ecc[eccinfo_table_idx].ce_count_lo_chip; + if (ecc_ce_cnt) + *error_count += ecc_ce_cnt; } static void umc_v8_10_ecc_info_query_uncorrectable_error_count(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c index 6a8494f98d3e..1a98812981f4 100644 --- a/drivers/gpu/drm/amd/amdgpu/vi.c +++ b/drivers/gpu/drm/amd/amdgpu/vi.c @@ -1124,11 +1124,10 @@ static void vi_program_aspm(struct amdgpu_device *adev) bool bL1SS = false; bool bClkReqSupport = true; - if (!amdgpu_device_should_use_aspm(adev) || !amdgpu_device_aspm_support_quirk()) + if (!amdgpu_device_should_use_aspm(adev)) return; - if (adev->flags & AMD_IS_APU || - adev->asic_type < CHIP_POLARIS10) + if (adev->asic_type < CHIP_POLARIS10) return; orig = data = RREG32_PCIE(ixPCIE_LC_CNTL); diff --git a/drivers/gpu/drm/amd/amdgpu/vpe_v6_1.c b/drivers/gpu/drm/amd/amdgpu/vpe_v6_1.c index 756f39348dd9..174f13eff575 100644 --- a/drivers/gpu/drm/amd/amdgpu/vpe_v6_1.c +++ b/drivers/gpu/drm/amd/amdgpu/vpe_v6_1.c @@ -205,19 +205,21 @@ static int vpe_v6_1_ring_start(struct amdgpu_vpe *vpe) static int vpe_v_6_1_ring_stop(struct amdgpu_vpe *vpe) { struct amdgpu_device *adev = vpe->ring.adev; - uint32_t rb_cntl, ib_cntl; + uint32_t queue_reset; + int ret; - rb_cntl = RREG32(vpe_get_reg_offset(vpe, 0, regVPEC_QUEUE0_RB_CNTL)); - rb_cntl = REG_SET_FIELD(rb_cntl, VPEC_QUEUE0_RB_CNTL, RB_ENABLE, 0); - WREG32(vpe_get_reg_offset(vpe, 0, regVPEC_QUEUE0_RB_CNTL), rb_cntl); + queue_reset = RREG32(vpe_get_reg_offset(vpe, 0, regVPEC_QUEUE_RESET_REQ)); + queue_reset = REG_SET_FIELD(queue_reset, VPEC_QUEUE_RESET_REQ, QUEUE0_RESET, 1); + WREG32(vpe_get_reg_offset(vpe, 0, regVPEC_QUEUE_RESET_REQ), queue_reset); - ib_cntl = RREG32(vpe_get_reg_offset(vpe, 0, regVPEC_QUEUE0_IB_CNTL)); - ib_cntl = REG_SET_FIELD(ib_cntl, VPEC_QUEUE0_IB_CNTL, IB_ENABLE, 0); - WREG32(vpe_get_reg_offset(vpe, 0, regVPEC_QUEUE0_IB_CNTL), ib_cntl); + ret = SOC15_WAIT_ON_RREG(VPE, 0, regVPEC_QUEUE_RESET_REQ, 0, + VPEC_QUEUE_RESET_REQ__QUEUE0_RESET_MASK); + if (ret) + dev_err(adev->dev, "VPE queue reset failed\n"); vpe->ring.sched.ready = false; - return 0; + return ret; } static int vpe_v6_1_set_trap_irq_state(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c index 0e792a8496d6..cd8e459201f1 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c @@ -1404,6 +1404,66 @@ static int kfd_fill_gpu_cache_info_from_gfx_config(struct kfd_dev *kdev, return i; } +static int kfd_fill_gpu_cache_info_from_gfx_config_v2(struct kfd_dev *kdev, + struct kfd_gpu_cache_info *pcache_info) +{ + struct amdgpu_device *adev = kdev->adev; + int i = 0; + + /* TCP L1 Cache per CU */ + if (adev->gfx.config.gc_tcp_size_per_cu) { + pcache_info[i].cache_size = adev->gfx.config.gc_tcp_size_per_cu; + pcache_info[i].cache_level = 1; + pcache_info[i].flags = (CRAT_CACHE_FLAGS_ENABLED | + CRAT_CACHE_FLAGS_DATA_CACHE | + CRAT_CACHE_FLAGS_SIMD_CACHE); + pcache_info[i].num_cu_shared = 1; + i++; + } + /* Scalar L1 Instruction Cache per SQC */ + if (adev->gfx.config.gc_l1_instruction_cache_size_per_sqc) { + pcache_info[i].cache_size = + adev->gfx.config.gc_l1_instruction_cache_size_per_sqc; + pcache_info[i].cache_level = 1; + pcache_info[i].flags = (CRAT_CACHE_FLAGS_ENABLED | + CRAT_CACHE_FLAGS_INST_CACHE | + CRAT_CACHE_FLAGS_SIMD_CACHE); + pcache_info[i].num_cu_shared = 
adev->gfx.config.gc_num_cu_per_sqc; + i++; + } + /* Scalar L1 Data Cache per SQC */ + if (adev->gfx.config.gc_l1_data_cache_size_per_sqc) { + pcache_info[i].cache_size = adev->gfx.config.gc_l1_data_cache_size_per_sqc; + pcache_info[i].cache_level = 1; + pcache_info[i].flags = (CRAT_CACHE_FLAGS_ENABLED | + CRAT_CACHE_FLAGS_DATA_CACHE | + CRAT_CACHE_FLAGS_SIMD_CACHE); + pcache_info[i].num_cu_shared = adev->gfx.config.gc_num_cu_per_sqc; + i++; + } + /* L2 Data Cache per GPU (Total Tex Cache) */ + if (adev->gfx.config.gc_tcc_size) { + pcache_info[i].cache_size = adev->gfx.config.gc_tcc_size; + pcache_info[i].cache_level = 2; + pcache_info[i].flags = (CRAT_CACHE_FLAGS_ENABLED | + CRAT_CACHE_FLAGS_DATA_CACHE | + CRAT_CACHE_FLAGS_SIMD_CACHE); + pcache_info[i].num_cu_shared = adev->gfx.config.max_cu_per_sh; + i++; + } + /* L3 Data Cache per GPU */ + if (adev->gmc.mall_size) { + pcache_info[i].cache_size = adev->gmc.mall_size / 1024; + pcache_info[i].cache_level = 3; + pcache_info[i].flags = (CRAT_CACHE_FLAGS_ENABLED | + CRAT_CACHE_FLAGS_DATA_CACHE | + CRAT_CACHE_FLAGS_SIMD_CACHE); + pcache_info[i].num_cu_shared = adev->gfx.config.max_cu_per_sh; + i++; + } + return i; +} + int kfd_get_gpu_cache_info(struct kfd_node *kdev, struct kfd_gpu_cache_info **pcache_info) { int num_of_cache_types = 0; @@ -1461,10 +1521,14 @@ int kfd_get_gpu_cache_info(struct kfd_node *kdev, struct kfd_gpu_cache_info **pc num_of_cache_types = ARRAY_SIZE(vega20_cache_info); break; case IP_VERSION(9, 4, 2): - case IP_VERSION(9, 4, 3): *pcache_info = aldebaran_cache_info; num_of_cache_types = ARRAY_SIZE(aldebaran_cache_info); break; + case IP_VERSION(9, 4, 3): + num_of_cache_types = + kfd_fill_gpu_cache_info_from_gfx_config_v2(kdev->kfd, + *pcache_info); + break; case IP_VERSION(9, 1, 0): case IP_VERSION(9, 2, 2): *pcache_info = raven_cache_info; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c index 81d25a679427..6c25dab051d5 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c @@ -442,10 +442,10 @@ svm_migrate_vma_to_vram(struct kfd_node *node, struct svm_range *prange, goto out_free; } if (cpages != npages) - pr_debug("partial migration, 0x%lx/0x%llx pages collected\n", + pr_debug("partial migration, 0x%lx/0x%llx pages migrated\n", cpages, npages); else - pr_debug("0x%lx pages collected\n", cpages); + pr_debug("0x%lx pages migrated\n", cpages); r = svm_migrate_copy_to_vram(node, prange, &migrate, &mfence, scratch, ttm_res_offset); migrate_vma_pages(&migrate); @@ -479,8 +479,6 @@ out: * svm_migrate_ram_to_vram - migrate svm range from system to device * @prange: range structure * @best_loc: the device to migrate to - * @start_mgr: start page to migrate - * @last_mgr: last page to migrate * @mm: the process mm structure * @trigger: reason of migration * @@ -491,7 +489,6 @@ out: */ static int svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc, - unsigned long start_mgr, unsigned long last_mgr, struct mm_struct *mm, uint32_t trigger) { unsigned long addr, start, end; @@ -501,30 +498,23 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc, unsigned long cpages = 0; long r = 0; - if (!best_loc) { - pr_debug("svms 0x%p [0x%lx 0x%lx] migrate to sys ram\n", - prange->svms, start_mgr, last_mgr); + if (prange->actual_loc == best_loc) { + pr_debug("svms 0x%p [0x%lx 0x%lx] already on best_loc 0x%x\n", + prange->svms, prange->start, prange->last, best_loc); return 0; } - if (start_mgr < prange->start || 
last_mgr > prange->last) { - pr_debug("range [0x%lx 0x%lx] out prange [0x%lx 0x%lx]\n", - start_mgr, last_mgr, prange->start, prange->last); - return -EFAULT; - } - node = svm_range_get_node_by_id(prange, best_loc); if (!node) { pr_debug("failed to get kfd node by id 0x%x\n", best_loc); return -ENODEV; } - pr_debug("svms 0x%p [0x%lx 0x%lx] in [0x%lx 0x%lx] to gpu 0x%x\n", - prange->svms, start_mgr, last_mgr, prange->start, prange->last, - best_loc); + pr_debug("svms 0x%p [0x%lx 0x%lx] to gpu 0x%x\n", prange->svms, + prange->start, prange->last, best_loc); - start = start_mgr << PAGE_SHIFT; - end = (last_mgr + 1) << PAGE_SHIFT; + start = prange->start << PAGE_SHIFT; + end = (prange->last + 1) << PAGE_SHIFT; r = svm_range_vram_node_new(node, prange, true); if (r) { @@ -554,11 +544,8 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc, if (cpages) { prange->actual_loc = best_loc; - prange->vram_pages = prange->vram_pages + cpages; - } else if (!prange->actual_loc) { - /* if no page migrated and all pages from prange are at - * sys ram drop svm_bo got from svm_range_vram_node_new - */ + svm_range_dma_unmap(prange); + } else { svm_range_vram_node_free(prange); } @@ -676,8 +663,9 @@ out_oom: * Context: Process context, caller hold mmap read lock, prange->migrate_mutex * * Return: + * 0 - success with all pages migrated * negative values - indicate error - * positive values or zero - number of pages got migrated + * positive values - partial migration, number of pages not migrated */ static long svm_migrate_vma_to_ram(struct kfd_node *node, struct svm_range *prange, @@ -688,7 +676,6 @@ svm_migrate_vma_to_ram(struct kfd_node *node, struct svm_range *prange, uint64_t npages = (end - start) >> PAGE_SHIFT; unsigned long upages = npages; unsigned long cpages = 0; - unsigned long mpages = 0; struct amdgpu_device *adev = node->adev; struct kfd_process_device *pdd; struct dma_fence *mfence = NULL; @@ -738,10 +725,10 @@ svm_migrate_vma_to_ram(struct kfd_node *node, struct svm_range *prange, goto out_free; } if (cpages != npages) - pr_debug("partial migration, 0x%lx/0x%llx pages collected\n", + pr_debug("partial migration, 0x%lx/0x%llx pages migrated\n", cpages, npages); else - pr_debug("0x%lx pages collected\n", cpages); + pr_debug("0x%lx pages migrated\n", cpages); r = svm_migrate_copy_to_ram(adev, prange, &migrate, &mfence, scratch, npages); @@ -764,21 +751,17 @@ out_free: kvfree(buf); out: if (!r && cpages) { - mpages = cpages - upages; pdd = svm_range_get_pdd_by_node(prange, node); if (pdd) - WRITE_ONCE(pdd->page_out, pdd->page_out + mpages); + WRITE_ONCE(pdd->page_out, pdd->page_out + cpages); } - - return r ? r : mpages; + return r ? 
r : upages; } /** * svm_migrate_vram_to_ram - migrate svm range from device to system * @prange: range structure * @mm: process mm, use current->mm if NULL - * @start_mgr: start page need be migrated to sys ram - * @last_mgr: last page need be migrated to sys ram * @trigger: reason of migration * @fault_page: is from vmf->page, svm_migrate_to_ram(), this is CPU page fault callback * @@ -788,7 +771,6 @@ out: * 0 - OK, otherwise error code */ int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm, - unsigned long start_mgr, unsigned long last_mgr, uint32_t trigger, struct page *fault_page) { struct kfd_node *node; @@ -796,33 +778,26 @@ int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm, unsigned long addr; unsigned long start; unsigned long end; - unsigned long mpages = 0; + unsigned long upages = 0; long r = 0; - /* this pragne has no any vram page to migrate to sys ram */ if (!prange->actual_loc) { pr_debug("[0x%lx 0x%lx] already migrated to ram\n", prange->start, prange->last); return 0; } - if (start_mgr < prange->start || last_mgr > prange->last) { - pr_debug("range [0x%lx 0x%lx] out prange [0x%lx 0x%lx]\n", - start_mgr, last_mgr, prange->start, prange->last); - return -EFAULT; - } - node = svm_range_get_node_by_id(prange, prange->actual_loc); if (!node) { pr_debug("failed to get kfd node by id 0x%x\n", prange->actual_loc); return -ENODEV; } pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] from gpu 0x%x to ram\n", - prange->svms, prange, start_mgr, last_mgr, + prange->svms, prange, prange->start, prange->last, prange->actual_loc); - start = start_mgr << PAGE_SHIFT; - end = (last_mgr + 1) << PAGE_SHIFT; + start = prange->start << PAGE_SHIFT; + end = (prange->last + 1) << PAGE_SHIFT; for (addr = start; addr < end;) { unsigned long next; @@ -841,21 +816,14 @@ int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm, pr_debug("failed %ld to migrate prange %p\n", r, prange); break; } else { - mpages += r; + upages += r; } addr = next; } - if (r >= 0) { - prange->vram_pages -= mpages; - - /* prange does not have vram page set its actual_loc to system - * and drop its svm_bo ref - */ - if (prange->vram_pages == 0 && prange->ttm_res) { - prange->actual_loc = 0; - svm_range_vram_node_free(prange); - } + if (r >= 0 && !upages) { + svm_range_vram_node_free(prange); + prange->actual_loc = 0; } return r < 0 ? r : 0; @@ -865,23 +833,17 @@ int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm, * svm_migrate_vram_to_vram - migrate svm range from device to device * @prange: range structure * @best_loc: the device to migrate to - * @start: start page need be migrated to sys ram - * @last: last page need be migrated to sys ram * @mm: process mm, use current->mm if NULL * @trigger: reason of migration * * Context: Process context, caller hold mmap read lock, svms lock, prange lock * - * migrate all vram pages in prange to sys ram, then migrate - * [start, last] pages from sys ram to gpu node best_loc. 
- * * Return: * 0 - OK, otherwise error code */ static int svm_migrate_vram_to_vram(struct svm_range *prange, uint32_t best_loc, - unsigned long start, unsigned long last, - struct mm_struct *mm, uint32_t trigger) + struct mm_struct *mm, uint32_t trigger) { int r, retries = 3; @@ -893,8 +855,7 @@ svm_migrate_vram_to_vram(struct svm_range *prange, uint32_t best_loc, pr_debug("from gpu 0x%x to gpu 0x%x\n", prange->actual_loc, best_loc); do { - r = svm_migrate_vram_to_ram(prange, mm, prange->start, prange->last, - trigger, NULL); + r = svm_migrate_vram_to_ram(prange, mm, trigger, NULL); if (r) return r; } while (prange->actual_loc && --retries); @@ -902,21 +863,17 @@ svm_migrate_vram_to_vram(struct svm_range *prange, uint32_t best_loc, if (prange->actual_loc) return -EDEADLK; - return svm_migrate_ram_to_vram(prange, best_loc, start, last, mm, trigger); + return svm_migrate_ram_to_vram(prange, best_loc, mm, trigger); } int svm_migrate_to_vram(struct svm_range *prange, uint32_t best_loc, - unsigned long start, unsigned long last, - struct mm_struct *mm, uint32_t trigger) + struct mm_struct *mm, uint32_t trigger) { - if (!prange->actual_loc || prange->actual_loc == best_loc) - return svm_migrate_ram_to_vram(prange, best_loc, start, last, - mm, trigger); - + if (!prange->actual_loc) + return svm_migrate_ram_to_vram(prange, best_loc, mm, trigger); else - return svm_migrate_vram_to_vram(prange, best_loc, start, last, - mm, trigger); + return svm_migrate_vram_to_vram(prange, best_loc, mm, trigger); } @@ -932,9 +889,10 @@ svm_migrate_to_vram(struct svm_range *prange, uint32_t best_loc, */ static vm_fault_t svm_migrate_to_ram(struct vm_fault *vmf) { - unsigned long start, last, size; unsigned long addr = vmf->address; struct svm_range_bo *svm_bo; + enum svm_work_list_ops op; + struct svm_range *parent; struct svm_range *prange; struct kfd_process *p; struct mm_struct *mm; @@ -971,31 +929,51 @@ static vm_fault_t svm_migrate_to_ram(struct vm_fault *vmf) mutex_lock(&p->svms.lock); - prange = svm_range_from_addr(&p->svms, addr, NULL); + prange = svm_range_from_addr(&p->svms, addr, &parent); if (!prange) { pr_debug("failed get range svms 0x%p addr 0x%lx\n", &p->svms, addr); r = -EFAULT; goto out_unlock_svms; } - mutex_lock(&prange->migrate_mutex); + mutex_lock(&parent->migrate_mutex); + if (prange != parent) + mutex_lock_nested(&prange->migrate_mutex, 1); if (!prange->actual_loc) goto out_unlock_prange; - /* Align migration range start and size to granularity size */ - size = 1UL << prange->granularity; - start = max(ALIGN_DOWN(addr, size), prange->start); - last = min(ALIGN(addr + 1, size) - 1, prange->last); + svm_range_lock(parent); + if (prange != parent) + mutex_lock_nested(&prange->lock, 1); + r = svm_range_split_by_granularity(p, mm, addr, parent, prange); + if (prange != parent) + mutex_unlock(&prange->lock); + svm_range_unlock(parent); + if (r) { + pr_debug("failed %d to split range by granularity\n", r); + goto out_unlock_prange; + } - r = svm_migrate_vram_to_ram(prange, vmf->vma->vm_mm, start, last, - KFD_MIGRATE_TRIGGER_PAGEFAULT_CPU, vmf->page); + r = svm_migrate_vram_to_ram(prange, vmf->vma->vm_mm, + KFD_MIGRATE_TRIGGER_PAGEFAULT_CPU, + vmf->page); if (r) pr_debug("failed %d migrate svms 0x%p range 0x%p [0x%lx 0x%lx]\n", - r, prange->svms, prange, start, last); + r, prange->svms, prange, prange->start, prange->last); + + /* xnack on, update mapping on GPUs with ACCESS_IN_PLACE */ + if (p->xnack_enabled && parent == prange) + op = SVM_OP_UPDATE_RANGE_NOTIFIER_AND_MAP; + else + op = 
SVM_OP_UPDATE_RANGE_NOTIFIER; + svm_range_add_list_work(&p->svms, parent, mm, op); + schedule_deferred_list_work(&p->svms); out_unlock_prange: - mutex_unlock(&prange->migrate_mutex); + if (prange != parent) + mutex_unlock(&prange->migrate_mutex); + mutex_unlock(&parent->migrate_mutex); out_unlock_svms: mutex_unlock(&p->svms.lock); out_unref_process: diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.h b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.h index 9e48d10e848e..487f26368164 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.h @@ -41,13 +41,9 @@ enum MIGRATION_COPY_DIR { }; int svm_migrate_to_vram(struct svm_range *prange, uint32_t best_loc, - unsigned long start, unsigned long last, struct mm_struct *mm, uint32_t trigger); - int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm, - unsigned long start, unsigned long last, - uint32_t trigger, struct page *fault_page); - + uint32_t trigger, struct page *fault_page); unsigned long svm_migrate_addr_to_pfn(struct amdgpu_device *adev, unsigned long addr); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c index bda88dc6e2fa..e67d06a42809 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c @@ -158,13 +158,12 @@ svm_is_valid_dma_mapping_addr(struct device *dev, dma_addr_t dma_addr) static int svm_range_dma_map_dev(struct amdgpu_device *adev, struct svm_range *prange, unsigned long offset, unsigned long npages, - unsigned long *hmm_pfns, uint32_t gpuidx, uint64_t *vram_pages) + unsigned long *hmm_pfns, uint32_t gpuidx) { enum dma_data_direction dir = DMA_BIDIRECTIONAL; dma_addr_t *addr = prange->dma_addr[gpuidx]; struct device *dev = adev->dev; struct page *page; - uint64_t vram_pages_dev; int i, r; if (!addr) { @@ -174,7 +173,6 @@ svm_range_dma_map_dev(struct amdgpu_device *adev, struct svm_range *prange, prange->dma_addr[gpuidx] = addr; } - vram_pages_dev = 0; addr += offset; for (i = 0; i < npages; i++) { if (svm_is_valid_dma_mapping_addr(dev, addr[i])) @@ -184,7 +182,6 @@ svm_range_dma_map_dev(struct amdgpu_device *adev, struct svm_range *prange, if (is_zone_device_page(page)) { struct amdgpu_device *bo_adev = prange->svm_bo->node->adev; - vram_pages_dev++; addr[i] = (hmm_pfns[i] << PAGE_SHIFT) + bo_adev->vm_manager.vram_base_offset - bo_adev->kfd.pgmap.range.start; @@ -201,14 +198,13 @@ svm_range_dma_map_dev(struct amdgpu_device *adev, struct svm_range *prange, pr_debug_ratelimited("dma mapping 0x%llx for page addr 0x%lx\n", addr[i] >> PAGE_SHIFT, page_to_pfn(page)); } - *vram_pages = vram_pages_dev; return 0; } static int svm_range_dma_map(struct svm_range *prange, unsigned long *bitmap, unsigned long offset, unsigned long npages, - unsigned long *hmm_pfns, uint64_t *vram_pages) + unsigned long *hmm_pfns) { struct kfd_process *p; uint32_t gpuidx; @@ -227,7 +223,7 @@ svm_range_dma_map(struct svm_range *prange, unsigned long *bitmap, } r = svm_range_dma_map_dev(pdd->dev->adev, prange, offset, npages, - hmm_pfns, gpuidx, vram_pages); + hmm_pfns, gpuidx); if (r) break; } @@ -353,7 +349,6 @@ svm_range *svm_range_new(struct svm_range_list *svms, uint64_t start, INIT_LIST_HEAD(&prange->child_list); atomic_set(&prange->invalid, 0); prange->validate_timestamp = 0; - prange->vram_pages = 0; mutex_init(&prange->migrate_mutex); mutex_init(&prange->lock); @@ -400,8 +395,6 @@ static void svm_range_bo_release(struct kref *kref) prange->start, prange->last); mutex_lock(&prange->lock); prange->svm_bo = NULL; - 
/* prange should not hold vram page now */ - WARN_ON(prange->actual_loc); mutex_unlock(&prange->lock); spin_lock(&svm_bo->list_lock); @@ -783,7 +776,7 @@ svm_range_apply_attrs(struct kfd_process *p, struct svm_range *prange, prange->flags &= ~attrs[i].value; break; case KFD_IOCTL_SVM_ATTR_GRANULARITY: - prange->granularity = attrs[i].value; + prange->granularity = min_t(uint32_t, attrs[i].value, 0x3F); break; default: WARN_ONCE(1, "svm_range_check_attrs wasn't called?"); @@ -982,11 +975,6 @@ svm_range_split_nodes(struct svm_range *new, struct svm_range *old, new->svm_bo = svm_range_bo_ref(old->svm_bo); new->ttm_res = old->ttm_res; - /* set new's vram_pages as old range's now, the acurate vram_pages - * will be updated during mapping - */ - new->vram_pages = min(old->vram_pages, new->npages); - spin_lock(&new->svm_bo->list_lock); list_add(&new->svm_bo_list, &new->svm_bo->range_list); spin_unlock(&new->svm_bo->list_lock); @@ -1147,6 +1135,66 @@ svm_range_add_child(struct svm_range *prange, struct mm_struct *mm, list_add_tail(&pchild->child_list, &prange->child_list); } +/** + * svm_range_split_by_granularity - collect ranges within granularity boundary + * + * @p: the process with svms list + * @mm: mm structure + * @addr: the vm fault address in pages, to split the prange + * @parent: parent range if prange is from child list + * @prange: prange to split + * + * Trims @prange to be a single aligned block of prange->granularity if + * possible. The head and tail are added to the child_list in @parent. + * + * Context: caller must hold mmap_read_lock and prange->lock + * + * Return: + * 0 - OK, otherwise error code + */ +int +svm_range_split_by_granularity(struct kfd_process *p, struct mm_struct *mm, + unsigned long addr, struct svm_range *parent, + struct svm_range *prange) +{ + struct svm_range *head, *tail; + unsigned long start, last, size; + int r; + + /* Align splited range start and size to granularity size, then a single + * PTE will be used for whole range, this reduces the number of PTE + * updated and the L1 TLB space used for translation. + */ + size = 1UL << prange->granularity; + start = ALIGN_DOWN(addr, size); + last = ALIGN(addr + 1, size) - 1; + + pr_debug("svms 0x%p split [0x%lx 0x%lx] to [0x%lx 0x%lx] size 0x%lx\n", + prange->svms, prange->start, prange->last, start, last, size); + + if (start > prange->start) { + r = svm_range_split(prange, start, prange->last, &head); + if (r) + return r; + svm_range_add_child(parent, mm, head, SVM_OP_ADD_RANGE); + } + + if (last < prange->last) { + r = svm_range_split(prange, prange->start, last, &tail); + if (r) + return r; + svm_range_add_child(parent, mm, tail, SVM_OP_ADD_RANGE); + } + + /* xnack on, update mapping on GPUs with ACCESS_IN_PLACE */ + if (p->xnack_enabled && prange->work_item.op == SVM_OP_ADD_RANGE) { + prange->work_item.op = SVM_OP_ADD_RANGE_AND_MAP; + pr_debug("change prange 0x%p [0x%lx 0x%lx] op %d\n", + prange, prange->start, prange->last, + SVM_OP_ADD_RANGE_AND_MAP); + } + return 0; +} static bool svm_nodes_in_same_hive(struct kfd_node *node_a, struct kfd_node *node_b) { @@ -1234,7 +1282,7 @@ svm_range_get_pte_flags(struct kfd_node *node, if (num_possible_nodes() <= 1) mapping_flags |= mtype_local; else - mapping_flags |= AMDGPU_VM_MTYPE_NC; + mapping_flags |= ext_coherent ? 
AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC; /* system memory accessed by the dGPU */ } else { mapping_flags |= AMDGPU_VM_MTYPE_UC; @@ -1269,7 +1317,7 @@ svm_range_unmap_from_gpu(struct amdgpu_device *adev, struct amdgpu_vm *vm, pr_debug("[0x%llx 0x%llx]\n", start, last); - return amdgpu_vm_update_range(adev, vm, false, true, true, NULL, start, + return amdgpu_vm_update_range(adev, vm, false, true, true, false, NULL, start, last, init_pte_value, 0, 0, NULL, NULL, fence); } @@ -1376,8 +1424,8 @@ svm_range_map_to_gpu(struct kfd_process_device *pdd, struct svm_range *prange, * different memory partition based on fpfn/lpfn, we should use * same vm_manager.vram_base_offset regardless memory partition. */ - r = amdgpu_vm_update_range(adev, vm, false, false, flush_tlb, NULL, - last_start, prange->start + i, + r = amdgpu_vm_update_range(adev, vm, false, false, flush_tlb, true, + NULL, last_start, prange->start + i, pte_flags, (last_start - prange->start) << PAGE_SHIFT, bo_adev ? bo_adev->vm_manager.vram_base_offset : 0, @@ -1570,7 +1618,6 @@ static int svm_range_validate_and_map(struct mm_struct *mm, struct svm_validate_context *ctx; unsigned long start, end, addr; struct kfd_process *p; - uint64_t vram_pages; void *owner; int32_t idx; int r = 0; @@ -1639,13 +1686,11 @@ static int svm_range_validate_and_map(struct mm_struct *mm, } } - vram_pages = 0; start = prange->start << PAGE_SHIFT; end = (prange->last + 1) << PAGE_SHIFT; for (addr = start; !r && addr < end; ) { struct hmm_range *hmm_range; struct vm_area_struct *vma; - uint64_t vram_pages_vma; unsigned long next = 0; unsigned long offset; unsigned long npages; @@ -1674,11 +1719,9 @@ static int svm_range_validate_and_map(struct mm_struct *mm, if (!r) { offset = (addr - start) >> PAGE_SHIFT; r = svm_range_dma_map(prange, ctx->bitmap, offset, npages, - hmm_range->hmm_pfns, &vram_pages_vma); + hmm_range->hmm_pfns); if (r) pr_debug("failed %d to dma map range\n", r); - else - vram_pages += vram_pages_vma; } svm_range_lock(prange); @@ -1704,19 +1747,6 @@ static int svm_range_validate_and_map(struct mm_struct *mm, addr = next; } - if (addr == end) { - prange->vram_pages = vram_pages; - - /* if prange does not include any vram page and it - * has not released svm_bo drop its svm_bo reference - * and set its actaul_loc to sys ram - */ - if (!vram_pages && prange->ttm_res) { - prange->actual_loc = 0; - svm_range_vram_node_free(prange); - } - } - svm_range_unreserve_bos(ctx); if (!r) prange->validate_timestamp = ktime_get_boottime(); @@ -1969,7 +1999,6 @@ static struct svm_range *svm_range_clone(struct svm_range *old) new->actual_loc = old->actual_loc; new->granularity = old->granularity; new->mapped_to_gpu = old->mapped_to_gpu; - new->vram_pages = old->vram_pages; bitmap_copy(new->bitmap_access, old->bitmap_access, MAX_GPU_INSTANCE); bitmap_copy(new->bitmap_aip, old->bitmap_aip, MAX_GPU_INSTANCE); @@ -2035,6 +2064,7 @@ svm_range_split_new(struct svm_range_list *svms, uint64_t start, uint64_t last, * @update_list: output, the ranges need validate and update GPU mapping * @insert_list: output, the ranges need insert to svms * @remove_list: output, the ranges are replaced and need remove from svms + * @remap_list: output, remap unaligned svm ranges * * Check if the virtual address range has overlap with any existing ranges, * split partly overlapping ranges and add new ranges in the gaps. 
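Note: svm_range_split_by_granularity() above trims the faulting range to a single block of 2^granularity pages so the whole block can be covered by one PTE span; the same series also clamps the user-supplied granularity attribute to 0x3F so the shift stays well defined. The alignment math as a compilable sketch:

#include <stdio.h>

/* same semantics as the kernel's ALIGN_DOWN()/ALIGN() for power-of-2 a */
#define ALIGN_DOWN(x, a)	((x) & ~((unsigned long)(a) - 1))
#define ALIGN_UP(x, a)		(((x) + (a) - 1) & ~((unsigned long)(a) - 1))

int main(void)
{
	unsigned long addr = 0x12345;	/* faulting page number */
	unsigned int granularity = 9;	/* log2 pages: 2M blocks of 4K pages */
	unsigned long size = 1UL << granularity;
	unsigned long start = ALIGN_DOWN(addr, size);
	unsigned long last = ALIGN_UP(addr + 1, size) - 1;

	/* 0x12345 falls in block [0x12200 0x123ff] */
	printf("[0x%lx 0x%lx]\n", start, last);
	return 0;
}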
All changes @@ -2876,7 +2906,6 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid, uint32_t vmid, uint32_t node_id, uint64_t addr, bool write_fault) { - unsigned long start, last, size; struct mm_struct *mm = NULL; struct svm_range_list *svms; struct svm_range *prange; @@ -3012,35 +3041,32 @@ retry_write_locked: kfd_smi_event_page_fault_start(node, p->lead_thread->pid, addr, write_fault, timestamp); - if (prange->actual_loc != 0 || best_loc != 0) { + if (prange->actual_loc != best_loc) { migration = true; - /* Align migration range start and size to granularity size */ - size = 1UL << prange->granularity; - start = max_t(unsigned long, ALIGN_DOWN(addr, size), prange->start); - last = min_t(unsigned long, ALIGN(addr + 1, size) - 1, prange->last); - if (best_loc) { - r = svm_migrate_to_vram(prange, best_loc, start, last, - mm, KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU); + r = svm_migrate_to_vram(prange, best_loc, mm, + KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU); if (r) { pr_debug("svm_migrate_to_vram failed (%d) at %llx, falling back to system memory\n", r, addr); /* Fallback to system memory if migration to * VRAM failed */ - if (prange->actual_loc && prange->actual_loc != best_loc) - r = svm_migrate_vram_to_ram(prange, mm, start, last, - KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU, NULL); + if (prange->actual_loc) + r = svm_migrate_vram_to_ram(prange, mm, + KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU, + NULL); else r = 0; } } else { - r = svm_migrate_vram_to_ram(prange, mm, start, last, - KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU, NULL); + r = svm_migrate_vram_to_ram(prange, mm, + KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU, + NULL); } if (r) { pr_debug("failed %d to migrate svms %p [0x%lx 0x%lx]\n", - r, svms, start, last); + r, svms, prange->start, prange->last); goto out_unlock_range; } } @@ -3394,24 +3420,18 @@ svm_range_trigger_migration(struct mm_struct *mm, struct svm_range *prange, *migrated = false; best_loc = svm_range_best_prefetch_location(prange); - /* when best_loc is a gpu node and same as prange->actual_loc - * we still need do migration as prange->actual_loc !=0 does - * not mean all pages in prange are vram. hmm migrate will pick - * up right pages during migration. 
- */ - if ((best_loc == KFD_IOCTL_SVM_LOCATION_UNDEFINED) || - (best_loc == 0 && prange->actual_loc == 0)) + if (best_loc == KFD_IOCTL_SVM_LOCATION_UNDEFINED || + best_loc == prange->actual_loc) return 0; if (!best_loc) { - r = svm_migrate_vram_to_ram(prange, mm, prange->start, prange->last, + r = svm_migrate_vram_to_ram(prange, mm, KFD_MIGRATE_TRIGGER_PREFETCH, NULL); *migrated = !r; return r; } - r = svm_migrate_to_vram(prange, best_loc, prange->start, prange->last, - mm, KFD_MIGRATE_TRIGGER_PREFETCH); + r = svm_migrate_to_vram(prange, best_loc, mm, KFD_MIGRATE_TRIGGER_PREFETCH); *migrated = !r; return r; @@ -3466,11 +3486,7 @@ static void svm_range_evict_svm_bo_worker(struct work_struct *work) mutex_lock(&prange->migrate_mutex); do { - /* migrate all vram pages in this prange to sys ram - * after that prange->actual_loc should be zero - */ r = svm_migrate_vram_to_ram(prange, mm, - prange->start, prange->last, KFD_MIGRATE_TRIGGER_TTM_EVICTION, NULL); } while (!r && prange->actual_loc && --retries); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h index 026863a0abcd..c528df1d0ba2 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h @@ -78,7 +78,6 @@ struct svm_work_list_item { * @update_list:link list node used to add to update_list * @mapping: bo_va mapping structure to create and update GPU page table * @npages: number of pages - * @vram_pages: vram pages number in this svm_range * @dma_addr: dma mapping address on each GPU for system memory physical page * @ttm_res: vram ttm resource map * @offset: range start offset within mm_nodes @@ -89,9 +88,7 @@ struct svm_work_list_item { * @flags: flags defined as KFD_IOCTL_SVM_FLAG_* * @perferred_loc: perferred location, 0 for CPU, or GPU id * @perfetch_loc: last prefetch location, 0 for CPU, or GPU id - * @actual_loc: this svm_range location. 0: all pages are from sys ram; - * GPU id: this svm_range may include vram pages from GPU with - * id actual_loc. 
+ * @actual_loc: the actual location, 0 for CPU, or GPU id * @granularity:migration granularity, log2 num pages * @invalid: not 0 means cpu page table is invalidated * @validate_timestamp: system timestamp when range is validated @@ -115,7 +112,6 @@ struct svm_range { struct list_head list; struct list_head update_list; uint64_t npages; - uint64_t vram_pages; dma_addr_t *dma_addr[MAX_GPU_INSTANCE]; struct ttm_resource *ttm_res; uint64_t offset; @@ -172,6 +168,9 @@ struct kfd_node *svm_range_get_node_by_id(struct svm_range *prange, int svm_range_vram_node_new(struct kfd_node *node, struct svm_range *prange, bool clear); void svm_range_vram_node_free(struct svm_range *prange); +int svm_range_split_by_granularity(struct kfd_process *p, struct mm_struct *mm, + unsigned long addr, struct svm_range *parent, + struct svm_range *prange); int svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid, uint32_t vmid, uint32_t node_id, uint64_t addr, bool write_fault); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c index 4e530791507e..dc7c8312e8c7 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c @@ -1602,10 +1602,13 @@ static int fill_in_l2_l3_pcache(struct kfd_cache_properties **props_ext, unsigned int cu_sibling_map_mask; int first_active_cu; int i, j, k, xcc, start, end; + int num_xcc = NUM_XCC(knode->xcc_mask); struct kfd_cache_properties *pcache = NULL; + enum amdgpu_memory_partition mode; + struct amdgpu_device *adev = knode->adev; start = ffs(knode->xcc_mask) - 1; - end = start + NUM_XCC(knode->xcc_mask); + end = start + num_xcc; cu_sibling_map_mask = cu_info->bitmap[start][0][0]; cu_sibling_map_mask &= ((1 << pcache_info[cache_type].num_cu_shared) - 1); @@ -1624,7 +1627,18 @@ static int fill_in_l2_l3_pcache(struct kfd_cache_properties **props_ext, pcache->processor_id_low = cu_processor_id + (first_active_cu - 1); pcache->cache_level = pcache_info[cache_type].cache_level; - pcache->cache_size = pcache_info[cache_type].cache_size; + + if (KFD_GC_VERSION(knode) == IP_VERSION(9, 4, 3)) + mode = adev->gmc.gmc_funcs->query_mem_partition_mode(adev); + else + mode = UNKNOWN_MEMORY_PARTITION_MODE; + + if (pcache->cache_level == 2) + pcache->cache_size = pcache_info[cache_type].cache_size * num_xcc; + else if (mode) + pcache->cache_size = pcache_info[cache_type].cache_size / mode; + else + pcache->cache_size = pcache_info[cache_type].cache_size; if (pcache_info[cache_type].flags & CRAT_CACHE_FLAGS_DATA_CACHE) pcache->cache_type |= HSA_CACHE_TYPE_DATA; diff --git a/drivers/gpu/drm/amd/amdxcp/amdgpu_xcp_drv.c b/drivers/gpu/drm/amd/amdxcp/amdgpu_xcp_drv.c index 353597fc908d..90ddd8371176 100644 --- a/drivers/gpu/drm/amd/amdxcp/amdgpu_xcp_drv.c +++ b/drivers/gpu/drm/amd/amdxcp/amdgpu_xcp_drv.c @@ -89,9 +89,10 @@ EXPORT_SYMBOL(amdgpu_xcp_drm_dev_alloc); void amdgpu_xcp_drv_release(void) { for (--pdev_num; pdev_num >= 0; --pdev_num) { - devres_release_group(&xcp_dev[pdev_num]->pdev->dev, NULL); - platform_device_unregister(xcp_dev[pdev_num]->pdev); - xcp_dev[pdev_num]->pdev = NULL; + struct platform_device *pdev = xcp_dev[pdev_num]->pdev; + + devres_release_group(&pdev->dev, NULL); + platform_device_unregister(pdev); xcp_dev[pdev_num] = NULL; } pdev_num = 0; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 9a712791f309..6f99f6754c11 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -1642,7 +1642,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) init_data.flags.gpu_vm_support = (amdgpu_sg_display != 0) && (adev->flags & AMD_IS_APU); } - init_data.flags.gpu_vm_support = adev->mode_info.gpu_vm_support; + adev->mode_info.gpu_vm_support = init_data.flags.gpu_vm_support; if (amdgpu_dc_feature_mask & DC_FBC_MASK) init_data.flags.fbc_support = true; @@ -6513,6 +6513,9 @@ static void create_eml_sink(struct amdgpu_dm_connector *aconnector) return; } + if (drm_detect_hdmi_monitor(edid)) + init_params.sink_signal = SIGNAL_TYPE_HDMI_TYPE_A; + aconnector->edid = edid; aconnector->dc_em_sink = dc_link_add_remote_sink( @@ -9877,7 +9880,7 @@ static int dm_update_plane_state(struct dc *dc, /* Block top most plane from being a video plane */ if (plane->type == DRM_PLANE_TYPE_OVERLAY) { - if (is_video_format(new_plane_state->fb->format->format) && *is_top_most_overlay) + if (amdgpu_dm_plane_is_video_format(new_plane_state->fb->format->format) && *is_top_most_overlay) return -EINVAL; *is_top_most_overlay = false; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c index 97b7a0b8a1c2..cb0b48bb2a7d 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c @@ -96,7 +96,7 @@ bool amdgpu_dm_crtc_vrr_active(struct dm_crtc_state *dm_state) dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED; } -static void vblank_control_worker(struct work_struct *work) +static void amdgpu_dm_crtc_vblank_control_worker(struct work_struct *work) { struct vblank_control_work *vblank_work = container_of(work, struct vblank_control_work, work); @@ -151,7 +151,7 @@ static void vblank_control_worker(struct work_struct *work) kfree(vblank_work); } -static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable) +static inline int amdgpu_dm_crtc_set_vblank(struct drm_crtc *crtc, bool enable) { struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); struct amdgpu_device *adev = drm_to_adev(crtc->dev); @@ -191,7 +191,7 @@ skip: if (!work) return -ENOMEM; - INIT_WORK(&work->work, vblank_control_worker); + INIT_WORK(&work->work, amdgpu_dm_crtc_vblank_control_worker); work->dm = dm; work->acrtc = acrtc; work->enable = enable; @@ -209,15 +209,15 @@ skip: int amdgpu_dm_crtc_enable_vblank(struct drm_crtc *crtc) { - return dm_set_vblank(crtc, true); + return amdgpu_dm_crtc_set_vblank(crtc, true); } void amdgpu_dm_crtc_disable_vblank(struct drm_crtc *crtc) { - dm_set_vblank(crtc, false); + amdgpu_dm_crtc_set_vblank(crtc, false); } -static void dm_crtc_destroy_state(struct drm_crtc *crtc, +static void amdgpu_dm_crtc_destroy_state(struct drm_crtc *crtc, struct drm_crtc_state *state) { struct dm_crtc_state *cur = to_dm_crtc_state(state); @@ -233,7 +233,7 @@ static void dm_crtc_destroy_state(struct drm_crtc *crtc, kfree(state); } -static struct drm_crtc_state *dm_crtc_duplicate_state(struct drm_crtc *crtc) +static struct drm_crtc_state *amdgpu_dm_crtc_duplicate_state(struct drm_crtc *crtc) { struct dm_crtc_state *state, *cur; @@ -273,12 +273,12 @@ static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc) kfree(crtc); } -static void dm_crtc_reset_state(struct drm_crtc *crtc) +static void amdgpu_dm_crtc_reset_state(struct drm_crtc *crtc) { struct dm_crtc_state *state; if (crtc->state) - dm_crtc_destroy_state(crtc, crtc->state); + amdgpu_dm_crtc_destroy_state(crtc, crtc->state); state = kzalloc(sizeof(*state), GFP_KERNEL); if 
(WARN_ON(!state)) @@ -298,12 +298,12 @@ static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc) /* Implemented only the options currently available for the driver */ static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = { - .reset = dm_crtc_reset_state, + .reset = amdgpu_dm_crtc_reset_state, .destroy = amdgpu_dm_crtc_destroy, .set_config = drm_atomic_helper_set_config, .page_flip = drm_atomic_helper_page_flip, - .atomic_duplicate_state = dm_crtc_duplicate_state, - .atomic_destroy_state = dm_crtc_destroy_state, + .atomic_duplicate_state = amdgpu_dm_crtc_duplicate_state, + .atomic_destroy_state = amdgpu_dm_crtc_destroy_state, .set_crc_source = amdgpu_dm_crtc_set_crc_source, .verify_crc_source = amdgpu_dm_crtc_verify_crc_source, .get_crc_sources = amdgpu_dm_crtc_get_crc_sources, @@ -316,11 +316,11 @@ static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = { #endif }; -static void dm_crtc_helper_disable(struct drm_crtc *crtc) +static void amdgpu_dm_crtc_helper_disable(struct drm_crtc *crtc) { } -static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state) +static int amdgpu_dm_crtc_count_crtc_active_planes(struct drm_crtc_state *new_crtc_state) { struct drm_atomic_state *state = new_crtc_state->state; struct drm_plane *plane; @@ -352,8 +352,8 @@ static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state) return num_active; } -static void dm_update_crtc_active_planes(struct drm_crtc *crtc, - struct drm_crtc_state *new_crtc_state) +static void amdgpu_dm_crtc_update_crtc_active_planes(struct drm_crtc *crtc, + struct drm_crtc_state *new_crtc_state) { struct dm_crtc_state *dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); @@ -364,18 +364,18 @@ static void dm_update_crtc_active_planes(struct drm_crtc *crtc, return; dm_new_crtc_state->active_planes = - count_crtc_active_planes(new_crtc_state); + amdgpu_dm_crtc_count_crtc_active_planes(new_crtc_state); } -static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc, +static bool amdgpu_dm_crtc_helper_mode_fixup(struct drm_crtc *crtc, const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { return true; } -static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc, - struct drm_atomic_state *state) +static int amdgpu_dm_crtc_helper_atomic_check(struct drm_crtc *crtc, + struct drm_atomic_state *state) { struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, crtc); @@ -386,7 +386,7 @@ static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc, trace_amdgpu_dm_crtc_atomic_check(crtc_state); - dm_update_crtc_active_planes(crtc, crtc_state); + amdgpu_dm_crtc_update_crtc_active_planes(crtc, crtc_state); if (WARN_ON(unlikely(!dm_crtc_state->stream && amdgpu_dm_crtc_modeset_required(crtc_state, NULL, dm_crtc_state->stream)))) { @@ -429,9 +429,9 @@ static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc, } static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = { - .disable = dm_crtc_helper_disable, - .atomic_check = dm_crtc_helper_atomic_check, - .mode_fixup = dm_crtc_helper_mode_fixup, + .disable = amdgpu_dm_crtc_helper_disable, + .atomic_check = amdgpu_dm_crtc_helper_atomic_check, + .mode_fixup = amdgpu_dm_crtc_helper_mode_fixup, .get_scanout_position = amdgpu_crtc_get_scanout_position, }; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c index 1259d6351c50..13a177d34376 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c +++ 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c @@ -3645,7 +3645,9 @@ static int capabilities_show(struct seq_file *m, void *unused) struct amdgpu_device *adev = (struct amdgpu_device *)m->private; struct dc *dc = adev->dm.dc; bool mall_supported = dc->caps.mall_size_total; + bool subvp_supported = dc->caps.subvp_fw_processing_delay_us; unsigned int mall_in_use = false; + unsigned int subvp_in_use = dc->cap_funcs.get_subvp_en(dc, dc->current_state); struct hubbub *hubbub = dc->res_pool->hubbub; if (hubbub->funcs->get_mall_en) @@ -3653,6 +3655,8 @@ static int capabilities_show(struct seq_file *m, void *unused) seq_printf(m, "mall supported: %s, enabled: %s\n", mall_supported ? "yes" : "no", mall_in_use ? "yes" : "no"); + seq_printf(m, "sub-viewport supported: %s, enabled: %s\n", + subvp_supported ? "yes" : "no", subvp_in_use ? "yes" : "no"); return 0; } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c index 03df26bd8e83..116121e647ca 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c @@ -139,7 +139,7 @@ void amdgpu_dm_plane_fill_blending_from_plane_state(const struct drm_plane_state } } -static void add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod) +static void amdgpu_dm_plane_add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod) { if (!*mods) return; @@ -164,12 +164,12 @@ static void add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_ *size += 1; } -static bool modifier_has_dcc(uint64_t modifier) +static bool amdgpu_dm_plane_modifier_has_dcc(uint64_t modifier) { return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier); } -static unsigned int modifier_gfx9_swizzle_mode(uint64_t modifier) +static unsigned int amdgpu_dm_plane_modifier_gfx9_swizzle_mode(uint64_t modifier) { if (modifier == DRM_FORMAT_MOD_LINEAR) return 0; @@ -177,8 +177,8 @@ static unsigned int modifier_gfx9_swizzle_mode(uint64_t modifier) return AMD_FMT_MOD_GET(TILE, modifier); } -static void fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info, - uint64_t tiling_flags) +static void amdgpu_dm_plane_fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info, + uint64_t tiling_flags) { /* Fill GFX8 params */ if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) { @@ -209,8 +209,8 @@ static void fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info, AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG); } -static void fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev, - union dc_tiling_info *tiling_info) +static void amdgpu_dm_plane_fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev, + union dc_tiling_info *tiling_info) { /* Fill GFX9 params */ tiling_info->gfx9.num_pipes = @@ -230,9 +230,9 @@ static void fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev, tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs; } -static void fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev, - union dc_tiling_info *tiling_info, - uint64_t modifier) +static void amdgpu_dm_plane_fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev, + union dc_tiling_info *tiling_info, + uint64_t modifier) { unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier); unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier); @@ -241,7 +241,7 @@ 
static void fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev pipes_log2 = min(5u, mod_pipe_xor_bits); - fill_gfx9_tiling_info_from_device(adev, tiling_info); + amdgpu_dm_plane_fill_gfx9_tiling_info_from_device(adev, tiling_info); if (!IS_AMD_FMT_MOD(modifier)) return; @@ -258,13 +258,13 @@ static void fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev } } -static int validate_dcc(struct amdgpu_device *adev, - const enum surface_pixel_format format, - const enum dc_rotation_angle rotation, - const union dc_tiling_info *tiling_info, - const struct dc_plane_dcc_param *dcc, - const struct dc_plane_address *address, - const struct plane_size *plane_size) +static int amdgpu_dm_plane_validate_dcc(struct amdgpu_device *adev, + const enum surface_pixel_format format, + const enum dc_rotation_angle rotation, + const union dc_tiling_info *tiling_info, + const struct dc_plane_dcc_param *dcc, + const struct dc_plane_address *address, + const struct plane_size *plane_size) { struct dc *dc = adev->dm.dc; struct dc_dcc_surface_param input; @@ -303,23 +303,23 @@ static int validate_dcc(struct amdgpu_device *adev, return 0; } -static int fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev, - const struct amdgpu_framebuffer *afb, - const enum surface_pixel_format format, - const enum dc_rotation_angle rotation, - const struct plane_size *plane_size, - union dc_tiling_info *tiling_info, - struct dc_plane_dcc_param *dcc, - struct dc_plane_address *address, - const bool force_disable_dcc) +static int amdgpu_dm_plane_fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev, + const struct amdgpu_framebuffer *afb, + const enum surface_pixel_format format, + const enum dc_rotation_angle rotation, + const struct plane_size *plane_size, + union dc_tiling_info *tiling_info, + struct dc_plane_dcc_param *dcc, + struct dc_plane_address *address, + const bool force_disable_dcc) { const uint64_t modifier = afb->base.modifier; int ret = 0; - fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier); - tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier); + amdgpu_dm_plane_fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier); + tiling_info->gfx9.swizzle = amdgpu_dm_plane_modifier_gfx9_swizzle_mode(modifier); - if (modifier_has_dcc(modifier) && !force_disable_dcc) { + if (amdgpu_dm_plane_modifier_has_dcc(modifier) && !force_disable_dcc) { uint64_t dcc_address = afb->address + afb->base.offsets[1]; bool independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier); bool independent_128b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_128B, modifier); @@ -347,60 +347,64 @@ static int fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev, address->grph.meta_addr.high_part = upper_32_bits(dcc_address); } - ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size); + ret = amdgpu_dm_plane_validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size); if (ret) - drm_dbg_kms(adev_to_drm(adev), "validate_dcc: returned error: %d\n", ret); + drm_dbg_kms(adev_to_drm(adev), "amdgpu_dm_plane_validate_dcc: returned error: %d\n", ret); return ret; } -static void add_gfx10_1_modifiers(const struct amdgpu_device *adev, - uint64_t **mods, uint64_t *size, uint64_t *capacity) +static void amdgpu_dm_plane_add_gfx10_1_modifiers(const struct amdgpu_device *adev, + uint64_t **mods, + uint64_t *size, + uint64_t *capacity) { int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes); - 
add_modifier(mods, size, capacity, AMD_FMT_MOD | - AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | - AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) | - AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | - AMD_FMT_MOD_SET(DCC, 1) | - AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | - AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | - AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B)); - - add_modifier(mods, size, capacity, AMD_FMT_MOD | - AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | - AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) | - AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | - AMD_FMT_MOD_SET(DCC, 1) | - AMD_FMT_MOD_SET(DCC_RETILE, 1) | - AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | - AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | - AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B)); - - add_modifier(mods, size, capacity, AMD_FMT_MOD | - AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | - AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) | - AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits)); - - add_modifier(mods, size, capacity, AMD_FMT_MOD | - AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | - AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) | - AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits)); - - - /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */ - add_modifier(mods, size, capacity, AMD_FMT_MOD | - AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) | - AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9)); - - add_modifier(mods, size, capacity, AMD_FMT_MOD | - AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) | - AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9)); + amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) | + AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | + AMD_FMT_MOD_SET(DCC, 1) | + AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | + AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | + AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B)); + + amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) | + AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | + AMD_FMT_MOD_SET(DCC, 1) | + AMD_FMT_MOD_SET(DCC_RETILE, 1) | + AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | + AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | + AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B)); + + amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) | + AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits)); + + amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) | + AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits)); + + + /* Only supported for 64bpp, will be filtered in amdgpu_dm_plane_format_mod_supported */ + amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9)); + + amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9)); } 
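(Aside on the AMD_FMT_MOD_SET() chains in the hunks above: each format modifier is a single 64-bit value whose bit-fields are defined in include/uapi/drm/drm_fourcc.h, so the renamed helpers are only appending packed integers to a growable array. A minimal userspace sketch of the encode/decode round trip — assuming libdrm's drm_fourcc.h is on the include path; the particular field values are illustrative, not what any one ASIC reports:

#include <stdio.h>
#include <stdint.h>
#include <drm/drm_fourcc.h>	/* AMD_FMT_MOD, AMD_FMT_MOD_SET/GET, IS_AMD_FMT_MOD */

int main(void)
{
	/* Pack a GFX10 64K_R_X swizzle with DCC enabled, the same way the
	 * amdgpu_dm_plane_add_gfx10_1_modifiers() hunk above builds entries. */
	uint64_t mod = AMD_FMT_MOD |
		       AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
		       AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
		       AMD_FMT_MOD_SET(DCC, 1);

	/* Decode the same fields, mirroring what
	 * amdgpu_dm_plane_modifier_has_dcc() does in-kernel. */
	if (IS_AMD_FMT_MOD(mod) && AMD_FMT_MOD_GET(DCC, mod))
		printf("tile=%llu ver=%llu dcc=on\n",
		       (unsigned long long)AMD_FMT_MOD_GET(TILE, mod),
		       (unsigned long long)AMD_FMT_MOD_GET(TILE_VERSION, mod));
	return 0;
}

The diff continues with the GFX9 variant of the same helper.)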
-static void add_gfx9_modifiers(const struct amdgpu_device *adev, - uint64_t **mods, uint64_t *size, uint64_t *capacity) +static void amdgpu_dm_plane_add_gfx9_modifiers(const struct amdgpu_device *adev, + uint64_t **mods, + uint64_t *size, + uint64_t *capacity) { int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes); int pipe_xor_bits = min(8, pipes + @@ -421,163 +425,164 @@ static void add_gfx9_modifiers(const struct amdgpu_device *adev, */ if (has_constant_encode) { - add_modifier(mods, size, capacity, AMD_FMT_MOD | - AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | - AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) | - AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | - AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) | - AMD_FMT_MOD_SET(DCC, 1) | - AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | - AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) | - AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1)); + amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) | + AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | + AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) | + AMD_FMT_MOD_SET(DCC, 1) | + AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | + AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) | + AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1)); } - add_modifier(mods, size, capacity, AMD_FMT_MOD | - AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | - AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) | - AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | - AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) | - AMD_FMT_MOD_SET(DCC, 1) | - AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | - AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) | - AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0)); + amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) | + AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | + AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) | + AMD_FMT_MOD_SET(DCC, 1) | + AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | + AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) | + AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0)); if (has_constant_encode) { - add_modifier(mods, size, capacity, AMD_FMT_MOD | - AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | - AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) | - AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | - AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) | - AMD_FMT_MOD_SET(DCC, 1) | - AMD_FMT_MOD_SET(DCC_RETILE, 1) | - AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | - AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) | - - AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | - AMD_FMT_MOD_SET(RB, rb) | - AMD_FMT_MOD_SET(PIPE, pipes)); + amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) | + AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | + AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) | + AMD_FMT_MOD_SET(DCC, 1) | + AMD_FMT_MOD_SET(DCC_RETILE, 1) | + AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | + AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) | + AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | + AMD_FMT_MOD_SET(RB, rb) | + AMD_FMT_MOD_SET(PIPE, pipes)); } - add_modifier(mods, size, capacity, 
AMD_FMT_MOD | - AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | - AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) | - AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | - AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) | - AMD_FMT_MOD_SET(DCC, 1) | - AMD_FMT_MOD_SET(DCC_RETILE, 1) | - AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | - AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) | - AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) | - AMD_FMT_MOD_SET(RB, rb) | - AMD_FMT_MOD_SET(PIPE, pipes)); + amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) | + AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | + AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) | + AMD_FMT_MOD_SET(DCC, 1) | + AMD_FMT_MOD_SET(DCC_RETILE, 1) | + AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | + AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) | + AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) | + AMD_FMT_MOD_SET(RB, rb) | + AMD_FMT_MOD_SET(PIPE, pipes)); } /* * Only supported for 64bpp on Raven, will be filtered on format in - * dm_plane_format_mod_supported. + * amdgpu_dm_plane_format_mod_supported. */ - add_modifier(mods, size, capacity, AMD_FMT_MOD | - AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) | - AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) | - AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | - AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits)); + amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) | + AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | + AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits)); if (adev->family == AMDGPU_FAMILY_RV) { - add_modifier(mods, size, capacity, AMD_FMT_MOD | - AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | - AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) | - AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | - AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits)); + amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) | + AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | + AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits)); } /* * Only supported for 64bpp on Raven, will be filtered on format in - * dm_plane_format_mod_supported. + * amdgpu_dm_plane_format_mod_supported. 
*/ - add_modifier(mods, size, capacity, AMD_FMT_MOD | - AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) | - AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9)); + amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9)); if (adev->family == AMDGPU_FAMILY_RV) { - add_modifier(mods, size, capacity, AMD_FMT_MOD | - AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) | - AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9)); + amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9)); } } -static void add_gfx10_3_modifiers(const struct amdgpu_device *adev, - uint64_t **mods, uint64_t *size, uint64_t *capacity) +static void amdgpu_dm_plane_add_gfx10_3_modifiers(const struct amdgpu_device *adev, + uint64_t **mods, + uint64_t *size, + uint64_t *capacity) { int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes); int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs); - add_modifier(mods, size, capacity, AMD_FMT_MOD | - AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | - AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) | - AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | - AMD_FMT_MOD_SET(PACKERS, pkrs) | - AMD_FMT_MOD_SET(DCC, 1) | - AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | - AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | - AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) | - AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B)); - - add_modifier(mods, size, capacity, AMD_FMT_MOD | - AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | - AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) | - AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | - AMD_FMT_MOD_SET(PACKERS, pkrs) | - AMD_FMT_MOD_SET(DCC, 1) | - AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | - AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) | - AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B)); - - add_modifier(mods, size, capacity, AMD_FMT_MOD | - AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | - AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) | - AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | - AMD_FMT_MOD_SET(PACKERS, pkrs) | - AMD_FMT_MOD_SET(DCC, 1) | - AMD_FMT_MOD_SET(DCC_RETILE, 1) | - AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | - AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | - AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) | - AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B)); - - add_modifier(mods, size, capacity, AMD_FMT_MOD | - AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | - AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) | - AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | - AMD_FMT_MOD_SET(PACKERS, pkrs) | - AMD_FMT_MOD_SET(DCC, 1) | - AMD_FMT_MOD_SET(DCC_RETILE, 1) | - AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | - AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) | - AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B)); - - add_modifier(mods, size, capacity, AMD_FMT_MOD | - AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | - AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) | - AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | - AMD_FMT_MOD_SET(PACKERS, pkrs)); - - add_modifier(mods, size, capacity, AMD_FMT_MOD | - AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | - AMD_FMT_MOD_SET(TILE_VERSION, 
AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) | - AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | - AMD_FMT_MOD_SET(PACKERS, pkrs)); - - /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */ - add_modifier(mods, size, capacity, AMD_FMT_MOD | - AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) | - AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9)); - - add_modifier(mods, size, capacity, AMD_FMT_MOD | - AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) | - AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9)); + amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) | + AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | + AMD_FMT_MOD_SET(PACKERS, pkrs) | + AMD_FMT_MOD_SET(DCC, 1) | + AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | + AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | + AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) | + AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B)); + + amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) | + AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | + AMD_FMT_MOD_SET(PACKERS, pkrs) | + AMD_FMT_MOD_SET(DCC, 1) | + AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | + AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) | + AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B)); + + amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) | + AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | + AMD_FMT_MOD_SET(PACKERS, pkrs) | + AMD_FMT_MOD_SET(DCC, 1) | + AMD_FMT_MOD_SET(DCC_RETILE, 1) | + AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | + AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | + AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) | + AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B)); + + amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) | + AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | + AMD_FMT_MOD_SET(PACKERS, pkrs) | + AMD_FMT_MOD_SET(DCC, 1) | + AMD_FMT_MOD_SET(DCC_RETILE, 1) | + AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | + AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) | + AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B)); + + amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) | + AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | + AMD_FMT_MOD_SET(PACKERS, pkrs)); + + amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) | + AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | + AMD_FMT_MOD_SET(PACKERS, pkrs)); + + /* Only supported for 64bpp, will be filtered in amdgpu_dm_plane_format_mod_supported */ + amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9)); + + amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) | + 
AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9)); } -static void add_gfx11_modifiers(struct amdgpu_device *adev, +static void amdgpu_dm_plane_add_gfx11_modifiers(struct amdgpu_device *adev, uint64_t **mods, uint64_t *size, uint64_t *capacity) { int num_pipes = 0; @@ -628,21 +633,21 @@ static void add_gfx11_modifiers(struct amdgpu_device *adev, AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) | AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B); - add_modifier(mods, size, capacity, modifier_dcc_best); - add_modifier(mods, size, capacity, modifier_dcc_4k); + amdgpu_dm_plane_add_modifier(mods, size, capacity, modifier_dcc_best); + amdgpu_dm_plane_add_modifier(mods, size, capacity, modifier_dcc_4k); - add_modifier(mods, size, capacity, modifier_dcc_best | AMD_FMT_MOD_SET(DCC_RETILE, 1)); - add_modifier(mods, size, capacity, modifier_dcc_4k | AMD_FMT_MOD_SET(DCC_RETILE, 1)); + amdgpu_dm_plane_add_modifier(mods, size, capacity, modifier_dcc_best | AMD_FMT_MOD_SET(DCC_RETILE, 1)); + amdgpu_dm_plane_add_modifier(mods, size, capacity, modifier_dcc_4k | AMD_FMT_MOD_SET(DCC_RETILE, 1)); - add_modifier(mods, size, capacity, modifier_r_x); + amdgpu_dm_plane_add_modifier(mods, size, capacity, modifier_r_x); } - add_modifier(mods, size, capacity, AMD_FMT_MOD | - AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX11) | - AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D)); + amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX11) | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D)); } -static int get_plane_modifiers(struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods) +static int amdgpu_dm_plane_get_plane_modifiers(struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods) { uint64_t size = 0, capacity = 128; *mods = NULL; @@ -654,15 +659,15 @@ static int get_plane_modifiers(struct amdgpu_device *adev, unsigned int plane_ty *mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL); if (plane_type == DRM_PLANE_TYPE_CURSOR) { - add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR); - add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID); + amdgpu_dm_plane_add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR); + amdgpu_dm_plane_add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID); return *mods ? 0 : -ENOMEM; } switch (adev->family) { case AMDGPU_FAMILY_AI: case AMDGPU_FAMILY_RV: - add_gfx9_modifiers(adev, mods, &size, &capacity); + amdgpu_dm_plane_add_gfx9_modifiers(adev, mods, &size, &capacity); break; case AMDGPU_FAMILY_NV: case AMDGPU_FAMILY_VGH: @@ -670,21 +675,21 @@ static int get_plane_modifiers(struct amdgpu_device *adev, unsigned int plane_ty case AMDGPU_FAMILY_GC_10_3_6: case AMDGPU_FAMILY_GC_10_3_7: if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(10, 3, 0)) - add_gfx10_3_modifiers(adev, mods, &size, &capacity); + amdgpu_dm_plane_add_gfx10_3_modifiers(adev, mods, &size, &capacity); else - add_gfx10_1_modifiers(adev, mods, &size, &capacity); + amdgpu_dm_plane_add_gfx10_1_modifiers(adev, mods, &size, &capacity); break; case AMDGPU_FAMILY_GC_11_0_0: case AMDGPU_FAMILY_GC_11_0_1: case AMDGPU_FAMILY_GC_11_5_0: - add_gfx11_modifiers(adev, mods, &size, &capacity); + amdgpu_dm_plane_add_gfx11_modifiers(adev, mods, &size, &capacity); break; } - add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR); + amdgpu_dm_plane_add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR); /* INVALID marks the end of the list. 
*/ - add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID); + amdgpu_dm_plane_add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID); if (!*mods) return -ENOMEM; @@ -692,9 +697,9 @@ static int get_plane_modifiers(struct amdgpu_device *adev, unsigned int plane_ty return 0; } -static int get_plane_formats(const struct drm_plane *plane, - const struct dc_plane_cap *plane_cap, - uint32_t *formats, int max_formats) +static int amdgpu_dm_plane_get_plane_formats(const struct drm_plane *plane, + const struct dc_plane_cap *plane_cap, + uint32_t *formats, int max_formats) { int i, num_formats = 0; @@ -818,22 +823,22 @@ int amdgpu_dm_plane_fill_plane_buffer_attributes(struct amdgpu_device *adev, } if (adev->family >= AMDGPU_FAMILY_AI) { - ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format, - rotation, plane_size, - tiling_info, dcc, - address, - force_disable_dcc); + ret = amdgpu_dm_plane_fill_gfx9_plane_attributes_from_modifiers(adev, afb, format, + rotation, plane_size, + tiling_info, dcc, + address, + force_disable_dcc); if (ret) return ret; } else { - fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags); + amdgpu_dm_plane_fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags); } return 0; } -static int dm_plane_helper_prepare_fb(struct drm_plane *plane, - struct drm_plane_state *new_state) +static int amdgpu_dm_plane_helper_prepare_fb(struct drm_plane *plane, + struct drm_plane_state *new_state) { struct amdgpu_framebuffer *afb; struct drm_gem_object *obj; @@ -928,8 +933,8 @@ error_unlock: return r; } -static void dm_plane_helper_cleanup_fb(struct drm_plane *plane, - struct drm_plane_state *old_state) +static void amdgpu_dm_plane_helper_cleanup_fb(struct drm_plane *plane, + struct drm_plane_state *old_state) { struct amdgpu_bo *rbo; int r; @@ -949,7 +954,7 @@ static void dm_plane_helper_cleanup_fb(struct drm_plane *plane, amdgpu_bo_unref(&rbo); } -static void get_min_max_dc_plane_scaling(struct drm_device *dev, +static void amdgpu_dm_plane_get_min_max_dc_plane_scaling(struct drm_device *dev, struct drm_framebuffer *fb, int *min_downscale, int *max_upscale) { @@ -1030,8 +1035,8 @@ int amdgpu_dm_plane_helper_check_state(struct drm_plane_state *state, } /* Get min/max allowed scaling factors from plane caps. */ - get_min_max_dc_plane_scaling(state->crtc->dev, fb, - &min_downscale, &max_upscale); + amdgpu_dm_plane_get_min_max_dc_plane_scaling(state->crtc->dev, fb, + &min_downscale, &max_upscale); /* * Convert to drm convention: 16.16 fixed point, instead of dc's * 1.0 == 1000. 
Also drm scaling is src/dst instead of dc's @@ -1101,8 +1106,8 @@ int amdgpu_dm_plane_fill_dc_scaling_info(struct amdgpu_device *adev, /* Validate scaling per-format with DC plane caps */ if (state->plane && state->plane->dev && state->fb) { - get_min_max_dc_plane_scaling(state->plane->dev, state->fb, - &min_downscale, &max_upscale); + amdgpu_dm_plane_get_min_max_dc_plane_scaling(state->plane->dev, state->fb, + &min_downscale, &max_upscale); } else { min_downscale = 250; max_upscale = 16000; @@ -1128,8 +1133,8 @@ int amdgpu_dm_plane_fill_dc_scaling_info(struct amdgpu_device *adev, return 0; } -static int dm_plane_atomic_check(struct drm_plane *plane, - struct drm_atomic_state *state) +static int amdgpu_dm_plane_atomic_check(struct drm_plane *plane, + struct drm_atomic_state *state) { struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, plane); @@ -1167,8 +1172,8 @@ static int dm_plane_atomic_check(struct drm_plane *plane, return -EINVAL; } -static int dm_plane_atomic_async_check(struct drm_plane *plane, - struct drm_atomic_state *state) +static int amdgpu_dm_plane_atomic_async_check(struct drm_plane *plane, + struct drm_atomic_state *state) { /* Only support async updates on cursor planes. */ if (plane->type != DRM_PLANE_TYPE_CURSOR) @@ -1177,8 +1182,8 @@ static int dm_plane_atomic_async_check(struct drm_plane *plane, return 0; } -static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc, - struct dc_cursor_position *position) +static int amdgpu_dm_plane_get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc, + struct dc_cursor_position *position) { struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); int x, y; @@ -1241,7 +1246,7 @@ void amdgpu_dm_plane_handle_cursor_update(struct drm_plane *plane, amdgpu_crtc->crtc_id, plane->state->crtc_w, plane->state->crtc_h); - ret = get_cursor_position(plane, crtc, &position); + ret = amdgpu_dm_plane_get_cursor_position(plane, crtc, &position); if (ret) return; @@ -1290,8 +1295,8 @@ void amdgpu_dm_plane_handle_cursor_update(struct drm_plane *plane, } } -static void dm_plane_atomic_async_update(struct drm_plane *plane, - struct drm_atomic_state *state) +static void amdgpu_dm_plane_atomic_async_update(struct drm_plane *plane, + struct drm_atomic_state *state) { struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, plane); @@ -1315,14 +1320,14 @@ static void dm_plane_atomic_async_update(struct drm_plane *plane, } static const struct drm_plane_helper_funcs dm_plane_helper_funcs = { - .prepare_fb = dm_plane_helper_prepare_fb, - .cleanup_fb = dm_plane_helper_cleanup_fb, - .atomic_check = dm_plane_atomic_check, - .atomic_async_check = dm_plane_atomic_async_check, - .atomic_async_update = dm_plane_atomic_async_update + .prepare_fb = amdgpu_dm_plane_helper_prepare_fb, + .cleanup_fb = amdgpu_dm_plane_helper_cleanup_fb, + .atomic_check = amdgpu_dm_plane_atomic_check, + .atomic_async_check = amdgpu_dm_plane_atomic_async_check, + .atomic_async_update = amdgpu_dm_plane_atomic_async_update }; -static void dm_drm_plane_reset(struct drm_plane *plane) +static void amdgpu_dm_plane_drm_plane_reset(struct drm_plane *plane) { struct dm_plane_state *amdgpu_state = NULL; @@ -1336,8 +1341,7 @@ static void dm_drm_plane_reset(struct drm_plane *plane) __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base); } -static struct drm_plane_state * -dm_drm_plane_duplicate_state(struct drm_plane *plane) +static struct drm_plane_state *amdgpu_dm_plane_drm_plane_duplicate_state(struct drm_plane *plane) 
{ struct dm_plane_state *dm_plane_state, *old_dm_plane_state; @@ -1356,15 +1360,15 @@ dm_drm_plane_duplicate_state(struct drm_plane *plane) return &dm_plane_state->base; } -static bool dm_plane_format_mod_supported(struct drm_plane *plane, - uint32_t format, - uint64_t modifier) +static bool amdgpu_dm_plane_format_mod_supported(struct drm_plane *plane, + uint32_t format, + uint64_t modifier) { struct amdgpu_device *adev = drm_to_adev(plane->dev); const struct drm_format_info *info = drm_format_info(format); int i; - enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3; + enum dm_micro_swizzle microtile = amdgpu_dm_plane_modifier_gfx9_swizzle_mode(modifier) & 3; if (!info) return false; @@ -1401,7 +1405,7 @@ static bool dm_plane_format_mod_supported(struct drm_plane *plane, info->cpp[0] < 8) return false; - if (modifier_has_dcc(modifier)) { + if (amdgpu_dm_plane_modifier_has_dcc(modifier)) { /* Per radeonsi comments 16/64 bpp are more complicated. */ if (info->cpp[0] != 4) return false; @@ -1415,8 +1419,8 @@ static bool dm_plane_format_mod_supported(struct drm_plane *plane, return true; } -static void dm_drm_plane_destroy_state(struct drm_plane *plane, - struct drm_plane_state *state) +static void amdgpu_dm_plane_drm_plane_destroy_state(struct drm_plane *plane, + struct drm_plane_state *state) { struct dm_plane_state *dm_plane_state = to_dm_plane_state(state); @@ -1430,10 +1434,10 @@ static const struct drm_plane_funcs dm_plane_funcs = { .update_plane = drm_atomic_helper_update_plane, .disable_plane = drm_atomic_helper_disable_plane, .destroy = drm_plane_helper_destroy, - .reset = dm_drm_plane_reset, - .atomic_duplicate_state = dm_drm_plane_duplicate_state, - .atomic_destroy_state = dm_drm_plane_destroy_state, - .format_mod_supported = dm_plane_format_mod_supported, + .reset = amdgpu_dm_plane_drm_plane_reset, + .atomic_duplicate_state = amdgpu_dm_plane_drm_plane_duplicate_state, + .atomic_destroy_state = amdgpu_dm_plane_drm_plane_destroy_state, + .format_mod_supported = amdgpu_dm_plane_format_mod_supported, }; int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm, @@ -1447,10 +1451,10 @@ int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm, unsigned int supported_rotations; uint64_t *modifiers = NULL; - num_formats = get_plane_formats(plane, plane_cap, formats, - ARRAY_SIZE(formats)); + num_formats = amdgpu_dm_plane_get_plane_formats(plane, plane_cap, formats, + ARRAY_SIZE(formats)); - res = get_plane_modifiers(dm->adev, plane->type, &modifiers); + res = amdgpu_dm_plane_get_plane_modifiers(dm->adev, plane->type, &modifiers); if (res) return res; @@ -1520,7 +1524,7 @@ int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm, return 0; } -bool is_video_format(uint32_t format) +bool amdgpu_dm_plane_is_video_format(uint32_t format) { int i; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h index 930f1572f898..b51a6b57bd9b 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h @@ -62,5 +62,5 @@ void amdgpu_dm_plane_fill_blending_from_plane_state(const struct drm_plane_state bool *per_pixel_alpha, bool *pre_multiplied_alpha, bool *global_alpha, int *global_alpha_value); -bool is_video_format(uint32_t format); +bool amdgpu_dm_plane_is_video_format(uint32_t format); #endif diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index e13d8bab0b33..4360a696f10a 100644 --- 
a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -3575,7 +3575,8 @@ static void wait_for_outstanding_hw_updates(struct dc *dc, const struct dc_state mpcc_inst = hubp->inst; // MPCC inst is equal to pipe index in practice for (opp_inst = 0; opp_inst < opp_count; opp_inst++) { - if (dc->res_pool->opps[opp_inst]->mpcc_disconnect_pending[mpcc_inst]) { + if ((dc->res_pool->opps[opp_inst] != NULL) && + (dc->res_pool->opps[opp_inst]->mpcc_disconnect_pending[mpcc_inst])) { dc->res_pool->mpc->funcs->wait_for_idle(dc->res_pool->mpc, mpcc_inst); dc->res_pool->opps[opp_inst]->mpcc_disconnect_pending[mpcc_inst] = false; break; @@ -4347,7 +4348,6 @@ static bool full_update_required(struct dc *dc, srf_updates[i].in_transfer_func || srf_updates[i].func_shaper || srf_updates[i].lut3d_func || - srf_updates[i].blend_tf || srf_updates[i].surface->force_full_update || (srf_updates[i].flip_addr && srf_updates[i].flip_addr->address.tmz_surface != srf_updates[i].surface->address.tmz_surface) || @@ -4884,6 +4884,9 @@ void dc_allow_idle_optimizations(struct dc *dc, bool allow) if (dc->debug.disable_idle_power_optimizations) return; + if (dc->caps.ips_support && dc->config.disable_ips) + return; + if (dc->clk_mgr != NULL && dc->clk_mgr->funcs->is_smu_present) if (!dc->clk_mgr->funcs->is_smu_present(dc->clk_mgr)) return; @@ -4895,6 +4898,26 @@ void dc_allow_idle_optimizations(struct dc *dc, bool allow) dc->idle_optimizations_allowed = allow; } +bool dc_dmub_is_ips_idle_state(struct dc *dc) +{ + uint32_t idle_state = 0; + + if (dc->debug.disable_idle_power_optimizations) + return false; + + if (!dc->caps.ips_support || dc->config.disable_ips) + return false; + + if (dc->hwss.get_idle_state) + idle_state = dc->hwss.get_idle_state(dc); + + if ((idle_state & DMUB_IPS1_ALLOW_MASK) || + (idle_state & DMUB_IPS2_ALLOW_MASK)) + return true; + + return false; +} + /* set min and max memory clock to lowest and highest DPM level, respectively */ void dc_unlock_memory_clock_frequency(struct dc *dc) { diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index 97f402123fbb..1d48278cba96 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -321,10 +321,11 @@ struct resource_pool *dc_create_resource_pool(struct dc *dc, res_pool->ref_clocks.xtalin_clock_inKhz; res_pool->ref_clocks.dchub_ref_clock_inKhz = res_pool->ref_clocks.xtalin_clock_inKhz; - if (res_pool->hubbub && res_pool->hubbub->funcs->get_dchub_ref_freq) - res_pool->hubbub->funcs->get_dchub_ref_freq(res_pool->hubbub, - res_pool->ref_clocks.dccg_ref_clock_inKhz, - &res_pool->ref_clocks.dchub_ref_clock_inKhz); + if (dc->debug.using_dml2) + if (res_pool->hubbub && res_pool->hubbub->funcs->get_dchub_ref_freq) + res_pool->hubbub->funcs->get_dchub_ref_freq(res_pool->hubbub, + res_pool->ref_clocks.dccg_ref_clock_inKhz, + &res_pool->ref_clocks.dchub_ref_clock_inKhz); } else ASSERT_CRITICAL(false); } @@ -4228,7 +4229,7 @@ static void set_avi_info_frame( switch (stream->content_type) { case DISPLAY_CONTENT_TYPE_NO_DATA: hdmi_info.bits.CN0_CN1 = 0; - hdmi_info.bits.ITC = 0; + hdmi_info.bits.ITC = 1; break; case DISPLAY_CONTENT_TYPE_GRAPHICS: hdmi_info.bits.CN0_CN1 = 0; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c index 6ed40b6c6178..4bdf105d1d71 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c +++ 
b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c @@ -533,7 +533,7 @@ uint32_t dc_stream_get_vblank_counter(const struct dc_stream_state *stream) for (i = 0; i < MAX_PIPES; i++) { struct timing_generator *tg = res_ctx->pipe_ctx[i].stream_res.tg; - if (res_ctx->pipe_ctx[i].stream != stream) + if (res_ctx->pipe_ctx[i].stream != stream || !tg) continue; return tg->funcs->get_frame_count(tg); @@ -592,7 +592,7 @@ bool dc_stream_get_scanoutpos(const struct dc_stream_state *stream, for (i = 0; i < MAX_PIPES; i++) { struct timing_generator *tg = res_ctx->pipe_ctx[i].stream_res.tg; - if (res_ctx->pipe_ctx[i].stream != stream) + if (res_ctx->pipe_ctx[i].stream != stream || !tg) continue; tg->funcs->get_scanoutpos(tg, diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 582d94c759f6..6e54ca055fcb 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -49,7 +49,7 @@ struct aux_payload; struct set_config_cmd_payload; struct dmub_notification; -#define DC_VER "3.2.255" +#define DC_VER "3.2.256" #define MAX_SURFACES 3 #define MAX_PLANES 6 @@ -382,6 +382,7 @@ struct dc_cap_funcs { bool (*get_dcc_compression_cap)(const struct dc *dc, const struct dc_dcc_surface_param *input, struct dc_surface_dcc_cap *output); + bool (*get_subvp_en)(struct dc *dc, struct dc_state *context); }; struct link_training_settings; @@ -2317,6 +2318,7 @@ bool dc_is_plane_eligible_for_idle_optimizations(struct dc *dc, struct dc_plane_ struct dc_cursor_attributes *cursor_attr); void dc_allow_idle_optimizations(struct dc *dc, bool allow); +bool dc_dmub_is_ips_idle_state(struct dc *dc); /* set min and max memory clock to lowest and highest DPM level, respectively */ void dc_unlock_memory_clock_frequency(struct dc *dc); diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c index 4fd3f09432be..ba142bef626b 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c +++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c @@ -1110,8 +1110,10 @@ void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle) void dc_dmub_srv_exit_low_power_state(const struct dc *dc) { + const uint32_t max_num_polls = 10000; uint32_t allow_state = 0; uint32_t commit_state = 0; + uint32_t i; if (dc->debug.dmcub_emulation) return; @@ -1138,22 +1140,36 @@ void dc_dmub_srv_exit_low_power_state(const struct dc *dc) udelay(dc->debug.ips2_entry_delay_us); dc->clk_mgr->funcs->exit_low_power_state(dc->clk_mgr); - do { + for (i = 0; i < max_num_polls; ++i) { commit_state = dc->hwss.get_idle_state(dc); - } while (commit_state & DMUB_IPS2_COMMIT_MASK); + if (!(commit_state & DMUB_IPS2_COMMIT_MASK)) + break; + + udelay(1); + } + ASSERT(i < max_num_polls); if (!dc_dmub_srv_is_hw_pwr_up(dc->ctx->dmub_srv, true)) ASSERT(0); - return; + /* TODO: See if we can return early here - IPS2 should go + * back directly to IPS0 and clear the flags, but it will + * be safer to directly notify DMCUB of this. 
+ */ + allow_state = dc->hwss.get_idle_state(dc); } } dc_dmub_srv_notify_idle(dc, false); if (allow_state & DMUB_IPS1_ALLOW_MASK) { - do { + for (i = 0; i < max_num_polls; ++i) { commit_state = dc->hwss.get_idle_state(dc); - } while (commit_state & DMUB_IPS1_COMMIT_MASK); + if (!(commit_state & DMUB_IPS1_COMMIT_MASK)) + break; + + udelay(1); + } + ASSERT(i < max_num_polls); } } diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c index d1d8e904346e..b94c5c97eee7 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c @@ -554,6 +554,7 @@ static const struct dc_debug_options debug_defaults_drv = { .max_downscale_src_width = 3840, .underflow_assert_delay_us = 0xFFFFFFFF, .enable_legacy_fast_update = true, + .using_dml2 = false, }; static const struct dc_debug_options debug_defaults_diags = { diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c index 7eda4bbcd8ac..0a422fbb14bc 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c @@ -723,6 +723,7 @@ static const struct dc_debug_options debug_defaults_drv = { .sanity_checks = false, .underflow_assert_delay_us = 0xFFFFFFFF, .enable_legacy_fast_update = true, + .using_dml2 = false, }; void dcn20_dpp_destroy(struct dpp **dpp) diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.c index a11b2f6afe4a..bca22d867696 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.c @@ -614,6 +614,7 @@ static const struct dc_debug_options debug_defaults_drv = { .underflow_assert_delay_us = 0xFFFFFFFF, .enable_tri_buf = false, .enable_legacy_fast_update = true, + .using_dml2 = false, }; static void dcn201_dpp_destroy(struct dpp **dpp) diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c index 58a0d37e9523..42277b280586 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c @@ -654,6 +654,7 @@ static const struct dc_debug_options debug_defaults_drv = { .dmub_command_table = true, .use_max_lb = true, .enable_legacy_fast_update = true, + .using_dml2 = false, }; static const struct dc_panel_config panel_config_defaults = { diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c index 50dc83404644..11f7746f3a65 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c @@ -613,16 +613,19 @@ static void dpp3_program_blnd_pwl( REG_SET(CM_BLNDGAM_LUT_DATA, 0, CM_BLNDGAM_LUT_DATA, rgb[i].red_reg); REG_SET(CM_BLNDGAM_LUT_DATA, 0, CM_BLNDGAM_LUT_DATA, last_base_value_red); } else { + REG_SET(CM_BLNDGAM_LUT_INDEX, 0, CM_BLNDGAM_LUT_INDEX, 0); REG_UPDATE(CM_BLNDGAM_LUT_CONTROL, CM_BLNDGAM_LUT_WRITE_COLOR_MASK, 4); for (i = 0 ; i < num; i++) REG_SET(CM_BLNDGAM_LUT_DATA, 0, CM_BLNDGAM_LUT_DATA, rgb[i].red_reg); REG_SET(CM_BLNDGAM_LUT_DATA, 0, CM_BLNDGAM_LUT_DATA, last_base_value_red); + REG_SET(CM_BLNDGAM_LUT_INDEX, 0, CM_BLNDGAM_LUT_INDEX, 0); REG_UPDATE(CM_BLNDGAM_LUT_CONTROL, CM_BLNDGAM_LUT_WRITE_COLOR_MASK, 2); for (i = 0 ; i < num; i++) REG_SET(CM_BLNDGAM_LUT_DATA, 0, CM_BLNDGAM_LUT_DATA, 
rgb[i].green_reg); REG_SET(CM_BLNDGAM_LUT_DATA, 0, CM_BLNDGAM_LUT_DATA, last_base_value_green); + REG_SET(CM_BLNDGAM_LUT_INDEX, 0, CM_BLNDGAM_LUT_INDEX, 0); REG_UPDATE(CM_BLNDGAM_LUT_CONTROL, CM_BLNDGAM_LUT_WRITE_COLOR_MASK, 1); for (i = 0 ; i < num; i++) REG_SET(CM_BLNDGAM_LUT_DATA, 0, CM_BLNDGAM_LUT_DATA, rgb[i].blue_reg); diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c index 2861d974fcf6..75547ce86c09 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c @@ -316,7 +316,7 @@ bool hubp3_program_surface_flip_and_addr( return true; } -static void hubp3_program_tiling( +void hubp3_program_tiling( struct dcn20_hubp *hubp2, const union dc_tiling_info *info, const enum surface_pixel_format pixel_format) diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.h index 8a32772d4e91..b010531a7fe8 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.h +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.h @@ -278,6 +278,11 @@ void hubp3_setup( struct _vcs_dpi_display_rq_regs_st *rq_regs, struct _vcs_dpi_display_pipe_dest_params_st *pipe_dest); +void hubp3_program_tiling( + struct dcn20_hubp *hubp2, + const union dc_tiling_info *info, + const enum surface_pixel_format pixel_format); + void hubp3_dcc_control(struct hubp *hubp, bool enable, enum hubp_ind_block_size blk_size); diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c index 473581cff06b..7b259cb5f418 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c @@ -729,6 +729,7 @@ static const struct dc_debug_options debug_defaults_drv = { .use_max_lb = true, .exit_idle_opt_for_cursor_updates = true, .enable_legacy_fast_update = false, + .using_dml2 = false, }; static const struct dc_panel_config panel_config_defaults = { diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c index b4b3b52990b9..f3b75f283aa2 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c @@ -701,7 +701,8 @@ static const struct dc_debug_options debug_defaults_drv = { .dwb_fi_phase = -1, // -1 = disable .dmub_command_table = true, .use_max_lb = false, - .exit_idle_opt_for_cursor_updates = true + .exit_idle_opt_for_cursor_updates = true, + .using_dml2 = false, }; static void dcn301_dpp_destroy(struct dpp **dpp) diff --git a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c index 06332bd4e625..63ac984a04f7 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c @@ -99,6 +99,7 @@ static const struct dc_debug_options debug_defaults_drv = { .use_max_lb = true, .exit_idle_opt_for_cursor_updates = true, .enable_legacy_fast_update = false, + .using_dml2 = false, }; static const struct dc_panel_config panel_config_defaults = { diff --git a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_dccg.h b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_dccg.h index 294bd757bcb5..2e12fb643005 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_dccg.h +++ b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_dccg.h @@ -2,6 +2,24 @@ /* * Copyright (C) 2021 Advanced Micro Devices, Inc. 
* + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * * Authors: AMD */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_init.c b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_init.c index 39cf7a50bd26..edb4d68b8187 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_init.c @@ -2,6 +2,24 @@ /* * Copyright (C) 2021 Advanced Micro Devices, Inc. * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * * Authors: AMD */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_init.h b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_init.h index 66b1e3604f07..4949981126d7 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_init.h +++ b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_init.h @@ -2,6 +2,24 @@ /* * Copyright (C) 2021 Advanced Micro Devices, Inc. * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * * Authors: AMD */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c index 0d91291a54a9..49cb7fde416a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c @@ -2,6 +2,24 @@ /* * Copyright (C) 2021 Advanced Micro Devices, Inc. * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * * Authors: AMD */ @@ -81,6 +99,7 @@ static const struct dc_debug_options debug_defaults_drv = { .dmub_command_table = true, .exit_idle_opt_for_cursor_updates = true, .disable_idle_power_optimizations = false, + .using_dml2 = false, }; static const struct dc_panel_config panel_config_defaults = { diff --git a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.h b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.h index 9c7d79540900..37cf1525820b 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.h +++ b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.h @@ -2,6 +2,24 @@ /* * Copyright (C) 2021 Advanced Micro Devices, Inc. * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * * Authors: AMD */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/Makefile b/drivers/gpu/drm/amd/display/dc/dcn31/Makefile index 996d8c1e9d2a..96e45c9efb46 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dcn31/Makefile @@ -1,5 +1,5 @@ # -# (c) Copyright 2020 Advanced Micro Devices, Inc. All the rights reserved +# Copyright 2020 Advanced Micro Devices, Inc. All the rights reserved # # All rights reserved. This notice is intended as a precaution against # inadvertent publication and does not imply publication or any waiver diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c index f6b59c29cee2..5b5b5e0775fa 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c @@ -109,6 +109,28 @@ static void dcn31_program_det_size(struct hubbub *hubbub, int hubp_inst, unsigne + hubbub2->det3_size + hubbub2->compbuf_size_segments <= hubbub2->crb_size_segs); } +static void dcn31_wait_for_det_apply(struct hubbub *hubbub, int hubp_inst) +{ + struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub); + + switch (hubp_inst) { + case 0: + REG_WAIT(DCHUBBUB_DET0_CTRL, DET0_SIZE_CURRENT, hubbub2->det0_size, 1000, 30); + break; + case 1: + REG_WAIT(DCHUBBUB_DET1_CTRL, DET1_SIZE_CURRENT, hubbub2->det1_size, 1000, 30); + break; + case 2: + REG_WAIT(DCHUBBUB_DET2_CTRL, DET2_SIZE_CURRENT, hubbub2->det2_size, 1000, 30); + break; + case 3: + REG_WAIT(DCHUBBUB_DET3_CTRL, DET3_SIZE_CURRENT, hubbub2->det3_size, 1000, 30); + break; + default: + break; + } +} + static void dcn31_program_compbuf_size(struct hubbub *hubbub, unsigned int compbuf_size_kb, bool safe_to_increase) { struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub); @@ -1041,6 +1063,7 @@ static const struct hubbub_funcs hubbub31_funcs = { .is_allow_self_refresh_enabled = hubbub1_is_allow_self_refresh_enabled, .verify_allow_pstate_change_high = hubbub31_verify_allow_pstate_change_high, .program_det_size = dcn31_program_det_size, + .wait_for_det_apply = dcn31_wait_for_det_apply, .program_compbuf_size = dcn31_program_compbuf_size, .init_crb = dcn31_init_crb, .hubbub_read_state = hubbub2_read_state, diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c index cdf005f91869..79416cfb22f0 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c @@ -893,6 +893,7 @@ static const struct dc_debug_options debug_defaults_drv = { .enable_legacy_fast_update = true, .enable_z9_disable_interface = true, /* Allow support for the PMFW interface for disable Z9*/ .dml_hostvm_override = DML_HOSTVM_OVERRIDE_FALSE, + .using_dml2 = false, }; static const struct dc_panel_config panel_config_defaults = { diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c index 2d7436f2ea82..677361d74a4e 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c @@ -916,7 +916,7 @@ static const struct dc_debug_options debug_defaults_drv = { .hdmistream = true, .hdmichar 
= true, .dpstream = true, - .symclk32_se = true, + .symclk32_se = false, .symclk32_le = true, .symclk_fe = true, .physymclk = true, @@ -924,7 +924,8 @@ static const struct dc_debug_options debug_defaults_drv = { } }, - .seamless_boot_odm_combine = true + .seamless_boot_odm_combine = true, + .using_dml2 = false, }; static const struct dc_debug_options debug_defaults_diags = { diff --git a/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c b/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c index c11dbb1f4033..cb8024eee8e4 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c @@ -889,6 +889,7 @@ static const struct dc_debug_options debug_defaults_drv = { }, .enable_legacy_fast_update = true, .psr_power_use_phy_fsm = 0, + .using_dml2 = false, }; static const struct dc_panel_config panel_config_defaults = { diff --git a/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c b/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c index 4220fe4cae4a..b9753d4606f8 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c @@ -885,6 +885,7 @@ static const struct dc_debug_options debug_defaults_drv = { } }, .enable_legacy_fast_update = true, + .using_dml2 = false, }; static const struct dc_panel_config panel_config_defaults = { diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c index 90f061edb64c..427cfc8c24a4 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c @@ -60,7 +60,7 @@ static const struct hw_sequencer_funcs dcn32_funcs = { .pipe_control_lock = dcn20_pipe_control_lock, .interdependent_update_lock = dcn10_lock_all_pipes, .cursor_lock = dcn10_cursor_lock, - .prepare_bandwidth = dcn30_prepare_bandwidth, + .prepare_bandwidth = dcn32_prepare_bandwidth, .optimize_bandwidth = dcn20_optimize_bandwidth, .update_bandwidth = dcn20_update_bandwidth, .set_drr = dcn10_set_drr, diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c index 1d052f08aff5..994b21ed272f 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c @@ -237,16 +237,19 @@ void mpc32_program_post1dlut_pwl( REG_SET(MPCC_MCM_1DLUT_LUT_DATA[mpcc_id], 0, MPCC_MCM_1DLUT_LUT_DATA, rgb[i].red_reg); REG_SET(MPCC_MCM_1DLUT_LUT_DATA[mpcc_id], 0, MPCC_MCM_1DLUT_LUT_DATA, last_base_value_red); } else { + REG_SET(MPCC_MCM_1DLUT_LUT_INDEX[mpcc_id], 0, MPCC_MCM_1DLUT_LUT_INDEX, 0); REG_UPDATE(MPCC_MCM_1DLUT_LUT_CONTROL[mpcc_id], MPCC_MCM_1DLUT_LUT_WRITE_COLOR_MASK, 4); for (i = 0 ; i < num; i++) REG_SET(MPCC_MCM_1DLUT_LUT_DATA[mpcc_id], 0, MPCC_MCM_1DLUT_LUT_DATA, rgb[i].red_reg); REG_SET(MPCC_MCM_1DLUT_LUT_DATA[mpcc_id], 0, MPCC_MCM_1DLUT_LUT_DATA, last_base_value_red); + REG_SET(MPCC_MCM_1DLUT_LUT_INDEX[mpcc_id], 0, MPCC_MCM_1DLUT_LUT_INDEX, 0); REG_UPDATE(MPCC_MCM_1DLUT_LUT_CONTROL[mpcc_id], MPCC_MCM_1DLUT_LUT_WRITE_COLOR_MASK, 2); for (i = 0 ; i < num; i++) REG_SET(MPCC_MCM_1DLUT_LUT_DATA[mpcc_id], 0, MPCC_MCM_1DLUT_LUT_DATA, rgb[i].green_reg); REG_SET(MPCC_MCM_1DLUT_LUT_DATA[mpcc_id], 0, MPCC_MCM_1DLUT_LUT_DATA, last_base_value_green); + REG_SET(MPCC_MCM_1DLUT_LUT_INDEX[mpcc_id], 0, MPCC_MCM_1DLUT_LUT_INDEX, 0); REG_UPDATE(MPCC_MCM_1DLUT_LUT_CONTROL[mpcc_id], MPCC_MCM_1DLUT_LUT_WRITE_COLOR_MASK, 1); for (i = 0 ; i < num; i++) 
REG_SET(MPCC_MCM_1DLUT_LUT_DATA[mpcc_id], 0, MPCC_MCM_1DLUT_LUT_DATA, rgb[i].blue_reg); diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c index 81b0588fa80b..89b072447dba 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c @@ -1993,7 +1993,8 @@ int dcn32_populate_dml_pipes_from_context( } static struct dc_cap_funcs cap_funcs = { - .get_dcc_compression_cap = dcn20_get_dcc_compression_cap + .get_dcc_compression_cap = dcn20_get_dcc_compression_cap, + .get_subvp_en = dcn32_subvp_in_use, }; void dcn32_calculate_wm_and_dlg(struct dc *dc, struct dc_state *context, @@ -2445,6 +2446,11 @@ static bool dcn32_resource_construct( dc->dml2_options.callbacks.build_scaling_params = &resource_build_scaling_params; dc->dml2_options.callbacks.can_support_mclk_switch_using_fw_based_vblank_stretch = &dcn30_can_support_mclk_switch_using_fw_based_vblank_stretch; dc->dml2_options.callbacks.acquire_secondary_pipe_for_mpc_odm = &dc_resource_acquire_secondary_pipe_for_mpc_odm_legacy; + dc->dml2_options.callbacks.update_pipes_for_stream_with_slice_count = &resource_update_pipes_for_stream_with_slice_count; + dc->dml2_options.callbacks.update_pipes_for_plane_with_slice_count = &resource_update_pipes_for_plane_with_slice_count; + dc->dml2_options.callbacks.get_mpc_slice_index = &resource_get_mpc_slice_index; + dc->dml2_options.callbacks.get_odm_slice_index = &resource_get_odm_slice_index; + dc->dml2_options.callbacks.get_opp_head = &resource_get_opp_head; dc->dml2_options.svp_pstate.callbacks.dc = dc; dc->dml2_options.svp_pstate.callbacks.add_plane_to_context = &dc_add_plane_to_context; @@ -2476,6 +2482,7 @@ static bool dcn32_resource_construct( dc->dml2_options.max_segments_per_hubp = 18; dc->dml2_options.det_segment_size = DCN3_2_DET_SEG_SIZE; + dc->dml2_options.map_dc_pipes_with_callbacks = true; if (ASICREV_IS_GC_11_0_3(dc->ctx->asic_id.hw_internal_rev) && (dc->config.sdpif_request_limit_words_per_umc == 0)) dc->config.sdpif_request_limit_words_per_umc = 16; diff --git a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c index 44caf6711589..f7de3eca1225 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c @@ -732,6 +732,7 @@ static const struct dc_debug_options debug_defaults_drv = { .fpo_vactive_max_blank_us = 1000, .enable_legacy_fast_update = false, .disable_dc_mode_overwrite = true, + .using_dml2 = false, }; static struct dce_aux *dcn321_aux_engine_create( @@ -1570,7 +1571,8 @@ static void dcn321_destroy_resource_pool(struct resource_pool **pool) } static struct dc_cap_funcs cap_funcs = { - .get_dcc_compression_cap = dcn20_get_dcc_compression_cap + .get_dcc_compression_cap = dcn20_get_dcc_compression_cap, + .get_subvp_en = dcn32_subvp_in_use, }; static void dcn321_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params) @@ -1998,6 +2000,11 @@ static bool dcn321_resource_construct( dc->dml2_options.callbacks.build_scaling_params = &resource_build_scaling_params; dc->dml2_options.callbacks.can_support_mclk_switch_using_fw_based_vblank_stretch = &dcn30_can_support_mclk_switch_using_fw_based_vblank_stretch; dc->dml2_options.callbacks.acquire_secondary_pipe_for_mpc_odm = &dc_resource_acquire_secondary_pipe_for_mpc_odm_legacy; + dc->dml2_options.callbacks.update_pipes_for_stream_with_slice_count = 
&resource_update_pipes_for_stream_with_slice_count; + dc->dml2_options.callbacks.update_pipes_for_plane_with_slice_count = &resource_update_pipes_for_plane_with_slice_count; + dc->dml2_options.callbacks.get_mpc_slice_index = &resource_get_mpc_slice_index; + dc->dml2_options.callbacks.get_odm_slice_index = &resource_get_odm_slice_index; + dc->dml2_options.callbacks.get_opp_head = &resource_get_opp_head; dc->dml2_options.svp_pstate.callbacks.dc = dc; dc->dml2_options.svp_pstate.callbacks.add_plane_to_context = &dc_add_plane_to_context; diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dpp.c index 84a9afb7098a..3341ef71009b 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dpp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dpp.c @@ -20,6 +20,8 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * + * Authors: AMD + * */ #include "core_types.h" diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dpp.h b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dpp.h index 17f5344994f0..09b84307cd9e 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dpp.h +++ b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dpp.h @@ -20,6 +20,8 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * + * Authors: AMD + * */ #ifndef __DCN35_DPP_H__ diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dsc.c b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dsc.c index d7915c96bcd1..71d2dff9986d 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dsc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dsc.c @@ -20,6 +20,8 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * + * Authors: AMD + * */ #include "dcn35_dsc.h" diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dsc.h b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dsc.h index c19c2e022f12..133ad38842cc 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dsc.h +++ b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dsc.h @@ -20,6 +20,8 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * + * Authors: AMD + * */ #ifndef __DCN35_DSC_H__ diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dwb.h b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dwb.h index 877f93c8168e..886e727ed080 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dwb.h +++ b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dwb.h @@ -20,6 +20,8 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * + * Authors: AMD + * */ #ifndef __DCN35_DWB_H diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_hubbub.c index f8e63bd541bc..339bf0c722dd 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_hubbub.c @@ -20,6 +20,8 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * + * Authors: AMD + * */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_hubbub.h index d57ed580305e..54cf00ffceb8 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_hubbub.h +++ b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_hubbub.h @@ -20,6 +20,8 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
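/*
 * A runnable model of the LUT fix in the dcn30_dpp.c and dcn32_mpc.c
 * hunks above: the *_LUT_DATA register auto-increments a shared index,
 * so the index must be rewound to 0 before each per-channel write pass
 * or later channels land past the window. write_index()/write_data()
 * are hypothetical stand-ins for the REG_SET calls on *_LUT_INDEX and
 * *_LUT_DATA.
 */
#include <stdint.h>
#include <stdio.h>

#define LUT_SIZE 4

static uint32_t lut[3][LUT_SIZE];	/* R, G, B channels */
static uint32_t lut_index;
static unsigned int channel;		/* selected via the color mask */

static void write_index(uint32_t v) { lut_index = v; }

static void write_data(uint32_t v)
{
	if (lut_index < LUT_SIZE)	/* writes past the window are lost */
		lut[channel][lut_index] = v;
	lut_index++;			/* hardware-style auto-increment */
}

static void program_channel(unsigned int ch, const uint32_t *vals, int n)
{
	int i;

	channel = ch;
	write_index(0);			/* the fix: rewind before each pass */
	for (i = 0; i < n; i++)
		write_data(vals[i]);
}

int main(void)
{
	const uint32_t ramp[LUT_SIZE] = { 10, 20, 30, 40 };
	unsigned int ch;

	for (ch = 0; ch < 3; ch++)
		program_channel(ch, ramp, LUT_SIZE);
	/* without the rewind, green and blue would never be written */
	printf("g[0]=%u b[3]=%u\n", lut[1][0], lut[2][3]);
	return 0;
}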
* + * Authors: AMD + * */ #ifndef __DC_HUBBUB_DCN35_H__ diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_hubp.c index 2ae7b151b56c..771fcd0d3b99 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_hubp.c @@ -20,6 +20,8 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * + * Authors: AMD + * */ #include "dcn35_hubp.h" @@ -51,11 +53,146 @@ static void hubp35_init(struct hubp *hubp) /*do nothing for now for dcn3.5 or later*/ } + +void hubp35_program_pixel_format( + struct hubp *hubp, + enum surface_pixel_format format) +{ + struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp); + uint32_t green_bar = 1; + uint32_t red_bar = 3; + uint32_t blue_bar = 2; + + /* swap for ABGR format */ + if (format == SURFACE_PIXEL_FORMAT_GRPH_ABGR8888 + || format == SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010 + || format == SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010_XR_BIAS + || format == SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616 + || format == SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F) { + red_bar = 2; + blue_bar = 3; + } + + REG_UPDATE_3(HUBPRET_CONTROL, + CROSSBAR_SRC_Y_G, green_bar, + CROSSBAR_SRC_CB_B, blue_bar, + CROSSBAR_SRC_CR_R, red_bar); + + /* Mapping is same as ipp programming (cnvc) */ + + switch (format) { + case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555: + REG_UPDATE(DCSURF_SURFACE_CONFIG, + SURFACE_PIXEL_FORMAT, 1); + break; + case SURFACE_PIXEL_FORMAT_GRPH_RGB565: + REG_UPDATE(DCSURF_SURFACE_CONFIG, + SURFACE_PIXEL_FORMAT, 3); + break; + case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888: + case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888: + REG_UPDATE(DCSURF_SURFACE_CONFIG, + SURFACE_PIXEL_FORMAT, 8); + break; + case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010: + case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010: + case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010_XR_BIAS: + REG_UPDATE(DCSURF_SURFACE_CONFIG, + SURFACE_PIXEL_FORMAT, 10); + break; + case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616: + case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616: /* we use crossbar already */ + REG_UPDATE(DCSURF_SURFACE_CONFIG, + SURFACE_PIXEL_FORMAT, 26); /* ARGB16161616_UNORM */ + break; + case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F: + case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:/*we use crossbar already*/ + REG_UPDATE(DCSURF_SURFACE_CONFIG, + SURFACE_PIXEL_FORMAT, 24); + break; + + case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr: + REG_UPDATE(DCSURF_SURFACE_CONFIG, + SURFACE_PIXEL_FORMAT, 65); + break; + case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb: + REG_UPDATE(DCSURF_SURFACE_CONFIG, + SURFACE_PIXEL_FORMAT, 64); + break; + case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr: + REG_UPDATE(DCSURF_SURFACE_CONFIG, + SURFACE_PIXEL_FORMAT, 67); + break; + case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb: + REG_UPDATE(DCSURF_SURFACE_CONFIG, + SURFACE_PIXEL_FORMAT, 66); + break; + case SURFACE_PIXEL_FORMAT_VIDEO_AYCrCb8888: + REG_UPDATE(DCSURF_SURFACE_CONFIG, + SURFACE_PIXEL_FORMAT, 12); + break; + case SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FIX: + REG_UPDATE(DCSURF_SURFACE_CONFIG, + SURFACE_PIXEL_FORMAT, 112); + break; + case SURFACE_PIXEL_FORMAT_GRPH_BGR101111_FIX: + REG_UPDATE(DCSURF_SURFACE_CONFIG, + SURFACE_PIXEL_FORMAT, 113); + break; + case SURFACE_PIXEL_FORMAT_VIDEO_ACrYCb2101010: + REG_UPDATE(DCSURF_SURFACE_CONFIG, + SURFACE_PIXEL_FORMAT, 114); + break; + case SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FLOAT: + REG_UPDATE(DCSURF_SURFACE_CONFIG, + SURFACE_PIXEL_FORMAT, 118); + break; + case 
SURFACE_PIXEL_FORMAT_GRPH_BGR101111_FLOAT: + REG_UPDATE(DCSURF_SURFACE_CONFIG, + SURFACE_PIXEL_FORMAT, 119); + break; + case SURFACE_PIXEL_FORMAT_GRPH_RGBE: + REG_UPDATE_2(DCSURF_SURFACE_CONFIG, + SURFACE_PIXEL_FORMAT, 116, + ALPHA_PLANE_EN, 0); + break; + case SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA: + REG_UPDATE_2(DCSURF_SURFACE_CONFIG, + SURFACE_PIXEL_FORMAT, 116, + ALPHA_PLANE_EN, 1); + break; + default: + BREAK_TO_DEBUGGER(); + break; + } + + /* don't see the need of program the xbar in DCN 1.0 */ +} + +void hubp35_program_surface_config( + struct hubp *hubp, + enum surface_pixel_format format, + union dc_tiling_info *tiling_info, + struct plane_size *plane_size, + enum dc_rotation_angle rotation, + struct dc_plane_dcc_param *dcc, + bool horizontal_mirror, + unsigned int compat_level) +{ + struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp); + + hubp3_dcc_control_sienna_cichlid(hubp, dcc); + hubp3_program_tiling(hubp2, tiling_info, format); + hubp2_program_size(hubp, format, plane_size, dcc); + hubp2_program_rotation(hubp, rotation, horizontal_mirror); + hubp35_program_pixel_format(hubp, format); +} + struct hubp_funcs dcn35_hubp_funcs = { .hubp_enable_tripleBuffer = hubp2_enable_triplebuffer, .hubp_is_triplebuffer_enabled = hubp2_is_triplebuffer_enabled, .hubp_program_surface_flip_and_addr = hubp3_program_surface_flip_and_addr, - .hubp_program_surface_config = hubp3_program_surface_config, + .hubp_program_surface_config = hubp35_program_surface_config, .hubp_is_flip_pending = hubp2_is_flip_pending, .hubp_setup = hubp3_setup, .hubp_setup_interdependent = hubp2_setup_interdependent, diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_hubp.h index a8879c3db447..586b43aa5834 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_hubp.h +++ b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_hubp.h @@ -20,6 +20,8 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * + * Authors: AMD + * */ #ifndef __DC_HUBP_DCN35_H__ @@ -56,4 +58,18 @@ bool hubp35_construct( void hubp35_set_fgcg(struct hubp *hubp, bool enable); +void hubp35_program_pixel_format( + struct hubp *hubp, + enum surface_pixel_format format); + +void hubp35_program_surface_config( + struct hubp *hubp, + enum surface_pixel_format format, + union dc_tiling_info *tiling_info, + struct plane_size *plane_size, + enum dc_rotation_angle rotation, + struct dc_plane_dcc_param *dcc, + bool horizontal_mirror, + unsigned int compat_level); + #endif /* __DC_HUBP_DCN35_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_init.c b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_init.c index 534223dbe595..296bf3a38cb9 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_init.c @@ -20,6 +20,8 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * + * Authors: AMD + * */ #include "dce110/dce110_hwseq.h" diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_init.h b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_init.h index ccfc28225cff..b67015032c35 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_init.h +++ b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_init.h @@ -20,6 +20,8 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
* + * Authors: AMD + * */ #ifndef __DC_DCN35_INIT_H__ diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_mmhubbub.c b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_mmhubbub.c index ea1042cdc88d..4317100564a4 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_mmhubbub.c +++ b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_mmhubbub.c @@ -20,6 +20,8 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * + * Authors: AMD + * */ #include "dcn35_mmhubbub.h" diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_mmhubbub.h b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_mmhubbub.h index e7b5b6703e73..098e13e07272 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_mmhubbub.h +++ b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_mmhubbub.h @@ -20,6 +20,8 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * + * Authors: AMD + * */ #ifndef __DCN35_MMHUBBUB_H diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_opp.c b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_opp.c index d79e8c6365c1..3542b51c9aac 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_opp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_opp.c @@ -20,6 +20,8 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * + * Authors: AMD + * */ #include "dcn35_opp.h" diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_opp.h b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_opp.h index 9dd21b104287..a9a413527801 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_opp.h +++ b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_opp.h @@ -20,6 +20,8 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * + * Authors: AMD + * */ #ifndef __DCN35_OPP_H diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_optc.c b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_optc.c index b0c068240a94..a4a39f1638cf 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_optc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_optc.c @@ -20,6 +20,8 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * + * Authors: AMD + * */ #include "dcn35_optc.h" diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_optc.h b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_optc.h index 7e7a5f4b85b0..1f422e4c468f 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_optc.h +++ b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_optc.h @@ -20,6 +20,8 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * + * Authors: AMD + * */ #ifndef __DC_OPTC_DCN35_H__ diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_pg_cntl.c b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_pg_cntl.c index e62a192c595e..0f60c40e1fc5 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_pg_cntl.c +++ b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_pg_cntl.c @@ -20,6 +20,8 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
* + * Authors: AMD + * */ #include "reg_helper.h" diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_pg_cntl.h b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_pg_cntl.h index d073ce5cc6f3..3de240884d22 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_pg_cntl.h +++ b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_pg_cntl.h @@ -20,6 +20,8 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * + * Authors: AMD + * */ #ifndef _DCN35_PG_CNTL_H_ diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_resource.c b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_resource.c index 99d55b958977..3c7c810bab1f 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_resource.c @@ -20,6 +20,8 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * + * Authors: AMD + * */ #include "dm_services.h" @@ -2084,6 +2086,11 @@ static bool dcn35_resource_construct( dc->dml2_options.callbacks.build_scaling_params = &resource_build_scaling_params; dc->dml2_options.callbacks.can_support_mclk_switch_using_fw_based_vblank_stretch = &dcn30_can_support_mclk_switch_using_fw_based_vblank_stretch; dc->dml2_options.callbacks.acquire_secondary_pipe_for_mpc_odm = &dc_resource_acquire_secondary_pipe_for_mpc_odm_legacy; + dc->dml2_options.callbacks.update_pipes_for_stream_with_slice_count = &resource_update_pipes_for_stream_with_slice_count; + dc->dml2_options.callbacks.update_pipes_for_plane_with_slice_count = &resource_update_pipes_for_plane_with_slice_count; + dc->dml2_options.callbacks.get_mpc_slice_index = &resource_get_mpc_slice_index; + dc->dml2_options.callbacks.get_odm_slice_index = &resource_get_odm_slice_index; + dc->dml2_options.callbacks.get_opp_head = &resource_get_opp_head; dc->dml2_options.max_segments_per_hubp = 24; dc->dml2_options.det_segment_size = DCN3_2_DET_SEG_SIZE;/*todo*/ diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_resource.h b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_resource.h index 5ec70d46a38f..99aea102e3f7 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_resource.h +++ b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_resource.h @@ -20,6 +20,8 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * + * Authors: AMD + * */ #ifndef _DCN35_RESOURCE_H_ diff --git a/drivers/gpu/drm/amd/display/dc/dml2/Makefile b/drivers/gpu/drm/amd/display/dc/dml2/Makefile index 66431525f2a0..acff3449b8d7 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dml2/Makefile @@ -20,7 +20,9 @@ # ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR # OTHER DEALINGS IN THE SOFTWARE. # -# makefile for dml2 +# Authors: AMD +# +# Makefile for dml2. 
ifdef CONFIG_X86 dml2_ccflags-$(CONFIG_CC_IS_GCC) := -mhard-float @@ -58,8 +60,12 @@ endif endif ifneq ($(CONFIG_FRAME_WARN),0) +ifeq ($(filter y,$(CONFIG_KASAN)$(CONFIG_KCSAN)),y) +frame_warn_flag := -Wframe-larger-than=3072 +else frame_warn_flag := -Wframe-larger-than=2048 endif +endif CFLAGS_$(AMDDALPATH)/dc/dml2/display_mode_core.o := $(dml2_ccflags) $(frame_warn_flag) CFLAGS_$(AMDDALPATH)/dc/dml2/display_mode_util.o := $(dml2_ccflags) diff --git a/drivers/gpu/drm/amd/display/dc/dml2/cmntypes.h b/drivers/gpu/drm/amd/display/dc/dml2/cmntypes.h index 5450aa5295f7..e450445bc05d 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/cmntypes.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/cmntypes.h @@ -20,6 +20,8 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * + * Authors: AMD + * */ #ifndef __CMNTYPES_H__ diff --git a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c index 5f54251a559c..510be909cd75 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c @@ -20,6 +20,8 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * + * Authors: AMD + * */ #include "display_mode_core.h" diff --git a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core_structs.h b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core_structs.h index c2fa28ff57ab..b274bfb4225f 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core_structs.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core_structs.h @@ -20,6 +20,8 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * + * Authors: AMD + * */ #ifndef __DISPLAY_MODE_CORE_STRUCT_H__ diff --git a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_lib_defines.h b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_lib_defines.h index 99bdb2ddd8ab..de63364be01d 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_lib_defines.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_lib_defines.h @@ -20,6 +20,8 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * + * Authors: AMD + * */ #ifndef __DISPLAY_MODE_LIB_DEFINES_H__ diff --git a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_util.c b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_util.c index 7dd1f8a12582..c247aee89caf 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_util.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_util.c @@ -20,6 +20,8 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * + * Authors: AMD + * */ #include "display_mode_util.h" diff --git a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_util.h b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_util.h index fb74385e1060..113b0265e1d1 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_util.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_util.h @@ -20,6 +20,8 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
* + * Authors: AMD + * */ #ifndef __DISPLAY_MODE_UTIL_H__ diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c index f45fbe820445..d2046e770c50 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c @@ -20,6 +20,8 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * + * Authors: AMD + * */ #include "dml2_mall_phantom.h" @@ -756,6 +758,148 @@ static void map_pipes_for_plane(struct dml2_context *ctx, struct dc_state *state free_unused_pipes_for_plane(ctx, state, plane, &scratch->pipe_pool, stream->stream_id); } +static unsigned int get_mpc_factor(struct dml2_context *ctx, + const struct dc_state *state, + const struct dml_display_cfg_st *disp_cfg, + struct dml2_dml_to_dc_pipe_mapping *mapping, + const struct dc_stream_status *status, unsigned int stream_id, + int plane_idx) +{ + unsigned int plane_id; + unsigned int cfg_idx; + + get_plane_id(state, status->plane_states[plane_idx], stream_id, &plane_id); + cfg_idx = find_disp_cfg_idx_by_plane_id(mapping, plane_id); + if (ctx->architecture == dml2_architecture_20) + return (unsigned int)disp_cfg->hw.DPPPerSurface[cfg_idx]; + ASSERT(false); + return 1; +} + +static unsigned int get_odm_factor( + const struct dml2_context *ctx, + const struct dml_display_cfg_st *disp_cfg, + struct dml2_dml_to_dc_pipe_mapping *mapping, + const struct dc_stream_state *stream) +{ + unsigned int cfg_idx = find_disp_cfg_idx_by_stream_id( + mapping, stream->stream_id); + + if (ctx->architecture == dml2_architecture_20) + switch (disp_cfg->hw.ODMMode[cfg_idx]) { + case dml_odm_mode_bypass: + return 1; + case dml_odm_mode_combine_2to1: + return 2; + case dml_odm_mode_combine_4to1: + return 4; + default: + break; + } + ASSERT(false); + return 1; +} + +static void populate_mpc_factors_for_stream( + struct dml2_context *ctx, + const struct dml_display_cfg_st *disp_cfg, + struct dml2_dml_to_dc_pipe_mapping *mapping, + const struct dc_state *state, + unsigned int stream_idx, + unsigned int odm_factor, + unsigned int mpc_factors[MAX_PIPES]) +{ + const struct dc_stream_status *status = &state->stream_status[stream_idx]; + unsigned int stream_id = state->streams[stream_idx]->stream_id; + int i; + + for (i = 0; i < status->plane_count; i++) + if (odm_factor == 1) + mpc_factors[i] = get_mpc_factor( + ctx, state, disp_cfg, mapping, status, + stream_id, i); + else + mpc_factors[i] = 1; +} + +static void populate_odm_factors(const struct dml2_context *ctx, + const struct dml_display_cfg_st *disp_cfg, + struct dml2_dml_to_dc_pipe_mapping *mapping, + const struct dc_state *state, + unsigned int odm_factors[MAX_PIPES]) +{ + int i; + + for (i = 0; i < state->stream_count; i++) + odm_factors[i] = get_odm_factor( + ctx, disp_cfg, mapping, state->streams[i]); +} + +static bool map_dc_pipes_for_stream(struct dml2_context *ctx, + struct dc_state *state, + const struct dc_state *existing_state, + const struct dc_stream_state *stream, + const struct dc_stream_status *status, + unsigned int odm_factor, + unsigned int mpc_factors[MAX_PIPES]) +{ + int plane_idx; + bool result = true; + + if (odm_factor == 1) + /* + * ODM and MPC combines are by DML design mutually exclusive. + * ODM factor of 1 means MPC factors may be greater than 1. 
+ * In this case, we want to set ODM factor to 1 first to free up + * pipe resources from previous ODM configuration before setting + * up MPC combine to acquire more pipe resources. + */ + result &= ctx->config.callbacks.update_pipes_for_stream_with_slice_count( + state, + existing_state, + ctx->config.callbacks.dc->res_pool, + stream, + odm_factor); + for (plane_idx = 0; plane_idx < status->plane_count; plane_idx++) + result &= ctx->config.callbacks.update_pipes_for_plane_with_slice_count( + state, + existing_state, + ctx->config.callbacks.dc->res_pool, + status->plane_states[plane_idx], + mpc_factors[plane_idx]); + if (odm_factor > 1) + result &= ctx->config.callbacks.update_pipes_for_stream_with_slice_count( + state, + existing_state, + ctx->config.callbacks.dc->res_pool, + stream, + odm_factor); + return result; +} + +static bool map_dc_pipes_with_callbacks(struct dml2_context *ctx, + struct dc_state *state, + const struct dml_display_cfg_st *disp_cfg, + struct dml2_dml_to_dc_pipe_mapping *mapping, + const struct dc_state *existing_state) +{ + unsigned int odm_factors[MAX_PIPES]; + unsigned int mpc_factors_for_stream[MAX_PIPES]; + int i; + bool result = true; + + populate_odm_factors(ctx, disp_cfg, mapping, state, odm_factors); + for (i = 0; i < state->stream_count; i++) { + populate_mpc_factors_for_stream(ctx, disp_cfg, mapping, state, + i, odm_factors[i], mpc_factors_for_stream); + result &= map_dc_pipes_for_stream(ctx, state, existing_state, + state->streams[i], + &state->stream_status[i], + odm_factors[i], mpc_factors_for_stream); + } + return result; +} + bool dml2_map_dc_pipes(struct dml2_context *ctx, struct dc_state *state, const struct dml_display_cfg_st *disp_cfg, struct dml2_dml_to_dc_pipe_mapping *mapping, const struct dc_state *existing_state) { int stream_index, plane_index, i; @@ -770,6 +914,10 @@ bool dml2_map_dc_pipes(struct dml2_context *ctx, struct dc_state *state, const s unsigned int odm_mode_array[__DML2_WRAPPER_MAX_STREAMS_PLANES__] = {0}, dpp_per_surface_array[__DML2_WRAPPER_MAX_STREAMS_PLANES__] = {0}; struct dc_pipe_mapping_scratch scratch; + if (ctx->config.map_dc_pipes_with_callbacks) + return map_dc_pipes_with_callbacks( + ctx, state, disp_cfg, mapping, existing_state); + if (ctx->architecture == dml2_architecture_21) { /* * Extract ODM and DPP outputs from DML2.1 and map them in an array as required for pipe mapping in dml2_map_dc_pipes. diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.h b/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.h index a78de194793d..2f91244a7b01 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.h @@ -20,6 +20,8 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * + * Authors: AMD + * */ #ifndef __DML2_DC_RESOURCE_MGMT_H__ diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_types.h b/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_types.h index 191b2e63ce6e..e85866db80ff 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_types.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_types.h @@ -20,6 +20,8 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
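/*
 * A runnable sketch of the ordering rule spelled out in the comment
 * above, with set_odm()/set_mpc() as hypothetical stand-ins for the
 * update_pipes_for_{stream,plane}_with_slice_count callbacks: the
 * shrinking combine mode releases its pipes before the growing one
 * acquires them, which only works because ODM and MPC combine are
 * mutually exclusive by DML design.
 */
#include <stdbool.h>
#include <stdio.h>

static bool set_odm(int factor) { printf("odm -> %d\n", factor); return true; }
static bool set_mpc(int factor) { printf("mpc -> %d\n", factor); return true; }

static bool remap_pipes(int odm_factor, int mpc_factor)
{
	bool ok = true;

	if (odm_factor == 1)
		ok &= set_odm(odm_factor);	/* free ODM pipes first */
	ok &= set_mpc(mpc_factor);
	if (odm_factor > 1)
		ok &= set_odm(odm_factor);	/* grow ODM after MPC shrank */
	return ok;
}

int main(void)
{
	remap_pipes(1, 2);	/* MPC combine: drop ODM first */
	remap_pipes(2, 1);	/* ODM combine: shrink MPC first */
	return 0;
}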
* + * Authors: AMD + * */ diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_internal_types.h b/drivers/gpu/drm/amd/display/dc/dml2/dml2_internal_types.h index bcb59bcd9179..ed5b767d46e0 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_internal_types.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_internal_types.h @@ -20,6 +20,8 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * + * Authors: AMD + * */ #ifndef __DML2_INTERNAL_TYPES_H__ diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c index 02797cb2667e..32f8a43af3d6 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c @@ -20,6 +20,8 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * + * Authors: AMD + * */ #include "dml2_dc_types.h" diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.h b/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.h index c14b4de29d73..9d64851f54e7 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.h @@ -20,6 +20,8 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * + * Authors: AMD + * */ #ifndef __DML2_MALL_PHANTOM_H__ diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_policy.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_policy.c index f8e9aa32ceab..c4c52173ef22 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_policy.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_policy.c @@ -20,6 +20,8 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * + * Authors: AMD + * */ #include "dml2_policy.h" diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c index e5ccd2887c94..89836f175a13 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c @@ -20,6 +20,8 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * + * Authors: AMD + * */ #include "display_mode_core.h" @@ -569,6 +571,8 @@ static void populate_dml_timing_cfg_from_stream_state(struct dml_timing_cfg_st * out->RefreshRate[location] = ((in->timing.pix_clk_100hz * 100) / in->timing.h_total) / in->timing.v_total; out->VFrontPorch[location] = in->timing.v_front_porch; out->PixelClock[location] = in->timing.pix_clk_100hz / 10000.00; + if (in->timing.timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING) + out->PixelClock[location] *= 2; out->HTotal[location] = in->timing.h_total; out->VTotal[location] = in->timing.v_total; out->Interlace[location] = in->timing.flags.INTERLACE; diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.h b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.h index 1bcfca51e665..dac6d27b14cd 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.h @@ -20,6 +20,8 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
* + * Authors: AMD + * */ #ifndef __DML2_TRANSLATION_HELPER_H__ diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c index ac6bf776bad0..69fd96f4f3b0 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c @@ -20,6 +20,8 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * + * Authors: AMD + * */ //#include "dml2_utils.h" diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c index 9a5e145168bc..0a06bf3b135a 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c @@ -20,6 +20,8 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * + * Authors: AMD + * */ #include "display_mode_core.h" diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h index f3b85b0891d3..317f90776d97 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h @@ -20,6 +20,8 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * + * Authors: AMD + * */ #ifndef _DML2_WRAPPER_H_ @@ -71,6 +73,21 @@ struct dml2_dc_callbacks { bool (*build_scaling_params)(struct pipe_ctx *pipe_ctx); bool (*can_support_mclk_switch_using_fw_based_vblank_stretch)(struct dc *dc, struct dc_state *context); bool (*acquire_secondary_pipe_for_mpc_odm)(const struct dc *dc, struct dc_state *state, struct pipe_ctx *pri_pipe, struct pipe_ctx *sec_pipe, bool odm); + bool (*update_pipes_for_stream_with_slice_count)( + struct dc_state *new_ctx, + const struct dc_state *cur_ctx, + const struct resource_pool *pool, + const struct dc_stream_state *stream, + int new_slice_count); + bool (*update_pipes_for_plane_with_slice_count)( + struct dc_state *new_ctx, + const struct dc_state *cur_ctx, + const struct resource_pool *pool, + const struct dc_plane_state *plane, + int slice_count); + int (*get_odm_slice_index)(const struct pipe_ctx *opp_head); + int (*get_mpc_slice_index)(const struct pipe_ctx *dpp_pipe); + struct pipe_ctx *(*get_opp_head)(const struct pipe_ctx *pipe_ctx); }; struct dml2_dc_svp_callbacks { @@ -152,6 +169,7 @@ struct dml2_configuration_options { struct dml2_soc_bbox_overrides bbox_overrides; unsigned int max_segments_per_hubp; unsigned int det_segment_size; + bool map_dc_pipes_with_callbacks; }; /* diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml_assert.h b/drivers/gpu/drm/amd/display/dc/dml2/dml_assert.h index b5e463aa61af..17f0972b1af7 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml_assert.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml_assert.h @@ -20,6 +20,8 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * + * Authors: AMD + * */ #ifndef __DML_ASSERT_H__ diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml_depedencies.h b/drivers/gpu/drm/amd/display/dc/dml2/dml_depedencies.h index d8c7f7497e9c..f7d30b47beff 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml_depedencies.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml_depedencies.h @@ -20,6 +20,8 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
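/*
 * An illustrative sketch (much simplified, hypothetical types) of the
 * callback-table pattern that the dml2_dc_callbacks hunk above
 * extends: the DML2 core reaches resource helpers such as
 * get_odm_slice_index() through function pointers filled in by each
 * DCN resource file, so the shared library never links directly
 * against a specific DCN generation.
 */
#include <stdio.h>

struct pipe { int odm_slice; };

struct demo_callbacks {
	int (*get_odm_slice_index)(const struct pipe *p);
};

static int demo_get_odm_slice_index(const struct pipe *p)
{
	return p->odm_slice;
}

/* DML2 side: depends only on the table, not on the resource layer */
static void dml2_side(const struct demo_callbacks *cb, const struct pipe *p)
{
	printf("odm slice = %d\n", cb->get_odm_slice_index(p));
}

int main(void)
{
	const struct demo_callbacks cb = {
		.get_odm_slice_index = demo_get_odm_slice_index,
	};
	const struct pipe p = { .odm_slice = 1 };

	dml2_side(&cb, &p);
	return 0;
}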
* + * Authors: AMD + * */ /* This header intentinally does not include an #ifdef guard as it only contains includes for other headers*/ diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml_logging.h b/drivers/gpu/drm/amd/display/dc/dml2/dml_logging.h index 890c0a072012..2a2f84e07ca8 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml_logging.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml_logging.h @@ -20,6 +20,8 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * + * Authors: AMD + * */ #ifndef __DML_LOGGING_H__ #define __DML_LOGGING_H__ diff --git a/drivers/gpu/drm/amd/display/dc/hdcp/Makefile b/drivers/gpu/drm/amd/display/dc/hdcp/Makefile index 4170b6eb9ec0..c1c47a6cefe1 100644 --- a/drivers/gpu/drm/amd/display/dc/hdcp/Makefile +++ b/drivers/gpu/drm/amd/display/dc/hdcp/Makefile @@ -1,4 +1,4 @@ -# Copyright 2019 Advanced Micro Devices, Inc. +# Copyright 2022 Advanced Micro Devices, Inc. # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the "Software"), diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c index 4d6493e0ccfc..608221b0dd5d 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c @@ -2746,6 +2746,8 @@ void dcn20_enable_stream(struct pipe_ctx *pipe_ctx) struct dce_hwseq *hws = dc->hwseq; unsigned int k1_div = PIXEL_RATE_DIV_NA; unsigned int k2_div = PIXEL_RATE_DIV_NA; + struct link_encoder *link_enc = link_enc_cfg_get_link_enc(pipe_ctx->stream->link); + struct stream_encoder *stream_enc = pipe_ctx->stream_res.stream_enc; if (dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) { if (dc->hwseq->funcs.setup_hpo_hw_control) @@ -2765,6 +2767,10 @@ void dcn20_enable_stream(struct pipe_ctx *pipe_ctx) dto_params.timing = &pipe_ctx->stream->timing; dto_params.ref_dtbclk_khz = dc->clk_mgr->funcs->get_dtb_ref_clk_frequency(dc->clk_mgr); dccg->funcs->set_dtbclk_dto(dccg, &dto_params); + } else { + if (dccg->funcs->enable_symclk_se) + dccg->funcs->enable_symclk_se(dccg, stream_enc->stream_enc_inst, + link_enc->transmitter - TRANSMITTER_UNIPHY_A); } if (hws->funcs.calculate_dccg_k1_k2_values && dc->res_pool->dccg->funcs->set_pixel_rate_div) { hws->funcs.calculate_dccg_k1_k2_values(pipe_ctx, &k1_div, &k2_div); diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c index 1c839e52bae5..d71faf2ecd41 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c @@ -993,11 +993,7 @@ void dcn30_set_disp_pattern_generator(const struct dc *dc, void dcn30_prepare_bandwidth(struct dc *dc, struct dc_state *context) { - bool p_state_change_support = context->bw_ctx.bw.dcn.clk.p_state_change_support; - /* Any transition into an FPO config should disable MCLK switching first to avoid - * driver and FW P-State synchronization issues. 
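/*
 * A small sketch of the edge trigger that replaces the comment being
 * removed in the dcn30_hwseq.c hunk above: dcn30_prepare_bandwidth now
 * drops P-state support only on the disabled -> enabled FPO
 * transition, rather than whenever FPO is active on either side. The
 * two bools are hypothetical stand-ins for fw_based_mclk_switching in
 * the new context and in the current clock-manager state.
 */
#include <stdbool.h>
#include <stdio.h>

static void prepare_bandwidth(bool new_fpo, bool cur_fpo)
{
	bool p_state_change_support = true;
	bool optimized_required = false;

	if (new_fpo && !cur_fpo) {	/* entering FPO */
		optimized_required = true;
		p_state_change_support = false;
	}
	printf("fpo %d -> %d: pstate=%d optimize=%d\n",
	       cur_fpo, new_fpo, p_state_change_support, optimized_required);
}

int main(void)
{
	prepare_bandwidth(true, false);	/* transition: disable P-state */
	prepare_bandwidth(true, true);	/* steady state: left alone */
	return 0;
}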
- */ - if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching || dc->clk_mgr->clks.fw_based_mclk_switching) { + if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching && !dc->clk_mgr->clks.fw_based_mclk_switching) { dc->optimized_required = true; context->bw_ctx.bw.dcn.clk.p_state_change_support = false; } @@ -1008,20 +1004,9 @@ void dcn30_prepare_bandwidth(struct dc *dc, dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, dc->clk_mgr->bw_params->clk_table.entries[dc->clk_mgr->bw_params->clk_table.num_entries - 1].memclk_mhz); dcn20_prepare_bandwidth(dc, context); - /* - * enabled -> enabled: do not disable - * enabled -> disabled: disable - * disabled -> enabled: don't care - * disabled -> disabled: don't care - */ - if (!context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) - dc_dmub_srv_p_state_delegate(dc, false, context); - if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching || dc->clk_mgr->clks.fw_based_mclk_switching) { - /* After disabling P-State, restore the original value to ensure we get the correct P-State - * on the next optimize. */ - context->bw_ctx.bw.dcn.clk.p_state_change_support = p_state_change_support; - } + if (!dc->clk_mgr->clks.fw_based_mclk_switching) + dc_dmub_srv_p_state_delegate(dc, false, context); } void dcn30_set_static_screen_control(struct pipe_ctx **pipe_ctx, diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn303/dcn303_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn303/dcn303_hwseq.c index b48b732aa647..3bc56ac346f3 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn303/dcn303_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn303/dcn303_hwseq.c @@ -2,7 +2,26 @@ /* * Copyright (C) 2021 Advanced Micro Devices, Inc. * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * * Authors: AMD + * */ #include "dcn303_hwseq.h" diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn303/dcn303_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn303/dcn303_hwseq.h index 8b69a3b76c11..7fdfc4175f80 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn303/dcn303_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn303/dcn303_hwseq.h @@ -2,7 +2,26 @@ /* * Copyright (C) 2021 Advanced Micro Devices, Inc. 
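The dcn30_prepare_bandwidth() change above narrows the disable-MCLK-switching trigger from "FPO set in either the incoming context or the current clocks" to "FPO being entered" (set in the incoming context but not yet active), which is why the save/restore of p_state_change_support could be dropped. A standalone comparison of the two predicates makes the change easy to audit:

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
        /* new_ctx = fw_based_mclk_switching wanted by the incoming state,
         * cur_clk = fw_based_mclk_switching currently active in clk_mgr. */
        for (int new_ctx = 0; new_ctx <= 1; new_ctx++) {
                for (int cur_clk = 0; cur_clk <= 1; cur_clk++) {
                        bool old_cond = new_ctx || cur_clk;  /* before the patch */
                        bool new_cond = new_ctx && !cur_clk; /* after the patch  */
                        printf("new=%d cur=%d: old=%d new=%d\n",
                               new_ctx, cur_clk, old_cond, new_cond);
                }
        }
        /* Only the disabled->enabled transition (new=1, cur=0) now forces
         * p_state_change_support off before dcn20_prepare_bandwidth runs. */
        return 0;
}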
* + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * * Authors: AMD + * */ #ifndef __DC_HWSS_DCN303_H__ diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c index f369f7af6b3a..97798cee876e 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c @@ -584,6 +584,17 @@ void dcn31_reset_hw_ctx_wrap( pipe_need_reprogram(pipe_ctx_old, pipe_ctx)) { struct clock_source *old_clk = pipe_ctx_old->clock_source; + /* Reset pipe which is seamless boot stream. */ + if (!pipe_ctx_old->plane_state && + dc->res_pool->hubbub->funcs->program_det_size && + dc->res_pool->hubbub->funcs->wait_for_det_apply) { + dc->res_pool->hubbub->funcs->program_det_size( + dc->res_pool->hubbub, pipe_ctx_old->plane_res.hubp->inst, 0); + /* Wait det size changed. 
*/ + dc->res_pool->hubbub->funcs->wait_for_det_apply( + dc->res_pool->hubbub, pipe_ctx_old->plane_res.hubp->inst); + } + dcn31_reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state); if (hws->funcs.enable_stream_gating) hws->funcs.enable_stream_gating(dc, pipe_ctx_old); diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c index 68dc99034eba..1b9f21fd4f17 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c @@ -50,6 +50,7 @@ #include "dce/dmub_hw_lock_mgr.h" #include "dcn32/dcn32_resource.h" #include "link.h" +#include "../dcn20/dcn20_hwseq.h" #define DC_LOGGER_INIT(logger) @@ -217,7 +218,7 @@ static bool dcn32_check_no_memory_request_for_cab(struct dc *dc) static uint32_t dcn32_calculate_cab_allocation(struct dc *dc, struct dc_state *ctx) { int i; - uint8_t num_ways = 0; + uint32_t num_ways = 0; uint32_t mall_ss_size_bytes = 0; mall_ss_size_bytes = ctx->bw_ctx.bw.dcn.mall_ss_size_bytes; @@ -247,7 +248,8 @@ static uint32_t dcn32_calculate_cab_allocation(struct dc *dc, struct dc_state *c bool dcn32_apply_idle_power_optimizations(struct dc *dc, bool enable) { union dmub_rb_cmd cmd; - uint8_t ways, i; + uint8_t i; + uint32_t ways; int j; bool mall_ss_unsupported = false; struct dc_plane_state *plane = NULL; @@ -307,7 +309,7 @@ bool dcn32_apply_idle_power_optimizations(struct dc *dc, bool enable) cmd.cab.header.type = DMUB_CMD__CAB_FOR_SS; cmd.cab.header.sub_type = DMUB_CMD__CAB_DCN_SS_FIT_IN_CAB; cmd.cab.header.payload_bytes = sizeof(cmd.cab) - sizeof(cmd.cab.header); - cmd.cab.cab_alloc_ways = ways; + cmd.cab.cab_alloc_ways = (uint8_t)ways; dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT); @@ -492,6 +494,7 @@ bool dcn32_set_mcm_luts( } } result = mpc->funcs->program_1dlut(mpc, lut_params, mpcc_id); + lut_params = NULL; // Shaper if (plane_state->in_shaper_func) { @@ -1676,3 +1679,33 @@ bool dcn32_is_pipe_topology_transition_seamless(struct dc *dc, return is_seamless; } + +void dcn32_prepare_bandwidth(struct dc *dc, + struct dc_state *context) +{ + bool p_state_change_support = context->bw_ctx.bw.dcn.clk.p_state_change_support; + /* Any transition into an FPO config should disable MCLK switching first to avoid + * driver and FW P-State synchronization issues. + */ + if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching || dc->clk_mgr->clks.fw_based_mclk_switching) { + dc->optimized_required = true; + context->bw_ctx.bw.dcn.clk.p_state_change_support = false; + } + + if (dc->clk_mgr->dc_mode_softmax_enabled) + if (dc->clk_mgr->clks.dramclk_khz <= dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000 && + context->bw_ctx.bw.dcn.clk.dramclk_khz > dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000) + dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, dc->clk_mgr->bw_params->clk_table.entries[dc->clk_mgr->bw_params->clk_table.num_entries - 1].memclk_mhz); + + dcn20_prepare_bandwidth(dc, context); + + if (!context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) + dc_dmub_srv_p_state_delegate(dc, false, context); + + if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching || dc->clk_mgr->clks.fw_based_mclk_switching) { + /* After disabling P-State, restore the original value to ensure we get the correct P-State + * on the next optimize. 
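In the dcn32_hwseq.c hunk above, num_ways and ways are widened from uint8_t to uint32_t: the way count is computed from surface sizes that can legitimately produce values above 255, and an 8-bit accumulator would silently wrap before the "does it fit in CAB" comparison. The narrowing cast now happens only at the DMUB ABI boundary, after the range check. A standalone demonstration with made-up sizes:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t bytes = 70u * 1024 * 1024;   /* surfaces too big for CAB */
        uint32_t bytes_per_way = 256u * 1024; /* illustrative way size */

        /* Widened arithmetic: 280 ways survives intact... */
        uint32_t ways = bytes / bytes_per_way;

        /* ...whereas an 8-bit accumulator would silently wrap to 24. */
        uint8_t ways8 = (uint8_t)(bytes / bytes_per_way);

        printf("uint32_t ways = %u, uint8_t ways = %u\n", ways, ways8);

        /* Compare the wide value against cache capacity first, and only
         * then narrow it for the DMUB command field, as the patch does
         * with cmd.cab.cab_alloc_ways = (uint8_t)ways. */
        uint32_t cache_num_ways = 16;
        if (ways <= cache_num_ways)
                printf("fits: send (uint8_t)ways to DMUB\n");
        else
                printf("does not fit: skip MALL/CAB enable\n");
        return 0;
}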
+ */ + context->bw_ctx.bw.dcn.clk.p_state_change_support = p_state_change_support; + } +} diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h index 9992e40acd21..cecf7f0f5671 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h @@ -124,4 +124,7 @@ bool dcn32_is_pipe_topology_transition_seamless(struct dc *dc, const struct dc_state *cur_ctx, const struct dc_state *new_ctx); +void dcn32_prepare_bandwidth(struct dc *dc, + struct dc_state *context); + #endif /* __DC_HWSS_DCN32_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c index 0e218f9e2a86..34737d60b965 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c @@ -20,6 +20,8 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * + * Authors: AMD + * */ #include "dm_services.h" @@ -627,12 +629,8 @@ void dcn35_power_down_on_boot(struct dc *dc) if (dc->clk_mgr->funcs->set_low_power_state) dc->clk_mgr->funcs->set_low_power_state(dc->clk_mgr); - if (dc->clk_mgr->clks.pwr_state == DCN_PWR_STATE_LOW_POWER) { - if (!dc->idle_optimizations_allowed) { - dc_dmub_srv_notify_idle(dc, true); - dc->idle_optimizations_allowed = true; - } - } + if (dc->clk_mgr->clks.pwr_state == DCN_PWR_STATE_LOW_POWER) + dc_allow_idle_optimizations(dc, true); } bool dcn35_apply_idle_power_optimizations(struct dc *dc, bool enable) diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h index 14bbdb0fa634..0dff10d179b8 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h @@ -20,6 +20,8 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * + * Authors: AMD + * */ #ifndef __DC_HWSS_DCN35_H__ diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h index cea05843990c..901891316dfb 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h @@ -188,6 +188,7 @@ struct hubbub_funcs { * compressed or detiled buffers. */ void (*program_det_size)(struct hubbub *hubbub, int hubp_inst, unsigned det_buffer_size_in_kbyte); + void (*wait_for_det_apply)(struct hubbub *hubbub, int hubp_inst); void (*program_compbuf_size)(struct hubbub *hubbub, unsigned compbuf_size_kb, bool safe_to_increase); void (*init_crb)(struct hubbub *hubbub); void (*force_usr_retraining_allow)(struct hubbub *hubbub, bool allow); diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.c b/drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.c index 5171d04519ee..4fb9cd6708d5 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.c +++ b/drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.c @@ -1,5 +1,5 @@ /* - * Copyright 2018 Advanced Micro Devices, Inc. + * Copyright 2022 Advanced Micro Devices, Inc. 
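The dcn31_reset_hw_ctx_wrap() hunk earlier shrinks a seamless-boot pipe's DET allocation to zero and then blocks on the new wait_for_det_apply() hook that the dchubbub.h hunk above declares. DET resizing is double buffered, so the wait is a program-then-poll pattern; a hypothetical standalone sketch follows, where the status helper stands in for an MMIO readback and every name is invented rather than taken from the real DCN31 register map:

#include <stdbool.h>
#include <stdio.h>

/* Fake hardware model: pretend the DET resize lands after three polls. */
static int fake_apply_countdown = 3;

static bool det_size_applied(int hubp_inst)
{
        (void)hubp_inst;
        return --fake_apply_countdown <= 0;
}

static void wait_for_det_apply_demo(int hubp_inst)
{
        /* After program_det_size(..., 0) the driver must spin until the
         * readback confirms the new size before tearing the pipe down. */
        for (int tries = 0; tries < 1000; tries++) {
                if (det_size_applied(hubp_inst)) {
                        printf("DET apply confirmed after %d polls\n", tries + 1);
                        return;
                }
                /* in the kernel this slot would be a udelay()/fsleep() */
        }
        printf("timed out; proceed best effort\n");
}

int main(void)
{
        wait_for_det_apply_demo(0);
        return 0;
}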
* * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn303/irq_service_dcn303.c b/drivers/gpu/drm/amd/display/dc/irq/dcn303/irq_service_dcn303.c index e09ca4594ec3..262bb8b74b15 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dcn303/irq_service_dcn303.c +++ b/drivers/gpu/drm/amd/display/dc/irq/dcn303/irq_service_dcn303.c @@ -2,7 +2,26 @@ /* * Copyright (C) 2021 Advanced Micro Devices, Inc. * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * * Authors: AMD + * */ #include "dm_services.h" diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn303/irq_service_dcn303.h b/drivers/gpu/drm/amd/display/dc/irq/dcn303/irq_service_dcn303.h index fd64e3848ff3..be8fe836b3f1 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dcn303/irq_service_dcn303.h +++ b/drivers/gpu/drm/amd/display/dc/irq/dcn303/irq_service_dcn303.h @@ -2,7 +2,26 @@ /* * Copyright (C) 2021 Advanced Micro Devices, Inc. * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * * Authors: AMD + * */ #ifndef __DAL_IRQ_SERVICE_DCN303_H__ diff --git a/drivers/gpu/drm/amd/display/dc/irq_types.h b/drivers/gpu/drm/amd/display/dc/irq_types.h index 530c2578db40..93354bff456a 100644 --- a/drivers/gpu/drm/amd/display/dc/irq_types.h +++ b/drivers/gpu/drm/amd/display/dc/irq_types.h @@ -178,7 +178,7 @@ enum dc_interrupt_context { INTERRUPT_CONTEXT_NUMBER }; -enum dc_interrupt_porlarity { +enum dc_interrupt_polarity { INTERRUPT_POLARITY_DEFAULT = 0, INTERRUPT_POLARITY_LOW = INTERRUPT_POLARITY_DEFAULT, INTERRUPT_POLARITY_HIGH, @@ -199,12 +199,12 @@ struct dc_interrupt_params { /* The polarity *change* which will trigger an interrupt. * If 'requested_polarity == INTERRUPT_POLARITY_BOTH', then * 'current_polarity' must be initialised. */ - enum dc_interrupt_porlarity requested_polarity; + enum dc_interrupt_polarity requested_polarity; /* If 'requested_polarity == INTERRUPT_POLARITY_BOTH', * 'current_polarity' should contain the current state, which means * the interrupt will be triggered when state changes from what is, * in 'current_polarity'. */ - enum dc_interrupt_porlarity current_polarity; + enum dc_interrupt_polarity current_polarity; enum dc_irq_source irq_source; enum dc_interrupt_context int_context; }; diff --git a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c index 0894e6aef3dd..21a39afd274b 100644 --- a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c +++ b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c @@ -839,6 +839,12 @@ bool dp_set_test_pattern( pipe_ctx->stream->vsc_infopacket.sb[17] |= (1 << 7); // sb17 bit 7 Dynamic Range: 0 = VESA range, 1 = CTA range else pipe_ctx->stream->vsc_infopacket.sb[17] &= ~(1 << 7); + + if (color_space == COLOR_SPACE_YCBCR601_LIMITED) + pipe_ctx->stream->vsc_infopacket.sb[16] &= 0xf0; + else if (color_space == COLOR_SPACE_YCBCR709_LIMITED) + pipe_ctx->stream->vsc_infopacket.sb[16] |= 1; + resource_build_info_frame(pipe_ctx); link->dc->hwss.update_info_frame(pipe_ctx); } diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c index 4538451945b4..34a4a8c0e18c 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c +++ b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c @@ -1932,8 +1932,7 @@ static void disable_link_dp(struct dc_link *link, dp_disable_link_phy(link, link_res, signal); if (link->connector_signal == SIGNAL_TYPE_EDP) { - if (!link->dc->config.edp_no_power_sequencing && - !link->skip_implict_edp_power_control) + if (!link->skip_implict_edp_power_control) link->dc->hwss.edp_power_control(link, false); } diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c index 938df1f0f7da..e32a7974a4bc 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c @@ -182,7 +182,7 @@ bool edp_set_backlight_level_nits(struct dc_link *link, &backlight_control, 1) != DC_OK) return false; } else { - const uint8_t backlight_enable = DP_EDP_PANEL_LUMINANCE_CONTROL_ENABLE; + uint8_t backlight_enable = 0; struct target_luminance_value *target_luminance = NULL; //if target luminance value is greater than 24 bits, clip the value to 24 bits @@ -191,6 +191,11 @@ bool edp_set_backlight_level_nits(struct dc_link *link, target_luminance = (struct target_luminance_value 
*)&backlight_millinits; + core_link_read_dpcd(link, DP_EDP_BACKLIGHT_MODE_SET_REGISTER, + &backlight_enable, sizeof(uint8_t)); + + backlight_enable |= DP_EDP_PANEL_LUMINANCE_CONTROL_ENABLE; + if (core_link_write_dpcd(link, DP_EDP_BACKLIGHT_MODE_SET_REGISTER, &backlight_enable, sizeof(backlight_enable)) != DC_OK) @@ -283,8 +288,8 @@ bool set_default_brightness_aux(struct dc_link *link) if (link && link->dpcd_sink_ext_caps.bits.oled == 1) { if (!read_default_bl_aux(link, &default_backlight)) default_backlight = 150000; - // if < 5 nits or > 5000, it might be wrong readback - if (default_backlight < 5000 || default_backlight > 5000000) + // if < 1 nit or > 5000 nits, it might be a wrong readback + if (default_backlight < 1000 || default_backlight > 5000000) default_backlight = 150000; // return edp_set_backlight_level_nits(link, true, diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn303.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn303.c index b42369984473..878700160fa9 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn303.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn303.c @@ -2,7 +2,26 @@ /* * Copyright (C) 2021 Advanced Micro Devices, Inc. * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
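The link_edp_panel_control.c hunk above converts a blind write of DP_EDP_PANEL_LUMINANCE_CONTROL_ENABLE into a read-modify-write of DP_EDP_BACKLIGHT_MODE_SET_REGISTER, so whatever mode bits the panel already has programmed survive. A standalone sketch of the idiom, with an illustrative bit value rather than the real DPCD definition:

#include <stdint.h>
#include <stdio.h>

#define PANEL_LUMINANCE_CONTROL_ENABLE 0x10 /* illustrative bit value */

static uint8_t fake_dpcd = 0x05; /* pretend the panel already has bits set */

static uint8_t dpcd_read(void)    { return fake_dpcd; }
static void dpcd_write(uint8_t v) { fake_dpcd = v; }

int main(void)
{
        /* Blind write (the old behavior) would clobber the 0x05 above:
         * dpcd_write(PANEL_LUMINANCE_CONTROL_ENABLE); */

        /* Read-modify-write, as in the patched code: preserve what is
         * there and OR in only the bit we own. */
        uint8_t mode = dpcd_read();
        mode |= PANEL_LUMINANCE_CONTROL_ENABLE;
        dpcd_write(mode);

        printf("register = 0x%02x (old bits kept)\n", dpcd_read());
        return 0;
}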
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * * Authors: AMD + * */ #ifndef _DMUB_DCN303_H_ diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c index b99db771e071..e43e8d4bfe37 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c @@ -352,6 +352,7 @@ static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic) funcs->init_reg_offsets = dmub_srv_dcn35_regs_init; funcs->is_hw_powered_up = dmub_dcn35_is_hw_powered_up; + funcs->should_detect = dmub_dcn35_should_detect; break; default: diff --git a/drivers/gpu/drm/amd/display/include/signal_types.h b/drivers/gpu/drm/amd/display/include/signal_types.h index 325c5ba4c82a..1b14b17a79c7 100644 --- a/drivers/gpu/drm/amd/display/include/signal_types.h +++ b/drivers/gpu/drm/amd/display/include/signal_types.h @@ -132,7 +132,6 @@ static inline bool dc_is_audio_capable_signal(enum signal_type signal) { return (signal == SIGNAL_TYPE_DISPLAY_PORT || signal == SIGNAL_TYPE_DISPLAY_PORT_MST || - signal == SIGNAL_TYPE_VIRTUAL || dc_is_hdmi_signal(signal)); } diff --git a/drivers/gpu/drm/amd/include/asic_reg/mp/mp_13_0_2_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/mp/mp_13_0_2_sh_mask.h index 6e29a185de51..765d9ca2316f 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/mp/mp_13_0_2_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/mp/mp_13_0_2_sh_mask.h @@ -242,6 +242,34 @@ //MP0_SMN_C2PMSG_103 #define MP0_SMN_C2PMSG_103__CONTENT__SHIFT 0x0 #define MP0_SMN_C2PMSG_103__CONTENT_MASK 0xFFFFFFFFL +//MP0_SMN_C2PMSG_109 +#define MP0_SMN_C2PMSG_109__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_109__CONTENT_MASK 0xFFFFFFFFL +//MP0_SMN_C2PMSG_126 +#define MP0_SMN_C2PMSG_126__GPU_ERR_MEM_TRAINING__SHIFT 0x0 +#define MP0_SMN_C2PMSG_126__GPU_ERR_FW_LOAD__SHIFT 0x1 +#define MP0_SMN_C2PMSG_126__GPU_ERR_WAFL_LINK_TRAINING__SHIFT 0x2 +#define MP0_SMN_C2PMSG_126__GPU_ERR_XGMI_LINK_TRAINING__SHIFT 0x3 +#define MP0_SMN_C2PMSG_126__GPU_ERR_USR_CP_LINK_TRAINING__SHIFT 0x4 +#define MP0_SMN_C2PMSG_126__GPU_ERR_USR_DP_LINK_TRAINING__SHIFT 0x5 +#define MP0_SMN_C2PMSG_126__GPU_ERR_HBM_MEM_TEST__SHIFT 0x6 +#define MP0_SMN_C2PMSG_126__GPU_ERR_HBM_BIST_TEST__SHIFT 0x7 +#define MP0_SMN_C2PMSG_126__SOCKET_ID__SHIFT 0x8 +#define MP0_SMN_C2PMSG_126__AID_ID__SHIFT 0xb +#define MP0_SMN_C2PMSG_126__HBM_ID__SHIFT 0xd +#define MP0_SMN_C2PMSG_126__BOOT_STATUS__SHIFT 0x1f +#define MP0_SMN_C2PMSG_126__GPU_ERR_MEM_TRAINING_MASK 0x00000001L +#define MP0_SMN_C2PMSG_126__GPU_ERR_FW_LOAD_MASK 0x00000002L +#define MP0_SMN_C2PMSG_126__GPU_ERR_WAFL_LINK_TRAINING_MASK 0x00000004L +#define MP0_SMN_C2PMSG_126__GPU_ERR_XGMI_LINK_TRAINING_MASK 0x00000008L +#define MP0_SMN_C2PMSG_126__GPU_ERR_USR_CP_LINK_TRAINING_MASK 0x00000010L +#define MP0_SMN_C2PMSG_126__GPU_ERR_USR_DP_LINK_TRAINING_MASK 0x00000020L +#define MP0_SMN_C2PMSG_126__GPU_ERR_HBM_MEM_TEST_MASK 0x00000040L +#define MP0_SMN_C2PMSG_126__GPU_ERR_HBM_BIST_TEST_MASK 0x00000080L +#define MP0_SMN_C2PMSG_126__SOCKET_ID_MASK 0x00000700L +#define MP0_SMN_C2PMSG_126__AID_ID_MASK 0x00001800L +#define MP0_SMN_C2PMSG_126__HBM_ID_MASK 0x00002000L +#define MP0_SMN_C2PMSG_126__BOOT_STATUS_MASK 0x80000000L //MP0_SMN_IH_CREDIT #define MP0_SMN_IH_CREDIT__CREDIT_VALUE__SHIFT 0x0 #define 
MP0_SMN_IH_CREDIT__CLIENT_ID__SHIFT 0x10 diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h index 3201808c2dd8..cd3c40a86029 100644 --- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h +++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h @@ -1080,33 +1080,35 @@ struct gpu_metrics_v3_0 { uint16_t average_ipu_activity[8]; /* time filtered per-core C0 residency % [0-100]*/ uint16_t average_core_c0_activity[16]; - /* time filtered DRAM read bandwidth [GB/sec] */ + /* time filtered DRAM read bandwidth [MB/sec] */ uint16_t average_dram_reads; - /* time filtered DRAM write bandwidth [GB/sec] */ + /* time filtered DRAM write bandwidth [MB/sec] */ uint16_t average_dram_writes; /* Driver attached timestamp (in ns) */ uint64_t system_clock_counter; /* Power/Energy */ - /* average dGPU + APU power on A + A platform */ + /* time filtered power used for PPT/STAPM [APU+dGPU] [mW] */ uint32_t average_socket_power; - /* average IPU power [W] */ + /* time filtered IPU power [mW] */ uint16_t average_ipu_power; - /* average APU power [W] */ + /* time filtered APU power [mW] */ uint32_t average_apu_power; - /* average dGPU power [W] */ + /* time filtered GFX power [mW] */ + uint32_t average_gfx_power; + /* time filtered dGPU power [mW] */ uint32_t average_dgpu_power; - /* sum of core power across all cores in the socket [W] */ - uint32_t average_core_power; - /* calculated core power [W] */ - uint16_t core_power[16]; - /* maximum IRM defined STAPM power limit [W] */ + /* time filtered sum of core power across all cores in the socket [mW] */ + uint32_t average_all_core_power; + /* calculated core power [mW] */ + uint16_t average_core_power[16]; + /* maximum IRM defined STAPM power limit [mW] */ uint16_t stapm_power_limit; - /* time filtered STAPM power limit [W] */ + /* time filtered STAPM power limit [mW] */ uint16_t current_stapm_power_limit; - /* Average clocks */ + /* time filtered clocks [MHz] */ uint16_t average_gfxclk_frequency; uint16_t average_socclk_frequency; uint16_t average_vpeclk_frequency; @@ -1115,7 +1117,7 @@ struct gpu_metrics_v3_0 { uint16_t average_vclk_frequency; /* Current clocks */ - /* target core frequency */ + /* target core frequency [MHz] */ uint16_t current_coreclk[16]; /* CCLK frequency limit enforced on classic cores [MHz] */ uint16_t current_core_maxfreq; diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c index 358bb5e485f2..4ba9195c83c5 100644 --- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c @@ -3288,10 +3288,6 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj, uint32_t gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0); uint32_t tmp; - /* under multi-vf mode, the hwmon attributes are all not supported */ - if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) - return 0; - /* under pp one vf mode manage of hwmon attributes is not supported */ if (amdgpu_sriov_is_pp_one_vf(adev)) effective_mode &= ~S_IWUSR; @@ -4162,6 +4158,7 @@ err_out: int amdgpu_pm_sysfs_init(struct amdgpu_device *adev) { + enum amdgpu_sriov_vf_mode mode; uint32_t mask = 0; int ret; @@ -4173,17 +4170,21 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev) if (adev->pm.dpm_enabled == 0) return 0; - adev->pm.int_hwmon_dev = hwmon_device_register_with_groups(adev->dev, - DRIVER_NAME, adev, - hwmon_groups); - if (IS_ERR(adev->pm.int_hwmon_dev)) { - ret = PTR_ERR(adev->pm.int_hwmon_dev); - dev_err(adev->dev, - "Unable to register hwmon device: %d\n", ret); - return ret; + 
mode = amdgpu_virt_get_sriov_vf_mode(adev); + + /* under multi-vf mode, the hwmon attributes are all not supported */ + if (mode != SRIOV_VF_MODE_MULTI_VF) { + adev->pm.int_hwmon_dev = hwmon_device_register_with_groups(adev->dev, + DRIVER_NAME, adev, + hwmon_groups); + if (IS_ERR(adev->pm.int_hwmon_dev)) { + ret = PTR_ERR(adev->pm.int_hwmon_dev); + dev_err(adev->dev, "Unable to register hwmon device: %d\n", ret); + return ret; + } } - switch (amdgpu_virt_get_sriov_vf_mode(adev)) { + switch (mode) { case SRIOV_VF_MODE_ONE_VF: mask = ATTR_FLAG_ONEVF; break; @@ -4290,10 +4291,10 @@ static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *a seq_printf(m, "\t%u mV (VDDNB)\n", value); size = sizeof(uint32_t); if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_AVG_POWER, (void *)&query, &size)) - seq_printf(m, "\t%u.%u W (average GPU)\n", query >> 8, query & 0xff); + seq_printf(m, "\t%u.%02u W (average GPU)\n", query >> 8, query & 0xff); size = sizeof(uint32_t); if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_INPUT_POWER, (void *)&query, &size)) - seq_printf(m, "\t%u.%u W (current GPU)\n", query >> 8, query & 0xff); + seq_printf(m, "\t%u.%02u W (current GPU)\n", query >> 8, query & 0xff); size = sizeof(value); seq_printf(m, "\n"); diff --git a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c index 9e4f8a4104a3..914c15387157 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c +++ b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c @@ -1022,6 +1022,9 @@ static int pp_get_power_limit(void *handle, uint32_t *limit, *limit /= 100; } break; + case PP_PWR_LIMIT_MIN: + *limit = 0; + break; default: ret = -EOPNOTSUPP; break; diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pptable_v1_0.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pptable_v1_0.h index 9fcad69a9f34..2cf2a7b12623 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pptable_v1_0.h +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pptable_v1_0.h @@ -367,7 +367,7 @@ typedef struct _ATOM_Tonga_VCE_State_Record { typedef struct _ATOM_Tonga_VCE_State_Table { UCHAR ucRevId; UCHAR ucNumEntries; - ATOM_Tonga_VCE_State_Record entries[1]; + ATOM_Tonga_VCE_State_Record entries[]; } ATOM_Tonga_VCE_State_Table; typedef struct _ATOM_Tonga_PowerTune_Table { @@ -481,7 +481,7 @@ typedef struct _ATOM_Tonga_Hard_Limit_Record { typedef struct _ATOM_Tonga_Hard_Limit_Table { UCHAR ucRevId; UCHAR ucNumEntries; - ATOM_Tonga_Hard_Limit_Record entries[1]; + ATOM_Tonga_Hard_Limit_Record entries[]; } ATOM_Tonga_Hard_Limit_Table; typedef struct _ATOM_Tonga_GPIO_Table { diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c index 5a2371484a58..11372fcc59c8 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c @@ -1823,9 +1823,7 @@ static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr) data->mclk_dpm_key_disabled = hwmgr->feature_mask & PP_MCLK_DPM_MASK ? false : true; data->sclk_dpm_key_disabled = hwmgr->feature_mask & PP_SCLK_DPM_MASK ? 
false : true; - data->pcie_dpm_key_disabled = - !amdgpu_device_pcie_dynamic_switching_supported() || - !(hwmgr->feature_mask & PP_PCIE_DPM_MASK); + data->pcie_dpm_key_disabled = !(hwmgr->feature_mask & PP_PCIE_DPM_MASK); /* need to set voltage control types before EVV patching */ data->voltage_control = SMU7_VOLTAGE_CONTROL_NONE; data->vddci_control = SMU7_VOLTAGE_CONTROL_NONE; diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_pptable.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_pptable.h index 8b0590b834cc..de2926df5ed7 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_pptable.h +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_pptable.h @@ -129,7 +129,7 @@ typedef struct _ATOM_Vega10_State { typedef struct _ATOM_Vega10_State_Array { UCHAR ucRevId; UCHAR ucNumEntries; /* Number of entries. */ - ATOM_Vega10_State states[1]; /* Dynamically allocate entries. */ + ATOM_Vega10_State states[]; /* Dynamically allocate entries. */ } ATOM_Vega10_State_Array; typedef struct _ATOM_Vega10_CLK_Dependency_Record { @@ -169,37 +169,37 @@ typedef struct _ATOM_Vega10_GFXCLK_Dependency_Table { typedef struct _ATOM_Vega10_MCLK_Dependency_Table { UCHAR ucRevId; UCHAR ucNumEntries; /* Number of entries. */ - ATOM_Vega10_MCLK_Dependency_Record entries[1]; /* Dynamically allocate entries. */ + ATOM_Vega10_MCLK_Dependency_Record entries[]; /* Dynamically allocate entries. */ } ATOM_Vega10_MCLK_Dependency_Table; typedef struct _ATOM_Vega10_SOCCLK_Dependency_Table { UCHAR ucRevId; UCHAR ucNumEntries; /* Number of entries. */ - ATOM_Vega10_CLK_Dependency_Record entries[1]; /* Dynamically allocate entries. */ + ATOM_Vega10_CLK_Dependency_Record entries[]; /* Dynamically allocate entries. */ } ATOM_Vega10_SOCCLK_Dependency_Table; typedef struct _ATOM_Vega10_DCEFCLK_Dependency_Table { UCHAR ucRevId; UCHAR ucNumEntries; /* Number of entries. */ - ATOM_Vega10_CLK_Dependency_Record entries[1]; /* Dynamically allocate entries. */ + ATOM_Vega10_CLK_Dependency_Record entries[]; /* Dynamically allocate entries. */ } ATOM_Vega10_DCEFCLK_Dependency_Table; typedef struct _ATOM_Vega10_PIXCLK_Dependency_Table { UCHAR ucRevId; UCHAR ucNumEntries; /* Number of entries. */ - ATOM_Vega10_CLK_Dependency_Record entries[1]; /* Dynamically allocate entries. */ + ATOM_Vega10_CLK_Dependency_Record entries[]; /* Dynamically allocate entries. */ } ATOM_Vega10_PIXCLK_Dependency_Table; typedef struct _ATOM_Vega10_DISPCLK_Dependency_Table { UCHAR ucRevId; UCHAR ucNumEntries; /* Number of entries.*/ - ATOM_Vega10_CLK_Dependency_Record entries[1]; /* Dynamically allocate entries. */ + ATOM_Vega10_CLK_Dependency_Record entries[]; /* Dynamically allocate entries. */ } ATOM_Vega10_DISPCLK_Dependency_Table; typedef struct _ATOM_Vega10_PHYCLK_Dependency_Table { UCHAR ucRevId; UCHAR ucNumEntries; /* Number of entries. */ - ATOM_Vega10_CLK_Dependency_Record entries[1]; /* Dynamically allocate entries. */ + ATOM_Vega10_CLK_Dependency_Record entries[]; /* Dynamically allocate entries. 
*/ } ATOM_Vega10_PHYCLK_Dependency_Table; typedef struct _ATOM_Vega10_MM_Dependency_Record { @@ -213,7 +213,7 @@ typedef struct _ATOM_Vega10_MM_Dependency_Record { typedef struct _ATOM_Vega10_MM_Dependency_Table { UCHAR ucRevId; UCHAR ucNumEntries; /* Number of entries */ - ATOM_Vega10_MM_Dependency_Record entries[1]; /* Dynamically allocate entries */ + ATOM_Vega10_MM_Dependency_Record entries[]; /* Dynamically allocate entries */ } ATOM_Vega10_MM_Dependency_Table; typedef struct _ATOM_Vega10_PCIE_Record { @@ -225,7 +225,7 @@ typedef struct _ATOM_Vega10_PCIE_Record { typedef struct _ATOM_Vega10_PCIE_Table { UCHAR ucRevId; UCHAR ucNumEntries; /* Number of entries */ - ATOM_Vega10_PCIE_Record entries[1]; /* Dynamically allocate entries. */ + ATOM_Vega10_PCIE_Record entries[]; /* Dynamically allocate entries. */ } ATOM_Vega10_PCIE_Table; typedef struct _ATOM_Vega10_Voltage_Lookup_Record { @@ -235,7 +235,7 @@ typedef struct _ATOM_Vega10_Voltage_Lookup_Record { typedef struct _ATOM_Vega10_Voltage_Lookup_Table { UCHAR ucRevId; UCHAR ucNumEntries; /* Number of entries */ - ATOM_Vega10_Voltage_Lookup_Record entries[1]; /* Dynamically allocate entries */ + ATOM_Vega10_Voltage_Lookup_Record entries[]; /* Dynamically allocate entries */ } ATOM_Vega10_Voltage_Lookup_Table; typedef struct _ATOM_Vega10_Fan_Table { @@ -327,7 +327,7 @@ typedef struct _ATOM_Vega10_VCE_State_Record { typedef struct _ATOM_Vega10_VCE_State_Table { UCHAR ucRevId; UCHAR ucNumEntries; - ATOM_Vega10_VCE_State_Record entries[1]; + ATOM_Vega10_VCE_State_Record entries[]; } ATOM_Vega10_VCE_State_Table; typedef struct _ATOM_Vega10_PowerTune_Table { @@ -427,7 +427,7 @@ typedef struct _ATOM_Vega10_Hard_Limit_Record { typedef struct _ATOM_Vega10_Hard_Limit_Table { UCHAR ucRevId; UCHAR ucNumEntries; - ATOM_Vega10_Hard_Limit_Record entries[1]; + ATOM_Vega10_Hard_Limit_Record entries[]; } ATOM_Vega10_Hard_Limit_Table; typedef struct _Vega10_PPTable_Generic_SubTable_Header { diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c index 7087f9840ab7..23b00eddc1af 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c @@ -733,7 +733,7 @@ static int smu_early_init(void *handle) smu->adev = adev; smu->pm_enabled = !!amdgpu_dpm; smu->is_apu = false; - smu->smu_baco.state = SMU_BACO_STATE_EXIT; + smu->smu_baco.state = SMU_BACO_STATE_NONE; smu->smu_baco.platform_support = false; smu->user_dpm_profile.fan_mode = -1; @@ -1678,13 +1678,14 @@ static int smu_disable_dpms(struct smu_context *smu) } /* - * For SMU 13.0.4/11, PMFW will handle the features disablement properly + * For SMU 13.0.4/11 and 14.0.0, PMFW will handle the features disablement properly * for gpu reset and S0i3 cases. Driver involvement is unnecessary. 
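The pptable hunks above convert one-element trailing arrays (entries[1]) into C99 flexible array members (entries[]). sizeof() on the struct then covers only the fixed header, so the usual header-plus-n-entries size math is exact, and modern compilers (with -fstrict-flex-arrays or FORTIFY) can bounds-check indexing instead of assuming exactly one element. A standalone demonstration:

#include <stdio.h>
#include <stdlib.h>

typedef struct {
        unsigned char ucRevId;
        unsigned char ucNumEntries;
        unsigned short entries[]; /* flexible array member, was entries[1] */
} demo_table;

int main(void)
{
        unsigned char num = 4;

        /* sizeof(demo_table) no longer includes a phantom first entry,
         * so header + n * entry allocation is exact. */
        demo_table *t = malloc(sizeof(*t) + num * sizeof(t->entries[0]));
        if (!t)
                return 1;
        t->ucRevId = 1;
        t->ucNumEntries = num;
        for (unsigned char i = 0; i < num; i++)
                t->entries[i] = 100 * i;

        /* Consumers bound iteration by ucNumEntries, exactly as the
         * ATOM table parsers do. */
        for (unsigned char i = 0; i < t->ucNumEntries; i++)
                printf("entry[%u] = %u\n", i, t->entries[i]);

        printf("header size = %zu bytes\n", sizeof(demo_table));
        free(t);
        return 0;
}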
*/ if (amdgpu_in_reset(adev) || adev->in_s0ix) { switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) { case IP_VERSION(13, 0, 4): case IP_VERSION(13, 0, 11): + case IP_VERSION(14, 0, 0): return 0; default: break; @@ -1741,10 +1742,31 @@ static int smu_smc_hw_cleanup(struct smu_context *smu) return 0; } +static int smu_reset_mp1_state(struct smu_context *smu) +{ + struct amdgpu_device *adev = smu->adev; + int ret = 0; + + if ((!adev->in_runpm) && (!adev->in_suspend) && + (!amdgpu_in_reset(adev))) + switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) { + case IP_VERSION(13, 0, 0): + case IP_VERSION(13, 0, 7): + case IP_VERSION(13, 0, 10): + ret = smu_set_mp1_state(smu, PP_MP1_STATE_UNLOAD); + break; + default: + break; + } + + return ret; +} + static int smu_hw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct smu_context *smu = adev->powerplay.pp_handle; + int ret; if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) return 0; @@ -1762,7 +1784,15 @@ static int smu_hw_fini(void *handle) adev->pm.dpm_enabled = false; - return smu_smc_hw_cleanup(smu); + ret = smu_smc_hw_cleanup(smu); + if (ret) + return ret; + + ret = smu_reset_mp1_state(smu); + if (ret) + return ret; + + return 0; } static void smu_late_fini(void *handle) @@ -2434,7 +2464,6 @@ int smu_get_power_limit(void *handle, break; default: return -EOPNOTSUPP; - break; } switch (pp_limit_level) { @@ -2452,7 +2481,6 @@ int smu_get_power_limit(void *handle, break; default: return -EOPNOTSUPP; - break; } if (limit_type != SMU_DEFAULT_PPT_LIMIT) { @@ -2486,7 +2514,7 @@ int smu_get_power_limit(void *handle, *limit = smu->min_power_limit; break; default: - break; + return -EINVAL; } } diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h index 839553a86aa2..8def291b18bc 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h @@ -419,6 +419,7 @@ enum smu_reset_mode { enum smu_baco_state { SMU_BACO_STATE_ENTER = 0, SMU_BACO_STATE_EXIT, + SMU_BACO_STATE_NONE, }; struct smu_baco_context { diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h index b483c8e096e7..22f88842a7fd 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h @@ -150,97 +150,39 @@ typedef struct { } DpmClocks_t; typedef struct { - uint16_t CoreFrequency[16]; //Target core frequency [MHz] - uint16_t CorePower[16]; //CAC calculated core power [W] [Q8.8] - uint16_t CoreTemperature[16]; //TSEN measured core temperature [C] [Q8.8] - uint16_t GfxTemperature; //TSEN measured GFX temperature [C] [Q8.8] - uint16_t SocTemperature; //TSEN measured SOC temperature [C] [Q8.8] - uint16_t StapmOpnLimit; //Maximum IRM defined STAPM power limit [W] [Q8.8] - uint16_t StapmCurrentLimit; //Time filtered STAPM power limit [W] [Q8.8] - uint16_t InfrastructureCpuMaxFreq; //CCLK frequency limit enforced on classic cores [MHz] - uint16_t InfrastructureGfxMaxFreq; //GFXCLK frequency limit enforced on GFX [MHz] - uint16_t SkinTemp; //Maximum skin temperature reported by APU and HS2 chassis sensors [C] [Q8.8] - uint16_t AverageGfxclkFrequency; //Time filtered target GFXCLK frequency [MHz] - uint16_t AverageFclkFrequency; //Time filtered target FCLK frequency [MHz] - uint16_t AverageGfxActivity; //Time filtered GFX busy % [0-100] [Q8.8] - uint16_t AverageSocclkFrequency; 
//Time filtered target SOCCLK frequency [MHz] - uint16_t AverageVclkFrequency; //Time filtered target VCLK frequency [MHz] - uint16_t AverageVcnActivity; //Time filtered VCN busy % [0-100] [Q8.8] - uint16_t AverageVpeclkFrequency; //Time filtered target VPECLK frequency [MHz] - uint16_t AverageIpuclkFrequency; //Time filtered target IPUCLK frequency [MHz] - uint16_t AverageIpuBusy[8]; //Time filtered IPU per-column busy % [0-100] [Q8.8] - uint16_t AverageDRAMReads; //Time filtered DRAM read bandwidth [GB/sec] [Q8.8] - uint16_t AverageDRAMWrites; //Time filtered DRAM write bandwidth [GB/sec] [Q8.8] - uint16_t AverageCoreC0Residency[16]; //Time filtered per-core C0 residency % [0-100] [Q8.8] - uint16_t IpuPower; //Time filtered IPU power [W] [Q8.8] - uint32_t ApuPower; //Time filtered APU power [W] [Q24.8] - uint32_t dGpuPower; //Time filtered dGPU power [W] [Q24.8] - uint32_t AverageSocketPower; //Time filtered power used for PPT/STAPM [APU+dGPU] [W] [Q24.8] - uint32_t AverageCorePower; //Time filtered sum of core power across all cores in the socket [W] [Q24.8] - uint32_t FilterAlphaValue; //Metrics table alpha filter time constant [us] - uint32_t MetricsCounter; //Counter that is incremented on every metrics table update [PM_TIMER cycles] + uint16_t CoreFrequency[16]; //Target core frequency [MHz] + uint16_t CorePower[16]; //CAC calculated core power [mW] + uint16_t CoreTemperature[16]; //TSEN measured core temperature [centi-C] + uint16_t GfxTemperature; //TSEN measured GFX temperature [centi-C] + uint16_t SocTemperature; //TSEN measured SOC temperature [centi-C] + uint16_t StapmOpnLimit; //Maximum IRM defined STAPM power limit [mW] + uint16_t StapmCurrentLimit; //Time filtered STAPM power limit [mW] + uint16_t InfrastructureCpuMaxFreq; //CCLK frequency limit enforced on classic cores [MHz] + uint16_t InfrastructureGfxMaxFreq; //GFXCLK frequency limit enforced on GFX [MHz] + uint16_t SkinTemp; //Maximum skin temperature reported by APU and HS2 chassis sensors [centi-C] + uint16_t GfxclkFrequency; //Time filtered target GFXCLK frequency [MHz] + uint16_t FclkFrequency; //Time filtered target FCLK frequency [MHz] + uint16_t GfxActivity; //Time filtered GFX busy % [0-100] + uint16_t SocclkFrequency; //Time filtered target SOCCLK frequency [MHz] + uint16_t VclkFrequency; //Time filtered target VCLK frequency [MHz] + uint16_t VcnActivity; //Time filtered VCN busy % [0-100] + uint16_t VpeclkFrequency; //Time filtered target VPECLK frequency [MHz] + uint16_t IpuclkFrequency; //Time filtered target IPUCLK frequency [MHz] + uint16_t IpuBusy[8]; //Time filtered IPU per-column busy % [0-100] + uint16_t DRAMReads; //Time filtered DRAM read bandwidth [MB/sec] + uint16_t DRAMWrites; //Time filtered DRAM write bandwidth [MB/sec] + uint16_t CoreC0Residency[16]; //Time filtered per-core C0 residency % [0-100] + uint16_t IpuPower; //Time filtered IPU power [mW] + uint32_t ApuPower; //Time filtered APU power [mW] + uint32_t GfxPower; //Time filtered GFX power [mW] + uint32_t dGpuPower; //Time filtered dGPU power [mW] + uint32_t SocketPower; //Time filtered power used for PPT/STAPM [APU+dGPU] [mW] + uint32_t AllCorePower; //Time filtered sum of core power across all cores in the socket [mW] + uint32_t FilterAlphaValue; //Metrics table alpha filter time constant [us] + uint32_t MetricsCounter; //Counter that is incremented on every metrics table update [PM_TIMER cycles] + uint32_t spare[16]; } SmuMetrics_t; -typedef struct { - uint16_t GfxclkFrequency; //[MHz] - uint16_t SocclkFrequency; //[MHz] - uint16_t 
VclkFrequency; //[MHz] - uint16_t DclkFrequency; //[MHz] - uint16_t MemclkFrequency; //[MHz] - uint16_t spare; - uint16_t UvdActivity; //[centi] - uint16_t GfxActivity; //[centi] - - uint16_t Voltage[2]; //[mV] indices: VDDCR_VDD, VDDCR_SOC - uint16_t Current[2]; //[mA] indices: VDDCR_VDD, VDDCR_SOC - uint16_t Power[2]; //[mW] indices: VDDCR_VDD, VDDCR_SOC - - uint16_t CoreFrequency[8]; //[MHz] - uint16_t CorePower[8]; //[mW] - uint16_t CoreTemperature[8]; //[centi-Celsius] - uint16_t L3Frequency[2]; //[MHz] - uint16_t L3Temperature[2]; //[centi-Celsius] - - uint16_t spare2[24]; - - uint16_t GfxTemperature; //[centi-Celsius] - uint16_t SocTemperature; //[centi-Celsius] - uint16_t ThrottlerStatus; - - uint16_t CurrentSocketPower; //[mW] - uint16_t StapmOpnLimit; //[W] - uint16_t StapmCurrentLimit; //[W] - uint32_t ApuPower; //[mW] - uint32_t dGpuPower; //[mW] - - uint16_t VddTdcValue; //[mA] - uint16_t SocTdcValue; //[mA] - uint16_t VddEdcValue; //[mA] - uint16_t SocEdcValue; //[mA] - - uint16_t InfrastructureCpuMaxFreq; //[MHz] - uint16_t InfrastructureGfxMaxFreq; //[MHz] - - uint16_t SkinTemp; - uint16_t DeviceState; - uint16_t CurTemp; //[centi-Celsius] - uint16_t FilterAlphaValue; //[m] - - uint16_t AverageGfxclkFrequency; - uint16_t AverageFclkFrequency; - uint16_t AverageGfxActivity; - uint16_t AverageSocclkFrequency; - uint16_t AverageVclkFrequency; - uint16_t AverageVcnActivity; - uint16_t AverageDRAMReads; //Filtered DF Bandwidth::DRAM Reads - uint16_t AverageDRAMWrites; //Filtered DF Bandwidth::DRAM Writes - uint16_t AverageSocketPower; //Filtered value of CurrentSocketPower - uint16_t AverageCorePower[2]; //Filtered of [sum of CorePower[8] per ccx]) - uint16_t AverageCoreC0Residency[16]; //Filtered of [average C0 residency % per core] - uint16_t spare1; - uint32_t MetricsCounter; //Counts the # of metrics table parameter reads per update to the metrics table, i.e. if the metrics table update happens every 1 second, this value could be up to 1000 if the smu collected metrics data every cycle, or as low as 0 if the smu was asleep the whole time. Reset to 0 after writing. -} SmuMetrics_legacy_t; - //ISP tile definitions typedef enum { TILE_XTILE = 0, //ONO0 diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h index cc02f979e9e9..95cb919718ae 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h @@ -299,5 +299,7 @@ int smu_v13_0_update_pcie_parameters(struct smu_context *smu, uint8_t pcie_gen_cap, uint8_t pcie_width_cap); +int smu_v13_0_disable_pmfw_state(struct smu_context *smu); + #endif #endif diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c index 090249b6422a..1de9f8b5cc5f 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c @@ -2115,7 +2115,7 @@ static int sienna_cichlid_update_pcie_parameters(struct smu_context *smu, min_lane_width = min_lane_width > max_lane_width ? 
max_lane_width : min_lane_width; - if (!amdgpu_device_pcie_dynamic_switching_supported()) { + if (!(smu->adev->pm.pp_feature & PP_PCIE_DPM_MASK)) { pcie_table->pcie_gen[0] = max_gen_speed; pcie_table->pcie_lane[0] = max_lane_width; } else { @@ -2461,12 +2461,18 @@ static bool sienna_cichlid_is_mode1_reset_supported(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; uint32_t val; + uint32_t smu_version; + int ret; /** * SRIOV env will not support SMU mode1 reset * PM FW support mode1 reset from 58.26 */ - if (amdgpu_sriov_vf(adev) || (smu->smc_fw_version < 0x003a1a00)) + ret = smu_cmn_get_smc_version(smu, NULL, &smu_version); + if (ret) + return false; + + if (amdgpu_sriov_vf(adev) || (smu_version < 0x003a1a00)) return false; /** diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c index 3efc6aed28f1..762b31455a0b 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c @@ -234,24 +234,15 @@ static int vangogh_tables_init(struct smu_context *smu) PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); SMU_TABLE_INIT(tables, SMU_TABLE_ACTIVITY_MONITOR_COEFF, sizeof(DpmActivityMonitorCoeffExt_t), PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); + SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, max(sizeof(SmuMetrics_t), sizeof(SmuMetrics_legacy_t)), + PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); - if (smu->smc_fw_if_version < 0x3) { - SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_legacy_t), - PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); - smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_legacy_t), GFP_KERNEL); - } else { - SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_t), - PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); - smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL); - } + smu_table->metrics_table = kzalloc(max(sizeof(SmuMetrics_t), sizeof(SmuMetrics_legacy_t)), GFP_KERNEL); if (!smu_table->metrics_table) goto err0_out; smu_table->metrics_time = 0; - if (smu->smc_fw_version >= 0x043F3E00) - smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v2_3); - else - smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v2_2); + smu_table->gpu_metrics_table_size = max(sizeof(struct gpu_metrics_v2_3), sizeof(struct gpu_metrics_v2_2)); smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL); if (!smu_table->gpu_metrics_table) goto err1_out; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c index f082cd4b40c1..1a6675d70a4b 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c @@ -1931,11 +1931,19 @@ static bool aldebaran_is_mode1_reset_supported(struct smu_context *smu) #if 0 struct amdgpu_device *adev = smu->adev; uint32_t val; + uint32_t smu_version; + int ret; + /** * PM FW version support mode1 reset from 68.07 */ - if ((smu->smc_fw_version < 0x00440700)) + ret = smu_cmn_get_smc_version(smu, NULL, &smu_version); + if (ret) return false; + + if ((smu_version < 0x00440700)) + return false; + /** * mode1 reset relies on PSP, so we should check if * PSP is alive. 
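The vangogh_ppt.c hunk above stops branching on firmware interface version at table-init time and instead sizes the metrics buffers as max(sizeof(SmuMetrics_t), sizeof(SmuMetrics_legacy_t)), and likewise takes the larger of the two gpu_metrics structs. One buffer covers whichever layout the firmware turns out to speak, and the version-specific struct view is chosen later at parse time. A standalone sketch of the pattern, using stand-in layouts:

#include <stdio.h>
#include <stdlib.h>

/* Stand-ins for two firmware metrics layouts of different size. */
typedef struct { unsigned int a[8]; }  metrics_v2;
typedef struct { unsigned int a[16]; } metrics_legacy;

#define MAX_SZ(x, y) ((x) > (y) ? (x) : (y))

int main(void)
{
        /* Allocate once, big enough for either layout; no init-time
         * firmware version check required. */
        size_t sz = MAX_SZ(sizeof(metrics_v2), sizeof(metrics_legacy));
        void *buf = calloc(1, sz);
        if (!buf)
                return 1;

        int fw_is_legacy = 1; /* discovered later, after the fw handshake */
        if (fw_is_legacy) {
                metrics_legacy *m = buf;
                m->a[15] = 42; /* safe: buffer covers the larger layout */
        }
        printf("buffer of %zu bytes covers both layouts\n", sz);
        free(buf);
        return 0;
}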
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c index ba3ef3c2918a..cf1b84060bc3 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c @@ -2438,7 +2438,10 @@ int smu_v13_0_update_pcie_parameters(struct smu_context *smu, uint32_t smu_pcie_arg; int ret, i; - if (!amdgpu_device_pcie_dynamic_switching_supported()) { + if (!num_of_levels) + return 0; + + if (!(smu->adev->pm.pp_feature & PP_PCIE_DPM_MASK)) { if (pcie_table->pcie_gen[num_of_levels - 1] < pcie_gen_cap) pcie_gen_cap = pcie_table->pcie_gen[num_of_levels - 1]; @@ -2474,3 +2477,16 @@ int smu_v13_0_update_pcie_parameters(struct smu_context *smu, return 0; } + +int smu_v13_0_disable_pmfw_state(struct smu_context *smu) +{ + int ret; + struct amdgpu_device *adev = smu->adev; + + WREG32_PCIE(MP1_Public | (smnMP1_FIRMWARE_FLAGS & 0xffffffff), 0); + + ret = RREG32_PCIE(MP1_Public | + (smnMP1_FIRMWARE_FLAGS & 0xffffffff)); + + return ret == 0 ? 0 : -EINVAL; +} diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c index bcd7b39a3a1b..82c4e1f1c6f0 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c @@ -354,12 +354,12 @@ static int smu_v13_0_0_check_powerplay_table(struct smu_context *smu) if (powerplay_table->platform_caps & SMU_13_0_0_PP_PLATFORM_CAP_HARDWAREDC) smu->dc_controlled_by_gpio = true; - if (powerplay_table->platform_caps & SMU_13_0_0_PP_PLATFORM_CAP_BACO || - powerplay_table->platform_caps & SMU_13_0_0_PP_PLATFORM_CAP_MACO) + if (powerplay_table->platform_caps & SMU_13_0_0_PP_PLATFORM_CAP_BACO) { smu_baco->platform_support = true; - if (powerplay_table->platform_caps & SMU_13_0_0_PP_PLATFORM_CAP_MACO) - smu_baco->maco_support = true; + if (powerplay_table->platform_caps & SMU_13_0_0_PP_PLATFORM_CAP_MACO) + smu_baco->maco_support = true; + } if (!overdrive_lowerlimits->FeatureCtrlMask || !overdrive_upperlimits->FeatureCtrlMask) @@ -2530,38 +2530,10 @@ static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu, } } - if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_COMPUTE && - (((smu->adev->pdev->device == 0x744C) && (smu->adev->pdev->revision == 0xC8)) || - ((smu->adev->pdev->device == 0x744C) && (smu->adev->pdev->revision == 0xCC)))) { - ret = smu_cmn_update_table(smu, - SMU_TABLE_ACTIVITY_MONITOR_COEFF, - WORKLOAD_PPLIB_COMPUTE_BIT, - (void *)(&activity_monitor_external), - false); - if (ret) { - dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__); - return ret; - } - - ret = smu_cmn_update_table(smu, - SMU_TABLE_ACTIVITY_MONITOR_COEFF, - WORKLOAD_PPLIB_CUSTOM_BIT, - (void *)(&activity_monitor_external), - true); - if (ret) { - dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__); - return ret; - } - - workload_type = smu_cmn_to_asic_specific_index(smu, - CMN2ASIC_MAPPING_WORKLOAD, - PP_SMC_POWER_PROFILE_CUSTOM); - } else { - /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */ - workload_type = smu_cmn_to_asic_specific_index(smu, + /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */ + workload_type = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_WORKLOAD, smu->power_profile_mode); - } if (workload_type < 0) return -EINVAL; @@ -2602,26 +2574,38 @@ static int smu_v13_0_0_baco_enter(struct smu_context *smu) static int smu_v13_0_0_baco_exit(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; 
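The new smu_v13_0_disable_pmfw_state() above writes zero to smnMP1_FIRMWARE_FLAGS through the PCIe indirect interface and immediately reads it back, returning -EINVAL unless the readback is zero: the read both flushes the posted write and proves the firmware-flags register really took the value. A standalone sketch of the write-then-verify idiom, where fake_reg stands in for the WREG32_PCIE()/RREG32_PCIE() access:

#include <stdint.h>
#include <stdio.h>

static uint32_t fake_reg = 0xdeadbeef;

static void reg_write(uint32_t v) { fake_reg = v; }
static uint32_t reg_read(void)    { return fake_reg; }

static int disable_fw_state(void)
{
        reg_write(0);
        /* Read back instead of trusting the write: only a readback of 0
         * confirms the firmware state was actually cleared. */
        return reg_read() == 0 ? 0 : -1; /* -EINVAL in the kernel */
}

int main(void)
{
        printf("disable_fw_state() = %d\n", disable_fw_state());
        return 0;
}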
+ int ret; if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) { /* Wait for PMFW handling for the Dstate change */ usleep_range(10000, 11000); - return smu_v13_0_baco_set_armd3_sequence(smu, BACO_SEQ_ULPS); + ret = smu_v13_0_baco_set_armd3_sequence(smu, BACO_SEQ_ULPS); } else { - return smu_v13_0_baco_exit(smu); + ret = smu_v13_0_baco_exit(smu); } + + if (!ret) + adev->gfx.is_poweron = false; + + return ret; } static bool smu_v13_0_0_is_mode1_reset_supported(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; + u32 smu_version; + int ret; /* SRIOV does not support SMU mode1 reset */ if (amdgpu_sriov_vf(adev)) return false; /* PMFW support is available since 78.41 */ - if (smu->smc_fw_version < 0x004e2900) + ret = smu_cmn_get_smc_version(smu, NULL, &smu_version); + if (ret) + return false; + + if (smu_version < 0x004e2900) return false; return true; @@ -2788,7 +2772,13 @@ static int smu_v13_0_0_set_mp1_state(struct smu_context *smu, switch (mp1_state) { case PP_MP1_STATE_UNLOAD: - ret = smu_cmn_set_mp1_state(smu, mp1_state); + ret = smu_cmn_send_smc_msg_with_param(smu, + SMU_MSG_PrepareMp1ForUnload, + 0x55, NULL); + + if (!ret && smu->smu_baco.state == SMU_BACO_STATE_EXIT) + ret = smu_v13_0_disable_pmfw_state(smu); + break; default: /* Ignore others */ diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c index 60eb6f8af187..20f66e696f87 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c @@ -270,7 +270,7 @@ static int smu_v13_0_6_init_microcode(struct smu_context *smu) struct amdgpu_device *adev = smu->adev; uint32_t p2s_table_id = P2S_TABLE_ID_A; int ret = 0, i, p2stable_count; - char ucode_prefix[30]; + char ucode_prefix[15]; char fw_name[30]; /* No need to load P2S tables in IOV mode */ @@ -1478,6 +1478,7 @@ static int smu_v13_0_6_mca_set_debug_mode(struct smu_context *smu, bool enable) if (smu->smc_fw_version < 0x554800) return 0; + amdgpu_ras_set_mca_debug_mode(smu->adev, enable); return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ClearMcaOnRead, enable ? 
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
index ac0e1cc812bd..81eafed76045 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
@@ -346,12 +346,13 @@ static int smu_v13_0_7_check_powerplay_table(struct smu_context *smu)
 	if (powerplay_table->platform_caps & SMU_13_0_7_PP_PLATFORM_CAP_HARDWAREDC)
 		smu->dc_controlled_by_gpio = true;
 
-	if (powerplay_table->platform_caps & SMU_13_0_7_PP_PLATFORM_CAP_BACO ||
-	    powerplay_table->platform_caps & SMU_13_0_7_PP_PLATFORM_CAP_MACO)
+	if (powerplay_table->platform_caps & SMU_13_0_7_PP_PLATFORM_CAP_BACO) {
 		smu_baco->platform_support = true;
 
-	if (smu_baco->platform_support && (BoardTable->HsrEnabled || BoardTable->VddqOffEnabled))
-		smu_baco->maco_support = true;
+		if ((powerplay_table->platform_caps & SMU_13_0_7_PP_PLATFORM_CAP_MACO)
+		    && (BoardTable->HsrEnabled || BoardTable->VddqOffEnabled))
+			smu_baco->maco_support = true;
+	}
 
 	if (!overdrive_lowerlimits->FeatureCtrlMask ||
 	    !overdrive_upperlimits->FeatureCtrlMask)
@@ -2498,7 +2499,13 @@ static int smu_v13_0_7_set_mp1_state(struct smu_context *smu,
 
 	switch (mp1_state) {
 	case PP_MP1_STATE_UNLOAD:
-		ret = smu_cmn_set_mp1_state(smu, mp1_state);
+		ret = smu_cmn_send_smc_msg_with_param(smu,
+						      SMU_MSG_PrepareMp1ForUnload,
+						      0x55, NULL);
+
+		if (!ret && smu->smu_baco.state == SMU_BACO_STATE_EXIT)
+			ret = smu_v13_0_disable_pmfw_state(smu);
+
 		break;
 	default:
 		/* Ignore others */
@@ -2524,14 +2531,20 @@ static int smu_v13_0_7_baco_enter(struct smu_context *smu)
 static int smu_v13_0_7_baco_exit(struct smu_context *smu)
 {
 	struct amdgpu_device *adev = smu->adev;
+	int ret;
 
 	if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) {
 		/* Wait for PMFW handling for the Dstate change */
 		usleep_range(10000, 11000);
-		return smu_v13_0_baco_set_armd3_sequence(smu, BACO_SEQ_ULPS);
+		ret = smu_v13_0_baco_set_armd3_sequence(smu, BACO_SEQ_ULPS);
 	} else {
-		return smu_v13_0_baco_exit(smu);
+		ret = smu_v13_0_baco_exit(smu);
 	}
+
+	if (!ret)
+		adev->gfx.is_poweron = false;
+
+	return ret;
 }
 
 static bool smu_v13_0_7_is_mode1_reset_supported(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
index 4ac22f44d160..d8f8ad0e7137 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
@@ -57,7 +57,7 @@ int smu_v14_0_init_microcode(struct smu_context *smu)
 {
 	struct amdgpu_device *adev = smu->adev;
 	char fw_name[30];
-	char ucode_prefix[30];
+	char ucode_prefix[15];
 	int err = 0;
 	const struct smc_firmware_header_v1_0 *hdr;
 	const struct common_firmware_header *header;
@@ -229,6 +229,8 @@ int smu_v14_0_check_fw_version(struct smu_context *smu)
 		smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_2;
 		break;
 	case IP_VERSION(14, 0, 0):
+		if ((smu->smc_fw_version < 0x5d3a00))
+			dev_warn(smu->adev->dev, "The PMFW version(%x) is behind in this BIOS!\n", smu->smc_fw_version);
 		smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_0;
 		break;
 	default:
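Note: both smu13 and smu14 shrink ucode_prefix from 30 to 15 bytes while fw_name stays at 30. Assuming the usual composition of the firmware path from the prefix (the call site sits outside these hunks), the sizes line up so the snprintf() can never truncate:

/* Sketch under that assumption: "amdgpu/" (7) + prefix (at most 14)
 * + ".bin" (4) + NUL (1) = 26 <= 30, so fw_name always fits. */
char ucode_prefix[15];
char fw_name[30];

amdgpu_ucode_ip_version_decode(adev, MP1_HWIP,
			       ucode_prefix, sizeof(ucode_prefix));
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s.bin", ucode_prefix);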
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
index c36fc10b63c8..03b38c3a9968 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
@@ -156,15 +156,10 @@ static int smu_v14_0_0_init_smc_tables(struct smu_context *smu)
 		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
 	SMU_TABLE_INIT(tables, SMU_TABLE_DPMCLOCKS, sizeof(DpmClocks_t),
 		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
-	if (smu->smc_fw_version > 0x5d3500) {
-		SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_t),
-			       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
-		smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL);
-	} else {
-		SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_legacy_t),
-			       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
-		smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_legacy_t), GFP_KERNEL);
-	}
+	SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_t),
+		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
+
+	smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL);
 	if (!smu_table->metrics_table)
 		goto err0_out;
 	smu_table->metrics_time = 0;
@@ -177,10 +172,7 @@ static int smu_v14_0_0_init_smc_tables(struct smu_context *smu)
 	if (!smu_table->watermarks_table)
 		goto err2_out;
 
-	if (smu->smc_fw_version > 0x5d3500)
-		smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v3_0);
-	else
-		smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v2_1);
+	smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v3_0);
 	smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL);
 	if (!smu_table->gpu_metrics_table)
 		goto err3_out;
@@ -242,13 +234,13 @@ static int smu_v14_0_0_get_smu_metrics_data(struct smu_context *smu,
 
 	switch (member) {
 	case METRICS_AVERAGE_GFXCLK:
-		*value = metrics->AverageGfxclkFrequency;
+		*value = metrics->GfxclkFrequency;
 		break;
 	case METRICS_AVERAGE_SOCCLK:
-		*value = metrics->AverageSocclkFrequency;
+		*value = metrics->SocclkFrequency;
 		break;
 	case METRICS_AVERAGE_VCLK:
-		*value = metrics->AverageVclkFrequency;
+		*value = metrics->VclkFrequency;
 		break;
 	case METRICS_AVERAGE_DCLK:
 		*value = 0;
@@ -257,25 +249,25 @@ static int smu_v14_0_0_get_smu_metrics_data(struct smu_context *smu,
 		*value = 0;
 		break;
 	case METRICS_AVERAGE_FCLK:
-		*value = metrics->AverageFclkFrequency;
+		*value = metrics->FclkFrequency;
 		break;
 	case METRICS_AVERAGE_GFXACTIVITY:
-		*value = metrics->AverageGfxActivity >> 8;
+		*value = metrics->GfxActivity / 100;
 		break;
 	case METRICS_AVERAGE_VCNACTIVITY:
-		*value = metrics->AverageVcnActivity >> 8;
+		*value = metrics->VcnActivity / 100;
 		break;
 	case METRICS_AVERAGE_SOCKETPOWER:
 	case METRICS_CURR_SOCKETPOWER:
-		*value = (metrics->AverageSocketPower & 0xff00) +
-			 ((metrics->AverageSocketPower & 0xff) * 100 >> 8);
+		*value = (metrics->SocketPower / 1000 << 8) +
+			 (metrics->SocketPower % 1000 / 10);
 		break;
 	case METRICS_TEMPERATURE_EDGE:
-		*value = (metrics->GfxTemperature >> 8) *
+		*value = metrics->GfxTemperature / 100 *
 			 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
 		break;
 	case METRICS_TEMPERATURE_HOTSPOT:
-		*value = (metrics->SocTemperature >> 8) *
+		*value = metrics->SocTemperature / 100 *
 			 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
 		break;
 	case METRICS_THROTTLER_STATUS:
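Note: the metrics rework changes the units coming out of the SMU table. SocketPower is now consumed as a plain milliwatt count (an inference from the /1000 and %1000 arithmetic above) and repacked into the same watts-in-the-high-byte format the old 8.8 fixed-point path produced. A worked example of that repacking:

/* Sketch of the conversion used for the METRICS_*_SOCKETPOWER cases
 * above: whole watts land in bits 15:8, hundredths in bits 7:0. */
static uint32_t pack_socket_power_mw(uint32_t mw)
{
	return ((mw / 1000) << 8) + (mw % 1000 / 10);
}

/* e.g. 15500 mW -> (15 << 8) + 50 = 0x0f32, i.e. 15.50 W */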
@@ -317,107 +309,6 @@ static int smu_v14_0_0_get_smu_metrics_data(struct smu_context *smu,
 	return ret;
 }
 
-static int smu_v14_0_0_legacy_get_smu_metrics_data(struct smu_context *smu,
-						   MetricsMember_t member,
-						   uint32_t *value)
-{
-	struct smu_table_context *smu_table = &smu->smu_table;
-	SmuMetrics_legacy_t *metrics = (SmuMetrics_legacy_t *)smu_table->metrics_table;
-	int ret = 0;
-
-	ret = smu_cmn_get_metrics_table(smu, NULL, false);
-	if (ret)
-		return ret;
-
-	switch (member) {
-	case METRICS_AVERAGE_GFXCLK:
-		*value = metrics->GfxclkFrequency;
-		break;
-	case METRICS_AVERAGE_SOCCLK:
-		*value = metrics->SocclkFrequency;
-		break;
-	case METRICS_AVERAGE_VCLK:
-		*value = metrics->VclkFrequency;
-		break;
-	case METRICS_AVERAGE_DCLK:
-		*value = metrics->DclkFrequency;
-		break;
-	case METRICS_AVERAGE_UCLK:
-		*value = metrics->MemclkFrequency;
-		break;
-	case METRICS_AVERAGE_GFXACTIVITY:
-		*value = metrics->GfxActivity / 100;
-		break;
-	case METRICS_AVERAGE_FCLK:
-		*value = metrics->AverageFclkFrequency;
-		break;
-	case METRICS_AVERAGE_VCNACTIVITY:
-		*value = metrics->UvdActivity;
-		break;
-	case METRICS_AVERAGE_SOCKETPOWER:
-		*value = (metrics->AverageSocketPower << 8) / 1000;
-		break;
-	case METRICS_CURR_SOCKETPOWER:
-		*value = (metrics->CurrentSocketPower << 8) / 1000;
-		break;
-	case METRICS_TEMPERATURE_EDGE:
-		*value = metrics->GfxTemperature / 100 *
-			 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
-		break;
-	case METRICS_TEMPERATURE_HOTSPOT:
-		*value = metrics->SocTemperature / 100 *
-			 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
-		break;
-	case METRICS_THROTTLER_STATUS:
-		*value = metrics->ThrottlerStatus;
-		break;
-	case METRICS_VOLTAGE_VDDGFX:
-		*value = metrics->Voltage[0];
-		break;
-	case METRICS_VOLTAGE_VDDSOC:
-		*value = metrics->Voltage[1];
-		break;
-	case METRICS_SS_APU_SHARE:
-		/* return the percentage of APU power with respect to APU's power limit.
-		 * percentage is reported, this isn't boost value. Smartshift power
-		 * boost/shift is only when the percentage is more than 100.
-		 */
-		if (metrics->StapmOpnLimit > 0)
-			*value = (metrics->ApuPower * 100) / metrics->StapmOpnLimit;
-		else
-			*value = 0;
-		break;
-	case METRICS_SS_DGPU_SHARE:
-		/* return the percentage of dGPU power with respect to dGPU's power limit.
-		 * percentage is reported, this isn't boost value. Smartshift power
-		 * boost/shift is only when the percentage is more than 100.
-		 */
-		if ((metrics->dGpuPower > 0) &&
-		    (metrics->StapmCurrentLimit > metrics->StapmOpnLimit))
-			*value = (metrics->dGpuPower * 100) /
-				 (metrics->StapmCurrentLimit - metrics->StapmOpnLimit);
-		else
-			*value = 0;
-		break;
-	default:
-		*value = UINT_MAX;
-		break;
-	}
-
-	return ret;
-}
-
-static int smu_v14_0_0_common_get_smu_metrics_data(struct smu_context *smu,
-						   MetricsMember_t member,
-						   uint32_t *value)
-{
-	if (smu->smc_fw_version > 0x5d3500)
-		return smu_v14_0_0_get_smu_metrics_data(smu, member, value);
-	else
-		return smu_v14_0_0_legacy_get_smu_metrics_data(smu, member, value);
-}
-
 static int smu_v14_0_0_read_sensor(struct smu_context *smu,
 				   enum amd_pp_sensors sensor,
 				   void *data, uint32_t *size)
@@ -429,69 +320,69 @@ static int smu_v14_0_0_read_sensor(struct smu_context *smu,
 
 	switch (sensor) {
 	case AMDGPU_PP_SENSOR_GPU_LOAD:
-		ret = smu_v14_0_0_common_get_smu_metrics_data(smu,
+		ret = smu_v14_0_0_get_smu_metrics_data(smu,
 						       METRICS_AVERAGE_GFXACTIVITY,
 						       (uint32_t *)data);
 		*size = 4;
 		break;
 	case AMDGPU_PP_SENSOR_GPU_AVG_POWER:
-		ret = smu_v14_0_0_common_get_smu_metrics_data(smu,
+		ret = smu_v14_0_0_get_smu_metrics_data(smu,
 						       METRICS_AVERAGE_SOCKETPOWER,
 						       (uint32_t *)data);
 		*size = 4;
 		break;
 	case AMDGPU_PP_SENSOR_GPU_INPUT_POWER:
-		ret = smu_v14_0_0_common_get_smu_metrics_data(smu,
+		ret = smu_v14_0_0_get_smu_metrics_data(smu,
 						       METRICS_CURR_SOCKETPOWER,
 						       (uint32_t *)data);
 		*size = 4;
 		break;
 	case AMDGPU_PP_SENSOR_EDGE_TEMP:
-		ret = smu_v14_0_0_common_get_smu_metrics_data(smu,
+		ret = smu_v14_0_0_get_smu_metrics_data(smu,
 						       METRICS_TEMPERATURE_EDGE,
 						       (uint32_t *)data);
 		*size = 4;
 		break;
 	case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
-		ret = smu_v14_0_0_common_get_smu_metrics_data(smu,
+		ret = smu_v14_0_0_get_smu_metrics_data(smu,
 						       METRICS_TEMPERATURE_HOTSPOT,
 						       (uint32_t *)data);
 		*size = 4;
 		break;
 	case AMDGPU_PP_SENSOR_GFX_MCLK:
-		ret = smu_v14_0_0_common_get_smu_metrics_data(smu,
+		ret = smu_v14_0_0_get_smu_metrics_data(smu,
 						       METRICS_AVERAGE_UCLK,
 						       (uint32_t *)data);
 		*(uint32_t *)data *= 100;
 		*size = 4;
 		break;
 	case AMDGPU_PP_SENSOR_GFX_SCLK:
-		ret = smu_v14_0_0_common_get_smu_metrics_data(smu,
+		ret = smu_v14_0_0_get_smu_metrics_data(smu,
 						       METRICS_AVERAGE_GFXCLK,
 						       (uint32_t *)data);
 		*(uint32_t *)data *= 100;
 		*size = 4;
 		break;
 	case AMDGPU_PP_SENSOR_VDDGFX:
-		ret = smu_v14_0_0_common_get_smu_metrics_data(smu,
+		ret = smu_v14_0_0_get_smu_metrics_data(smu,
 						       METRICS_VOLTAGE_VDDGFX,
 						       (uint32_t *)data);
 		*size = 4;
 		break;
 	case AMDGPU_PP_SENSOR_VDDNB:
-		ret = smu_v14_0_0_common_get_smu_metrics_data(smu,
+		ret = smu_v14_0_0_get_smu_metrics_data(smu,
 						       METRICS_VOLTAGE_VDDSOC,
 						       (uint32_t *)data);
 		*size = 4;
 		break;
 	case AMDGPU_PP_SENSOR_SS_APU_SHARE:
-		ret = smu_v14_0_0_common_get_smu_metrics_data(smu,
+		ret = smu_v14_0_0_get_smu_metrics_data(smu,
 						       METRICS_SS_APU_SHARE,
 						       (uint32_t *)data);
 		*size = 4;
 		break;
 	case AMDGPU_PP_SENSOR_SS_DGPU_SHARE:
-		ret = smu_v14_0_0_common_get_smu_metrics_data(smu,
+		ret = smu_v14_0_0_get_smu_metrics_data(smu,
 						       METRICS_SS_DGPU_SHARE,
 						       (uint32_t *)data);
 		*size = 4;
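Note: with the legacy path gone, every sensor case above resolves through smu_v14_0_0_get_smu_metrics_data() and reports a fixed 4-byte payload. An illustrative consumer of that contract (not driver code; the hook is static and is normally reached via the pptable_funcs table):

uint32_t val;
uint32_t size = sizeof(val);

/* each case above writes one uint32_t and sets *size = 4 */
if (!smu_v14_0_0_read_sensor(smu, AMDGPU_PP_SENSOR_GPU_LOAD,
			     &val, &size) && size == sizeof(val))
	dev_info(smu->adev->dev, "GPU load: %u%%\n", val);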
@@ -588,7 +479,7 @@ static ssize_t smu_v14_0_0_get_gpu_metrics(struct smu_context *smu,
 	if (ret)
 		return ret;
 
-	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 1);
+	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 3, 0);
 
 	gpu_metrics->temperature_gfx = metrics.GfxTemperature;
 	gpu_metrics->temperature_soc = metrics.SocTemperature;
@@ -597,32 +488,33 @@
 		sizeof(uint16_t) * 16);
 	gpu_metrics->temperature_skin = metrics.SkinTemp;
 
-	gpu_metrics->average_gfx_activity = metrics.AverageGfxActivity;
-	gpu_metrics->average_vcn_activity = metrics.AverageVcnActivity;
+	gpu_metrics->average_gfx_activity = metrics.GfxActivity;
+	gpu_metrics->average_vcn_activity = metrics.VcnActivity;
 	memcpy(&gpu_metrics->average_ipu_activity[0],
-	       &metrics.AverageIpuBusy[0],
+	       &metrics.IpuBusy[0],
 	       sizeof(uint16_t) * 8);
 	memcpy(&gpu_metrics->average_core_c0_activity[0],
-	       &metrics.AverageCoreC0Residency[0],
+	       &metrics.CoreC0Residency[0],
 	       sizeof(uint16_t) * 16);
-	gpu_metrics->average_dram_reads = metrics.AverageDRAMReads;
-	gpu_metrics->average_dram_writes = metrics.AverageDRAMWrites;
+	gpu_metrics->average_dram_reads = metrics.DRAMReads;
+	gpu_metrics->average_dram_writes = metrics.DRAMWrites;
 
-	gpu_metrics->average_socket_power = metrics.AverageSocketPower;
+	gpu_metrics->average_socket_power = metrics.SocketPower;
 	gpu_metrics->average_ipu_power = metrics.IpuPower;
 	gpu_metrics->average_apu_power = metrics.ApuPower;
+	gpu_metrics->average_gfx_power = metrics.GfxPower;
 	gpu_metrics->average_dgpu_power = metrics.dGpuPower;
-	gpu_metrics->average_core_power = metrics.AverageCorePower;
-	memcpy(&gpu_metrics->core_power[0],
+	gpu_metrics->average_all_core_power = metrics.AllCorePower;
+	memcpy(&gpu_metrics->average_core_power[0],
 	       &metrics.CorePower[0],
 	       sizeof(uint16_t) * 16);
 
-	gpu_metrics->average_gfxclk_frequency = metrics.AverageGfxclkFrequency;
-	gpu_metrics->average_socclk_frequency = metrics.AverageSocclkFrequency;
-	gpu_metrics->average_vpeclk_frequency = metrics.AverageVpeclkFrequency;
-	gpu_metrics->average_fclk_frequency = metrics.AverageFclkFrequency;
-	gpu_metrics->average_vclk_frequency = metrics.AverageVclkFrequency;
-	gpu_metrics->average_ipuclk_frequency = metrics.AverageIpuclkFrequency;
+	gpu_metrics->average_gfxclk_frequency = metrics.GfxclkFrequency;
+	gpu_metrics->average_socclk_frequency = metrics.SocclkFrequency;
+	gpu_metrics->average_vpeclk_frequency = metrics.VpeclkFrequency;
+	gpu_metrics->average_fclk_frequency = metrics.FclkFrequency;
+	gpu_metrics->average_vclk_frequency = metrics.VclkFrequency;
+	gpu_metrics->average_ipuclk_frequency = metrics.IpuclkFrequency;
 
 	memcpy(&gpu_metrics->current_coreclk[0],
 	       &metrics.CoreFrequency[0],
@@ -638,68 +530,6 @@ static ssize_t smu_v14_0_0_get_gpu_metrics(struct smu_context *smu,
 	return sizeof(struct gpu_metrics_v3_0);
 }
 
-static ssize_t smu_v14_0_0_get_legacy_gpu_metrics(struct smu_context *smu,
-						  void **table)
-{
-	struct smu_table_context *smu_table = &smu->smu_table;
-	struct gpu_metrics_v2_1 *gpu_metrics =
-		(struct gpu_metrics_v2_1 *)smu_table->gpu_metrics_table;
-	SmuMetrics_legacy_t metrics;
-	int ret = 0;
-
-	ret = smu_cmn_get_metrics_table(smu, &metrics, true);
-	if (ret)
-		return ret;
-
-	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 1);
-
-	gpu_metrics->temperature_gfx = metrics.GfxTemperature;
-	gpu_metrics->temperature_soc = metrics.SocTemperature;
-	memcpy(&gpu_metrics->temperature_core[0],
-	       &metrics.CoreTemperature[0],
-	       sizeof(uint16_t) * 8);
-	gpu_metrics->temperature_l3[0] = metrics.L3Temperature[0];
-	gpu_metrics->temperature_l3[1] = metrics.L3Temperature[1];
-
-	gpu_metrics->average_gfx_activity = metrics.GfxActivity;
-	gpu_metrics->average_mm_activity = metrics.UvdActivity;
-
-	gpu_metrics->average_socket_power = metrics.CurrentSocketPower;
-	gpu_metrics->average_gfx_power = metrics.Power[0];
-	gpu_metrics->average_soc_power = metrics.Power[1];
-	memcpy(&gpu_metrics->average_core_power[0],
-	       &metrics.CorePower[0],
-	       sizeof(uint16_t) * 8);
-
-	gpu_metrics->average_gfxclk_frequency = metrics.GfxclkFrequency;
-	gpu_metrics->average_socclk_frequency = metrics.SocclkFrequency;
-	gpu_metrics->average_uclk_frequency = metrics.MemclkFrequency;
-	gpu_metrics->average_fclk_frequency = metrics.MemclkFrequency;
-	gpu_metrics->average_vclk_frequency = metrics.VclkFrequency;
-	gpu_metrics->average_dclk_frequency = metrics.DclkFrequency;
-
-	memcpy(&gpu_metrics->current_coreclk[0],
-	       &metrics.CoreFrequency[0],
-	       sizeof(uint16_t) * 8);
-
-	gpu_metrics->throttle_status = metrics.ThrottlerStatus;
-	gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
-
-	*table = (void *)gpu_metrics;
-
-	return sizeof(struct gpu_metrics_v2_1);
-}
-
-static ssize_t smu_v14_0_0_common_get_gpu_metrics(struct smu_context *smu,
-						  void **table)
-{
-
-	if (smu->smc_fw_version > 0x5d3500)
-		return smu_v14_0_0_get_gpu_metrics(smu, table);
-	else
-		return smu_v14_0_0_get_legacy_gpu_metrics(smu, table);
-}
-
 static int smu_v14_0_0_mode2_reset(struct smu_context *smu)
 {
 	int ret;
@@ -928,7 +758,7 @@ static int smu_v14_0_0_get_current_clk_freq(struct smu_context *smu,
 		return -EINVAL;
 	}
 
-	return smu_v14_0_0_common_get_smu_metrics_data(smu, member_type, value);
+	return smu_v14_0_0_get_smu_metrics_data(smu, member_type, value);
 }
 
 static int smu_v14_0_0_get_dpm_level_count(struct smu_context *smu,
@@ -1230,7 +1060,7 @@ static const struct pptable_funcs smu_v14_0_0_ppt_funcs = {
 	.read_sensor = smu_v14_0_0_read_sensor,
 	.is_dpm_running = smu_v14_0_0_is_dpm_running,
 	.set_watermarks_table = smu_v14_0_0_set_watermarks_table,
-	.get_gpu_metrics = smu_v14_0_0_common_get_gpu_metrics,
+	.get_gpu_metrics = smu_v14_0_0_get_gpu_metrics,
 	.get_enabled_mask = smu_cmn_get_enabled_mask,
 	.get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
 	.set_driver_table_location = smu_v14_0_set_driver_table_location,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
index 6e57c94379a9..001a5cf09657 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
@@ -1004,6 +1004,9 @@ void smu_cmn_init_soft_gpu_metrics(void *table, uint8_t frev, uint8_t crev)
 	case METRICS_VERSION(2, 4):
 		structure_size = sizeof(struct gpu_metrics_v2_4);
 		break;
+	case METRICS_VERSION(3, 0):
+		structure_size = sizeof(struct gpu_metrics_v3_0);
+		break;
 	default:
 		return;
 	}
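Note: the smu_cmn.c hunk registers gpu_metrics_v3_0 with the common initializer so the smu_cmn_init_soft_gpu_metrics(gpu_metrics, 3, 0) call above can stamp and size the buffer. A sketch of the dispatch pattern being extended, assuming METRICS_VERSION simply packs the two revisions into one switch key and that the helper fills the common header fields:

/* Sketch, not the exact helper: fold frev/crev into one key, map it
 * to the structure size, then stamp the common metrics header. */
#define METRICS_VERSION(a, b)	(((a) << 16) | (b))

void init_soft_gpu_metrics_sketch(void *table, uint8_t frev, uint8_t crev)
{
	struct metrics_table_header *header = table;
	uint16_t structure_size;

	switch (METRICS_VERSION(frev, crev)) {
	case METRICS_VERSION(3, 0):
		structure_size = sizeof(struct gpu_metrics_v3_0);
		break;
	default:
		return;		/* unknown revision: leave table untouched */
	}

	memset(header, 0xFF, structure_size);
	header->format_revision = frev;
	header->content_revision = crev;
	header->structure_size = structure_size;
}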