Diffstat (limited to 'drivers/gpu/drm/amd')
67 files changed, 1147 insertions, 676 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index d891ab779ca7..5df21529b3b1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -1801,13 +1801,18 @@ int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser, if (dma_resv_locking_ctx((*bo)->tbo.base.resv) != &parser->exec.ticket) return -EINVAL; + /* Make sure VRAM is allocated contiguously */ (*bo)->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS; - amdgpu_bo_placement_from_domain(*bo, (*bo)->allowed_domains); - for (i = 0; i < (*bo)->placement.num_placement; i++) - (*bo)->placements[i].flags |= TTM_PL_FLAG_CONTIGUOUS; - r = ttm_bo_validate(&(*bo)->tbo, &(*bo)->placement, &ctx); - if (r) - return r; + if ((*bo)->tbo.resource->mem_type == TTM_PL_VRAM && + !((*bo)->tbo.resource->placement & TTM_PL_FLAG_CONTIGUOUS)) { + + amdgpu_bo_placement_from_domain(*bo, (*bo)->allowed_domains); + for (i = 0; i < (*bo)->placement.num_placement; i++) + (*bo)->placements[i].flags |= TTM_PL_FLAG_CONTIGUOUS; + r = ttm_bo_validate(&(*bo)->tbo, &(*bo)->placement, &ctx); + if (r) + return r; + } return amdgpu_ttm_alloc_gart(&(*bo)->tbo); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c index 946c48829f19..824f9da5b6ce 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c @@ -343,11 +343,10 @@ void amdgpu_coredump(struct amdgpu_device *adev, bool skip_vram_check, coredump->skip_vram_check = skip_vram_check; coredump->reset_vram_lost = vram_lost; - if (job && job->vm) { - struct amdgpu_vm *vm = job->vm; + if (job && job->pasid) { struct amdgpu_task_info *ti; - ti = amdgpu_vm_get_task_info_vm(vm); + ti = amdgpu_vm_get_task_info_pasid(adev, job->pasid); if (ti) { coredump->reset_task_info = *ti; amdgpu_vm_put_task_info(ti); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 9095c05e0269..cd4fac120834 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -145,7 +145,7 @@ const char *amdgpu_asic_name[] = { "LAST", }; -#define AMDGPU_IP_BLK_MASK_ALL GENMASK(AMDGPU_MAX_IP_NUM - 1, 0) +#define AMDGPU_IP_BLK_MASK_ALL GENMASK(AMD_IP_BLOCK_TYPE_NUM - 1, 0) /* * Default init level where all blocks are expected to be initialized.
This is * the level of initialization expected by default and also after a full reset @@ -417,6 +417,9 @@ bool amdgpu_device_supports_boco(struct drm_device *dev) { struct amdgpu_device *adev = drm_to_adev(dev); + if (!IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE)) + return false; + if (adev->has_pr3 || ((adev->flags & AMD_IS_PX) && amdgpu_is_atpx_hybrid())) return true; @@ -3670,9 +3673,11 @@ static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev) continue; r = block->version->funcs->hw_init(&adev->ip_blocks[i]); - DRM_INFO("RE-INIT-early: %s %s\n", block->version->funcs->name, r?"failed":"succeeded"); - if (r) + if (r) { + dev_err(adev->dev, "RE-INIT-early: %s failed\n", + block->version->funcs->name); return r; + } block->status.hw = true; } } @@ -3682,7 +3687,8 @@ static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev) static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev) { - int i, r; + struct amdgpu_ip_block *block; + int i, r = 0; static enum amd_ip_block_type ip_order[] = { AMD_IP_BLOCK_TYPE_SMC, @@ -3697,34 +3703,28 @@ static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev) }; for (i = 0; i < ARRAY_SIZE(ip_order); i++) { - int j; - struct amdgpu_ip_block *block; - - for (j = 0; j < adev->num_ip_blocks; j++) { - block = &adev->ip_blocks[j]; + block = amdgpu_device_ip_get_ip_block(adev, ip_order[i]); - if (block->version->type != ip_order[i] || - !block->status.valid || - block->status.hw) - continue; + if (!block) + continue; + if (block->status.valid && !block->status.hw) { if (block->version->type == AMD_IP_BLOCK_TYPE_SMC) { - r = amdgpu_ip_block_resume(&adev->ip_blocks[i]); - if (r) - return r; + r = amdgpu_ip_block_resume(block); } else { - r = block->version->funcs->hw_init(&adev->ip_blocks[i]); - if (r) { - DRM_ERROR("hw_init of IP block <%s> failed %d\n", - adev->ip_blocks[i].version->funcs->name, r); - return r; - } - block->status.hw = true; + r = block->version->funcs->hw_init(block); + } + + if (r) { + dev_err(adev->dev, "RE-INIT-late: %s failed\n", + block->version->funcs->name); + break; } + block->status.hw = true; } } - return 0; + return r; } /** @@ -3765,7 +3765,7 @@ static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev) * * @adev: amdgpu_device pointer * - * First resume function for hardware IPs. The list of all the hardware + * Second resume function for hardware IPs. The list of all the hardware * IPs that make up the asic is walked and the resume callbacks are run for * all blocks except COMMON, GMC, and IH. resume puts the hardware into a * functional state after a suspend and updates the software state as @@ -3783,6 +3783,7 @@ static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev) if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON || adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC || adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH || + adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE || adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) continue; r = amdgpu_ip_block_resume(&adev->ip_blocks[i]); @@ -3794,6 +3795,36 @@ static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev) } /** + * amdgpu_device_ip_resume_phase3 - run resume for hardware IPs + * + * @adev: amdgpu_device pointer + * + * Third resume function for hardware IPs. The list of all the hardware + * IPs that make up the asic is walked and the resume callbacks are run for + * all DCE. 
resume puts the hardware into a functional state after a suspend + * and updates the software state as necessary. This function is also used + * for restoring the GPU after a GPU reset. + * + * Returns 0 on success, negative error code on failure. + */ +static int amdgpu_device_ip_resume_phase3(struct amdgpu_device *adev) +{ + int i, r; + + for (i = 0; i < adev->num_ip_blocks; i++) { + if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw) + continue; + if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) { + r = amdgpu_ip_block_resume(&adev->ip_blocks[i]); + if (r) + return r; + } + } + + return 0; +} + +/** * amdgpu_device_ip_resume - run resume for hardware IPs * * @adev: amdgpu_device pointer @@ -3822,6 +3853,13 @@ static int amdgpu_device_ip_resume(struct amdgpu_device *adev) if (adev->mman.buffer_funcs_ring->sched.ready) amdgpu_ttm_set_buffer_funcs_status(adev, true); + if (r) + return r; + + amdgpu_fence_driver_hw_init(adev); + + r = amdgpu_device_ip_resume_phase3(adev); + return r; } @@ -4902,7 +4940,6 @@ int amdgpu_device_resume(struct drm_device *dev, bool notify_clients) dev_err(adev->dev, "amdgpu_device_ip_resume failed (%d).\n", r); goto exit; } - amdgpu_fence_driver_hw_init(adev); if (!adev->in_s0ix) { r = amdgpu_amdkfd_resume(adev, adev->in_runpm); @@ -5487,6 +5524,10 @@ int amdgpu_device_reinit_after_reset(struct amdgpu_reset_context *reset_context) if (tmp_adev->mman.buffer_funcs_ring->sched.ready) amdgpu_ttm_set_buffer_funcs_status(tmp_adev, true); + r = amdgpu_device_ip_resume_phase3(tmp_adev); + if (r) + goto out; + if (vram_lost) amdgpu_device_fill_reset_magic(tmp_adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c index b9d08bc96581..a21c510c408e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c @@ -255,7 +255,6 @@ void amdgpu_job_set_resources(struct amdgpu_job *job, struct amdgpu_bo *gds, void amdgpu_job_free_resources(struct amdgpu_job *job) { - struct amdgpu_ring *ring = to_amdgpu_ring(job->base.sched); struct dma_fence *f; unsigned i; @@ -268,7 +267,7 @@ void amdgpu_job_free_resources(struct amdgpu_job *job) f = NULL; for (i = 0; i < job->num_ibs; ++i) - amdgpu_ib_free(ring->adev, &job->ibs[i], f); + amdgpu_ib_free(NULL, &job->ibs[i], f); } static void amdgpu_job_free_cb(struct drm_sched_job *s_job) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 9f922ec50ea2..c8180cad0abd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -61,7 +61,7 @@ #include "amdgpu_res_cursor.h" #include "bif/bif_4_1_d.h" -MODULE_IMPORT_NS(DMA_BUF); +MODULE_IMPORT_NS("DMA_BUF"); #define AMDGPU_TTM_VRAM_MAX_DW_READ ((size_t)128) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c index 31fd30dcd593..65bb26215e86 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c @@ -551,6 +551,8 @@ static void amdgpu_uvd_force_into_uvd_segment(struct amdgpu_bo *abo) for (i = 0; i < abo->placement.num_placement; ++i) { abo->placements[i].fpfn = 0 >> PAGE_SHIFT; abo->placements[i].lpfn = (256 * 1024 * 1024) >> PAGE_SHIFT; + if (abo->placements[i].mem_type == TTM_PL_VRAM) + abo->placements[i].flags |= TTM_PL_FLAG_CONTIGUOUS; } } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 8d9bf7a0857f..c9c48b782ec1 100644 --- 
a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -674,12 +674,8 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, pasid_mapping_needed &= adev->gmc.gmc_funcs->emit_pasid_mapping && ring->funcs->emit_wreg; - if (adev->gfx.enable_cleaner_shader && - ring->funcs->emit_cleaner_shader && - job->enforce_isolation) - ring->funcs->emit_cleaner_shader(ring); - - if (!vm_flush_needed && !gds_switch_needed && !need_pipe_sync) + if (!vm_flush_needed && !gds_switch_needed && !need_pipe_sync && + !(job->enforce_isolation && !job->vmid)) return 0; amdgpu_ring_ib_begin(ring); @@ -690,6 +686,11 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, if (need_pipe_sync) amdgpu_ring_emit_pipeline_sync(ring); + if (adev->gfx.enable_cleaner_shader && + ring->funcs->emit_cleaner_shader && + job->enforce_isolation) + ring->funcs->emit_cleaner_shader(ring); + if (vm_flush_needed) { trace_amdgpu_vm_flush(ring, job->vmid, job->vm_pd_addr); amdgpu_ring_emit_vm_flush(ring, job->vmid, job->vm_pd_addr); @@ -1265,10 +1266,9 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va, * next command submission. */ if (amdgpu_vm_is_bo_always_valid(vm, bo)) { - uint32_t mem_type = bo->tbo.resource->mem_type; - - if (!(bo->preferred_domains & - amdgpu_mem_type_to_domain(mem_type))) + if (bo->tbo.resource && + !(bo->preferred_domains & + amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type))) amdgpu_vm_bo_evicted(&bo_va->base); else amdgpu_vm_bo_idle(&bo_va->base); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c index fe7c48f2fb2a..da327ab48a57 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c @@ -4123,7 +4123,7 @@ static int gfx_v12_0_set_clockgating_state(void *handle, if (amdgpu_sriov_vf(adev)) return 0; - switch (adev->ip_versions[GC_HWIP][0]) { + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { case IP_VERSION(12, 0, 0): case IP_VERSION(12, 0, 1): gfx_v12_0_update_gfx_clock_gating(adev, diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c index e2b3dda57030..54459254bd37 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c @@ -45,6 +45,8 @@ MODULE_FIRMWARE("amdgpu/gc_9_4_3_mec.bin"); MODULE_FIRMWARE("amdgpu/gc_9_4_4_mec.bin"); MODULE_FIRMWARE("amdgpu/gc_9_4_3_rlc.bin"); MODULE_FIRMWARE("amdgpu/gc_9_4_4_rlc.bin"); +MODULE_FIRMWARE("amdgpu/gc_9_4_3_sjt_mec.bin"); +MODULE_FIRMWARE("amdgpu/gc_9_4_4_sjt_mec.bin"); #define GFX9_MEC_HPD_SIZE 4096 #define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L @@ -574,8 +576,12 @@ static int gfx_v9_4_3_init_cp_compute_microcode(struct amdgpu_device *adev, { int err; - err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw, - "amdgpu/%s_mec.bin", chip_name); + if (amdgpu_sriov_vf(adev)) + err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw, + "amdgpu/%s_sjt_mec.bin", chip_name); + else + err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw, + "amdgpu/%s_mec.bin", chip_name); if (err) goto out; amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1); diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c b/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c index e019249883fb..194026e9be33 100644 --- a/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c @@ -40,10 +40,12 @@ static void hdp_v4_0_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring) { - if (!ring || !ring->funcs->emit_wreg) + if 
(!ring || !ring->funcs->emit_wreg) { WREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0); - else + RREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2); + } else { amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0); + } } static void hdp_v4_0_invalidate_hdp(struct amdgpu_device *adev, @@ -54,11 +56,13 @@ static void hdp_v4_0_invalidate_hdp(struct amdgpu_device *adev, amdgpu_ip_version(adev, HDP_HWIP, 0) == IP_VERSION(4, 4, 5)) return; - if (!ring || !ring->funcs->emit_wreg) + if (!ring || !ring->funcs->emit_wreg) { WREG32_SOC15_NO_KIQ(HDP, 0, mmHDP_READ_CACHE_INVALIDATE, 1); - else + RREG32_SOC15_NO_KIQ(HDP, 0, mmHDP_READ_CACHE_INVALIDATE); + } else { amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET( HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1); + } } static void hdp_v4_0_query_ras_error_count(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c b/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c index ed7facacf2fe..d3962d469088 100644 --- a/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c +++ b/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c @@ -31,10 +31,12 @@ static void hdp_v5_0_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring) { - if (!ring || !ring->funcs->emit_wreg) + if (!ring || !ring->funcs->emit_wreg) { WREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0); - else + RREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2); + } else { amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0); + } } static void hdp_v5_0_invalidate_hdp(struct amdgpu_device *adev, @@ -42,6 +44,7 @@ static void hdp_v5_0_invalidate_hdp(struct amdgpu_device *adev, { if (!ring || !ring->funcs->emit_wreg) { WREG32_SOC15_NO_KIQ(HDP, 0, mmHDP_READ_CACHE_INVALIDATE, 1); + RREG32_SOC15_NO_KIQ(HDP, 0, mmHDP_READ_CACHE_INVALIDATE); } else { amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET( HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1); diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v5_2.c b/drivers/gpu/drm/amd/amdgpu/hdp_v5_2.c index 29c3484ae1f1..f52552c5fa27 100644 --- a/drivers/gpu/drm/amd/amdgpu/hdp_v5_2.c +++ b/drivers/gpu/drm/amd/amdgpu/hdp_v5_2.c @@ -31,13 +31,15 @@ static void hdp_v5_2_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring) { - if (!ring || !ring->funcs->emit_wreg) + if (!ring || !ring->funcs->emit_wreg) { WREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0); - else + RREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2); + } else { amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0); + } } static void hdp_v5_2_update_mem_power_gating(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v6_0.c b/drivers/gpu/drm/amd/amdgpu/hdp_v6_0.c index 33736d361dd0..6948fe9956ce 100644 --- a/drivers/gpu/drm/amd/amdgpu/hdp_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/hdp_v6_0.c @@ -34,10 +34,12 @@ static void hdp_v6_0_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring) { - if (!ring || !ring->funcs->emit_wreg) + if (!ring || !ring->funcs->emit_wreg) { WREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0); - else + RREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2); + } else { amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0); + } } static void 
hdp_v6_0_update_clock_gating(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v7_0.c b/drivers/gpu/drm/amd/amdgpu/hdp_v7_0.c index 1c99bb09e2a1..63820329f67e 100644 --- a/drivers/gpu/drm/amd/amdgpu/hdp_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/hdp_v7_0.c @@ -31,10 +31,12 @@ static void hdp_v7_0_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring) { - if (!ring || !ring->funcs->emit_wreg) + if (!ring || !ring->funcs->emit_wreg) { WREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0); - else + RREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2); + } else { amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0); + } } static void hdp_v7_0_update_clock_gating(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c index 7319299f25ae..03b8b7cd5229 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c @@ -604,7 +604,7 @@ static void jpeg_v1_0_set_irq_funcs(struct amdgpu_device *adev) static void jpeg_v1_0_ring_begin_use(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; - bool set_clocks = !cancel_delayed_work_sync(&adev->jpeg.idle_work); + bool set_clocks = !cancel_delayed_work_sync(&adev->vcn.idle_work); int cnt = 0; mutex_lock(&adev->vcn.vcn1_jpeg1_workaround); diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v4_1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v4_1_0.c index 0fbc3be81f14..f2ab5001b492 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v4_1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v4_1_0.c @@ -108,7 +108,7 @@ mmhub_v4_1_0_print_l2_protection_fault_status(struct amdgpu_device *adev, dev_err(adev->dev, "MMVM_L2_PROTECTION_FAULT_STATUS_LO32:0x%08X\n", status); - switch (adev->ip_versions[MMHUB_HWIP][0]) { + switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) { case IP_VERSION(4, 1, 0): mmhub_cid = mmhub_client_ids_v4_1_0[cid][rw]; break; diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c index b1b57dcc5a73..d1032e9992b4 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c @@ -271,8 +271,19 @@ const struct nbio_hdp_flush_reg nbio_v7_0_hdp_flush_reg = { .ref_and_mask_sdma1 = GPU_HDP_FLUSH_DONE__SDMA1_MASK, }; +#define regRCC_DEV0_EPF6_STRAP4 0xd304 +#define regRCC_DEV0_EPF6_STRAP4_BASE_IDX 5 + static void nbio_v7_0_init_registers(struct amdgpu_device *adev) { + uint32_t data; + + switch (amdgpu_ip_version(adev, NBIO_HWIP, 0)) { + case IP_VERSION(2, 5, 0): + data = RREG32_SOC15(NBIO, 0, regRCC_DEV0_EPF6_STRAP4) & ~BIT(23); + WREG32_SOC15(NBIO, 0, regRCC_DEV0_EPF6_STRAP4, data); + break; + } } #define MMIO_REG_HOLE_OFFSET (0x80000 - PAGE_SIZE) diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_11.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_11.c index 814ab59fdd4a..41421da63a08 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_11.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_11.c @@ -275,7 +275,7 @@ static void nbio_v7_11_init_registers(struct amdgpu_device *adev) if (def != data) WREG32_SOC15(NBIO, 0, regBIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3, data); - switch (adev->ip_versions[NBIO_HWIP][0]) { + switch (amdgpu_ip_version(adev, NBIO_HWIP, 0)) { case IP_VERSION(7, 11, 0): case IP_VERSION(7, 11, 1): case IP_VERSION(7, 11, 2): diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_7.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_7.c index 1ac730328516..3fb6d2aa7e3b 100644 --- 
a/drivers/gpu/drm/amd/amdgpu/nbio_v7_7.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_7.c @@ -247,7 +247,7 @@ static void nbio_v7_7_init_registers(struct amdgpu_device *adev) if (def != data) WREG32_SOC15(NBIO, 0, regBIF0_PCIE_MST_CTRL_3, data); - switch (adev->ip_versions[NBIO_HWIP][0]) { + switch (amdgpu_ip_version(adev, NBIO_HWIP, 0)) { case IP_VERSION(7, 7, 0): data = RREG32_SOC15(NBIO, 0, regRCC_DEV0_EPF5_STRAP4) & ~BIT(23); WREG32_SOC15(NBIO, 0, regRCC_DEV0_EPF5_STRAP4, data); diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c index 079131aeb2f7..3c8ab8698af8 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c @@ -1288,7 +1288,7 @@ static int uvd_v7_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p, struct amdgpu_job *job, struct amdgpu_ib *ib) { - struct amdgpu_ring *ring = to_amdgpu_ring(job->base.sched); + struct amdgpu_ring *ring = amdgpu_job_ring(job); unsigned i; /* No patching necessary for the first instance */ diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c index 723f1220e1cc..e5324c5bc6c7 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c @@ -1423,6 +1423,7 @@ err: static int kfd_fill_gpu_cache_info_from_gfx_config(struct kfd_dev *kdev, + bool cache_line_size_missing, struct kfd_gpu_cache_info *pcache_info) { struct amdgpu_device *adev = kdev->adev; @@ -1437,6 +1438,8 @@ static int kfd_fill_gpu_cache_info_from_gfx_config(struct kfd_dev *kdev, CRAT_CACHE_FLAGS_SIMD_CACHE); pcache_info[i].num_cu_shared = adev->gfx.config.gc_num_tcp_per_wpg / 2; pcache_info[i].cache_line_size = adev->gfx.config.gc_tcp_cache_line_size; + if (cache_line_size_missing && !pcache_info[i].cache_line_size) + pcache_info[i].cache_line_size = 128; i++; } /* Scalar L1 Instruction Cache per SQC */ @@ -1449,6 +1452,8 @@ static int kfd_fill_gpu_cache_info_from_gfx_config(struct kfd_dev *kdev, CRAT_CACHE_FLAGS_SIMD_CACHE); pcache_info[i].num_cu_shared = adev->gfx.config.gc_num_sqc_per_wgp * 2; pcache_info[i].cache_line_size = adev->gfx.config.gc_instruction_cache_line_size; + if (cache_line_size_missing && !pcache_info[i].cache_line_size) + pcache_info[i].cache_line_size = 128; i++; } /* Scalar L1 Data Cache per SQC */ @@ -1460,6 +1465,8 @@ static int kfd_fill_gpu_cache_info_from_gfx_config(struct kfd_dev *kdev, CRAT_CACHE_FLAGS_SIMD_CACHE); pcache_info[i].num_cu_shared = adev->gfx.config.gc_num_sqc_per_wgp * 2; pcache_info[i].cache_line_size = adev->gfx.config.gc_scalar_data_cache_line_size; + if (cache_line_size_missing && !pcache_info[i].cache_line_size) + pcache_info[i].cache_line_size = 64; i++; } /* GL1 Data Cache per SA */ @@ -1472,7 +1479,8 @@ static int kfd_fill_gpu_cache_info_from_gfx_config(struct kfd_dev *kdev, CRAT_CACHE_FLAGS_DATA_CACHE | CRAT_CACHE_FLAGS_SIMD_CACHE); pcache_info[i].num_cu_shared = adev->gfx.config.max_cu_per_sh; - pcache_info[i].cache_line_size = 0; + if (cache_line_size_missing) + pcache_info[i].cache_line_size = 128; i++; } /* L2 Data Cache per GPU (Total Tex Cache) */ @@ -1484,6 +1492,8 @@ static int kfd_fill_gpu_cache_info_from_gfx_config(struct kfd_dev *kdev, CRAT_CACHE_FLAGS_SIMD_CACHE); pcache_info[i].num_cu_shared = adev->gfx.config.max_cu_per_sh; pcache_info[i].cache_line_size = adev->gfx.config.gc_tcc_cache_line_size; + if (cache_line_size_missing && !pcache_info[i].cache_line_size) + pcache_info[i].cache_line_size = 128; i++; } /* L3 Data Cache per GPU */ @@ -1494,7 +1504,7 @@ static int 
kfd_fill_gpu_cache_info_from_gfx_config(struct kfd_dev *kdev, CRAT_CACHE_FLAGS_DATA_CACHE | CRAT_CACHE_FLAGS_SIMD_CACHE); pcache_info[i].num_cu_shared = adev->gfx.config.max_cu_per_sh; - pcache_info[i].cache_line_size = 0; + pcache_info[i].cache_line_size = 64; i++; } return i; @@ -1510,6 +1520,8 @@ static int kfd_fill_gpu_cache_info_from_gfx_config_v2(struct kfd_dev *kdev, if (adev->gfx.config.gc_tcp_size_per_cu) { pcache_info[i].cache_size = adev->gfx.config.gc_tcp_size_per_cu; pcache_info[i].cache_level = 1; + /* Cacheline size not available in IP discovery for gc943,gc944 */ + pcache_info[i].cache_line_size = 128; pcache_info[i].flags = (CRAT_CACHE_FLAGS_ENABLED | CRAT_CACHE_FLAGS_DATA_CACHE | CRAT_CACHE_FLAGS_SIMD_CACHE); @@ -1521,6 +1533,7 @@ static int kfd_fill_gpu_cache_info_from_gfx_config_v2(struct kfd_dev *kdev, pcache_info[i].cache_size = adev->gfx.config.gc_l1_instruction_cache_size_per_sqc; pcache_info[i].cache_level = 1; + pcache_info[i].cache_line_size = 64; pcache_info[i].flags = (CRAT_CACHE_FLAGS_ENABLED | CRAT_CACHE_FLAGS_INST_CACHE | CRAT_CACHE_FLAGS_SIMD_CACHE); @@ -1531,6 +1544,7 @@ static int kfd_fill_gpu_cache_info_from_gfx_config_v2(struct kfd_dev *kdev, if (adev->gfx.config.gc_l1_data_cache_size_per_sqc) { pcache_info[i].cache_size = adev->gfx.config.gc_l1_data_cache_size_per_sqc; pcache_info[i].cache_level = 1; + pcache_info[i].cache_line_size = 64; pcache_info[i].flags = (CRAT_CACHE_FLAGS_ENABLED | CRAT_CACHE_FLAGS_DATA_CACHE | CRAT_CACHE_FLAGS_SIMD_CACHE); @@ -1541,6 +1555,7 @@ static int kfd_fill_gpu_cache_info_from_gfx_config_v2(struct kfd_dev *kdev, if (adev->gfx.config.gc_tcc_size) { pcache_info[i].cache_size = adev->gfx.config.gc_tcc_size; pcache_info[i].cache_level = 2; + pcache_info[i].cache_line_size = 128; pcache_info[i].flags = (CRAT_CACHE_FLAGS_ENABLED | CRAT_CACHE_FLAGS_DATA_CACHE | CRAT_CACHE_FLAGS_SIMD_CACHE); @@ -1551,6 +1566,7 @@ static int kfd_fill_gpu_cache_info_from_gfx_config_v2(struct kfd_dev *kdev, if (adev->gmc.mall_size) { pcache_info[i].cache_size = adev->gmc.mall_size / 1024; pcache_info[i].cache_level = 3; + pcache_info[i].cache_line_size = 64; pcache_info[i].flags = (CRAT_CACHE_FLAGS_ENABLED | CRAT_CACHE_FLAGS_DATA_CACHE | CRAT_CACHE_FLAGS_SIMD_CACHE); @@ -1563,6 +1579,7 @@ static int kfd_fill_gpu_cache_info_from_gfx_config_v2(struct kfd_dev *kdev, int kfd_get_gpu_cache_info(struct kfd_node *kdev, struct kfd_gpu_cache_info **pcache_info) { int num_of_cache_types = 0; + bool cache_line_size_missing = false; switch (kdev->adev->asic_type) { case CHIP_KAVERI: @@ -1686,10 +1703,17 @@ int kfd_get_gpu_cache_info(struct kfd_node *kdev, struct kfd_gpu_cache_info **pc case IP_VERSION(11, 5, 0): case IP_VERSION(11, 5, 1): case IP_VERSION(11, 5, 2): + /* Cacheline size not available in IP discovery for gc11. 
+ * use kfd_fill_gpu_cache_info_from_gfx_config to hard-code it + */ + cache_line_size_missing = true; + fallthrough; case IP_VERSION(12, 0, 0): case IP_VERSION(12, 0, 1): num_of_cache_types = - kfd_fill_gpu_cache_info_from_gfx_config(kdev->kfd, *pcache_info); + kfd_fill_gpu_cache_info_from_gfx_config(kdev->kfd, + cache_line_size_missing, + *pcache_info); break; default: *pcache_info = dummy_cache_info; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index 956198da7859..9b51dd75fefc 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -235,6 +235,9 @@ static void kfd_device_info_init(struct kfd_dev *kfd, */ kfd->device_info.needs_pci_atomics = true; kfd->device_info.no_atomic_fw_version = kfd->adev->gfx.rs64_enable ? 509 : 0; + } else if (gc_version < IP_VERSION(13, 0, 0)) { + kfd->device_info.needs_pci_atomics = true; + kfd->device_info.no_atomic_fw_version = 2090; } else { kfd->device_info.needs_pci_atomics = true; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index c79fe9069e22..16b5daaa272f 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -207,6 +207,21 @@ static int add_queue_mes(struct device_queue_manager *dqm, struct queue *q, if (!down_read_trylock(&adev->reset_domain->sem)) return -EIO; + if (!pdd->proc_ctx_cpu_ptr) { + r = amdgpu_amdkfd_alloc_gtt_mem(adev, + AMDGPU_MES_PROC_CTX_SIZE, + &pdd->proc_ctx_bo, + &pdd->proc_ctx_gpu_addr, + &pdd->proc_ctx_cpu_ptr, + false); + if (r) { + dev_err(adev->dev, + "failed to allocate process context bo\n"); + return r; + } + memset(pdd->proc_ctx_cpu_ptr, 0, AMDGPU_MES_PROC_CTX_SIZE); + } + memset(&queue_input, 0x0, sizeof(struct mes_add_queue_input)); queue_input.process_id = qpd->pqm->process->pasid; queue_input.page_table_base_addr = qpd->page_table_base; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c index eacfeb32f35d..4b275937d05e 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c @@ -306,7 +306,7 @@ svm_migrate_copy_to_vram(struct kfd_node *node, struct svm_range *prange, spage = migrate_pfn_to_page(migrate->src[i]); if (spage && !is_zone_device_page(spage)) { src[i] = dma_map_page(dev, spage, 0, PAGE_SIZE, - DMA_TO_DEVICE); + DMA_BIDIRECTIONAL); r = dma_mapping_error(dev, src[i]); if (r) { dev_err(dev, "%s: fail %d dma_map_page\n", @@ -629,7 +629,7 @@ svm_migrate_copy_to_ram(struct amdgpu_device *adev, struct svm_range *prange, goto out_oom; } - dst[i] = dma_map_page(dev, dpage, 0, PAGE_SIZE, DMA_FROM_DEVICE); + dst[i] = dma_map_page(dev, dpage, 0, PAGE_SIZE, DMA_BIDIRECTIONAL); r = dma_mapping_error(dev, dst[i]); if (r) { dev_err(adev->dev, "%s: fail %d dma_map_page\n", __func__, r); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c index 87cd52cf4ee9..d0ee173acf82 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c @@ -1076,7 +1076,8 @@ static void kfd_process_destroy_pdds(struct kfd_process *p) kfd_free_process_doorbells(pdd->dev->kfd, pdd); - if (pdd->dev->kfd->shared_resources.enable_mes) + if (pdd->dev->kfd->shared_resources.enable_mes && + pdd->proc_ctx_cpu_ptr) amdgpu_amdkfd_free_gtt_mem(pdd->dev->adev, &pdd->proc_ctx_bo); /* @@ -1608,7 +1609,6 @@ struct kfd_process_device
*kfd_create_process_device_data(struct kfd_node *dev, struct kfd_process *p) { struct kfd_process_device *pdd = NULL; - int retval = 0; if (WARN_ON_ONCE(p->n_pdds >= MAX_GPU_INSTANCE)) return NULL; @@ -1632,21 +1632,6 @@ struct kfd_process_device *kfd_create_process_device_data(struct kfd_node *dev, pdd->user_gpu_id = dev->id; atomic64_set(&pdd->evict_duration_counter, 0); - if (dev->kfd->shared_resources.enable_mes) { - retval = amdgpu_amdkfd_alloc_gtt_mem(dev->adev, - AMDGPU_MES_PROC_CTX_SIZE, - &pdd->proc_ctx_bo, - &pdd->proc_ctx_gpu_addr, - &pdd->proc_ctx_cpu_ptr, - false); - if (retval) { - dev_err(dev->adev->dev, - "failed to allocate process context bo\n"); - goto err_free_pdd; - } - memset(pdd->proc_ctx_cpu_ptr, 0, AMDGPU_MES_PROC_CTX_SIZE); - } - p->pdds[p->n_pdds++] = pdd; if (kfd_dbg_is_per_vmid_supported(pdd->dev)) pdd->spi_dbg_override = pdd->dev->kfd2kgd->disable_debug_trap( @@ -1658,10 +1643,6 @@ struct kfd_process_device *kfd_create_process_device_data(struct kfd_node *dev, idr_init(&pdd->alloc_idr); return pdd; - -err_free_pdd: - kfree(pdd); - return NULL; } /** diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c index c76db22a1000..59b92d66e958 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c @@ -212,13 +212,17 @@ static void pqm_clean_queue_resource(struct process_queue_manager *pqm, void pqm_uninit(struct process_queue_manager *pqm) { struct process_queue_node *pqn, *next; - struct kfd_process_device *pdd; list_for_each_entry_safe(pqn, next, &pqm->queues, process_queue_list) { if (pqn->q) { - pdd = kfd_get_process_device_data(pqn->q->device, pqm->process); - kfd_queue_unref_bo_vas(pdd, &pqn->q->properties); - kfd_queue_release_buffers(pdd, &pqn->q->properties); + struct kfd_process_device *pdd = kfd_get_process_device_data(pqn->q->device, + pqm->process); + if (pdd) { + kfd_queue_unref_bo_vas(pdd, &pqn->q->properties); + kfd_queue_release_buffers(pdd, &pqn->q->properties); + } else { + WARN_ON(!pdd); + } pqm_clean_queue_resource(pqm, pqn); } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index f0a6816709ca..48be917e7bc5 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -3481,6 +3481,8 @@ static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector) caps->aux_support = false; else if (amdgpu_backlight == 1) caps->aux_support = true; + if (caps->aux_support) + aconnector->dc_link->backlight_control_type = BACKLIGHT_CONTROL_AMD_AUX; luminance_range = &conn_base->display_info.luminance_range; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c index b0fea0856866..6cbbb71d752b 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c @@ -907,14 +907,14 @@ dm_helpers_probe_acpi_edid(void *data, u8 *buf, unsigned int block, size_t len) struct drm_connector *connector = data; struct acpi_device *acpidev = ACPI_COMPANION(connector->dev->dev); unsigned char start = block * EDID_LENGTH; - void *edid; + struct edid *edid; int r; if (!acpidev) return -ENODEV; /* fetch the entire edid from BIOS */ - r = acpi_video_get_edid(acpidev, ACPI_VIDEO_DISPLAY_LCD, -1, &edid); + r = acpi_video_get_edid(acpidev, ACPI_VIDEO_DISPLAY_LCD, 
-1, (void *)&edid); if (r < 0) { drm_dbg(connector->dev, "Failed to get EDID from ACPI: %d\n", r); return r; @@ -924,7 +924,14 @@ dm_helpers_probe_acpi_edid(void *data, u8 *buf, unsigned int block, size_t len) goto cleanup; } - memcpy(buf, edid + start, len); + /* sanity check */ + if (edid->revision < 4 || !(edid->input & DRM_EDID_INPUT_DIGITAL) || + (edid->input & DRM_EDID_DIGITAL_TYPE_MASK) == DRM_EDID_DIGITAL_TYPE_UNDEF) { + r = -EINVAL; + goto cleanup; + } + + memcpy(buf, (void *)edid + start, len); r = 0; cleanup: diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 1dd26d5df6b9..49fe7dcf9372 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -6109,3 +6109,21 @@ struct dc_power_profile dc_get_power_profile_for_dc_state(const struct dc_state profile.power_level = dc->res_pool->funcs->get_power_profile(context); return profile; } + +/* + ********************************************************************************** + * dc_get_det_buffer_size_from_state() - extracts detile buffer size from dc state + * + * Called when DM wants to log detile buffer size from dc_state + * + ********************************************************************************** + */ +unsigned int dc_get_det_buffer_size_from_state(const struct dc_state *context) +{ + struct dc *dc = context->clk_mgr->ctx->dc; + + if (dc->res_pool->funcs->get_det_buffer_size) + return dc->res_pool->funcs->get_det_buffer_size(context); + else + return 0; +} diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index 619fad17de55..626f75b6ad00 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -2094,7 +2094,8 @@ int resource_get_odm_slice_dst_width(struct pipe_ctx *otg_master, count = resource_get_odm_slice_count(otg_master); h_active = timing->h_addressable + timing->h_border_left + - timing->h_border_right; + timing->h_border_right + + otg_master->hblank_borrow; width = h_active / count; if (otg_master->stream_res.tg) @@ -4027,6 +4028,41 @@ fail: } /** + * decide_hblank_borrow - Decides the horizontal blanking borrow value for a given pipe context. + * @pipe_ctx: Pointer to the pipe context structure. + * + * This function calculates the horizontal blanking borrow value for a given pipe context based on the + * display stream compression (DSC) configuration. If the horizontal active pixels (hactive) are less + * than the total width of the DSC slices, it sets the hblank_borrow value to the difference. If the + * total horizontal timing minus the hblank_borrow value is less than 32, it resets the hblank_borrow + * value to 0. 
+ */ +static void decide_hblank_borrow(struct pipe_ctx *pipe_ctx) +{ + uint32_t hactive; + uint32_t ceil_slice_width; + struct dc_stream_state *stream = NULL; + + if (!pipe_ctx) + return; + + stream = pipe_ctx->stream; + + if (stream->timing.flags.DSC) { + hactive = stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right; + + /* Assume if determined slices does not divide Hactive evenly, Hborrow is needed for padding*/ + if (hactive % stream->timing.dsc_cfg.num_slices_h != 0) { + ceil_slice_width = (hactive / stream->timing.dsc_cfg.num_slices_h) + 1; + pipe_ctx->hblank_borrow = ceil_slice_width * stream->timing.dsc_cfg.num_slices_h - hactive; + + if (stream->timing.h_total - hactive - pipe_ctx->hblank_borrow < 32) + pipe_ctx->hblank_borrow = 0; + } + } +} + +/** * dc_validate_global_state() - Determine if hardware can support a given state * * @dc: dc struct for this driver @@ -4064,6 +4100,10 @@ enum dc_status dc_validate_global_state( if (pipe_ctx->stream != stream) continue; + /* Decide whether hblank borrow is needed and save it in pipe_ctx */ + if (dc->debug.enable_hblank_borrow) + decide_hblank_borrow(pipe_ctx); + if (dc->res_pool->funcs->patch_unknown_plane_state && pipe_ctx->plane_state && pipe_ctx->plane_state->tiling_info.gfx9.swizzle == DC_SW_UNKNOWN) { diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 104051935884..e9b9126c0401 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -290,6 +290,7 @@ struct dc_caps { uint16_t subvp_vertical_int_margin_us; bool seamless_odm; uint32_t max_v_total; + bool vtotal_limited_by_fp2; uint32_t max_disp_clock_khz_at_vmin; uint8_t subvp_drr_vblank_start_margin_us; bool cursor_not_scaled; @@ -1068,6 +1069,7 @@ struct dc_debug_options { unsigned int scale_to_sharpness_policy; bool skip_full_updated_if_possible; unsigned int enable_oled_edp_power_up_opt; + bool enable_hblank_borrow; }; @@ -2550,6 +2552,8 @@ struct dc_power_profile { struct dc_power_profile dc_get_power_profile_for_dc_state(const struct dc_state *context); +unsigned int dc_get_det_buffer_size_from_state(const struct dc_state *context); + /* DSC Interfaces */ #include "dc_dsc.h" diff --git a/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c b/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c index c8d8e335fa37..0e310fd48b5c 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c +++ b/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c @@ -120,7 +120,7 @@ void translate_SPL_in_params_from_pipe_ctx(struct pipe_ctx *pipe_ctx, struct spl spl_in->odm_slice_index = resource_get_odm_slice_index(pipe_ctx); // Make spl input basic out info output_size width point to stream h active spl_in->basic_out.output_size.width = - stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right; + stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right + pipe_ctx->hblank_borrow; // Make spl input basic out info output_size height point to v active spl_in->basic_out.output_size.height = stream->timing.v_addressable + stream->timing.v_border_bottom + stream->timing.v_border_top; diff --git a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c index d851c081e376..8dabb1ac0b68 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c @@ -1222,6 +1222,7 @@ static dml_bool_t 
CalculatePrefetchSchedule(struct display_mode_lib_scratch_st * s->dst_y_prefetch_oto = s->Tvm_oto_lines + 2 * s->Tr0_oto_lines + s->Lsw_oto; s->dst_y_prefetch_equ = p->VStartup - (*p->TSetup + dml_max(p->TWait + p->TCalc, *p->Tdmdl)) / s->LineTime - (*p->DSTYAfterScaler + (dml_float_t) *p->DSTXAfterScaler / (dml_float_t)p->myPipe->HTotal); + s->dst_y_prefetch_equ = dml_min(s->dst_y_prefetch_equ, 63.75); // limit to the reg limit of U6.2 for DST_Y_PREFETCH #ifdef __DML_VBA_DEBUG__ dml_print("DML::%s: HTotal = %u\n", __func__, p->myPipe->HTotal); diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c index 138b4b1e42ed..c6a5a8614679 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c @@ -339,11 +339,22 @@ void dml21_apply_soc_bb_overrides(struct dml2_initialize_instance_in_out *dml_in // } } +static unsigned int calc_max_hardware_v_total(const struct dc_stream_state *stream) +{ + unsigned int max_hw_v_total = stream->ctx->dc->caps.max_v_total; + + if (stream->ctx->dc->caps.vtotal_limited_by_fp2) { + max_hw_v_total -= stream->timing.v_front_porch + 1; + } + + return max_hw_v_total; +} + static void populate_dml21_timing_config_from_stream_state(struct dml2_timing_cfg *timing, struct dc_stream_state *stream, struct dml2_context *dml_ctx) { - unsigned int hblank_start, vblank_start; + unsigned int hblank_start, vblank_start, min_hardware_refresh_in_uhz; timing->h_active = stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right; timing->v_active = stream->timing.v_addressable + stream->timing.v_border_bottom + stream->timing.v_border_top; @@ -371,11 +382,23 @@ static void populate_dml21_timing_config_from_stream_state(struct dml2_timing_cf - stream->timing.v_border_top - stream->timing.v_border_bottom; timing->drr_config.enabled = stream->ignore_msa_timing_param; - timing->drr_config.min_refresh_uhz = stream->timing.min_refresh_in_uhz; timing->drr_config.drr_active_variable = stream->vrr_active_variable; timing->drr_config.drr_active_fixed = stream->vrr_active_fixed; timing->drr_config.disallowed = !stream->allow_freesync; + /* limit min refresh rate to DC cap */ + min_hardware_refresh_in_uhz = stream->timing.min_refresh_in_uhz; + if (stream->ctx->dc->caps.max_v_total != 0) { + min_hardware_refresh_in_uhz = div64_u64((stream->timing.pix_clk_100hz * 100000000ULL), + (stream->timing.h_total * (long long)calc_max_hardware_v_total(stream))); + } + + if (stream->timing.min_refresh_in_uhz > min_hardware_refresh_in_uhz) { + timing->drr_config.min_refresh_uhz = stream->timing.min_refresh_in_uhz; + } else { + timing->drr_config.min_refresh_uhz = min_hardware_refresh_in_uhz; + } + if (dml_ctx->config.callbacks.get_max_flickerless_instant_vtotal_increase && stream->ctx->dc->config.enable_fpo_flicker_detection == 1) timing->drr_config.max_instant_vtotal_delta = dml_ctx->config.callbacks.get_max_flickerless_instant_vtotal_increase(stream, false); @@ -422,6 +445,21 @@ static void populate_dml21_timing_config_from_stream_state(struct dml2_timing_cf timing->vblank_nom = timing->v_total - timing->v_active; } +/** + * adjust_dml21_hblank_timing_config_from_pipe_ctx - Adjusts the horizontal blanking timing configuration + * based on the pipe context. + * @timing: Pointer to the dml2_timing_cfg structure to be adjusted. 
+ * @pipe: Pointer to the pipe_ctx structure containing the horizontal blanking borrow value. + * + * This function modifies the horizontal active and blank end timings by adding and subtracting + * the horizontal blanking borrow value from the pipe context, respectively. + */ +static void adjust_dml21_hblank_timing_config_from_pipe_ctx(struct dml2_timing_cfg *timing, struct pipe_ctx *pipe) +{ + timing->h_active += pipe->hblank_borrow; + timing->h_blank_end -= pipe->hblank_borrow; +} + static void populate_dml21_output_config_from_stream_state(struct dml2_link_output_cfg *output, struct dc_stream_state *stream, const struct pipe_ctx *pipe) { @@ -709,6 +747,7 @@ static const struct scaler_data *get_scaler_data_for_plane( temp_pipe->plane_state = pipe->plane_state; temp_pipe->plane_res.scl_data.taps = pipe->plane_res.scl_data.taps; temp_pipe->stream_res = pipe->stream_res; + temp_pipe->hblank_borrow = pipe->hblank_borrow; dml_ctx->config.callbacks.build_scaling_params(temp_pipe); break; } @@ -973,6 +1012,7 @@ bool dml21_map_dc_state_into_dml_display_cfg(const struct dc *in_dc, struct dc_s ASSERT(disp_cfg_stream_location >= 0 && disp_cfg_stream_location <= __DML2_WRAPPER_MAX_STREAMS_PLANES__); populate_dml21_timing_config_from_stream_state(&dml_dispcfg->stream_descriptors[disp_cfg_stream_location].timing, context->streams[stream_index], dml_ctx); + adjust_dml21_hblank_timing_config_from_pipe_ctx(&dml_dispcfg->stream_descriptors[disp_cfg_stream_location].timing, &context->res_ctx.pipe_ctx[stream_index]); populate_dml21_output_config_from_stream_state(&dml_dispcfg->stream_descriptors[disp_cfg_stream_location].output, context->streams[stream_index], &context->res_ctx.pipe_ctx[stream_index]); populate_dml21_stream_overrides_from_stream_state(&dml_dispcfg->stream_descriptors[disp_cfg_stream_location], context->streams[stream_index]); @@ -1111,12 +1151,12 @@ void dml21_populate_pipe_ctx_dlg_params(struct dml2_context *dml_ctx, struct dc_ struct dc_crtc_timing *timing = &pipe_ctx->stream->timing; union dml2_global_sync_programming *global_sync = &stream_programming->global_sync; - hactive = timing->h_addressable + timing->h_border_left + timing->h_border_right; + hactive = timing->h_addressable + timing->h_border_left + timing->h_border_right + pipe_ctx->hblank_borrow; vactive = timing->v_addressable + timing->v_border_bottom + timing->v_border_top; hblank_start = pipe_ctx->stream->timing.h_total - pipe_ctx->stream->timing.h_front_porch; vblank_start = pipe_ctx->stream->timing.v_total - pipe_ctx->stream->timing.v_front_porch; - hblank_end = hblank_start - timing->h_addressable - timing->h_border_left - timing->h_border_right; + hblank_end = hblank_start - timing->h_addressable - timing->h_border_left - timing->h_border_right - pipe_ctx->hblank_borrow; vblank_end = vblank_start - timing->v_addressable - timing->v_border_top - timing->v_border_bottom; if (dml_ctx->config.svp_pstate.callbacks.get_pipe_subvp_type(context, pipe_ctx) == SUBVP_PHANTOM) { diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c index d7f8b2dcaa6b..fa11f075d1f9 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c @@ -1049,7 +1049,8 @@ void dcn32_update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable) } /* Enable DSC hw block */ - dsc_cfg.pic_width = (stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right) / opp_cnt; + dsc_cfg.pic_width = 
(stream->timing.h_addressable + pipe_ctx->hblank_borrow + + stream->timing.h_border_left + stream->timing.h_border_right) / opp_cnt; dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top + stream->timing.v_border_bottom; dsc_cfg.pixel_encoding = stream->timing.pixel_encoding; dsc_cfg.color_depth = stream->timing.display_color_depth; diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c index 5de11e2837c0..307782592789 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c @@ -820,6 +820,7 @@ enum dc_status dcn401_enable_stream_timing( int opp_cnt = 1; int opp_inst[MAX_PIPES] = {0}; struct pipe_ctx *opp_heads[MAX_PIPES] = {0}; + struct dc_crtc_timing patched_crtc_timing = stream->timing; bool manual_mode; unsigned int tmds_div = PIXEL_RATE_DIV_NA; unsigned int unused_div = PIXEL_RATE_DIV_NA; @@ -874,9 +875,13 @@ enum dc_status dcn401_enable_stream_timing( if (dc->hwseq->funcs.PLAT_58856_wa && (!dc_is_dp_signal(stream->signal))) dc->hwseq->funcs.PLAT_58856_wa(context, pipe_ctx); + /* if we are borrowing from hblank, h_addressable needs to be adjusted */ + if (dc->debug.enable_hblank_borrow) + patched_crtc_timing.h_addressable = patched_crtc_timing.h_addressable + pipe_ctx->hblank_borrow; + pipe_ctx->stream_res.tg->funcs->program_timing( pipe_ctx->stream_res.tg, - &stream->timing, + &patched_crtc_timing, pipe_ctx->pipe_dlg_param.vready_offset, pipe_ctx->pipe_dlg_param.vstartup_start, pipe_ctx->pipe_dlg_param.vupdate_offset, diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h index 8597e866bfe6..2edd5b38ce4f 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h +++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h @@ -219,6 +219,7 @@ struct resource_funcs { * Get indicator of power from a context that went through full validation */ int (*get_power_profile)(const struct dc_state *context); + unsigned int (*get_det_buffer_size)(const struct dc_state *context); }; struct audio_support{ @@ -477,6 +478,8 @@ struct pipe_ctx { /* subvp_index: only valid if the pipe is a SUBVP_MAIN*/ uint8_t subvp_index; struct pixel_rate_divider pixel_rate_divider; + /* pixels borrowed from hblank to hactive */ + uint8_t hblank_borrow; }; /* Data used for dynamic link encoder assignment. 
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c index 41cab9ad6885..5d66bfc7fe6e 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c +++ b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c @@ -808,7 +808,8 @@ void link_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable) enum optc_dsc_mode optc_dsc_mode; /* Enable DSC hw block */ - dsc_cfg.pic_width = (stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right) / opp_cnt; + dsc_cfg.pic_width = (stream->timing.h_addressable + pipe_ctx->hblank_borrow + + stream->timing.h_border_left + stream->timing.h_border_right) / opp_cnt; dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top + stream->timing.v_border_bottom; dsc_cfg.pixel_encoding = stream->timing.pixel_encoding; dsc_cfg.color_depth = stream->timing.display_color_depth; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c index 189d0c85872e..7a5b9aa5292c 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c @@ -1510,6 +1510,7 @@ bool dcn20_split_stream_for_odm( if (prev_odm_pipe->plane_state) { struct scaler_data *sd = &prev_odm_pipe->plane_res.scl_data; + struct output_pixel_processor *opp = next_odm_pipe->stream_res.opp; int new_width; /* HACTIVE halved for odm combine */ @@ -1543,7 +1544,28 @@ bool dcn20_split_stream_for_odm( sd->viewport_c.x += dc_fixpt_floor(dc_fixpt_mul_int( sd->ratios.horz_c, sd->h_active - sd->recout.x)); sd->recout.x = 0; + + /* + * When ODM is used in the YCbCr422 or 420 colour space, a split screen + * will be seen with the previous calculations since the extra left + * edge pixel is accounted for in fmt but not in viewport. + * + * The calculations below fix the split by accounting for + * that extra left edge pixel.
+ */ + if (opp && opp->funcs->opp_get_left_edge_extra_pixel_count + && opp->funcs->opp_get_left_edge_extra_pixel_count( + opp, next_odm_pipe->stream->timing.pixel_encoding, + resource_is_pipe_type(next_odm_pipe, OTG_MASTER)) == 1) { + sd->h_active += 1; + sd->recout.width += 1; + sd->viewport.x -= dc_fixpt_ceil(dc_fixpt_mul_int(sd->ratios.horz, 1)); + sd->viewport_c.x -= dc_fixpt_ceil(dc_fixpt_mul_int(sd->ratios.horz, 1)); + sd->viewport_c.width += dc_fixpt_ceil(dc_fixpt_mul_int(sd->ratios.horz, 1)); + sd->viewport.width += dc_fixpt_ceil(dc_fixpt_mul_int(sd->ratios.horz, 1)); + } } + if (!next_odm_pipe->top_pipe) next_odm_pipe->stream_res.opp = pool->opps[next_odm_pipe->pipe_idx]; else @@ -2132,6 +2154,7 @@ bool dcn20_fast_validate_bw( ASSERT(0); } } + /* Actual dsc count per stream dsc validation*/ if (!dcn20_validate_dsc(dc, context)) { context->bw_ctx.dml.vba.ValidationStatus[context->bw_ctx.dml.vba.soc.num_states] = diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c index cd31e4f16c14..bfd0eccbed28 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c @@ -2353,6 +2353,7 @@ static bool dcn30_resource_construct( dc->caps.dp_hdmi21_pcon_support = true; dc->caps.max_v_total = (1 << 15) - 1; + dc->caps.vtotal_limited_by_fp2 = true; /* read VBIOS LTTPR caps */ { diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c index 02af8b8f4d27..7baefc910a3d 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c @@ -1233,6 +1233,7 @@ static bool dcn302_resource_construct( dc->caps.extended_aux_timeout_support = true; dc->caps.dmcub_support = true; dc->caps.max_v_total = (1 << 15) - 1; + dc->caps.vtotal_limited_by_fp2 = true; /* Color pipeline capabilities */ dc->caps.color.dpp.dcn_arch = 1; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c index 7002a8dd358a..8a57d46ad15f 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c @@ -1178,6 +1178,7 @@ static bool dcn303_resource_construct( dc->caps.extended_aux_timeout_support = true; dc->caps.dmcub_support = true; dc->caps.max_v_total = (1 << 15) - 1; + dc->caps.vtotal_limited_by_fp2 = true; /* Color pipeline capabilities */ dc->caps.color.dpp.dcn_arch = 1; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c index c16cf1c8f7f9..54ec3d8e920c 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c @@ -1720,6 +1720,12 @@ int dcn31_populate_dml_pipes_from_context( return pipe_cnt; } +unsigned int dcn31_get_det_buffer_size( + const struct dc_state *context) +{ + return context->bw_ctx.dml.ip.det_buffer_size_kbytes; +} + void dcn31_calculate_wm_and_dlg( struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, @@ -1842,6 +1848,7 @@ static struct resource_funcs dcn31_res_pool_funcs = { .update_bw_bounding_box = dcn31_update_bw_bounding_box, .patch_unknown_plane_state = dcn20_patch_unknown_plane_state, 
.get_panel_config_defaults = dcn31_get_panel_config_defaults, + .get_det_buffer_size = dcn31_get_det_buffer_size, }; static struct clock_source *dcn30_clock_source_create( diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.h index 901436591ed4..551ad912f7be 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.h +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.h @@ -63,6 +63,9 @@ struct resource_pool *dcn31_create_resource_pool( const struct dc_init_data *init_data, struct dc *dc); +unsigned int dcn31_get_det_buffer_size( + const struct dc_state *context); + /*temp: B0 specific before switch to dcn313 headers*/ #ifndef regPHYPLLF_PIXCLK_RESYNC_CNTL #define regPHYPLLF_PIXCLK_RESYNC_CNTL 0x007e diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c index c0f48c78e968..2794473f2aff 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c @@ -1777,6 +1777,7 @@ static struct resource_funcs dcn314_res_pool_funcs = { .patch_unknown_plane_state = dcn20_patch_unknown_plane_state, .get_panel_config_defaults = dcn314_get_panel_config_defaults, .get_preferred_eng_id_dpia = dcn314_get_preferred_eng_id_dpia, + .get_det_buffer_size = dcn31_get_det_buffer_size, }; static struct clock_source *dcn30_clock_source_create( diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c index 6c3295259a81..4ee33eb3381d 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c @@ -1845,6 +1845,7 @@ static struct resource_funcs dcn315_res_pool_funcs = { .patch_unknown_plane_state = dcn20_patch_unknown_plane_state, .get_panel_config_defaults = dcn315_get_panel_config_defaults, .get_power_profile = dcn315_get_power_profile, + .get_det_buffer_size = dcn31_get_det_buffer_size, }; static bool dcn315_resource_construct( diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c index 6edaaadcb173..79eddbafe3c2 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c @@ -1719,6 +1719,7 @@ static struct resource_funcs dcn316_res_pool_funcs = { .update_bw_bounding_box = dcn316_update_bw_bounding_box, .patch_unknown_plane_state = dcn20_patch_unknown_plane_state, .get_panel_config_defaults = dcn316_get_panel_config_defaults, + .get_det_buffer_size = dcn31_get_det_buffer_size, }; static bool dcn316_resource_construct( diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c index 01d1a11d5545..12d247a7ec45 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c @@ -2189,6 +2189,7 @@ static bool dcn32_resource_construct( dc->caps.dmcub_support = true; dc->caps.seamless_odm = true; dc->caps.max_v_total = (1 << 15) - 1; + dc->caps.vtotal_limited_by_fp2 = true; /* Color pipeline capabilities */ dc->caps.color.dpp.dcn_arch = 1; @@ -2803,6 +2804,7 @@ struct pipe_ctx *dcn32_acquire_free_pipe_as_secondary_opp_head( 
free_pipe->plane_res.xfm = pool->transforms[free_pipe_idx]; free_pipe->plane_res.dpp = pool->dpps[free_pipe_idx]; free_pipe->plane_res.mpcc_inst = pool->dpps[free_pipe_idx]->inst; + free_pipe->hblank_borrow = otg_master->hblank_borrow; if (free_pipe->stream->timing.flags.DSC == 1) { dcn20_acquire_dsc(free_pipe->stream->ctx->dc, &new_ctx->res_ctx, diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c index 5cb74fd9cb7d..06b9479c8bd3 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c @@ -1742,6 +1742,7 @@ static bool dcn321_resource_construct( dc->caps.extended_aux_timeout_support = true; dc->caps.dmcub_support = true; dc->caps.max_v_total = (1 << 15) - 1; + dc->caps.vtotal_limited_by_fp2 = true; /* Color pipeline capabilities */ dc->caps.color.dpp.dcn_arch = 1; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c index 6cc2960b6104..89e2adcf2a28 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c @@ -1778,6 +1778,7 @@ static struct resource_funcs dcn35_res_pool_funcs = { .patch_unknown_plane_state = dcn20_patch_unknown_plane_state, .get_panel_config_defaults = dcn35_get_panel_config_defaults, .get_preferred_eng_id_dpia = dcn35_get_preferred_eng_id_dpia, + .get_det_buffer_size = dcn31_get_det_buffer_size, }; static bool dcn35_resource_construct( @@ -1849,6 +1850,7 @@ static bool dcn35_resource_construct( dc->caps.zstate_support = true; dc->caps.ips_support = true; dc->caps.max_v_total = (1 << 15) - 1; + dc->caps.vtotal_limited_by_fp2 = true; /* Color pipeline capabilities */ dc->caps.color.dpp.dcn_arch = 1; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c index d87e2641cda1..263a37c1cd3a 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c @@ -1757,6 +1757,7 @@ static struct resource_funcs dcn351_res_pool_funcs = { .patch_unknown_plane_state = dcn20_patch_unknown_plane_state, .get_panel_config_defaults = dcn35_get_panel_config_defaults, .get_preferred_eng_id_dpia = dcn351_get_preferred_eng_id_dpia, + .get_det_buffer_size = dcn31_get_det_buffer_size, }; static bool dcn351_resource_construct( @@ -1828,6 +1829,7 @@ static bool dcn351_resource_construct( dc->caps.zstate_support = true; dc->caps.ips_support = true; dc->caps.max_v_total = (1 << 15) - 1; + dc->caps.vtotal_limited_by_fp2 = true; /* Color pipeline capabilities */ dc->caps.color.dpp.dcn_arch = 1; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c index db93bac247c0..2a3dabfe3cea 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c @@ -1864,6 +1864,7 @@ static bool dcn401_resource_construct( dc->caps.extended_aux_timeout_support = true; dc->caps.dmcub_support = true; dc->caps.max_v_total = (1 << 15) - 1; + dc->caps.vtotal_limited_by_fp2 = true; if (ASICREV_IS_GC_12_0_1_A0(dc->ctx->asic_id.hw_internal_rev)) dc->caps.dcc_plane_width_limit = 7680; diff --git 
a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c index f980a84dceef..2b3964529539 100644 --- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c +++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c @@ -122,6 +122,17 @@ static unsigned int calc_duration_in_us_from_v_total( return duration_in_us; } +static unsigned int calc_max_hardware_v_total(const struct dc_stream_state *stream) +{ + unsigned int max_hw_v_total = stream->ctx->dc->caps.max_v_total; + + if (stream->ctx->dc->caps.vtotal_limited_by_fp2) { + max_hw_v_total -= stream->timing.v_front_porch + 1; + } + + return max_hw_v_total; +} + unsigned int mod_freesync_calc_v_total_from_refresh( const struct dc_stream_state *stream, unsigned int refresh_in_uhz) @@ -1016,7 +1027,7 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync, if (stream->ctx->dc->caps.max_v_total != 0 && stream->timing.h_total != 0) { min_hardware_refresh_in_uhz = div64_u64((stream->timing.pix_clk_100hz * 100000000ULL), - (stream->timing.h_total * (long long)stream->ctx->dc->caps.max_v_total)); + (stream->timing.h_total * (long long)calc_max_hardware_v_total(stream))); } /* Limit minimum refresh rate to what can be supported by hardware */ min_refresh_in_uhz = min_hardware_refresh_in_uhz > in_config->min_refresh_in_uhz ? diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h index 67a5de573943..d7acdd42d80f 100644 --- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h +++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h @@ -164,6 +164,7 @@ enum amd_pp_task { }; enum PP_SMC_POWER_PROFILE { + PP_SMC_POWER_PROFILE_UNKNOWN = -1, PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT = 0x0, PP_SMC_POWER_PROFILE_FULLSCREEN3D = 0x1, PP_SMC_POWER_PROFILE_POWERSAVING = 0x2, diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c index 136e8193867c..e8ae7681bf0a 100644 --- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c @@ -1361,7 +1361,11 @@ static ssize_t amdgpu_set_pp_mclk_od(struct device *dev, * create a custom set of heuristics, write a string of numbers to the file * starting with the number of the custom profile along with a setting * for each heuristic parameter. Due to differences across asic families - * the heuristic parameters vary from family to family. + * the heuristic parameters vary from family to family. Additionally, + * you can apply the custom heuristics to different clock domains. Each + * clock domain is considered a distinct operation, so if you modify the + * gfxclk heuristics and then the memclk heuristics, all of the + * custom heuristics will be retained until you switch to another profile.
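+ *
+ * As an illustrative sketch only (hypothetical values: profile numbers,
+ * clock domain ids, and parameter counts vary by ASIC, so read this file
+ * first to confirm them for your part), selecting the CUSTOM profile
+ * (commonly listed as 6) and updating only the gfxclk heuristics (clock
+ * domain 0 on the parts touched by this patch) could look like:
+ *
+ * echo "6 0 <p1> <p2> <p3> <p4> <p5> <p6> <p7> <p8> <p9>" > /sys/class/drm/card0/device/pp_power_profile_mode
+ *
+ * A later write selecting another clock domain (for example memclk) would
+ * leave the gfxclk values above in place, per the retention behavior
+ * described above.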
* */ diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c index b8355293518f..21bd635bcdfc 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c @@ -72,6 +72,10 @@ static int smu_set_power_limit(void *handle, uint32_t limit); static int smu_set_fan_speed_rpm(void *handle, uint32_t speed); static int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled); static int smu_set_mp1_state(void *handle, enum pp_mp1_state mp1_state); +static void smu_power_profile_mode_get(struct smu_context *smu, + enum PP_SMC_POWER_PROFILE profile_mode); +static void smu_power_profile_mode_put(struct smu_context *smu, + enum PP_SMC_POWER_PROFILE profile_mode); static int smu_sys_get_pp_feature_mask(void *handle, char *buf) @@ -760,6 +764,7 @@ static int smu_early_init(struct amdgpu_ip_block *ip_block) smu->smu_baco.platform_support = false; smu->smu_baco.maco_support = false; smu->user_dpm_profile.fan_mode = -1; + smu->power_profile_mode = PP_SMC_POWER_PROFILE_UNKNOWN; mutex_init(&smu->message_lock); @@ -1244,6 +1249,21 @@ static bool smu_is_workload_profile_available(struct smu_context *smu, return smu->workload_map && smu->workload_map[profile].valid_mapping; } +static void smu_init_power_profile(struct smu_context *smu) +{ + if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_UNKNOWN) { + if (smu->is_apu || + !smu_is_workload_profile_available( + smu, PP_SMC_POWER_PROFILE_FULLSCREEN3D)) + smu->power_profile_mode = + PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT; + else + smu->power_profile_mode = + PP_SMC_POWER_PROFILE_FULLSCREEN3D; + } + smu_power_profile_mode_get(smu, smu->power_profile_mode); +} + static int smu_sw_init(struct amdgpu_ip_block *ip_block) { struct amdgpu_device *adev = ip_block->adev; @@ -1259,42 +1279,13 @@ static int smu_sw_init(struct amdgpu_ip_block *ip_block) INIT_WORK(&smu->interrupt_work, smu_interrupt_work_fn); atomic64_set(&smu->throttle_int_counter, 0); smu->watermarks_bitmap = 0; - smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT; - smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT; - smu->user_dpm_profile.user_workload_mask = 0; atomic_set(&smu->smu_power.power_gate.vcn_gated, 1); atomic_set(&smu->smu_power.power_gate.jpeg_gated, 1); atomic_set(&smu->smu_power.power_gate.vpe_gated, 1); atomic_set(&smu->smu_power.power_gate.umsch_mm_gated, 1); - smu->workload_priority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0; - smu->workload_priority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1; - smu->workload_priority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2; - smu->workload_priority[PP_SMC_POWER_PROFILE_VIDEO] = 3; - smu->workload_priority[PP_SMC_POWER_PROFILE_VR] = 4; - smu->workload_priority[PP_SMC_POWER_PROFILE_COMPUTE] = 5; - smu->workload_priority[PP_SMC_POWER_PROFILE_CUSTOM] = 6; - - if (smu->is_apu || - !smu_is_workload_profile_available(smu, PP_SMC_POWER_PROFILE_FULLSCREEN3D)) { - smu->driver_workload_mask = - 1 << smu->workload_priority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT]; - } else { - smu->driver_workload_mask = - 1 << smu->workload_priority[PP_SMC_POWER_PROFILE_FULLSCREEN3D]; - smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_FULLSCREEN3D; - } - - smu->workload_mask = smu->driver_workload_mask | - smu->user_dpm_profile.user_workload_mask; - smu->workload_setting[0] = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT; - smu->workload_setting[1] = PP_SMC_POWER_PROFILE_FULLSCREEN3D; - smu->workload_setting[2] = PP_SMC_POWER_PROFILE_POWERSAVING; - smu->workload_setting[3] = 
PP_SMC_POWER_PROFILE_VIDEO; - smu->workload_setting[4] = PP_SMC_POWER_PROFILE_VR; - smu->workload_setting[5] = PP_SMC_POWER_PROFILE_COMPUTE; - smu->workload_setting[6] = PP_SMC_POWER_PROFILE_CUSTOM; + smu_init_power_profile(smu); smu->display_config = &adev->pm.pm_display_cfg; smu->smu_dpm.dpm_level = AMD_DPM_FORCED_LEVEL_AUTO; @@ -1347,6 +1338,11 @@ static int smu_sw_fini(struct amdgpu_ip_block *ip_block) return ret; } + if (smu->custom_profile_params) { + kfree(smu->custom_profile_params); + smu->custom_profile_params = NULL; + } + smu_fini_microcode(smu); return 0; @@ -2131,6 +2127,9 @@ static int smu_suspend(struct amdgpu_ip_block *ip_block) if (!ret) adev->gfx.gfx_off_entrycount = count; + /* clear this on suspend so it will get reprogrammed on resume */ + smu->workload_mask = 0; + return 0; } @@ -2243,25 +2242,49 @@ static int smu_enable_umd_pstate(void *handle, } static int smu_bump_power_profile_mode(struct smu_context *smu, - long *param, - uint32_t param_size) + long *custom_params, + u32 custom_params_max_idx) { - int ret = 0; + u32 workload_mask = 0; + int i, ret = 0; + + for (i = 0; i < PP_SMC_POWER_PROFILE_COUNT; i++) { + if (smu->workload_refcount[i]) + workload_mask |= 1 << i; + } + + if (smu->workload_mask == workload_mask) + return 0; if (smu->ppt_funcs->set_power_profile_mode) - ret = smu->ppt_funcs->set_power_profile_mode(smu, param, param_size); + ret = smu->ppt_funcs->set_power_profile_mode(smu, workload_mask, + custom_params, + custom_params_max_idx); + + if (!ret) + smu->workload_mask = workload_mask; return ret; } +static void smu_power_profile_mode_get(struct smu_context *smu, + enum PP_SMC_POWER_PROFILE profile_mode) +{ + smu->workload_refcount[profile_mode]++; +} + +static void smu_power_profile_mode_put(struct smu_context *smu, + enum PP_SMC_POWER_PROFILE profile_mode) +{ + if (smu->workload_refcount[profile_mode]) + smu->workload_refcount[profile_mode]--; +} + static int smu_adjust_power_state_dynamic(struct smu_context *smu, enum amd_dpm_forced_level level, - bool skip_display_settings, - bool init) + bool skip_display_settings) { int ret = 0; - int index = 0; - long workload[1]; struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); if (!skip_display_settings) { @@ -2298,14 +2321,8 @@ static int smu_adjust_power_state_dynamic(struct smu_context *smu, } if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL && - smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) { - index = fls(smu->workload_mask); - index = index > 0 && index <= WORKLOAD_POLICY_MAX ? 
index - 1 : 0; - workload[0] = smu->workload_setting[index]; - - if (init || smu->power_profile_mode != workload[0]) - smu_bump_power_profile_mode(smu, workload, 0); - } + smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) + smu_bump_power_profile_mode(smu, NULL, 0); return ret; } @@ -2324,13 +2341,13 @@ static int smu_handle_task(struct smu_context *smu, ret = smu_pre_display_config_changed(smu); if (ret) return ret; - ret = smu_adjust_power_state_dynamic(smu, level, false, false); + ret = smu_adjust_power_state_dynamic(smu, level, false); break; case AMD_PP_TASK_COMPLETE_INIT: - ret = smu_adjust_power_state_dynamic(smu, level, true, true); + ret = smu_adjust_power_state_dynamic(smu, level, true); break; case AMD_PP_TASK_READJUST_POWER_STATE: - ret = smu_adjust_power_state_dynamic(smu, level, true, false); + ret = smu_adjust_power_state_dynamic(smu, level, true); break; default: break; @@ -2352,12 +2369,11 @@ static int smu_handle_dpm_task(void *handle, static int smu_switch_power_profile(void *handle, enum PP_SMC_POWER_PROFILE type, - bool en) + bool enable) { struct smu_context *smu = handle; struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); - long workload[1]; - uint32_t index; + int ret; if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) return -EOPNOTSUPP; @@ -2365,24 +2381,21 @@ static int smu_switch_power_profile(void *handle, if (!(type < PP_SMC_POWER_PROFILE_CUSTOM)) return -EINVAL; - if (!en) { - smu->driver_workload_mask &= ~(1 << smu->workload_priority[type]); - index = fls(smu->workload_mask); - index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0; - workload[0] = smu->workload_setting[index]; - } else { - smu->driver_workload_mask |= (1 << smu->workload_priority[type]); - index = fls(smu->workload_mask); - index = index <= WORKLOAD_POLICY_MAX ? 
index - 1 : 0; - workload[0] = smu->workload_setting[index]; - } - - smu->workload_mask = smu->driver_workload_mask | - smu->user_dpm_profile.user_workload_mask; - if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL && - smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) - smu_bump_power_profile_mode(smu, workload, 0); + smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) { + if (enable) + smu_power_profile_mode_get(smu, type); + else + smu_power_profile_mode_put(smu, type); + ret = smu_bump_power_profile_mode(smu, NULL, 0); + if (ret) { + if (enable) + smu_power_profile_mode_put(smu, type); + else + smu_power_profile_mode_get(smu, type); + return ret; + } + } return 0; } @@ -3074,21 +3087,33 @@ static int smu_set_power_profile_mode(void *handle, uint32_t param_size) { struct smu_context *smu = handle; - int ret; + bool custom = false; + int ret = 0; if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled || !smu->ppt_funcs->set_power_profile_mode) return -EOPNOTSUPP; - if (smu->user_dpm_profile.user_workload_mask & - (1 << smu->workload_priority[param[param_size]])) - return 0; + if (param[param_size] == PP_SMC_POWER_PROFILE_CUSTOM) { + custom = true; + /* clear frontend mask so custom changes propagate */ + smu->workload_mask = 0; + } - smu->user_dpm_profile.user_workload_mask = - (1 << smu->workload_priority[param[param_size]]); - smu->workload_mask = smu->user_dpm_profile.user_workload_mask | - smu->driver_workload_mask; - ret = smu_bump_power_profile_mode(smu, param, param_size); + if ((param[param_size] != smu->power_profile_mode) || custom) { + /* clear the old user preference */ + smu_power_profile_mode_put(smu, smu->power_profile_mode); + /* set the new user preference */ + smu_power_profile_mode_get(smu, param[param_size]); + ret = smu_bump_power_profile_mode(smu, + custom ? param : NULL, + custom ? param_size : 0); + if (ret) + smu_power_profile_mode_put(smu, param[param_size]); + else + /* store the user's preference */ + smu->power_profile_mode = param[param_size]; + } return ret; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h index d665c47f19b7..3925815358ce 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h @@ -240,7 +240,6 @@ struct smu_user_dpm_profile { /* user clock state information */ uint32_t clk_mask[SMU_CLK_COUNT]; uint32_t clk_dependency; - uint32_t user_workload_mask; }; #define SMU_TABLE_INIT(tables, table_id, s, a, d) \ @@ -557,12 +556,13 @@ struct smu_context { uint32_t hard_min_uclk_req_from_dal; bool disable_uclk_switch; + /* asic agnostic workload mask */ uint32_t workload_mask; - uint32_t driver_workload_mask; - uint32_t workload_priority[WORKLOAD_POLICY_MAX]; - uint32_t workload_setting[WORKLOAD_POLICY_MAX]; + /* default/user workload preference */ uint32_t power_profile_mode; - uint32_t default_power_profile_mode; + uint32_t workload_refcount[PP_SMC_POWER_PROFILE_COUNT]; + /* backend specific custom workload settings */ + long *custom_profile_params; bool pm_enabled; bool is_apu; @@ -733,9 +733,12 @@ struct pptable_funcs { * @set_power_profile_mode: Set a power profile mode. Also used to * create/set custom power profile modes. * &input: Power profile mode parameters. - * &size: Size of &input.
+ * &workload_mask: mask of workloads to enable + * &custom_params: custom profile parameters + * &custom_params_max_idx: max valid idx into custom_params */ - int (*set_power_profile_mode)(struct smu_context *smu, long *input, uint32_t size); + int (*set_power_profile_mode)(struct smu_context *smu, u32 workload_mask, + long *custom_params, u32 custom_params_max_idx); /** * @dpm_set_vcn_enable: Enable/disable VCN engine dynamic power diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c index 12125303bb79..8aa61a9f7778 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c @@ -1445,97 +1445,120 @@ static int arcturus_get_power_profile_mode(struct smu_context *smu, return size; } -static int arcturus_set_power_profile_mode(struct smu_context *smu, - long *input, - uint32_t size) +#define ARCTURUS_CUSTOM_PARAMS_COUNT 10 +#define ARCTURUS_CUSTOM_PARAMS_CLOCK_COUNT 2 +#define ARCTURUS_CUSTOM_PARAMS_SIZE (ARCTURUS_CUSTOM_PARAMS_CLOCK_COUNT * ARCTURUS_CUSTOM_PARAMS_COUNT * sizeof(long)) + +static int arcturus_set_power_profile_mode_coeff(struct smu_context *smu, + long *input) { DpmActivityMonitorCoeffInt_t activity_monitor; - int workload_type = 0; - uint32_t profile_mode = input[size]; - int ret = 0; + int ret, idx; - if (profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) { - dev_err(smu->adev->dev, "Invalid power profile mode %d\n", profile_mode); - return -EINVAL; + ret = smu_cmn_update_table(smu, + SMU_TABLE_ACTIVITY_MONITOR_COEFF, + WORKLOAD_PPLIB_CUSTOM_BIT, + (void *)(&activity_monitor), + false); + if (ret) { + dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__); + return ret; } - if ((profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) && - (smu->smc_fw_version >= 0x360d00)) { - if (size != 10) - return -EINVAL; + idx = 0 * ARCTURUS_CUSTOM_PARAMS_COUNT; + if (input[idx]) { + /* Gfxclk */ + activity_monitor.Gfx_FPS = input[idx + 1]; + activity_monitor.Gfx_UseRlcBusy = input[idx + 2]; + activity_monitor.Gfx_MinActiveFreqType = input[idx + 3]; + activity_monitor.Gfx_MinActiveFreq = input[idx + 4]; + activity_monitor.Gfx_BoosterFreqType = input[idx + 5]; + activity_monitor.Gfx_BoosterFreq = input[idx + 6]; + activity_monitor.Gfx_PD_Data_limit_c = input[idx + 7]; + activity_monitor.Gfx_PD_Data_error_coeff = input[idx + 8]; + activity_monitor.Gfx_PD_Data_error_rate_coeff = input[idx + 9]; + } + idx = 1 * ARCTURUS_CUSTOM_PARAMS_COUNT; + if (input[idx]) { + /* Uclk */ + activity_monitor.Mem_FPS = input[idx + 1]; + activity_monitor.Mem_UseRlcBusy = input[idx + 2]; + activity_monitor.Mem_MinActiveFreqType = input[idx + 3]; + activity_monitor.Mem_MinActiveFreq = input[idx + 4]; + activity_monitor.Mem_BoosterFreqType = input[idx + 5]; + activity_monitor.Mem_BoosterFreq = input[idx + 6]; + activity_monitor.Mem_PD_Data_limit_c = input[idx + 7]; + activity_monitor.Mem_PD_Data_error_coeff = input[idx + 8]; + activity_monitor.Mem_PD_Data_error_rate_coeff = input[idx + 9]; + } - ret = smu_cmn_update_table(smu, - SMU_TABLE_ACTIVITY_MONITOR_COEFF, - WORKLOAD_PPLIB_CUSTOM_BIT, - (void *)(&activity_monitor), - false); - if (ret) { - dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__); - return ret; - } + ret = smu_cmn_update_table(smu, + SMU_TABLE_ACTIVITY_MONITOR_COEFF, + WORKLOAD_PPLIB_CUSTOM_BIT, + (void *)(&activity_monitor), + true); + if (ret) { + dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__); + return ret; + } - switch 
(input[0]) { - case 0: /* Gfxclk */ - activity_monitor.Gfx_FPS = input[1]; - activity_monitor.Gfx_UseRlcBusy = input[2]; - activity_monitor.Gfx_MinActiveFreqType = input[3]; - activity_monitor.Gfx_MinActiveFreq = input[4]; - activity_monitor.Gfx_BoosterFreqType = input[5]; - activity_monitor.Gfx_BoosterFreq = input[6]; - activity_monitor.Gfx_PD_Data_limit_c = input[7]; - activity_monitor.Gfx_PD_Data_error_coeff = input[8]; - activity_monitor.Gfx_PD_Data_error_rate_coeff = input[9]; - break; - case 1: /* Uclk */ - activity_monitor.Mem_FPS = input[1]; - activity_monitor.Mem_UseRlcBusy = input[2]; - activity_monitor.Mem_MinActiveFreqType = input[3]; - activity_monitor.Mem_MinActiveFreq = input[4]; - activity_monitor.Mem_BoosterFreqType = input[5]; - activity_monitor.Mem_BoosterFreq = input[6]; - activity_monitor.Mem_PD_Data_limit_c = input[7]; - activity_monitor.Mem_PD_Data_error_coeff = input[8]; - activity_monitor.Mem_PD_Data_error_rate_coeff = input[9]; - break; - default: + return ret; +} + +static int arcturus_set_power_profile_mode(struct smu_context *smu, + u32 workload_mask, + long *custom_params, + u32 custom_params_max_idx) +{ + u32 backend_workload_mask = 0; + int ret, idx = -1, i; + + smu_cmn_get_backend_workload_mask(smu, workload_mask, + &backend_workload_mask); + + if (workload_mask & (1 << PP_SMC_POWER_PROFILE_CUSTOM)) { + if (smu->smc_fw_version < 0x360d00) return -EINVAL; + if (!smu->custom_profile_params) { + smu->custom_profile_params = + kzalloc(ARCTURUS_CUSTOM_PARAMS_SIZE, GFP_KERNEL); + if (!smu->custom_profile_params) + return -ENOMEM; } - - ret = smu_cmn_update_table(smu, - SMU_TABLE_ACTIVITY_MONITOR_COEFF, - WORKLOAD_PPLIB_CUSTOM_BIT, - (void *)(&activity_monitor), - true); + if (custom_params && custom_params_max_idx) { + if (custom_params_max_idx != ARCTURUS_CUSTOM_PARAMS_COUNT) + return -EINVAL; + if (custom_params[0] >= ARCTURUS_CUSTOM_PARAMS_CLOCK_COUNT) + return -EINVAL; + idx = custom_params[0] * ARCTURUS_CUSTOM_PARAMS_COUNT; + smu->custom_profile_params[idx] = 1; + for (i = 1; i < custom_params_max_idx; i++) + smu->custom_profile_params[idx + i] = custom_params[i]; + } + ret = arcturus_set_power_profile_mode_coeff(smu, + smu->custom_profile_params); if (ret) { - dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__); + if (idx != -1) + smu->custom_profile_params[idx] = 0; return ret; } - } - - /* - * Conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT - * Not all profile modes are supported on arcturus. 
- */ - workload_type = smu_cmn_to_asic_specific_index(smu, - CMN2ASIC_MAPPING_WORKLOAD, - profile_mode); - if (workload_type < 0) { - dev_dbg(smu->adev->dev, "Unsupported power profile mode %d on arcturus\n", profile_mode); - return -EINVAL; + } else if (smu->custom_profile_params) { + memset(smu->custom_profile_params, 0, ARCTURUS_CUSTOM_PARAMS_SIZE); } ret = smu_cmn_send_smc_msg_with_param(smu, - SMU_MSG_SetWorkloadMask, - smu->workload_mask, - NULL); + SMU_MSG_SetWorkloadMask, + backend_workload_mask, + NULL); if (ret) { - dev_err(smu->adev->dev, "Fail to set workload type %d\n", workload_type); + dev_err(smu->adev->dev, "Failed to set workload mask 0x%08x\n", + workload_mask); + if (idx != -1) + smu->custom_profile_params[idx] = 0; return ret; } - smu_cmn_assign_power_profile(smu); - - return 0; + return ret; } static int arcturus_set_performance_level(struct smu_context *smu, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c index 211635dabed8..7fad5dfb39c4 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c @@ -2006,90 +2006,122 @@ static int navi10_get_power_profile_mode(struct smu_context *smu, char *buf) return size; } -static int navi10_set_power_profile_mode(struct smu_context *smu, long *input, uint32_t size) +#define NAVI10_CUSTOM_PARAMS_COUNT 10 +#define NAVI10_CUSTOM_PARAMS_CLOCKS_COUNT 3 +#define NAVI10_CUSTOM_PARAMS_SIZE (NAVI10_CUSTOM_PARAMS_CLOCKS_COUNT * NAVI10_CUSTOM_PARAMS_COUNT * sizeof(long)) + +static int navi10_set_power_profile_mode_coeff(struct smu_context *smu, + long *input) { DpmActivityMonitorCoeffInt_t activity_monitor; - int workload_type, ret = 0; + int ret, idx; - smu->power_profile_mode = input[size]; + ret = smu_cmn_update_table(smu, + SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT, + (void *)(&activity_monitor), false); + if (ret) { + dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__); + return ret; + } - if (smu->power_profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) { - dev_err(smu->adev->dev, "Invalid power profile mode %d\n", smu->power_profile_mode); - return -EINVAL; + idx = 0 * NAVI10_CUSTOM_PARAMS_COUNT; + if (input[idx]) { + /* Gfxclk */ + activity_monitor.Gfx_FPS = input[idx + 1]; + activity_monitor.Gfx_MinFreqStep = input[idx + 2]; + activity_monitor.Gfx_MinActiveFreqType = input[idx + 3]; + activity_monitor.Gfx_MinActiveFreq = input[idx + 4]; + activity_monitor.Gfx_BoosterFreqType = input[idx + 5]; + activity_monitor.Gfx_BoosterFreq = input[idx + 6]; + activity_monitor.Gfx_PD_Data_limit_c = input[idx + 7]; + activity_monitor.Gfx_PD_Data_error_coeff = input[idx + 8]; + activity_monitor.Gfx_PD_Data_error_rate_coeff = input[idx + 9]; + } + idx = 1 * NAVI10_CUSTOM_PARAMS_COUNT; + if (input[idx]) { + /* Socclk */ + activity_monitor.Soc_FPS = input[idx + 1]; + activity_monitor.Soc_MinFreqStep = input[idx + 2]; + activity_monitor.Soc_MinActiveFreqType = input[idx + 3]; + activity_monitor.Soc_MinActiveFreq = input[idx + 4]; + activity_monitor.Soc_BoosterFreqType = input[idx + 5]; + activity_monitor.Soc_BoosterFreq = input[idx + 6]; + activity_monitor.Soc_PD_Data_limit_c = input[idx + 7]; + activity_monitor.Soc_PD_Data_error_coeff = input[idx + 8]; + activity_monitor.Soc_PD_Data_error_rate_coeff = input[idx + 9]; + } + idx = 2 * NAVI10_CUSTOM_PARAMS_COUNT; + if (input[idx]) { + /* Memclk */ + activity_monitor.Mem_FPS = input[idx + 1]; + activity_monitor.Mem_MinFreqStep = input[idx + 2]; + 
activity_monitor.Mem_MinActiveFreqType = input[idx + 3]; + activity_monitor.Mem_MinActiveFreq = input[idx + 4]; + activity_monitor.Mem_BoosterFreqType = input[idx + 5]; + activity_monitor.Mem_BoosterFreq = input[idx + 6]; + activity_monitor.Mem_PD_Data_limit_c = input[idx + 7]; + activity_monitor.Mem_PD_Data_error_coeff = input[idx + 8]; + activity_monitor.Mem_PD_Data_error_rate_coeff = input[idx + 9]; + } + + ret = smu_cmn_update_table(smu, + SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT, + (void *)(&activity_monitor), true); + if (ret) { + dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__); + return ret; } - if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) { - if (size != 10) - return -EINVAL; + return ret; +} - ret = smu_cmn_update_table(smu, - SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT, - (void *)(&activity_monitor), false); - if (ret) { - dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__); - return ret; - } +static int navi10_set_power_profile_mode(struct smu_context *smu, + u32 workload_mask, + long *custom_params, + u32 custom_params_max_idx) +{ + u32 backend_workload_mask = 0; + int ret, idx = -1, i; - switch (input[0]) { - case 0: /* Gfxclk */ - activity_monitor.Gfx_FPS = input[1]; - activity_monitor.Gfx_MinFreqStep = input[2]; - activity_monitor.Gfx_MinActiveFreqType = input[3]; - activity_monitor.Gfx_MinActiveFreq = input[4]; - activity_monitor.Gfx_BoosterFreqType = input[5]; - activity_monitor.Gfx_BoosterFreq = input[6]; - activity_monitor.Gfx_PD_Data_limit_c = input[7]; - activity_monitor.Gfx_PD_Data_error_coeff = input[8]; - activity_monitor.Gfx_PD_Data_error_rate_coeff = input[9]; - break; - case 1: /* Socclk */ - activity_monitor.Soc_FPS = input[1]; - activity_monitor.Soc_MinFreqStep = input[2]; - activity_monitor.Soc_MinActiveFreqType = input[3]; - activity_monitor.Soc_MinActiveFreq = input[4]; - activity_monitor.Soc_BoosterFreqType = input[5]; - activity_monitor.Soc_BoosterFreq = input[6]; - activity_monitor.Soc_PD_Data_limit_c = input[7]; - activity_monitor.Soc_PD_Data_error_coeff = input[8]; - activity_monitor.Soc_PD_Data_error_rate_coeff = input[9]; - break; - case 2: /* Memclk */ - activity_monitor.Mem_FPS = input[1]; - activity_monitor.Mem_MinFreqStep = input[2]; - activity_monitor.Mem_MinActiveFreqType = input[3]; - activity_monitor.Mem_MinActiveFreq = input[4]; - activity_monitor.Mem_BoosterFreqType = input[5]; - activity_monitor.Mem_BoosterFreq = input[6]; - activity_monitor.Mem_PD_Data_limit_c = input[7]; - activity_monitor.Mem_PD_Data_error_coeff = input[8]; - activity_monitor.Mem_PD_Data_error_rate_coeff = input[9]; - break; - default: - return -EINVAL; - } + smu_cmn_get_backend_workload_mask(smu, workload_mask, + &backend_workload_mask); - ret = smu_cmn_update_table(smu, - SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT, - (void *)(&activity_monitor), true); + if (workload_mask & (1 << PP_SMC_POWER_PROFILE_CUSTOM)) { + if (!smu->custom_profile_params) { + smu->custom_profile_params = kzalloc(NAVI10_CUSTOM_PARAMS_SIZE, GFP_KERNEL); + if (!smu->custom_profile_params) + return -ENOMEM; + } + if (custom_params && custom_params_max_idx) { + if (custom_params_max_idx != NAVI10_CUSTOM_PARAMS_COUNT) + return -EINVAL; + if (custom_params[0] >= NAVI10_CUSTOM_PARAMS_CLOCKS_COUNT) + return -EINVAL; + idx = custom_params[0] * NAVI10_CUSTOM_PARAMS_COUNT; + smu->custom_profile_params[idx] = 1; + for (i = 1; i < custom_params_max_idx; i++) + smu->custom_profile_params[idx + i] 
= custom_params[i]; + } + ret = navi10_set_power_profile_mode_coeff(smu, + smu->custom_profile_params); if (ret) { - dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__); + if (idx != -1) + smu->custom_profile_params[idx] = 0; return ret; } + } else if (smu->custom_profile_params) { + memset(smu->custom_profile_params, 0, NAVI10_CUSTOM_PARAMS_SIZE); } - /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */ - workload_type = smu_cmn_to_asic_specific_index(smu, - CMN2ASIC_MAPPING_WORKLOAD, - smu->power_profile_mode); - if (workload_type < 0) - return -EINVAL; - ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask, - smu->workload_mask, NULL); - if (ret) - dev_err(smu->adev->dev, "[%s] Failed to set work load mask!", __func__); - else - smu_cmn_assign_power_profile(smu); + backend_workload_mask, NULL); + if (ret) { + dev_err(smu->adev->dev, "Failed to set workload mask 0x%08x\n", + workload_mask); + if (idx != -1) + smu->custom_profile_params[idx] = 0; + return ret; + } return ret; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c index d0ed0d060a8a..286777ada1df 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c @@ -1708,93 +1708,126 @@ static int sienna_cichlid_get_power_profile_mode(struct smu_context *smu, char * return size; } -static int sienna_cichlid_set_power_profile_mode(struct smu_context *smu, long *input, uint32_t size) +#define SIENNA_CICHLID_CUSTOM_PARAMS_COUNT 10 +#define SIENNA_CICHLID_CUSTOM_PARAMS_CLOCK_COUNT 3 +#define SIENNA_CICHLID_CUSTOM_PARAMS_SIZE (SIENNA_CICHLID_CUSTOM_PARAMS_CLOCK_COUNT * SIENNA_CICHLID_CUSTOM_PARAMS_COUNT * sizeof(long)) + +static int sienna_cichlid_set_power_profile_mode_coeff(struct smu_context *smu, + long *input) { DpmActivityMonitorCoeffIntExternal_t activity_monitor_external; DpmActivityMonitorCoeffInt_t *activity_monitor = &(activity_monitor_external.DpmActivityMonitorCoeffInt); - int workload_type, ret = 0; + int ret, idx; - smu->power_profile_mode = input[size]; + ret = smu_cmn_update_table(smu, + SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT, + (void *)(&activity_monitor_external), false); + if (ret) { + dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__); + return ret; + } - if (smu->power_profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) { - dev_err(smu->adev->dev, "Invalid power profile mode %d\n", smu->power_profile_mode); - return -EINVAL; + idx = 0 * SIENNA_CICHLID_CUSTOM_PARAMS_COUNT; + if (input[idx]) { + /* Gfxclk */ + activity_monitor->Gfx_FPS = input[idx + 1]; + activity_monitor->Gfx_MinFreqStep = input[idx + 2]; + activity_monitor->Gfx_MinActiveFreqType = input[idx + 3]; + activity_monitor->Gfx_MinActiveFreq = input[idx + 4]; + activity_monitor->Gfx_BoosterFreqType = input[idx + 5]; + activity_monitor->Gfx_BoosterFreq = input[idx + 6]; + activity_monitor->Gfx_PD_Data_limit_c = input[idx + 7]; + activity_monitor->Gfx_PD_Data_error_coeff = input[idx + 8]; + activity_monitor->Gfx_PD_Data_error_rate_coeff = input[idx + 9]; + } + idx = 1 * SIENNA_CICHLID_CUSTOM_PARAMS_COUNT; + if (input[idx]) { + /* Socclk */ + activity_monitor->Fclk_FPS = input[idx + 1]; + activity_monitor->Fclk_MinFreqStep = input[idx + 2]; + activity_monitor->Fclk_MinActiveFreqType = input[idx + 3]; + activity_monitor->Fclk_MinActiveFreq = input[idx + 4]; + activity_monitor->Fclk_BoosterFreqType = input[idx + 5]; + 
activity_monitor->Fclk_BoosterFreq = input[idx + 6]; + activity_monitor->Fclk_PD_Data_limit_c = input[idx + 7]; + activity_monitor->Fclk_PD_Data_error_coeff = input[idx + 8]; + activity_monitor->Fclk_PD_Data_error_rate_coeff = input[idx + 9]; + } + idx = 2 * SIENNA_CICHLID_CUSTOM_PARAMS_COUNT; + if (input[idx]) { + /* Memclk */ + activity_monitor->Mem_FPS = input[idx + 1]; + activity_monitor->Mem_MinFreqStep = input[idx + 2]; + activity_monitor->Mem_MinActiveFreqType = input[idx + 3]; + activity_monitor->Mem_MinActiveFreq = input[idx + 4]; + activity_monitor->Mem_BoosterFreqType = input[idx + 5]; + activity_monitor->Mem_BoosterFreq = input[idx + 6]; + activity_monitor->Mem_PD_Data_limit_c = input[idx + 7]; + activity_monitor->Mem_PD_Data_error_coeff = input[idx + 8]; + activity_monitor->Mem_PD_Data_error_rate_coeff = input[idx + 9]; } - if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) { - if (size != 10) - return -EINVAL; + ret = smu_cmn_update_table(smu, + SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT, + (void *)(&activity_monitor_external), true); + if (ret) { + dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__); + return ret; + } - ret = smu_cmn_update_table(smu, - SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT, - (void *)(&activity_monitor_external), false); - if (ret) { - dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__); - return ret; - } + return ret; +} - switch (input[0]) { - case 0: /* Gfxclk */ - activity_monitor->Gfx_FPS = input[1]; - activity_monitor->Gfx_MinFreqStep = input[2]; - activity_monitor->Gfx_MinActiveFreqType = input[3]; - activity_monitor->Gfx_MinActiveFreq = input[4]; - activity_monitor->Gfx_BoosterFreqType = input[5]; - activity_monitor->Gfx_BoosterFreq = input[6]; - activity_monitor->Gfx_PD_Data_limit_c = input[7]; - activity_monitor->Gfx_PD_Data_error_coeff = input[8]; - activity_monitor->Gfx_PD_Data_error_rate_coeff = input[9]; - break; - case 1: /* Socclk */ - activity_monitor->Fclk_FPS = input[1]; - activity_monitor->Fclk_MinFreqStep = input[2]; - activity_monitor->Fclk_MinActiveFreqType = input[3]; - activity_monitor->Fclk_MinActiveFreq = input[4]; - activity_monitor->Fclk_BoosterFreqType = input[5]; - activity_monitor->Fclk_BoosterFreq = input[6]; - activity_monitor->Fclk_PD_Data_limit_c = input[7]; - activity_monitor->Fclk_PD_Data_error_coeff = input[8]; - activity_monitor->Fclk_PD_Data_error_rate_coeff = input[9]; - break; - case 2: /* Memclk */ - activity_monitor->Mem_FPS = input[1]; - activity_monitor->Mem_MinFreqStep = input[2]; - activity_monitor->Mem_MinActiveFreqType = input[3]; - activity_monitor->Mem_MinActiveFreq = input[4]; - activity_monitor->Mem_BoosterFreqType = input[5]; - activity_monitor->Mem_BoosterFreq = input[6]; - activity_monitor->Mem_PD_Data_limit_c = input[7]; - activity_monitor->Mem_PD_Data_error_coeff = input[8]; - activity_monitor->Mem_PD_Data_error_rate_coeff = input[9]; - break; - default: - return -EINVAL; - } +static int sienna_cichlid_set_power_profile_mode(struct smu_context *smu, + u32 workload_mask, + long *custom_params, + u32 custom_params_max_idx) +{ + u32 backend_workload_mask = 0; + int ret, idx = -1, i; + + smu_cmn_get_backend_workload_mask(smu, workload_mask, + &backend_workload_mask); - ret = smu_cmn_update_table(smu, - SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT, - (void *)(&activity_monitor_external), true); + if (workload_mask & (1 << PP_SMC_POWER_PROFILE_CUSTOM)) { + if (!smu->custom_profile_params) { + 
smu->custom_profile_params = + kzalloc(SIENNA_CICHLID_CUSTOM_PARAMS_SIZE, GFP_KERNEL); + if (!smu->custom_profile_params) + return -ENOMEM; + } + if (custom_params && custom_params_max_idx) { + if (custom_params_max_idx != SIENNA_CICHLID_CUSTOM_PARAMS_COUNT) + return -EINVAL; + if (custom_params[0] >= SIENNA_CICHLID_CUSTOM_PARAMS_CLOCK_COUNT) + return -EINVAL; + idx = custom_params[0] * SIENNA_CICHLID_CUSTOM_PARAMS_COUNT; + smu->custom_profile_params[idx] = 1; + for (i = 1; i < custom_params_max_idx; i++) + smu->custom_profile_params[idx + i] = custom_params[i]; + } + ret = sienna_cichlid_set_power_profile_mode_coeff(smu, + smu->custom_profile_params); if (ret) { - dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__); + if (idx != -1) + smu->custom_profile_params[idx] = 0; return ret; } + } else if (smu->custom_profile_params) { + memset(smu->custom_profile_params, 0, SIENNA_CICHLID_CUSTOM_PARAMS_SIZE); } - /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */ - workload_type = smu_cmn_to_asic_specific_index(smu, - CMN2ASIC_MAPPING_WORKLOAD, - smu->power_profile_mode); - if (workload_type < 0) - return -EINVAL; - ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask, - smu->workload_mask, NULL); - if (ret) - dev_err(smu->adev->dev, "[%s] Failed to set work load mask!", __func__); - else - smu_cmn_assign_power_profile(smu); + backend_workload_mask, NULL); + if (ret) { + dev_err(smu->adev->dev, "Failed to set workload mask 0x%08x\n", + workload_mask); + if (idx != -1) + smu->custom_profile_params[idx] = 0; + return ret; + } return ret; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c index f89c487dce72..a55ea76d7399 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c @@ -1056,42 +1056,27 @@ static int vangogh_get_power_profile_mode(struct smu_context *smu, return size; } -static int vangogh_set_power_profile_mode(struct smu_context *smu, long *input, uint32_t size) +static int vangogh_set_power_profile_mode(struct smu_context *smu, + u32 workload_mask, + long *custom_params, + u32 custom_params_max_idx) { - int workload_type, ret; - uint32_t profile_mode = input[size]; + u32 backend_workload_mask = 0; + int ret; - if (profile_mode >= PP_SMC_POWER_PROFILE_COUNT) { - dev_err(smu->adev->dev, "Invalid power profile mode %d\n", profile_mode); - return -EINVAL; - } - - if (profile_mode == PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT || - profile_mode == PP_SMC_POWER_PROFILE_POWERSAVING) - return 0; - - /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */ - workload_type = smu_cmn_to_asic_specific_index(smu, - CMN2ASIC_MAPPING_WORKLOAD, - profile_mode); - if (workload_type < 0) { - dev_dbg(smu->adev->dev, "Unsupported power profile mode %d on VANGOGH\n", - profile_mode); - return -EINVAL; - } + smu_cmn_get_backend_workload_mask(smu, workload_mask, + &backend_workload_mask); ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ActiveProcessNotify, - smu->workload_mask, - NULL); + backend_workload_mask, + NULL); if (ret) { - dev_err_once(smu->adev->dev, "Fail to set workload type %d\n", - workload_type); + dev_err_once(smu->adev->dev, "Failed to set workload mask 0x%08x\n", + workload_mask); return ret; } - smu_cmn_assign_power_profile(smu); - - return 0; + return ret; } static int vangogh_set_soft_freq_limited_range(struct smu_context *smu, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c index 75a9ea87f419..37d82a71a2d7 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c @@ -864,44 +864,27 @@ static int renoir_force_clk_levels(struct smu_context *smu, return ret; } -static int renoir_set_power_profile_mode(struct smu_context *smu, long *input, uint32_t size) +static int renoir_set_power_profile_mode(struct smu_context *smu, + u32 workload_mask, + long *custom_params, + u32 custom_params_max_idx) { - int workload_type, ret; - uint32_t profile_mode = input[size]; + int ret; + u32 backend_workload_mask = 0; - if (profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) { - dev_err(smu->adev->dev, "Invalid power profile mode %d\n", profile_mode); - return -EINVAL; - } - - if (profile_mode == PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT || - profile_mode == PP_SMC_POWER_PROFILE_POWERSAVING) - return 0; - - /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */ - workload_type = smu_cmn_to_asic_specific_index(smu, - CMN2ASIC_MAPPING_WORKLOAD, - profile_mode); - if (workload_type < 0) { - /* - * TODO: If some case need switch to powersave/default power mode - * then can consider enter WORKLOAD_COMPUTE/WORKLOAD_CUSTOM for power saving. - */ - dev_dbg(smu->adev->dev, "Unsupported power profile mode %d on RENOIR\n", profile_mode); - return -EINVAL; - } + smu_cmn_get_backend_workload_mask(smu, workload_mask, + &backend_workload_mask); ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ActiveProcessNotify, - smu->workload_mask, - NULL); + backend_workload_mask, + NULL); if (ret) { - dev_err_once(smu->adev->dev, "Fail to set workload type %d\n", workload_type); + dev_err_once(smu->adev->dev, "Failed to set workload mask 0x%08x\n", + workload_mask); return ret; } - smu_cmn_assign_power_profile(smu); - - return 0; + return ret; } static int renoir_set_peak_clock_by_device(struct smu_context *smu) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c index 80c6b1e523aa..3aa705aae4c0 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c @@ -2571,82 +2571,76 @@ static int smu_v13_0_0_get_power_profile_mode(struct smu_context *smu, return size; } -static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu, - long *input, - uint32_t size) +#define SMU_13_0_0_CUSTOM_PARAMS_COUNT 9 +#define SMU_13_0_0_CUSTOM_PARAMS_CLOCK_COUNT 2 +#define SMU_13_0_0_CUSTOM_PARAMS_SIZE (SMU_13_0_0_CUSTOM_PARAMS_CLOCK_COUNT * SMU_13_0_0_CUSTOM_PARAMS_COUNT * sizeof(long)) + +static int smu_v13_0_0_set_power_profile_mode_coeff(struct smu_context *smu, + long *input) { DpmActivityMonitorCoeffIntExternal_t activity_monitor_external; DpmActivityMonitorCoeffInt_t *activity_monitor = &(activity_monitor_external.DpmActivityMonitorCoeffInt); - int workload_type, ret = 0; - u32 workload_mask; - - smu->power_profile_mode = input[size]; + int ret, idx; - if (smu->power_profile_mode >= PP_SMC_POWER_PROFILE_COUNT) { - dev_err(smu->adev->dev, "Invalid power profile mode %d\n", smu->power_profile_mode); - return -EINVAL; + ret = smu_cmn_update_table(smu, + SMU_TABLE_ACTIVITY_MONITOR_COEFF, + WORKLOAD_PPLIB_CUSTOM_BIT, + (void *)(&activity_monitor_external), + false); + if (ret) { + dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__); + return ret; } - if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) { - if (size != 9) - return -EINVAL; - - ret =
smu_cmn_update_table(smu, - SMU_TABLE_ACTIVITY_MONITOR_COEFF, - WORKLOAD_PPLIB_CUSTOM_BIT, - (void *)(&activity_monitor_external), - false); - if (ret) { - dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__); - return ret; - } - - switch (input[0]) { - case 0: /* Gfxclk */ - activity_monitor->Gfx_FPS = input[1]; - activity_monitor->Gfx_MinActiveFreqType = input[2]; - activity_monitor->Gfx_MinActiveFreq = input[3]; - activity_monitor->Gfx_BoosterFreqType = input[4]; - activity_monitor->Gfx_BoosterFreq = input[5]; - activity_monitor->Gfx_PD_Data_limit_c = input[6]; - activity_monitor->Gfx_PD_Data_error_coeff = input[7]; - activity_monitor->Gfx_PD_Data_error_rate_coeff = input[8]; - break; - case 1: /* Fclk */ - activity_monitor->Fclk_FPS = input[1]; - activity_monitor->Fclk_MinActiveFreqType = input[2]; - activity_monitor->Fclk_MinActiveFreq = input[3]; - activity_monitor->Fclk_BoosterFreqType = input[4]; - activity_monitor->Fclk_BoosterFreq = input[5]; - activity_monitor->Fclk_PD_Data_limit_c = input[6]; - activity_monitor->Fclk_PD_Data_error_coeff = input[7]; - activity_monitor->Fclk_PD_Data_error_rate_coeff = input[8]; - break; - default: - return -EINVAL; - } + idx = 0 * SMU_13_0_0_CUSTOM_PARAMS_COUNT; + if (input[idx]) { + /* Gfxclk */ + activity_monitor->Gfx_FPS = input[idx + 1]; + activity_monitor->Gfx_MinActiveFreqType = input[idx + 2]; + activity_monitor->Gfx_MinActiveFreq = input[idx + 3]; + activity_monitor->Gfx_BoosterFreqType = input[idx + 4]; + activity_monitor->Gfx_BoosterFreq = input[idx + 5]; + activity_monitor->Gfx_PD_Data_limit_c = input[idx + 6]; + activity_monitor->Gfx_PD_Data_error_coeff = input[idx + 7]; + activity_monitor->Gfx_PD_Data_error_rate_coeff = input[idx + 8]; + } + idx = 1 * SMU_13_0_0_CUSTOM_PARAMS_COUNT; + if (input[idx]) { + /* Fclk */ + activity_monitor->Fclk_FPS = input[idx + 1]; + activity_monitor->Fclk_MinActiveFreqType = input[idx + 2]; + activity_monitor->Fclk_MinActiveFreq = input[idx + 3]; + activity_monitor->Fclk_BoosterFreqType = input[idx + 4]; + activity_monitor->Fclk_BoosterFreq = input[idx + 5]; + activity_monitor->Fclk_PD_Data_limit_c = input[idx + 6]; + activity_monitor->Fclk_PD_Data_error_coeff = input[idx + 7]; + activity_monitor->Fclk_PD_Data_error_rate_coeff = input[idx + 8]; + } - ret = smu_cmn_update_table(smu, - SMU_TABLE_ACTIVITY_MONITOR_COEFF, - WORKLOAD_PPLIB_CUSTOM_BIT, - (void *)(&activity_monitor_external), - true); - if (ret) { - dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__); - return ret; - } + ret = smu_cmn_update_table(smu, + SMU_TABLE_ACTIVITY_MONITOR_COEFF, + WORKLOAD_PPLIB_CUSTOM_BIT, + (void *)(&activity_monitor_external), + true); + if (ret) { + dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__); + return ret; } - /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */ - workload_type = smu_cmn_to_asic_specific_index(smu, - CMN2ASIC_MAPPING_WORKLOAD, - smu->power_profile_mode); + return ret; +} - if (workload_type < 0) - return -EINVAL; +static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu, + u32 workload_mask, + long *custom_params, + u32 custom_params_max_idx) +{ + u32 backend_workload_mask = 0; + int workload_type, ret, idx = -1, i; - workload_mask = 1 << workload_type; + smu_cmn_get_backend_workload_mask(smu, workload_mask, + &backend_workload_mask); /* Add optimizations for SMU13.0.0/10. 
Reuse the power saving profile */ if ((amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 0) && @@ -2658,24 +2652,47 @@ static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu, CMN2ASIC_MAPPING_WORKLOAD, PP_SMC_POWER_PROFILE_POWERSAVING); if (workload_type >= 0) - workload_mask |= 1 << workload_type; + backend_workload_mask |= 1 << workload_type; } - smu->workload_mask |= workload_mask; - ret = smu_cmn_send_smc_msg_with_param(smu, - SMU_MSG_SetWorkloadMask, - smu->workload_mask, - NULL); - if (!ret) { - smu_cmn_assign_power_profile(smu); - if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_POWERSAVING) { - workload_type = smu_cmn_to_asic_specific_index(smu, - CMN2ASIC_MAPPING_WORKLOAD, - PP_SMC_POWER_PROFILE_FULLSCREEN3D); - smu->power_profile_mode = smu->workload_mask & (1 << workload_type) - ? PP_SMC_POWER_PROFILE_FULLSCREEN3D - : PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT; + if (workload_mask & (1 << PP_SMC_POWER_PROFILE_CUSTOM)) { + if (!smu->custom_profile_params) { + smu->custom_profile_params = + kzalloc(SMU_13_0_0_CUSTOM_PARAMS_SIZE, GFP_KERNEL); + if (!smu->custom_profile_params) + return -ENOMEM; } + if (custom_params && custom_params_max_idx) { + if (custom_params_max_idx != SMU_13_0_0_CUSTOM_PARAMS_COUNT) + return -EINVAL; + if (custom_params[0] >= SMU_13_0_0_CUSTOM_PARAMS_CLOCK_COUNT) + return -EINVAL; + idx = custom_params[0] * SMU_13_0_0_CUSTOM_PARAMS_COUNT; + smu->custom_profile_params[idx] = 1; + for (i = 1; i < custom_params_max_idx; i++) + smu->custom_profile_params[idx + i] = custom_params[i]; + } + ret = smu_v13_0_0_set_power_profile_mode_coeff(smu, + smu->custom_profile_params); + if (ret) { + if (idx != -1) + smu->custom_profile_params[idx] = 0; + return ret; + } + } else if (smu->custom_profile_params) { + memset(smu->custom_profile_params, 0, SMU_13_0_0_CUSTOM_PARAMS_SIZE); + } + + ret = smu_cmn_send_smc_msg_with_param(smu, + SMU_MSG_SetWorkloadMask, + backend_workload_mask, + NULL); + if (ret) { + dev_err(smu->adev->dev, "Failed to set workload mask 0x%08x\n", + workload_mask); + if (idx != -1) + smu->custom_profile_params[idx] = 0; + return ret; } return ret; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c index 4fd0354bd312..aabb94796005 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c @@ -2530,79 +2530,110 @@ out: return result; } -static int smu_v13_0_7_set_power_profile_mode(struct smu_context *smu, long *input, uint32_t size) +#define SMU_13_0_7_CUSTOM_PARAMS_COUNT 8 +#define SMU_13_0_7_CUSTOM_PARAMS_CLOCK_COUNT 2 +#define SMU_13_0_7_CUSTOM_PARAMS_SIZE (SMU_13_0_7_CUSTOM_PARAMS_CLOCK_COUNT * SMU_13_0_7_CUSTOM_PARAMS_COUNT * sizeof(long)) + +static int smu_v13_0_7_set_power_profile_mode_coeff(struct smu_context *smu, + long *input) { DpmActivityMonitorCoeffIntExternal_t activity_monitor_external; DpmActivityMonitorCoeffInt_t *activity_monitor = &(activity_monitor_external.DpmActivityMonitorCoeffInt); - int workload_type, ret = 0; + int ret, idx; - smu->power_profile_mode = input[size]; + ret = smu_cmn_update_table(smu, + SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT, + (void *)(&activity_monitor_external), false); + if (ret) { + dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__); + return ret; + } - if (smu->power_profile_mode > PP_SMC_POWER_PROFILE_WINDOW3D) { - dev_err(smu->adev->dev, "Invalid power profile mode %d\n", smu->power_profile_mode); - return 
-EINVAL; + idx = 0 * SMU_13_0_7_CUSTOM_PARAMS_COUNT; + if (input[idx]) { + /* Gfxclk */ + activity_monitor->Gfx_ActiveHystLimit = input[idx + 1]; + activity_monitor->Gfx_IdleHystLimit = input[idx + 2]; + activity_monitor->Gfx_FPS = input[idx + 3]; + activity_monitor->Gfx_MinActiveFreqType = input[idx + 4]; + activity_monitor->Gfx_BoosterFreqType = input[idx + 5]; + activity_monitor->Gfx_MinActiveFreq = input[idx + 6]; + activity_monitor->Gfx_BoosterFreq = input[idx + 7]; + } + idx = 1 * SMU_13_0_7_CUSTOM_PARAMS_COUNT; + if (input[idx]) { + /* Fclk */ + activity_monitor->Fclk_ActiveHystLimit = input[idx + 1]; + activity_monitor->Fclk_IdleHystLimit = input[idx + 2]; + activity_monitor->Fclk_FPS = input[idx + 3]; + activity_monitor->Fclk_MinActiveFreqType = input[idx + 4]; + activity_monitor->Fclk_BoosterFreqType = input[idx + 5]; + activity_monitor->Fclk_MinActiveFreq = input[idx + 6]; + activity_monitor->Fclk_BoosterFreq = input[idx + 7]; } - if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) { - if (size != 8) - return -EINVAL; + ret = smu_cmn_update_table(smu, + SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT, + (void *)(&activity_monitor_external), true); + if (ret) { + dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__); + return ret; + } - ret = smu_cmn_update_table(smu, - SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT, - (void *)(&activity_monitor_external), false); - if (ret) { - dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__); - return ret; - } + return ret; +} - switch (input[0]) { - case 0: /* Gfxclk */ - activity_monitor->Gfx_ActiveHystLimit = input[1]; - activity_monitor->Gfx_IdleHystLimit = input[2]; - activity_monitor->Gfx_FPS = input[3]; - activity_monitor->Gfx_MinActiveFreqType = input[4]; - activity_monitor->Gfx_BoosterFreqType = input[5]; - activity_monitor->Gfx_MinActiveFreq = input[6]; - activity_monitor->Gfx_BoosterFreq = input[7]; - break; - case 1: /* Fclk */ - activity_monitor->Fclk_ActiveHystLimit = input[1]; - activity_monitor->Fclk_IdleHystLimit = input[2]; - activity_monitor->Fclk_FPS = input[3]; - activity_monitor->Fclk_MinActiveFreqType = input[4]; - activity_monitor->Fclk_BoosterFreqType = input[5]; - activity_monitor->Fclk_MinActiveFreq = input[6]; - activity_monitor->Fclk_BoosterFreq = input[7]; - break; - default: - return -EINVAL; +static int smu_v13_0_7_set_power_profile_mode(struct smu_context *smu, + u32 workload_mask, + long *custom_params, + u32 custom_params_max_idx) +{ + u32 backend_workload_mask = 0; + int ret, idx = -1, i; + + smu_cmn_get_backend_workload_mask(smu, workload_mask, + &backend_workload_mask); + + if (workload_mask & (1 << PP_SMC_POWER_PROFILE_CUSTOM)) { + if (!smu->custom_profile_params) { + smu->custom_profile_params = + kzalloc(SMU_13_0_7_CUSTOM_PARAMS_SIZE, GFP_KERNEL); + if (!smu->custom_profile_params) + return -ENOMEM; } - - ret = smu_cmn_update_table(smu, - SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT, - (void *)(&activity_monitor_external), true); + if (custom_params && custom_params_max_idx) { + if (custom_params_max_idx != SMU_13_0_7_CUSTOM_PARAMS_COUNT) + return -EINVAL; + if (custom_params[0] >= SMU_13_0_7_CUSTOM_PARAMS_CLOCK_COUNT) + return -EINVAL; + idx = custom_params[0] * SMU_13_0_7_CUSTOM_PARAMS_COUNT; + smu->custom_profile_params[idx] = 1; + for (i = 1; i < custom_params_max_idx; i++) + smu->custom_profile_params[idx + i] = custom_params[i]; + } + ret = smu_v13_0_7_set_power_profile_mode_coeff(smu, + 
smu->custom_profile_params); if (ret) { - dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__); + if (idx != -1) + smu->custom_profile_params[idx] = 0; return ret; } + } else if (smu->custom_profile_params) { + memset(smu->custom_profile_params, 0, SMU_13_0_7_CUSTOM_PARAMS_SIZE); } - /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */ - workload_type = smu_cmn_to_asic_specific_index(smu, - CMN2ASIC_MAPPING_WORKLOAD, - smu->power_profile_mode); - if (workload_type < 0) - return -EINVAL; - ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask, - smu->workload_mask, NULL); + backend_workload_mask, NULL); - if (ret) - dev_err(smu->adev->dev, "[%s] Failed to set work load mask!", __func__); - else - smu_cmn_assign_power_profile(smu); + if (ret) { + dev_err(smu->adev->dev, "Failed to set workload mask 0x%08x\n", + workload_mask); + if (idx != -1) + smu->custom_profile_params[idx] = 0; + return ret; + } return ret; } @@ -2779,4 +2810,5 @@ void smu_v13_0_7_set_ppt_funcs(struct smu_context *smu) smu->workload_map = smu_v13_0_7_workload_map; smu->smc_driver_if_version = SMU13_0_7_DRIVER_IF_VERSION; smu_v13_0_set_smu_mailbox_registers(smu); + smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c index 687a0f5ac94f..5cad09c5f2ff 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c @@ -1739,89 +1739,120 @@ static int smu_v14_0_2_get_power_profile_mode(struct smu_context *smu, return size; } -static int smu_v14_0_2_set_power_profile_mode(struct smu_context *smu, - long *input, - uint32_t size) +#define SMU_14_0_2_CUSTOM_PARAMS_COUNT 9 +#define SMU_14_0_2_CUSTOM_PARAMS_CLOCK_COUNT 2 +#define SMU_14_0_2_CUSTOM_PARAMS_SIZE (SMU_14_0_2_CUSTOM_PARAMS_CLOCK_COUNT * SMU_14_0_2_CUSTOM_PARAMS_COUNT * sizeof(long)) + +static int smu_v14_0_2_set_power_profile_mode_coeff(struct smu_context *smu, + long *input) { DpmActivityMonitorCoeffIntExternal_t activity_monitor_external; DpmActivityMonitorCoeffInt_t *activity_monitor = &(activity_monitor_external.DpmActivityMonitorCoeffInt); - int workload_type, ret = 0; - uint32_t current_profile_mode = smu->power_profile_mode; - smu->power_profile_mode = input[size]; + int ret, idx; - if (smu->power_profile_mode >= PP_SMC_POWER_PROFILE_COUNT) { - dev_err(smu->adev->dev, "Invalid power profile mode %d\n", smu->power_profile_mode); - return -EINVAL; + ret = smu_cmn_update_table(smu, + SMU_TABLE_ACTIVITY_MONITOR_COEFF, + WORKLOAD_PPLIB_CUSTOM_BIT, + (void *)(&activity_monitor_external), + false); + if (ret) { + dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__); + return ret; } - if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) { - if (size != 9) - return -EINVAL; + idx = 0 * SMU_14_0_2_CUSTOM_PARAMS_COUNT; + if (input[idx]) { + /* Gfxclk */ + activity_monitor->Gfx_FPS = input[idx + 1]; + activity_monitor->Gfx_MinActiveFreqType = input[idx + 2]; + activity_monitor->Gfx_MinActiveFreq = input[idx + 3]; + activity_monitor->Gfx_BoosterFreqType = input[idx + 4]; + activity_monitor->Gfx_BoosterFreq = input[idx + 5]; + activity_monitor->Gfx_PD_Data_limit_c = input[idx + 6]; + activity_monitor->Gfx_PD_Data_error_coeff = input[idx + 7]; + activity_monitor->Gfx_PD_Data_error_rate_coeff = input[idx + 8]; + } + idx = 1 * SMU_14_0_2_CUSTOM_PARAMS_COUNT; + if (input[idx]) { + /* Fclk */ + activity_monitor->Fclk_FPS = 
+		activity_monitor->Fclk_FPS = input[idx + 1];
+		activity_monitor->Fclk_MinActiveFreqType = input[idx + 2];
+		activity_monitor->Fclk_MinActiveFreq = input[idx + 3];
+		activity_monitor->Fclk_BoosterFreqType = input[idx + 4];
+		activity_monitor->Fclk_BoosterFreq = input[idx + 5];
+		activity_monitor->Fclk_PD_Data_limit_c = input[idx + 6];
+		activity_monitor->Fclk_PD_Data_error_coeff = input[idx + 7];
+		activity_monitor->Fclk_PD_Data_error_rate_coeff = input[idx + 8];
+	}

-		ret = smu_cmn_update_table(smu,
-					   SMU_TABLE_ACTIVITY_MONITOR_COEFF,
-					   WORKLOAD_PPLIB_CUSTOM_BIT,
-					   (void *)(&activity_monitor_external),
-					   false);
-		if (ret) {
-			dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__);
-			return ret;
-		}
+	ret = smu_cmn_update_table(smu,
+				   SMU_TABLE_ACTIVITY_MONITOR_COEFF,
+				   WORKLOAD_PPLIB_CUSTOM_BIT,
+				   (void *)(&activity_monitor_external),
+				   true);
+	if (ret) {
+		dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__);
+		return ret;
+	}

-		switch (input[0]) {
-		case 0: /* Gfxclk */
-			activity_monitor->Gfx_FPS = input[1];
-			activity_monitor->Gfx_MinActiveFreqType = input[2];
-			activity_monitor->Gfx_MinActiveFreq = input[3];
-			activity_monitor->Gfx_BoosterFreqType = input[4];
-			activity_monitor->Gfx_BoosterFreq = input[5];
-			activity_monitor->Gfx_PD_Data_limit_c = input[6];
-			activity_monitor->Gfx_PD_Data_error_coeff = input[7];
-			activity_monitor->Gfx_PD_Data_error_rate_coeff = input[8];
-			break;
-		case 1: /* Fclk */
-			activity_monitor->Fclk_FPS = input[1];
-			activity_monitor->Fclk_MinActiveFreqType = input[2];
-			activity_monitor->Fclk_MinActiveFreq = input[3];
-			activity_monitor->Fclk_BoosterFreqType = input[4];
-			activity_monitor->Fclk_BoosterFreq = input[5];
-			activity_monitor->Fclk_PD_Data_limit_c = input[6];
-			activity_monitor->Fclk_PD_Data_error_coeff = input[7];
-			activity_monitor->Fclk_PD_Data_error_rate_coeff = input[8];
-			break;
-		default:
-			return -EINVAL;
-		}
+	return ret;
+}

-		ret = smu_cmn_update_table(smu,
-					   SMU_TABLE_ACTIVITY_MONITOR_COEFF,
-					   WORKLOAD_PPLIB_CUSTOM_BIT,
-					   (void *)(&activity_monitor_external),
-					   true);
-		if (ret) {
-			dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__);
-			return ret;
-		}
-	}
+static int smu_v14_0_2_set_power_profile_mode(struct smu_context *smu,
+					      u32 workload_mask,
+					      long *custom_params,
+					      u32 custom_params_max_idx)
+{
+	u32 backend_workload_mask = 0;
+	int ret, idx = -1, i;
+
+	smu_cmn_get_backend_workload_mask(smu, workload_mask,
+					  &backend_workload_mask);

-	if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_COMPUTE)
+	/* disable deep sleep if compute is enabled */
+	if (workload_mask & (1 << PP_SMC_POWER_PROFILE_COMPUTE))
 		smu_v14_0_deep_sleep_control(smu, false);
-	else if (current_profile_mode == PP_SMC_POWER_PROFILE_COMPUTE)
+	else
 		smu_v14_0_deep_sleep_control(smu, true);

-	/* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
-	workload_type = smu_cmn_to_asic_specific_index(smu,
-						       CMN2ASIC_MAPPING_WORKLOAD,
-						       smu->power_profile_mode);
-	if (workload_type < 0)
-		return -EINVAL;
+	if (workload_mask & (1 << PP_SMC_POWER_PROFILE_CUSTOM)) {
+		if (!smu->custom_profile_params) {
+			smu->custom_profile_params =
+				kzalloc(SMU_14_0_2_CUSTOM_PARAMS_SIZE, GFP_KERNEL);
+			if (!smu->custom_profile_params)
+				return -ENOMEM;
+		}
+		if (custom_params && custom_params_max_idx) {
+			if (custom_params_max_idx != SMU_14_0_2_CUSTOM_PARAMS_COUNT)
+				return -EINVAL;
+			if (custom_params[0] >= SMU_14_0_2_CUSTOM_PARAMS_CLOCK_COUNT)
+				return -EINVAL;
+			idx = custom_params[0] * SMU_14_0_2_CUSTOM_PARAMS_COUNT;
+			smu->custom_profile_params[idx] = 1;
+			for (i = 1; i < custom_params_max_idx; i++)
+				smu->custom_profile_params[idx + i] = custom_params[i];
+		}
+		ret = smu_v14_0_2_set_power_profile_mode_coeff(smu,
+							       smu->custom_profile_params);
+		if (ret) {
+			if (idx != -1)
+				smu->custom_profile_params[idx] = 0;
+			return ret;
+		}
+	} else if (smu->custom_profile_params) {
+		memset(smu->custom_profile_params, 0, SMU_14_0_2_CUSTOM_PARAMS_SIZE);
+	}

 	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask,
-					      smu->workload_mask, NULL);
-
-	if (!ret)
-		smu_cmn_assign_power_profile(smu);
+					      backend_workload_mask, NULL);
+	if (ret) {
+		dev_err(smu->adev->dev, "Failed to set workload mask 0x%08x\n",
+			workload_mask);
+		if (idx != -1)
+			smu->custom_profile_params[idx] = 0;
+		return ret;
+	}

 	return ret;
 }
@@ -2065,7 +2096,7 @@ static int smu_v14_0_2_enable_gfx_features(struct smu_context *smu)
 {
 	struct amdgpu_device *adev = smu->adev;

-	if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(14, 0, 2))
+	if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 2))
 		return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_EnableAllSmuFeatures,
 						       FEATURE_PWR_GFX, NULL);
 	else
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
index dbbd3759bff3..9f55207ea9bc 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
@@ -1144,14 +1144,6 @@ int smu_cmn_set_mp1_state(struct smu_context *smu,
 	return ret;
 }

-void smu_cmn_assign_power_profile(struct smu_context *smu)
-{
-	uint32_t index;
-	index = fls(smu->workload_mask);
-	index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
-	smu->power_profile_mode = smu->workload_setting[index];
-}
-
 bool smu_cmn_is_audio_func_enabled(struct amdgpu_device *adev)
 {
 	struct pci_dev *p = NULL;
@@ -1229,3 +1221,28 @@ void smu_cmn_generic_plpd_policy_desc(struct smu_dpm_policy *policy)
 {
 	policy->desc = &xgmi_plpd_policy_desc;
 }
+
+void smu_cmn_get_backend_workload_mask(struct smu_context *smu,
+				       u32 workload_mask,
+				       u32 *backend_workload_mask)
+{
+	int workload_type;
+	u32 profile_mode;
+
+	*backend_workload_mask = 0;
+
+	for (profile_mode = 0; profile_mode < PP_SMC_POWER_PROFILE_COUNT; profile_mode++) {
+		if (!(workload_mask & (1 << profile_mode)))
+			continue;
+
+		/* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
+		workload_type = smu_cmn_to_asic_specific_index(smu,
+							       CMN2ASIC_MAPPING_WORKLOAD,
+							       profile_mode);
+
+		if (workload_type < 0)
+			continue;
+
+		*backend_workload_mask |= 1 << workload_type;
+	}
+}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
index 8a801e389659..a020277dec3e 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
@@ -130,8 +130,6 @@ void smu_cmn_init_soft_gpu_metrics(void *table, uint8_t frev, uint8_t crev);
 int smu_cmn_set_mp1_state(struct smu_context *smu,
 			  enum pp_mp1_state mp1_state);

-void smu_cmn_assign_power_profile(struct smu_context *smu);
-
 /*
  * Helper function to make sysfs_emit_at() happy. Align buf to
  * the current page boundary and record the offset.
@@ -149,5 +147,9 @@ bool smu_cmn_is_audio_func_enabled(struct amdgpu_device *adev);
 void smu_cmn_generic_soc_policy_desc(struct smu_dpm_policy *policy);
 void smu_cmn_generic_plpd_policy_desc(struct smu_dpm_policy *policy);

+void smu_cmn_get_backend_workload_mask(struct smu_context *smu,
+				       u32 workload_mask,
+				       u32 *backend_workload_mask);
+
 #endif
 #endif
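
Editor's note: both set_power_profile_mode() reworks above cache user-supplied custom-profile parameters in one flat array, with one row per clock and slot 0 of each row acting as a "row programmed" flag that is cleared again if the hardware update fails. The following minimal, standalone C sketch illustrates that layout and the validation step; it reuses the SMU_14_0_2_* constant values visible in the diff, while the helper name store_custom_params() and the sample coefficients are invented for illustration (the 13.0.7 variant is identical in shape, just with fewer per-clock fields, as its input[idx + 1..7] accesses show).

	#include <stdio.h>

	#define CUSTOM_PARAMS_COUNT       9	/* slot 0: row-valid flag, 1..8: coefficients */
	#define CUSTOM_PARAMS_CLOCK_COUNT 2	/* row 0: Gfxclk, row 1: Fclk */

	/* Mimics the cached smu->custom_profile_params buffer (kzalloc'ed once). */
	static long custom_profile_params[CUSTOM_PARAMS_CLOCK_COUNT * CUSTOM_PARAMS_COUNT];

	/*
	 * Mirrors the validate-and-stage step: params[0] selects the clock row,
	 * the remaining entries are the coefficients.  Returns the staged row
	 * index so the caller can clear the valid flag again if programming
	 * the hardware fails, exactly like the error paths in the diff.
	 */
	static int store_custom_params(const long *params, unsigned int count)
	{
		int idx, i;

		if (count != CUSTOM_PARAMS_COUNT)
			return -1;		/* wrong number of fields */
		if (params[0] >= CUSTOM_PARAMS_CLOCK_COUNT)
			return -1;		/* unknown clock selector */

		idx = params[0] * CUSTOM_PARAMS_COUNT;
		custom_profile_params[idx] = 1;	/* mark this clock's row as set */
		for (i = 1; i < (int)count; i++)
			custom_profile_params[idx + i] = params[i];
		return idx;
	}

	int main(void)
	{
		/* params[0] = 1 selects the Fclk row; the coefficients are made up. */
		long fclk_row[CUSTOM_PARAMS_COUNT] = { 1, 300, 0, 500, 0, 600, 80, 90, 95 };
		int idx = store_custom_params(fclk_row, CUSTOM_PARAMS_COUNT);

		if (idx < 0)
			return 1;
		printf("Fclk row staged at index %d, valid flag %ld\n",
		       idx, custom_profile_params[idx]);
		return 0;
	}

Because the buffer persists across calls, a previously staged Gfxclk row stays in effect while only the Fclk row is updated; the memset() in the non-custom branch is what finally drops all cached rows once the CUSTOM profile is deselected.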
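The new smu_cmn_get_backend_workload_mask() helper folds the generic PP_SMC_POWER_PROFILE_* selection mask into the ASIC-specific WORKLOAD_PPLIB_*_BIT mask, silently skipping profiles the ASIC does not map. A small self-contained sketch of that folding follows; the mapping table and its bit values are invented for illustration only (in the driver the translation goes through smu_cmn_to_asic_specific_index() and the per-ASIC workload_map).

	#include <stdio.h>

	#define PROFILE_COUNT 7	/* stand-in for PP_SMC_POWER_PROFILE_COUNT */

	/* Hypothetical per-ASIC mapping; a negative entry means "not supported". */
	static const int asic_workload_bit[PROFILE_COUNT] = {
		0,	/* BOOTUP_DEFAULT -> backend bit 0 */
		1,	/* FULLSCREEN3D  -> backend bit 1 */
		-1,	/* POWERSAVING: unmapped, silently dropped */
		4,	/* VIDEO         -> backend bit 4 */
		5,	/* VR            -> backend bit 5 */
		6,	/* COMPUTE       -> backend bit 6 */
		3,	/* CUSTOM        -> backend bit 3 */
	};

	/* Same shape as the loop in smu_cmn_get_backend_workload_mask(). */
	static unsigned int backend_mask(unsigned int workload_mask)
	{
		unsigned int out = 0;
		int profile;

		for (profile = 0; profile < PROFILE_COUNT; profile++) {
			if (!(workload_mask & (1u << profile)))
				continue;
			if (asic_workload_bit[profile] < 0)
				continue;	/* unmapped profiles are skipped */
			out |= 1u << asic_workload_bit[profile];
		}
		return out;
	}

	int main(void)
	{
		/* front-end bits 1 and 5 selected: prints 0x42 with the table above */
		printf("backend mask: 0x%x\n", backend_mask((1u << 1) | (1u << 5)));
		return 0;
	}

This is also why the callers above pass backend_workload_mask to SMU_MSG_SetWorkloadMask but report the original workload_mask in their error messages: the front-end mask is what the user selected, the backend mask is what the firmware actually understands.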