Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c')
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c	100
1 file changed, 50 insertions, 50 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c
index 1769c4cba2ad..8058ea91ecaf 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c
@@ -85,7 +85,7 @@ static const struct soc15_reg_golden golden_settings_gc_9_4_2_alde[] = {
 	SOC15_REG_GOLDEN_VALUE(GC, 0, regTCI_CNTL_3, 0xff, 0x20),
 };
 
-/**
+/*
  * This shader is used to clear VGPRS and LDS, and also write the input
  * pattern into the write back buffer, which will be used by driver to
  * check whether all SIMDs have been covered.
@@ -206,7 +206,7 @@ const struct soc15_reg_entry vgpr_init_regs_aldebaran[] = {
 	{ SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE7), 0xffffffff },
 };
 
-/**
+/*
  * The below shaders are used to clear SGPRS, and also write the input
  * pattern into the write back buffer. The first two dispatch should be
  * scheduled simultaneously which make sure that all SGPRS could be
@@ -302,7 +302,7 @@ const struct soc15_reg_entry sgpr96_init_regs_aldebaran[] = {
 	{ SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE7), 0xffffffff },
 };
 
-/**
+/*
  * This shader is used to clear the uninitiated sgprs after the above
  * two dispatches, because of hardware feature, dispath 0 couldn't clear
  * top hole sgprs. Therefore need 4 waves per SIMD to cover these sgprs
@@ -412,7 +412,7 @@ static int gfx_v9_4_2_run_shader(struct amdgpu_device *adev,
 	r = amdgpu_ib_schedule(ring, 1, ib, NULL, fence_ptr);
 	if (r) {
 		dev_err(adev->dev, "ib submit failed (%d).\n", r);
-		amdgpu_ib_free(adev, ib, NULL);
+		amdgpu_ib_free(ib, NULL);
 	}
 	return r;
 }
@@ -611,16 +611,16 @@ static int gfx_v9_4_2_do_sgprs_init(struct amdgpu_device *adev)
 	}
 
 disp2_failed:
-	amdgpu_ib_free(adev, &disp_ibs[2], NULL);
+	amdgpu_ib_free(&disp_ibs[2], NULL);
 	dma_fence_put(fences[2]);
 disp1_failed:
-	amdgpu_ib_free(adev, &disp_ibs[1], NULL);
+	amdgpu_ib_free(&disp_ibs[1], NULL);
 	dma_fence_put(fences[1]);
 disp0_failed:
-	amdgpu_ib_free(adev, &disp_ibs[0], NULL);
+	amdgpu_ib_free(&disp_ibs[0], NULL);
 	dma_fence_put(fences[0]);
 pro_end:
-	amdgpu_ib_free(adev, &wb_ib, NULL);
+	amdgpu_ib_free(&wb_ib, NULL);
 
 	if (r)
 		dev_info(adev->dev, "Init SGPRS Failed\n");
@@ -687,10 +687,10 @@ static int gfx_v9_4_2_do_vgprs_init(struct amdgpu_device *adev)
 	}
 
 disp_failed:
-	amdgpu_ib_free(adev, &disp_ib, NULL);
+	amdgpu_ib_free(&disp_ib, NULL);
 	dma_fence_put(fence);
 pro_end:
-	amdgpu_ib_free(adev, &wb_ib, NULL);
+	amdgpu_ib_free(&wb_ib, NULL);
 
 	if (r)
 		dev_info(adev->dev, "Init VGPRS Failed\n");
@@ -706,6 +706,11 @@ int gfx_v9_4_2_do_edc_gpr_workarounds(struct amdgpu_device *adev)
 	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
 		return 0;
 
+	/* Workaround for ALDEBARAN, skip GPRs init in GPU reset.
+	   Will remove it once GPRs init algorithm works for all CU settings. */
+	if (amdgpu_in_reset(adev))
+		return 0;
+
 	gfx_v9_4_2_do_sgprs_init(adev);
 
 	gfx_v9_4_2_do_vgprs_init(adev);
@@ -741,8 +746,18 @@ void gfx_v9_4_2_init_golden_registers(struct amdgpu_device *adev,
 			die_id);
 		break;
 	}
+}
+
+void gfx_v9_4_2_init_sq(struct amdgpu_device *adev)
+{
+	uint32_t data;
 
-	return;
+	if (adev->gfx.mec_fw_version >= 98) {
+		adev->gmc.xnack_flags |= AMDGPU_GMC_XNACK_FLAG_CHAIN;
+		data = RREG32_SOC15(GC, 0, regSQ_CONFIG1);
+		data = REG_SET_FIELD(data, SQ_CONFIG1, DISABLE_XNACK_CHECK_IN_RETRY_DISABLE, 1);
+		WREG32_SOC15(GC, 0, regSQ_CONFIG1, data);
+	}
 }
 
 void gfx_v9_4_2_debug_trap_config_init(struct amdgpu_device *adev,
@@ -756,7 +771,7 @@ void gfx_v9_4_2_debug_trap_config_init(struct amdgpu_device *adev,
 
 	for (i = first_vmid; i < last_vmid; i++) {
 		data = 0;
-		soc15_grbm_select(adev, 0, 0, 0, i);
+		soc15_grbm_select(adev, 0, 0, 0, i, 0);
 		data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, TRAP_EN, 1);
 		data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, EXCP_EN, 0);
 		data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, EXCP_REPLACE,
@@ -764,15 +779,18 @@ void gfx_v9_4_2_debug_trap_config_init(struct amdgpu_device *adev,
 		WREG32(SOC15_REG_OFFSET(GC, 0, regSPI_GDBG_PER_VMID_CNTL), data);
 	}
 
-	soc15_grbm_select(adev, 0, 0, 0, 0);
+	soc15_grbm_select(adev, 0, 0, 0, 0, 0);
 	mutex_unlock(&adev->srbm_mutex);
+
+	WREG32(SOC15_REG_OFFSET(GC, 0, regSPI_GDBG_TRAP_DATA0), 0);
+	WREG32(SOC15_REG_OFFSET(GC, 0, regSPI_GDBG_TRAP_DATA1), 0);
 }
 
 void gfx_v9_4_2_set_power_brake_sequence(struct amdgpu_device *adev)
 {
 	u32 tmp;
 
-	gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+	gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
 
 	tmp = 0;
 	tmp = REG_SET_FIELD(tmp, GC_THROTTLE_CTRL, PATTERN_MODE, 1);
@@ -1540,8 +1558,8 @@ static void gfx_v9_4_2_log_utc_edc_count(struct amdgpu_device *adev,
 					  uint32_t ded_cnt)
 {
 	uint32_t bank, way, mem;
-	static const char *vml2_way_str[] = { "BIGK", "4K" };
-	static const char *utcl2_rounter_str[] = { "VMC", "APT" };
+	static const char * const vml2_way_str[] = { "BIGK", "4K" };
+	static const char * const utcl2_router_str[] = { "VMC", "APT" };
 
 	mem = instance % blk->num_mem_blocks;
 	way = (instance / blk->num_mem_blocks) % blk->num_ways;
@@ -1562,7 +1580,7 @@ static void gfx_v9_4_2_log_utc_edc_count(struct amdgpu_device *adev,
 		dev_info(
 			adev->dev,
 			"GFX SubBlock UTCL2_ROUTER_IFIF%d_GROUP0_%s, SED %d, DED %d\n",
-			bank, utcl2_rounter_str[mem], sec_cnt, ded_cnt);
+			bank, utcl2_router_str[mem], sec_cnt, ded_cnt);
 		break;
 	case ATC_L2_CACHE_2M:
 		dev_info(
@@ -1636,14 +1654,14 @@ static int gfx_v9_4_2_query_utc_edc_count(struct amdgpu_device *adev,
 	return 0;
 }
 
-static int gfx_v9_4_2_query_ras_error_count(struct amdgpu_device *adev,
+static void gfx_v9_4_2_query_ras_error_count(struct amdgpu_device *adev,
 					    void *ras_error_status)
 {
 	struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
 	uint32_t sec_count = 0, ded_count = 0;
 
 	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
-		return -EINVAL;
+		return;
 
 	err_data->ue_count = 0;
 	err_data->ce_count = 0;
@@ -1656,7 +1674,6 @@ static int gfx_v9_4_2_query_ras_error_count(struct amdgpu_device *adev,
 	err_data->ce_count += sec_count;
 	err_data->ue_count += ded_count;
 
-	return 0;
 }
 
 static void gfx_v9_4_2_reset_utc_err_status(struct amdgpu_device *adev)
@@ -1695,28 +1712,6 @@ static void gfx_v9_4_2_reset_ras_error_count(struct amdgpu_device *adev)
 	gfx_v9_4_2_query_utc_edc_count(adev, NULL, NULL);
 }
 
-static int gfx_v9_4_2_ras_error_inject(struct amdgpu_device *adev, void *inject_if)
-{
-	struct ras_inject_if *info = (struct ras_inject_if *)inject_if;
-	int ret;
-	struct ta_ras_trigger_error_input block_info = { 0 };
-
-	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
-		return -EINVAL;
-
-	block_info.block_id = amdgpu_ras_block_to_ta(info->head.block);
-	block_info.sub_block_index = info->head.sub_block_index;
-	block_info.inject_error_type = amdgpu_ras_error_to_ta(info->head.type);
-	block_info.address = info->address;
-	block_info.value = info->value;
-
-	mutex_lock(&adev->grbm_idx_mutex);
-	ret = psp_ras_trigger_error(&adev->psp, &block_info);
-	mutex_unlock(&adev->grbm_idx_mutex);
-
-	return ret;
-}
-
 static void gfx_v9_4_2_query_ea_err_status(struct amdgpu_device *adev)
 {
 	uint32_t i, j;
@@ -1926,13 +1921,18 @@ static void gfx_v9_4_2_reset_sq_timeout_status(struct amdgpu_device *adev)
 	mutex_unlock(&adev->grbm_idx_mutex);
 }
 
-const struct amdgpu_gfx_ras_funcs gfx_v9_4_2_ras_funcs = {
-	.ras_late_init = amdgpu_gfx_ras_late_init,
-	.ras_fini = amdgpu_gfx_ras_fini,
-	.ras_error_inject = &gfx_v9_4_2_ras_error_inject,
-	.query_ras_error_count = &gfx_v9_4_2_query_ras_error_count,
-	.reset_ras_error_count = &gfx_v9_4_2_reset_ras_error_count,
-	.query_ras_error_status = &gfx_v9_4_2_query_ras_error_status,
-	.reset_ras_error_status = &gfx_v9_4_2_reset_ras_error_status,
+
+
+struct amdgpu_ras_block_hw_ops gfx_v9_4_2_ras_ops = {
+	.query_ras_error_count = &gfx_v9_4_2_query_ras_error_count,
+	.reset_ras_error_count = &gfx_v9_4_2_reset_ras_error_count,
+	.query_ras_error_status = &gfx_v9_4_2_query_ras_error_status,
+	.reset_ras_error_status = &gfx_v9_4_2_reset_ras_error_status,
+};
+
+struct amdgpu_gfx_ras gfx_v9_4_2_ras = {
+	.ras_block = {
+		.hw_ops = &gfx_v9_4_2_ras_ops,
+	},
 	.enable_watchdog_timer = &gfx_v9_4_2_enable_watchdog_timer,
 };
