Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c')
| -rw-r--r-- | drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c | 298 |
1 file changed, 176 insertions(+), 122 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
index b040f51d9aa9..20983f126b49 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
@@ -36,9 +36,25 @@
 
 #define JPEG25_MAX_HW_INSTANCES_ARCTURUS 2
 
+static const struct amdgpu_hwip_reg_entry jpeg_reg_list_2_5[] = {
+	SOC15_REG_ENTRY_STR(JPEG, 0, mmUVD_JPEG_POWER_STATUS),
+	SOC15_REG_ENTRY_STR(JPEG, 0, mmUVD_JPEG_INT_STAT),
+	SOC15_REG_ENTRY_STR(JPEG, 0, mmUVD_JRBC_RB_RPTR),
+	SOC15_REG_ENTRY_STR(JPEG, 0, mmUVD_JRBC_RB_WPTR),
+	SOC15_REG_ENTRY_STR(JPEG, 0, mmUVD_JRBC_RB_CNTL),
+	SOC15_REG_ENTRY_STR(JPEG, 0, mmUVD_JRBC_RB_SIZE),
+	SOC15_REG_ENTRY_STR(JPEG, 0, mmUVD_JRBC_STATUS),
+	SOC15_REG_ENTRY_STR(JPEG, 0, mmJPEG_DEC_ADDR_MODE),
+	SOC15_REG_ENTRY_STR(JPEG, 0, mmJPEG_DEC_GFX10_ADDR_CONFIG),
+	SOC15_REG_ENTRY_STR(JPEG, 0, mmJPEG_DEC_Y_GFX10_TILING_SURFACE),
+	SOC15_REG_ENTRY_STR(JPEG, 0, mmJPEG_DEC_UV_GFX10_TILING_SURFACE),
+	SOC15_REG_ENTRY_STR(JPEG, 0, mmUVD_JPEG_PITCH),
+	SOC15_REG_ENTRY_STR(JPEG, 0, mmUVD_JPEG_UV_PITCH),
+};
+
 static void jpeg_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev);
 static void jpeg_v2_5_set_irq_funcs(struct amdgpu_device *adev);
-static int jpeg_v2_5_set_powergating_state(void *handle,
+static int jpeg_v2_5_set_powergating_state(struct amdgpu_ip_block *ip_block,
 				enum amd_powergating_state state);
 static void jpeg_v2_5_set_ras_funcs(struct amdgpu_device *adev);
 
@@ -50,16 +66,17 @@ static int amdgpu_ih_clientid_jpeg[] = {
 /**
  * jpeg_v2_5_early_init - set function pointers
  *
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
  *
  * Set ring and irq function pointers
  */
-static int jpeg_v2_5_early_init(void *handle)
+static int jpeg_v2_5_early_init(struct amdgpu_ip_block *ip_block)
 {
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	struct amdgpu_device *adev = ip_block->adev;
 	u32 harvest;
 	int i;
 
+	adev->jpeg.num_jpeg_rings = 1;
 	adev->jpeg.num_jpeg_inst = JPEG25_MAX_HW_INSTANCES_ARCTURUS;
 	for (i = 0; i < adev->jpeg.num_jpeg_inst; i++) {
 		harvest = RREG32_SOC15(JPEG, i, mmCC_UVD_HARVESTING);
@@ -80,15 +97,15 @@ static int jpeg_v2_5_early_init(void *handle)
 /**
  * jpeg_v2_5_sw_init - sw init for JPEG block
  *
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
  *
  * Load firmware and sw initialization
  */
-static int jpeg_v2_5_sw_init(void *handle)
+static int jpeg_v2_5_sw_init(struct amdgpu_ip_block *ip_block)
 {
 	struct amdgpu_ring *ring;
 	int i, r;
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	struct amdgpu_device *adev = ip_block->adev;
 
 	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
 		if (adev->jpeg.harvest_config & (1 << i))
@@ -102,13 +119,13 @@ static int jpeg_v2_5_sw_init(void *handle)
 
 		/* JPEG DJPEG POISON EVENT */
 		r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_jpeg[i],
-			VCN_2_6__SRCID_DJPEG0_POISON, &adev->jpeg.inst[i].irq);
+			VCN_2_6__SRCID_DJPEG0_POISON, &adev->jpeg.inst[i].ras_poison_irq);
 		if (r)
 			return r;
 
 		/* JPEG EJPEG POISON EVENT */
 		r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_jpeg[i],
-			VCN_2_6__SRCID_EJPEG0_POISON, &adev->jpeg.inst[i].irq);
+			VCN_2_6__SRCID_EJPEG0_POISON, &adev->jpeg.inst[i].ras_poison_irq);
 		if (r)
 			return r;
 	}
@@ -125,12 +142,12 @@ static int jpeg_v2_5_sw_init(void *handle)
 		if (adev->jpeg.harvest_config & (1 << i))
 			continue;
 
-		ring = &adev->jpeg.inst[i].ring_dec;
+		ring = adev->jpeg.inst[i].ring_dec;
 		ring->use_doorbell = true;
-		if (adev->ip_versions[UVD_HWIP][0] == IP_VERSION(2, 5, 0))
-			ring->vm_hub = AMDGPU_MMHUB_1;
+		if (amdgpu_ip_version(adev, UVD_HWIP, 0) == IP_VERSION(2, 5, 0))
+			ring->vm_hub = AMDGPU_MMHUB1(0);
 		else
-			ring->vm_hub = AMDGPU_MMHUB_0;
+			ring->vm_hub = AMDGPU_MMHUB0(0);
 		ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1 + 8 * i;
 		sprintf(ring->name, "jpeg_dec_%d", i);
 		r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst[i].irq,
@@ -138,33 +155,45 @@ static int jpeg_v2_5_sw_init(void *handle)
 		if (r)
 			return r;
 
-		adev->jpeg.internal.jpeg_pitch = mmUVD_JPEG_PITCH_INTERNAL_OFFSET;
-		adev->jpeg.inst[i].external.jpeg_pitch = SOC15_REG_OFFSET(JPEG, i, mmUVD_JPEG_PITCH);
+		adev->jpeg.internal.jpeg_pitch[0] = mmUVD_JPEG_PITCH_INTERNAL_OFFSET;
+		adev->jpeg.inst[i].external.jpeg_pitch[0] = SOC15_REG_OFFSET(JPEG, i, mmUVD_JPEG_PITCH);
 	}
 
 	r = amdgpu_jpeg_ras_sw_init(adev);
 	if (r)
 		return r;
 
-	return 0;
+	r = amdgpu_jpeg_reg_dump_init(adev, jpeg_reg_list_2_5, ARRAY_SIZE(jpeg_reg_list_2_5));
+	if (r)
+		return r;
+
+	adev->jpeg.supported_reset =
+		amdgpu_get_soft_full_reset_mask(adev->jpeg.inst[0].ring_dec);
+	if (!amdgpu_sriov_vf(adev))
+		adev->jpeg.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+	r = amdgpu_jpeg_sysfs_reset_mask_init(adev);
+
+	return r;
 }
 
 /**
  * jpeg_v2_5_sw_fini - sw fini for JPEG block
  *
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
  *
  * JPEG suspend and free up sw allocation
  */
-static int jpeg_v2_5_sw_fini(void *handle)
+static int jpeg_v2_5_sw_fini(struct amdgpu_ip_block *ip_block)
 {
 	int r;
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	struct amdgpu_device *adev = ip_block->adev;
 
 	r = amdgpu_jpeg_suspend(adev);
 	if (r)
 		return r;
 
+	amdgpu_jpeg_sysfs_reset_mask_fini(adev);
+
 	r = amdgpu_jpeg_sw_fini(adev);
 
 	return r;
@@ -173,12 +202,12 @@ static int jpeg_v2_5_sw_fini(void *handle)
 /**
  * jpeg_v2_5_hw_init - start and test JPEG block
  *
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
  *
  */
-static int jpeg_v2_5_hw_init(void *handle)
+static int jpeg_v2_5_hw_init(struct amdgpu_ip_block *ip_block)
 {
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	struct amdgpu_device *adev = ip_block->adev;
 	struct amdgpu_ring *ring;
 	int i, r;
 
@@ -186,7 +215,7 @@ static int jpeg_v2_5_hw_init(void *handle)
 		if (adev->jpeg.harvest_config & (1 << i))
 			continue;
 
-		ring = &adev->jpeg.inst[i].ring_dec;
+		ring = adev->jpeg.inst[i].ring_dec;
 		adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
 			(adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 8 * i, i);
 
@@ -195,24 +224,22 @@ static int jpeg_v2_5_hw_init(void *handle)
 			return r;
 	}
 
-	DRM_INFO("JPEG decode initialized successfully.\n");
-
 	return 0;
 }
 
 /**
  * jpeg_v2_5_hw_fini - stop the hardware block
  *
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
  *
  * Stop the JPEG block, mark ring as not ready any more
  */
-static int jpeg_v2_5_hw_fini(void *handle)
+static int jpeg_v2_5_hw_fini(struct amdgpu_ip_block *ip_block)
 {
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	struct amdgpu_device *adev = ip_block->adev;
 	int i;
 
-	cancel_delayed_work_sync(&adev->vcn.idle_work);
+	cancel_delayed_work_sync(&adev->jpeg.idle_work);
 
 	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
 		if (adev->jpeg.harvest_config & (1 << i))
@@ -220,7 +247,10 @@ static int jpeg_v2_5_hw_fini(void *handle)
 
 		if (adev->jpeg.cur_state != AMD_PG_STATE_GATE &&
 		      RREG32_SOC15(JPEG, i, mmUVD_JRBC_STATUS))
-			jpeg_v2_5_set_powergating_state(adev, AMD_PG_STATE_GATE);
+			jpeg_v2_5_set_powergating_state(ip_block, AMD_PG_STATE_GATE);
+
+		if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__JPEG))
+			amdgpu_irq_put(adev, &adev->jpeg.inst[i].ras_poison_irq, 0);
 	}
 
 	return 0;
@@ -229,20 +259,19 @@ static int jpeg_v2_5_hw_fini(void *handle)
 /**
  * jpeg_v2_5_suspend - suspend JPEG block
  *
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
  *
  * HW fini and suspend JPEG block
  */
-static int jpeg_v2_5_suspend(void *handle)
+static int jpeg_v2_5_suspend(struct amdgpu_ip_block *ip_block)
 {
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	int r;
 
-	r = jpeg_v2_5_hw_fini(adev);
+	r = jpeg_v2_5_hw_fini(ip_block);
 	if (r)
 		return r;
 
-	r = amdgpu_jpeg_suspend(adev);
+	r = amdgpu_jpeg_suspend(ip_block->adev);
 
 	return r;
 }
@@ -250,20 +279,19 @@ static int jpeg_v2_5_suspend(void *handle)
 /**
  * jpeg_v2_5_resume - resume JPEG block
  *
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
  *
  * Resume firmware and hw init JPEG block
  */
-static int jpeg_v2_5_resume(void *handle)
+static int jpeg_v2_5_resume(struct amdgpu_ip_block *ip_block)
 {
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	int r;
 
-	r = amdgpu_jpeg_resume(adev);
+	r = amdgpu_jpeg_resume(ip_block->adev);
 	if (r)
 		return r;
 
-	r = jpeg_v2_5_hw_init(adev);
+	r = jpeg_v2_5_hw_init(ip_block);
 
 	return r;
 }
@@ -310,6 +338,44 @@ static void jpeg_v2_5_enable_clock_gating(struct amdgpu_device *adev, int inst)
 	WREG32_SOC15(JPEG, inst, mmJPEG_CGC_GATE, data);
 }
 
+static void jpeg_v2_5_start_inst(struct amdgpu_device *adev, int i)
+{
+	struct amdgpu_ring *ring = adev->jpeg.inst[i].ring_dec;
+	/* disable anti hang mechanism */
+	WREG32_P(SOC15_REG_OFFSET(JPEG, i, mmUVD_JPEG_POWER_STATUS), 0,
+		~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);
+
+	/* JPEG disable CGC */
+	jpeg_v2_5_disable_clock_gating(adev, i);
+
+	/* MJPEG global tiling registers */
+	WREG32_SOC15(JPEG, i, mmJPEG_DEC_GFX8_ADDR_CONFIG,
+		adev->gfx.config.gb_addr_config);
+	WREG32_SOC15(JPEG, i, mmJPEG_DEC_GFX10_ADDR_CONFIG,
+		adev->gfx.config.gb_addr_config);
+
+	/* enable JMI channel */
+	WREG32_P(SOC15_REG_OFFSET(JPEG, i, mmUVD_JMI_CNTL), 0,
+		~UVD_JMI_CNTL__SOFT_RESET_MASK);
+
+	/* enable System Interrupt for JRBC */
+	WREG32_P(SOC15_REG_OFFSET(JPEG, i, mmJPEG_SYS_INT_EN),
+		JPEG_SYS_INT_EN__DJRBC_MASK,
+		~JPEG_SYS_INT_EN__DJRBC_MASK);
+
+	WREG32_SOC15(JPEG, i, mmUVD_LMI_JRBC_RB_VMID, 0);
+	WREG32_SOC15(JPEG, i, mmUVD_JRBC_RB_CNTL, (0x00000001L | 0x00000002L));
+	WREG32_SOC15(JPEG, i, mmUVD_LMI_JRBC_RB_64BIT_BAR_LOW,
+		lower_32_bits(ring->gpu_addr));
+	WREG32_SOC15(JPEG, i, mmUVD_LMI_JRBC_RB_64BIT_BAR_HIGH,
+		upper_32_bits(ring->gpu_addr));
+	WREG32_SOC15(JPEG, i, mmUVD_JRBC_RB_RPTR, 0);
+	WREG32_SOC15(JPEG, i, mmUVD_JRBC_RB_WPTR, 0);
+	WREG32_SOC15(JPEG, i, mmUVD_JRBC_RB_CNTL, 0x00000002L);
+	WREG32_SOC15(JPEG, i, mmUVD_JRBC_RB_SIZE, ring->ring_size / 4);
+	ring->wptr = RREG32_SOC15(JPEG, i, mmUVD_JRBC_RB_WPTR);
+}
+
 /**
  * jpeg_v2_5_start - start JPEG block
  *
@@ -319,52 +385,33 @@ static void jpeg_v2_5_enable_clock_gating(struct amdgpu_device *adev, int inst)
  */
 static int jpeg_v2_5_start(struct amdgpu_device *adev)
 {
-	struct amdgpu_ring *ring;
 	int i;
 
 	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
 		if (adev->jpeg.harvest_config & (1 << i))
 			continue;
+		jpeg_v2_5_start_inst(adev, i);
 
-		ring = &adev->jpeg.inst[i].ring_dec;
-		/* disable anti hang mechanism */
-		WREG32_P(SOC15_REG_OFFSET(JPEG, i, mmUVD_JPEG_POWER_STATUS), 0,
-			~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);
-
-		/* JPEG disable CGC */
-		jpeg_v2_5_disable_clock_gating(adev, i);
-
-		/* MJPEG global tiling registers */
-		WREG32_SOC15(JPEG, i, mmJPEG_DEC_GFX8_ADDR_CONFIG,
-			adev->gfx.config.gb_addr_config);
-		WREG32_SOC15(JPEG, i, mmJPEG_DEC_GFX10_ADDR_CONFIG,
-			adev->gfx.config.gb_addr_config);
-
-		/* enable JMI channel */
-		WREG32_P(SOC15_REG_OFFSET(JPEG, i, mmUVD_JMI_CNTL), 0,
-			~UVD_JMI_CNTL__SOFT_RESET_MASK);
-
-		/* enable System Interrupt for JRBC */
-		WREG32_P(SOC15_REG_OFFSET(JPEG, i, mmJPEG_SYS_INT_EN),
-			JPEG_SYS_INT_EN__DJRBC_MASK,
-			~JPEG_SYS_INT_EN__DJRBC_MASK);
-
-		WREG32_SOC15(JPEG, i, mmUVD_LMI_JRBC_RB_VMID, 0);
-		WREG32_SOC15(JPEG, i, mmUVD_JRBC_RB_CNTL, (0x00000001L | 0x00000002L));
-		WREG32_SOC15(JPEG, i, mmUVD_LMI_JRBC_RB_64BIT_BAR_LOW,
-			lower_32_bits(ring->gpu_addr));
-		WREG32_SOC15(JPEG, i, mmUVD_LMI_JRBC_RB_64BIT_BAR_HIGH,
-			upper_32_bits(ring->gpu_addr));
-		WREG32_SOC15(JPEG, i, mmUVD_JRBC_RB_RPTR, 0);
-		WREG32_SOC15(JPEG, i, mmUVD_JRBC_RB_WPTR, 0);
-		WREG32_SOC15(JPEG, i, mmUVD_JRBC_RB_CNTL, 0x00000002L);
-		WREG32_SOC15(JPEG, i, mmUVD_JRBC_RB_SIZE, ring->ring_size / 4);
-		ring->wptr = RREG32_SOC15(JPEG, i, mmUVD_JRBC_RB_WPTR);
 	}
 
 	return 0;
 }
 
+static void jpeg_v2_5_stop_inst(struct amdgpu_device *adev, int i)
+{
+	/* reset JMI */
+	WREG32_P(SOC15_REG_OFFSET(JPEG, i, mmUVD_JMI_CNTL),
+		UVD_JMI_CNTL__SOFT_RESET_MASK,
+		~UVD_JMI_CNTL__SOFT_RESET_MASK);
+
+	jpeg_v2_5_enable_clock_gating(adev, i);
+
+	/* enable anti hang mechanism */
+	WREG32_P(SOC15_REG_OFFSET(JPEG, i, mmUVD_JPEG_POWER_STATUS),
+		UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK,
+		~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);
+}
+
 /**
  * jpeg_v2_5_stop - stop JPEG block
  *
@@ -379,18 +426,7 @@ static int jpeg_v2_5_stop(struct amdgpu_device *adev)
 	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
 		if (adev->jpeg.harvest_config & (1 << i))
 			continue;
-
-		/* reset JMI */
-		WREG32_P(SOC15_REG_OFFSET(JPEG, i, mmUVD_JMI_CNTL),
-			UVD_JMI_CNTL__SOFT_RESET_MASK,
-			~UVD_JMI_CNTL__SOFT_RESET_MASK);
-
-		jpeg_v2_5_enable_clock_gating(adev, i);
-
-		/* enable anti hang mechanism */
-		WREG32_P(SOC15_REG_OFFSET(JPEG, i, mmUVD_JPEG_POWER_STATUS),
-			UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK,
-			~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);
+		jpeg_v2_5_stop_inst(adev, i);
 	}
 
 	return 0;
@@ -482,9 +518,9 @@ static void jpeg_v2_6_dec_ring_insert_end(struct amdgpu_ring *ring)
 	amdgpu_ring_write(ring, (1 << (ring->me * 2 + 14)));
 }
 
-static bool jpeg_v2_5_is_idle(void *handle)
+static bool jpeg_v2_5_is_idle(struct amdgpu_ip_block *ip_block)
 {
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	struct amdgpu_device *adev = ip_block->adev;
 	int i, ret = 1;
 
 	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
@@ -499,9 +535,9 @@ static bool jpeg_v2_5_is_idle(void *handle)
 	return ret;
 }
 
-static int jpeg_v2_5_wait_for_idle(void *handle)
+static int jpeg_v2_5_wait_for_idle(struct amdgpu_ip_block *ip_block)
 {
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	struct amdgpu_device *adev = ip_block->adev;
 	int i, ret;
 
 	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
@@ -518,10 +554,10 @@ static int jpeg_v2_5_wait_for_idle(void *handle)
 	return 0;
 }
 
-static int jpeg_v2_5_set_clockgating_state(void *handle,
+static int jpeg_v2_5_set_clockgating_state(struct amdgpu_ip_block *ip_block,
 					  enum amd_clockgating_state state)
 {
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	struct amdgpu_device *adev = ip_block->adev;
 	bool enable = (state == AMD_CG_STATE_GATE);
 	int i;
 
@@ -530,7 +566,7 @@ static int jpeg_v2_5_set_clockgating_state(void *handle,
 			continue;
 
 		if (enable) {
-			if (!jpeg_v2_5_is_idle(handle))
+			if (!jpeg_v2_5_is_idle(ip_block))
 				return -EBUSY;
 			jpeg_v2_5_enable_clock_gating(adev, i);
 		} else {
@@ -541,13 +577,13 @@ static int jpeg_v2_5_set_clockgating_state(void *handle,
 	return 0;
 }
 
-static int jpeg_v2_5_set_powergating_state(void *handle,
+static int jpeg_v2_5_set_powergating_state(struct amdgpu_ip_block *ip_block,
 					enum amd_powergating_state state)
 {
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	struct amdgpu_device *adev = ip_block->adev;
 	int ret;
 
-	if(state == adev->jpeg.cur_state)
+	if (state == adev->jpeg.cur_state)
 		return 0;
 
 	if (state == AMD_PG_STATE_GATE)
@@ -555,7 +591,7 @@ static int jpeg_v2_5_set_powergating_state(void *handle,
 	else
 		ret = jpeg_v2_5_start(adev);
 
-	if(!ret)
+	if (!ret)
 		adev->jpeg.cur_state = state;
 
 	return ret;
@@ -569,6 +605,14 @@ static int jpeg_v2_5_set_interrupt_state(struct amdgpu_device *adev,
 	return 0;
 }
 
+static int jpeg_v2_6_set_ras_interrupt_state(struct amdgpu_device *adev,
+					struct amdgpu_irq_src *source,
+					unsigned int type,
+					enum amdgpu_interrupt_state state)
+{
+	return 0;
+}
+
 static int jpeg_v2_5_process_interrupt(struct amdgpu_device *adev,
 				      struct amdgpu_irq_src *source,
 				      struct amdgpu_iv_entry *entry)
@@ -591,11 +635,7 @@ static int jpeg_v2_5_process_interrupt(struct amdgpu_device *adev,
 
 	switch (entry->src_id) {
 	case VCN_2_0__SRCID__JPEG_DECODE:
-		amdgpu_fence_process(&adev->jpeg.inst[ip_instance].ring_dec);
-		break;
-	case VCN_2_6__SRCID_DJPEG0_POISON:
-	case VCN_2_6__SRCID_EJPEG0_POISON:
-		amdgpu_jpeg_process_poison_irq(adev, source, entry);
+		amdgpu_fence_process(adev->jpeg.inst[ip_instance].ring_dec);
 		break;
 	default:
 		DRM_ERROR("Unhandled interrupt: %d %d\n",
@@ -606,10 +646,19 @@ static int jpeg_v2_5_process_interrupt(struct amdgpu_device *adev,
 	return 0;
 }
 
+static int jpeg_v2_5_ring_reset(struct amdgpu_ring *ring,
+				unsigned int vmid,
+				struct amdgpu_fence *timedout_fence)
+{
+	amdgpu_ring_reset_helper_begin(ring, timedout_fence);
+	jpeg_v2_5_stop_inst(ring->adev, ring->me);
+	jpeg_v2_5_start_inst(ring->adev, ring->me);
+	return amdgpu_ring_reset_helper_end(ring, timedout_fence);
+}
+
 static const struct amd_ip_funcs jpeg_v2_5_ip_funcs = {
 	.name = "jpeg_v2_5",
 	.early_init = jpeg_v2_5_early_init,
-	.late_init = NULL,
 	.sw_init = jpeg_v2_5_sw_init,
 	.sw_fini = jpeg_v2_5_sw_fini,
 	.hw_init = jpeg_v2_5_hw_init,
@@ -618,18 +667,15 @@ static const struct amd_ip_funcs jpeg_v2_5_ip_funcs = {
 	.resume = jpeg_v2_5_resume,
 	.is_idle = jpeg_v2_5_is_idle,
 	.wait_for_idle = jpeg_v2_5_wait_for_idle,
-	.check_soft_reset = NULL,
-	.pre_soft_reset = NULL,
-	.soft_reset = NULL,
-	.post_soft_reset = NULL,
 	.set_clockgating_state = jpeg_v2_5_set_clockgating_state,
 	.set_powergating_state = jpeg_v2_5_set_powergating_state,
+	.dump_ip_state = amdgpu_jpeg_dump_ip_state,
+	.print_ip_state = amdgpu_jpeg_print_ip_state,
 };
 
 static const struct amd_ip_funcs jpeg_v2_6_ip_funcs = {
 	.name = "jpeg_v2_6",
 	.early_init = jpeg_v2_5_early_init,
-	.late_init = NULL,
 	.sw_init = jpeg_v2_5_sw_init,
 	.sw_fini = jpeg_v2_5_sw_fini,
 	.hw_init = jpeg_v2_5_hw_init,
@@ -638,12 +684,10 @@ static const struct amd_ip_funcs jpeg_v2_6_ip_funcs = {
 	.resume = jpeg_v2_5_resume,
 	.is_idle = jpeg_v2_5_is_idle,
 	.wait_for_idle = jpeg_v2_5_wait_for_idle,
-	.check_soft_reset = NULL,
-	.pre_soft_reset = NULL,
-	.soft_reset = NULL,
-	.post_soft_reset = NULL,
 	.set_clockgating_state = jpeg_v2_5_set_clockgating_state,
 	.set_powergating_state = jpeg_v2_5_set_powergating_state,
+	.dump_ip_state = amdgpu_jpeg_dump_ip_state,
+	.print_ip_state = amdgpu_jpeg_print_ip_state,
 };
 
 static const struct amdgpu_ring_funcs jpeg_v2_5_dec_ring_vm_funcs = {
@@ -652,6 +696,7 @@ static const struct amdgpu_ring_funcs jpeg_v2_5_dec_ring_vm_funcs = {
 	.get_rptr = jpeg_v2_5_dec_ring_get_rptr,
 	.get_wptr = jpeg_v2_5_dec_ring_get_wptr,
 	.set_wptr = jpeg_v2_5_dec_ring_set_wptr,
+	.parse_cs = amdgpu_jpeg_dec_parse_cs,
 	.emit_frame_size =
 		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
 		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
@@ -673,6 +718,7 @@ static const struct amdgpu_ring_funcs jpeg_v2_5_dec_ring_vm_funcs = {
 	.emit_wreg = jpeg_v2_0_dec_ring_emit_wreg,
 	.emit_reg_wait = jpeg_v2_0_dec_ring_emit_reg_wait,
 	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+	.reset = jpeg_v2_5_ring_reset,
 };
 
 static const struct amdgpu_ring_funcs jpeg_v2_6_dec_ring_vm_funcs = {
@@ -681,6 +727,7 @@ static const struct amdgpu_ring_funcs jpeg_v2_6_dec_ring_vm_funcs = {
 	.get_rptr = jpeg_v2_5_dec_ring_get_rptr,
 	.get_wptr = jpeg_v2_5_dec_ring_get_wptr,
 	.set_wptr = jpeg_v2_5_dec_ring_set_wptr,
+	.parse_cs = amdgpu_jpeg_dec_parse_cs,
 	.emit_frame_size =
 		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
 		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
@@ -702,6 +749,7 @@ static const struct amdgpu_ring_funcs jpeg_v2_6_dec_ring_vm_funcs = {
 	.emit_wreg = jpeg_v2_0_dec_ring_emit_wreg,
 	.emit_reg_wait = jpeg_v2_0_dec_ring_emit_reg_wait,
 	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+	.reset = jpeg_v2_5_ring_reset,
 };
 
 static void jpeg_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev)
@@ -712,11 +760,10 @@ static void jpeg_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev)
 		if (adev->jpeg.harvest_config & (1 << i))
 			continue;
 		if (adev->asic_type == CHIP_ARCTURUS)
-			adev->jpeg.inst[i].ring_dec.funcs = &jpeg_v2_5_dec_ring_vm_funcs;
+			adev->jpeg.inst[i].ring_dec->funcs = &jpeg_v2_5_dec_ring_vm_funcs;
 		else  /* CHIP_ALDEBARAN */
-			adev->jpeg.inst[i].ring_dec.funcs = &jpeg_v2_6_dec_ring_vm_funcs;
-		adev->jpeg.inst[i].ring_dec.me = i;
-		DRM_INFO("JPEG(%d) JPEG decode is enabled in VM mode\n", i);
+			adev->jpeg.inst[i].ring_dec->funcs = &jpeg_v2_6_dec_ring_vm_funcs;
+		adev->jpeg.inst[i].ring_dec->me = i;
 	}
 }
 
@@ -725,6 +772,11 @@ static const struct amdgpu_irq_src_funcs jpeg_v2_5_irq_funcs = {
 	.process = jpeg_v2_5_process_interrupt,
 };
 
+static const struct amdgpu_irq_src_funcs jpeg_v2_6_ras_irq_funcs = {
+	.set = jpeg_v2_6_set_ras_interrupt_state,
+	.process = amdgpu_jpeg_process_poison_irq,
+};
+
 static void jpeg_v2_5_set_irq_funcs(struct amdgpu_device *adev)
 {
 	int i;
@@ -735,11 +787,13 @@ static void jpeg_v2_5_set_irq_funcs(struct amdgpu_device *adev)
 
 		adev->jpeg.inst[i].irq.num_types = 1;
 		adev->jpeg.inst[i].irq.funcs = &jpeg_v2_5_irq_funcs;
+
+		adev->jpeg.inst[i].ras_poison_irq.num_types = 1;
+		adev->jpeg.inst[i].ras_poison_irq.funcs = &jpeg_v2_6_ras_irq_funcs;
 	}
 }
 
-const struct amdgpu_ip_block_version jpeg_v2_5_ip_block =
-{
+const struct amdgpu_ip_block_version jpeg_v2_5_ip_block = {
 	.type = AMD_IP_BLOCK_TYPE_JPEG,
 	.major = 2,
 	.minor = 5,
@@ -747,8 +801,7 @@ const struct amdgpu_ip_block_version jpeg_v2_5_ip_block = {
 	.funcs = &jpeg_v2_5_ip_funcs,
 };
 
-const struct amdgpu_ip_block_version jpeg_v2_6_ip_block =
-{
+const struct amdgpu_ip_block_version jpeg_v2_6_ip_block = {
 	.type = AMD_IP_BLOCK_TYPE_JPEG,
 	.major = 2,
 	.minor = 6,
@@ -800,12 +853,13 @@ const struct amdgpu_ras_block_hw_ops jpeg_v2_6_ras_hw_ops = {
 static struct amdgpu_jpeg_ras jpeg_v2_6_ras = {
 	.ras_block = {
 		.hw_ops = &jpeg_v2_6_ras_hw_ops,
+		.ras_late_init = amdgpu_jpeg_ras_late_init,
 	},
 };
 
 static void jpeg_v2_5_set_ras_funcs(struct amdgpu_device *adev)
 {
-	switch (adev->ip_versions[JPEG_HWIP][0]) {
+	switch (amdgpu_ip_version(adev, JPEG_HWIP, 0)) {
 	case IP_VERSION(2, 6, 0):
 		adev->jpeg.ras = &jpeg_v2_6_ras;
 		break;