Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c')
-rw-r--r--   drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c | 772
1 file changed, 631 insertions(+), 141 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index 31db356476f8..ceb94bbb03a4 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -23,7 +23,7 @@
  */
 #include <linux/firmware.h>
-#include <drm/drmP.h>
+
 #include "amdgpu.h"
 #include "amdgpu_uvd.h"
 #include "vid.h"
@@ -36,18 +36,38 @@
 #include "bif/bif_5_1_d.h"
 #include "gmc/gmc_8_1_d.h"
 #include "vi.h"
+#include "ivsrcid/ivsrcid_vislands30.h"
+
+/* Polaris10/11/12 firmware version */
+#define FW_1_130_16 ((1 << 24) | (130 << 16) | (16 << 8))
 static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev);
+static void uvd_v6_0_set_enc_ring_funcs(struct amdgpu_device *adev);
+
 static void uvd_v6_0_set_irq_funcs(struct amdgpu_device *adev);
 static int uvd_v6_0_start(struct amdgpu_device *adev);
 static void uvd_v6_0_stop(struct amdgpu_device *adev);
 static void uvd_v6_0_set_sw_clock_gating(struct amdgpu_device *adev);
-static int uvd_v6_0_set_clockgating_state(void *handle,
+static int uvd_v6_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
 enum amd_clockgating_state state);
 static void uvd_v6_0_enable_mgcg(struct amdgpu_device *adev,
 bool enable);
 /**
+ * uvd_v6_0_enc_support - get encode support status
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Returns the current hardware encode support status
+ */
+static inline bool uvd_v6_0_enc_support(struct amdgpu_device *adev)
+{
+ return ((adev->asic_type >= CHIP_POLARIS10) &&
+ (adev->asic_type <= CHIP_VEGAM) &&
+ (!adev->uvd.fw_version || adev->uvd.fw_version >= FW_1_130_16));
+}
+
+/**
  * uvd_v6_0_ring_get_rptr - get read pointer
  *
  * @ring: amdgpu_ring pointer
@@ -62,6 +82,22 @@ static uint64_t uvd_v6_0_ring_get_rptr(struct amdgpu_ring *ring)
 }
 /**
+ * uvd_v6_0_enc_ring_get_rptr - get enc read pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Returns the current hardware enc read pointer
+ */
+static uint64_t uvd_v6_0_enc_ring_get_rptr(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ if (ring == &adev->uvd.inst->ring_enc[0])
+ return RREG32(mmUVD_RB_RPTR);
+ else
+ return RREG32(mmUVD_RB_RPTR2);
+}
+/**
  * uvd_v6_0_ring_get_wptr - get write pointer
  *
  * @ring: amdgpu_ring pointer
@@ -76,6 +112,23 @@
 }
 /**
+ * uvd_v6_0_enc_ring_get_wptr - get enc write pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Returns the current hardware enc write pointer
+ */
+static uint64_t uvd_v6_0_enc_ring_get_wptr(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ if (ring == &adev->uvd.inst->ring_enc[0])
+ return RREG32(mmUVD_RB_WPTR);
+ else
+ return RREG32(mmUVD_RB_WPTR2);
+}
+
+/**
  * uvd_v6_0_ring_set_wptr - set write pointer
  *
  * @ring: amdgpu_ring pointer
@@ -89,78 +142,339 @@ static void uvd_v6_0_ring_set_wptr(struct amdgpu_ring *ring)
 WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
 }
-static int uvd_v6_0_early_init(void *handle)
+/**
+ * uvd_v6_0_enc_ring_set_wptr - set enc write pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Commits the enc write pointer to the hardware
+ */
+static void uvd_v6_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ if (ring == &adev->uvd.inst->ring_enc[0])
+ WREG32(mmUVD_RB_WPTR,
+ lower_32_bits(ring->wptr));
+ else
+ WREG32(mmUVD_RB_WPTR2,
+ lower_32_bits(ring->wptr));
+}
+
+/**
+ * uvd_v6_0_enc_ring_test_ring - test if UVD ENC ring is working
+ *
+ * @ring: the engine to test on
+ *
+ */
+static int uvd_v6_0_enc_ring_test_ring(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ uint32_t rptr;
+ unsigned i;
+ int r;
+
+ r = amdgpu_ring_alloc(ring, 16);
+ if (r)
+ return r;
+
+ rptr = amdgpu_ring_get_rptr(ring);
+
+ amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
+ amdgpu_ring_commit(ring);
+
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if (amdgpu_ring_get_rptr(ring) != rptr)
+ break;
+ udelay(1);
+ }
+
+ if (i >= adev->usec_timeout)
+ r = -ETIMEDOUT;
+
+ return r;
+}
+
+/**
+ * uvd_v6_0_enc_get_create_msg - generate a UVD ENC create msg
+ *
+ * @ring: ring we should submit the msg to
+ * @handle: session handle to use
+ * @bo: amdgpu object for which we query the offset
+ * @fence: optional fence to return
+ *
+ * Open up a stream for HW test
+ */
+static int uvd_v6_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
+ struct amdgpu_bo *bo,
+ struct dma_fence **fence)
+{
+ const unsigned ib_size_dw = 16;
+ struct amdgpu_job *job;
+ struct amdgpu_ib *ib;
+ struct dma_fence *f = NULL;
+ uint64_t addr;
+ int i, r;
+
+ r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, ib_size_dw * 4,
+ AMDGPU_IB_POOL_DIRECT, &job,
+ AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST);
+ if (r)
+ return r;
+
+ ib = &job->ibs[0];
+ addr = amdgpu_bo_gpu_offset(bo);
+
+ ib->length_dw = 0;
+ ib->ptr[ib->length_dw++] = 0x00000018;
+ ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
+ ib->ptr[ib->length_dw++] = handle;
+ ib->ptr[ib->length_dw++] = 0x00010000;
+ ib->ptr[ib->length_dw++] = upper_32_bits(addr);
+ ib->ptr[ib->length_dw++] = addr;
+
+ ib->ptr[ib->length_dw++] = 0x00000014;
+ ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
+ ib->ptr[ib->length_dw++] = 0x0000001c;
+ ib->ptr[ib->length_dw++] = 0x00000001;
+ ib->ptr[ib->length_dw++] = 0x00000000;
+
+ ib->ptr[ib->length_dw++] = 0x00000008;
+ ib->ptr[ib->length_dw++] = 0x08000001; /* op initialize */
+
+ for (i = ib->length_dw; i < ib_size_dw; ++i)
+ ib->ptr[i] = 0x0;
+
+ r = amdgpu_job_submit_direct(job, ring, &f);
+ if (r)
+ goto err;
+
+ if (fence)
+ *fence = dma_fence_get(f);
+ dma_fence_put(f);
+ return 0;
+
+err:
+ amdgpu_job_free(job);
+ return r;
+}
+
+/**
+ * uvd_v6_0_enc_get_destroy_msg - generate a UVD ENC destroy msg
+ *
+ * @ring: ring we should submit the msg to
+ * @handle: session handle to use
+ * @bo: amdgpu object for which we query the offset
+ * @fence: optional fence to return
+ *
+ * Close up a stream for HW test or if userspace failed to do so
+ */
+static int uvd_v6_0_enc_get_destroy_msg(struct amdgpu_ring *ring,
+ uint32_t handle,
+ struct amdgpu_bo *bo,
+ struct dma_fence **fence)
+{
+ const unsigned ib_size_dw = 16;
+ struct amdgpu_job *job;
+ struct amdgpu_ib *ib;
+ struct dma_fence *f = NULL;
+ uint64_t addr;
+ int i, r;
+
+ r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, ib_size_dw * 4,
+ AMDGPU_IB_POOL_DIRECT, &job,
+ AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST);
+ if (r)
+ return r;
+
+ ib = &job->ibs[0];
+ addr = amdgpu_bo_gpu_offset(bo);
+
+ ib->length_dw = 0;
+ ib->ptr[ib->length_dw++] = 0x00000018;
+ ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
+ ib->ptr[ib->length_dw++] = handle;
+ ib->ptr[ib->length_dw++] = 0x00010000;
+ ib->ptr[ib->length_dw++] = upper_32_bits(addr);
+ ib->ptr[ib->length_dw++] = addr;
+
+ ib->ptr[ib->length_dw++] = 0x00000014;
+ ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
+ ib->ptr[ib->length_dw++] = 0x0000001c;
+ ib->ptr[ib->length_dw++] = 0x00000001;
+ ib->ptr[ib->length_dw++] = 0x00000000;
+
+ ib->ptr[ib->length_dw++] = 0x00000008;
+ ib->ptr[ib->length_dw++] = 0x08000002; /* op close session */
+
+ for (i = ib->length_dw; i < ib_size_dw; ++i)
+ ib->ptr[i] = 0x0;
+
+ r = amdgpu_job_submit_direct(job, ring, &f);
+ if (r)
+ goto err;
+
+ if (fence)
+ *fence = dma_fence_get(f);
+ dma_fence_put(f);
+ return 0;
+
+err:
+ amdgpu_job_free(job);
+ return r;
+}
+
+/**
+ * uvd_v6_0_enc_ring_test_ib - test if UVD ENC IBs are working
+ *
+ * @ring: the engine to test on
+ * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
+ *
+ */
+static int uvd_v6_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 {
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct dma_fence *fence = NULL;
+ struct amdgpu_bo *bo = ring->adev->uvd.ib_bo;
+ long r;
+
+ r = uvd_v6_0_enc_get_create_msg(ring, 1, bo, NULL);
+ if (r)
+ goto error;
+
+ r = uvd_v6_0_enc_get_destroy_msg(ring, 1, bo, &fence);
+ if (r)
+ goto error;
+
+ r = dma_fence_wait_timeout(fence, false, timeout);
+ if (r == 0)
+ r = -ETIMEDOUT;
+ else if (r > 0)
+ r = 0;
+
+error:
+ dma_fence_put(fence);
+ return r;
+}
+
+static int uvd_v6_0_early_init(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ adev->uvd.num_uvd_inst = 1;
+
+ if (!(adev->flags & AMD_IS_APU) &&
+ (RREG32_SMC(ixCC_HARVEST_FUSES) & CC_HARVEST_FUSES__UVD_DISABLE_MASK))
+ return -ENOENT;
 uvd_v6_0_set_ring_funcs(adev);
+
+ if (uvd_v6_0_enc_support(adev)) {
+ adev->uvd.num_enc_rings = 2;
+ uvd_v6_0_set_enc_ring_funcs(adev);
+ }
+
 uvd_v6_0_set_irq_funcs(adev);
 return 0;
 }
-static int uvd_v6_0_sw_init(void *handle)
+static int uvd_v6_0_sw_init(struct amdgpu_ip_block *ip_block)
 {
 struct amdgpu_ring *ring;
- int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i, r;
+ struct amdgpu_device *adev = ip_block->adev;
 /* UVD TRAP */
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 124, &adev->uvd.irq);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_UVD_SYSTEM_MESSAGE, &adev->uvd.inst->irq);
 if (r)
 return r;
+ /* UVD ENC TRAP */
+ if (uvd_v6_0_enc_support(adev)) {
+ for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
+ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i + VISLANDS30_IV_SRCID_UVD_ENC_GEN_PURP, &adev->uvd.inst->irq);
+ if (r)
+ return r;
+ }
+ }
+
 r = amdgpu_uvd_sw_init(adev);
 if (r)
 return r;
+ if (!uvd_v6_0_enc_support(adev)) {
+ for (i = 0; i < adev->uvd.num_enc_rings; ++i)
+ adev->uvd.inst->ring_enc[i].funcs = NULL;
+
+ adev->uvd.inst->irq.num_types = 1;
+ adev->uvd.num_enc_rings = 0;
+
+ DRM_INFO("UVD ENC is disabled\n");
+ }
+
+ ring = &adev->uvd.inst->ring;
+ sprintf(ring->name, "uvd");
+ r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0,
+ AMDGPU_RING_PRIO_DEFAULT, NULL);
+ if (r)
+ return r;
+
 r = amdgpu_uvd_resume(adev);
 if (r)
 return r;
- ring = &adev->uvd.ring;
- sprintf(ring->name, "uvd");
- r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0);
+ if (uvd_v6_0_enc_support(adev)) {
+ for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
+ ring = &adev->uvd.inst->ring_enc[i];
+ sprintf(ring->name, "uvd_enc%d", i);
+ r = amdgpu_ring_init(adev, ring, 512,
+ &adev->uvd.inst->irq, 0,
+ AMDGPU_RING_PRIO_DEFAULT, NULL);
+ if (r)
+ return r;
+ }
+ }
 return r;
 }
-static int uvd_v6_0_sw_fini(void *handle)
+static int uvd_v6_0_sw_fini(struct amdgpu_ip_block *ip_block)
 {
- int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i, r;
+ struct amdgpu_device *adev = ip_block->adev;
 r = amdgpu_uvd_suspend(adev);
 if (r)
 return r;
+ if (uvd_v6_0_enc_support(adev)) {
+ for (i = 0; i < adev->uvd.num_enc_rings; ++i)
+ amdgpu_ring_fini(&adev->uvd.inst->ring_enc[i]);
+ }
+
 return amdgpu_uvd_sw_fini(adev);
 }
 /**
  * uvd_v6_0_hw_init - start and test UVD block
  *
- * @adev: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
  *
  * Initialize the hardware, boot up the VCPU and do some testing
  */
-static int uvd_v6_0_hw_init(void *handle)
+static int uvd_v6_0_hw_init(struct amdgpu_ip_block *ip_block)
 {
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct amdgpu_ring *ring = &adev->uvd.ring;
+ struct amdgpu_device *adev = ip_block->adev;
+ struct amdgpu_ring *ring = &adev->uvd.inst->ring;
 uint32_t tmp;
- int r;
+ int i, r;
 amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);
- uvd_v6_0_set_clockgating_state(adev, AMD_CG_STATE_UNGATE);
+ uvd_v6_0_set_clockgating_state(ip_block, AMD_CG_STATE_UNGATE);
 uvd_v6_0_enable_mgcg(adev, true);
- ring->ready = true;
- r = amdgpu_ring_test_ring(ring);
- if (r) {
- ring->ready = false;
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
 goto done;
- }
 r = amdgpu_ring_alloc(ring, 10);
 if (r) {
@@ -189,9 +503,22 @@ static int uvd_v6_0_hw_init(void *handle)
 amdgpu_ring_commit(ring);
+ if (uvd_v6_0_enc_support(adev)) {
+ for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
+ ring = &adev->uvd.inst->ring_enc[i];
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
+ goto done;
+ }
+ }
+
 done:
- if (!r)
- DRM_INFO("UVD initialized successfully.\n");
+ if (!r) {
+ if (uvd_v6_0_enc_support(adev))
+ DRM_INFO("UVD and UVD ENC initialized successfully.\n");
+ else
+ DRM_INFO("UVD initialized successfully.\n");
+ }
 return r;
 }
@@ -199,51 +526,74 @@ done:
 /**
  * uvd_v6_0_hw_fini - stop the hardware block
  *
- * @adev: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
  *
  * Stop the UVD block, mark ring as not ready any more
  */
-static int uvd_v6_0_hw_fini(void *handle)
+static int uvd_v6_0_hw_fini(struct amdgpu_ip_block *ip_block)
 {
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct amdgpu_ring *ring = &adev->uvd.ring;
+ struct amdgpu_device *adev = ip_block->adev;
+
+ cancel_delayed_work_sync(&adev->uvd.idle_work);
 if (RREG32(mmUVD_STATUS) != 0)
 uvd_v6_0_stop(adev);
- ring->ready = false;
-
 return 0;
 }
-static int uvd_v6_0_suspend(void *handle)
+static int uvd_v6_0_prepare_suspend(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+
+ return amdgpu_uvd_prepare_suspend(adev);
+}
+
+static int uvd_v6_0_suspend(struct amdgpu_ip_block *ip_block)
 {
 int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
+
+ /*
+ * Proper cleanups before halting the HW engine:
+ * - cancel the delayed idle work
+ * - enable powergating
+ * - enable clockgating
+ * - disable dpm
+ *
+ * TODO: to align with the VCN implementation, move the
+ * jobs for clockgating/powergating/dpm setting to
+ * ->set_powergating_state().
+ */
+ cancel_delayed_work_sync(&adev->uvd.idle_work);
+
+ if (adev->pm.dpm_enabled) {
+ amdgpu_dpm_enable_uvd(adev, false);
+ } else {
+ amdgpu_asic_set_uvd_clocks(adev, 0, 0);
+ /* shutdown the UVD block */
+ amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
+ AMD_PG_STATE_GATE);
+ amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
+ AMD_CG_STATE_GATE);
+ }
- r = uvd_v6_0_hw_fini(adev);
+ r = uvd_v6_0_hw_fini(ip_block);
 if (r)
 return r;
- /* Skip this for APU for now */
- if (!(adev->flags & AMD_IS_APU))
- r = amdgpu_uvd_suspend(adev);
-
- return r;
+ return amdgpu_uvd_suspend(adev);
 }
-static int uvd_v6_0_resume(void *handle)
+static int uvd_v6_0_resume(struct amdgpu_ip_block *ip_block)
 {
 int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- /* Skip this for APU for now */
- if (!(adev->flags & AMD_IS_APU)) {
- r = amdgpu_uvd_resume(adev);
- if (r)
- return r;
- }
- return uvd_v6_0_hw_init(adev);
+ r = amdgpu_uvd_resume(ip_block->adev);
+ if (r)
+ return r;
+
+ return uvd_v6_0_hw_init(ip_block);
 }
 /**
@@ -258,14 +608,14 @@ static void uvd_v6_0_mc_resume(struct amdgpu_device *adev)
 uint64_t offset;
 uint32_t size;
- /* programm memory controller bits 0-27 */
+ /* program memory controller bits 0-27 */
 WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
- lower_32_bits(adev->uvd.gpu_addr));
+ lower_32_bits(adev->uvd.inst->gpu_addr));
 WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
- upper_32_bits(adev->uvd.gpu_addr));
+ upper_32_bits(adev->uvd.inst->gpu_addr));
 offset = AMDGPU_UVD_FIRMWARE_OFFSET;
- size = AMDGPU_GPU_PAGE_ALIGN(adev->uvd.fw->size + 4);
+ size = AMDGPU_UVD_FIRMWARE_SIZE(adev);
 WREG32(mmUVD_VCPU_CACHE_OFFSET0, offset >> 3);
 WREG32(mmUVD_VCPU_CACHE_SIZE0, size);
@@ -375,7 +725,7 @@ static void cz_set_uvd_clock_gating_branches(struct amdgpu_device *adev,
  */
 static int uvd_v6_0_start(struct amdgpu_device *adev)
 {
- struct amdgpu_ring *ring = &adev->uvd.ring;
+ struct amdgpu_ring *ring = &adev->uvd.inst->ring;
 uint32_t rb_bufsz, tmp;
 uint32_t lmi_swap_cntl;
 uint32_t mp_swap_cntl;
@@ -500,7 +850,7 @@ static int uvd_v6_0_start(struct amdgpu_device *adev)
 /* set the wb address */
 WREG32(mmUVD_RBC_RB_RPTR_ADDR, (upper_32_bits(ring->gpu_addr) >> 2));
- /* programm the RB_BASE for ring buffer */
+ /* program the RB_BASE for ring buffer */
 WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
 lower_32_bits(ring->gpu_addr));
 WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
@@ -514,6 +864,22 @@ static int uvd_v6_0_start(struct amdgpu_device *adev)
 WREG32_FIELD(UVD_RBC_RB_CNTL, RB_NO_FETCH, 0);
+ if (uvd_v6_0_enc_support(adev)) {
+ ring = &adev->uvd.inst->ring_enc[0];
+ WREG32(mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
+ WREG32(mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
+ WREG32(mmUVD_RB_BASE_LO, ring->gpu_addr);
+ WREG32(mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
+ WREG32(mmUVD_RB_SIZE, ring->ring_size / 4);
+
+ ring = &adev->uvd.inst->ring_enc[1];
+ WREG32(mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
+ WREG32(mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
+ WREG32(mmUVD_RB_BASE_LO2, ring->gpu_addr);
+ WREG32(mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
+ WREG32(mmUVD_RB_SIZE2, ring->ring_size / 4);
+ }
+
 return 0;
 }
@@ -550,7 +916,9 @@ static void uvd_v6_0_stop(struct amdgpu_device *adev)
  * uvd_v6_0_ring_emit_fence - emit an fence & trap command
  *
  * @ring: amdgpu_ring pointer
- * @fence: fence to emit
+ * @addr: address
+ * @seq: sequence number
+ * @flags: fence related flags
  *
  * Write a fence and a trap command to the ring.
  */
@@ -577,29 +945,35 @@ static void uvd_v6_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq
 }
 /**
- * uvd_v6_0_ring_emit_hdp_flush - emit an hdp flush
+ * uvd_v6_0_enc_ring_emit_fence - emit an enc fence & trap command
  *
  * @ring: amdgpu_ring pointer
+ * @addr: address
+ * @seq: sequence number
+ * @flags: fence related flags
  *
- * Emits an hdp flush.
+ * Write enc a fence and a trap command to the ring.
  */
-static void uvd_v6_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
+static void uvd_v6_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
+ u64 seq, unsigned flags)
 {
- amdgpu_ring_write(ring, PACKET0(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0));
- amdgpu_ring_write(ring, 0);
+ WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
+
+ amdgpu_ring_write(ring, HEVC_ENC_CMD_FENCE);
+ amdgpu_ring_write(ring, addr);
+ amdgpu_ring_write(ring, upper_32_bits(addr));
+ amdgpu_ring_write(ring, seq);
+ amdgpu_ring_write(ring, HEVC_ENC_CMD_TRAP);
 }
 /**
- * uvd_v6_0_ring_hdp_invalidate - emit an hdp invalidate
+ * uvd_v6_0_ring_emit_hdp_flush - skip HDP flushing
  *
  * @ring: amdgpu_ring pointer
- *
- * Emits an hdp invalidate.
  */
-static void uvd_v6_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
+static void uvd_v6_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
 {
- amdgpu_ring_write(ring, PACKET0(mmHDP_DEBUG0, 0));
- amdgpu_ring_write(ring, 1);
+ /* The firmware doesn't seem to like touching registers at this point. */
 }
 /**
@@ -618,11 +992,9 @@ static int uvd_v6_0_ring_test_ring(struct amdgpu_ring *ring)
 WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD);
 r = amdgpu_ring_alloc(ring, 3);
- if (r) {
- DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
- ring->idx, r);
+ if (r)
 return r;
- }
+
 amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
 amdgpu_ring_write(ring, 0xDEADBEEF);
 amdgpu_ring_commit(ring);
@@ -630,17 +1002,12 @@ static int uvd_v6_0_ring_test_ring(struct amdgpu_ring *ring)
 tmp = RREG32(mmUVD_CONTEXT_ID);
 if (tmp == 0xDEADBEEF)
 break;
- DRM_UDELAY(1);
+ udelay(1);
 }
- if (i < adev->usec_timeout) {
- DRM_INFO("ring test on %d succeeded in %d usecs\n",
- ring->idx, i);
- } else {
- DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
- ring->idx, tmp);
- r = -EINVAL;
- }
+ if (i >= adev->usec_timeout)
+ r = -ETIMEDOUT;
+
 return r;
 }
@@ -648,16 +1015,21 @@ static int uvd_v6_0_ring_test_ring(struct amdgpu_ring *ring)
  * uvd_v6_0_ring_emit_ib - execute indirect buffer
  *
  * @ring: amdgpu_ring pointer
+ * @job: job to retrieve vmid from
  * @ib: indirect buffer to execute
+ * @flags: unused
  *
  * Write ring commands to execute the indirect buffer
  */
 static void uvd_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
+ struct amdgpu_job *job,
 struct amdgpu_ib *ib,
- unsigned vm_id, bool ctx_switch)
+ uint32_t flags)
 {
+ unsigned vmid = AMDGPU_JOB_GET_VMID(job);
+
 amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_VMID, 0));
- amdgpu_ring_write(ring, vm_id);
+ amdgpu_ring_write(ring, vmid);
 amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0));
 amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
@@ -667,36 +1039,52 @@ static void uvd_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
 amdgpu_ring_write(ring, ib->length_dw);
 }
-static void uvd_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
- unsigned vm_id, uint64_t pd_addr)
+/**
+ * uvd_v6_0_enc_ring_emit_ib - enc execute indirect buffer
+ *
+ * @ring: amdgpu_ring pointer
+ * @job: job to retrive vmid from
+ * @ib: indirect buffer to execute
+ * @flags: unused
+ *
+ * Write enc ring commands to execute the indirect buffer
+ */
+static void uvd_v6_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
+ struct amdgpu_job *job,
+ struct amdgpu_ib *ib,
+ uint32_t flags)
 {
- uint32_t reg;
+ unsigned vmid = AMDGPU_JOB_GET_VMID(job);
- if (vm_id < 8)
- reg = mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id;
- else
- reg = mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vm_id - 8;
+ amdgpu_ring_write(ring, HEVC_ENC_CMD_IB_VM);
+ amdgpu_ring_write(ring, vmid);
+ amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
+ amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
+ amdgpu_ring_write(ring, ib->length_dw);
+}
+static void uvd_v6_0_ring_emit_wreg(struct amdgpu_ring *ring,
+ uint32_t reg, uint32_t val)
+{
 amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
 amdgpu_ring_write(ring, reg << 2);
 amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
- amdgpu_ring_write(ring, pd_addr >> 12);
+ amdgpu_ring_write(ring, val);
 amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
 amdgpu_ring_write(ring, 0x8);
+}
- amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
- amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2);
- amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
- amdgpu_ring_write(ring, 1 << vm_id);
- amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
- amdgpu_ring_write(ring, 0x8);
+static void uvd_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
+ unsigned vmid, uint64_t pd_addr)
+{
+ amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
 amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
 amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2);
 amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
 amdgpu_ring_write(ring, 0);
 amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH8, 0));
- amdgpu_ring_write(ring, 1 << vm_id); /* mask */
+ amdgpu_ring_write(ring, 1 << vmid); /* mask */
 amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
 amdgpu_ring_write(ring, 0xC);
 }
@@ -718,29 +1106,68 @@ static void uvd_v6_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
 amdgpu_ring_write(ring, 0xE);
 }
-static bool uvd_v6_0_is_idle(void *handle)
+static void uvd_v6_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
 {
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i;
+
+ WARN_ON(ring->wptr % 2 || count % 2);
+
+ for (i = 0; i < count / 2; i++) {
+ amdgpu_ring_write(ring, PACKET0(mmUVD_NO_OP, 0));
+ amdgpu_ring_write(ring, 0);
+ }
+}
+
+static void uvd_v6_0_enc_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
+{
+ uint32_t seq = ring->fence_drv.sync_seq;
+ uint64_t addr = ring->fence_drv.gpu_addr;
+
+ amdgpu_ring_write(ring, HEVC_ENC_CMD_WAIT_GE);
+ amdgpu_ring_write(ring, lower_32_bits(addr));
+ amdgpu_ring_write(ring, upper_32_bits(addr));
+ amdgpu_ring_write(ring, seq);
+}
+
+static void uvd_v6_0_enc_ring_insert_end(struct amdgpu_ring *ring)
+{
+ amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
+}
+
+static void uvd_v6_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
+ unsigned int vmid, uint64_t pd_addr)
+{
+ amdgpu_ring_write(ring, HEVC_ENC_CMD_UPDATE_PTB);
+ amdgpu_ring_write(ring, vmid);
+ amdgpu_ring_write(ring, pd_addr >> 12);
+
+ amdgpu_ring_write(ring, HEVC_ENC_CMD_FLUSH_TLB);
+ amdgpu_ring_write(ring, vmid);
+}
+
+static bool uvd_v6_0_is_idle(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
 return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
 }
-static int uvd_v6_0_wait_for_idle(void *handle)
+static int uvd_v6_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
 {
 unsigned i;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
 for (i = 0; i < adev->usec_timeout; i++) {
- if (uvd_v6_0_is_idle(handle))
+ if (uvd_v6_0_is_idle(ip_block))
 return 0;
 }
 return -ETIMEDOUT;
 }
 #define AMDGPU_UVD_STATUS_BUSY_MASK 0xfd
-static bool uvd_v6_0_check_soft_reset(void *handle)
+static bool uvd_v6_0_check_soft_reset(struct amdgpu_ip_block *ip_block)
 {
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
 u32 srbm_soft_reset = 0;
 u32 tmp = RREG32(mmSRBM_STATUS);
@@ -750,33 +1177,33 @@ static bool uvd_v6_0_check_soft_reset(void *handle)
 srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_UVD, 1);
 if (srbm_soft_reset) {
- adev->uvd.srbm_soft_reset = srbm_soft_reset;
+ adev->uvd.inst->srbm_soft_reset = srbm_soft_reset;
 return true;
 } else {
- adev->uvd.srbm_soft_reset = 0;
+ adev->uvd.inst->srbm_soft_reset = 0;
 return false;
 }
 }
-static int uvd_v6_0_pre_soft_reset(void *handle)
+static int uvd_v6_0_pre_soft_reset(struct amdgpu_ip_block *ip_block)
 {
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
- if (!adev->uvd.srbm_soft_reset)
+ if (!adev->uvd.inst->srbm_soft_reset)
 return 0;
 uvd_v6_0_stop(adev);
 return 0;
 }
-static int uvd_v6_0_soft_reset(void *handle)
+static int uvd_v6_0_soft_reset(struct amdgpu_ip_block *ip_block)
 {
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
 u32 srbm_soft_reset;
- if (!adev->uvd.srbm_soft_reset)
+ if (!adev->uvd.inst->srbm_soft_reset)
 return 0;
- srbm_soft_reset = adev->uvd.srbm_soft_reset;
+ srbm_soft_reset = adev->uvd.inst->srbm_soft_reset;
 if (srbm_soft_reset) {
 u32 tmp;
@@ -800,11 +1227,11 @@ static int uvd_v6_0_soft_reset(void *handle)
 return 0;
 }
-static int uvd_v6_0_post_soft_reset(void *handle)
+static int uvd_v6_0_post_soft_reset(struct amdgpu_ip_block *ip_block)
 {
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
- if (!adev->uvd.srbm_soft_reset)
+ if (!adev->uvd.inst->srbm_soft_reset)
 return 0;
 mdelay(5);
@@ -825,8 +1252,31 @@ static int uvd_v6_0_process_interrupt(struct amdgpu_device *adev,
 struct amdgpu_irq_src *source,
 struct amdgpu_iv_entry *entry)
 {
+ bool int_handled = true;
 DRM_DEBUG("IH: UVD TRAP\n");
- amdgpu_fence_process(&adev->uvd.ring);
+
+ switch (entry->src_id) {
+ case 124:
+ amdgpu_fence_process(&adev->uvd.inst->ring);
+ break;
+ case 119:
+ if (likely(uvd_v6_0_enc_support(adev)))
+ amdgpu_fence_process(&adev->uvd.inst->ring_enc[0]);
+ else
+ int_handled = false;
+ break;
+ case 120:
+ if (likely(uvd_v6_0_enc_support(adev)))
+ amdgpu_fence_process(&adev->uvd.inst->ring_enc[1]);
+ else
+ int_handled = false;
+ break;
+ }
+
+ if (!int_handled)
+ DRM_ERROR("Unhandled interrupt: %d %d\n",
+ entry->src_id, entry->src_data[0]);
+
 return 0;
 }
@@ -1002,15 +1452,15 @@ static void uvd_v6_0_enable_mgcg(struct amdgpu_device *adev,
 }
 }
-static int uvd_v6_0_set_clockgating_state(void *handle,
+static int uvd_v6_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
 enum amd_clockgating_state state)
 {
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
+ struct amdgpu_device *adev = ip_block->adev;
+ bool enable = (state == AMD_CG_STATE_GATE);
 if (enable) {
 /* wait for STATUS to clear */
- if (uvd_v6_0_wait_for_idle(handle))
+ if (uvd_v6_0_wait_for_idle(ip_block))
 return -EBUSY;
 uvd_v6_0_enable_clock_gating(adev, true);
 /* enable HW gates because UVD is idle */
@@ -1023,7 +1473,7 @@ static int uvd_v6_0_set_clockgating_state(void *handle,
 return 0;
 }
-static int uvd_v6_0_set_powergating_state(void *handle,
+static int uvd_v6_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
 enum amd_powergating_state state)
 {
 /* This doesn't actually powergate the UVD block.
@@ -1033,7 +1483,7 @@ static int uvd_v6_0_set_powergating_state(void *handle,
  * revisit this when there is a cleaner line between
  * the smc and the hw blocks
  */
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
 int ret = 0;
 WREG32(mmUVD_POWER_STATUS, UVD_POWER_STATUS__UVD_PG_EN_MASK);
@@ -1050,9 +1500,9 @@ out:
 return ret;
 }
-static void uvd_v6_0_get_clockgating_state(void *handle, u32 *flags)
+static void uvd_v6_0_get_clockgating_state(struct amdgpu_ip_block *ip_block, u64 *flags)
 {
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
 int data;
 mutex_lock(&adev->pm.mutex);
@@ -1079,11 +1529,11 @@ out:
 static const struct amd_ip_funcs uvd_v6_0_ip_funcs = {
 .name = "uvd_v6_0",
 .early_init = uvd_v6_0_early_init,
- .late_init = NULL,
 .sw_init = uvd_v6_0_sw_init,
 .sw_fini = uvd_v6_0_sw_fini,
 .hw_init = uvd_v6_0_hw_init,
 .hw_fini = uvd_v6_0_hw_fini,
+ .prepare_suspend = uvd_v6_0_prepare_suspend,
 .suspend = uvd_v6_0_suspend,
 .resume = uvd_v6_0_resume,
 .is_idle = uvd_v6_0_is_idle,
@@ -1100,43 +1550,41 @@ static const struct amd_ip_funcs uvd_v6_0_ip_funcs = {
 static const struct amdgpu_ring_funcs uvd_v6_0_ring_phys_funcs = {
 .type = AMDGPU_RING_TYPE_UVD,
 .align_mask = 0xf,
- .nop = PACKET0(mmUVD_NO_OP, 0),
 .support_64bit_ptrs = false,
+ .no_user_fence = true,
 .get_rptr = uvd_v6_0_ring_get_rptr,
 .get_wptr = uvd_v6_0_ring_get_wptr,
 .set_wptr = uvd_v6_0_ring_set_wptr,
 .parse_cs = amdgpu_uvd_ring_parse_cs,
 .emit_frame_size =
- 2 + /* uvd_v6_0_ring_emit_hdp_flush */
- 2 + /* uvd_v6_0_ring_emit_hdp_invalidate */
+ 6 + /* hdp invalidate */
 10 + /* uvd_v6_0_ring_emit_pipeline_sync */
 14, /* uvd_v6_0_ring_emit_fence x1 no user fence */
 .emit_ib_size = 8, /* uvd_v6_0_ring_emit_ib */
 .emit_ib = uvd_v6_0_ring_emit_ib,
 .emit_fence = uvd_v6_0_ring_emit_fence,
 .emit_hdp_flush = uvd_v6_0_ring_emit_hdp_flush,
- .emit_hdp_invalidate = uvd_v6_0_ring_emit_hdp_invalidate,
 .test_ring = uvd_v6_0_ring_test_ring,
 .test_ib = amdgpu_uvd_ring_test_ib,
- .insert_nop = amdgpu_ring_insert_nop,
+ .insert_nop = uvd_v6_0_ring_insert_nop,
 .pad_ib = amdgpu_ring_generic_pad_ib,
 .begin_use = amdgpu_uvd_ring_begin_use,
 .end_use = amdgpu_uvd_ring_end_use,
+ .emit_wreg = uvd_v6_0_ring_emit_wreg,
 };
 static const struct amdgpu_ring_funcs uvd_v6_0_ring_vm_funcs = {
 .type = AMDGPU_RING_TYPE_UVD,
 .align_mask = 0xf,
- .nop = PACKET0(mmUVD_NO_OP, 0),
 .support_64bit_ptrs = false,
+ .no_user_fence = true,
 .get_rptr = uvd_v6_0_ring_get_rptr,
 .get_wptr = uvd_v6_0_ring_get_wptr,
 .set_wptr = uvd_v6_0_ring_set_wptr,
 .emit_frame_size =
- 2 + /* uvd_v6_0_ring_emit_hdp_flush */
- 2 + /* uvd_v6_0_ring_emit_hdp_invalidate */
+ 6 + /* hdp invalidate */
 10 + /* uvd_v6_0_ring_emit_pipeline_sync */
- 20 + /* uvd_v6_0_ring_emit_vm_flush */
+ VI_FLUSH_GPU_TLB_NUM_WREG * 6 + 8 + /* uvd_v6_0_ring_emit_vm_flush */
 14 + 14, /* uvd_v6_0_ring_emit_fence x2 vm fence */
 .emit_ib_size = 8, /* uvd_v6_0_ring_emit_ib */
 .emit_ib = uvd_v6_0_ring_emit_ib,
@@ -1144,10 +1592,38 @@ static const struct amdgpu_ring_funcs uvd_v6_0_ring_vm_funcs = {
 .emit_vm_flush = uvd_v6_0_ring_emit_vm_flush,
 .emit_pipeline_sync = uvd_v6_0_ring_emit_pipeline_sync,
 .emit_hdp_flush = uvd_v6_0_ring_emit_hdp_flush,
- .emit_hdp_invalidate = uvd_v6_0_ring_emit_hdp_invalidate,
 .test_ring = uvd_v6_0_ring_test_ring,
 .test_ib = amdgpu_uvd_ring_test_ib,
+ .insert_nop = uvd_v6_0_ring_insert_nop,
+ .pad_ib = amdgpu_ring_generic_pad_ib,
+ .begin_use = amdgpu_uvd_ring_begin_use,
+ .end_use = amdgpu_uvd_ring_end_use,
+ .emit_wreg = uvd_v6_0_ring_emit_wreg,
+};
+
+static const struct amdgpu_ring_funcs uvd_v6_0_enc_ring_vm_funcs = {
+ .type = AMDGPU_RING_TYPE_UVD_ENC,
+ .align_mask = 0x3f,
+ .nop = HEVC_ENC_CMD_NO_OP,
+ .support_64bit_ptrs = false,
+ .no_user_fence = true,
+ .get_rptr = uvd_v6_0_enc_ring_get_rptr,
+ .get_wptr = uvd_v6_0_enc_ring_get_wptr,
+ .set_wptr = uvd_v6_0_enc_ring_set_wptr,
+ .emit_frame_size =
+ 4 + /* uvd_v6_0_enc_ring_emit_pipeline_sync */
+ 5 + /* uvd_v6_0_enc_ring_emit_vm_flush */
+ 5 + 5 + /* uvd_v6_0_enc_ring_emit_fence x2 vm fence */
+ 1, /* uvd_v6_0_enc_ring_insert_end */
+ .emit_ib_size = 5, /* uvd_v6_0_enc_ring_emit_ib */
+ .emit_ib = uvd_v6_0_enc_ring_emit_ib,
+ .emit_fence = uvd_v6_0_enc_ring_emit_fence,
+ .emit_vm_flush = uvd_v6_0_enc_ring_emit_vm_flush,
+ .emit_pipeline_sync = uvd_v6_0_enc_ring_emit_pipeline_sync,
+ .test_ring = uvd_v6_0_enc_ring_test_ring,
+ .test_ib = uvd_v6_0_enc_ring_test_ib,
 .insert_nop = amdgpu_ring_insert_nop,
+ .insert_end = uvd_v6_0_enc_ring_insert_end,
 .pad_ib = amdgpu_ring_generic_pad_ib,
 .begin_use = amdgpu_uvd_ring_begin_use,
 .end_use = amdgpu_uvd_ring_end_use,
@@ -1156,14 +1632,24 @@ static const struct amdgpu_ring_funcs uvd_v6_0_ring_vm_funcs = {
 static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev)
 {
 if (adev->asic_type >= CHIP_POLARIS10) {
- adev->uvd.ring.funcs = &uvd_v6_0_ring_vm_funcs;
+ adev->uvd.inst->ring.funcs = &uvd_v6_0_ring_vm_funcs;
 DRM_INFO("UVD is enabled in VM mode\n");
 } else {
- adev->uvd.ring.funcs = &uvd_v6_0_ring_phys_funcs;
+ adev->uvd.inst->ring.funcs = &uvd_v6_0_ring_phys_funcs;
 DRM_INFO("UVD is enabled in physical mode\n");
 }
 }
+static void uvd_v6_0_set_enc_ring_funcs(struct amdgpu_device *adev)
+{
+ int i;
+
+ for (i = 0; i < adev->uvd.num_enc_rings; ++i)
+ adev->uvd.inst->ring_enc[i].funcs = &uvd_v6_0_enc_ring_vm_funcs;
+
+ DRM_INFO("UVD ENC is enabled in VM mode\n");
+}
+
 static const struct amdgpu_irq_src_funcs uvd_v6_0_irq_funcs = {
 .set = uvd_v6_0_set_interrupt_state,
 .process = uvd_v6_0_process_interrupt,
@@ -1171,8 +1657,12 @@ static const struct amdgpu_irq_src_funcs uvd_v6_0_irq_funcs = {
 static void uvd_v6_0_set_irq_funcs(struct amdgpu_device *adev)
 {
- adev->uvd.irq.num_types = 1;
- adev->uvd.irq.funcs = &uvd_v6_0_irq_funcs;
+ if (uvd_v6_0_enc_support(adev))
+ adev->uvd.inst->irq.num_types = adev->uvd.num_enc_rings + 1;
+ else
+ adev->uvd.inst->irq.num_types = 1;
+
+ adev->uvd.inst->irq.funcs = &uvd_v6_0_irq_funcs;
 }
 const struct amdgpu_ip_block_version uvd_v6_0_ip_block =
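
Note on the encode-support gate introduced in this diff: FW_1_130_16 packs the firmware major/minor/revision numbers into bits 24, 16 and 8 of a single 32-bit value, so uvd_v6_0_enc_support() can compare adev->uvd.fw_version against it numerically (a zero fw_version is treated as "not yet known" and does not disable encode). The following is a standalone illustrative sketch of that packing and comparison, not driver code; the FW_VER helper and the sample fw_version value are assumptions made up for the example.

/* Illustrative only: mirrors the FW_1_130_16 packing used in the diff above. */
#include <stdio.h>
#include <stdint.h>

#define FW_VER(major, minor, rev) (((major) << 24) | ((minor) << 16) | ((rev) << 8))
#define FW_1_130_16 FW_VER(1, 130, 16)

int main(void)
{
	uint32_t fw_version = FW_VER(1, 130, 19);	/* hypothetical runtime value */

	/* 0 means "version unknown, assume new enough", matching the
	 * (!adev->uvd.fw_version || adev->uvd.fw_version >= FW_1_130_16) test. */
	int enc_ok = (fw_version == 0) || (fw_version >= FW_1_130_16);

	printf("fw 0x%08x, need 0x%08x -> UVD ENC %s\n",
	       (unsigned)fw_version, (unsigned)FW_1_130_16,
	       enc_ok ? "supported" : "disabled");
	return 0;
}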
