Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c | 120
1 file changed, 77 insertions(+), 43 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
index d69c8f6daaf8..4b96fd583772 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
@@ -23,7 +23,7 @@
*/
#include <linux/firmware.h>
-#include <drm/drmP.h>
+
#include "amdgpu.h"
#include "amdgpu_uvd.h"
#include "cikd.h"
@@ -44,7 +44,7 @@ static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev);
static void uvd_v4_2_set_irq_funcs(struct amdgpu_device *adev);
static int uvd_v4_2_start(struct amdgpu_device *adev);
static void uvd_v4_2_stop(struct amdgpu_device *adev);
-static int uvd_v4_2_set_clockgating_state(void *handle,
+static int uvd_v4_2_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state);
static void uvd_v4_2_set_dcm(struct amdgpu_device *adev,
bool sw_mode);
@@ -90,9 +90,9 @@ static void uvd_v4_2_ring_set_wptr(struct amdgpu_ring *ring)
WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
}
-static int uvd_v4_2_early_init(void *handle)
+static int uvd_v4_2_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
adev->uvd.num_uvd_inst = 1;
uvd_v4_2_set_ring_funcs(adev);
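
This hunk establishes the conversion pattern repeated through the rest of the patch: the amd_ip_funcs callbacks now receive a struct amdgpu_ip_block pointer instead of an opaque void *handle, and the device is reached through ip_block->adev rather than a cast. A minimal sketch of the pattern (hypothetical callback name, not from this file):

	static int example_ip_early_init(struct amdgpu_ip_block *ip_block)
	{
		struct amdgpu_device *adev = ip_block->adev;

		/* per-IP-block setup goes here */
		return 0;
	}
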
@@ -101,10 +101,10 @@ static int uvd_v4_2_early_init(void *handle)
return 0;
}
-static int uvd_v4_2_sw_init(void *handle)
+static int uvd_v4_2_sw_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_ring *ring;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int r;
/* UVD TRAP */
@@ -118,7 +118,8 @@ static int uvd_v4_2_sw_init(void *handle)
ring = &adev->uvd.inst->ring;
sprintf(ring->name, "uvd");
- r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
+ r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0,
+ AMDGPU_RING_PRIO_DEFAULT, NULL);
if (r)
return r;
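
The ring is now created through the extended amdgpu_ring_init() signature. The first five arguments are unchanged from the old call; the two new ones select a hardware queue priority and an optional shared scheduler score. Annotated form of the call above (annotations are my reading of the call site, not text from the patch):

	r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0,
			     AMDGPU_RING_PRIO_DEFAULT, /* new: hw queue priority */
			     NULL);                    /* new: no shared sched score */
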
@@ -126,15 +127,13 @@ static int uvd_v4_2_sw_init(void *handle)
if (r)
return r;
- r = amdgpu_uvd_entity_init(adev);
-
return r;
}
-static int uvd_v4_2_sw_fini(void *handle)
+static int uvd_v4_2_sw_fini(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
r = amdgpu_uvd_suspend(adev);
if (r)
@@ -148,13 +147,13 @@ static void uvd_v4_2_enable_mgcg(struct amdgpu_device *adev,
/**
* uvd_v4_2_hw_init - start and test UVD block
*
- * @adev: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Initialize the hardware, boot up the VCPU and do some testing
*/
-static int uvd_v4_2_hw_init(void *handle)
+static int uvd_v4_2_hw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct amdgpu_ring *ring = &adev->uvd.inst->ring;
uint32_t tmp;
int r;
@@ -203,45 +202,74 @@ done:
/**
* uvd_v4_2_hw_fini - stop the hardware block
*
- * @adev: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Stop the UVD block, mark ring as not ready any more
*/
-static int uvd_v4_2_hw_fini(void *handle)
+static int uvd_v4_2_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct amdgpu_ring *ring = &adev->uvd.inst->ring;
+ struct amdgpu_device *adev = ip_block->adev;
+
+ cancel_delayed_work_sync(&adev->uvd.idle_work);
if (RREG32(mmUVD_STATUS) != 0)
uvd_v4_2_stop(adev);
- ring->sched.ready = false;
-
return 0;
}
-static int uvd_v4_2_suspend(void *handle)
+static int uvd_v4_2_prepare_suspend(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+
+ return amdgpu_uvd_prepare_suspend(adev);
+}
+
+static int uvd_v4_2_suspend(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
+
+ /*
+ * Proper cleanups before halting the HW engine:
+ * - cancel the delayed idle work
+ * - enable powergating
+ * - enable clockgating
+ * - disable dpm
+ *
+ * TODO: to align with the VCN implementation, move the
+ * jobs for clockgating/powergating/dpm setting to
+ * ->set_powergating_state().
+ */
+ cancel_delayed_work_sync(&adev->uvd.idle_work);
+
+ if (adev->pm.dpm_enabled) {
+ amdgpu_dpm_enable_uvd(adev, false);
+ } else {
+ amdgpu_asic_set_uvd_clocks(adev, 0, 0);
+ /* shutdown the UVD block */
+ amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
+ AMD_PG_STATE_GATE);
+ amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
+ AMD_CG_STATE_GATE);
+ }
- r = uvd_v4_2_hw_fini(adev);
+ r = uvd_v4_2_hw_fini(ip_block);
if (r)
return r;
return amdgpu_uvd_suspend(adev);
}
-static int uvd_v4_2_resume(void *handle)
+static int uvd_v4_2_resume(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- r = amdgpu_uvd_resume(adev);
+ r = amdgpu_uvd_resume(ip_block->adev);
if (r)
return r;
- return uvd_v4_2_hw_init(adev);
+ return uvd_v4_2_hw_init(ip_block);
}
/**
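
The comment block added to uvd_v4_2_suspend() pins down the teardown ordering. Condensed into a sketch that simply restates the code above (kept here for quick reference, not new behaviour):

	cancel_delayed_work_sync(&adev->uvd.idle_work); /* stop the idle handler first */
	/* then gate power and clocks, or let DPM switch the block off */
	uvd_v4_2_hw_fini(ip_block);                     /* halt the VCPU/engine */
	amdgpu_uvd_suspend(adev);                       /* save UVD state for resume */
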
@@ -274,7 +302,7 @@ static int uvd_v4_2_start(struct amdgpu_device *adev)
/* enable VCPU clock */
WREG32(mmUVD_VCPU_CNTL, 1 << 9);
- /* disable interupt */
+ /* disable interrupt */
WREG32_P(mmUVD_MASTINT_EN, 0, ~(1 << 1));
#ifdef __BIG_ENDIAN
@@ -284,6 +312,7 @@ static int uvd_v4_2_start(struct amdgpu_device *adev)
#endif
WREG32(mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
WREG32(mmUVD_MP_SWAP_CNTL, mp_swap_cntl);
+
/* initialize UVD memory controller */
WREG32(mmUVD_LMI_CTRL, 0x203108);
@@ -350,7 +379,7 @@ static int uvd_v4_2_start(struct amdgpu_device *adev)
/* Set the write pointer delay */
WREG32(mmUVD_RBC_RB_WPTR_CNTL, 0);
- /* programm the 4GB memory segment for rptr and ring buffer */
+ /* program the 4GB memory segment for rptr and ring buffer */
WREG32(mmUVD_LMI_EXT40_ADDR, upper_32_bits(ring->gpu_addr) |
(0x7 << 16) | (0x1 << 31));
@@ -439,7 +468,9 @@ static void uvd_v4_2_stop(struct amdgpu_device *adev)
* uvd_v4_2_ring_emit_fence - emit a fence & trap command
*
* @ring: amdgpu_ring pointer
- * @fence: fence to emit
+ * @addr: address
+ * @seq: sequence number
+ * @flags: fence related flags
*
* Write a fence and a trap command to the ring.
*/
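
The kernel-doc now names the parameters the callback actually takes. For reference, the prototype those lines document looks like this (reconstructed from the parameter list, not quoted from the patch):

	static void uvd_v4_2_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
					     u64 seq, unsigned int flags);
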
@@ -491,7 +522,7 @@ static int uvd_v4_2_ring_test_ring(struct amdgpu_ring *ring)
tmp = RREG32(mmUVD_CONTEXT_ID);
if (tmp == 0xDEADBEEF)
break;
- DRM_UDELAY(1);
+ udelay(1);
}
if (i >= adev->usec_timeout)
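
DRM_UDELAY() was a thin legacy wrapper that disappeared along with drmP.h, so the poll now calls udelay() from <linux/delay.h> directly. Reassembled for context, the test loop reads roughly as follows (the loop header sits outside the hunk and is assumed to follow the driver's usual usec_timeout pattern):

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(mmUVD_CONTEXT_ID);
		if (tmp == 0xDEADBEEF)
			break;
		udelay(1);
	}
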
@@ -504,14 +535,16 @@ static int uvd_v4_2_ring_test_ring(struct amdgpu_ring *ring)
* uvd_v4_2_ring_emit_ib - execute indirect buffer
*
* @ring: amdgpu_ring pointer
* @job: job associated with the indirect buffer
* @ib: indirect buffer to execute
+ * @flags: flags associated with the indirect buffer
*
* Write ring commands to execute the indirect buffer
*/
static void uvd_v4_2_ring_emit_ib(struct amdgpu_ring *ring,
struct amdgpu_job *job,
struct amdgpu_ib *ib,
- bool ctx_switch)
+ uint32_t flags)
{
amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_BASE, 0));
amdgpu_ring_write(ring, ib->gpu_addr);
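
Across all rings the emit_ib callback's bool ctx_switch parameter was replaced by a flags bitmask; UVD 4.2 ignores it, but a backend that still needed the old information would test a bit instead of the bool. A hedged sketch (the flag name is the one I believe the common ring code defines, not something this hunk shows):

	if (flags & AMDGPU_HAVE_CTX_SWITCH)
		; /* emit a context-switch preamble, if the engine needs one */
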
@@ -543,7 +576,7 @@ static void uvd_v4_2_mc_resume(struct amdgpu_device *adev)
uint64_t addr;
uint32_t size;
- /* programm the VCPU memory controller bits 0-27 */
+ /* program the VCPU memory controller bits 0-27 */
addr = (adev->uvd.inst->gpu_addr + AMDGPU_UVD_FIRMWARE_OFFSET) >> 3;
size = AMDGPU_UVD_FIRMWARE_SIZE(adev) >> 3;
WREG32(mmUVD_VCPU_CACHE_OFFSET0, addr);
@@ -626,17 +659,17 @@ static void uvd_v4_2_set_dcm(struct amdgpu_device *adev,
WREG32_UVD_CTX(ixUVD_CGC_CTRL2, tmp2);
}
-static bool uvd_v4_2_is_idle(void *handle)
+static bool uvd_v4_2_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
}
-static int uvd_v4_2_wait_for_idle(void *handle)
+static int uvd_v4_2_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
unsigned i;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
for (i = 0; i < adev->usec_timeout; i++) {
if (!(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK))
@@ -645,9 +678,9 @@ static int uvd_v4_2_wait_for_idle(void *handle)
return -ETIMEDOUT;
}
-static int uvd_v4_2_soft_reset(void *handle)
+static int uvd_v4_2_soft_reset(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
uvd_v4_2_stop(adev);
@@ -676,13 +709,13 @@ static int uvd_v4_2_process_interrupt(struct amdgpu_device *adev,
return 0;
}
-static int uvd_v4_2_set_clockgating_state(void *handle,
+static int uvd_v4_2_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
return 0;
}
-static int uvd_v4_2_set_powergating_state(void *handle,
+static int uvd_v4_2_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
/* This doesn't actually powergate the UVD block.
@@ -692,7 +725,7 @@ static int uvd_v4_2_set_powergating_state(void *handle,
* revisit this when there is a cleaner line between
* the smc and the hw blocks
*/
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (state == AMD_PG_STATE_GATE) {
uvd_v4_2_stop(adev);
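
As the preserved comment says, "powergating" here only stops or restarts the block rather than gating power through the SMC. In outline the callback reduces to the following (sketch based on the visible gate branch; the ungate path is assumed to mirror it by calling uvd_v4_2_start()):

	if (state == AMD_PG_STATE_GATE) {
		uvd_v4_2_stop(adev);
		return 0;
	}
	return uvd_v4_2_start(adev);
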
@@ -723,11 +756,11 @@ static int uvd_v4_2_set_powergating_state(void *handle,
static const struct amd_ip_funcs uvd_v4_2_ip_funcs = {
.name = "uvd_v4_2",
.early_init = uvd_v4_2_early_init,
- .late_init = NULL,
.sw_init = uvd_v4_2_sw_init,
.sw_fini = uvd_v4_2_sw_fini,
.hw_init = uvd_v4_2_hw_init,
.hw_fini = uvd_v4_2_hw_fini,
+ .prepare_suspend = uvd_v4_2_prepare_suspend,
.suspend = uvd_v4_2_suspend,
.resume = uvd_v4_2_resume,
.is_idle = uvd_v4_2_is_idle,
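
The explicit ".late_init = NULL" entry can simply be dropped: amd_ip_funcs tables use designated initializers, so any callback left unset is zero-initialized and treated as "not implemented". A minimal illustration (hypothetical table, not this driver's):

	static const struct amd_ip_funcs example_ip_funcs = {
		.name = "example",
		.early_init = example_ip_early_init,
		/* .late_init omitted: implicitly NULL and skipped by the IP-block code */
	};
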
@@ -741,6 +774,7 @@ static const struct amdgpu_ring_funcs uvd_v4_2_ring_funcs = {
.type = AMDGPU_RING_TYPE_UVD,
.align_mask = 0xf,
.support_64bit_ptrs = false,
+ .no_user_fence = true,
.get_rptr = uvd_v4_2_ring_get_rptr,
.get_wptr = uvd_v4_2_ring_get_wptr,
.set_wptr = uvd_v4_2_ring_set_wptr,
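
Setting ".no_user_fence = true" marks the ring as unable to execute user fence commands, so the command-submission path can refuse them up front for UVD 4.2. A hedged sketch of the kind of check a consumer would make (the condition name is hypothetical, not code from this patch):

	if (ring->funcs->no_user_fence && job_wants_user_fence)
		return -EINVAL; /* user fences are not supported on this ring */
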