author    Amber Lin <Amber.Lin@amd.com>    2022-05-17 23:41:01 +0800
committer Alex Deucher <alexander.deucher@amd.com>    2023-06-09 09:40:40 -0400
commit    f544afac3f34124088b981c63843a3cc48f4ee3e (patch)
tree      0ddb1c55d9b67cd3b0d3359e2a1c1841cff66623 /drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
parent    62e790879efbf09edb9f262d5eb7765aeaf89809 (diff)
drm/amdgpu: Add kgd2kfd for GC 9.4.3
New GC (v9.4.3) and ATHUB (v1.8.0) versions are used. Add kgd_gfx_v9_4_3_* functions where the registers used by the kgd_gfx_v9_* functions have changed or have different offsets.

Signed-off-by: Amber Lin <Amber.Lin@amd.com>
Acked-by: Felix Kuehling <Felix.Kuehling@amd.com>
Reviewed-by: Mukul Joshi <mukul.joshi@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
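In this file the patch drops the static qualifier from acquire_queue(), get_queue_mask(), and release_queue() and renames them with the kgd_gfx_v9_ prefix so the GC 9.4.3 backend can call them. Below is a minimal sketch of the matching prototypes, assuming they are exposed through a shared GFX v9 KFD header such as amdgpu_amdkfd_gfx_v9.h; the header itself is not part of the hunks shown here, only the signatures are taken from the diff.

/* Sketch only: declarations assumed to live in a shared GFX v9 KFD header
 * so kgd_gfx_v9_4_3_* code can reuse these helpers; the signatures match
 * the definitions changed in the diff below. */
void kgd_gfx_v9_acquire_queue(struct amdgpu_device *adev, uint32_t pipe_id,
			      uint32_t queue_id);
uint64_t kgd_gfx_v9_get_queue_mask(struct amdgpu_device *adev,
				   uint32_t pipe_id, uint32_t queue_id);
void kgd_gfx_v9_release_queue(struct amdgpu_device *adev);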
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c  30
1 file changed, 15 insertions, 15 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
index ae06d1f2af93..d36219ecd3dd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
@@ -59,7 +59,7 @@ static void unlock_srbm(struct amdgpu_device *adev)
mutex_unlock(&adev->srbm_mutex);
}
-static void acquire_queue(struct amdgpu_device *adev, uint32_t pipe_id,
+void kgd_gfx_v9_acquire_queue(struct amdgpu_device *adev, uint32_t pipe_id,
uint32_t queue_id)
{
uint32_t mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
@@ -68,7 +68,7 @@ static void acquire_queue(struct amdgpu_device *adev, uint32_t pipe_id,
lock_srbm(adev, mec, pipe, queue_id, 0);
}
-static uint64_t get_queue_mask(struct amdgpu_device *adev,
+uint64_t kgd_gfx_v9_get_queue_mask(struct amdgpu_device *adev,
uint32_t pipe_id, uint32_t queue_id)
{
unsigned int bit = pipe_id * adev->gfx.mec.num_queue_per_pipe +
@@ -77,7 +77,7 @@ static uint64_t get_queue_mask(struct amdgpu_device *adev,
return 1ull << bit;
}
-static void release_queue(struct amdgpu_device *adev)
+void kgd_gfx_v9_release_queue(struct amdgpu_device *adev)
{
unlock_srbm(adev);
}
@@ -228,7 +228,7 @@ int kgd_gfx_v9_hqd_load(struct amdgpu_device *adev, void *mqd,
m = get_mqd(mqd);
- acquire_queue(adev, pipe_id, queue_id);
+ kgd_gfx_v9_acquire_queue(adev, pipe_id, queue_id);
/* HQD registers extend from CP_MQD_BASE_ADDR to CP_HQD_EOP_WPTR_MEM. */
mqd_hqd = &m->cp_mqd_base_addr_lo;
@@ -280,7 +280,7 @@ int kgd_gfx_v9_hqd_load(struct amdgpu_device *adev, void *mqd,
WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI),
upper_32_bits((uintptr_t)wptr));
WREG32_SOC15(GC, 0, mmCP_PQ_WPTR_POLL_CNTL1,
- (uint32_t)get_queue_mask(adev, pipe_id, queue_id));
+ (uint32_t)kgd_gfx_v9_get_queue_mask(adev, pipe_id, queue_id));
}
/* Start the EOP fetcher */
@@ -291,7 +291,7 @@ int kgd_gfx_v9_hqd_load(struct amdgpu_device *adev, void *mqd,
data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1);
WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_ACTIVE), data);
- release_queue(adev);
+ kgd_gfx_v9_release_queue(adev);
return 0;
}
@@ -307,7 +307,7 @@ int kgd_gfx_v9_hiq_mqd_load(struct amdgpu_device *adev, void *mqd,
m = get_mqd(mqd);
- acquire_queue(adev, pipe_id, queue_id);
+ kgd_gfx_v9_acquire_queue(adev, pipe_id, queue_id);
mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
@@ -343,7 +343,7 @@ int kgd_gfx_v9_hiq_mqd_load(struct amdgpu_device *adev, void *mqd,
out_unlock:
spin_unlock(&adev->gfx.kiq[0].ring_lock);
- release_queue(adev);
+ kgd_gfx_v9_release_queue(adev);
return r;
}
@@ -365,13 +365,13 @@ int kgd_gfx_v9_hqd_dump(struct amdgpu_device *adev,
if (*dump == NULL)
return -ENOMEM;
- acquire_queue(adev, pipe_id, queue_id);
+ kgd_gfx_v9_acquire_queue(adev, pipe_id, queue_id);
for (reg = SOC15_REG_OFFSET(GC, 0, mmCP_MQD_BASE_ADDR);
reg <= SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_HI); reg++)
DUMP_REG(reg);
- release_queue(adev);
+ kgd_gfx_v9_release_queue(adev);
WARN_ON_ONCE(i != HQD_N_REGS);
*n_regs = i;
@@ -487,7 +487,7 @@ bool kgd_gfx_v9_hqd_is_occupied(struct amdgpu_device *adev,
bool retval = false;
uint32_t low, high;
- acquire_queue(adev, pipe_id, queue_id);
+ kgd_gfx_v9_acquire_queue(adev, pipe_id, queue_id);
act = RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE);
if (act) {
low = lower_32_bits(queue_address >> 8);
@@ -497,7 +497,7 @@ bool kgd_gfx_v9_hqd_is_occupied(struct amdgpu_device *adev,
high == RREG32_SOC15(GC, 0, mmCP_HQD_PQ_BASE_HI))
retval = true;
}
- release_queue(adev);
+ kgd_gfx_v9_release_queue(adev);
return retval;
}
@@ -532,7 +532,7 @@ int kgd_gfx_v9_hqd_destroy(struct amdgpu_device *adev, void *mqd,
if (amdgpu_in_reset(adev))
return -EIO;
- acquire_queue(adev, pipe_id, queue_id);
+ kgd_gfx_v9_acquire_queue(adev, pipe_id, queue_id);
if (m->cp_hqd_vmid == 0)
WREG32_FIELD15_RLC(GC, 0, RLC_CP_SCHEDULERS, scheduler1, 0);
@@ -561,13 +561,13 @@ int kgd_gfx_v9_hqd_destroy(struct amdgpu_device *adev, void *mqd,
break;
if (time_after(jiffies, end_jiffies)) {
pr_err("cp queue preemption time out.\n");
- release_queue(adev);
+ kgd_gfx_v9_release_queue(adev);
return -ETIME;
}
usleep_range(500, 1000);
}
- release_queue(adev);
+ kgd_gfx_v9_release_queue(adev);
return 0;
}
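For illustration only, a GC 9.4.3 override built on top of these exports would bracket its register accesses the same way the functions above do. The function below is hypothetical; only the kgd_gfx_v9_acquire_queue()/kgd_gfx_v9_release_queue() calls correspond to symbols touched by this patch.

/* Hypothetical GC 9.4.3 variant: the acquire/release helper calls are the
 * symbols exported by this patch; the body is a placeholder for registers
 * whose offsets differ on GC 9.4.3. */
static int kgd_gfx_v9_4_3_example_op(struct amdgpu_device *adev,
				     uint32_t pipe_id, uint32_t queue_id)
{
	kgd_gfx_v9_acquire_queue(adev, pipe_id, queue_id);

	/* ...program or read the GC 9.4.3-specific registers here... */

	kgd_gfx_v9_release_queue(adev);
	return 0;
}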