Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/Makefile | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu.h | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 229
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_df.h | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 96
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 20
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h | 26
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c | 92
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/athub_v1_0.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/athub_v2_0.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/df_v3_6.c | 59
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 11
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 220
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c | 978
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_4.h | 35
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c | 39
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 15
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c | 705
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/navi10_ih.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/nv.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/psp_v11_0.c | 37
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/si_dma.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/soc15.c | 32
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vce_v3_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vce_v4_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v2_0.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c | 105
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vega10_ih.c | 2
49 files changed, 2378 insertions(+), 432 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 7ae3b22c5628..c2bbcdd9c875 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -120,6 +120,7 @@ amdgpu-y += \
amdgpu_rlc.o \
gfx_v8_0.o \
gfx_v9_0.o \
+ gfx_v9_4.o \
gfx_v10_0.o
# add async DMA block
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index b1bb10625cd9..da3bcff61b97 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1009,10 +1009,14 @@ int emu_soc_asic_init(struct amdgpu_device *adev);
#define AMDGPU_REGS_IDX (1<<0)
#define AMDGPU_REGS_NO_KIQ (1<<1)
+#define AMDGPU_REGS_KIQ (1<<2)
#define RREG32_NO_KIQ(reg) amdgpu_mm_rreg(adev, (reg), AMDGPU_REGS_NO_KIQ)
#define WREG32_NO_KIQ(reg, v) amdgpu_mm_wreg(adev, (reg), (v), AMDGPU_REGS_NO_KIQ)
+#define RREG32_KIQ(reg) amdgpu_mm_rreg(adev, (reg), AMDGPU_REGS_KIQ)
+#define WREG32_KIQ(reg, v) amdgpu_mm_wreg(adev, (reg), (v), AMDGPU_REGS_KIQ)
+
#define RREG8(reg) amdgpu_mm_rreg8(adev, (reg))
#define WREG8(reg, v) amdgpu_mm_wreg8(adev, (reg), (v))
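The new AMDGPU_REGS_KIQ flag lets a caller force a register access through the KIQ ring even outside SR-IOV runtime. A minimal usage sketch (the call site below is hypothetical; SOC15_REG_OFFSET resolves the absolute offset, as in the gfx_v9_0.c hunk further down):

    uint32_t data;

    /* Direct MMIO read, never routed through the KIQ: */
    data = RREG32_NO_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE));

    /* Same register, but forced through the KIQ ring so the access is
     * serialized against the CP even on bare metal: */
    data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE));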
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
index 82155ac3288a..12247a32f9ef 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
@@ -527,7 +527,7 @@ static int acp_set_powergating_state(void *handle,
enum amd_powergating_state state)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- bool enable = state == AMD_PG_STATE_GATE ? true : false;
+ bool enable = (state == AMD_PG_STATE_GATE);
if (adev->powerplay.pp_funcs &&
adev->powerplay.pp_funcs->set_powergating_by_smu)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index b2487f4f271b..fa8ac9d19a7a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -2129,6 +2129,7 @@ int amdgpu_amdkfd_add_gws_to_process(void *info, void *gws, struct kgd_mem **mem
return -ENOMEM;
mutex_init(&(*mem)->lock);
+ INIT_LIST_HEAD(&(*mem)->bo_va_list);
(*mem)->bo = amdgpu_bo_ref(gws_bo);
(*mem)->domain = AMDGPU_GEM_DOMAIN_GWS;
(*mem)->process_info = process_info;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 64e2babbc36e..94a6c42f29ea 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -42,19 +42,12 @@ const unsigned int amdgpu_ctx_num_entities[AMDGPU_HW_IP_NUM] = {
[AMDGPU_HW_IP_VCN_JPEG] = 1,
};
-static int amdgpu_ctx_total_num_entities(void)
-{
- unsigned i, num_entities = 0;
-
- for (i = 0; i < AMDGPU_HW_IP_NUM; ++i)
- num_entities += amdgpu_ctx_num_entities[i];
-
- return num_entities;
-}
-
static int amdgpu_ctx_priority_permit(struct drm_file *filp,
enum drm_sched_priority priority)
{
+ if (priority < 0 || priority >= DRM_SCHED_PRIORITY_MAX)
+ return -EINVAL;
+
/* NORMAL and below are accessible by everyone */
if (priority <= DRM_SCHED_PRIORITY_NORMAL)
return 0;
@@ -68,64 +61,24 @@ static int amdgpu_ctx_priority_permit(struct drm_file *filp,
return -EACCES;
}
-static int amdgpu_ctx_init(struct amdgpu_device *adev,
- enum drm_sched_priority priority,
- struct drm_file *filp,
- struct amdgpu_ctx *ctx)
+static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, const u32 hw_ip, const u32 ring)
{
- unsigned num_entities = amdgpu_ctx_total_num_entities();
- unsigned i, j;
+ struct amdgpu_device *adev = ctx->adev;
+ struct amdgpu_ctx_entity *entity;
+ struct drm_gpu_scheduler **scheds = NULL, *sched = NULL;
+ unsigned num_scheds = 0;
+ enum drm_sched_priority priority;
int r;
- if (priority < 0 || priority >= DRM_SCHED_PRIORITY_MAX)
- return -EINVAL;
-
- r = amdgpu_ctx_priority_permit(filp, priority);
- if (r)
- return r;
+ entity = kcalloc(1, offsetof(typeof(*entity), fences[amdgpu_sched_jobs]),
+ GFP_KERNEL);
+ if (!entity)
+ return -ENOMEM;
- memset(ctx, 0, sizeof(*ctx));
- ctx->adev = adev;
-
- ctx->fences = kcalloc(amdgpu_sched_jobs * num_entities,
- sizeof(struct dma_fence*), GFP_KERNEL);
- if (!ctx->fences)
- return -ENOMEM;
-
- ctx->entities[0] = kcalloc(num_entities,
- sizeof(struct amdgpu_ctx_entity),
- GFP_KERNEL);
- if (!ctx->entities[0]) {
- r = -ENOMEM;
- goto error_free_fences;
- }
-
- for (i = 0; i < num_entities; ++i) {
- struct amdgpu_ctx_entity *entity = &ctx->entities[0][i];
-
- entity->sequence = 1;
- entity->fences = &ctx->fences[amdgpu_sched_jobs * i];
- }
- for (i = 1; i < AMDGPU_HW_IP_NUM; ++i)
- ctx->entities[i] = ctx->entities[i - 1] +
- amdgpu_ctx_num_entities[i - 1];
-
- kref_init(&ctx->refcount);
- spin_lock_init(&ctx->ring_lock);
- mutex_init(&ctx->lock);
-
- ctx->reset_counter = atomic_read(&adev->gpu_reset_counter);
- ctx->reset_counter_query = ctx->reset_counter;
- ctx->vram_lost_counter = atomic_read(&adev->vram_lost_counter);
- ctx->init_priority = priority;
- ctx->override_priority = DRM_SCHED_PRIORITY_UNSET;
-
- for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
- struct drm_gpu_scheduler **scheds;
- struct drm_gpu_scheduler *sched;
- unsigned num_scheds = 0;
-
- switch (i) {
+ entity->sequence = 1;
+ priority = (ctx->override_priority == DRM_SCHED_PRIORITY_UNSET) ?
+ ctx->init_priority : ctx->override_priority;
+ switch (hw_ip) {
case AMDGPU_HW_IP_GFX:
sched = &adev->gfx.gfx_ring[0].sched;
scheds = &sched;
@@ -166,53 +119,90 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
scheds = adev->jpeg.jpeg_sched;
num_scheds = adev->jpeg.num_jpeg_sched;
break;
- }
-
- for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j)
- r = drm_sched_entity_init(&ctx->entities[i][j].entity,
- priority, scheds,
- num_scheds, &ctx->guilty);
- if (r)
- goto error_cleanup_entities;
}
+ r = drm_sched_entity_init(&entity->entity, priority, scheds, num_scheds,
+ &ctx->guilty);
+ if (r)
+ goto error_free_entity;
+
+ ctx->entities[hw_ip][ring] = entity;
return 0;
-error_cleanup_entities:
- for (i = 0; i < num_entities; ++i)
- drm_sched_entity_destroy(&ctx->entities[0][i].entity);
- kfree(ctx->entities[0]);
+error_free_entity:
+ kfree(entity);
-error_free_fences:
- kfree(ctx->fences);
- ctx->fences = NULL;
return r;
}
+static int amdgpu_ctx_init(struct amdgpu_device *adev,
+ enum drm_sched_priority priority,
+ struct drm_file *filp,
+ struct amdgpu_ctx *ctx)
+{
+ int r;
+
+ r = amdgpu_ctx_priority_permit(filp, priority);
+ if (r)
+ return r;
+
+ memset(ctx, 0, sizeof(*ctx));
+
+ ctx->adev = adev;
+
+ kref_init(&ctx->refcount);
+ spin_lock_init(&ctx->ring_lock);
+ mutex_init(&ctx->lock);
+
+ ctx->reset_counter = atomic_read(&adev->gpu_reset_counter);
+ ctx->reset_counter_query = ctx->reset_counter;
+ ctx->vram_lost_counter = atomic_read(&adev->vram_lost_counter);
+ ctx->init_priority = priority;
+ ctx->override_priority = DRM_SCHED_PRIORITY_UNSET;
+
+ return 0;
+
+}
+
+static void amdgpu_ctx_fini_entity(struct amdgpu_ctx_entity *entity)
+{
+
+ int i;
+
+ if (!entity)
+ return;
+
+ for (i = 0; i < amdgpu_sched_jobs; ++i)
+ dma_fence_put(entity->fences[i]);
+
+ kfree(entity);
+}
+
static void amdgpu_ctx_fini(struct kref *ref)
{
struct amdgpu_ctx *ctx = container_of(ref, struct amdgpu_ctx, refcount);
- unsigned num_entities = amdgpu_ctx_total_num_entities();
struct amdgpu_device *adev = ctx->adev;
unsigned i, j;
if (!adev)
return;
- for (i = 0; i < num_entities; ++i)
- for (j = 0; j < amdgpu_sched_jobs; ++j)
- dma_fence_put(ctx->entities[0][i].fences[j]);
- kfree(ctx->fences);
- kfree(ctx->entities[0]);
+ for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
+ for (j = 0; j < AMDGPU_MAX_ENTITY_NUM; ++j) {
+ amdgpu_ctx_fini_entity(ctx->entities[i][j]);
+ ctx->entities[i][j] = NULL;
+ }
+ }
mutex_destroy(&ctx->lock);
-
kfree(ctx);
}
int amdgpu_ctx_get_entity(struct amdgpu_ctx *ctx, u32 hw_ip, u32 instance,
u32 ring, struct drm_sched_entity **entity)
{
+ int r;
+
if (hw_ip >= AMDGPU_HW_IP_NUM) {
DRM_ERROR("unknown HW IP type: %d\n", hw_ip);
return -EINVAL;
@@ -229,7 +219,13 @@ int amdgpu_ctx_get_entity(struct amdgpu_ctx *ctx, u32 hw_ip, u32 instance,
return -EINVAL;
}
- *entity = &ctx->entities[hw_ip][ring].entity;
+ if (ctx->entities[hw_ip][ring] == NULL) {
+ r = amdgpu_ctx_init_entity(ctx, hw_ip, ring);
+ if (r)
+ return r;
+ }
+
+ *entity = &ctx->entities[hw_ip][ring]->entity;
return 0;
}
@@ -269,14 +265,17 @@ static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
static void amdgpu_ctx_do_release(struct kref *ref)
{
struct amdgpu_ctx *ctx;
- unsigned num_entities;
- u32 i;
+ u32 i, j;
ctx = container_of(ref, struct amdgpu_ctx, refcount);
+ for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
+ for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) {
+ if (!ctx->entities[i][j])
+ continue;
- num_entities = amdgpu_ctx_total_num_entities();
- for (i = 0; i < num_entities; i++)
- drm_sched_entity_destroy(&ctx->entities[0][i].entity);
+ drm_sched_entity_destroy(&ctx->entities[i][j]->entity);
+ }
+ }
amdgpu_ctx_fini(ref);
}
@@ -506,19 +505,23 @@ struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
enum drm_sched_priority priority)
{
- unsigned num_entities = amdgpu_ctx_total_num_entities();
enum drm_sched_priority ctx_prio;
- unsigned i;
+ unsigned i, j;
ctx->override_priority = priority;
ctx_prio = (ctx->override_priority == DRM_SCHED_PRIORITY_UNSET) ?
ctx->init_priority : ctx->override_priority;
+ for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
+ for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) {
+ struct drm_sched_entity *entity;
- for (i = 0; i < num_entities; i++) {
- struct drm_sched_entity *entity = &ctx->entities[0][i].entity;
+ if (!ctx->entities[i][j])
+ continue;
- drm_sched_entity_set_priority(entity, ctx_prio);
+ entity = &ctx->entities[i][j]->entity;
+ drm_sched_entity_set_priority(entity, ctx_prio);
+ }
}
}
@@ -554,20 +557,24 @@ void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr)
long amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr, long timeout)
{
- unsigned num_entities = amdgpu_ctx_total_num_entities();
struct amdgpu_ctx *ctx;
struct idr *idp;
- uint32_t id, i;
+ uint32_t id, i, j;
idp = &mgr->ctx_handles;
mutex_lock(&mgr->lock);
idr_for_each_entry(idp, ctx, id) {
- for (i = 0; i < num_entities; i++) {
- struct drm_sched_entity *entity;
+ for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
+ for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) {
+ struct drm_sched_entity *entity;
+
+ if (!ctx->entities[i][j])
+ continue;
- entity = &ctx->entities[0][i].entity;
- timeout = drm_sched_entity_flush(entity, timeout);
+ entity = &ctx->entities[i][j]->entity;
+ timeout = drm_sched_entity_flush(entity, timeout);
+ }
}
}
mutex_unlock(&mgr->lock);
@@ -576,10 +583,9 @@ long amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr, long timeout)
void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
{
- unsigned num_entities = amdgpu_ctx_total_num_entities();
struct amdgpu_ctx *ctx;
struct idr *idp;
- uint32_t id, i;
+ uint32_t id, i, j;
idp = &mgr->ctx_handles;
@@ -589,8 +595,17 @@ void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
continue;
}
- for (i = 0; i < num_entities; i++)
- drm_sched_entity_fini(&ctx->entities[0][i].entity);
+ for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
+ for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) {
+ struct drm_sched_entity *entity;
+
+ if (!ctx->entities[i][j])
+ continue;
+
+ entity = &ctx->entities[i][j]->entity;
+ drm_sched_entity_fini(entity);
+ }
+ }
}
}
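The context code now allocates each scheduler entity on first use from amdgpu_ctx_get_entity(), and the per-entity fence ring became a flexible array member sized into the same allocation, replacing the separate ctx->fences table. A condensed sketch of that pattern with a hypothetical demo struct (kcalloc(1, size, GFP_KERNEL) in the hunk above is equivalent to the kzalloc() here):

    struct demo_entity {
            uint64_t sequence;
            struct dma_fence *fences[];     /* flexible array member */
    };

    static struct demo_entity *demo_entity_alloc(unsigned int num_fences)
    {
            /* Header plus fence slots come from one zeroed allocation;
             * offsetof() on the flexible array yields the total size. */
            return kzalloc(offsetof(struct demo_entity, fences[num_fences]),
                           GFP_KERNEL);
    }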
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
index 4ad90a44dc3c..de490f183af2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
@@ -29,10 +29,12 @@ struct drm_device;
struct drm_file;
struct amdgpu_fpriv;
+#define AMDGPU_MAX_ENTITY_NUM 4
+
struct amdgpu_ctx_entity {
uint64_t sequence;
- struct dma_fence **fences;
struct drm_sched_entity entity;
+ struct dma_fence *fences[];
};
struct amdgpu_ctx {
@@ -42,8 +44,7 @@ struct amdgpu_ctx {
unsigned reset_counter_query;
uint32_t vram_lost_counter;
spinlock_t ring_lock;
- struct dma_fence **fences;
- struct amdgpu_ctx_entity *entities[AMDGPU_HW_IP_NUM];
+ struct amdgpu_ctx_entity *entities[AMDGPU_HW_IP_NUM][AMDGPU_MAX_ENTITY_NUM];
bool preamble_presented;
enum drm_sched_priority init_priority;
enum drm_sched_priority override_priority;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 53d882000101..39cd545976b7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -216,8 +216,8 @@ uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
{
uint32_t ret;
- if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))
- return amdgpu_virt_kiq_rreg(adev, reg);
+ if ((acc_flags & AMDGPU_REGS_KIQ) || (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)))
+ return amdgpu_kiq_rreg(adev, reg);
if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
@@ -294,8 +294,8 @@ void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
adev->last_mm_index = v;
}
- if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))
- return amdgpu_virt_kiq_wreg(adev, reg, v);
+ if ((acc_flags & AMDGPU_REGS_KIQ) || (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)))
+ return amdgpu_kiq_wreg(adev, reg, v);
if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
@@ -985,7 +985,7 @@ static void amdgpu_device_check_vm_size(struct amdgpu_device *adev)
static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
{
struct sysinfo si;
- bool is_os_64 = (sizeof(void *) == 8) ? true : false;
+ bool is_os_64 = (sizeof(void *) == 8);
uint64_t total_memory;
uint64_t dram_size_seven_GB = 0x1B8000000;
uint64_t dram_size_three_GB = 0xB8000000;
@@ -3760,6 +3760,10 @@ bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
case CHIP_VEGA12:
case CHIP_RAVEN:
case CHIP_ARCTURUS:
+ case CHIP_RENOIR:
+ case CHIP_NAVI10:
+ case CHIP_NAVI14:
+ case CHIP_NAVI12:
break;
default:
goto disabled;
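The register accessors in amdgpu_device.c now route through the KIQ for two reasons instead of one. The combined condition above is equivalent to this hypothetical helper, assuming amdgpu_sriov_runtime() reports whether an SR-IOV guest must avoid direct MMIO:

    static bool demo_use_kiq(struct amdgpu_device *adev, uint32_t acc_flags)
    {
            if (acc_flags & AMDGPU_REGS_KIQ)        /* caller forced KIQ */
                    return true;
            if (acc_flags & AMDGPU_REGS_NO_KIQ)     /* caller forbade KIQ */
                    return false;
            return amdgpu_sriov_runtime(adev);      /* SR-IOV guests default to KIQ */
    }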
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_df.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_df.h
index 61a26c15c8dd..057f6ea645d7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_df.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_df.h
@@ -52,6 +52,9 @@ struct amdgpu_df_funcs {
uint64_t (*get_fica)(struct amdgpu_device *adev, uint32_t ficaa_val);
void (*set_fica)(struct amdgpu_device *adev, uint32_t ficaa_val,
uint32_t ficadl_val, uint32_t ficadh_val);
+ uint64_t (*get_dram_base_addr)(struct amdgpu_device *adev,
+ uint32_t df_inst);
+ uint32_t (*get_df_inst_id)(struct amdgpu_device *adev);
};
struct amdgpu_df {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index b88b8b82bb64..0f960b498792 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -296,7 +296,7 @@ int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev,
spin_lock_init(&kiq->ring_lock);
- r = amdgpu_device_wb_get(adev, &adev->virt.reg_val_offs);
+ r = amdgpu_device_wb_get(adev, &kiq->reg_val_offs);
if (r)
return r;
@@ -321,7 +321,7 @@ int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev,
void amdgpu_gfx_kiq_free_ring(struct amdgpu_ring *ring)
{
- amdgpu_device_wb_free(ring->adev, ring->adev->virt.reg_val_offs);
+ amdgpu_device_wb_free(ring->adev, ring->adev->gfx.kiq.reg_val_offs);
amdgpu_ring_fini(ring);
}
@@ -658,3 +658,95 @@ int amdgpu_gfx_cp_ecc_error_irq(struct amdgpu_device *adev,
amdgpu_ras_interrupt_dispatch(adev, &ih_data);
return 0;
}
+
+uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
+{
+ signed long r, cnt = 0;
+ unsigned long flags;
+ uint32_t seq;
+ struct amdgpu_kiq *kiq = &adev->gfx.kiq;
+ struct amdgpu_ring *ring = &kiq->ring;
+
+ BUG_ON(!ring->funcs->emit_rreg);
+
+ spin_lock_irqsave(&kiq->ring_lock, flags);
+ amdgpu_ring_alloc(ring, 32);
+ amdgpu_ring_emit_rreg(ring, reg);
+ amdgpu_fence_emit_polling(ring, &seq);
+ amdgpu_ring_commit(ring);
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
+
+ r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
+
+ /* don't wait anymore for gpu reset case because this way may
+ * block gpu_recover() routine forever, e.g. this virt_kiq_rreg
+ * is triggered in TTM and ttm_bo_lock_delayed_workqueue() will
+ * never return if we keep waiting in virt_kiq_rreg, which cause
+ * gpu_recover() hang there.
+ *
+ * also don't wait anymore for IRQ context
+ * */
+ if (r < 1 && (adev->in_gpu_reset || in_interrupt()))
+ goto failed_kiq_read;
+
+ might_sleep();
+ while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
+ msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
+ r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
+ }
+
+ if (cnt > MAX_KIQ_REG_TRY)
+ goto failed_kiq_read;
+
+ return adev->wb.wb[kiq->reg_val_offs];
+
+failed_kiq_read:
+ pr_err("failed to read reg:%x\n", reg);
+ return ~0;
+}
+
+void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
+{
+ signed long r, cnt = 0;
+ unsigned long flags;
+ uint32_t seq;
+ struct amdgpu_kiq *kiq = &adev->gfx.kiq;
+ struct amdgpu_ring *ring = &kiq->ring;
+
+ BUG_ON(!ring->funcs->emit_wreg);
+
+ spin_lock_irqsave(&kiq->ring_lock, flags);
+ amdgpu_ring_alloc(ring, 32);
+ amdgpu_ring_emit_wreg(ring, reg, v);
+ amdgpu_fence_emit_polling(ring, &seq);
+ amdgpu_ring_commit(ring);
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
+
+ r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
+
+ /* don't wait anymore for gpu reset case because this way may
+ * block gpu_recover() routine forever, e.g. this virt_kiq_rreg
+ * is triggered in TTM and ttm_bo_lock_delayed_workqueue() will
+ * never return if we keep waiting in virt_kiq_rreg, which cause
+ * gpu_recover() hang there.
+ *
+ * also don't wait anymore for IRQ context
+ * */
+ if (r < 1 && (adev->in_gpu_reset || in_interrupt()))
+ goto failed_kiq_write;
+
+ might_sleep();
+ while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
+
+ msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
+ r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
+ }
+
+ if (cnt > MAX_KIQ_REG_TRY)
+ goto failed_kiq_write;
+
+ return;
+
+failed_kiq_write:
+ pr_err("failed to write reg:%x\n", reg);
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
index af4bd279f42f..ca17ffb01301 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
@@ -94,6 +94,7 @@ struct amdgpu_kiq {
struct amdgpu_ring ring;
struct amdgpu_irq_src irq;
const struct kiq_pm4_funcs *pmf;
+ uint32_t reg_val_offs;
};
/*
@@ -375,4 +376,6 @@ int amdgpu_gfx_process_ras_data_cb(struct amdgpu_device *adev,
int amdgpu_gfx_cp_ecc_error_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry);
+uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg);
+void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
index 86267baca07c..d3c27a3c43f6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
@@ -60,11 +60,6 @@
*/
#define AMDGPU_GMC_FAULT_TIMEOUT 5000ULL
-/*
- * Default stolen memory size, 1024 * 768 * 4
- */
-#define AMDGPU_STOLEN_BIST_TRAINING_DEFAULT_SIZE 0x300000ULL
-
struct firmware;
/*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
index 3265487b859f..611021514c52 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
@@ -172,6 +172,8 @@ struct psp_dtm_context {
#define MEM_TRAIN_SYSTEM_SIGNATURE 0x54534942
#define GDDR6_MEM_TRAINING_DATA_SIZE_IN_BYTES 0x1000
#define GDDR6_MEM_TRAINING_OFFSET 0x8000
+/*Define the VRAM size that will be encroached by BIST training.*/
+#define GDDR6_MEM_TRAINING_ENCROACHED_SIZE 0x2000000
enum psp_memory_training_init_flag {
PSP_MEM_TRAIN_NOT_SUPPORT = 0x0,
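For reference, the encroached span works out to 32 MiB; an illustrative sanity check (not part of the patch, and BUILD_BUG_ON must sit in function scope):

    /* 0x2000000 bytes == 32 MiB of VRAM clobbered by BIST training. */
    BUILD_BUG_ON(GDDR6_MEM_TRAINING_ENCROACHED_SIZE != 32 * 1024 * 1024);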
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index 766be7f18282..cef94e2169fe 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -742,6 +742,20 @@ int amdgpu_ras_error_query(struct amdgpu_device *adev,
return 0;
}
+uint64_t get_xgmi_relative_phy_addr(struct amdgpu_device *adev, uint64_t addr)
+{
+ uint32_t df_inst_id;
+
+ if ((!adev->df.funcs) ||
+ (!adev->df.funcs->get_df_inst_id) ||
+ (!adev->df.funcs->get_dram_base_addr))
+ return addr;
+
+ df_inst_id = adev->df.funcs->get_df_inst_id(adev);
+
+ return addr + adev->df.funcs->get_dram_base_addr(adev, df_inst_id);
+}
+
/* wrapper of psp_ras_trigger_error */
int amdgpu_ras_error_inject(struct amdgpu_device *adev,
struct ras_inject_if *info)
@@ -759,6 +773,12 @@ int amdgpu_ras_error_inject(struct amdgpu_device *adev,
if (!obj)
return -EINVAL;
+ /* Calculate XGMI relative offset */
+ if (adev->gmc.xgmi.num_physical_nodes > 1) {
+ block_info.address = get_xgmi_relative_phy_addr(adev,
+ block_info.address);
+ }
+
switch (info->head.block) {
case AMDGPU_RAS_BLOCK__GFX:
if (adev->gfx.funcs->ras_error_inject)
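This is how the two new DF callbacks compose in get_xgmi_relative_phy_addr(): on a multi-node XGMI hive, the node-local error address is rebased onto this GPU's DRAM window before injection. A sketch with a hypothetical input address:

    uint64_t addr = 0x12345000ULL;  /* hypothetical node-local address */
    uint32_t df_inst_id = adev->df.funcs->get_df_inst_id(adev);

    /* Final address = local offset + this node's DRAM base in the hive. */
    addr += adev->df.funcs->get_dram_base_addr(adev, df_inst_id);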
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index ceb0dbf685f1..59ddba137946 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -652,7 +652,7 @@ static int amdgpu_vce_cs_reloc(struct amdgpu_cs_parser *p, uint32_t ib_idx,
if ((addr + (uint64_t)size) >
(mapping->last + 1) * AMDGPU_GPU_PAGE_SIZE) {
- DRM_ERROR("BO to small for addr 0x%010Lx %d %d\n",
+ DRM_ERROR("BO too small for addr 0x%010Lx %d %d\n",
addr, lo, hi);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
index c4984c5fb2db..d6deb0eb1e15 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
@@ -65,33 +65,33 @@
/* 1 second timeout */
#define VCN_IDLE_TIMEOUT msecs_to_jiffies(1000)
-#define RREG32_SOC15_DPG_MODE(ip, inst, reg, mask, sram_sel) \
- ({ WREG32_SOC15(ip, inst, mmUVD_DPG_LMA_MASK, mask); \
- WREG32_SOC15(ip, inst, mmUVD_DPG_LMA_CTL, \
+#define RREG32_SOC15_DPG_MODE(ip, inst_idx, reg, mask, sram_sel) \
+ ({ WREG32_SOC15(ip, inst_idx, mmUVD_DPG_LMA_MASK, mask); \
+ WREG32_SOC15(ip, inst_idx, mmUVD_DPG_LMA_CTL, \
UVD_DPG_LMA_CTL__MASK_EN_MASK | \
- ((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) \
+ ((adev->reg_offset[ip##_HWIP][inst_idx][reg##_BASE_IDX] + reg) \
<< UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT) | \
(sram_sel << UVD_DPG_LMA_CTL__SRAM_SEL__SHIFT)); \
- RREG32_SOC15(ip, inst, mmUVD_DPG_LMA_DATA); \
+ RREG32_SOC15(ip, inst_idx, mmUVD_DPG_LMA_DATA); \
})
-#define WREG32_SOC15_DPG_MODE(ip, inst, reg, value, mask, sram_sel) \
+#define WREG32_SOC15_DPG_MODE(ip, inst_idx, reg, value, mask, sram_sel) \
do { \
- WREG32_SOC15(ip, inst, mmUVD_DPG_LMA_DATA, value); \
- WREG32_SOC15(ip, inst, mmUVD_DPG_LMA_MASK, mask); \
- WREG32_SOC15(ip, inst, mmUVD_DPG_LMA_CTL, \
+ WREG32_SOC15(ip, inst_idx, mmUVD_DPG_LMA_DATA, value); \
+ WREG32_SOC15(ip, inst_idx, mmUVD_DPG_LMA_MASK, mask); \
+ WREG32_SOC15(ip, inst_idx, mmUVD_DPG_LMA_CTL, \
UVD_DPG_LMA_CTL__READ_WRITE_MASK | \
- ((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) \
+ ((adev->reg_offset[ip##_HWIP][inst_idx][reg##_BASE_IDX] + reg) \
<< UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT) | \
(sram_sel << UVD_DPG_LMA_CTL__SRAM_SEL__SHIFT)); \
} while (0)
-#define SOC15_DPG_MODE_OFFSET_2_0(ip, inst, reg) \
+#define SOC15_DPG_MODE_OFFSET_2_0(ip, inst_idx, reg) \
({ \
uint32_t internal_reg_offset, addr; \
bool video_range, aon_range; \
\
- addr = (adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg); \
+ addr = (adev->reg_offset[ip##_HWIP][inst_idx][reg##_BASE_IDX] + reg); \
addr <<= 2; \
video_range = ((((0xFFFFF & addr) >= (VCN_VID_SOC_ADDRESS_2_0)) && \
((0xFFFFF & addr) < ((VCN_VID_SOC_ADDRESS_2_0 + 0x2600))))); \
@@ -111,7 +111,7 @@
#define RREG32_SOC15_DPG_MODE_2_0(inst_idx, offset, mask_en) \
({ \
- WREG32_SOC15(VCN, inst, mmUVD_DPG_LMA_CTL, \
+ WREG32_SOC15(VCN, inst_idx, mmUVD_DPG_LMA_CTL, \
(0x0 << UVD_DPG_LMA_CTL__READ_WRITE__SHIFT | \
mask_en << UVD_DPG_LMA_CTL__MASK_EN__SHIFT | \
offset << UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT)); \
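The inst -> inst_idx rename is more than cosmetic: RREG32_SOC15_DPG_MODE_2_0 already took an inst_idx parameter while its body referenced inst, silently binding to whatever inst variable happened to be in scope at the call site. A minimal illustration with hypothetical macros:

    #define DEMO_BROKEN(inst_idx)   do_read(inst)       /* binds to caller's `inst` */
    #define DEMO_FIXED(inst_idx)    do_read(inst_idx)   /* binds to the argument */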
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index 103033f96f13..adc813cde8e2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -45,98 +45,6 @@ void amdgpu_virt_init_setting(struct amdgpu_device *adev)
adev->pg_flags = 0;
}
-uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
-{
- signed long r, cnt = 0;
- unsigned long flags;
- uint32_t seq;
- struct amdgpu_kiq *kiq = &adev->gfx.kiq;
- struct amdgpu_ring *ring = &kiq->ring;
-
- BUG_ON(!ring->funcs->emit_rreg);
-
- spin_lock_irqsave(&kiq->ring_lock, flags);
- amdgpu_ring_alloc(ring, 32);
- amdgpu_ring_emit_rreg(ring, reg);
- amdgpu_fence_emit_polling(ring, &seq);
- amdgpu_ring_commit(ring);
- spin_unlock_irqrestore(&kiq->ring_lock, flags);
-
- r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
-
- /* don't wait anymore for gpu reset case because this way may
- * block gpu_recover() routine forever, e.g. this virt_kiq_rreg
- * is triggered in TTM and ttm_bo_lock_delayed_workqueue() will
- * never return if we keep waiting in virt_kiq_rreg, which cause
- * gpu_recover() hang there.
- *
- * also don't wait anymore for IRQ context
- * */
- if (r < 1 && (adev->in_gpu_reset || in_interrupt()))
- goto failed_kiq_read;
-
- might_sleep();
- while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
- msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
- r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
- }
-
- if (cnt > MAX_KIQ_REG_TRY)
- goto failed_kiq_read;
-
- return adev->wb.wb[adev->virt.reg_val_offs];
-
-failed_kiq_read:
- pr_err("failed to read reg:%x\n", reg);
- return ~0;
-}
-
-void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
-{
- signed long r, cnt = 0;
- unsigned long flags;
- uint32_t seq;
- struct amdgpu_kiq *kiq = &adev->gfx.kiq;
- struct amdgpu_ring *ring = &kiq->ring;
-
- BUG_ON(!ring->funcs->emit_wreg);
-
- spin_lock_irqsave(&kiq->ring_lock, flags);
- amdgpu_ring_alloc(ring, 32);
- amdgpu_ring_emit_wreg(ring, reg, v);
- amdgpu_fence_emit_polling(ring, &seq);
- amdgpu_ring_commit(ring);
- spin_unlock_irqrestore(&kiq->ring_lock, flags);
-
- r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
-
- /* don't wait anymore for gpu reset case because this way may
- * block gpu_recover() routine forever, e.g. this virt_kiq_rreg
- * is triggered in TTM and ttm_bo_lock_delayed_workqueue() will
- * never return if we keep waiting in virt_kiq_rreg, which cause
- * gpu_recover() hang there.
- *
- * also don't wait anymore for IRQ context
- * */
- if (r < 1 && (adev->in_gpu_reset || in_interrupt()))
- goto failed_kiq_write;
-
- might_sleep();
- while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
-
- msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
- r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
- }
-
- if (cnt > MAX_KIQ_REG_TRY)
- goto failed_kiq_write;
-
- return;
-
-failed_kiq_write:
- pr_err("failed to write reg:%x\n", reg);
-}
-
void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev,
uint32_t reg0, uint32_t reg1,
uint32_t ref, uint32_t mask)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
index 4d1ac7612967..daaf909d009a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
@@ -287,8 +287,6 @@ static inline bool is_virtual_machine(void)
bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev);
void amdgpu_virt_init_setting(struct amdgpu_device *adev);
-uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg);
-void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v);
void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev,
uint32_t reg0, uint32_t rreg1,
uint32_t ref, uint32_t mask);
diff --git a/drivers/gpu/drm/amd/amdgpu/athub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/athub_v1_0.c
index d9cc746af5e6..847ca9b3ce4e 100644
--- a/drivers/gpu/drm/amd/amdgpu/athub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/athub_v1_0.c
@@ -74,9 +74,9 @@ int athub_v1_0_set_clockgating(struct amdgpu_device *adev,
case CHIP_VEGA20:
case CHIP_RAVEN:
athub_update_medium_grain_clock_gating(adev,
- state == AMD_CG_STATE_GATE ? true : false);
+ state == AMD_CG_STATE_GATE);
athub_update_medium_grain_light_sleep(adev,
- state == AMD_CG_STATE_GATE ? true : false);
+ state == AMD_CG_STATE_GATE);
break;
default:
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/athub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/athub_v2_0.c
index ceb9aa4df0e7..921a69abda55 100644
--- a/drivers/gpu/drm/amd/amdgpu/athub_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/athub_v2_0.c
@@ -77,9 +77,9 @@ int athub_v2_0_set_clockgating(struct amdgpu_device *adev,
case CHIP_NAVI14:
case CHIP_NAVI12:
athub_v2_0_update_medium_grain_clock_gating(adev,
- state == AMD_CG_STATE_GATE ? true : false);
+ state == AMD_CG_STATE_GATE);
athub_v2_0_update_medium_grain_light_sleep(adev,
- state == AMD_CG_STATE_GATE ? true : false);
+ state == AMD_CG_STATE_GATE);
break;
default:
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
index f51326598a8c..5a1bd8ed1a6c 100644
--- a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
+++ b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
@@ -27,6 +27,9 @@
#include "df/df_3_6_offset.h"
#include "df/df_3_6_sh_mask.h"
+#define DF_3_6_SMN_REG_INST_DIST 0x8
+#define DF_3_6_INST_CNT 8
+
static u32 df_v3_6_channel_number[] = {1, 2, 0, 4, 0, 8, 0,
16, 32, 0, 0, 0, 2, 4, 8};
@@ -683,6 +686,58 @@ static void df_v3_6_pmc_get_count(struct amdgpu_device *adev,
}
}
+static uint64_t df_v3_6_get_dram_base_addr(struct amdgpu_device *adev,
+ uint32_t df_inst)
+{
+ uint32_t base_addr_reg_val = 0;
+ uint64_t base_addr = 0;
+
+ base_addr_reg_val = RREG32_PCIE(smnDF_CS_UMC_AON0_DramBaseAddress0 +
+ df_inst * DF_3_6_SMN_REG_INST_DIST);
+
+ if (REG_GET_FIELD(base_addr_reg_val,
+ DF_CS_UMC_AON0_DramBaseAddress0,
+ AddrRngVal) == 0) {
+ DRM_WARN("address range not valid");
+ return 0;
+ }
+
+ base_addr = REG_GET_FIELD(base_addr_reg_val,
+ DF_CS_UMC_AON0_DramBaseAddress0,
+ DramBaseAddr);
+
+ return base_addr << 28;
+}
+
+static uint32_t df_v3_6_get_df_inst_id(struct amdgpu_device *adev)
+{
+ uint32_t xgmi_node_id = 0;
+ uint32_t df_inst_id = 0;
+
+ /* Walk through DF dst nodes to find current XGMI node */
+ for (df_inst_id = 0; df_inst_id < DF_3_6_INST_CNT; df_inst_id++) {
+
+ xgmi_node_id = RREG32_PCIE(smnDF_CS_UMC_AON0_DramLimitAddress0 +
+ df_inst_id * DF_3_6_SMN_REG_INST_DIST);
+ xgmi_node_id = REG_GET_FIELD(xgmi_node_id,
+ DF_CS_UMC_AON0_DramLimitAddress0,
+ DstFabricID);
+
+ /* TODO: establish reason dest fabric id is offset by 7 */
+ xgmi_node_id = xgmi_node_id >> 7;
+
+ if (adev->gmc.xgmi.physical_node_id == xgmi_node_id)
+ break;
+ }
+
+ if (df_inst_id == DF_3_6_INST_CNT) {
+ DRM_WARN("cant match df dst id with gpu node");
+ return 0;
+ }
+
+ return df_inst_id;
+}
+
const struct amdgpu_df_funcs df_v3_6_funcs = {
.sw_init = df_v3_6_sw_init,
.sw_fini = df_v3_6_sw_fini,
@@ -696,5 +751,7 @@ const struct amdgpu_df_funcs df_v3_6_funcs = {
.pmc_stop = df_v3_6_pmc_stop,
.pmc_get_count = df_v3_6_pmc_get_count,
.get_fica = df_v3_6_get_fica,
- .set_fica = df_v3_6_set_fica
+ .set_fica = df_v3_6_set_fica,
+ .get_dram_base_addr = df_v3_6_get_dram_base_addr,
+ .get_df_inst_id = df_v3_6_get_df_inst_id
};
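The DramBaseAddr field appears to store the base in 256 MB granules, hence the << 28 when df_v3_6_get_dram_base_addr() reconstructs the byte address. A worked example with a hypothetical field value:

    /* Field value 0x4 -> 0x4ULL << 28 == 0x40000000 (a 1 GB base). */
    uint64_t base_addr = 0x4ULL << 28;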
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index 874f641de281..1785fdad6ecb 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -368,7 +368,7 @@ static const struct kiq_pm4_funcs gfx_v10_0_kiq_pm4_funcs = {
.map_queues_size = 7,
.unmap_queues_size = 6,
.query_status_size = 7,
- .invalidate_tlbs_size = 12,
+ .invalidate_tlbs_size = 2,
};
static void gfx_v10_0_set_kiq_pm4_funcs(struct amdgpu_device *adev)
@@ -4229,7 +4229,7 @@ static int gfx_v10_0_set_powergating_state(void *handle,
enum amd_powergating_state state)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- bool enable = (state == AMD_PG_STATE_GATE) ? true : false;
+ bool enable = (state == AMD_PG_STATE_GATE);
switch (adev->asic_type) {
case CHIP_NAVI10:
case CHIP_NAVI14:
@@ -4255,7 +4255,7 @@ static int gfx_v10_0_set_clockgating_state(void *handle,
case CHIP_NAVI14:
case CHIP_NAVI12:
gfx_v10_0_update_gfx_clock_gating(adev,
- state == AMD_CG_STATE_GATE ? true : false);
+ state == AMD_CG_STATE_GATE);
break;
default:
break;
@@ -4737,6 +4737,7 @@ static void gfx_v10_0_ring_emit_tmz(struct amdgpu_ring *ring, bool start)
static void gfx_v10_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg)
{
struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_kiq *kiq = &adev->gfx.kiq;
amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
amdgpu_ring_write(ring, 0 | /* src: register*/
@@ -4745,9 +4746,9 @@ static void gfx_v10_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg)
amdgpu_ring_write(ring, reg);
amdgpu_ring_write(ring, 0);
amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
- adev->virt.reg_val_offs * 4));
+ kiq->reg_val_offs * 4));
amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
- adev->virt.reg_val_offs * 4));
+ kiq->reg_val_offs * 4));
}
static void gfx_v10_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 46f0533ba43f..fa245973de12 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -6449,6 +6449,7 @@ static void gfx_v8_0_ring_emit_patch_cond_exec(struct amdgpu_ring *ring, unsigne
static void gfx_v8_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg)
{
struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_kiq *kiq = &adev->gfx.kiq;
amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
amdgpu_ring_write(ring, 0 | /* src: register*/
@@ -6457,9 +6458,9 @@ static void gfx_v8_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg)
amdgpu_ring_write(ring, reg);
amdgpu_ring_write(ring, 0);
amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
- adev->virt.reg_val_offs * 4));
+ kiq->reg_val_offs * 4));
amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
- adev->virt.reg_val_offs * 4));
+ kiq->reg_val_offs * 4));
}
static void gfx_v8_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 46ab46757b25..90f64b8bc358 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -48,6 +48,8 @@
#include "amdgpu_ras.h"
+#include "gfx_v9_4.h"
+
#define GFX9_NUM_GFX_RINGS 1
#define GFX9_MEC_HPD_SIZE 4096
#define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L
@@ -736,6 +738,7 @@ static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring);
static u64 gfx_v9_0_ring_get_rptr_compute(struct amdgpu_ring *ring);
static int gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev,
void *ras_error_status);
+static void gfx_v9_0_clear_ras_edc_counter(struct amdgpu_device *adev);
static int gfx_v9_0_ras_error_inject(struct amdgpu_device *adev,
void *inject_if);
@@ -859,7 +862,7 @@ static const struct kiq_pm4_funcs gfx_v9_0_kiq_pm4_funcs = {
.map_queues_size = 7,
.unmap_queues_size = 6,
.query_status_size = 7,
- .invalidate_tlbs_size = 12,
+ .invalidate_tlbs_size = 2,
};
static void gfx_v9_0_set_kiq_pm4_funcs(struct amdgpu_device *adev)
@@ -1159,18 +1162,54 @@ static void gfx_v9_0_check_fw_write_wait(struct amdgpu_device *adev)
}
}
+struct amdgpu_gfxoff_quirk {
+ u16 chip_vendor;
+ u16 chip_device;
+ u16 subsys_vendor;
+ u16 subsys_device;
+ u8 revision;
+};
+
+static const struct amdgpu_gfxoff_quirk amdgpu_gfxoff_quirk_list[] = {
+ /* https://bugzilla.kernel.org/show_bug.cgi?id=204689 */
+ { 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
+ { 0, 0, 0, 0, 0 },
+};
+
+static bool gfx_v9_0_should_disable_gfxoff(struct pci_dev *pdev)
+{
+ const struct amdgpu_gfxoff_quirk *p = amdgpu_gfxoff_quirk_list;
+
+ while (p && p->chip_device != 0) {
+ if (pdev->vendor == p->chip_vendor &&
+ pdev->device == p->chip_device &&
+ pdev->subsystem_vendor == p->subsys_vendor &&
+ pdev->subsystem_device == p->subsys_device &&
+ pdev->revision == p->revision) {
+ return true;
+ }
+ ++p;
+ }
+ return false;
+}
+
static void gfx_v9_0_check_if_need_gfxoff(struct amdgpu_device *adev)
{
+ if (gfx_v9_0_should_disable_gfxoff(adev->pdev))
+ adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
+
switch (adev->asic_type) {
case CHIP_VEGA10:
case CHIP_VEGA12:
case CHIP_VEGA20:
break;
case CHIP_RAVEN:
- if (!(adev->rev_id >= 0x8 ||
- adev->pdev->device == 0x15d8) &&
- (adev->pm.fw_version < 0x41e2b || /* not raven1 fresh */
- !adev->gfx.rlc.is_rlc_v2_1)) /* without rlc save restore ucodes */
+ if (!(adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8) &&
+ ((adev->gfx.rlc_fw_version != 106 &&
+ adev->gfx.rlc_fw_version < 531) ||
+ (adev->gfx.rlc_fw_version == 53815) ||
+ (adev->gfx.rlc_feature_version < 1) ||
+ !adev->gfx.rlc.is_rlc_v2_1))
adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
if (adev->pm.pp_feature & PP_GFXOFF_MASK)
@@ -1949,6 +1988,17 @@ static const struct amdgpu_gfx_funcs gfx_v9_0_gfx_funcs = {
.query_ras_error_count = &gfx_v9_0_query_ras_error_count
};
+static const struct amdgpu_gfx_funcs gfx_v9_4_gfx_funcs = {
+ .get_gpu_clock_counter = &gfx_v9_0_get_gpu_clock_counter,
+ .select_se_sh = &gfx_v9_0_select_se_sh,
+ .read_wave_data = &gfx_v9_0_read_wave_data,
+ .read_wave_sgprs = &gfx_v9_0_read_wave_sgprs,
+ .read_wave_vgprs = &gfx_v9_0_read_wave_vgprs,
+ .select_me_pipe_q = &gfx_v9_0_select_me_pipe_q,
+ .ras_error_inject = &gfx_v9_4_ras_error_inject,
+ .query_ras_error_count = &gfx_v9_4_query_ras_error_count
+};
+
static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
{
u32 gb_addr_config;
@@ -2000,6 +2050,7 @@ static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
gb_addr_config = RAVEN_GB_ADDR_CONFIG_GOLDEN;
break;
case CHIP_ARCTURUS:
+ adev->gfx.funcs = &gfx_v9_4_gfx_funcs;
adev->gfx.config.max_hw_contexts = 8;
adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
@@ -2390,6 +2441,22 @@ static void gfx_v9_0_init_gds_vmid(struct amdgpu_device *adev)
}
}
+static void gfx_v9_0_init_sq_config(struct amdgpu_device *adev)
+{
+ uint32_t tmp;
+
+ switch (adev->asic_type) {
+ case CHIP_ARCTURUS:
+ tmp = RREG32_SOC15(GC, 0, mmSQ_CONFIG);
+ tmp = REG_SET_FIELD(tmp, SQ_CONFIG,
+ DISABLE_BARRIER_WAITCNT, 1);
+ WREG32_SOC15(GC, 0, mmSQ_CONFIG, tmp);
+ break;
+ default:
+ break;
+ };
+}
+
static void gfx_v9_0_constants_init(struct amdgpu_device *adev)
{
u32 tmp;
@@ -2435,6 +2502,7 @@ static void gfx_v9_0_constants_init(struct amdgpu_device *adev)
gfx_v9_0_init_compute_vmid(adev);
gfx_v9_0_init_gds_vmid(adev);
+ gfx_v9_0_init_sq_config(adev);
}
static void gfx_v9_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
@@ -4029,7 +4097,7 @@ static const struct soc15_reg_entry sgpr2_init_regs[] = {
{ SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE7), 0x0000ff00 },
};
-static const struct soc15_reg_entry sec_ded_counter_registers[] = {
+static const struct soc15_reg_entry gfx_v9_0_edc_counter_regs[] = {
{ SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_SCRATCH_CNT), 0, 1, 1},
{ SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_UCODE_CNT), 0, 1, 1},
{ SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT), 0, 1, 1},
@@ -4118,7 +4186,7 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
struct amdgpu_ring *ring = &adev->gfx.compute_ring[0];
struct amdgpu_ib ib;
struct dma_fence *f = NULL;
- int r, i, j, k;
+ int r, i;
unsigned total_size, vgpr_offset, sgpr_offset;
u64 gpu_addr;
@@ -4264,18 +4332,17 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
goto fail;
}
- /* read back registers to clear the counters */
- mutex_lock(&adev->grbm_idx_mutex);
- for (i = 0; i < ARRAY_SIZE(sec_ded_counter_registers); i++) {
- for (j = 0; j < sec_ded_counter_registers[i].se_num; j++) {
- for (k = 0; k < sec_ded_counter_registers[i].instance; k++) {
- gfx_v9_0_select_se_sh(adev, j, 0x0, k);
- RREG32(SOC15_REG_ENTRY_OFFSET(sec_ded_counter_registers[i]));
- }
- }
+ switch (adev->asic_type)
+ {
+ case CHIP_VEGA20:
+ gfx_v9_0_clear_ras_edc_counter(adev);
+ break;
+ case CHIP_ARCTURUS:
+ gfx_v9_4_clear_ras_edc_counter(adev);
+ break;
+ default:
+ break;
}
- WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, 0xe0000000);
- mutex_unlock(&adev->grbm_idx_mutex);
fail:
amdgpu_ib_free(adev, &ib, NULL);
@@ -4638,7 +4705,7 @@ static int gfx_v9_0_set_powergating_state(void *handle,
enum amd_powergating_state state)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- bool enable = (state == AMD_PG_STATE_GATE) ? true : false;
+ bool enable = (state == AMD_PG_STATE_GATE);
switch (adev->asic_type) {
case CHIP_RAVEN:
@@ -4700,7 +4767,7 @@ static int gfx_v9_0_set_clockgating_state(void *handle,
case CHIP_ARCTURUS:
case CHIP_RENOIR:
gfx_v9_0_update_gfx_clock_gating(adev,
- state == AMD_CG_STATE_GATE ? true : false);
+ state == AMD_CG_STATE_GATE);
break;
default:
break;
@@ -4717,12 +4784,12 @@ static void gfx_v9_0_get_clockgating_state(void *handle, u32 *flags)
*flags = 0;
/* AMD_CG_SUPPORT_GFX_MGCG */
- data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
+ data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE));
if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK))
*flags |= AMD_CG_SUPPORT_GFX_MGCG;
/* AMD_CG_SUPPORT_GFX_CGCG */
- data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL);
+ data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_CGCG_CGLS_CTRL));
if (data & RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK)
*flags |= AMD_CG_SUPPORT_GFX_CGCG;
@@ -4731,18 +4798,18 @@ static void gfx_v9_0_get_clockgating_state(void *handle, u32 *flags)
*flags |= AMD_CG_SUPPORT_GFX_CGLS;
/* AMD_CG_SUPPORT_GFX_RLC_LS */
- data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL);
+ data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_MEM_SLP_CNTL));
if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK)
*flags |= AMD_CG_SUPPORT_GFX_RLC_LS | AMD_CG_SUPPORT_GFX_MGLS;
/* AMD_CG_SUPPORT_GFX_CP_LS */
- data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL);
+ data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmCP_MEM_SLP_CNTL));
if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK)
*flags |= AMD_CG_SUPPORT_GFX_CP_LS | AMD_CG_SUPPORT_GFX_MGLS;
if (adev->asic_type != CHIP_ARCTURUS) {
/* AMD_CG_SUPPORT_GFX_3D_CGCG */
- data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
+ data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D));
if (data & RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK)
*flags |= AMD_CG_SUPPORT_GFX_3D_CGCG;
@@ -5213,6 +5280,7 @@ static void gfx_v9_0_ring_emit_patch_cond_exec(struct amdgpu_ring *ring, unsigne
static void gfx_v9_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg)
{
struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_kiq *kiq = &adev->gfx.kiq;
amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
amdgpu_ring_write(ring, 0 | /* src: register*/
@@ -5221,9 +5289,9 @@ static void gfx_v9_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg)
amdgpu_ring_write(ring, reg);
amdgpu_ring_write(ring, 0);
amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
- adev->virt.reg_val_offs * 4));
+ kiq->reg_val_offs * 4));
amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
- adev->virt.reg_val_offs * 4));
+ kiq->reg_val_offs * 4));
}
static void gfx_v9_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
@@ -5545,7 +5613,7 @@ static int gfx_v9_0_priv_inst_irq(struct amdgpu_device *adev,
}
-static const struct soc15_ras_field_entry gc_ras_fields_vg20[] = {
+static const struct soc15_ras_field_entry gfx_v9_0_ras_fields[] = {
{ "CPC_SCRATCH", SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_SCRATCH_CNT),
SOC15_REG_FIELD(CPC_EDC_SCRATCH_CNT, SEC_COUNT),
SOC15_REG_FIELD(CPC_EDC_SCRATCH_CNT, DED_COUNT)
@@ -5993,7 +6061,7 @@ static int gfx_v9_0_ras_error_inject(struct amdgpu_device *adev,
int ret;
struct ta_ras_trigger_error_input block_info = { 0 };
- if (adev->asic_type != CHIP_VEGA20)
+ if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
return -EINVAL;
if (info->head.sub_block_index >= ARRAY_SIZE(ras_gfx_subblocks))
@@ -6118,7 +6186,7 @@ static int gfx_v9_0_query_utc_edc_status(struct amdgpu_device *adev,
WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, 255);
WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_CNT, 0);
- for (i = 0; i < 16; i++) {
+ for (i = 0; i < ARRAY_SIZE(vml2_mems); i++) {
WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, i);
data = RREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_CNT);
@@ -6137,7 +6205,7 @@ static int gfx_v9_0_query_utc_edc_status(struct amdgpu_device *adev,
}
}
- for (i = 0; i < 7; i++) {
+ for (i = 0; i < ARRAY_SIZE(vml2_walker_mems); i++) {
WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, i);
data = RREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_CNT);
@@ -6158,7 +6226,7 @@ static int gfx_v9_0_query_utc_edc_status(struct amdgpu_device *adev,
}
}
- for (i = 0; i < 4; i++) {
+ for (i = 0; i < ARRAY_SIZE(atc_l2_cache_2m_mems); i++) {
WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, i);
data = RREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_CNT);
@@ -6170,7 +6238,7 @@ static int gfx_v9_0_query_utc_edc_status(struct amdgpu_device *adev,
}
}
- for (i = 0; i < 32; i++) {
+ for (i = 0; i < ARRAY_SIZE(atc_l2_cache_4k_mems); i++) {
WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, i);
data = RREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_CNT);
@@ -6197,36 +6265,36 @@ static int gfx_v9_0_query_utc_edc_status(struct amdgpu_device *adev,
return 0;
}
-static int __get_ras_error_count(const struct soc15_reg_entry *reg,
+static int gfx_v9_0_ras_error_count(const struct soc15_reg_entry *reg,
uint32_t se_id, uint32_t inst_id, uint32_t value,
uint32_t *sec_count, uint32_t *ded_count)
{
uint32_t i;
uint32_t sec_cnt, ded_cnt;
- for (i = 0; i < ARRAY_SIZE(gc_ras_fields_vg20); i++) {
- if(gc_ras_fields_vg20[i].reg_offset != reg->reg_offset ||
- gc_ras_fields_vg20[i].seg != reg->seg ||
- gc_ras_fields_vg20[i].inst != reg->inst)
+ for (i = 0; i < ARRAY_SIZE(gfx_v9_0_ras_fields); i++) {
+ if(gfx_v9_0_ras_fields[i].reg_offset != reg->reg_offset ||
+ gfx_v9_0_ras_fields[i].seg != reg->seg ||
+ gfx_v9_0_ras_fields[i].inst != reg->inst)
continue;
sec_cnt = (value &
- gc_ras_fields_vg20[i].sec_count_mask) >>
- gc_ras_fields_vg20[i].sec_count_shift;
+ gfx_v9_0_ras_fields[i].sec_count_mask) >>
+ gfx_v9_0_ras_fields[i].sec_count_shift;
if (sec_cnt) {
DRM_INFO("GFX SubBlock %s, Instance[%d][%d], SEC %d\n",
- gc_ras_fields_vg20[i].name,
+ gfx_v9_0_ras_fields[i].name,
se_id, inst_id,
sec_cnt);
*sec_count += sec_cnt;
}
ded_cnt = (value &
- gc_ras_fields_vg20[i].ded_count_mask) >>
- gc_ras_fields_vg20[i].ded_count_shift;
+ gfx_v9_0_ras_fields[i].ded_count_mask) >>
+ gfx_v9_0_ras_fields[i].ded_count_shift;
if (ded_cnt) {
DRM_INFO("GFX SubBlock %s, Instance[%d][%d], DED %d\n",
- gc_ras_fields_vg20[i].name,
+ gfx_v9_0_ras_fields[i].name,
se_id, inst_id,
ded_cnt);
*ded_count += ded_cnt;
@@ -6236,6 +6304,58 @@ static int __get_ras_error_count(const struct soc15_reg_entry *reg,
return 0;
}
+static void gfx_v9_0_clear_ras_edc_counter(struct amdgpu_device *adev)
+{
+ int i, j, k;
+
+ /* read back registers to clear the counters */
+ mutex_lock(&adev->grbm_idx_mutex);
+ for (i = 0; i < ARRAY_SIZE(gfx_v9_0_edc_counter_regs); i++) {
+ for (j = 0; j < gfx_v9_0_edc_counter_regs[i].se_num; j++) {
+ for (k = 0; k < gfx_v9_0_edc_counter_regs[i].instance; k++) {
+ gfx_v9_0_select_se_sh(adev, j, 0x0, k);
+ RREG32(SOC15_REG_ENTRY_OFFSET(gfx_v9_0_edc_counter_regs[i]));
+ }
+ }
+ }
+ WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, 0xe0000000);
+ mutex_unlock(&adev->grbm_idx_mutex);
+
+ WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, 255);
+ WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_CNT, 0);
+ WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, 255);
+ WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_CNT, 0);
+ WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, 255);
+ WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_CNT, 0);
+ WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, 255);
+ WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_CNT, 0);
+
+ for (i = 0; i < ARRAY_SIZE(vml2_mems); i++) {
+ WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, i);
+ RREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_CNT);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(vml2_walker_mems); i++) {
+ WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, i);
+ RREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_CNT);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(atc_l2_cache_2m_mems); i++) {
+ WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, i);
+ RREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_CNT);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(atc_l2_cache_4k_mems); i++) {
+ WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, i);
+ RREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_CNT);
+ }
+
+ WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, 255);
+ WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, 255);
+ WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, 255);
+ WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, 255);
+}
+
static int gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev,
void *ras_error_status)
{
@@ -6244,7 +6364,7 @@ static int gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev,
uint32_t i, j, k;
uint32_t reg_value;
- if (adev->asic_type != CHIP_VEGA20)
+ if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
return -EINVAL;
err_data->ue_count = 0;
@@ -6252,14 +6372,14 @@ static int gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev,
mutex_lock(&adev->grbm_idx_mutex);
- for (i = 0; i < ARRAY_SIZE(sec_ded_counter_registers); i++) {
- for (j = 0; j < sec_ded_counter_registers[i].se_num; j++) {
- for (k = 0; k < sec_ded_counter_registers[i].instance; k++) {
+ for (i = 0; i < ARRAY_SIZE(gfx_v9_0_edc_counter_regs); i++) {
+ for (j = 0; j < gfx_v9_0_edc_counter_regs[i].se_num; j++) {
+ for (k = 0; k < gfx_v9_0_edc_counter_regs[i].instance; k++) {
gfx_v9_0_select_se_sh(adev, j, 0, k);
reg_value =
- RREG32(SOC15_REG_ENTRY_OFFSET(sec_ded_counter_registers[i]));
+ RREG32(SOC15_REG_ENTRY_OFFSET(gfx_v9_0_edc_counter_regs[i]));
if (reg_value)
- __get_ras_error_count(&sec_ded_counter_registers[i],
+ gfx_v9_0_ras_error_count(&gfx_v9_0_edc_counter_regs[i],
j, k, reg_value,
&sec_count, &ded_count);
}
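The GFX EDC counters are read-to-clear, which is why gfx_v9_0_clear_ras_edc_counter() only selects an index and reads each CNT register without using the value. A condensed sketch of that idiom for one bank, mirroring the VML2 loop above:

    for (i = 0; i < ARRAY_SIZE(vml2_mems); i++) {
            WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, i);
            /* The read itself zeroes the selected counter; discard it. */
            (void)RREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_CNT);
    }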
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c
new file mode 100644
index 000000000000..f099f13d7f1e
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c
@@ -0,0 +1,978 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/kernel.h>
+
+#include "amdgpu.h"
+#include "amdgpu_gfx.h"
+#include "soc15.h"
+#include "soc15d.h"
+#include "amdgpu_atomfirmware.h"
+#include "amdgpu_pm.h"
+
+#include "gc/gc_9_4_1_offset.h"
+#include "gc/gc_9_4_1_sh_mask.h"
+#include "soc15_common.h"
+
+#include "gfx_v9_4.h"
+#include "amdgpu_ras.h"
+
+static const struct soc15_reg_entry gfx_v9_4_edc_counter_regs[] = {
+ /* CPC */
+ { SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_SCRATCH_CNT), 0, 1, 1 },
+ { SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_UCODE_CNT), 0, 1, 1 },
+ /* DC */
+ { SOC15_REG_ENTRY(GC, 0, mmDC_EDC_STATE_CNT), 0, 1, 1 },
+ { SOC15_REG_ENTRY(GC, 0, mmDC_EDC_CSINVOC_CNT), 0, 1, 1 },
+ { SOC15_REG_ENTRY(GC, 0, mmDC_EDC_RESTORE_CNT), 0, 1, 1 },
+ /* CPF */
+ { SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT), 0, 1, 1 },
+ { SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_TAG_CNT), 0, 1, 1 },
+ /* GDS */
+ { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_CNT), 0, 1, 1 },
+ { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_GRBM_CNT), 0, 1, 1 },
+ { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_DED), 0, 1, 1 },
+ { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT), 0, 1, 1 },
+ { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT), 0, 1, 1 },
+ /* SPI */
+ { SOC15_REG_ENTRY(GC, 0, mmSPI_EDC_CNT), 0, 4, 1 },
+ /* SQ */
+ { SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), 0, 4, 16 },
+ { SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_DED_CNT), 0, 4, 16 },
+ { SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_INFO), 0, 4, 16 },
+ { SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_SEC_CNT), 0, 4, 16 },
+ /* SQC */
+ { SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT), 0, 4, 6 },
+ { SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 0, 4, 6 },
+ { SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 0, 4, 6 },
+ { SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_PARITY_CNT3), 0, 4, 6 },
+ /* TA */
+ { SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT), 0, 4, 16 },
+ /* TCA */
+ { SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT), 0, 1, 2 },
+ /* TCC */
+ { SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 1, 16 },
+ { SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), 0, 1, 16 },
+ /* TCI */
+ { SOC15_REG_ENTRY(GC, 0, mmTCI_EDC_CNT), 0, 1, 72 },
+ /* TCP */
+ { SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), 0, 4, 16 },
+ { SOC15_REG_ENTRY(GC, 0, mmTCP_ATC_EDC_GATCL1_CNT), 0, 4, 16 },
+ { SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT), 0, 4, 16 },
+ /* TD */
+ { SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT), 0, 4, 16 },
+ /* GCEA */
+ { SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 0, 1, 32 },
+ { SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), 0, 1, 32 },
+ { SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3), 0, 1, 32 },
+ /* RLC */
+ { SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT), 0, 1, 1 },
+ { SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT2), 0, 1, 1 },
+};
+
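+/*
+ * Program GRBM_GFX_INDEX to select a single SE/SH/instance, or broadcast
+ * to all of them when 0xffffffff is passed for the corresponding argument.
+ */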
+static void gfx_v9_4_select_se_sh(struct amdgpu_device *adev, u32 se_num,
+ u32 sh_num, u32 instance)
+{
+ u32 data;
+
+ if (instance == 0xffffffff)
+ data = REG_SET_FIELD(0, GRBM_GFX_INDEX,
+ INSTANCE_BROADCAST_WRITES, 1);
+ else
+ data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX,
+ instance);
+
+ if (se_num == 0xffffffff)
+ data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES,
+ 1);
+ else
+ data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);
+
+ if (sh_num == 0xffffffff)
+ data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES,
+ 1);
+ else
+ data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num);
+
+ WREG32_SOC15_RLC_SHADOW(GC, 0, mmGRBM_GFX_INDEX, data);
+}
+
+static const struct soc15_ras_field_entry gfx_v9_4_ras_fields[] = {
+ /* CPC */
+ { "CPC_SCRATCH", SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_SCRATCH_CNT),
+ SOC15_REG_FIELD(CPC_EDC_SCRATCH_CNT, SEC_COUNT),
+ SOC15_REG_FIELD(CPC_EDC_SCRATCH_CNT, DED_COUNT) },
+ { "CPC_UCODE", SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_UCODE_CNT),
+ SOC15_REG_FIELD(CPC_EDC_UCODE_CNT, SEC_COUNT),
+ SOC15_REG_FIELD(CPC_EDC_UCODE_CNT, DED_COUNT) },
+ { "CPC_DC_STATE_RAM_ME1", SOC15_REG_ENTRY(GC, 0, mmDC_EDC_STATE_CNT),
+ SOC15_REG_FIELD(DC_EDC_STATE_CNT, SEC_COUNT_ME1),
+ SOC15_REG_FIELD(DC_EDC_STATE_CNT, DED_COUNT_ME1) },
+ { "CPC_DC_CSINVOC_RAM_ME1",
+ SOC15_REG_ENTRY(GC, 0, mmDC_EDC_CSINVOC_CNT),
+ SOC15_REG_FIELD(DC_EDC_CSINVOC_CNT, SEC_COUNT_ME1),
+ SOC15_REG_FIELD(DC_EDC_CSINVOC_CNT, DED_COUNT_ME1) },
+ { "CPC_DC_RESTORE_RAM_ME1",
+ SOC15_REG_ENTRY(GC, 0, mmDC_EDC_RESTORE_CNT),
+ SOC15_REG_FIELD(DC_EDC_RESTORE_CNT, SEC_COUNT_ME1),
+ SOC15_REG_FIELD(DC_EDC_RESTORE_CNT, DED_COUNT_ME1) },
+ { "CPC_DC_CSINVOC_RAM1_ME1",
+ SOC15_REG_ENTRY(GC, 0, mmDC_EDC_CSINVOC_CNT),
+ SOC15_REG_FIELD(DC_EDC_CSINVOC_CNT, SEC_COUNT1_ME1),
+ SOC15_REG_FIELD(DC_EDC_CSINVOC_CNT, DED_COUNT1_ME1) },
+ { "CPC_DC_RESTORE_RAM1_ME1",
+ SOC15_REG_ENTRY(GC, 0, mmDC_EDC_RESTORE_CNT),
+ SOC15_REG_FIELD(DC_EDC_RESTORE_CNT, SEC_COUNT1_ME1),
+ SOC15_REG_FIELD(DC_EDC_RESTORE_CNT, DED_COUNT1_ME1) },
+
+ /* CPF */
+ { "CPF_ROQ_ME2", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT),
+ SOC15_REG_FIELD(CPF_EDC_ROQ_CNT, SEC_COUNT_ME2),
+ SOC15_REG_FIELD(CPF_EDC_ROQ_CNT, DED_COUNT_ME2) },
+ { "CPF_ROQ_ME1", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT),
+ SOC15_REG_FIELD(CPF_EDC_ROQ_CNT, SEC_COUNT_ME1),
+ SOC15_REG_FIELD(CPF_EDC_ROQ_CNT, DED_COUNT_ME1) },
+ { "CPF_TCIU_TAG", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_TAG_CNT),
+ SOC15_REG_FIELD(CPF_EDC_TAG_CNT, SEC_COUNT),
+ SOC15_REG_FIELD(CPF_EDC_TAG_CNT, DED_COUNT) },
+
+ /* GDS */
+ { "GDS_GRBM", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_GRBM_CNT),
+ SOC15_REG_FIELD(GDS_EDC_GRBM_CNT, SEC),
+ SOC15_REG_FIELD(GDS_EDC_GRBM_CNT, DED) },
+ { "GDS_MEM", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_CNT),
+ SOC15_REG_FIELD(GDS_EDC_CNT, GDS_MEM_SEC),
+ SOC15_REG_FIELD(GDS_EDC_CNT, GDS_MEM_DED) },
+ { "GDS_PHY_CMD_RAM_MEM", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT),
+ SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_CMD_RAM_MEM_SEC),
+ SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_CMD_RAM_MEM_DED) },
+ { "GDS_PHY_DATA_RAM_MEM", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT),
+ SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_DATA_RAM_MEM_SEC),
+ SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_DATA_RAM_MEM_DED) },
+ { "GDS_ME0_CS_PIPE_MEM", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT),
+ SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, ME0_CS_PIPE_MEM_SEC),
+ SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, ME0_CS_PIPE_MEM_DED) },
+ { "GDS_ME1_PIPE0_PIPE_MEM",
+ SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT),
+ SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE0_PIPE_MEM_SEC),
+ SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE0_PIPE_MEM_DED) },
+ { "GDS_ME1_PIPE1_PIPE_MEM",
+ SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT),
+ SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE1_PIPE_MEM_SEC),
+ SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE1_PIPE_MEM_DED) },
+ { "GDS_ME1_PIPE2_PIPE_MEM",
+ SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT),
+ SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE2_PIPE_MEM_SEC),
+ SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE2_PIPE_MEM_DED) },
+ { "GDS_ME1_PIPE3_PIPE_MEM",
+ SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT),
+ SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE3_PIPE_MEM_SEC),
+ SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE3_PIPE_MEM_DED) },
+
+ /* SPI */
+ { "SPI_SR_MEM", SOC15_REG_ENTRY(GC, 0, mmSPI_EDC_CNT),
+ SOC15_REG_FIELD(SPI_EDC_CNT, SPI_SR_MEM_SEC_COUNT),
+ SOC15_REG_FIELD(SPI_EDC_CNT, SPI_SR_MEM_DED_COUNT) },
+ { "SPI_GDS_EXPREQ", SOC15_REG_ENTRY(GC, 0, mmSPI_EDC_CNT),
+ SOC15_REG_FIELD(SPI_EDC_CNT, SPI_GDS_EXPREQ_SEC_COUNT),
+ SOC15_REG_FIELD(SPI_EDC_CNT, SPI_GDS_EXPREQ_DED_COUNT) },
+ { "SPI_WB_GRANT_30", SOC15_REG_ENTRY(GC, 0, mmSPI_EDC_CNT),
+ SOC15_REG_FIELD(SPI_EDC_CNT, SPI_WB_GRANT_30_SEC_COUNT),
+ SOC15_REG_FIELD(SPI_EDC_CNT, SPI_WB_GRANT_30_DED_COUNT) },
+ { "SPI_WB_GRANT_61", SOC15_REG_ENTRY(GC, 0, mmSPI_EDC_CNT),
+ SOC15_REG_FIELD(SPI_EDC_CNT, SPI_WB_GRANT_61_SEC_COUNT),
+ SOC15_REG_FIELD(SPI_EDC_CNT, SPI_WB_GRANT_61_DED_COUNT) },
+ { "SPI_LIFE_CNT", SOC15_REG_ENTRY(GC, 0, mmSPI_EDC_CNT),
+ SOC15_REG_FIELD(SPI_EDC_CNT, SPI_LIFE_CNT_SEC_COUNT),
+ SOC15_REG_FIELD(SPI_EDC_CNT, SPI_LIFE_CNT_DED_COUNT) },
+
+ /* SQ */
+ { "SQ_SGPR", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, SGPR_SEC_COUNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, SGPR_DED_COUNT) },
+ { "SQ_LDS_D", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, LDS_D_SEC_COUNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, LDS_D_DED_COUNT) },
+ { "SQ_LDS_I", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, LDS_I_SEC_COUNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, LDS_I_DED_COUNT) },
+ { "SQ_VGPR0", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, VGPR0_SEC_COUNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, VGPR0_DED_COUNT) },
+ { "SQ_VGPR1", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, VGPR1_SEC_COUNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, VGPR1_DED_COUNT) },
+ { "SQ_VGPR2", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, VGPR2_SEC_COUNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, VGPR2_DED_COUNT) },
+ { "SQ_VGPR3", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, VGPR3_SEC_COUNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, VGPR3_DED_COUNT) },
+
+ /* SQC */
+ { "SQC_INST_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
+ SOC15_REG_FIELD(SQC_EDC_CNT2, INST_UTCL1_LFIFO_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT2, INST_UTCL1_LFIFO_DED_COUNT) },
+ { "SQC_DATA_CU0_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_WRITE_DATA_BUF_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_WRITE_DATA_BUF_DED_COUNT) },
+ { "SQC_DATA_CU0_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_UTCL1_LFIFO_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_UTCL1_LFIFO_DED_COUNT) },
+ { "SQC_DATA_CU1_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_WRITE_DATA_BUF_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_WRITE_DATA_BUF_DED_COUNT) },
+ { "SQC_DATA_CU1_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_UTCL1_LFIFO_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_UTCL1_LFIFO_DED_COUNT) },
+ { "SQC_DATA_CU2_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_WRITE_DATA_BUF_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_WRITE_DATA_BUF_DED_COUNT) },
+ { "SQC_DATA_CU2_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_UTCL1_LFIFO_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_UTCL1_LFIFO_DED_COUNT) },
+ { "SQC_INST_BANKA_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
+ SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_TAG_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_TAG_RAM_DED_COUNT) },
+ { "SQC_INST_BANKA_UTCL1_MISS_FIFO",
+ SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_PARITY_CNT3),
+ SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3,
+ INST_BANKA_UTCL1_MISS_FIFO_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3,
+ INST_BANKA_UTCL1_MISS_FIFO_DED_COUNT) },
+ { "SQC_INST_BANKA_MISS_FIFO",
+ SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_PARITY_CNT3),
+ SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, INST_BANKA_MISS_FIFO_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3,
+ INST_BANKA_MISS_FIFO_DED_COUNT) },
+ { "SQC_INST_BANKA_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
+ SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_BANK_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_BANK_RAM_DED_COUNT) },
+ { "SQC_DATA_BANKA_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
+ SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_TAG_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_TAG_RAM_DED_COUNT) },
+ { "SQC_DATA_BANKA_HIT_FIFO",
+ SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_PARITY_CNT3),
+ SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, DATA_BANKA_HIT_FIFO_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, DATA_BANKA_HIT_FIFO_DED_COUNT) },
+ { "SQC_DATA_BANKA_MISS_FIFO",
+ SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_PARITY_CNT3),
+ SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, DATA_BANKA_MISS_FIFO_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3,
+ DATA_BANKA_MISS_FIFO_DED_COUNT) },
+ { "SQC_DATA_BANKA_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
+ SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_BANK_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_BANK_RAM_DED_COUNT) },
+ { "SQC_INST_BANKB_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
+ SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_TAG_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_TAG_RAM_DED_COUNT) },
+ { "SQC_INST_BANKB_UTCL1_MISS_FIFO",
+ SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_PARITY_CNT3),
+ SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3,
+ INST_BANKB_UTCL1_MISS_FIFO_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3,
+ INST_BANKB_UTCL1_MISS_FIFO_DED_COUNT) },
+ { "SQC_INST_BANKB_MISS_FIFO",
+ SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_PARITY_CNT3),
+ SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, INST_BANKB_MISS_FIFO_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3,
+ INST_BANKB_MISS_FIFO_DED_COUNT) },
+ { "SQC_INST_BANKB_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
+ SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_BANK_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_BANK_RAM_DED_COUNT) },
+ { "SQC_DATA_BANKB_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
+ SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_TAG_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_TAG_RAM_DED_COUNT) },
+ { "SQC_DATA_BANKB_HIT_FIFO",
+ SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_PARITY_CNT3),
+ SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, DATA_BANKB_HIT_FIFO_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, DATA_BANKB_HIT_FIFO_DED_COUNT) },
+ { "SQC_DATA_BANKB_MISS_FIFO",
+ SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_PARITY_CNT3),
+ SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, DATA_BANKB_MISS_FIFO_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3,
+ DATA_BANKB_MISS_FIFO_DED_COUNT) },
+ { "SQC_DATA_BANKB_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
+ SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_BANK_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_BANK_RAM_DED_COUNT) },
+
+ /* TA */
+ { "TA_FS_DFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
+ SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_DFIFO_SEC_COUNT),
+ SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_DFIFO_DED_COUNT) },
+ { "TA_FS_AFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
+ SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_AFIFO_SEC_COUNT),
+ SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_AFIFO_DED_COUNT) },
+ { "TA_FL_LFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
+ SOC15_REG_FIELD(TA_EDC_CNT, TA_FL_LFIFO_SEC_COUNT),
+ SOC15_REG_FIELD(TA_EDC_CNT, TA_FL_LFIFO_DED_COUNT) },
+ { "TA_FX_LFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
+ SOC15_REG_FIELD(TA_EDC_CNT, TA_FX_LFIFO_SEC_COUNT),
+ SOC15_REG_FIELD(TA_EDC_CNT, TA_FX_LFIFO_DED_COUNT) },
+ { "TA_FS_CFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
+ SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_CFIFO_SEC_COUNT),
+ SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_CFIFO_DED_COUNT) },
+
+ /* TCA */
+ { "TCA_HOLE_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT),
+ SOC15_REG_FIELD(TCA_EDC_CNT, HOLE_FIFO_SEC_COUNT),
+ SOC15_REG_FIELD(TCA_EDC_CNT, HOLE_FIFO_DED_COUNT) },
+ { "TCA_REQ_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT),
+ SOC15_REG_FIELD(TCA_EDC_CNT, REQ_FIFO_SEC_COUNT),
+ SOC15_REG_FIELD(TCA_EDC_CNT, REQ_FIFO_DED_COUNT) },
+
+ /* TCC */
+ { "TCC_CACHE_DATA", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DATA_SEC_COUNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DATA_DED_COUNT) },
+ { "TCC_CACHE_DIRTY", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DIRTY_SEC_COUNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DIRTY_DED_COUNT) },
+ { "TCC_HIGH_RATE_TAG", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, HIGH_RATE_TAG_SEC_COUNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, HIGH_RATE_TAG_DED_COUNT) },
+ { "TCC_LOW_RATE_TAG", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, LOW_RATE_TAG_SEC_COUNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, LOW_RATE_TAG_DED_COUNT) },
+ { "TCC_IN_USE_DEC", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
+ SOC15_REG_FIELD(TCC_EDC_CNT2, IN_USE_DEC_SEC_COUNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT2, IN_USE_DEC_DED_COUNT) },
+ { "TCC_IN_USE_TRANSFER", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
+ SOC15_REG_FIELD(TCC_EDC_CNT2, IN_USE_TRANSFER_SEC_COUNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT2, IN_USE_TRANSFER_DED_COUNT) },
+ { "TCC_RETURN_DATA", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
+ SOC15_REG_FIELD(TCC_EDC_CNT2, RETURN_DATA_SEC_COUNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT2, RETURN_DATA_DED_COUNT) },
+ { "TCC_RETURN_CONTROL", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
+ SOC15_REG_FIELD(TCC_EDC_CNT2, RETURN_CONTROL_SEC_COUNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT2, RETURN_CONTROL_DED_COUNT) },
+ { "TCC_UC_ATOMIC_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
+ SOC15_REG_FIELD(TCC_EDC_CNT2, UC_ATOMIC_FIFO_SEC_COUNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT2, UC_ATOMIC_FIFO_DED_COUNT) },
+ { "TCC_WRITE_RETURN", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
+ SOC15_REG_FIELD(TCC_EDC_CNT2, WRITE_RETURN_SEC_COUNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT2, WRITE_RETURN_DED_COUNT) },
+ { "TCC_WRITE_CACHE_READ", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
+ SOC15_REG_FIELD(TCC_EDC_CNT2, WRITE_CACHE_READ_SEC_COUNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT2, WRITE_CACHE_READ_DED_COUNT) },
+ { "TCC_SRC_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, SRC_FIFO_SEC_COUNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, SRC_FIFO_DED_COUNT) },
+ { "TCC_CACHE_TAG_PROBE_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
+ SOC15_REG_FIELD(TCC_EDC_CNT2, CACHE_TAG_PROBE_FIFO_SEC_COUNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT2, CACHE_TAG_PROBE_FIFO_DED_COUNT) },
+ { "TCC_LATENCY_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, LATENCY_FIFO_SEC_COUNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, LATENCY_FIFO_DED_COUNT) },
+ { "TCC_LATENCY_FIFO_NEXT_RAM", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, LATENCY_FIFO_NEXT_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, LATENCY_FIFO_NEXT_RAM_DED_COUNT) },
+
+ /* TCI */
+ { "TCI_WRITE_RAM", SOC15_REG_ENTRY(GC, 0, mmTCI_EDC_CNT),
+ SOC15_REG_FIELD(TCI_EDC_CNT, WRITE_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(TCI_EDC_CNT, WRITE_RAM_DED_COUNT) },
+
+ /* TCP */
+ { "TCP_CACHE_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
+ SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CACHE_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CACHE_RAM_DED_COUNT) },
+ { "TCP_LFIFO_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
+ SOC15_REG_FIELD(TCP_EDC_CNT_NEW, LFIFO_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(TCP_EDC_CNT_NEW, LFIFO_RAM_DED_COUNT) },
+ { "TCP_CMD_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
+ SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CMD_FIFO_SEC_COUNT),
+ SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CMD_FIFO_DED_COUNT) },
+ { "TCP_VM_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
+ SOC15_REG_FIELD(TCP_EDC_CNT_NEW, VM_FIFO_SEC_COUNT),
+ SOC15_REG_FIELD(TCP_EDC_CNT_NEW, VM_FIFO_DED_COUNT) },
+ { "TCP_DB_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
+ SOC15_REG_FIELD(TCP_EDC_CNT_NEW, DB_RAM_SED_COUNT), 0, 0 },
+ { "TCP_UTCL1_LFIFO0", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
+ SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO0_SEC_COUNT),
+ SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO0_DED_COUNT) },
+ { "TCP_UTCL1_LFIFO1", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
+ SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO1_SEC_COUNT),
+ SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO1_DED_COUNT) },
+
+ /* TD */
+ { "TD_SS_FIFO_LO", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT),
+ SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_LO_SEC_COUNT),
+ SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_LO_DED_COUNT) },
+ { "TD_SS_FIFO_HI", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT),
+ SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_HI_SEC_COUNT),
+ SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_HI_DED_COUNT) },
+ { "TD_CS_FIFO", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT),
+ SOC15_REG_FIELD(TD_EDC_CNT, CS_FIFO_SEC_COUNT),
+ SOC15_REG_FIELD(TD_EDC_CNT, CS_FIFO_DED_COUNT) },
+
+ /* EA */
+ { "EA_DRAMRD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT) },
+ { "EA_DRAMWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT) },
+ { "EA_DRAMWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT) },
+ { "EA_RRET_TAGMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, RRET_TAGMEM_SEC_COUNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, RRET_TAGMEM_DED_COUNT) },
+ { "EA_WRET_TAGMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, WRET_TAGMEM_SEC_COUNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, WRET_TAGMEM_DED_COUNT) },
+ { "EA_GMIRD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT) },
+ { "EA_GMIWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT) },
+ { "EA_GMIWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT) },
+ { "EA_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT), 0, 0 },
+ { "EA_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3), 0, 0,
+ SOC15_REG_FIELD(GCEA_EDC_CNT3, DRAMRD_PAGEMEM_DED_COUNT) },
+ { "EA_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT), 0, 0 },
+ { "EA_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3), 0, 0,
+ SOC15_REG_FIELD(GCEA_EDC_CNT3, DRAMWR_PAGEMEM_DED_COUNT) },
+ { "EA_IORD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, IORD_CMDMEM_SED_COUNT), 0, 0 },
+ { "EA_IORD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3), 0, 0,
+ SOC15_REG_FIELD(GCEA_EDC_CNT3, IORD_CMDMEM_DED_COUNT) },
+ { "EA_IOWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, IOWR_CMDMEM_SED_COUNT), 0, 0 },
+ { "EA_IOWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3), 0, 0,
+ SOC15_REG_FIELD(GCEA_EDC_CNT3, IOWR_CMDMEM_DED_COUNT) },
+ { "EA_IOWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, IOWR_DATAMEM_SED_COUNT), 0, 0 },
+ { "EA_IOWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3), 0, 0,
+ SOC15_REG_FIELD(GCEA_EDC_CNT3, IOWR_DATAMEM_DED_COUNT) },
+ { "EA_GMIRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT), 0, 0 },
+ { "EA_GMIRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3), 0, 0,
+ SOC15_REG_FIELD(GCEA_EDC_CNT3, GMIRD_PAGEMEM_DED_COUNT) },
+ { "EA_GMIWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT), 0, 0 },
+ { "EA_GMIWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3), 0, 0,
+ SOC15_REG_FIELD(GCEA_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT) },
+ { "EA_MAM_D0MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D0MEM_SED_COUNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D0MEM_DED_COUNT) },
+ { "EA_MAM_D1MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D1MEM_SED_COUNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D1MEM_DED_COUNT) },
+ { "EA_MAM_D2MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D2MEM_SED_COUNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D2MEM_DED_COUNT) },
+ { "EA_MAM_D3MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D3MEM_SED_COUNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D3MEM_DED_COUNT) },
+ { "EA_MAM_A0MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3),
+ SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_A0MEM_SEC_COUNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_A0MEM_DED_COUNT) },
+ { "EA_MAM_A1MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3),
+ SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_A1MEM_SEC_COUNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_A1MEM_DED_COUNT) },
+ { "EA_MAM_A2MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3),
+ SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_A2MEM_SEC_COUNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_A2MEM_DED_COUNT) },
+ { "EA_MAM_A3MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3),
+ SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_A3MEM_SEC_COUNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_A3MEM_DED_COUNT) },
+ { "EA_MAM_AFMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, MAM_AFMEM_SEC_COUNT), 0, 0 },
+ { "EA_MAM_AFMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3), 0, 0,
+ SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_AFMEM_DED_COUNT) },
+
+ /* RLC */
+ { "RLCG_INSTR_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT),
+ SOC15_REG_FIELD(RLC_EDC_CNT, RLCG_INSTR_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(RLC_EDC_CNT, RLCG_INSTR_RAM_DED_COUNT) },
+ { "RLCG_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT),
+ SOC15_REG_FIELD(RLC_EDC_CNT, RLCG_SCRATCH_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(RLC_EDC_CNT, RLCG_SCRATCH_RAM_DED_COUNT) },
+ { "RLCV_INSTR_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT),
+ SOC15_REG_FIELD(RLC_EDC_CNT, RLCV_INSTR_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(RLC_EDC_CNT, RLCV_INSTR_RAM_DED_COUNT) },
+ { "RLCV_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT),
+ SOC15_REG_FIELD(RLC_EDC_CNT, RLCV_SCRATCH_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(RLC_EDC_CNT, RLCV_SCRATCH_RAM_DED_COUNT) },
+ { "RLC_TCTAG_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT),
+ SOC15_REG_FIELD(RLC_EDC_CNT, RLC_TCTAG_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(RLC_EDC_CNT, RLC_TCTAG_RAM_DED_COUNT) },
+ { "RLC_SPM_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT),
+ SOC15_REG_FIELD(RLC_EDC_CNT, RLC_SPM_SCRATCH_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(RLC_EDC_CNT, RLC_SPM_SCRATCH_RAM_DED_COUNT) },
+ { "RLC_SRM_DATA_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT),
+ SOC15_REG_FIELD(RLC_EDC_CNT, RLC_SRM_DATA_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(RLC_EDC_CNT, RLC_SRM_DATA_RAM_DED_COUNT) },
+ { "RLC_SRM_ADDR_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT),
+ SOC15_REG_FIELD(RLC_EDC_CNT, RLC_SRM_ADDR_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(RLC_EDC_CNT, RLC_SRM_ADDR_RAM_DED_COUNT) },
+ { "RLC_SPM_SE0_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT2),
+ SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE0_SCRATCH_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE0_SCRATCH_RAM_DED_COUNT) },
+ { "RLC_SPM_SE1_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT2),
+ SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE1_SCRATCH_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE1_SCRATCH_RAM_DED_COUNT) },
+ { "RLC_SPM_SE2_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT2),
+ SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE2_SCRATCH_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE2_SCRATCH_RAM_DED_COUNT) },
+ { "RLC_SPM_SE3_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT2),
+ SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE3_SCRATCH_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE3_SCRATCH_RAM_DED_COUNT) },
+ { "RLC_SPM_SE4_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT2),
+ SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE4_SCRATCH_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE4_SCRATCH_RAM_DED_COUNT) },
+ { "RLC_SPM_SE5_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT2),
+ SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE5_SCRATCH_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE5_SCRATCH_RAM_DED_COUNT) },
+ { "RLC_SPM_SE6_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT2),
+ SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE6_SCRATCH_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE6_SCRATCH_RAM_DED_COUNT) },
+ { "RLC_SPM_SE7_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT2),
+ SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE7_SCRATCH_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE7_SCRATCH_RAM_DED_COUNT) },
+};
+
+static const char * const vml2_mems[] = {
+ "UTC_VML2_BANK_CACHE_0_BIGK_MEM0",
+ "UTC_VML2_BANK_CACHE_0_BIGK_MEM1",
+ "UTC_VML2_BANK_CACHE_0_4K_MEM0",
+ "UTC_VML2_BANK_CACHE_0_4K_MEM1",
+ "UTC_VML2_BANK_CACHE_1_BIGK_MEM0",
+ "UTC_VML2_BANK_CACHE_1_BIGK_MEM1",
+ "UTC_VML2_BANK_CACHE_1_4K_MEM0",
+ "UTC_VML2_BANK_CACHE_1_4K_MEM1",
+ "UTC_VML2_BANK_CACHE_2_BIGK_MEM0",
+ "UTC_VML2_BANK_CACHE_2_BIGK_MEM1",
+ "UTC_VML2_BANK_CACHE_2_4K_MEM0",
+ "UTC_VML2_BANK_CACHE_2_4K_MEM1",
+ "UTC_VML2_BANK_CACHE_3_BIGK_MEM0",
+ "UTC_VML2_BANK_CACHE_3_BIGK_MEM1",
+ "UTC_VML2_BANK_CACHE_3_4K_MEM0",
+ "UTC_VML2_BANK_CACHE_3_4K_MEM1",
+ "UTC_VML2_IFIFO_GROUP0",
+ "UTC_VML2_IFIFO_GROUP1",
+ "UTC_VML2_IFIFO_GROUP2",
+ "UTC_VML2_IFIFO_GROUP3",
+ "UTC_VML2_IFIFO_GROUP4",
+ "UTC_VML2_IFIFO_GROUP5",
+ "UTC_VML2_IFIFO_GROUP6",
+ "UTC_VML2_IFIFO_GROUP7",
+ "UTC_VML2_IFIFO_GROUP8",
+ "UTC_VML2_IFIFO_GROUP9",
+ "UTC_VML2_IFIFO_GROUP10",
+ "UTC_VML2_IFIFO_GROUP11",
+ "UTC_VML2_IFIFO_GROUP12",
+ "UTC_VML2_IFIFO_GROUP13",
+ "UTC_VML2_IFIFO_GROUP14",
+ "UTC_VML2_IFIFO_GROUP15",
+ "UTC_VML2_IFIFO_GROUP16",
+ "UTC_VML2_IFIFO_GROUP17",
+ "UTC_VML2_IFIFO_GROUP18",
+ "UTC_VML2_IFIFO_GROUP19",
+ "UTC_VML2_IFIFO_GROUP20",
+ "UTC_VML2_IFIFO_GROUP21",
+ "UTC_VML2_IFIFO_GROUP22",
+ "UTC_VML2_IFIFO_GROUP23",
+ "UTC_VML2_IFIFO_GROUP24",
+};
+
+static const char * const vml2_walker_mems[] = {
+ "UTC_VML2_CACHE_PDE0_MEM0",
+ "UTC_VML2_CACHE_PDE0_MEM1",
+ "UTC_VML2_CACHE_PDE1_MEM0",
+ "UTC_VML2_CACHE_PDE1_MEM1",
+ "UTC_VML2_CACHE_PDE2_MEM0",
+ "UTC_VML2_CACHE_PDE2_MEM1",
+ "UTC_VML2_RDIF_ARADDRS",
+ "UTC_VML2_RDIF_LOG_FIFO",
+ "UTC_VML2_QUEUE_REQ",
+ "UTC_VML2_QUEUE_RET",
+};
+
+static const char * const utcl2_router_mems[] = {
+ "UTCL2_ROUTER_GROUP0_VML2_REQ_FIFO0",
+ "UTCL2_ROUTER_GROUP1_VML2_REQ_FIFO1",
+ "UTCL2_ROUTER_GROUP2_VML2_REQ_FIFO2",
+ "UTCL2_ROUTER_GROUP3_VML2_REQ_FIFO3",
+ "UTCL2_ROUTER_GROUP4_VML2_REQ_FIFO4",
+ "UTCL2_ROUTER_GROUP5_VML2_REQ_FIFO5",
+ "UTCL2_ROUTER_GROUP6_VML2_REQ_FIFO6",
+ "UTCL2_ROUTER_GROUP7_VML2_REQ_FIFO7",
+ "UTCL2_ROUTER_GROUP8_VML2_REQ_FIFO8",
+ "UTCL2_ROUTER_GROUP9_VML2_REQ_FIFO9",
+ "UTCL2_ROUTER_GROUP10_VML2_REQ_FIFO10",
+ "UTCL2_ROUTER_GROUP11_VML2_REQ_FIFO11",
+ "UTCL2_ROUTER_GROUP12_VML2_REQ_FIFO12",
+ "UTCL2_ROUTER_GROUP13_VML2_REQ_FIFO13",
+ "UTCL2_ROUTER_GROUP14_VML2_REQ_FIFO14",
+ "UTCL2_ROUTER_GROUP15_VML2_REQ_FIFO15",
+ "UTCL2_ROUTER_GROUP16_VML2_REQ_FIFO16",
+ "UTCL2_ROUTER_GROUP17_VML2_REQ_FIFO17",
+ "UTCL2_ROUTER_GROUP18_VML2_REQ_FIFO18",
+ "UTCL2_ROUTER_GROUP19_VML2_REQ_FIFO19",
+ "UTCL2_ROUTER_GROUP20_VML2_REQ_FIFO20",
+ "UTCL2_ROUTER_GROUP21_VML2_REQ_FIFO21",
+ "UTCL2_ROUTER_GROUP22_VML2_REQ_FIFO22",
+ "UTCL2_ROUTER_GROUP23_VML2_REQ_FIFO23",
+ "UTCL2_ROUTER_GROUP24_VML2_REQ_FIFO24",
+};
+
+static const char * const atc_l2_cache_2m_mems[] = {
+ "UTC_ATCL2_CACHE_2M_BANK0_WAY0_MEM",
+ "UTC_ATCL2_CACHE_2M_BANK0_WAY1_MEM",
+ "UTC_ATCL2_CACHE_2M_BANK1_WAY0_MEM",
+ "UTC_ATCL2_CACHE_2M_BANK1_WAY1_MEM",
+};
+
+static const char * const atc_l2_cache_4k_mems[] = {
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM0",
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM1",
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM2",
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM3",
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM4",
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM5",
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM6",
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM7",
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM0",
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM1",
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM2",
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM3",
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM4",
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM5",
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM6",
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM7",
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM0",
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM1",
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM2",
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM3",
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM4",
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM5",
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM6",
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM7",
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM0",
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM1",
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM2",
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM3",
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM4",
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM5",
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM6",
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM7",
+};
+
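+/*
+ * Query the SEC/DED counters of the UTC memories (VML2, VML2 walker,
+ * UTCL2 router, ATC L2 2M/4K caches) and accumulate the results into
+ * err_data. Instances are selected through the *_INDEX registers, and
+ * the walk ends with each index restored to 255.
+ */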
+static int gfx_v9_4_query_utc_edc_status(struct amdgpu_device *adev,
+ struct ras_err_data *err_data)
+{
+ uint32_t i, data;
+ uint32_t sec_count, ded_count;
+
+ WREG32_SOC15(GC, 0, mmVML2_MEM_ECC_INDEX, 255);
+ WREG32_SOC15(GC, 0, mmVML2_MEM_ECC_CNTL, 0);
+ WREG32_SOC15(GC, 0, mmVML2_WALKER_MEM_ECC_INDEX, 255);
+ WREG32_SOC15(GC, 0, mmVML2_WALKER_MEM_ECC_CNTL, 0);
+ WREG32_SOC15(GC, 0, mmUTCL2_MEM_ECC_INDEX, 255);
+ WREG32_SOC15(GC, 0, mmUTCL2_MEM_ECC_CNTL, 0);
+
+ WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_DSM_INDEX, 255);
+ WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_DSM_CNTL, 0);
+ WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_DSM_INDEX, 255);
+	WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_DSM_CNTL, 0);
+
+ for (i = 0; i < ARRAY_SIZE(vml2_mems); i++) {
+ WREG32_SOC15(GC, 0, mmVML2_MEM_ECC_INDEX, i);
+ data = RREG32_SOC15(GC, 0, mmVML2_MEM_ECC_CNTL);
+
+ sec_count = REG_GET_FIELD(data, VML2_MEM_ECC_CNTL, SEC_COUNT);
+ if (sec_count) {
+ DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i,
+ vml2_mems[i], sec_count);
+ err_data->ce_count += sec_count;
+ }
+
+ ded_count = REG_GET_FIELD(data, VML2_MEM_ECC_CNTL, DED_COUNT);
+ if (ded_count) {
+ DRM_INFO("Instance[%d]: SubBlock %s, DED %d\n", i,
+ vml2_mems[i], ded_count);
+ err_data->ue_count += ded_count;
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(vml2_walker_mems); i++) {
+ WREG32_SOC15(GC, 0, mmVML2_WALKER_MEM_ECC_INDEX, i);
+ data = RREG32_SOC15(GC, 0, mmVML2_WALKER_MEM_ECC_CNTL);
+
+ sec_count = REG_GET_FIELD(data, VML2_WALKER_MEM_ECC_CNTL,
+ SEC_COUNT);
+ if (sec_count) {
+ DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i,
+ vml2_walker_mems[i], sec_count);
+ err_data->ce_count += sec_count;
+ }
+
+ ded_count = REG_GET_FIELD(data, VML2_WALKER_MEM_ECC_CNTL,
+ DED_COUNT);
+ if (ded_count) {
+ DRM_INFO("Instance[%d]: SubBlock %s, DED %d\n", i,
+ vml2_walker_mems[i], ded_count);
+ err_data->ue_count += ded_count;
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(utcl2_router_mems); i++) {
+ WREG32_SOC15(GC, 0, mmUTCL2_MEM_ECC_INDEX, i);
+ data = RREG32_SOC15(GC, 0, mmUTCL2_MEM_ECC_CNTL);
+
+ sec_count = REG_GET_FIELD(data, UTCL2_MEM_ECC_CNTL, SEC_COUNT);
+ if (sec_count) {
+ DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i,
+ utcl2_router_mems[i], sec_count);
+ err_data->ce_count += sec_count;
+ }
+
+ ded_count = REG_GET_FIELD(data, UTCL2_MEM_ECC_CNTL, DED_COUNT);
+ if (ded_count) {
+ DRM_INFO("Instance[%d]: SubBlock %s, DED %d\n", i,
+ utcl2_router_mems[i], ded_count);
+ err_data->ue_count += ded_count;
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(atc_l2_cache_2m_mems); i++) {
+ WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_DSM_INDEX, i);
+ data = RREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_DSM_CNTL);
+
+ sec_count = REG_GET_FIELD(data, ATC_L2_CACHE_2M_DSM_CNTL,
+ SEC_COUNT);
+ if (sec_count) {
+ DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i,
+ atc_l2_cache_2m_mems[i], sec_count);
+ err_data->ce_count += sec_count;
+ }
+
+ ded_count = REG_GET_FIELD(data, ATC_L2_CACHE_2M_DSM_CNTL,
+ DED_COUNT);
+ if (ded_count) {
+ DRM_INFO("Instance[%d]: SubBlock %s, DED %d\n", i,
+ atc_l2_cache_2m_mems[i], ded_count);
+ err_data->ue_count += ded_count;
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(atc_l2_cache_4k_mems); i++) {
+ WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_DSM_INDEX, i);
+ data = RREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_DSM_CNTL);
+
+ sec_count = REG_GET_FIELD(data, ATC_L2_CACHE_4K_DSM_CNTL,
+ SEC_COUNT);
+ if (sec_count) {
+ DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i,
+ atc_l2_cache_4k_mems[i], sec_count);
+ err_data->ce_count += sec_count;
+ }
+
+ ded_count = REG_GET_FIELD(data, ATC_L2_CACHE_4K_DSM_CNTL,
+ DED_COUNT);
+ if (ded_count) {
+ DRM_INFO("Instance[%d]: SubBlock %s, DED %d\n", i,
+ atc_l2_cache_4k_mems[i], ded_count);
+ err_data->ue_count += ded_count;
+ }
+ }
+
+ WREG32_SOC15(GC, 0, mmVML2_MEM_ECC_INDEX, 255);
+ WREG32_SOC15(GC, 0, mmVML2_WALKER_MEM_ECC_INDEX, 255);
+ WREG32_SOC15(GC, 0, mmUTCL2_MEM_ECC_INDEX, 255);
+ WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_DSM_INDEX, 255);
+ WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_DSM_INDEX, 255);
+
+ return 0;
+}
+
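+/*
+ * Decode one EDC counter value: match @reg against gfx_v9_4_ras_fields
+ * and, for every sub-block backed by this register, extract the SEC and
+ * DED counts from @value, log the non-zero ones and add them to the
+ * running totals.
+ */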
+static int gfx_v9_4_ras_error_count(const struct soc15_reg_entry *reg,
+ uint32_t se_id, uint32_t inst_id,
+ uint32_t value, uint32_t *sec_count,
+ uint32_t *ded_count)
+{
+ uint32_t i;
+ uint32_t sec_cnt, ded_cnt;
+
+ for (i = 0; i < ARRAY_SIZE(gfx_v9_4_ras_fields); i++) {
+ if (gfx_v9_4_ras_fields[i].reg_offset != reg->reg_offset ||
+ gfx_v9_4_ras_fields[i].seg != reg->seg ||
+ gfx_v9_4_ras_fields[i].inst != reg->inst)
+ continue;
+
+ sec_cnt = (value & gfx_v9_4_ras_fields[i].sec_count_mask) >>
+ gfx_v9_4_ras_fields[i].sec_count_shift;
+ if (sec_cnt) {
+ DRM_INFO("GFX SubBlock %s, Instance[%d][%d], SEC %d\n",
+ gfx_v9_4_ras_fields[i].name, se_id, inst_id,
+ sec_cnt);
+ *sec_count += sec_cnt;
+ }
+
+ ded_cnt = (value & gfx_v9_4_ras_fields[i].ded_count_mask) >>
+ gfx_v9_4_ras_fields[i].ded_count_shift;
+ if (ded_cnt) {
+ DRM_INFO("GFX SubBlock %s, Instance[%d][%d], DED %d\n",
+ gfx_v9_4_ras_fields[i].name, se_id, inst_id,
+ ded_cnt);
+ *ded_count += ded_cnt;
+ }
+ }
+
+ return 0;
+}
+
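+/*
+ * Walk every EDC counter register over its SE/instance range, tally the
+ * correctable (SEC) and uncorrectable (DED) errors into ras_error_status,
+ * then fold in the UTC EDC status.
+ */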
+int gfx_v9_4_query_ras_error_count(struct amdgpu_device *adev,
+ void *ras_error_status)
+{
+ struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
+ uint32_t sec_count = 0, ded_count = 0;
+ uint32_t i, j, k;
+ uint32_t reg_value;
+
+ if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
+ return -EINVAL;
+
+ err_data->ue_count = 0;
+ err_data->ce_count = 0;
+
+ mutex_lock(&adev->grbm_idx_mutex);
+
+ for (i = 0; i < ARRAY_SIZE(gfx_v9_4_edc_counter_regs); i++) {
+ for (j = 0; j < gfx_v9_4_edc_counter_regs[i].se_num; j++) {
+ for (k = 0; k < gfx_v9_4_edc_counter_regs[i].instance;
+ k++) {
+ gfx_v9_4_select_se_sh(adev, j, 0, k);
+ reg_value = RREG32(SOC15_REG_ENTRY_OFFSET(
+ gfx_v9_4_edc_counter_regs[i]));
+ if (reg_value)
+ gfx_v9_4_ras_error_count(
+ &gfx_v9_4_edc_counter_regs[i],
+ j, k, reg_value, &sec_count,
+ &ded_count);
+ }
+ }
+ }
+
+ err_data->ce_count += sec_count;
+ err_data->ue_count += ded_count;
+
+ gfx_v9_4_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ mutex_unlock(&adev->grbm_idx_mutex);
+
+ gfx_v9_4_query_utc_edc_status(adev, err_data);
+
+ return 0;
+}
+
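+/*
+ * The EDC counters are cleared on read, so resetting them amounts to
+ * reading every counter once across all SE/instance selections and all
+ * UTC memory instances.
+ */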
+void gfx_v9_4_clear_ras_edc_counter(struct amdgpu_device *adev)
+{
+ int i, j, k;
+
+ mutex_lock(&adev->grbm_idx_mutex);
+ for (i = 0; i < ARRAY_SIZE(gfx_v9_4_edc_counter_regs); i++) {
+ for (j = 0; j < gfx_v9_4_edc_counter_regs[i].se_num; j++) {
+ for (k = 0; k < gfx_v9_4_edc_counter_regs[i].instance;
+ k++) {
+ gfx_v9_4_select_se_sh(adev, j, 0x0, k);
+ RREG32(SOC15_REG_ENTRY_OFFSET(
+ gfx_v9_4_edc_counter_regs[i]));
+ }
+ }
+ }
+ WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, 0xe0000000);
+ mutex_unlock(&adev->grbm_idx_mutex);
+
+ WREG32_SOC15(GC, 0, mmVML2_MEM_ECC_INDEX, 255);
+ WREG32_SOC15(GC, 0, mmVML2_MEM_ECC_CNTL, 0);
+ WREG32_SOC15(GC, 0, mmVML2_WALKER_MEM_ECC_INDEX, 255);
+ WREG32_SOC15(GC, 0, mmVML2_WALKER_MEM_ECC_CNTL, 0);
+ WREG32_SOC15(GC, 0, mmUTCL2_MEM_ECC_INDEX, 255);
+ WREG32_SOC15(GC, 0, mmUTCL2_MEM_ECC_CNTL, 0);
+
+ WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_DSM_INDEX, 255);
+ WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_DSM_CNTL, 0);
+ WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_DSM_INDEX, 255);
+	WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_DSM_CNTL, 0);
+
+ for (i = 0; i < ARRAY_SIZE(vml2_mems); i++) {
+ WREG32_SOC15(GC, 0, mmVML2_MEM_ECC_INDEX, i);
+ RREG32_SOC15(GC, 0, mmVML2_MEM_ECC_CNTL);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(vml2_walker_mems); i++) {
+ WREG32_SOC15(GC, 0, mmVML2_WALKER_MEM_ECC_INDEX, i);
+ RREG32_SOC15(GC, 0, mmVML2_WALKER_MEM_ECC_CNTL);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(utcl2_router_mems); i++) {
+ WREG32_SOC15(GC, 0, mmUTCL2_MEM_ECC_INDEX, i);
+ RREG32_SOC15(GC, 0, mmUTCL2_MEM_ECC_CNTL);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(atc_l2_cache_2m_mems); i++) {
+ WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_DSM_INDEX, i);
+ RREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_DSM_CNTL);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(atc_l2_cache_4k_mems); i++) {
+ WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_DSM_INDEX, i);
+ RREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_DSM_CNTL);
+ }
+
+ WREG32_SOC15(GC, 0, mmVML2_MEM_ECC_INDEX, 255);
+ WREG32_SOC15(GC, 0, mmVML2_WALKER_MEM_ECC_INDEX, 255);
+ WREG32_SOC15(GC, 0, mmUTCL2_MEM_ECC_INDEX, 255);
+ WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_DSM_INDEX, 255);
+ WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_DSM_INDEX, 255);
+}
+
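+/*
+ * Hand the RAS error-injection request to the PSP RAS TA as a
+ * trigger-error command; grbm_idx_mutex is held across the call.
+ */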
+int gfx_v9_4_ras_error_inject(struct amdgpu_device *adev, void *inject_if)
+{
+ struct ras_inject_if *info = (struct ras_inject_if *)inject_if;
+ int ret;
+ struct ta_ras_trigger_error_input block_info = { 0 };
+
+ if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
+ return -EINVAL;
+
+ block_info.block_id = amdgpu_ras_block_to_ta(info->head.block);
+ block_info.sub_block_index = info->head.sub_block_index;
+ block_info.inject_error_type = amdgpu_ras_error_to_ta(info->head.type);
+ block_info.address = info->address;
+ block_info.value = info->value;
+
+ mutex_lock(&adev->grbm_idx_mutex);
+ ret = psp_ras_trigger_error(&adev->psp, &block_info);
+ mutex_unlock(&adev->grbm_idx_mutex);
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.h b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.h
new file mode 100644
index 000000000000..2e3f6f755ad4
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __GFX_V9_4_H__
+#define __GFX_V9_4_H__
+
+void gfx_v9_4_clear_ras_edc_counter(struct amdgpu_device *adev);
+
+int gfx_v9_4_query_ras_error_count(struct amdgpu_device *adev,
+ void *ras_error_status);
+
+int gfx_v9_4_ras_error_inject(struct amdgpu_device *adev,
+ void *inject_if);
+
+#endif /* __GFX_V9_4_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
index bbede09983e1..9775eca6fe43 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
@@ -262,7 +262,8 @@ static void gmc_v10_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
{
bool use_semaphore = gmc_v10_0_use_invalidate_semaphore(adev, vmhub);
struct amdgpu_vmhub *hub = &adev->vmhub[vmhub];
- u32 tmp = gmc_v10_0_get_invalidate_req(vmid, flush_type);
+ u32 inv_req = gmc_v10_0_get_invalidate_req(vmid, flush_type);
+ u32 tmp;
/* Use register 17 for GART */
const unsigned eng = 17;
unsigned int i;
@@ -289,7 +290,7 @@ static void gmc_v10_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n");
}
- WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, tmp);
+ WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, inv_req);
/*
* Issue a dummy read to wait for the ACK register to be cleared
@@ -418,7 +419,8 @@ static int gmc_v10_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
if (amdgpu_emu_mode == 0 && ring->sched.ready) {
spin_lock(&adev->gfx.kiq.ring_lock);
- amdgpu_ring_alloc(ring, kiq->pmf->invalidate_tlbs_size);
+ /* 2 dwords flush + 8 dwords fence */
+ amdgpu_ring_alloc(ring, kiq->pmf->invalidate_tlbs_size + 8);
kiq->pmf->kiq_invalidate_tlbs(ring,
pasid, flush_type, all_hub);
amdgpu_fence_emit_polling(ring, &seq);
@@ -441,10 +443,10 @@ static int gmc_v10_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
if (all_hub) {
for (i = 0; i < adev->num_vmhubs; i++)
gmc_v10_0_flush_gpu_tlb(adev, vmid,
- i, 0);
+ i, flush_type);
} else {
gmc_v10_0_flush_gpu_tlb(adev, vmid,
- AMDGPU_GFXHUB_0, 0);
+ AMDGPU_GFXHUB_0, flush_type);
}
break;
}
@@ -640,12 +642,7 @@ static int gmc_v10_0_late_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int r;
- /*
- * Can't free the stolen VGA memory when it might be used for memory
- * training again.
- */
- if (!adev->fw_vram_usage.mem_train_support)
- amdgpu_bo_late_init(adev);
+ amdgpu_bo_late_init(adev);
r = amdgpu_gmc_allocate_vm_inv_eng(adev);
if (r)
@@ -829,19 +826,6 @@ static int gmc_v10_0_sw_init(void *handle)
adev->gmc.stolen_size = gmc_v10_0_get_vbios_fb_size(adev);
- /*
- * In dual GPUs scenario, stolen_size is assigned to zero on the
- * secondary GPU, since there is no pre-OS console using that memory.
- * Then the bottom region of VRAM was allocated as GTT, unfortunately a
- * small region of bottom VRAM was encroached by UMC firmware during
- * GDDR6 BIST training, this cause page fault.
- * The page fault can be fixed by forcing stolen_size to 3MB, then the
- * bottom region of VRAM was allocated as stolen memory, GTT corruption
- * avoid.
- */
- adev->gmc.stolen_size = max(adev->gmc.stolen_size,
- AMDGPU_STOLEN_BIST_TRAINING_DEFAULT_SIZE);
-
/* Memory manager */
r = amdgpu_bo_init(adev);
if (r)
@@ -881,13 +865,6 @@ static void gmc_v10_0_gart_fini(struct amdgpu_device *adev)
static int gmc_v10_0_sw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- void *stolen_vga_buf;
-
- /*
- * Free the stolen memory if it wasn't already freed in late_init
- * because of memory training.
- */
- amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, &stolen_vga_buf);
amdgpu_vm_manager_fini(adev);
gmc_v10_0_gart_fini(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index 19d5b133e1d7..9da9596a3638 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -381,7 +381,8 @@ static int gmc_v7_0_mc_init(struct amdgpu_device *adev)
adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);
#ifdef CONFIG_X86_64
- if (adev->flags & AMD_IS_APU) {
+ if (adev->flags & AMD_IS_APU &&
+ adev->gmc.real_vram_size > adev->gmc.aper_size) {
adev->gmc.aper_base = ((u64)RREG32(mmMC_VM_FB_OFFSET)) << 22;
adev->gmc.aper_size = adev->gmc.real_vram_size;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 40a496804356..90216abf14a4 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -476,13 +476,13 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
{
bool use_semaphore = gmc_v9_0_use_invalidate_semaphore(adev, vmhub);
const unsigned eng = 17;
- u32 j, tmp;
+ u32 j, inv_req, tmp;
struct amdgpu_vmhub *hub;
BUG_ON(vmhub >= adev->num_vmhubs);
hub = &adev->vmhub[vmhub];
- tmp = gmc_v9_0_get_invalidate_req(vmid, flush_type);
+ inv_req = gmc_v9_0_get_invalidate_req(vmid, flush_type);
/* This is necessary for a HW workaround under SRIOV as well
* as GFXOFF under bare metal
@@ -493,7 +493,7 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
uint32_t req = hub->vm_inv_eng0_req + eng;
uint32_t ack = hub->vm_inv_eng0_ack + eng;
- amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, tmp,
+ amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, inv_req,
1 << vmid);
return;
}
@@ -521,7 +521,7 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n");
}
- WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, tmp);
+ WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, inv_req);
/*
* Issue a dummy read to wait for the ACK register to be cleared
@@ -578,7 +578,8 @@ static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
if (ring->sched.ready) {
spin_lock(&adev->gfx.kiq.ring_lock);
- amdgpu_ring_alloc(ring, kiq->pmf->invalidate_tlbs_size);
+ /* 2 dwords flush + 8 dwords fence */
+ amdgpu_ring_alloc(ring, kiq->pmf->invalidate_tlbs_size + 8);
kiq->pmf->kiq_invalidate_tlbs(ring,
pasid, flush_type, all_hub);
amdgpu_fence_emit_polling(ring, &seq);
@@ -601,10 +602,10 @@ static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
if (all_hub) {
for (i = 0; i < adev->num_vmhubs; i++)
gmc_v9_0_flush_gpu_tlb(adev, vmid,
- i, 0);
+ i, flush_type);
} else {
gmc_v9_0_flush_gpu_tlb(adev, vmid,
- AMDGPU_GFXHUB_0, 0);
+ AMDGPU_GFXHUB_0, flush_type);
}
break;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
index a78292d84854..ff2e6e1ccde7 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
@@ -690,7 +690,7 @@ static int jpeg_v2_0_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
+ bool enable = (state == AMD_CG_STATE_GATE);
if (enable) {
if (jpeg_v2_0_is_idle(handle))
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
index 2c58939e6ad0..c6d046df4b70 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
@@ -469,7 +469,7 @@ static int jpeg_v2_5_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
+ bool enable = (state == AMD_CG_STATE_GATE);
int i;
for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
index adfd8a6171eb..49a3a56ec017 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
@@ -523,9 +523,9 @@ int mmhub_v1_0_set_clockgating(struct amdgpu_device *adev,
case CHIP_RAVEN:
case CHIP_RENOIR:
mmhub_v1_0_update_medium_grain_clock_gating(adev,
- state == AMD_CG_STATE_GATE ? true : false);
+ state == AMD_CG_STATE_GATE);
mmhub_v1_0_update_medium_grain_light_sleep(adev,
- state == AMD_CG_STATE_GATE ? true : false);
+ state == AMD_CG_STATE_GATE);
break;
default:
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
index a7cb185d639a..bde189680521 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
@@ -427,9 +427,9 @@ int mmhub_v2_0_set_clockgating(struct amdgpu_device *adev,
case CHIP_NAVI14:
case CHIP_NAVI12:
mmhub_v2_0_update_medium_grain_clock_gating(adev,
- state == AMD_CG_STATE_GATE ? true : false);
+ state == AMD_CG_STATE_GATE);
mmhub_v2_0_update_medium_grain_light_sleep(adev,
- state == AMD_CG_STATE_GATE ? true : false);
+ state == AMD_CG_STATE_GATE);
break;
default:
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
index 5c42387c9274..a5281df8d84f 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
@@ -625,9 +625,9 @@ int mmhub_v9_4_set_clockgating(struct amdgpu_device *adev,
switch (adev->asic_type) {
case CHIP_ARCTURUS:
mmhub_v9_4_update_medium_grain_clock_gating(adev,
- state == AMD_CG_STATE_GATE ? true : false);
+ state == AMD_CG_STATE_GATE);
mmhub_v9_4_update_medium_grain_light_sleep(adev,
- state == AMD_CG_STATE_GATE ? true : false);
+ state == AMD_CG_STATE_GATE);
break;
default:
break;
@@ -663,6 +663,7 @@ void mmhub_v9_4_get_clockgating(struct amdgpu_device *adev, u32 *flags)
}
static const struct soc15_ras_field_entry mmhub_v9_4_ras_fields[] = {
+ /* MMHUB Range 0 */
{ "MMEA0_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT),
SOC15_REG_FIELD(MMEA0_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT),
SOC15_REG_FIELD(MMEA0_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT),
@@ -751,6 +752,24 @@ static const struct soc15_ras_field_entry mmhub_v9_4_ras_fields[] = {
0, 0,
SOC15_REG_FIELD(MMEA0_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT),
},
+ { "MMEA0_MAM_D0MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT2, MAM_D0MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT2, MAM_D0MEM_DED_COUNT),
+ },
+ { "MMEA0_MAM_D1MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT2, MAM_D1MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT2, MAM_D1MEM_DED_COUNT),
+ },
+ { "MMEA0_MAM_D2MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT2, MAM_D2MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT2, MAM_D2MEM_DED_COUNT),
+ },
+ { "MMEA0_MAM_D3MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT2, MAM_D3MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT2, MAM_D3MEM_DED_COUNT),
+ },
+
+ /* MMHUB Range 1 */
{ "MMEA1_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT),
SOC15_REG_FIELD(MMEA1_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT),
SOC15_REG_FIELD(MMEA1_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT),
@@ -838,16 +857,686 @@ static const struct soc15_ras_field_entry mmhub_v9_4_ras_fields[] = {
{ "MMEA1_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT3),
0, 0,
SOC15_REG_FIELD(MMEA1_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT),
+ },
+ { "MMEA1_MAM_D0MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT2, MAM_D0MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT2, MAM_D0MEM_DED_COUNT),
+ },
+ { "MMEA1_MAM_D1MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT2, MAM_D1MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT2, MAM_D1MEM_DED_COUNT),
+ },
+ { "MMEA1_MAM_D2MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT2, MAM_D2MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT2, MAM_D2MEM_DED_COUNT),
+ },
+ { "MMEA1_MAM_D3MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT2, MAM_D3MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT2, MAM_D3MEM_DED_COUNT),
+ },
+
+	/* MMHUB Range 2 */
+ { "MMEA2_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT),
+ },
+ { "MMEA2_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT),
+ },
+ { "MMEA2_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT),
+ },
+ { "MMEA2_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT, RRET_TAGMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT, RRET_TAGMEM_DED_COUNT),
+ },
+ { "MMEA2_WRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT, WRET_TAGMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT, WRET_TAGMEM_DED_COUNT),
+ },
+ { "MMEA2_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA2_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA2_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT, IORD_CMDMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA2_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT, IOWR_CMDMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA2_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT, IOWR_DATAMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA2_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT),
+ },
+ { "MMEA2_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT),
+ },
+ { "MMEA2_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT),
+ },
+ { "MMEA2_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA2_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA2_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA2_EDC_CNT3, DRAMRD_PAGEMEM_DED_COUNT),
+ },
+ { "MMEA2_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA2_EDC_CNT3, DRAMWR_PAGEMEM_DED_COUNT),
+ },
+ { "MMEA2_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA2_EDC_CNT3, IORD_CMDMEM_DED_COUNT),
+ },
+ { "MMEA2_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA2_EDC_CNT3, IOWR_CMDMEM_DED_COUNT),
+ },
+ { "MMEA2_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA2_EDC_CNT3, IOWR_DATAMEM_DED_COUNT),
+ },
+ { "MMEA2_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA2_EDC_CNT3, GMIRD_PAGEMEM_DED_COUNT),
+ },
+ { "MMEA2_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA2_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT),
+ },
+ { "MMEA2_MAM_D0MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT2, MAM_D0MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT2, MAM_D0MEM_DED_COUNT),
+ },
+ { "MMEA2_MAM_D1MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT2, MAM_D1MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT2, MAM_D1MEM_DED_COUNT),
+ },
+ { "MMEA2_MAM_D2MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT2, MAM_D2MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT2, MAM_D2MEM_DED_COUNT),
+ },
+ { "MMEA2_MAM_D3MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT2, MAM_D3MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT2, MAM_D3MEM_DED_COUNT),
+ },
+
+	/* MMHUB Range 3 */
+ { "MMEA3_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT),
+ },
+ { "MMEA3_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT),
+ },
+ { "MMEA3_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT),
+ },
+ { "MMEA3_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT, RRET_TAGMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT, RRET_TAGMEM_DED_COUNT),
+ },
+ { "MMEA3_WRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT, WRET_TAGMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT, WRET_TAGMEM_DED_COUNT),
+ },
+ { "MMEA3_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA3_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA3_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT, IORD_CMDMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA3_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT, IOWR_CMDMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA3_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT, IOWR_DATAMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA3_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT),
+ },
+ { "MMEA3_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT),
+ },
+ { "MMEA3_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT),
+ },
+ { "MMEA3_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA3_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA3_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA3_EDC_CNT3, DRAMRD_PAGEMEM_DED_COUNT),
+ },
+ { "MMEA3_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA3_EDC_CNT3, DRAMWR_PAGEMEM_DED_COUNT),
+ },
+ { "MMEA3_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA3_EDC_CNT3, IORD_CMDMEM_DED_COUNT),
+ },
+ { "MMEA3_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA3_EDC_CNT3, IOWR_CMDMEM_DED_COUNT),
+ },
+ { "MMEA3_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA3_EDC_CNT3, IOWR_DATAMEM_DED_COUNT),
+ },
+ { "MMEA3_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA3_EDC_CNT3, GMIRD_PAGEMEM_DED_COUNT),
+ },
+ { "MMEA3_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA3_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT),
+ },
+ { "MMEA3_MAM_D0MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT2, MAM_D0MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT2, MAM_D0MEM_DED_COUNT),
+ },
+ { "MMEA3_MAM_D1MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT2, MAM_D1MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT2, MAM_D1MEM_DED_COUNT),
+ },
+ { "MMEA3_MAM_D2MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT2, MAM_D2MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT2, MAM_D2MEM_DED_COUNT),
+ },
+ { "MMEA3_MAM_D3MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT2, MAM_D3MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT2, MAM_D3MEM_DED_COUNT),
+ },
+
+ /* MMHUB Range 4 */
+ { "MMEA4_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT),
+ },
+ { "MMEA4_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT),
+ },
+ { "MMEA4_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT),
+ },
+ { "MMEA4_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT, RRET_TAGMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT, RRET_TAGMEM_DED_COUNT),
+ },
+ { "MMEA4_WRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT, WRET_TAGMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT, WRET_TAGMEM_DED_COUNT),
+ },
+ { "MMEA4_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA4_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA4_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT, IORD_CMDMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA4_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT, IOWR_CMDMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA4_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT, IOWR_DATAMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA4_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT),
+ },
+ { "MMEA4_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT),
+ },
+ { "MMEA4_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT),
+ },
+ { "MMEA4_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA4_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA4_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA4_EDC_CNT3, DRAMRD_PAGEMEM_DED_COUNT),
+ },
+ { "MMEA4_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA4_EDC_CNT3, DRAMWR_PAGEMEM_DED_COUNT),
+ },
+ { "MMEA4_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA4_EDC_CNT3, IORD_CMDMEM_DED_COUNT),
+ },
+ { "MMEA4_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA4_EDC_CNT3, IOWR_CMDMEM_DED_COUNT),
+ },
+ { "MMEA4_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA4_EDC_CNT3, IOWR_DATAMEM_DED_COUNT),
+ },
+ { "MMEA4_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA4_EDC_CNT3, GMIRD_PAGEMEM_DED_COUNT),
+ },
+ { "MMEA4_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA4_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT),
+ },
+ { "MMEA4_MAM_D0MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT2, MAM_D0MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT2, MAM_D0MEM_DED_COUNT),
+ },
+ { "MMEA4_MAM_D1MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT2, MAM_D1MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT2, MAM_D1MEM_DED_COUNT),
+ },
+ { "MMEA4_MAM_D2MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT2, MAM_D2MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT2, MAM_D2MEM_DED_COUNT),
+ },
+ { "MMEA4_MAM_D3MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT2, MAM_D3MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT2, MAM_D3MEM_DED_COUNT),
+ },
+
+ /* MMHUB Range 5 */
+ { "MMEA5_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT),
+ },
+ { "MMEA5_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT),
+ },
+ { "MMEA5_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT),
+ },
+ { "MMEA5_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT, RRET_TAGMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT, RRET_TAGMEM_DED_COUNT),
+ },
+ { "MMEA5_WRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT, WRET_TAGMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT, WRET_TAGMEM_DED_COUNT),
+ },
+ { "MMEA5_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA5_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA5_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT, IORD_CMDMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA5_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT, IOWR_CMDMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA5_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT, IOWR_DATAMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA5_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT),
+ },
+ { "MMEA5_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT),
+ },
+ { "MMEA5_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT),
+ },
+ { "MMEA5_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA5_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA5_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA5_EDC_CNT3, DRAMRD_PAGEMEM_DED_COUNT),
+ },
+ { "MMEA5_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA5_EDC_CNT3, DRAMWR_PAGEMEM_DED_COUNT),
+ },
+ { "MMEA5_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA5_EDC_CNT3, IORD_CMDMEM_DED_COUNT),
+ },
+ { "MMEA5_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA5_EDC_CNT3, IOWR_CMDMEM_DED_COUNT),
+ },
+ { "MMEA5_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA5_EDC_CNT3, IOWR_DATAMEM_DED_COUNT),
+ },
+ { "MMEA5_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA5_EDC_CNT3, GMIRD_PAGEMEM_DED_COUNT),
+ },
+ { "MMEA5_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA5_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT),
+ },
+ { "MMEA5_MAM_D0MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT2, MAM_D0MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT2, MAM_D0MEM_DED_COUNT),
+ },
+ { "MMEA5_MAM_D1MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT2, MAM_D1MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT2, MAM_D1MEM_DED_COUNT),
+ },
+ { "MMEA5_MAM_D2MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT2, MAM_D2MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT2, MAM_D2MEM_DED_COUNT),
+ },
+ { "MMEA5_MAM_D3MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT2, MAM_D3MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT2, MAM_D3MEM_DED_COUNT),
+ },
+
+ /* MMHUB Range 6 */
+ { "MMEA6_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT),
+ SOC15_REG_FIELD(MMEA6_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA6_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT),
+ },
+ { "MMEA6_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT),
+ SOC15_REG_FIELD(MMEA6_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA6_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT),
+ },
+ { "MMEA6_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT),
+ SOC15_REG_FIELD(MMEA6_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA6_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT),
+ },
+ { "MMEA6_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT),
+ SOC15_REG_FIELD(MMEA6_EDC_CNT, RRET_TAGMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA6_EDC_CNT, RRET_TAGMEM_DED_COUNT),
+ },
+ { "MMEA6_WRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT),
+ SOC15_REG_FIELD(MMEA6_EDC_CNT, WRET_TAGMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA6_EDC_CNT, WRET_TAGMEM_DED_COUNT),
+ },
+ { "MMEA6_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT),
+ SOC15_REG_FIELD(MMEA6_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA6_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT),
+ SOC15_REG_FIELD(MMEA6_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA6_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT),
+ SOC15_REG_FIELD(MMEA6_EDC_CNT, IORD_CMDMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA6_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT),
+ SOC15_REG_FIELD(MMEA6_EDC_CNT, IOWR_CMDMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA6_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT),
+ SOC15_REG_FIELD(MMEA6_EDC_CNT, IOWR_DATAMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA6_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA6_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA6_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT),
+ },
+ { "MMEA6_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA6_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA6_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT),
+ },
+ { "MMEA6_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA6_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA6_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT),
+ },
+ { "MMEA6_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA6_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA6_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA6_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA6_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA6_EDC_CNT3, DRAMRD_PAGEMEM_DED_COUNT),
+ },
+ { "MMEA6_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA6_EDC_CNT3, DRAMWR_PAGEMEM_DED_COUNT),
+ },
+ { "MMEA6_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA6_EDC_CNT3, IORD_CMDMEM_DED_COUNT),
+ },
+ { "MMEA6_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA6_EDC_CNT3, IOWR_CMDMEM_DED_COUNT),
+ },
+ { "MMEA6_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA6_EDC_CNT3, IOWR_DATAMEM_DED_COUNT),
+ },
+ { "MMEA6_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA6_EDC_CNT3, GMIRD_PAGEMEM_DED_COUNT),
+ },
+ { "MMEA6_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA6_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT),
+ },
+ { "MMEA6_MAM_D0MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA6_EDC_CNT2, MAM_D0MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA6_EDC_CNT2, MAM_D0MEM_DED_COUNT),
+ },
+ { "MMEA6_MAM_D1MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA6_EDC_CNT2, MAM_D1MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA6_EDC_CNT2, MAM_D1MEM_DED_COUNT),
+ },
+ { "MMEA6_MAM_D2MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA6_EDC_CNT2, MAM_D2MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA6_EDC_CNT2, MAM_D2MEM_DED_COUNT),
+ },
+ { "MMEA6_MAM_D3MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA6_EDC_CNT2, MAM_D3MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA6_EDC_CNT2, MAM_D3MEM_DED_COUNT),
+ },
+
+ /* MMHUB Range 7 */
+ { "MMEA7_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT),
+ SOC15_REG_FIELD(MMEA7_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA7_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT),
+ },
+ { "MMEA7_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT),
+ SOC15_REG_FIELD(MMEA7_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA7_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT),
+ },
+ { "MMEA7_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT),
+ SOC15_REG_FIELD(MMEA7_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA7_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT),
+ },
+ { "MMEA7_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT),
+ SOC15_REG_FIELD(MMEA7_EDC_CNT, RRET_TAGMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA7_EDC_CNT, RRET_TAGMEM_DED_COUNT),
+ },
+ { "MMEA7_WRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT),
+ SOC15_REG_FIELD(MMEA7_EDC_CNT, WRET_TAGMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA7_EDC_CNT, WRET_TAGMEM_DED_COUNT),
+ },
+ { "MMEA7_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT),
+ SOC15_REG_FIELD(MMEA7_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA7_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT),
+ SOC15_REG_FIELD(MMEA7_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA7_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT),
+ SOC15_REG_FIELD(MMEA7_EDC_CNT, IORD_CMDMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA7_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT),
+ SOC15_REG_FIELD(MMEA7_EDC_CNT, IOWR_CMDMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA7_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT),
+ SOC15_REG_FIELD(MMEA7_EDC_CNT, IOWR_DATAMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA7_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA7_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA7_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT),
+ },
+ { "MMEA7_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA7_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA7_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT),
+ },
+ { "MMEA7_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA7_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA7_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT),
+ },
+ { "MMEA7_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA7_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA7_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA7_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA7_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA7_EDC_CNT3, DRAMRD_PAGEMEM_DED_COUNT),
+ },
+ { "MMEA7_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA7_EDC_CNT3, DRAMWR_PAGEMEM_DED_COUNT),
+ },
+ { "MMEA7_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA7_EDC_CNT3, IORD_CMDMEM_DED_COUNT),
+ },
+ { "MMEA7_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA7_EDC_CNT3, IOWR_CMDMEM_DED_COUNT),
+ },
+ { "MMEA7_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA7_EDC_CNT3, IOWR_DATAMEM_DED_COUNT),
+ },
+ { "MMEA7_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA7_EDC_CNT3, GMIRD_PAGEMEM_DED_COUNT),
+ },
+ { "MMEA7_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA7_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT),
+ },
+ { "MMEA7_MAM_D0MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA7_EDC_CNT2, MAM_D0MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA7_EDC_CNT2, MAM_D0MEM_DED_COUNT),
+ },
+ { "MMEA7_MAM_D1MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA7_EDC_CNT2, MAM_D1MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA7_EDC_CNT2, MAM_D1MEM_DED_COUNT),
+ },
+ { "MMEA7_MAM_D2MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA7_EDC_CNT2, MAM_D2MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA7_EDC_CNT2, MAM_D2MEM_DED_COUNT),
+ },
+ { "MMEA7_MAM_D3MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA7_EDC_CNT2, MAM_D3MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA7_EDC_CNT2, MAM_D3MEM_DED_COUNT),
}
};
static const struct soc15_reg_entry mmhub_v9_4_edc_cnt_regs[] = {
- { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT), 0, 0, 0},
- { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2), 0, 0, 0},
- { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT3), 0, 0, 0},
- { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT), 0, 0, 0},
- { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2), 0, 0, 0},
- { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT3), 0, 0, 0},
+ { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT), 0, 0, 0 },
+ { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2), 0, 0, 0 },
+ { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT3), 0, 0, 0 },
+ { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT), 0, 0, 0 },
+ { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2), 0, 0, 0 },
+ { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT3), 0, 0, 0 },
+ { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT), 0, 0, 0 },
+ { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT2), 0, 0, 0 },
+ { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT3), 0, 0, 0 },
+ { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT), 0, 0, 0 },
+ { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT2), 0, 0, 0 },
+ { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT3), 0, 0, 0 },
+ { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT), 0, 0, 0 },
+ { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT2), 0, 0, 0 },
+ { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT3), 0, 0, 0 },
+ { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT), 0, 0, 0 },
+ { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT2), 0, 0, 0 },
+ { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT3), 0, 0, 0 },
+ { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT), 0, 0, 0 },
+ { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT2), 0, 0, 0 },
+ { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT3), 0, 0, 0 },
+ { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT), 0, 0, 0 },
+ { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT2), 0, 0, 0 },
+ { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT3), 0, 0, 0 },
};
static int mmhub_v9_4_get_ras_error_count(const struct soc15_reg_entry *reg,
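Note: each entry in the table above pairs an EDC counter register with SEC/DED
bitfield descriptors. A counting helper along the following lines is what
typically consumes such entries; this is a minimal sketch for orientation, not
part of the patch, assuming the soc15_ras_field_entry layout whose
sec_count_mask/shift and ded_count_mask/shift members SOC15_REG_FIELD fills in:

	/* Sketch: fold one EDC register value into SEC/DED totals. */
	static void edc_count_sketch(uint32_t value,
				     const struct soc15_ras_field_entry *f,
				     uint32_t *sec_count, uint32_t *ded_count)
	{
		/* SEC: correctable single-bit errors */
		if (f->sec_count_mask)
			*sec_count += (value & f->sec_count_mask) >>
				      f->sec_count_shift;
		/* DED: uncorrectable double-bit errors */
		if (f->ded_count_mask)
			*ded_count += (value & f->ded_count_mask) >>
				      f->ded_count_shift;
	}
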
diff --git a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
index f737ce459c28..cf557a428298 100644
--- a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
@@ -426,7 +426,7 @@ static int navi10_ih_set_clockgating_state(void *handle,
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
navi10_ih_update_clockgating_state(adev,
- state == AMD_CG_STATE_GATE ? true : false);
+ state == AMD_CG_STATE_GATE);
return 0;
}
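Note: this hunk is the first of many identical cleanups in this series. In C,
`state == AMD_CG_STATE_GATE` already evaluates to 0 or 1, so the
`? true : false` ternary is redundant. The same simplification repeats in
nv.c, sdma_v4_0.c, sdma_v5_0.c, si_dma.c, soc15.c, and the UVD/VCE/VCN files
below. For illustration:

	enum amd_clockgating_state state = AMD_CG_STATE_GATE;
	bool enable = (state == AMD_CG_STATE_GATE); /* == already yields a boolean */
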
diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c
index 2e0f8933410e..2d1bebdf1603 100644
--- a/drivers/gpu/drm/amd/amdgpu/nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/nv.c
@@ -950,13 +950,13 @@ static int nv_common_set_clockgating_state(void *handle,
case CHIP_NAVI14:
case CHIP_NAVI12:
adev->nbio.funcs->update_medium_grain_clock_gating(adev,
- state == AMD_CG_STATE_GATE ? true : false);
+ state == AMD_CG_STATE_GATE);
adev->nbio.funcs->update_medium_grain_light_sleep(adev,
- state == AMD_CG_STATE_GATE ? true : false);
+ state == AMD_CG_STATE_GATE);
nv_update_hdp_mem_power_gating(adev,
- state == AMD_CG_STATE_GATE ? true : false);
+ state == AMD_CG_STATE_GATE);
nv_update_hdp_clock_gating(adev,
- state == AMD_CG_STATE_GATE ? true : false);
+ state == AMD_CG_STATE_GATE);
break;
default:
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
index 685dd9754c67..0829188c1a5c 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
@@ -22,6 +22,7 @@
#include <linux/firmware.h>
#include <linux/module.h>
+#include <linux/vmalloc.h>
#include "amdgpu.h"
#include "amdgpu_psp.h"
@@ -971,10 +972,13 @@ Err_out:
*/
static int psp_v11_0_memory_training(struct psp_context *psp, uint32_t ops)
{
- int ret;
- uint32_t p2c_header[4];
struct psp_memory_training_context *ctx = &psp->mem_train_ctx;
uint32_t *pcache = (uint32_t*)ctx->sys_cache;
+ struct amdgpu_device *adev = psp->adev;
+ uint32_t p2c_header[4];
+ uint32_t sz;
+ void *buf;
+ int ret;
if (ctx->init == PSP_MEM_TRAIN_NOT_SUPPORT) {
DRM_DEBUG("Memory training is not supported.\n");
@@ -989,7 +993,7 @@ static int psp_v11_0_memory_training(struct psp_context *psp, uint32_t ops)
return 0;
}
- amdgpu_device_vram_access(psp->adev, ctx->p2c_train_data_offset, p2c_header, sizeof(p2c_header), false);
+ amdgpu_device_vram_access(adev, ctx->p2c_train_data_offset, p2c_header, sizeof(p2c_header), false);
DRM_DEBUG("sys_cache[%08x,%08x,%08x,%08x] p2c_header[%08x,%08x,%08x,%08x]\n",
pcache[0], pcache[1], pcache[2], pcache[3],
p2c_header[0], p2c_header[1], p2c_header[2], p2c_header[3]);
@@ -1026,11 +1030,38 @@ static int psp_v11_0_memory_training(struct psp_context *psp, uint32_t ops)
DRM_DEBUG("Memory training ops:%x.\n", ops);
if (ops & PSP_MEM_TRAIN_SEND_LONG_MSG) {
+ /*
+ * Long training encroaches on a certain amount of bottom VRAM,
+ * so save the content of this bottom VRAM to system memory
+ * before training and restore it after training to avoid
+ * VRAM corruption.
+ */
+ sz = GDDR6_MEM_TRAINING_ENCROACHED_SIZE;
+
+ if (adev->gmc.visible_vram_size < sz || !adev->mman.aper_base_kaddr) {
+ DRM_ERROR("visible_vram_size %llx or aper_base_kaddr %p is not initialized.\n",
+ adev->gmc.visible_vram_size,
+ adev->mman.aper_base_kaddr);
+ return -EINVAL;
+ }
+
+ buf = vmalloc(sz);
+ if (!buf) {
+ DRM_ERROR("failed to allocate system memory.\n");
+ return -ENOMEM;
+ }
+
+ memcpy_fromio(buf, adev->mman.aper_base_kaddr, sz);
ret = psp_v11_0_memory_training_send_msg(psp, PSP_BL__DRAM_LONG_TRAIN);
if (ret) {
DRM_ERROR("Send long training msg failed.\n");
+ vfree(buf);
return ret;
}
+
+ memcpy_toio(adev->mman.aper_base_kaddr, buf, sz);
+ adev->nbio.funcs->hdp_flush(adev, NULL);
+ vfree(buf);
}
if (ops & PSP_MEM_TRAIN_SAVE) {
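Note: condensed, the added PSP_MEM_TRAIN_SEND_LONG_MSG path is a
save/train/restore bracket around the bootloader message. Error handling is
trimmed here, and vmalloc is presumably chosen because the region is large and
needs no physical contiguity:

	sz = GDDR6_MEM_TRAINING_ENCROACHED_SIZE;
	buf = vmalloc(sz);
	memcpy_fromio(buf, adev->mman.aper_base_kaddr, sz); /* save bottom VRAM */
	ret = psp_v11_0_memory_training_send_msg(psp, PSP_BL__DRAM_LONG_TRAIN);
	memcpy_toio(adev->mman.aper_base_kaddr, buf, sz);   /* restore it */
	adev->nbio.funcs->hdp_flush(adev, NULL);
	vfree(buf);
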
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index 27c7001be1ee..e55884d204bd 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -2176,9 +2176,9 @@ static int sdma_v4_0_set_clockgating_state(void *handle,
case CHIP_ARCTURUS:
case CHIP_RENOIR:
sdma_v4_0_update_medium_grain_clock_gating(adev,
- state == AMD_CG_STATE_GATE ? true : false);
+ state == AMD_CG_STATE_GATE);
sdma_v4_0_update_medium_grain_light_sleep(adev,
- state == AMD_CG_STATE_GATE ? true : false);
+ state == AMD_CG_STATE_GATE);
break;
default:
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
index 4c6bf1f8a528..67b9830b7c7e 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
@@ -1525,9 +1525,9 @@ static int sdma_v5_0_set_clockgating_state(void *handle,
case CHIP_NAVI14:
case CHIP_NAVI12:
sdma_v5_0_update_medium_grain_clock_gating(adev,
- state == AMD_CG_STATE_GATE ? true : false);
+ state == AMD_CG_STATE_GATE);
sdma_v5_0_update_medium_grain_light_sleep(adev,
- state == AMD_CG_STATE_GATE ? true : false);
+ state == AMD_CG_STATE_GATE);
break;
default:
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.c b/drivers/gpu/drm/amd/amdgpu/si_dma.c
index 9aac9f9c50bb..42d5601b6bf3 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dma.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dma.c
@@ -648,7 +648,7 @@ static int si_dma_set_clockgating_state(void *handle,
bool enable;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- enable = (state == AMD_CG_STATE_GATE) ? true : false;
+ enable = (state == AMD_CG_STATE_GATE);
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG)) {
for (i = 0; i < adev->sdma.num_instances; i++) {
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 317803f6a561..15f3424a1ff7 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -537,6 +537,10 @@ soc15_asic_reset_method(struct amdgpu_device *adev)
static int soc15_asic_reset(struct amdgpu_device *adev)
{
+ /* original raven doesn't have full asic reset */
+ if (adev->pdev->device == 0x15dd && adev->rev_id < 0x8)
+ return 0;
+
switch (soc15_asic_reset_method(adev)) {
case AMD_RESET_METHOD_BACO:
if (!adev->in_suspend)
@@ -1467,38 +1471,38 @@ static int soc15_common_set_clockgating_state(void *handle,
case CHIP_VEGA12:
case CHIP_VEGA20:
adev->nbio.funcs->update_medium_grain_clock_gating(adev,
- state == AMD_CG_STATE_GATE ? true : false);
+ state == AMD_CG_STATE_GATE);
adev->nbio.funcs->update_medium_grain_light_sleep(adev,
- state == AMD_CG_STATE_GATE ? true : false);
+ state == AMD_CG_STATE_GATE);
soc15_update_hdp_light_sleep(adev,
- state == AMD_CG_STATE_GATE ? true : false);
+ state == AMD_CG_STATE_GATE);
soc15_update_drm_clock_gating(adev,
- state == AMD_CG_STATE_GATE ? true : false);
+ state == AMD_CG_STATE_GATE);
soc15_update_drm_light_sleep(adev,
- state == AMD_CG_STATE_GATE ? true : false);
+ state == AMD_CG_STATE_GATE);
soc15_update_rom_medium_grain_clock_gating(adev,
- state == AMD_CG_STATE_GATE ? true : false);
+ state == AMD_CG_STATE_GATE);
adev->df.funcs->update_medium_grain_clock_gating(adev,
- state == AMD_CG_STATE_GATE ? true : false);
+ state == AMD_CG_STATE_GATE);
break;
case CHIP_RAVEN:
case CHIP_RENOIR:
adev->nbio.funcs->update_medium_grain_clock_gating(adev,
- state == AMD_CG_STATE_GATE ? true : false);
+ state == AMD_CG_STATE_GATE);
adev->nbio.funcs->update_medium_grain_light_sleep(adev,
- state == AMD_CG_STATE_GATE ? true : false);
+ state == AMD_CG_STATE_GATE);
soc15_update_hdp_light_sleep(adev,
- state == AMD_CG_STATE_GATE ? true : false);
+ state == AMD_CG_STATE_GATE);
soc15_update_drm_clock_gating(adev,
- state == AMD_CG_STATE_GATE ? true : false);
+ state == AMD_CG_STATE_GATE);
soc15_update_drm_light_sleep(adev,
- state == AMD_CG_STATE_GATE ? true : false);
+ state == AMD_CG_STATE_GATE);
soc15_update_rom_medium_grain_clock_gating(adev,
- state == AMD_CG_STATE_GATE ? true : false);
+ state == AMD_CG_STATE_GATE);
break;
case CHIP_ARCTURUS:
soc15_update_hdp_light_sleep(adev,
- state == AMD_CG_STATE_GATE ? true : false);
+ state == AMD_CG_STATE_GATE);
break;
default:
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
index 01e62fb8e6e0..0fa8aae2d78e 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
@@ -763,7 +763,7 @@ static int uvd_v5_0_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
+ bool enable = (state == AMD_CG_STATE_GATE);
if (enable) {
/* wait for STATUS to clear */
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index 217084d56ab8..e0aadcaf6c8b 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -1421,7 +1421,7 @@ static int uvd_v6_0_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
+ bool enable = (state == AMD_CG_STATE_GATE);
if (enable) {
/* wait for STATUS to clear */
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index 475ae68f38f5..217db187207c 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -739,7 +739,7 @@ static int vce_v3_0_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
+ bool enable = (state == AMD_CG_STATE_GATE);
int i;
if (!(adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG))
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
index 683701cf7270..3fd102efb7af 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
@@ -887,7 +887,7 @@ static int vce_v4_0_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
+ bool enable = (state == AMD_CG_STATE_GATE);
int i;
if ((adev->asic_type == CHIP_POLARIS10) ||
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
index e654938f6cca..1a24fadd30e2 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
@@ -1346,7 +1346,7 @@ static int vcn_v1_0_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
+ bool enable = (state == AMD_CG_STATE_GATE);
if (enable) {
/* wait for STATUS to clear */
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
index f4db8af6536b..4f7216788f11 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
@@ -1213,7 +1213,7 @@ static int vcn_v2_0_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
+ bool enable = (state == AMD_CG_STATE_GATE);
if (enable) {
/* wait for STATUS to clear */
@@ -1624,7 +1624,7 @@ static int vcn_v2_0_process_interrupt(struct amdgpu_device *adev,
return 0;
}
-static int vcn_v2_0_dec_ring_test_ring(struct amdgpu_ring *ring)
+int vcn_v2_0_dec_ring_test_ring(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
uint32_t tmp = 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.h b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.h
index ef749b02ded9..6c9de1882428 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.h
@@ -37,6 +37,7 @@ extern void vcn_v2_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring,
unsigned vmid, uint64_t pd_addr);
extern void vcn_v2_0_dec_ring_emit_wreg(struct amdgpu_ring *ring,
uint32_t reg, uint32_t val);
+extern int vcn_v2_0_dec_ring_test_ring(struct amdgpu_ring *ring);
extern void vcn_v2_0_enc_ring_insert_end(struct amdgpu_ring *ring);
extern void vcn_v2_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
index c8b63d57a541..70fae7977f8f 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
@@ -435,88 +435,88 @@ static void vcn_v2_5_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
if (!indirect) {
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, inst_idx, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
+ UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_lo), 0, indirect);
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, inst_idx, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
+ UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_hi), 0, indirect);
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, inst_idx, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
+ UVD, 0, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
} else {
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, inst_idx, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 0, 0, indirect);
+ UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 0, 0, indirect);
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, inst_idx, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 0, 0, indirect);
+ UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 0, 0, indirect);
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, inst_idx, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
+ UVD, 0, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
}
offset = 0;
} else {
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, inst_idx, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
+ UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, inst_idx, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
+ UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
offset = size;
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, inst_idx, mmUVD_VCPU_CACHE_OFFSET0),
+ UVD, 0, mmUVD_VCPU_CACHE_OFFSET0),
AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0, indirect);
}
if (!indirect)
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, inst_idx, mmUVD_VCPU_CACHE_SIZE0), size, 0, indirect);
+ UVD, 0, mmUVD_VCPU_CACHE_SIZE0), size, 0, indirect);
else
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, inst_idx, mmUVD_VCPU_CACHE_SIZE0), 0, 0, indirect);
+ UVD, 0, mmUVD_VCPU_CACHE_SIZE0), 0, 0, indirect);
/* cache window 1: stack */
if (!indirect) {
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, inst_idx, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
+ UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, inst_idx, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
+ UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, inst_idx, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
+ UVD, 0, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
} else {
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, inst_idx, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), 0, 0, indirect);
+ UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), 0, 0, indirect);
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, inst_idx, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), 0, 0, indirect);
+ UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), 0, 0, indirect);
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, inst_idx, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
+ UVD, 0, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
}
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, inst_idx, mmUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE, 0, indirect);
+ UVD, 0, mmUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE, 0, indirect);
/* cache window 2: context */
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, inst_idx, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
+ UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, inst_idx, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
+ UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, inst_idx, mmUVD_VCPU_CACHE_OFFSET2), 0, 0, indirect);
+ UVD, 0, mmUVD_VCPU_CACHE_OFFSET2), 0, 0, indirect);
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, inst_idx, mmUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE, 0, indirect);
+ UVD, 0, mmUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE, 0, indirect);
/* non-cache window */
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, inst_idx, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW), 0, 0, indirect);
+ UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW), 0, 0, indirect);
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, inst_idx, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH), 0, 0, indirect);
+ UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH), 0, 0, indirect);
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, inst_idx, mmUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect);
+ UVD, 0, mmUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect);
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, inst_idx, mmUVD_VCPU_NONCACHE_SIZE0), 0, 0, indirect);
+ UVD, 0, mmUVD_VCPU_NONCACHE_SIZE0), 0, 0, indirect);
/* VCN global tiling registers */
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, inst_idx, mmUVD_GFX8_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
+ UVD, 0, mmUVD_GFX8_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
}
/**
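Note: throughout these hunks the instance is still routed by the first
argument to WREG32_SOC15_DPG_MODE_2_0; only the offset macro's instance
argument changes from inst_idx to 0. The natural reading, inferred from the
diff rather than stated in it, is that the indirect DPG write path selects the
instance itself, so the register offset must be computed relative to
instance 0:

	/* inst_idx routes the write; the offset is instance-0 relative */
	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
			UVD, 0, mmUVD_CGC_CTRL), reg_data, sram_sel, indirect);
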
@@ -670,19 +670,19 @@ static void vcn_v2_5_clock_gating_dpg_mode(struct amdgpu_device *adev,
UVD_CGC_CTRL__VCPU_MODE_MASK |
UVD_CGC_CTRL__MMSCH_MODE_MASK);
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, inst_idx, mmUVD_CGC_CTRL), reg_data, sram_sel, indirect);
+ UVD, 0, mmUVD_CGC_CTRL), reg_data, sram_sel, indirect);
/* turn off clock gating */
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, inst_idx, mmUVD_CGC_GATE), 0, sram_sel, indirect);
+ UVD, 0, mmUVD_CGC_GATE), 0, sram_sel, indirect);
/* turn on SUVD clock gating */
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, inst_idx, mmUVD_SUVD_CGC_GATE), 1, sram_sel, indirect);
+ UVD, 0, mmUVD_SUVD_CGC_GATE), 1, sram_sel, indirect);
/* turn on sw mode in UVD_SUVD_CGC_CTRL */
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, inst_idx, mmUVD_SUVD_CGC_CTRL), 0, sram_sel, indirect);
+ UVD, 0, mmUVD_SUVD_CGC_CTRL), 0, sram_sel, indirect);
}
/**
@@ -772,11 +772,11 @@ static int vcn_v2_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, boo
tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
tmp |= UVD_VCPU_CNTL__BLK_RST_MASK;
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, inst_idx, mmUVD_VCPU_CNTL), tmp, 0, indirect);
+ UVD, 0, mmUVD_VCPU_CNTL), tmp, 0, indirect);
/* disable master interrupt */
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, inst_idx, mmUVD_MASTINT_EN), 0, 0, indirect);
+ UVD, 0, mmUVD_MASTINT_EN), 0, 0, indirect);
/* setup mmUVD_LMI_CTRL */
tmp = (0x8 | UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
@@ -788,28 +788,28 @@ static int vcn_v2_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, boo
(8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
0x00100000L);
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, inst_idx, mmUVD_LMI_CTRL), tmp, 0, indirect);
+ UVD, 0, mmUVD_LMI_CTRL), tmp, 0, indirect);
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, inst_idx, mmUVD_MPC_CNTL),
+ UVD, 0, mmUVD_MPC_CNTL),
0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT, 0, indirect);
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, inst_idx, mmUVD_MPC_SET_MUXA0),
+ UVD, 0, mmUVD_MPC_SET_MUXA0),
((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
(0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
(0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
(0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)), 0, indirect);
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, inst_idx, mmUVD_MPC_SET_MUXB0),
+ UVD, 0, mmUVD_MPC_SET_MUXB0),
((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
(0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
(0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
(0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)), 0, indirect);
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, inst_idx, mmUVD_MPC_SET_MUX),
+ UVD, 0, mmUVD_MPC_SET_MUX),
((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
(0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
(0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)), 0, indirect);
@@ -817,26 +817,26 @@ static int vcn_v2_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, boo
vcn_v2_5_mc_resume_dpg_mode(adev, inst_idx, indirect);
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, inst_idx, mmUVD_REG_XX_MASK), 0x10, 0, indirect);
+ UVD, 0, mmUVD_REG_XX_MASK), 0x10, 0, indirect);
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, inst_idx, mmUVD_RBC_XX_IB_REG_CHECK), 0x3, 0, indirect);
+ UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK), 0x3, 0, indirect);
/* enable LMI MC and UMC channels */
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, inst_idx, mmUVD_LMI_CTRL2), 0, 0, indirect);
+ UVD, 0, mmUVD_LMI_CTRL2), 0, 0, indirect);
/* unblock VCPU register access */
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, inst_idx, mmUVD_RB_ARB_CTRL), 0, 0, indirect);
+ UVD, 0, mmUVD_RB_ARB_CTRL), 0, 0, indirect);
tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, inst_idx, mmUVD_VCPU_CNTL), tmp, 0, indirect);
+ UVD, 0, mmUVD_VCPU_CNTL), tmp, 0, indirect);
/* enable master interrupt */
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, inst_idx, mmUVD_MASTINT_EN),
+ UVD, 0, mmUVD_MASTINT_EN),
UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);
if (indirect)
@@ -891,8 +891,10 @@ static int vcn_v2_5_start(struct amdgpu_device *adev)
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
if (adev->vcn.harvest_config & (1 << i))
continue;
- if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
- return vcn_v2_5_start_dpg_mode(adev, i, adev->vcn.indirect_sram);
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
+ r = vcn_v2_5_start_dpg_mode(adev, i, adev->vcn.indirect_sram);
+ continue;
+ }
/* disable register anti-hang mechanism */
WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_POWER_STATUS), 0,
@@ -903,6 +905,9 @@ static int vcn_v2_5_start(struct amdgpu_device *adev)
WREG32_SOC15(UVD, i, mmUVD_STATUS, tmp);
}
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
+ return 0;
+
/* SW clock gating */
vcn_v2_5_disable_clock_gating(adev);
@@ -1294,10 +1299,9 @@ static int vcn_v2_5_stop(struct amdgpu_device *adev)
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
if (adev->vcn.harvest_config & (1 << i))
continue;
-
if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
r = vcn_v2_5_stop_dpg_mode(adev, i);
- goto power_off;
+ continue;
}
/* wait for vcn idle */
@@ -1349,7 +1353,6 @@ static int vcn_v2_5_stop(struct amdgpu_device *adev)
~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
}
-power_off:
if (adev->pm.dpm_enabled)
amdgpu_dpm_enable_uvd(adev, false);
@@ -1488,7 +1491,7 @@ static const struct amdgpu_ring_funcs vcn_v2_5_dec_ring_vm_funcs = {
.emit_ib = vcn_v2_0_dec_ring_emit_ib,
.emit_fence = vcn_v2_0_dec_ring_emit_fence,
.emit_vm_flush = vcn_v2_0_dec_ring_emit_vm_flush,
- .test_ring = amdgpu_vcn_dec_ring_test_ring,
+ .test_ring = vcn_v2_0_dec_ring_test_ring,
.test_ib = amdgpu_vcn_dec_ring_test_ib,
.insert_nop = vcn_v2_0_dec_ring_insert_nop,
.insert_start = vcn_v2_0_dec_ring_insert_start,
@@ -1663,7 +1666,7 @@ static int vcn_v2_5_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
+ bool enable = (state == AMD_CG_STATE_GATE);
if (amdgpu_sriov_vf(adev))
return 0;
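Note: the start/stop reworks above share one fix. A DPG-mode instance used to
return (or `goto power_off`) out of the per-instance loop, skipping every
remaining VCN instance; both paths now `continue` instead, so the loop shape
is, sketched:

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
			r = vcn_v2_5_start_dpg_mode(adev, i,
						    adev->vcn.indirect_sram);
			continue; /* later instances still need starting */
		}
		/* ... non-DPG bring-up for instance i ... */
	}
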
diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
index d9e331084ea0..407c6093c2ec 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
@@ -717,7 +717,7 @@ static int vega10_ih_set_clockgating_state(void *handle,
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
vega10_ih_update_clockgating_state(adev,
- state == AMD_CG_STATE_GATE ? true : false);
+ state == AMD_CG_STATE_GATE);
return 0;
}