Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c  139
1 file changed, 79 insertions(+), 60 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
index 23b478639921..8030fcd64210 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -43,7 +43,7 @@
static void gmc_v6_0_set_gmc_funcs(struct amdgpu_device *adev);
static void gmc_v6_0_set_irq_funcs(struct amdgpu_device *adev);
-static int gmc_v6_0_wait_for_idle(void *handle);
+static int gmc_v6_0_wait_for_idle(struct amdgpu_ip_block *ip_block);
MODULE_FIRMWARE("amdgpu/tahiti_mc.bin");
MODULE_FIRMWARE("amdgpu/pitcairn_mc.bin");
@@ -64,8 +64,13 @@ MODULE_FIRMWARE("amdgpu/si58_mc.bin");
static void gmc_v6_0_mc_stop(struct amdgpu_device *adev)
{
u32 blackout;
+ struct amdgpu_ip_block *ip_block;
- gmc_v6_0_wait_for_idle((void *)adev);
+ ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GMC);
+ if (!ip_block)
+ return;
+
+ gmc_v6_0_wait_for_idle(ip_block);
blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
if (REG_GET_FIELD(blackout, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE) != 1) {
@@ -98,9 +103,7 @@ static void gmc_v6_0_mc_resume(struct amdgpu_device *adev)
static int gmc_v6_0_init_microcode(struct amdgpu_device *adev)
{
const char *chip_name;
- char fw_name[30];
int err;
- bool is_58_fw = false;
DRM_DEBUG("\n");
@@ -126,17 +129,14 @@ static int gmc_v6_0_init_microcode(struct amdgpu_device *adev)
/* this memory configuration requires special firmware */
if (((RREG32(mmMC_SEQ_MISC0) & 0xff000000) >> 24) == 0x58)
- is_58_fw = true;
+ chip_name = "si58";
- if (is_58_fw)
- snprintf(fw_name, sizeof(fw_name), "amdgpu/si58_mc.bin");
- else
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name);
- err = amdgpu_ucode_request(adev, &adev->gmc.fw, fw_name);
+ err = amdgpu_ucode_request(adev, &adev->gmc.fw, AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s_mc.bin", chip_name);
if (err) {
dev_err(adev->dev,
- "si_mc: Failed to load firmware \"%s\"\n",
- fw_name);
+ "si_mc: Failed to load firmware \"%s_mc.bin\"\n",
+ chip_name);
amdgpu_ucode_release(&adev->gmc.fw);
}
return err;
@@ -219,6 +219,8 @@ static void gmc_v6_0_vram_gtt_location(struct amdgpu_device *adev,
static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
{
int i, j;
+ struct amdgpu_ip_block *ip_block;
+
/* Initialize HDP */
for (i = 0, j = 0; i < 32; i++, j += 0x6) {
@@ -230,7 +232,11 @@ static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
}
WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0);
- if (gmc_v6_0_wait_for_idle((void *)adev))
+ ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GMC);
+ if (!ip_block)
+ return;
+
+ if (gmc_v6_0_wait_for_idle(ip_block))
dev_warn(adev->dev, "Wait for MC idle timedout !\n");
if (adev->mode_info.num_crtc) {
@@ -243,7 +249,7 @@ static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
/* disable VGA render */
tmp = RREG32(mmVGA_RENDER_CONTROL);
- tmp &= ~VGA_VSTATUS_CNTL;
+ tmp &= VGA_RENDER_CONTROL__VGA_VSTATUS_CNTL_MASK;
WREG32(mmVGA_RENDER_CONTROL, tmp);
}
/* Update configuration */
@@ -257,7 +263,7 @@ static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
WREG32(mmMC_VM_AGP_TOP, adev->gmc.agp_end >> 22);
WREG32(mmMC_VM_AGP_BOT, adev->gmc.agp_start >> 22);
- if (gmc_v6_0_wait_for_idle((void *)adev))
+ if (gmc_v6_0_wait_for_idle(ip_block))
dev_warn(adev->dev, "Wait for MC idle timedout !\n");
}
@@ -621,17 +627,16 @@ static void gmc_v6_0_vm_decode_fault(struct amdgpu_device *adev,
"write" : "read", block, mc_client, mc_id);
}
-/*
static const u32 mc_cg_registers[] = {
- MC_HUB_MISC_HUB_CG,
- MC_HUB_MISC_SIP_CG,
- MC_HUB_MISC_VM_CG,
- MC_XPB_CLK_GAT,
- ATC_MISC_CG,
- MC_CITF_MISC_WR_CG,
- MC_CITF_MISC_RD_CG,
- MC_CITF_MISC_VM_CG,
- VM_L2_CG,
+ mmMC_HUB_MISC_HUB_CG,
+ mmMC_HUB_MISC_SIP_CG,
+ mmMC_HUB_MISC_VM_CG,
+ mmMC_XPB_CLK_GAT,
+ mmATC_MISC_CG,
+ mmMC_CITF_MISC_WR_CG,
+ mmMC_CITF_MISC_RD_CG,
+ mmMC_CITF_MISC_VM_CG,
+ mmVM_L2_CG,
};
static const u32 mc_cg_ls_en[] = {
@@ -666,7 +671,7 @@ static void gmc_v6_0_enable_mc_ls(struct amdgpu_device *adev,
for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
orig = data = RREG32(mc_cg_registers[i]);
- if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_MC_LS))
+ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS))
data |= mc_cg_ls_en[i];
else
data &= ~mc_cg_ls_en[i];
@@ -683,7 +688,7 @@ static void gmc_v6_0_enable_mc_mgcg(struct amdgpu_device *adev,
for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
orig = data = RREG32(mc_cg_registers[i]);
- if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_MC_MGCG))
+ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG))
data |= mc_cg_en[i];
else
data &= ~mc_cg_en[i];
@@ -699,7 +704,7 @@ static void gmc_v6_0_enable_bif_mgls(struct amdgpu_device *adev,
orig = data = RREG32_PCIE(ixPCIE_CNTL2);
- if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_BIF_LS)) {
+ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS)) {
data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_LS_EN, 1);
data = REG_SET_FIELD(data, PCIE_CNTL2, MST_MEM_LS_EN, 1);
data = REG_SET_FIELD(data, PCIE_CNTL2, REPLAY_MEM_LS_EN, 1);
@@ -722,7 +727,7 @@ static void gmc_v6_0_enable_hdp_mgcg(struct amdgpu_device *adev,
orig = data = RREG32(mmHDP_HOST_PATH_CNTL);
- if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_HDP_MGCG))
+ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG))
data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 0);
else
data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 1);
@@ -738,7 +743,7 @@ static void gmc_v6_0_enable_hdp_ls(struct amdgpu_device *adev,
orig = data = RREG32(mmHDP_MEM_POWER_LS);
- if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_HDP_LS))
+ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 1);
else
data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 0);
@@ -746,7 +751,6 @@ static void gmc_v6_0_enable_hdp_ls(struct amdgpu_device *adev,
if (orig != data)
WREG32(mmHDP_MEM_POWER_LS, data);
}
-*/
static int gmc_v6_0_convert_vram_type(int mc_seq_vram_type)
{
@@ -768,9 +772,9 @@ static int gmc_v6_0_convert_vram_type(int mc_seq_vram_type)
}
}
-static int gmc_v6_0_early_init(void *handle)
+static int gmc_v6_0_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
gmc_v6_0_set_gmc_funcs(adev);
gmc_v6_0_set_irq_funcs(adev);
@@ -778,9 +782,9 @@ static int gmc_v6_0_early_init(void *handle)
return 0;
}
-static int gmc_v6_0_late_init(void *handle)
+static int gmc_v6_0_late_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS)
return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
@@ -805,10 +809,10 @@ static unsigned int gmc_v6_0_get_vbios_fb_size(struct amdgpu_device *adev)
return size;
}
-static int gmc_v6_0_sw_init(void *handle)
+static int gmc_v6_0_sw_init(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
set_bit(AMDGPU_GFXHUB(0), adev->vmhubs_mask);
@@ -882,9 +886,9 @@ static int gmc_v6_0_sw_init(void *handle)
return 0;
}
-static int gmc_v6_0_sw_fini(void *handle)
+static int gmc_v6_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
amdgpu_gem_force_release(adev);
amdgpu_vm_manager_fini(adev);
@@ -895,10 +899,10 @@ static int gmc_v6_0_sw_fini(void *handle)
return 0;
}
-static int gmc_v6_0_hw_init(void *handle)
+static int gmc_v6_0_hw_init(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
gmc_v6_0_mc_program(adev);
@@ -920,9 +924,9 @@ static int gmc_v6_0_hw_init(void *handle)
return 0;
}
-static int gmc_v6_0_hw_fini(void *handle)
+static int gmc_v6_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
gmc_v6_0_gart_disable(adev);
@@ -930,21 +934,19 @@ static int gmc_v6_0_hw_fini(void *handle)
return 0;
}
-static int gmc_v6_0_suspend(void *handle)
+static int gmc_v6_0_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- gmc_v6_0_hw_fini(adev);
+ gmc_v6_0_hw_fini(ip_block);
return 0;
}
-static int gmc_v6_0_resume(void *handle)
+static int gmc_v6_0_resume(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
- r = gmc_v6_0_hw_init(adev);
+ r = gmc_v6_0_hw_init(ip_block);
if (r)
return r;
@@ -953,9 +955,10 @@ static int gmc_v6_0_resume(void *handle)
return 0;
}
-static bool gmc_v6_0_is_idle(void *handle)
+static bool gmc_v6_0_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
+
u32 tmp = RREG32(mmSRBM_STATUS);
if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
@@ -965,13 +968,13 @@ static bool gmc_v6_0_is_idle(void *handle)
return true;
}
-static int gmc_v6_0_wait_for_idle(void *handle)
+static int gmc_v6_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
unsigned int i;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
for (i = 0; i < adev->usec_timeout; i++) {
- if (gmc_v6_0_is_idle(handle))
+ if (gmc_v6_0_is_idle(ip_block))
return 0;
udelay(1);
}
@@ -979,9 +982,10 @@ static int gmc_v6_0_wait_for_idle(void *handle)
}
-static int gmc_v6_0_soft_reset(void *handle)
+static int gmc_v6_0_soft_reset(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
+
u32 srbm_soft_reset = 0;
u32 tmp = RREG32(mmSRBM_STATUS);
@@ -998,7 +1002,8 @@ static int gmc_v6_0_soft_reset(void *handle)
if (srbm_soft_reset) {
gmc_v6_0_mc_stop(adev);
- if (gmc_v6_0_wait_for_idle(adev))
+
+ if (gmc_v6_0_wait_for_idle(ip_block))
dev_warn(adev->dev, "Wait for GMC idle timed out !\n");
tmp = RREG32(mmSRBM_SOFT_RESET);
@@ -1088,13 +1093,27 @@ static int gmc_v6_0_process_interrupt(struct amdgpu_device *adev,
return 0;
}
-static int gmc_v6_0_set_clockgating_state(void *handle,
+static int gmc_v6_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
+ struct amdgpu_device *adev = ip_block->adev;
+ bool gate = false;
+
+ if (state == AMD_CG_STATE_GATE)
+ gate = true;
+
+ if (!(adev->flags & AMD_IS_APU)) {
+ gmc_v6_0_enable_mc_mgcg(adev, gate);
+ gmc_v6_0_enable_mc_ls(adev, gate);
+ }
+ gmc_v6_0_enable_bif_mgls(adev, gate);
+ gmc_v6_0_enable_hdp_mgcg(adev, gate);
+ gmc_v6_0_enable_hdp_ls(adev, gate);
+
return 0;
}
-static int gmc_v6_0_set_powergating_state(void *handle,
+static int gmc_v6_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
return 0;