Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/vega20_ih.c')
 drivers/gpu/drm/amd/amdgpu/vega20_ih.c | 229 +++++++++++++++++++-------------
 1 file changed, 139 insertions(+), 90 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
index f51dfc38ac65..85846fd08ce4 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
@@ -38,6 +38,11 @@
#define mmIH_CHICKEN_ALDEBARAN 0x18d
#define mmIH_CHICKEN_ALDEBARAN_BASE_IDX 0
+#define mmIH_RETRY_INT_CAM_CNTL_ALDEBARAN 0x00ea
+#define mmIH_RETRY_INT_CAM_CNTL_ALDEBARAN_BASE_IDX 0
+#define IH_RETRY_INT_CAM_CNTL_ALDEBARAN__ENABLE__SHIFT 0x10
+#define IH_RETRY_INT_CAM_CNTL_ALDEBARAN__ENABLE_MASK 0x00010000L
+
static void vega20_ih_set_interrupt_funcs(struct amdgpu_device *adev);
/**
@@ -109,6 +114,33 @@ static int vega20_ih_toggle_ring_interrupts(struct amdgpu_device *adev,
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_ENABLE, (enable ? 1 : 0));
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_GPU_TS_ENABLE, 1);
+ if (enable) {
+ /* Unset the CLEAR_OVERFLOW bit so that the next write
+ * switches it from 0 to 1
+ */
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0);
+ if (amdgpu_sriov_vf(adev) && amdgpu_sriov_reg_indirect_ih(adev)) {
+ if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp))
+ return -ETIMEDOUT;
+ } else {
+ WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
+ }
+
+ /* Clear RB_OVERFLOW bit */
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
+ if (amdgpu_sriov_vf(adev) && amdgpu_sriov_reg_indirect_ih(adev)) {
+ if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp))
+ return -ETIMEDOUT;
+ } else {
+ WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
+ }
+
+ /* Unset the CLEAR_OVERFLOW bit immediately so new overflows
+ * can be detected.
+ */
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0);
+ }
+
/* enable_intr field is only valid in ring0 */
if (ih == &adev->irq.ih)
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, ENABLE_INTR, (enable ? 1 : 0));
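The hunk above clears a pending ring-buffer overflow by driving WPTR_OVERFLOW_CLEAR through an explicit 0 -> 1 -> 0 sequence, so that the write really is a 0 -> 1 transition as the comment in the patch notes. The stand-alone C sketch below shows the same read-modify-write pattern that REG_SET_FIELD()/WREG32_NO_KIQ() expand to; the bit position and the mmio_write() helper are assumptions for illustration, not the real IH_RB_CNTL definitions.

#include <stdint.h>
#include <stdio.h>

#define WPTR_OVERFLOW_CLEAR_SHIFT 31                        /* assumed bit position */
#define WPTR_OVERFLOW_CLEAR_MASK  (1u << WPTR_OVERFLOW_CLEAR_SHIFT)

/* stand-in for WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp) */
static void mmio_write(uint32_t val)
{
	printf("IH_RB_CNTL <= 0x%08x\n", val);
}

static void clear_ring_overflow(uint32_t tmp)
{
	/* 1. force the bit to 0 so the next write is a genuine 0 -> 1 edge */
	tmp &= ~WPTR_OVERFLOW_CLEAR_MASK;
	mmio_write(tmp);

	/* 2. set the bit; this is what clears the RB_OVERFLOW status */
	tmp |= WPTR_OVERFLOW_CLEAR_MASK;
	mmio_write(tmp);

	/* 3. drop the bit again so a future overflow can be cleared the same way */
	tmp &= ~WPTR_OVERFLOW_CLEAR_MASK;
	mmio_write(tmp);
}

int main(void)
{
	clear_ring_overflow(0x80000001u);   /* example IH_RB_CNTL value with RB_ENABLE set */
	return 0;
}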
@@ -251,36 +283,14 @@ static int vega20_ih_enable_ring(struct amdgpu_device *adev,
return 0;
}
-/**
- * vega20_ih_reroute_ih - reroute VMC/UTCL2 ih to an ih ring
- *
- * @adev: amdgpu_device pointer
- *
- * Reroute VMC and UMC interrupts on primary ih ring to
- * ih ring 1 so they won't lose when bunches of page faults
- * interrupts overwhelms the interrupt handler(VEGA20)
- */
-static void vega20_ih_reroute_ih(struct amdgpu_device *adev)
+static uint32_t vega20_setup_retry_doorbell(u32 doorbell_index)
{
- uint32_t tmp;
+ u32 val = 0;
- /* vega20 ih reroute will go through psp this
- * function is used for newer asics starting arcturus
- */
- if (adev->asic_type >= CHIP_ARCTURUS) {
- /* Reroute to IH ring 1 for VMC */
- WREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_INDEX, 0x12);
- tmp = RREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_DATA);
- tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, CLIENT_TYPE, 1);
- tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, RING_ID, 1);
- WREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_DATA, tmp);
-
- /* Reroute IH ring 1 for UTCL2 */
- WREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_INDEX, 0x1B);
- tmp = RREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_DATA);
- tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, RING_ID, 1);
- WREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_DATA, tmp);
- }
+ val = REG_SET_FIELD(val, IH_DOORBELL_RPTR, OFFSET, doorbell_index);
+ val = REG_SET_FIELD(val, IH_DOORBELL_RPTR, ENABLE, 1);
+
+ return val;
}
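vega20_setup_retry_doorbell() replaces the removed reroute helper and simply packs a doorbell offset plus an enable flag into one IH_DOORBELL_RPTR-format value. A minimal sketch of that packing follows; the field positions are assumptions for illustration, not the real register layout.

#include <stdint.h>

#define DOORBELL_OFFSET_MASK  0x03ffffffu   /* assumed: OFFSET field in the low bits */
#define DOORBELL_ENABLE_BIT   (1u << 28)    /* assumed: ENABLE flag */

static uint32_t setup_retry_doorbell(uint32_t doorbell_index)
{
	uint32_t val = 0;

	val |= doorbell_index & DOORBELL_OFFSET_MASK;  /* OFFSET field */
	val |= DOORBELL_ENABLE_BIT;                    /* ENABLE field */

	return val;
}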
/**
@@ -308,40 +318,64 @@ static int vega20_ih_irq_init(struct amdgpu_device *adev)
adev->nbio.funcs->ih_control(adev);
- if (adev->asic_type == CHIP_ARCTURUS &&
- adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
- ih_chicken = RREG32_SOC15(OSSSYS, 0, mmIH_CHICKEN);
- if (adev->irq.ih.use_bus_addr) {
- ih_chicken = REG_SET_FIELD(ih_chicken, IH_CHICKEN,
- MC_SPACE_GPA_ENABLE, 1);
+ if (!amdgpu_sriov_vf(adev)) {
+ if ((amdgpu_ip_version(adev, OSSSYS_HWIP, 0) == IP_VERSION(4, 2, 1)) &&
+ adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
+ ih_chicken = RREG32_SOC15(OSSSYS, 0, mmIH_CHICKEN);
+ if (adev->irq.ih.use_bus_addr) {
+ ih_chicken = REG_SET_FIELD(ih_chicken, IH_CHICKEN,
+ MC_SPACE_GPA_ENABLE, 1);
+ }
+ WREG32_SOC15(OSSSYS, 0, mmIH_CHICKEN, ih_chicken);
}
- WREG32_SOC15(OSSSYS, 0, mmIH_CHICKEN, ih_chicken);
- }
- /* psp firmware won't program IH_CHICKEN for aldebaran
- * driver needs to program it properly according to
- * MC_SPACE type in IH_RB_CNTL */
- if (adev->asic_type == CHIP_ALDEBARAN) {
- ih_chicken = RREG32_SOC15(OSSSYS, 0, mmIH_CHICKEN_ALDEBARAN);
- if (adev->irq.ih.use_bus_addr) {
- ih_chicken = REG_SET_FIELD(ih_chicken, IH_CHICKEN,
- MC_SPACE_GPA_ENABLE, 1);
+ /* psp firmware won't program IH_CHICKEN for aldebaran;
+ * the driver needs to program it properly according to
+ * the MC_SPACE type in IH_RB_CNTL */
+ if ((amdgpu_ip_version(adev, OSSSYS_HWIP, 0) == IP_VERSION(4, 4, 0)) ||
+ (amdgpu_ip_version(adev, OSSSYS_HWIP, 0) == IP_VERSION(4, 4, 2)) ||
+ (amdgpu_ip_version(adev, OSSSYS_HWIP, 0) == IP_VERSION(4, 4, 5))) {
+ ih_chicken = RREG32_SOC15(OSSSYS, 0, mmIH_CHICKEN_ALDEBARAN);
+ if (adev->irq.ih.use_bus_addr) {
+ ih_chicken = REG_SET_FIELD(ih_chicken, IH_CHICKEN,
+ MC_SPACE_GPA_ENABLE, 1);
+ }
+ WREG32_SOC15(OSSSYS, 0, mmIH_CHICKEN_ALDEBARAN, ih_chicken);
}
- WREG32_SOC15(OSSSYS, 0, mmIH_CHICKEN_ALDEBARAN, ih_chicken);
}
for (i = 0; i < ARRAY_SIZE(ih); i++) {
if (ih[i]->ring_size) {
- if (i == 1)
- vega20_ih_reroute_ih(adev);
ret = vega20_ih_enable_ring(adev, ih[i]);
if (ret)
return ret;
}
+ ih[i]->overflow = false;
}
+ if (!amdgpu_sriov_vf(adev))
+ adev->nbio.funcs->ih_doorbell_range(adev, adev->irq.ih.use_doorbell,
+ adev->irq.ih.doorbell_index);
+
pci_set_master(adev->pdev);
+ /* Allocate the doorbell for IH Retry CAM */
+ adev->irq.retry_cam_doorbell_index = (adev->doorbell_index.ih + 3) << 1;
+ WREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RETRY_CAM,
+ vega20_setup_retry_doorbell(adev->irq.retry_cam_doorbell_index));
+
+ /* Enable IH Retry CAM */
+ if (amdgpu_ip_version(adev, OSSSYS_HWIP, 0) == IP_VERSION(4, 4, 0) ||
+ amdgpu_ip_version(adev, OSSSYS_HWIP, 0) == IP_VERSION(4, 4, 2) ||
+ amdgpu_ip_version(adev, OSSSYS_HWIP, 0) == IP_VERSION(4, 4, 4) ||
+ amdgpu_ip_version(adev, OSSSYS_HWIP, 0) == IP_VERSION(4, 4, 5))
+ WREG32_FIELD15(OSSSYS, 0, IH_RETRY_INT_CAM_CNTL_ALDEBARAN,
+ ENABLE, 1);
+ else
+ WREG32_FIELD15(OSSSYS, 0, IH_RETRY_INT_CAM_CNTL, ENABLE, 1);
+
+ adev->irq.retry_cam_enabled = true;
+
/* enable interrupts */
ret = vega20_ih_toggle_interrupts(adev, true);
if (ret)
@@ -385,9 +419,11 @@ static u32 vega20_ih_get_wptr(struct amdgpu_device *adev,
u32 wptr, tmp;
struct amdgpu_ih_regs *ih_regs;
- if (ih == &adev->irq.ih) {
+ if (ih == &adev->irq.ih || ih == &adev->irq.ih_soft) {
/* Only ring0 supports writeback. On other rings fall back
* to register-based code with overflow checking below.
+ * The ih_soft ring doesn't have any backing hardware registers;
+ * just update wptr and return.
*/
wptr = le32_to_cpu(*ih->wptr_cpu);
@@ -402,22 +438,30 @@ static u32 vega20_ih_get_wptr(struct amdgpu_device *adev,
if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
goto out;
- wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0);
+ if (!amdgpu_sriov_vf(adev))
+ wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0);
+ else
+ ih->overflow = true;
/* When a ring buffer overflow happen start parsing interrupt
* from the last not overwritten vector (wptr + 32). Hopefully
* this should allow us to catchup.
*/
tmp = (wptr + 32) & ih->ptr_mask;
- dev_warn(adev->dev, "IH ring buffer overflow "
- "(0x%08X, 0x%08X, 0x%08X)\n",
- wptr, ih->rptr, tmp);
+ dev_warn_ratelimited(adev->dev, "%s ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
+ amdgpu_ih_ring_name(adev, ih), wptr, ih->rptr, tmp);
ih->rptr = tmp;
tmp = RREG32_NO_KIQ(ih_regs->ih_rb_cntl);
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
+ /* Unset the CLEAR_OVERFLOW bit immediately so new overflows
+ * can be detected.
+ */
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0);
+ WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
+
out:
return (wptr & ih->ptr_mask);
}
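The overflow path above resumes reading at (wptr + 32) & ih->ptr_mask, i.e. 32 bytes past the hardware write pointer, wrapped to the power-of-two ring size, so parsing restarts at the last vector that has not been overwritten. A small worked example, assuming a 256 KiB ring purely for illustration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint32_t ptr_mask = (256 * 1024) - 1;  /* ring size is a power of two */
	uint32_t wptr = 0x3fff0;                     /* write pointer near the end of the ring */

	/* skip 32 bytes past wptr and wrap, so reads restart at the
	 * oldest vector that has not been overwritten yet */
	uint32_t rptr = (wptr + 32) & ptr_mask;

	printf("resume rptr = 0x%05x\n", (unsigned int)rptr);  /* wraps to 0x00010 */
	return 0;
}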
@@ -461,6 +505,9 @@ static void vega20_ih_set_rptr(struct amdgpu_device *adev,
{
struct amdgpu_ih_regs *ih_regs;
+ if (ih == &adev->irq.ih_soft)
+ return;
+
if (ih->use_doorbell) {
/* XXX check if swapping is necessary on BE */
*ih->rptr_cpu = ih->rptr;
@@ -494,7 +541,8 @@ static int vega20_ih_self_irq(struct amdgpu_device *adev,
case 2:
schedule_work(&adev->irq.ih2_work);
break;
- default: break;
+ default:
+ break;
}
return 0;
}
@@ -509,18 +557,19 @@ static void vega20_ih_set_self_irq_funcs(struct amdgpu_device *adev)
adev->irq.self_irq.funcs = &vega20_ih_self_irq_funcs;
}
-static int vega20_ih_early_init(void *handle)
+static int vega20_ih_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
vega20_ih_set_interrupt_funcs(adev);
vega20_ih_set_self_irq_funcs(adev);
return 0;
}
-static int vega20_ih_sw_init(void *handle)
+static int vega20_ih_sw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
+ bool use_bus_addr = true;
int r;
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_IH, 0,
@@ -528,31 +577,38 @@ static int vega20_ih_sw_init(void *handle)
if (r)
return r;
- r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 256 * 1024, true);
+ if ((adev->flags & AMD_IS_APU) &&
+ (amdgpu_ip_version(adev, OSSSYS_HWIP, 0) == IP_VERSION(4, 4, 2)))
+ use_bus_addr = false;
+
+ r = amdgpu_ih_ring_init(adev, &adev->irq.ih, IH_RING_SIZE, use_bus_addr);
if (r)
return r;
adev->irq.ih.use_doorbell = true;
adev->irq.ih.doorbell_index = adev->doorbell_index.ih << 1;
- r = amdgpu_ih_ring_init(adev, &adev->irq.ih1, PAGE_SIZE, true);
+ r = amdgpu_ih_ring_init(adev, &adev->irq.ih1, PAGE_SIZE, use_bus_addr);
if (r)
return r;
adev->irq.ih1.use_doorbell = true;
adev->irq.ih1.doorbell_index = (adev->doorbell_index.ih + 1) << 1;
- r = amdgpu_ih_ring_init(adev, &adev->irq.ih2, PAGE_SIZE, true);
- if (r)
- return r;
+ if (amdgpu_ip_version(adev, OSSSYS_HWIP, 0) != IP_VERSION(4, 4, 2) &&
+ amdgpu_ip_version(adev, OSSSYS_HWIP, 0) != IP_VERSION(4, 4, 5)) {
+ r = amdgpu_ih_ring_init(adev, &adev->irq.ih2, PAGE_SIZE, true);
+ if (r)
+ return r;
- adev->irq.ih2.use_doorbell = true;
- adev->irq.ih2.doorbell_index = (adev->doorbell_index.ih + 2) << 1;
+ adev->irq.ih2.use_doorbell = true;
+ adev->irq.ih2.doorbell_index = (adev->doorbell_index.ih + 2) << 1;
+ }
/* initialize ih control registers offset */
vega20_ih_init_register_offset(adev);
- r = amdgpu_ih_ring_init(adev, &adev->irq.ih_soft, PAGE_SIZE, true);
+ r = amdgpu_ih_ring_init(adev, &adev->irq.ih_soft, IH_SW_RING_SIZE, use_bus_addr);
if (r)
return r;
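The doorbell indices assigned in sw_init() follow the same (base + n) << 1 pattern used for the retry CAM earlier in the patch; the shift appears to convert a 64-bit doorbell slot number into a 32-bit dword offset. A toy calculation with a made-up base index:

#include <stdio.h>

int main(void)
{
	unsigned int ih_base = 0xf4;   /* hypothetical adev->doorbell_index.ih */

	printf("ih ring 0 doorbell: 0x%x\n", ih_base << 1);
	printf("ih ring 1 doorbell: 0x%x\n", (ih_base + 1) << 1);
	printf("ih ring 2 doorbell: 0x%x\n", (ih_base + 2) << 1);
	printf("retry CAM doorbell: 0x%x\n", (ih_base + 3) << 1);
	return 0;
}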
@@ -561,19 +617,19 @@ static int vega20_ih_sw_init(void *handle)
return r;
}
-static int vega20_ih_sw_fini(void *handle)
+static int vega20_ih_sw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
amdgpu_irq_fini_sw(adev);
return 0;
}
-static int vega20_ih_hw_init(void *handle)
+static int vega20_ih_hw_init(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
r = vega20_ih_irq_init(adev);
if (r)
@@ -582,42 +638,36 @@ static int vega20_ih_hw_init(void *handle)
return 0;
}
-static int vega20_ih_hw_fini(void *handle)
+static int vega20_ih_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- vega20_ih_irq_disable(adev);
+ vega20_ih_irq_disable(ip_block->adev);
return 0;
}
-static int vega20_ih_suspend(void *handle)
+static int vega20_ih_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- return vega20_ih_hw_fini(adev);
+ return vega20_ih_hw_fini(ip_block);
}
-static int vega20_ih_resume(void *handle)
+static int vega20_ih_resume(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- return vega20_ih_hw_init(adev);
+ return vega20_ih_hw_init(ip_block);
}
-static bool vega20_ih_is_idle(void *handle)
+static bool vega20_ih_is_idle(struct amdgpu_ip_block *ip_block)
{
/* todo */
return true;
}
-static int vega20_ih_wait_for_idle(void *handle)
+static int vega20_ih_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
/* todo */
return -ETIMEDOUT;
}
-static int vega20_ih_soft_reset(void *handle)
+static int vega20_ih_soft_reset(struct amdgpu_ip_block *ip_block)
{
/* todo */
@@ -651,10 +701,10 @@ static void vega20_ih_update_clockgating_state(struct amdgpu_device *adev,
}
}
-static int vega20_ih_set_clockgating_state(void *handle,
+static int vega20_ih_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
vega20_ih_update_clockgating_state(adev,
state == AMD_CG_STATE_GATE);
@@ -662,7 +712,7 @@ static int vega20_ih_set_clockgating_state(void *handle,
}
-static int vega20_ih_set_powergating_state(void *handle,
+static int vega20_ih_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
return 0;
@@ -671,7 +721,6 @@ static int vega20_ih_set_powergating_state(void *handle,
const struct amd_ip_funcs vega20_ih_ip_funcs = {
.name = "vega20_ih",
.early_init = vega20_ih_early_init,
- .late_init = NULL,
.sw_init = vega20_ih_sw_init,
.sw_fini = vega20_ih_sw_fini,
.hw_init = vega20_ih_hw_init,
@@ -688,6 +737,7 @@ const struct amd_ip_funcs vega20_ih_ip_funcs = {
static const struct amdgpu_ih_funcs vega20_ih_funcs = {
.get_wptr = vega20_ih_get_wptr,
.decode_iv = amdgpu_ih_decode_iv_helper,
+ .decode_iv_ts = amdgpu_ih_decode_iv_ts_helper,
.set_rptr = vega20_ih_set_rptr
};
@@ -696,8 +746,7 @@ static void vega20_ih_set_interrupt_funcs(struct amdgpu_device *adev)
adev->irq.ih_funcs = &vega20_ih_funcs;
}
-const struct amdgpu_ip_block_version vega20_ih_ip_block =
-{
+const struct amdgpu_ip_block_version vega20_ih_ip_block = {
.type = AMD_IP_BLOCK_TYPE_IH,
.major = 4,
.minor = 2,