Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/nbio_v4_3.c')
| -rw-r--r-- | drivers/gpu/drm/amd/amdgpu/nbio_v4_3.c | 108 |
1 files changed, 104 insertions, 4 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v4_3.c b/drivers/gpu/drm/amd/amdgpu/nbio_v4_3.c
index 15eb3658d70e..f89e5f40e1a5 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v4_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v4_3.c
@@ -21,11 +21,11 @@
  *
  */
 #include "amdgpu.h"
-#include "amdgpu_atombios.h"
 #include "nbio_v4_3.h"
 #include "nbio/nbio_4_3_0_offset.h"
 #include "nbio/nbio_4_3_0_sh_mask.h"
+#include "ivsrcid/nbio/irqsrcs_nbif_7_4.h"
 #include <uapi/linux/kfd_ioctl.h>
 
 static void nbio_v4_3_remap_hdp_registers(struct amdgpu_device *adev)
@@ -337,7 +337,13 @@ const struct nbio_hdp_flush_reg nbio_v4_3_hdp_flush_reg = {
 
 static void nbio_v4_3_init_registers(struct amdgpu_device *adev)
 {
-	return;
+	if (amdgpu_ip_version(adev, NBIO_HWIP, 0) == IP_VERSION(4, 3, 0)) {
+		uint32_t data;
+
+		data = RREG32_SOC15(NBIO, 0, regRCC_DEV0_EPF2_STRAP2);
+		data &= ~RCC_DEV0_EPF2_STRAP2__STRAP_NO_SOFT_RESET_DEV0_F2_MASK;
+		WREG32_SOC15(NBIO, 0, regRCC_DEV0_EPF2_STRAP2, data);
+	}
 }
 
 static u32 nbio_v4_3_get_rom_offset(struct amdgpu_device *adev)
@@ -382,8 +388,8 @@ static void nbio_v4_3_program_aspm(struct amdgpu_device *adev)
 #ifdef CONFIG_PCIEASPM
 	uint32_t def, data;
 
-	if (!(adev->ip_versions[PCIE_HWIP][0] == IP_VERSION(7, 4, 0)) &&
-	    !(adev->ip_versions[PCIE_HWIP][0] == IP_VERSION(7, 6, 0)))
+	if (!(amdgpu_ip_version(adev, PCIE_HWIP, 0) == IP_VERSION(7, 4, 0)) &&
+	    !(amdgpu_ip_version(adev, PCIE_HWIP, 0) == IP_VERSION(7, 6, 0)))
 		return;
 
 	def = data = RREG32_SOC15(NBIO, 0, regPCIE_LC_CNTL);
@@ -465,6 +471,20 @@ static void nbio_v4_3_program_aspm(struct amdgpu_device *adev)
 #endif
 }
 
+#define MMIO_REG_HOLE_OFFSET (0x80000 - PAGE_SIZE)
+
+static void nbio_v4_3_set_reg_remap(struct amdgpu_device *adev)
+{
+	if (!amdgpu_sriov_vf(adev) && (PAGE_SIZE <= 4096)) {
+		adev->rmmio_remap.reg_offset = MMIO_REG_HOLE_OFFSET;
+		adev->rmmio_remap.bus_addr = adev->rmmio_base + MMIO_REG_HOLE_OFFSET;
+	} else {
+		adev->rmmio_remap.reg_offset = SOC15_REG_OFFSET(NBIO, 0,
+			regBIF_BX_DEV0_EPF0_VF0_HDP_MEM_COHERENCY_FLUSH_CNTL) << 2;
+		adev->rmmio_remap.bus_addr = 0;
+	}
+}
+
 const struct amdgpu_nbio_funcs nbio_v4_3_funcs = {
 	.get_hdp_flush_req_offset = nbio_v4_3_get_hdp_flush_req_offset,
 	.get_hdp_flush_done_offset = nbio_v4_3_get_hdp_flush_done_offset,
@@ -487,6 +507,7 @@ const struct amdgpu_nbio_funcs nbio_v4_3_funcs = {
 	.remap_hdp_registers = nbio_v4_3_remap_hdp_registers,
 	.get_rom_offset = nbio_v4_3_get_rom_offset,
 	.program_aspm = nbio_v4_3_program_aspm,
+	.set_reg_remap = nbio_v4_3_set_reg_remap,
 };
 
@@ -531,4 +552,83 @@ const struct amdgpu_nbio_funcs nbio_v4_3_sriov_funcs = {
 	.init_registers = nbio_v4_3_init_registers,
 	.remap_hdp_registers = nbio_v4_3_remap_hdp_registers,
 	.get_rom_offset = nbio_v4_3_get_rom_offset,
+	.set_reg_remap = nbio_v4_3_set_reg_remap,
+};
+
+static int nbio_v4_3_set_ras_err_event_athub_irq_state(struct amdgpu_device *adev,
+							struct amdgpu_irq_src *src,
+							unsigned type,
+							enum amdgpu_interrupt_state state)
+{
+	/* The ras_controller_irq enablement should be done in psp bl when it
+	 * tries to enable ras feature. Driver only need to set the correct interrupt
+	 * vector for bare-metal and sriov use case respectively
+	 */
+	uint32_t bif_doorbell_int_cntl;
+
+	bif_doorbell_int_cntl = RREG32_SOC15(NBIO, 0, regBIF_BX0_BIF_DOORBELL_INT_CNTL);
+	bif_doorbell_int_cntl = REG_SET_FIELD(bif_doorbell_int_cntl,
+					      BIF_BX0_BIF_DOORBELL_INT_CNTL,
+					      RAS_ATHUB_ERR_EVENT_INTERRUPT_DISABLE,
+					      (state == AMDGPU_IRQ_STATE_ENABLE) ? 0 : 1);
+	WREG32_SOC15(NBIO, 0, regBIF_BX0_BIF_DOORBELL_INT_CNTL, bif_doorbell_int_cntl);
+
+	return 0;
+}
+
+static int nbio_v4_3_process_err_event_athub_irq(struct amdgpu_device *adev,
+						 struct amdgpu_irq_src *source,
+						 struct amdgpu_iv_entry *entry)
+{
+	/* By design, the ih cookie for err_event_athub_irq should be written
+	 * to bif ring. since bif ring is not enabled, just leave process callback
+	 * as a dummy one.
+	 */
+	return 0;
+}
+
+static const struct amdgpu_irq_src_funcs nbio_v4_3_ras_err_event_athub_irq_funcs = {
+	.set = nbio_v4_3_set_ras_err_event_athub_irq_state,
+	.process = nbio_v4_3_process_err_event_athub_irq,
+};
+
+static void nbio_v4_3_handle_ras_err_event_athub_intr_no_bifring(struct amdgpu_device *adev)
+{
+	uint32_t bif_doorbell_int_cntl;
+
+	bif_doorbell_int_cntl = RREG32_SOC15(NBIO, 0, regBIF_BX0_BIF_DOORBELL_INT_CNTL);
+	if (REG_GET_FIELD(bif_doorbell_int_cntl,
+			  BIF_DOORBELL_INT_CNTL,
+			  RAS_ATHUB_ERR_EVENT_INTERRUPT_STATUS)) {
+		/* driver has to clear the interrupt status when bif ring is disabled */
+		bif_doorbell_int_cntl = REG_SET_FIELD(bif_doorbell_int_cntl,
+						      BIF_DOORBELL_INT_CNTL,
+						      RAS_ATHUB_ERR_EVENT_INTERRUPT_CLEAR, 1);
+		WREG32_SOC15(NBIO, 0, regBIF_BX0_BIF_DOORBELL_INT_CNTL, bif_doorbell_int_cntl);
+		amdgpu_ras_global_ras_isr(adev);
+	}
+}
+
+static int nbio_v4_3_init_ras_err_event_athub_interrupt(struct amdgpu_device *adev)
+{
+
+	int r;
+
+	/* init the irq funcs */
+	adev->nbio.ras_err_event_athub_irq.funcs =
+		&nbio_v4_3_ras_err_event_athub_irq_funcs;
+	adev->nbio.ras_err_event_athub_irq.num_types = 1;
+
+	/* register ras err event athub interrupt
+	 * nbio v4_3 uses the same irq source as nbio v7_4 */
+	r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_BIF,
+			      NBIF_7_4__SRCID__ERREVENT_ATHUB_INTERRUPT,
+			      &adev->nbio.ras_err_event_athub_irq);
+
+	return r;
+}
+
+struct amdgpu_nbio_ras nbio_v4_3_ras = {
+	.handle_ras_err_event_athub_intr_no_bifring = nbio_v4_3_handle_ras_err_event_athub_intr_no_bifring,
+	.init_ras_err_event_athub_interrupt = nbio_v4_3_init_ras_err_event_athub_interrupt,
 };
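Note on the new set_reg_remap callback: it picks where the HDP flush register is exposed, preferring the dedicated page just below the 512 KB NBIO hole on bare metal with 4K pages and falling back to the VF0 HDP flush register offset otherwise. The standalone sketch below mirrors that decision outside the driver; SKETCH_PAGE_SIZE, soc15_reg_offset_hdp_flush() and the printed values are illustrative assumptions, not the amdgpu definitions.

/* Standalone sketch of the remap decision made by nbio_v4_3_set_reg_remap().
 * The constants and the soc15_reg_offset_hdp_flush() stub are assumptions
 * for illustration only; the real values come from the nbio 4.3 headers.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SKETCH_PAGE_SIZE	4096u
/* Last page below the 512 KB NBIO "hole" in the MMIO BAR, as in the patch. */
#define MMIO_REG_HOLE_OFFSET	(0x80000u - SKETCH_PAGE_SIZE)

/* Hypothetical stand-in for SOC15_REG_OFFSET() of the VF0 HDP flush register. */
static uint32_t soc15_reg_offset_hdp_flush(void)
{
	return 0x1234; /* dword offset; placeholder value */
}

struct remap {
	uint32_t reg_offset; /* byte offset into the register BAR */
	uint64_t bus_addr;   /* bus address handed out for remapping, 0 if unused */
};

static struct remap pick_remap(bool sriov_vf, uint64_t rmmio_base)
{
	struct remap r;

	if (!sriov_vf && SKETCH_PAGE_SIZE <= 4096) {
		/* Bare metal with 4K pages: expose the dedicated MMIO hole page. */
		r.reg_offset = MMIO_REG_HOLE_OFFSET;
		r.bus_addr = rmmio_base + MMIO_REG_HOLE_OFFSET;
	} else {
		/* SR-IOV VF or larger pages: fall back to the HDP flush register. */
		r.reg_offset = soc15_reg_offset_hdp_flush() << 2; /* dwords -> bytes */
		r.bus_addr = 0;
	}
	return r;
}

int main(void)
{
	struct remap r = pick_remap(false, 0xfcb00000ull); /* made-up BAR base */

	printf("reg_offset=0x%x bus_addr=0x%llx\n",
	       r.reg_offset, (unsigned long long)r.bus_addr);
	return 0;
}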

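The RAS additions toggle single fields of the doorbell interrupt control register through amdgpu's REG_SET_FIELD/REG_GET_FIELD macros, which paste reg##__##field##_MASK and __SHIFT names from the sh_mask headers. The sketch below reproduces that pattern so the enable and clear paths can be traced outside the driver; the register name and field positions are invented for the example, not the nbio 4.3 layout.

/* Standalone sketch of the mask/shift pattern behind REG_SET_FIELD /
 * REG_GET_FIELD as used in the patch. Field definitions are hypothetical;
 * the real ones live in nbio_4_3_0_sh_mask.h.
 */
#include <stdint.h>
#include <stdio.h>

/* Hypothetical field definitions following the amdgpu naming scheme. */
#define DOORBELL_INT_CNTL__RAS_ATHUB_ERR_EVENT_INTERRUPT_DISABLE__SHIFT	0x8
#define DOORBELL_INT_CNTL__RAS_ATHUB_ERR_EVENT_INTERRUPT_DISABLE_MASK	0x00000100u
#define DOORBELL_INT_CNTL__RAS_ATHUB_ERR_EVENT_INTERRUPT_STATUS__SHIFT	0x10
#define DOORBELL_INT_CNTL__RAS_ATHUB_ERR_EVENT_INTERRUPT_STATUS_MASK	0x00010000u
#define DOORBELL_INT_CNTL__RAS_ATHUB_ERR_EVENT_INTERRUPT_CLEAR__SHIFT	0x11
#define DOORBELL_INT_CNTL__RAS_ATHUB_ERR_EVENT_INTERRUPT_CLEAR_MASK	0x00020000u

/* Same shape as the amdgpu helpers: splice or extract one field of a value. */
#define REG_SET_FIELD(orig, reg, field, val) \
	(((orig) & ~reg##__##field##_MASK) | \
	 (((uint32_t)(val) << reg##__##field##__SHIFT) & reg##__##field##_MASK))
#define REG_GET_FIELD(value, reg, field) \
	(((value) & reg##__##field##_MASK) >> reg##__##field##__SHIFT)

int main(void)
{
	/* Pretend this was read from the doorbell interrupt control register. */
	uint32_t cntl = 0x00010100u; /* DISABLE=1, STATUS=1 */

	/* Enable path: clear the DISABLE bit (state == AMDGPU_IRQ_STATE_ENABLE). */
	cntl = REG_SET_FIELD(cntl, DOORBELL_INT_CNTL,
			     RAS_ATHUB_ERR_EVENT_INTERRUPT_DISABLE, 0);

	/* Handler path: if the status bit is latched, write the CLEAR bit,
	 * mirroring the no-bifring handler before it calls the global RAS ISR. */
	if (REG_GET_FIELD(cntl, DOORBELL_INT_CNTL,
			  RAS_ATHUB_ERR_EVENT_INTERRUPT_STATUS))
		cntl = REG_SET_FIELD(cntl, DOORBELL_INT_CNTL,
				     RAS_ATHUB_ERR_EVENT_INTERRUPT_CLEAR, 1);

	/* 0x00030000: DISABLE cleared, CLEAR set, STATUS still latched. */
	printf("cntl=0x%08x\n", cntl);
	return 0;
}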