Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/vce_v4_0.c')
| -rw-r--r-- | drivers/gpu/drm/amd/amdgpu/vce_v4_0.c | 612 |
1 file changed, 200 insertions, 412 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
index 1ecd6bb90c1f..2d64002bed61 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
@@ -25,19 +25,22 @@
  */
 
 #include <linux/firmware.h>
-#include <drm/drmP.h>
+#include <drm/drm_drv.h>
+
 #include "amdgpu.h"
 #include "amdgpu_vce.h"
+#include "soc15.h"
 #include "soc15d.h"
 #include "soc15_common.h"
 #include "mmsch_v1_0.h"
-#include "vega10/soc15ip.h"
-#include "vega10/VCE/vce_4_0_offset.h"
-#include "vega10/VCE/vce_4_0_default.h"
-#include "vega10/VCE/vce_4_0_sh_mask.h"
-#include "vega10/MMHUB/mmhub_1_0_offset.h"
-#include "vega10/MMHUB/mmhub_1_0_sh_mask.h"
+#include "vce/vce_4_0_offset.h"
+#include "vce/vce_4_0_default.h"
+#include "vce/vce_4_0_sh_mask.h"
+#include "mmhub/mmhub_1_0_offset.h"
+#include "mmhub/mmhub_1_0_sh_mask.h"
+
+#include "ivsrcid/vce/irqsrcs_vce_4_0.h"
 
 #define VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK	0x02
@@ -60,9 +63,9 @@ static uint64_t vce_v4_0_ring_get_rptr(struct amdgpu_ring *ring)
 {
 	struct amdgpu_device *adev = ring->adev;
 
-	if (ring == &adev->vce.ring[0])
+	if (ring->me == 0)
 		return RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_RPTR));
-	else if (ring == &adev->vce.ring[1])
+	else if (ring->me == 1)
 		return RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_RPTR2));
 	else
 		return RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_RPTR3));
@@ -80,11 +83,11 @@ static uint64_t vce_v4_0_ring_get_wptr(struct amdgpu_ring *ring)
 	struct amdgpu_device *adev = ring->adev;
 
 	if (ring->use_doorbell)
-		return adev->wb.wb[ring->wptr_offs];
+		return *ring->wptr_cpu_addr;
 
-	if (ring == &adev->vce.ring[0])
+	if (ring->me == 0)
 		return RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR));
-	else if (ring == &adev->vce.ring[1])
+	else if (ring->me == 1)
 		return RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR2));
 	else
 		return RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR3));
@@ -103,15 +106,15 @@ static void vce_v4_0_ring_set_wptr(struct amdgpu_ring *ring)
 
 	if (ring->use_doorbell) {
 		/* XXX check if swapping is necessary on BE */
-		adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
+		*ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
 		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
 		return;
 	}
 
-	if (ring == &adev->vce.ring[0])
+	if (ring->me == 0)
 		WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR),
 			lower_32_bits(ring->wptr));
-	else if (ring == &adev->vce.ring[1])
+	else if (ring->me == 1)
 		WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR2),
 			lower_32_bits(ring->wptr));
 	else
@@ -173,6 +176,11 @@ static int vce_v4_0_mmsch_start(struct amdgpu_device *adev,
 	/* 4, set resp to zero */
 	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP), 0);
 
+	WDOORBELL32(adev->vce.ring[0].doorbell_index, 0);
+	*adev->vce.ring[0].wptr_cpu_addr = 0;
+	adev->vce.ring[0].wptr = 0;
+	adev->vce.ring[0].wptr_old = 0;
+
 	/* 5, kick off the initialization and wait until VCE_MMSCH_VF_MAILBOX_RESP becomes non-zero */
 	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_HOST), 0x10000001);
 
@@ -190,7 +198,6 @@ static int vce_v4_0_mmsch_start(struct amdgpu_device *adev,
 		dev_err(adev->dev, "failed to init MMSCH, mmVCE_MMSCH_VF_MAILBOX_RESP = %x\n", data);
 		return -EBUSY;
 	}
-	WDOORBELL32(adev->vce.ring[0].doorbell_index, 0);
 
 	return 0;
 }
@@ -238,43 +245,61 @@ static int vce_v4_0_sriov_start(struct amdgpu_device *adev)
 		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_SWAP_CNTL1), 0);
 		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VM_CTRL), 0);
 
+		offset = AMDGPU_VCE_FIRMWARE_OFFSET;
 		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
-			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VCPU_CACHE_40BIT_BAR0),
-						adev->firmware.ucode[AMDGPU_UCODE_ID_VCE].mc_addr >> 8);
-			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VCPU_CACHE_40BIT_BAR1),
-						adev->firmware.ucode[AMDGPU_UCODE_ID_VCE].mc_addr >> 8);
-			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VCPU_CACHE_40BIT_BAR2),
-						adev->firmware.ucode[AMDGPU_UCODE_ID_VCE].mc_addr >> 8);
+			uint32_t low = adev->firmware.ucode[AMDGPU_UCODE_ID_VCE].tmr_mc_addr_lo;
+			uint32_t hi = adev->firmware.ucode[AMDGPU_UCODE_ID_VCE].tmr_mc_addr_hi;
+			uint64_t tmr_mc_addr = (uint64_t)(hi) << 32 | low;
+
+			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0,
+						mmVCE_LMI_VCPU_CACHE_40BIT_BAR0), tmr_mc_addr >> 8);
+			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0,
+						mmVCE_LMI_VCPU_CACHE_64BIT_BAR0),
+						(tmr_mc_addr >> 40) & 0xff);
+			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_OFFSET0), 0);
 		} else {
-			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VCPU_CACHE_40BIT_BAR0),
+			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0,
+						mmVCE_LMI_VCPU_CACHE_40BIT_BAR0),
 						adev->vce.gpu_addr >> 8);
-			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VCPU_CACHE_40BIT_BAR1),
+			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0,
+						mmVCE_LMI_VCPU_CACHE_64BIT_BAR0),
+						(adev->vce.gpu_addr >> 40) & 0xff);
+			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_OFFSET0),
+						offset & ~0x0f000000);
+
+		}
+		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0,
+						mmVCE_LMI_VCPU_CACHE_40BIT_BAR1),
 						adev->vce.gpu_addr >> 8);
-			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VCPU_CACHE_40BIT_BAR2),
+		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0,
+						mmVCE_LMI_VCPU_CACHE_64BIT_BAR1),
+						(adev->vce.gpu_addr >> 40) & 0xff);
+		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0,
+						mmVCE_LMI_VCPU_CACHE_40BIT_BAR2),
 						adev->vce.gpu_addr >> 8);
-		}
+		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0,
+						mmVCE_LMI_VCPU_CACHE_64BIT_BAR2),
+						(adev->vce.gpu_addr >> 40) & 0xff);
 
-		offset = AMDGPU_VCE_FIRMWARE_OFFSET;
 		size = VCE_V4_0_FW_SIZE;
-		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_OFFSET0),
-					offset & 0x7FFFFFFF);
 		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_SIZE0), size);
 
-		offset += size;
+		offset = (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) ?
+			offset + size : 0;
 		size = VCE_V4_0_STACK_SIZE;
 		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_OFFSET1),
-					offset & 0x7FFFFFFF);
+					(offset & ~0x0f000000) | (1 << 24));
 		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_SIZE1), size);
 
 		offset += size;
 		size = VCE_V4_0_DATA_SIZE;
 		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_OFFSET2),
-					offset & 0x7FFFFFFF);
+					(offset & ~0x0f000000) | (2 << 24));
 		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_SIZE2), size);
 
 		MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_CTRL2), ~0x100, 0);
 		MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_SYS_INT_EN),
-						   0xffffffff, VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);
+						   VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK,
+						   VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);
 
 		/* end of MC_RESUME */
 		MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_STATUS),
@@ -296,11 +321,9 @@ static int vce_v4_0_sriov_start(struct amdgpu_device *adev)
 		memcpy((void *)init_table, &end, sizeof(struct mmsch_v1_0_cmd_end));
 		table_size += sizeof(struct mmsch_v1_0_cmd_end) / 4;
 		header->vce_table_size = table_size;
-
-		return vce_v4_0_mmsch_start(adev, &adev->virt.mm_table);
 	}
 
-	return -EINVAL; /* already initializaed ? */
+	return vce_v4_0_mmsch_start(adev, &adev->virt.mm_table);
 }
 
 /**
@@ -365,6 +388,7 @@ static int vce_v4_0_start(struct amdgpu_device *adev)
 
 static int vce_v4_0_stop(struct amdgpu_device *adev)
 {
+	/* Disable VCPU */
 	WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CNTL), 0, ~0x200001);
 
 	/* hold on ECPU */
@@ -372,8 +396,8 @@ static int vce_v4_0_stop(struct amdgpu_device *adev)
 		VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
 		~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
 
-	/* clear BUSY flag */
-	WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_STATUS), 0, ~VCE_STATUS__JOB_BUSY_MASK);
+	/* clear VCE_STATUS */
+	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_STATUS), 0);
 
 	/* Set Clock-Gating off */
 	/* if (adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG)
@@ -383,9 +407,14 @@ static int vce_v4_0_stop(struct amdgpu_device *adev)
 	return 0;
 }
 
-static int vce_v4_0_early_init(void *handle)
+static int vce_v4_0_early_init(struct amdgpu_ip_block *ip_block)
 {
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	struct amdgpu_device *adev = ip_block->adev;
+	int r;
+
+	r = amdgpu_vce_early_init(adev);
+	if (r)
+		return r;
 
 	if (amdgpu_sriov_vf(adev)) /* currently only VCN0 support SRIOV */
 		adev->vce.num_rings = 1;
@@ -398,18 +427,19 @@ static int vce_v4_0_early_init(void *handle)
 	return 0;
 }
 
-static int vce_v4_0_sw_init(void *handle)
+static int vce_v4_0_sw_init(struct amdgpu_ip_block *ip_block)
 {
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	struct amdgpu_device *adev = ip_block->adev;
 	struct amdgpu_ring *ring;
+	unsigned size;
 	int r, i;
 
-	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_VCE0, 167, &adev->vce.irq);
+	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCE0, 167, &adev->vce.irq);
 	if (r)
 		return r;
 
-	size = (VCE_V4_0_STACK_SIZE + VCE_V4_0_DATA_SIZE) * 2;
+	size = VCE_V4_0_STACK_SIZE + VCE_V4_0_DATA_SIZE;
 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
 		size += VCE_V4_0_FW_SIZE;
 
@@ -421,7 +451,7 @@ static int vce_v4_0_sw_init(void *handle)
 		const struct common_firmware_header *hdr;
 		unsigned size = amdgpu_bo_size(adev->vce.vcpu_bo);
 
-		adev->vce.saved_bo = kmalloc(size, GFP_KERNEL);
+		adev->vce.saved_bo = kvmalloc(size, GFP_KERNEL);
 		if (!adev->vce.saved_bo)
 			return -ENOMEM;
 
@@ -438,19 +468,25 @@ static int vce_v4_0_sw_init(void *handle)
 	}
 
 	for (i = 0; i < adev->vce.num_rings; i++) {
+		enum amdgpu_ring_priority_level hw_prio = amdgpu_vce_get_ring_prio(i);
+
 		ring = &adev->vce.ring[i];
+		ring->vm_hub = AMDGPU_MMHUB0(0);
 		sprintf(ring->name, "vce%d", i);
 		if (amdgpu_sriov_vf(adev)) {
 			/* DOORBELL only works under SRIOV */
 			ring->use_doorbell = true;
+
+			/* currently only use the first encoding ring for sriov,
+			 * so set unused location for other unused rings.
+			 */
 			if (i == 0)
-				ring->doorbell_index = AMDGPU_DOORBELL64_RING0_1 * 2;
-			else if (i == 1)
-				ring->doorbell_index = AMDGPU_DOORBELL64_RING2_3 * 2;
+				ring->doorbell_index = adev->doorbell_index.uvd_vce.vce_ring0_1 * 2;
 			else
-				ring->doorbell_index = AMDGPU_DOORBELL64_RING2_3 * 2 + 1;
+				ring->doorbell_index = adev->doorbell_index.uvd_vce.vce_ring2_3 * 2 + 1;
 		}
-		r = amdgpu_ring_init(adev, ring, 512, &adev->vce.irq, 0);
+		r = amdgpu_ring_init(adev, ring, 512, &adev->vce.irq, 0,
+				     hw_prio, NULL);
 		if (r)
 			return r;
 	}
@@ -462,16 +498,16 @@ static int vce_v4_0_sw_init(void *handle)
 	return r;
 }
 
-static int vce_v4_0_sw_fini(void *handle)
+static int vce_v4_0_sw_fini(struct amdgpu_ip_block *ip_block)
 {
 	int r;
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	struct amdgpu_device *adev = ip_block->adev;
 
 	/* free MM table */
 	amdgpu_virt_free_mm_table(adev);
 
 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
-		kfree(adev->vce.saved_bo);
+		kvfree(adev->vce.saved_bo);
 		adev->vce.saved_bo = NULL;
 	}
 
@@ -482,10 +518,10 @@ static int vce_v4_0_sw_fini(void *handle)
 	return amdgpu_vce_sw_fini(adev);
 }
 
-static int vce_v4_0_hw_init(void *handle)
+static int vce_v4_0_hw_init(struct amdgpu_ip_block *ip_block)
 {
 	int r, i;
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	struct amdgpu_device *adev = ip_block->adev;
 
 	if (amdgpu_sriov_vf(adev))
 		r = vce_v4_0_sriov_start(adev);
@@ -494,15 +530,10 @@ static int vce_v4_0_hw_init(void *handle)
 	if (r)
 		return r;
 
-	for (i = 0; i < adev->vce.num_rings; i++)
-		adev->vce.ring[i].ready = false;
-
 	for (i = 0; i < adev->vce.num_rings; i++) {
-		r = amdgpu_ring_test_ring(&adev->vce.ring[i]);
+		r = amdgpu_ring_test_helper(&adev->vce.ring[i]);
 		if (r)
 			return r;
-		else
-			adev->vce.ring[i].ready = true;
 	}
 
 	DRM_INFO("VCE initialized successfully.\n");
@@ -510,72 +541,101 @@ static int vce_v4_0_hw_init(void *handle)
 	return 0;
 }
 
-static int vce_v4_0_hw_fini(void *handle)
+static int vce_v4_0_hw_fini(struct amdgpu_ip_block *ip_block)
 {
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-	int i;
+	struct amdgpu_device *adev = ip_block->adev;
+
+	cancel_delayed_work_sync(&adev->vce.idle_work);
 
 	if (!amdgpu_sriov_vf(adev)) {
-		/* vce_v4_0_wait_for_idle(handle); */
+		/* vce_v4_0_wait_for_idle(ip_block); */
 		vce_v4_0_stop(adev);
 	} else {
 		/* full access mode, so don't touch any VCE register */
 		DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
 	}
 
-	for (i = 0; i < adev->vce.num_rings; i++)
-		adev->vce.ring[i].ready = false;
-
 	return 0;
 }
 
-static int vce_v4_0_suspend(void *handle)
+static int vce_v4_0_suspend(struct amdgpu_ip_block *ip_block)
 {
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-	int r;
+	struct amdgpu_device *adev = ip_block->adev;
	int r, idx;
 
 	if (adev->vce.vcpu_bo == NULL)
 		return 0;
 
-	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
-		unsigned size = amdgpu_bo_size(adev->vce.vcpu_bo);
-		void *ptr = adev->vce.cpu_addr;
+	if (drm_dev_enter(adev_to_drm(adev), &idx)) {
+		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+			unsigned size = amdgpu_bo_size(adev->vce.vcpu_bo);
+			void *ptr = adev->vce.cpu_addr;
 
-		memcpy_fromio(adev->vce.saved_bo, ptr, size);
+			memcpy_fromio(adev->vce.saved_bo, ptr, size);
+		}
+		drm_dev_exit(idx);
 	}
 
-	r = vce_v4_0_hw_fini(adev);
+	/*
+	 * Proper cleanups before halting the HW engine:
+	 * - cancel the delayed idle work
+	 * - enable powergating
+	 * - enable clockgating
+	 * - disable dpm
+	 *
+	 * TODO: to align with the VCN implementation, move the
+	 * jobs for clockgating/powergating/dpm setting to
+	 * ->set_powergating_state().
+	 */
+	cancel_delayed_work_sync(&adev->vce.idle_work);
+
+	if (adev->pm.dpm_enabled) {
+		amdgpu_dpm_enable_vce(adev, false);
+	} else {
+		amdgpu_asic_set_vce_clocks(adev, 0, 0);
+		amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+						       AMD_PG_STATE_GATE);
+		amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
						       AMD_CG_STATE_GATE);
+	}
+
+	r = vce_v4_0_hw_fini(ip_block);
 	if (r)
 		return r;
 
 	return amdgpu_vce_suspend(adev);
 }
 
-static int vce_v4_0_resume(void *handle)
+static int vce_v4_0_resume(struct amdgpu_ip_block *ip_block)
 {
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-	int r;
+	struct amdgpu_device *adev = ip_block->adev;
+	int r, idx;
 
 	if (adev->vce.vcpu_bo == NULL)
 		return -EINVAL;
 
 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
-		unsigned size = amdgpu_bo_size(adev->vce.vcpu_bo);
-		void *ptr = adev->vce.cpu_addr;
-		memcpy_toio(ptr, adev->vce.saved_bo, size);
+
+		if (drm_dev_enter(adev_to_drm(adev), &idx)) {
+			unsigned size = amdgpu_bo_size(adev->vce.vcpu_bo);
+			void *ptr = adev->vce.cpu_addr;
+
+			memcpy_toio(ptr, adev->vce.saved_bo, size);
+			drm_dev_exit(idx);
+		}
 	} else {
 		r = amdgpu_vce_resume(adev);
 		if (r)
 			return r;
 	}
 
-	return vce_v4_0_hw_init(adev);
+	return vce_v4_0_hw_init(ip_block);
 }
 
 static void vce_v4_0_mc_resume(struct amdgpu_device *adev)
 {
 	uint32_t offset, size;
+	uint64_t tmr_mc_addr;
 
 	WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_CLOCK_GATING_A), 0, ~(1 << 16));
 	WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING), 0x1FF000, ~0xFF9FF000);
@@ -588,21 +648,25 @@ static void vce_v4_0_mc_resume(struct amdgpu_device *adev)
 	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_SWAP_CNTL1), 0);
 	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VM_CTRL), 0);
 
+	offset = AMDGPU_VCE_FIRMWARE_OFFSET;
+
 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+		tmr_mc_addr = (uint64_t)(adev->firmware.ucode[AMDGPU_UCODE_ID_VCE].tmr_mc_addr_hi) << 32 |
+						adev->firmware.ucode[AMDGPU_UCODE_ID_VCE].tmr_mc_addr_lo;
 		WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VCPU_CACHE_40BIT_BAR0),
-			(adev->firmware.ucode[AMDGPU_UCODE_ID_VCE].mc_addr >> 8));
+			(tmr_mc_addr >> 8));
 		WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VCPU_CACHE_64BIT_BAR0),
-			(adev->firmware.ucode[AMDGPU_UCODE_ID_VCE].mc_addr >> 40) & 0xff);
+			(tmr_mc_addr >> 40) & 0xff);
+		WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_OFFSET0), 0);
 	} else {
 		WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VCPU_CACHE_40BIT_BAR0),
 			(adev->vce.gpu_addr >> 8));
 		WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VCPU_CACHE_64BIT_BAR0),
 			(adev->vce.gpu_addr >> 40) & 0xff);
+		WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_OFFSET0), offset & ~0x0f000000);
 	}
 
-	offset = AMDGPU_VCE_FIRMWARE_OFFSET;
 	size = VCE_V4_0_FW_SIZE;
-	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_OFFSET0), offset & ~0x0f000000);
 	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_SIZE0), size);
 
 	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VCPU_CACHE_40BIT_BAR1), (adev->vce.gpu_addr >> 8));
@@ -625,280 +689,14 @@ static void vce_v4_0_mc_resume(struct amdgpu_device *adev)
 			~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);
 }
 
-static int vce_v4_0_set_clockgating_state(void *handle,
+static int vce_v4_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
 					  enum amd_clockgating_state state)
 {
 	/* needed for driver unload*/
 	return 0;
 }
 
-#if 0
-static bool vce_v4_0_is_idle(void *handle)
-{
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-	u32 mask = 0;
-
-	mask |= (adev->vce.harvest_config & AMDGPU_VCE_HARVEST_VCE0) ? 0 : SRBM_STATUS2__VCE0_BUSY_MASK;
-	mask |= (adev->vce.harvest_config & AMDGPU_VCE_HARVEST_VCE1) ? 0 : SRBM_STATUS2__VCE1_BUSY_MASK;
-
-	return !(RREG32(mmSRBM_STATUS2) & mask);
-}
-
-static int vce_v4_0_wait_for_idle(void *handle)
-{
-	unsigned i;
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-	for (i = 0; i < adev->usec_timeout; i++)
-		if (vce_v4_0_is_idle(handle))
-			return 0;
-
-	return -ETIMEDOUT;
-}
-
-#define  VCE_STATUS_VCPU_REPORT_AUTO_BUSY_MASK	0x00000008L   /* AUTO_BUSY */
-#define  VCE_STATUS_VCPU_REPORT_RB0_BUSY_MASK	0x00000010L   /* RB0_BUSY */
-#define  VCE_STATUS_VCPU_REPORT_RB1_BUSY_MASK	0x00000020L   /* RB1_BUSY */
-#define  AMDGPU_VCE_STATUS_BUSY_MASK (VCE_STATUS_VCPU_REPORT_AUTO_BUSY_MASK | \
-				      VCE_STATUS_VCPU_REPORT_RB0_BUSY_MASK)
-
-static bool vce_v4_0_check_soft_reset(void *handle)
-{
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-	u32 srbm_soft_reset = 0;
-
-	/* According to VCE team , we should use VCE_STATUS instead
-	 * SRBM_STATUS.VCE_BUSY bit for busy status checking.
-	 * GRBM_GFX_INDEX.INSTANCE_INDEX is used to specify which VCE
-	 * instance's registers are accessed
-	 * (0 for 1st instance, 10 for 2nd instance).
-	 *
-	 *VCE_STATUS
-	 *|UENC|ACPI|AUTO ACTIVE|RB1 |RB0 |RB2 |          |FW_LOADED|JOB |
-	 *|----+----+-----------+----+----+----+----------+---------+----|
-	 *|bit8|bit7|   bit6    |bit5|bit4|bit3|   bit2   |  bit1   |bit0|
-	 *
-	 * VCE team suggest use bit 3--bit 6 for busy status check
-	 */
-	mutex_lock(&adev->grbm_idx_mutex);
-	WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0);
-	if (RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_STATUS) & AMDGPU_VCE_STATUS_BUSY_MASK) {
-		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1);
-		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1);
-	}
-	WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0x10);
-	if (RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_STATUS) & AMDGPU_VCE_STATUS_BUSY_MASK) {
-		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1);
-		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1);
-	}
-	WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0);
-	mutex_unlock(&adev->grbm_idx_mutex);
-
-	if (srbm_soft_reset) {
-		adev->vce.srbm_soft_reset = srbm_soft_reset;
-		return true;
-	} else {
-		adev->vce.srbm_soft_reset = 0;
-		return false;
-	}
-}
-
-static int vce_v4_0_soft_reset(void *handle)
-{
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-	u32 srbm_soft_reset;
-
-	if (!adev->vce.srbm_soft_reset)
-		return 0;
-	srbm_soft_reset = adev->vce.srbm_soft_reset;
-
-	if (srbm_soft_reset) {
-		u32 tmp;
-
-		tmp = RREG32(mmSRBM_SOFT_RESET);
-		tmp |= srbm_soft_reset;
-		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
-		WREG32(mmSRBM_SOFT_RESET, tmp);
-		tmp = RREG32(mmSRBM_SOFT_RESET);
-
-		udelay(50);
-
-		tmp &= ~srbm_soft_reset;
-		WREG32(mmSRBM_SOFT_RESET, tmp);
-		tmp = RREG32(mmSRBM_SOFT_RESET);
-
-		/* Wait a little for things to settle down */
-		udelay(50);
-	}
-
-	return 0;
-}
-
-static int vce_v4_0_pre_soft_reset(void *handle)
-{
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-	if (!adev->vce.srbm_soft_reset)
-		return 0;
-
-	mdelay(5);
-
-	return vce_v4_0_suspend(adev);
-}
-
-
-static int vce_v4_0_post_soft_reset(void *handle)
-{
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-	if (!adev->vce.srbm_soft_reset)
-		return 0;
-
-	mdelay(5);
-
-	return vce_v4_0_resume(adev);
-}
-
-static void vce_v4_0_override_vce_clock_gating(struct amdgpu_device *adev, bool override)
-{
-	u32 tmp, data;
-
-	tmp = data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_ARB_CTRL));
-	if (override)
-		data |= VCE_RB_ARB_CTRL__VCE_CGTT_OVERRIDE_MASK;
-	else
-		data &= ~VCE_RB_ARB_CTRL__VCE_CGTT_OVERRIDE_MASK;
-
-	if (tmp != data)
-		WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_ARB_CTRL), data);
-}
-
-static void vce_v4_0_set_vce_sw_clock_gating(struct amdgpu_device *adev,
-					     bool gated)
-{
-	u32 data;
-
-	/* Set Override to disable Clock Gating */
-	vce_v4_0_override_vce_clock_gating(adev, true);
-
-	/* This function enables MGCG which is controlled by firmware.
-	   With the clocks in the gated state the core is still
-	   accessible but the firmware will throttle the clocks on the
-	   fly as necessary.
-	*/
-	if (gated) {
-		data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_CLOCK_GATING_B));
-		data |= 0x1ff;
-		data &= ~0xef0000;
-		WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_CLOCK_GATING_B), data);
-
-		data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING));
-		data |= 0x3ff000;
-		data &= ~0xffc00000;
-		WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING), data);
-
-		data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING_2));
-		data |= 0x2;
-		data &= ~0x00010000;
-		WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING_2), data);
-
-		data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_REG_CLOCK_GATING));
-		data |= 0x37f;
-		WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_REG_CLOCK_GATING), data);
-
-		data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_DMA_DCLK_CTRL));
-		data |= VCE_UENC_DMA_DCLK_CTRL__WRDMCLK_FORCEON_MASK |
-			VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK |
-			VCE_UENC_DMA_DCLK_CTRL__REGCLK_FORCEON_MASK |
-			0x8;
-		WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_DMA_DCLK_CTRL), data);
-	} else {
-		data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_CLOCK_GATING_B));
-		data &= ~0x80010;
-		data |= 0xe70008;
-		WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_CLOCK_GATING_B), data);
-
-		data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING));
-		data |= 0xffc00000;
-		WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING), data);
-
-		data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING_2));
-		data |= 0x10000;
-		WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING_2), data);
-
-		data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_REG_CLOCK_GATING));
-		data &= ~0xffc00000;
-		WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_REG_CLOCK_GATING), data);
-
-		data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_DMA_DCLK_CTRL));
-		data &= ~(VCE_UENC_DMA_DCLK_CTRL__WRDMCLK_FORCEON_MASK |
-			  VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK |
-			  VCE_UENC_DMA_DCLK_CTRL__REGCLK_FORCEON_MASK |
-			  0x8);
-		WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_DMA_DCLK_CTRL), data);
-	}
-	vce_v4_0_override_vce_clock_gating(adev, false);
-}
-
-static void vce_v4_0_set_bypass_mode(struct amdgpu_device *adev, bool enable)
-{
-	u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL);
-
-	if (enable)
-		tmp |= GCK_DFS_BYPASS_CNTL__BYPASSECLK_MASK;
-	else
-		tmp &= ~GCK_DFS_BYPASS_CNTL__BYPASSECLK_MASK;
-
-	WREG32_SMC(ixGCK_DFS_BYPASS_CNTL, tmp);
-}
-
-static int vce_v4_0_set_clockgating_state(void *handle,
-					  enum amd_clockgating_state state)
-{
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-	bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
-	int i;
-
-	if ((adev->asic_type == CHIP_POLARIS10) ||
-		(adev->asic_type == CHIP_TONGA) ||
-		(adev->asic_type == CHIP_FIJI))
-		vce_v4_0_set_bypass_mode(adev, enable);
-
-	if (!(adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG))
-		return 0;
-
-	mutex_lock(&adev->grbm_idx_mutex);
-	for (i = 0; i < 2; i++) {
-		/* Program VCE Instance 0 or 1 if not harvested */
-		if (adev->vce.harvest_config & (1 << i))
-			continue;
-
-		WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, i);
-
-		if (enable) {
-			/* initialize VCE_CLOCK_GATING_A: Clock ON/OFF delay */
-			uint32_t data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_CLOCK_GATING_A);
-			data &= ~(0xf | 0xff0);
-			data |= ((0x0 << 0) | (0x04 << 4));
-			WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_CLOCK_GATING_A, data);
-
-			/* initialize VCE_UENC_CLOCK_GATING: Clock ON/OFF delay */
-			data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING);
-			data &= ~(0xf | 0xff0);
-			data |= ((0x0 << 0) | (0x04 << 4));
-			WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING, data);
-		}
-
-		vce_v4_0_set_vce_sw_clock_gating(adev, enable);
-	}
-
-	WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0);
-	mutex_unlock(&adev->grbm_idx_mutex);
-
-	return 0;
-}
-
-static int vce_v4_0_set_powergating_state(void *handle,
+static int vce_v4_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
 				enum amd_powergating_state state)
 {
 	/* This doesn't actually powergate the VCE block.
@@ -908,24 +706,21 @@ static int vce_v4_0_set_powergating_state(void *handle,
 	 * revisit this when there is a cleaner line between
 	 * the smc and the hw blocks
 	 */
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-	if (!(adev->pg_flags & AMD_PG_SUPPORT_VCE))
-		return 0;
+	struct amdgpu_device *adev = ip_block->adev;
 
 	if (state == AMD_PG_STATE_GATE)
-		/* XXX do we need a vce_v4_0_stop()? */
-		return 0;
+		return vce_v4_0_stop(adev);
 	else
 		return vce_v4_0_start(adev);
 }
-#endif
 
-static void vce_v4_0_ring_emit_ib(struct amdgpu_ring *ring,
-		struct amdgpu_ib *ib, unsigned int vm_id, bool ctx_switch)
+static void vce_v4_0_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_job *job,
+					struct amdgpu_ib *ib, uint32_t flags)
 {
+	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
+
 	amdgpu_ring_write(ring, VCE_CMD_IB_VM);
-	amdgpu_ring_write(ring, vm_id);
+	amdgpu_ring_write(ring, vmid);
 	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
 	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
 	amdgpu_ring_write(ring, ib->length_dw);
@@ -948,39 +743,34 @@ static void vce_v4_0_ring_insert_end(struct amdgpu_ring *ring)
 	amdgpu_ring_write(ring, VCE_CMD_END);
 }
 
-static void vce_v4_0_emit_vm_flush(struct amdgpu_ring *ring,
-			 unsigned int vm_id, uint64_t pd_addr)
+static void vce_v4_0_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
+				   uint32_t val, uint32_t mask)
 {
-	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
-	uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
-	unsigned eng = ring->vm_inv_eng;
-
-	pd_addr = amdgpu_gart_get_vm_pde(ring->adev, pd_addr);
-	pd_addr |= AMDGPU_PTE_VALID;
+	amdgpu_ring_write(ring, VCE_CMD_REG_WAIT);
+	amdgpu_ring_write(ring, reg << 2);
+	amdgpu_ring_write(ring, mask);
+	amdgpu_ring_write(ring, val);
+}
 
-	amdgpu_ring_write(ring, VCE_CMD_REG_WRITE);
-	amdgpu_ring_write(ring, (hub->ctx0_ptb_addr_hi32 + vm_id * 2) << 2);
-	amdgpu_ring_write(ring, upper_32_bits(pd_addr));
+static void vce_v4_0_emit_vm_flush(struct amdgpu_ring *ring,
+				   unsigned int vmid, uint64_t pd_addr)
+{
+	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub];
 
-	amdgpu_ring_write(ring, VCE_CMD_REG_WRITE);
-	amdgpu_ring_write(ring, (hub->ctx0_ptb_addr_lo32 + vm_id * 2) << 2);
-	amdgpu_ring_write(ring, lower_32_bits(pd_addr));
+	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
 
-	amdgpu_ring_write(ring, VCE_CMD_REG_WAIT);
-	amdgpu_ring_write(ring, (hub->ctx0_ptb_addr_lo32 + vm_id * 2) << 2);
-	amdgpu_ring_write(ring, 0xffffffff);
-	amdgpu_ring_write(ring, lower_32_bits(pd_addr));
+	/* wait for reg writes */
+	vce_v4_0_emit_reg_wait(ring, hub->ctx0_ptb_addr_lo32 +
+			       vmid * hub->ctx_addr_distance,
+			       lower_32_bits(pd_addr), 0xffffffff);
+}
 
-	/* flush TLB */
+static void vce_v4_0_emit_wreg(struct amdgpu_ring *ring,
+			       uint32_t reg, uint32_t val)
+{
 	amdgpu_ring_write(ring, VCE_CMD_REG_WRITE);
-	amdgpu_ring_write(ring, (hub->vm_inv_eng0_req + eng) << 2);
-	amdgpu_ring_write(ring, req);
-
-	/* wait for flush */
-	amdgpu_ring_write(ring, VCE_CMD_REG_WAIT);
-	amdgpu_ring_write(ring, (hub->vm_inv_eng0_ack + eng) << 2);
-	amdgpu_ring_write(ring, 1 << vm_id);
-	amdgpu_ring_write(ring, 1 << vm_id);
+	amdgpu_ring_write(ring, reg << 2);
+	amdgpu_ring_write(ring, val);
 }
 
 static int vce_v4_0_set_interrupt_state(struct amdgpu_device *adev,
@@ -990,11 +780,13 @@ static int vce_v4_0_set_interrupt_state(struct amdgpu_device *adev,
 {
 	uint32_t val = 0;
 
-	if (state == AMDGPU_IRQ_STATE_ENABLE)
-		val |= VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK;
+	if (!amdgpu_sriov_vf(adev)) {
+		if (state == AMDGPU_IRQ_STATE_ENABLE)
+			val |= VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK;
 
-	WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_SYS_INT_EN), val,
-			~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);
+		WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_SYS_INT_EN), val,
+				~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);
+	}
 
 	return 0;
 }
@@ -1004,10 +796,6 @@ static int vce_v4_0_process_interrupt(struct amdgpu_device *adev,
 {
 	DRM_DEBUG("IH: VCE\n");
 
-	WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_SYS_INT_STATUS),
-		VCE_SYS_INT_STATUS__VCE_SYS_INT_TRAP_INTERRUPT_INT_MASK,
-		~VCE_SYS_INT_STATUS__VCE_SYS_INT_TRAP_INTERRUPT_INT_MASK);
-
 	switch (entry->src_data[0]) {
 	case 0:
 	case 1:
@@ -1026,21 +814,14 @@ static int vce_v4_0_process_interrupt(struct amdgpu_device *adev,
 const struct amd_ip_funcs vce_v4_0_ip_funcs = {
 	.name = "vce_v4_0",
 	.early_init = vce_v4_0_early_init,
-	.late_init = NULL,
 	.sw_init = vce_v4_0_sw_init,
 	.sw_fini = vce_v4_0_sw_fini,
 	.hw_init = vce_v4_0_hw_init,
 	.hw_fini = vce_v4_0_hw_fini,
 	.suspend = vce_v4_0_suspend,
 	.resume = vce_v4_0_resume,
-	.is_idle = NULL /* vce_v4_0_is_idle */,
-	.wait_for_idle = NULL /* vce_v4_0_wait_for_idle */,
-	.check_soft_reset = NULL /* vce_v4_0_check_soft_reset */,
-	.pre_soft_reset = NULL /* vce_v4_0_pre_soft_reset */,
-	.soft_reset = NULL /* vce_v4_0_soft_reset */,
-	.post_soft_reset = NULL /* vce_v4_0_post_soft_reset */,
 	.set_clockgating_state = vce_v4_0_set_clockgating_state,
-	.set_powergating_state = NULL /* vce_v4_0_set_powergating_state */,
+	.set_powergating_state = vce_v4_0_set_powergating_state,
 };
 
 static const struct amdgpu_ring_funcs vce_v4_0_ring_vm_funcs = {
@@ -1048,13 +829,15 @@ static const struct amdgpu_ring_funcs vce_v4_0_ring_vm_funcs = {
 	.align_mask = 0x3f,
 	.nop = VCE_CMD_NO_OP,
 	.support_64bit_ptrs = false,
-	.vmhub = AMDGPU_MMHUB,
+	.no_user_fence = true,
 	.get_rptr = vce_v4_0_ring_get_rptr,
 	.get_wptr = vce_v4_0_ring_get_wptr,
 	.set_wptr = vce_v4_0_ring_set_wptr,
-	.parse_cs = amdgpu_vce_ring_parse_cs_vm,
+	.patch_cs_in_place = amdgpu_vce_ring_parse_cs_vm,
 	.emit_frame_size =
-		17 + /* vce_v4_0_emit_vm_flush */
+		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
+		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
+		4 + /* vce_v4_0_emit_vm_flush */
 		5 + 5 + /* amdgpu_vce_ring_emit_fence x2 vm fence */
 		1, /* vce_v4_0_ring_insert_end */
 	.emit_ib_size = 5, /* vce_v4_0_ring_emit_ib */
@@ -1068,14 +851,19 @@ static const struct amdgpu_ring_funcs vce_v4_0_ring_vm_funcs = {
 	.pad_ib = amdgpu_ring_generic_pad_ib,
 	.begin_use = amdgpu_vce_ring_begin_use,
 	.end_use = amdgpu_vce_ring_end_use,
+	.emit_wreg = vce_v4_0_emit_wreg,
+	.emit_reg_wait = vce_v4_0_emit_reg_wait,
+	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
 };
 
 static void vce_v4_0_set_ring_funcs(struct amdgpu_device *adev)
 {
 	int i;
 
-	for (i = 0; i < adev->vce.num_rings; i++)
+	for (i = 0; i < adev->vce.num_rings; i++) {
 		adev->vce.ring[i].funcs = &vce_v4_0_ring_vm_funcs;
+		adev->vce.ring[i].me = i;
+	}
 
 	DRM_INFO("VCE enabled in VM mode\n");
 }
