Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/cik_sdma.c')
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/cik_sdma.c	435
1 file changed, 223 insertions(+), 212 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index c216e16826c9..9e8715b4739d 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -21,8 +21,10 @@
  *
  * Authors: Alex Deucher
  */
+
 #include <linux/firmware.h>
-#include <drm/drmP.h>
+#include <linux/module.h>
+
 #include "amdgpu.h"
 #include "amdgpu_ucode.h"
 #include "amdgpu_trace.h"
@@ -52,29 +54,27 @@ static void cik_sdma_set_ring_funcs(struct amdgpu_device *adev);
 static void cik_sdma_set_irq_funcs(struct amdgpu_device *adev);
 static void cik_sdma_set_buffer_funcs(struct amdgpu_device *adev);
 static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev);
-static int cik_sdma_soft_reset(void *handle);
-
-MODULE_FIRMWARE("radeon/bonaire_sdma.bin");
-MODULE_FIRMWARE("radeon/bonaire_sdma1.bin");
-MODULE_FIRMWARE("radeon/hawaii_sdma.bin");
-MODULE_FIRMWARE("radeon/hawaii_sdma1.bin");
-MODULE_FIRMWARE("radeon/kaveri_sdma.bin");
-MODULE_FIRMWARE("radeon/kaveri_sdma1.bin");
-MODULE_FIRMWARE("radeon/kabini_sdma.bin");
-MODULE_FIRMWARE("radeon/kabini_sdma1.bin");
-MODULE_FIRMWARE("radeon/mullins_sdma.bin");
-MODULE_FIRMWARE("radeon/mullins_sdma1.bin");
+static int cik_sdma_soft_reset(struct amdgpu_ip_block *ip_block);
 
 u32 amdgpu_cik_gpu_check_soft_reset(struct amdgpu_device *adev);
 
+MODULE_FIRMWARE("amdgpu/bonaire_sdma.bin");
+MODULE_FIRMWARE("amdgpu/bonaire_sdma1.bin");
+MODULE_FIRMWARE("amdgpu/hawaii_sdma.bin");
+MODULE_FIRMWARE("amdgpu/hawaii_sdma1.bin");
+MODULE_FIRMWARE("amdgpu/kaveri_sdma.bin");
+MODULE_FIRMWARE("amdgpu/kaveri_sdma1.bin");
+MODULE_FIRMWARE("amdgpu/kabini_sdma.bin");
+MODULE_FIRMWARE("amdgpu/kabini_sdma1.bin");
+MODULE_FIRMWARE("amdgpu/mullins_sdma.bin");
+MODULE_FIRMWARE("amdgpu/mullins_sdma1.bin");
+
 static void cik_sdma_free_microcode(struct amdgpu_device *adev)
 {
         int i;
-        for (i = 0; i < adev->sdma.num_instances; i++) {
-                release_firmware(adev->sdma.instance[i].fw);
-                adev->sdma.instance[i].fw = NULL;
-        }
+
+        for (i = 0; i < adev->sdma.num_instances; i++)
+                amdgpu_ucode_release(&adev->sdma.instance[i].fw);
 }
 
 /*
@@ -106,7 +106,6 @@ static void cik_sdma_free_microcode(struct amdgpu_device *adev)
 static int cik_sdma_init_microcode(struct amdgpu_device *adev)
 {
         const char *chip_name;
-        char fw_name[30];
         int err = 0, i;
 
         DRM_DEBUG("\n");
@@ -132,21 +131,22 @@ static int cik_sdma_init_microcode(struct amdgpu_device *adev)
         for (i = 0; i < adev->sdma.num_instances; i++) {
                 if (i == 0)
-                        snprintf(fw_name, sizeof(fw_name), "radeon/%s_sdma.bin", chip_name);
+                        err = amdgpu_ucode_request(adev, &adev->sdma.instance[i].fw,
+                                                   AMDGPU_UCODE_REQUIRED,
+                                                   "amdgpu/%s_sdma.bin", chip_name);
                 else
-                        snprintf(fw_name, sizeof(fw_name), "radeon/%s_sdma1.bin", chip_name);
-                err = request_firmware(&adev->sdma.instance[i].fw, fw_name, adev->dev);
+                        err = amdgpu_ucode_request(adev, &adev->sdma.instance[i].fw,
+                                                   AMDGPU_UCODE_REQUIRED,
+                                                   "amdgpu/%s_sdma1.bin", chip_name);
                 if (err)
                         goto out;
-                err = amdgpu_ucode_validate(adev->sdma.instance[i].fw);
         }
 out:
         if (err) {
-                pr_err("cik_sdma: Failed to load firmware \"%s\"\n", fw_name);
-                for (i = 0; i < adev->sdma.num_instances; i++) {
-                        release_firmware(adev->sdma.instance[i].fw);
-                        adev->sdma.instance[i].fw = NULL;
-                }
+                pr_err("cik_sdma: Failed to load firmware \"%s_sdma%s.bin\"\n",
+                       chip_name, i == 0 ? "" : "1");
+                for (i = 0; i < adev->sdma.num_instances; i++)
+                        amdgpu_ucode_release(&adev->sdma.instance[i].fw);
         }
         return err;
 }
@@ -162,7 +162,7 @@ static uint64_t cik_sdma_ring_get_rptr(struct amdgpu_ring *ring)
 {
         u32 rptr;
 
-        rptr = ring->adev->wb.wb[ring->rptr_offs];
+        rptr = *ring->rptr_cpu_addr;
 
         return (rptr & 0x3fffc) >> 2;
 }
@@ -177,9 +177,8 @@ static uint64_t cik_sdma_ring_get_rptr(struct amdgpu_ring *ring)
 static uint64_t cik_sdma_ring_get_wptr(struct amdgpu_ring *ring)
 {
         struct amdgpu_device *adev = ring->adev;
-        u32 me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;
 
-        return (RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me]) & 0x3fffc) >> 2;
+        return (RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[ring->me]) & 0x3fffc) >> 2;
 }
 
 /**
@@ -192,15 +191,14 @@ static uint64_t cik_sdma_ring_get_wptr(struct amdgpu_ring *ring)
 static void cik_sdma_ring_set_wptr(struct amdgpu_ring *ring)
 {
         struct amdgpu_device *adev = ring->adev;
-        u32 me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;
 
-        WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me],
-               (lower_32_bits(ring->wptr) << 2) & 0x3fffc);
+        WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[ring->me],
+               (ring->wptr << 2) & 0x3fffc);
 }
 
 static void cik_sdma_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
 {
-        struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
+        struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
         int i;
 
         for (i = 0; i < count; i++)
@@ -215,18 +213,22 @@ static void cik_sdma_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
  * cik_sdma_ring_emit_ib - Schedule an IB on the DMA engine
  *
  * @ring: amdgpu ring pointer
+ * @job: job to retrieve vmid from
  * @ib: IB object to schedule
+ * @flags: unused
  *
  * Schedule an IB in the DMA ring (CIK).
  */
 static void cik_sdma_ring_emit_ib(struct amdgpu_ring *ring,
+                                  struct amdgpu_job *job,
                                   struct amdgpu_ib *ib,
-                                  unsigned vm_id, bool ctx_switch)
+                                  uint32_t flags)
 {
-        u32 extra_bits = vm_id & 0xf;
+        unsigned vmid = AMDGPU_JOB_GET_VMID(job);
+        u32 extra_bits = vmid & 0xf;
 
         /* IB packet must end on a 8 DW boundary */
-        cik_sdma_ring_insert_nop(ring, (12 - (lower_32_bits(ring->wptr) & 7)) % 8);
+        cik_sdma_ring_insert_nop(ring, (4 - lower_32_bits(ring->wptr)) & 7);
 
         amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_INDIRECT_BUFFER, 0, extra_bits));
         amdgpu_ring_write(ring, ib->gpu_addr & 0xffffffe0); /* base must be 32 byte aligned */
@@ -248,7 +250,7 @@ static void cik_sdma_ring_emit_hdp_flush(struct amdgpu_ring *ring)
                           SDMA_POLL_REG_MEM_EXTRA_FUNC(3)); /* == */
         u32 ref_and_mask;
 
-        if (ring == &ring->adev->sdma.instance[0].ring)
+        if (ring->me == 0)
                 ref_and_mask = GPU_HDP_FLUSH_DONE__SDMA0_MASK;
         else
                 ref_and_mask = GPU_HDP_FLUSH_DONE__SDMA1_MASK;
@@ -261,18 +263,13 @@ static void cik_sdma_ring_emit_hdp_flush(struct amdgpu_ring *ring)
         amdgpu_ring_write(ring, (0xfff << 16) | 10); /* retry count, poll interval */
 }
 
-static void cik_sdma_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
-{
-        amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
-        amdgpu_ring_write(ring, mmHDP_DEBUG0);
-        amdgpu_ring_write(ring, 1);
-}
-
 /**
  * cik_sdma_ring_emit_fence - emit a fence on the DMA ring
  *
  * @ring: amdgpu ring pointer
- * @fence: amdgpu fence object
+ * @addr: address
+ * @seq: sequence number
+ * @flags: fence related flags
  *
  * Add a DMA fence packet to the ring to write
  * the fence seq number and DMA trap packet to generate
@@ -310,23 +307,15 @@ static void cik_sdma_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq
  */
 static void cik_sdma_gfx_stop(struct amdgpu_device *adev)
 {
-        struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
-        struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
         u32 rb_cntl;
         int i;
 
-        if ((adev->mman.buffer_funcs_ring == sdma0) ||
-            (adev->mman.buffer_funcs_ring == sdma1))
-                amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size);
-
         for (i = 0; i < adev->sdma.num_instances; i++) {
                 rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
                 rb_cntl &= ~SDMA0_GFX_RB_CNTL__RB_ENABLE_MASK;
                 WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
                 WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], 0);
         }
-        sdma0->ready = false;
-        sdma1->ready = false;
 }
 
 /**
@@ -342,6 +331,63 @@ static void cik_sdma_rlc_stop(struct amdgpu_device *adev)
 }
 
 /**
+ * cik_ctx_switch_enable - stop the async dma engines context switch
+ *
+ * @adev: amdgpu_device pointer
+ * @enable: enable/disable the DMA MEs context switch.
+ *
+ * Halt or unhalt the async dma engines context switch (CIK).
+ */
+static void cik_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
+{
+        u32 f32_cntl, phase_quantum = 0;
+        int i;
+
+        if (amdgpu_sdma_phase_quantum) {
+                unsigned value = amdgpu_sdma_phase_quantum;
+                unsigned unit = 0;
+
+                while (value > (SDMA0_PHASE0_QUANTUM__VALUE_MASK >>
+                                SDMA0_PHASE0_QUANTUM__VALUE__SHIFT)) {
+                        value = (value + 1) >> 1;
+                        unit++;
+                }
+                if (unit > (SDMA0_PHASE0_QUANTUM__UNIT_MASK >>
+                            SDMA0_PHASE0_QUANTUM__UNIT__SHIFT)) {
+                        value = (SDMA0_PHASE0_QUANTUM__VALUE_MASK >>
+                                 SDMA0_PHASE0_QUANTUM__VALUE__SHIFT);
+                        unit = (SDMA0_PHASE0_QUANTUM__UNIT_MASK >>
+                                SDMA0_PHASE0_QUANTUM__UNIT__SHIFT);
+                        WARN_ONCE(1,
+                                  "clamping sdma_phase_quantum to %uK clock cycles\n",
+                                  value << unit);
+                }
+                phase_quantum =
+                        value << SDMA0_PHASE0_QUANTUM__VALUE__SHIFT |
+                        unit  << SDMA0_PHASE0_QUANTUM__UNIT__SHIFT;
+        }
+
+        for (i = 0; i < adev->sdma.num_instances; i++) {
+                f32_cntl = RREG32(mmSDMA0_CNTL + sdma_offsets[i]);
+                if (enable) {
+                        f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
+                                        AUTO_CTXSW_ENABLE, 1);
+                        if (amdgpu_sdma_phase_quantum) {
+                                WREG32(mmSDMA0_PHASE0_QUANTUM + sdma_offsets[i],
+                                       phase_quantum);
+                                WREG32(mmSDMA0_PHASE1_QUANTUM + sdma_offsets[i],
+                                       phase_quantum);
+                        }
+                } else {
+                        f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
+                                        AUTO_CTXSW_ENABLE, 0);
+                }
+
+                WREG32(mmSDMA0_CNTL + sdma_offsets[i], f32_cntl);
+        }
+}
+
+/**
  * cik_sdma_enable - stop the async dma engines
  *
  * @adev: amdgpu_device pointer
@@ -382,12 +428,10 @@ static int cik_sdma_gfx_resume(struct amdgpu_device *adev)
         struct amdgpu_ring *ring;
         u32 rb_cntl, ib_cntl;
         u32 rb_bufsz;
-        u32 wb_offset;
         int i, j, r;
 
         for (i = 0; i < adev->sdma.num_instances; i++) {
                 ring = &adev->sdma.instance[i].ring;
-                wb_offset = (ring->rptr_offs * 4);
 
                 mutex_lock(&adev->srbm_mutex);
                 for (j = 0; j < 16; j++) {
@@ -423,9 +467,9 @@ static int cik_sdma_gfx_resume(struct amdgpu_device *adev)
 
                 /* set the wb address whether it's enabled or not */
                 WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i],
-                       upper_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF);
+                       upper_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFF);
                 WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_LO + sdma_offsets[i],
-                       ((adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC));
+                       ((ring->rptr_gpu_addr) & 0xFFFFFFFC));
 
                 rb_cntl |= SDMA0_GFX_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK;
@@ -433,7 +477,7 @@ static int cik_sdma_gfx_resume(struct amdgpu_device *adev)
                 WREG32(mmSDMA0_GFX_RB_BASE_HI + sdma_offsets[i], ring->gpu_addr >> 40);
 
                 ring->wptr = 0;
-                WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], lower_32_bits(ring->wptr) << 2);
+                WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], ring->wptr << 2);
 
                 /* enable DMA RB */
                 WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i],
@@ -445,22 +489,15 @@ static int cik_sdma_gfx_resume(struct amdgpu_device *adev)
 #endif
                 /* enable DMA IBs */
                 WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
-
-                ring->ready = true;
         }
 
         cik_sdma_enable(adev, true);
 
         for (i = 0; i < adev->sdma.num_instances; i++) {
                 ring = &adev->sdma.instance[i].ring;
-                r = amdgpu_ring_test_ring(ring);
-                if (r) {
-                        ring->ready = false;
+                r = amdgpu_ring_test_helper(ring);
+                if (r)
                         return r;
-                }
-
-                if (adev->mman.buffer_funcs_ring == ring)
-                        amdgpu_ttm_set_active_vram_size(adev, adev->mc.real_vram_size);
         }
 
         return 0;
@@ -537,6 +574,8 @@ static int cik_sdma_start(struct amdgpu_device *adev)
 
         /* halt the engine before programming */
         cik_sdma_enable(adev, false);
+        /* enable sdma ring preemption */
+        cik_ctx_switch_enable(adev, true);
 
         /* start the gfx rings and rlc compute queues */
         r = cik_sdma_gfx_resume(adev);
@@ -567,22 +606,18 @@ static int cik_sdma_ring_test_ring(struct amdgpu_ring *ring)
         u32 tmp;
         u64 gpu_addr;
 
-        r = amdgpu_wb_get(adev, &index);
-        if (r) {
-                dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
+        r = amdgpu_device_wb_get(adev, &index);
+        if (r)
                 return r;
-        }
 
         gpu_addr = adev->wb.gpu_addr + (index * 4);
         tmp = 0xCAFEDEAD;
         adev->wb.wb[index] = cpu_to_le32(tmp);
 
         r = amdgpu_ring_alloc(ring, 5);
-        if (r) {
-                DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
-                amdgpu_wb_free(adev, index);
-                return r;
-        }
+        if (r)
                goto error_free_wb;
+
         amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0));
         amdgpu_ring_write(ring, lower_32_bits(gpu_addr));
         amdgpu_ring_write(ring, upper_32_bits(gpu_addr));
@@ -594,18 +629,14 @@ static int cik_sdma_ring_test_ring(struct amdgpu_ring *ring)
                 tmp = le32_to_cpu(adev->wb.wb[index]);
                 if (tmp == 0xDEADBEEF)
                         break;
-                DRM_UDELAY(1);
+                udelay(1);
         }
 
-        if (i < adev->usec_timeout) {
-                DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
-        } else {
-                DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
-                          ring->idx, tmp);
-                r = -EINVAL;
-        }
-        amdgpu_wb_free(adev, index);
+        if (i >= adev->usec_timeout)
+                r = -ETIMEDOUT;
 
+error_free_wb:
+        amdgpu_device_wb_free(adev, index);
         return r;
 }
 
@@ -613,6 +644,7 @@ static int cik_sdma_ring_test_ring(struct amdgpu_ring *ring)
  * cik_sdma_ring_test_ib - test an IB on the DMA engine
  *
  * @ring: amdgpu_ring structure holding ring information
+ * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
  *
  * Test a simple IB in the DMA ring (CIK).
  * Returns 0 on success, error on failure.
@@ -627,21 +659,18 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
         u64 gpu_addr;
         long r;
 
-        r = amdgpu_wb_get(adev, &index);
-        if (r) {
-                dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
+        r = amdgpu_device_wb_get(adev, &index);
+        if (r)
                 return r;
-        }
 
         gpu_addr = adev->wb.gpu_addr + (index * 4);
         tmp = 0xCAFEDEAD;
         adev->wb.wb[index] = cpu_to_le32(tmp);
         memset(&ib, 0, sizeof(ib));
-        r = amdgpu_ib_get(adev, NULL, 256, &ib);
-        if (r) {
-                DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
+        r = amdgpu_ib_get(adev, NULL, 256,
+                          AMDGPU_IB_POOL_DIRECT, &ib);
+        if (r)
                 goto err0;
-        }
 
         ib.ptr[0] = SDMA_PACKET(SDMA_OPCODE_WRITE,
                                 SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
@@ -656,32 +685,27 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 
         r = dma_fence_wait_timeout(f, false, timeout);
         if (r == 0) {
-                DRM_ERROR("amdgpu: IB test timed out\n");
                 r = -ETIMEDOUT;
                 goto err1;
         } else if (r < 0) {
-                DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
                 goto err1;
         }
         tmp = le32_to_cpu(adev->wb.wb[index]);
-        if (tmp == 0xDEADBEEF) {
-                DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
+        if (tmp == 0xDEADBEEF)
                 r = 0;
-        } else {
-                DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
+        else
                 r = -EINVAL;
-        }
 
 err1:
-        amdgpu_ib_free(adev, &ib, NULL);
+        amdgpu_ib_free(&ib, NULL);
         dma_fence_put(f);
 err0:
-        amdgpu_wb_free(adev, index);
+        amdgpu_device_wb_free(adev, index);
         return r;
 }
 
 /**
- * cik_sdma_vm_copy_pages - update PTEs by copying them from the GART
+ * cik_sdma_vm_copy_pte - update PTEs by copying them from the GART
  *
  * @ib: indirect buffer to fill with commands
  * @pe: addr of the page entry
@@ -707,7 +731,7 @@ static void cik_sdma_vm_copy_pte(struct amdgpu_ib *ib,
 }
 
 /**
- * cik_sdma_vm_write_pages - update PTEs by writing them manually
+ * cik_sdma_vm_write_pte - update PTEs by writing them manually
  *
  * @ib: indirect buffer to fill with commands
  * @pe: addr of the page entry
@@ -736,7 +760,7 @@ static void cik_sdma_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe,
 }
 
 /**
- * cik_sdma_vm_set_pages - update the page tables using sDMA
+ * cik_sdma_vm_set_pte_pde - update the page tables using sDMA
  *
  * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
@@ -765,18 +789,19 @@ static void cik_sdma_vm_set_pte_pde(struct amdgpu_ib *ib, uint64_t pe,
 }
 
 /**
- * cik_sdma_vm_pad_ib - pad the IB to the required number of dw
+ * cik_sdma_ring_pad_ib - pad the IB to the required number of dw
  *
+ * @ring: amdgpu_ring structure holding ring information
  * @ib: indirect buffer to fill with padding
  *
  */
 static void cik_sdma_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
 {
-        struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
+        struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
         u32 pad_count;
         int i;
 
-        pad_count = (8 - (ib->length_dw & 0x7)) % 8;
+        pad_count = (-ib->length_dw) & 7;
         for (i = 0; i < pad_count; i++)
                 if (sdma && sdma->burst_nop && (i == 0))
                         ib->ptr[ib->length_dw++] =
@@ -807,7 +832,7 @@ static void cik_sdma_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
         amdgpu_ring_write(ring, addr & 0xfffffffc);
         amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
         amdgpu_ring_write(ring, seq); /* reference */
-        amdgpu_ring_write(ring, 0xfffffff); /* mask */
+        amdgpu_ring_write(ring, 0xffffffff); /* mask */
         amdgpu_ring_write(ring, (0xfff << 16) | 4); /* retry count, poll interval */
 }
 
@@ -815,29 +840,19 @@ static void cik_sdma_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
  * cik_sdma_ring_emit_vm_flush - cik vm flush using sDMA
  *
  * @ring: amdgpu_ring pointer
- * @vm: amdgpu_vm pointer
+ * @vmid: vmid number to use
+ * @pd_addr: address
  *
  * Update the page table base and flush the VM TLB
  * using sDMA (CIK).
  */
 static void cik_sdma_ring_emit_vm_flush(struct amdgpu_ring *ring,
-                                        unsigned vm_id, uint64_t pd_addr)
+                                        unsigned vmid, uint64_t pd_addr)
 {
         u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(0) |
                           SDMA_POLL_REG_MEM_EXTRA_FUNC(0)); /* always */
 
-        amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
-        if (vm_id < 8) {
-                amdgpu_ring_write(ring, (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id));
-        } else {
-                amdgpu_ring_write(ring, (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vm_id - 8));
-        }
-        amdgpu_ring_write(ring, pd_addr >> 12);
-
-        /* flush TLB */
-        amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
-        amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST);
-        amdgpu_ring_write(ring, 1 << vm_id);
+        amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
 
         amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits));
         amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2);
@@ -847,6 +862,14 @@ static void cik_sdma_ring_emit_vm_flush(struct amdgpu_ring *ring,
         amdgpu_ring_write(ring, (0xfff << 16) | 10); /* retry count, poll interval */
 }
 
+static void cik_sdma_ring_emit_wreg(struct amdgpu_ring *ring,
+                                    uint32_t reg, uint32_t val)
+{
+        amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
+        amdgpu_ring_write(ring, reg);
+        amdgpu_ring_write(ring, val);
+}
+
 static void cik_enable_sdma_mgcg(struct amdgpu_device *adev,
                                  bool enable)
 {
@@ -896,12 +919,17 @@ static void cik_enable_sdma_mgls(struct amdgpu_device *adev,
         }
 }
 
-static int cik_sdma_early_init(void *handle)
+static int cik_sdma_early_init(struct amdgpu_ip_block *ip_block)
 {
-        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+        struct amdgpu_device *adev = ip_block->adev;
+        int r;
 
         adev->sdma.num_instances = SDMA_MAX_INSTANCE;
 
+        r = cik_sdma_init_microcode(adev);
+        if (r)
+                return r;
+
         cik_sdma_set_ring_funcs(adev);
         cik_sdma_set_irq_funcs(adev);
         cik_sdma_set_buffer_funcs(adev);
@@ -910,32 +938,26 @@ static int cik_sdma_early_init(void *handle)
         return 0;
 }
 
-static int cik_sdma_sw_init(void *handle)
+static int cik_sdma_sw_init(struct amdgpu_ip_block *ip_block)
 {
         struct amdgpu_ring *ring;
-        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+        struct amdgpu_device *adev = ip_block->adev;
         int r, i;
 
-        r = cik_sdma_init_microcode(adev);
-        if (r) {
-                DRM_ERROR("Failed to load sdma firmware!\n");
-                return r;
-        }
-
         /* SDMA trap event */
-        r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 224,
+        r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 224,
                               &adev->sdma.trap_irq);
         if (r)
                 return r;
 
         /* SDMA Privileged inst */
-        r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 241,
+        r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 241,
                               &adev->sdma.illegal_inst_irq);
         if (r)
                 return r;
 
         /* SDMA Privileged inst */
-        r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 247,
+        r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 247,
                               &adev->sdma.illegal_inst_irq);
         if (r)
                 return r;
@@ -946,9 +968,9 @@ static int cik_sdma_sw_init(void *handle)
                 sprintf(ring->name, "sdma%d", i);
                 r = amdgpu_ring_init(adev, ring, 1024,
                                      &adev->sdma.trap_irq,
-                                     (i == 0) ?
-                                     AMDGPU_SDMA_IRQ_TRAP0 :
-                                     AMDGPU_SDMA_IRQ_TRAP1);
+                                     (i == 0) ? AMDGPU_SDMA_IRQ_INSTANCE0 :
+                                     AMDGPU_SDMA_IRQ_INSTANCE1,
+                                     AMDGPU_RING_PRIO_DEFAULT, NULL);
                 if (r)
                         return r;
         }
@@ -956,9 +978,9 @@ static int cik_sdma_sw_init(void *handle)
         return r;
 }
 
-static int cik_sdma_sw_fini(void *handle)
+static int cik_sdma_sw_fini(struct amdgpu_ip_block *ip_block)
 {
-        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+        struct amdgpu_device *adev = ip_block->adev;
         int i;
 
         for (i = 0; i < adev->sdma.num_instances; i++)
@@ -968,46 +990,38 @@ static int cik_sdma_sw_fini(void *handle)
         return 0;
 }
 
-static int cik_sdma_hw_init(void *handle)
+static int cik_sdma_hw_init(struct amdgpu_ip_block *ip_block)
 {
-        int r;
-        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-        r = cik_sdma_start(adev);
-        if (r)
-                return r;
+        struct amdgpu_device *adev = ip_block->adev;
 
-        return r;
+        return cik_sdma_start(adev);
 }
 
-static int cik_sdma_hw_fini(void *handle)
+static int cik_sdma_hw_fini(struct amdgpu_ip_block *ip_block)
 {
-        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+        struct amdgpu_device *adev = ip_block->adev;
 
+        cik_ctx_switch_enable(adev, false);
         cik_sdma_enable(adev, false);
 
         return 0;
 }
 
-static int cik_sdma_suspend(void *handle)
+static int cik_sdma_suspend(struct amdgpu_ip_block *ip_block)
 {
-        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-        return cik_sdma_hw_fini(adev);
+        return cik_sdma_hw_fini(ip_block);
 }
 
-static int cik_sdma_resume(void *handle)
+static int cik_sdma_resume(struct amdgpu_ip_block *ip_block)
 {
-        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+        cik_sdma_soft_reset(ip_block);
 
-        cik_sdma_soft_reset(handle);
-
-        return cik_sdma_hw_init(adev);
+        return cik_sdma_hw_init(ip_block);
 }
 
-static bool cik_sdma_is_idle(void *handle)
+static bool cik_sdma_is_idle(struct amdgpu_ip_block *ip_block)
 {
-        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+        struct amdgpu_device *adev = ip_block->adev;
         u32 tmp = RREG32(mmSRBM_STATUS2);
 
         if (tmp & (SRBM_STATUS2__SDMA_BUSY_MASK |
@@ -1017,43 +1031,36 @@ static bool cik_sdma_is_idle(void *handle)
         return true;
 }
 
-static int cik_sdma_wait_for_idle(void *handle)
+static int cik_sdma_wait_for_idle(struct amdgpu_ip_block *ip_block)
 {
         unsigned i;
-        u32 tmp;
-        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+        struct amdgpu_device *adev = ip_block->adev;
 
         for (i = 0; i < adev->usec_timeout; i++) {
-                tmp = RREG32(mmSRBM_STATUS2) & (SRBM_STATUS2__SDMA_BUSY_MASK |
-                                SRBM_STATUS2__SDMA1_BUSY_MASK);
-
-                if (!tmp)
+                if (cik_sdma_is_idle(ip_block))
                         return 0;
                 udelay(1);
         }
         return -ETIMEDOUT;
 }
 
-static int cik_sdma_soft_reset(void *handle)
+static int cik_sdma_soft_reset(struct amdgpu_ip_block *ip_block)
 {
         u32 srbm_soft_reset = 0;
-        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-        u32 tmp = RREG32(mmSRBM_STATUS2);
+        struct amdgpu_device *adev = ip_block->adev;
+        u32 tmp;
 
-        if (tmp & SRBM_STATUS2__SDMA_BUSY_MASK) {
-                /* sdma0 */
-                tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET);
-                tmp |= SDMA0_F32_CNTL__HALT_MASK;
-                WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp);
-                srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA_MASK;
-        }
-        if (tmp & SRBM_STATUS2__SDMA1_BUSY_MASK) {
-                /* sdma1 */
-                tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET);
-                tmp |= SDMA0_F32_CNTL__HALT_MASK;
-                WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp);
-                srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA1_MASK;
-        }
+        /* sdma0 */
+        tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET);
+        tmp |= SDMA0_F32_CNTL__HALT_MASK;
+        WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp);
+        srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA_MASK;
+
+        /* sdma1 */
+        tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET);
+        tmp |= SDMA0_F32_CNTL__HALT_MASK;
+        WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp);
+        srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA1_MASK;
 
         if (srbm_soft_reset) {
                 tmp = RREG32(mmSRBM_SOFT_RESET);
@@ -1083,7 +1090,7 @@ static int cik_sdma_set_trap_irq_state(struct amdgpu_device *adev,
         u32 sdma_cntl;
 
         switch (type) {
-        case AMDGPU_SDMA_IRQ_TRAP0:
+        case AMDGPU_SDMA_IRQ_INSTANCE0:
                 switch (state) {
                 case AMDGPU_IRQ_STATE_DISABLE:
                         sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET);
@@ -1099,7 +1106,7 @@ static int cik_sdma_set_trap_irq_state(struct amdgpu_device *adev,
                         break;
                 }
                 break;
-        case AMDGPU_SDMA_IRQ_TRAP1:
+        case AMDGPU_SDMA_IRQ_INSTANCE1:
                 switch (state) {
                 case AMDGPU_IRQ_STATE_DISABLE:
                         sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET);
@@ -1166,16 +1173,19 @@ static int cik_sdma_process_illegal_inst_irq(struct amdgpu_device *adev,
                                  struct amdgpu_irq_src *source,
                                  struct amdgpu_iv_entry *entry)
 {
+        u8 instance_id;
+
         DRM_ERROR("Illegal instruction in SDMA command stream\n");
-        schedule_work(&adev->reset_work);
+        instance_id = (entry->ring_id & 0x3) >> 0;
+        drm_sched_fault(&adev->sdma.instance[instance_id].ring.sched);
         return 0;
 }
 
-static int cik_sdma_set_clockgating_state(void *handle,
+static int cik_sdma_set_clockgating_state(struct amdgpu_ip_block *ip_block,
                                           enum amd_clockgating_state state)
 {
         bool gate = false;
-        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+        struct amdgpu_device *adev = ip_block->adev;
 
         if (state == AMD_CG_STATE_GATE)
                 gate = true;
@@ -1186,7 +1196,7 @@ static int cik_sdma_set_clockgating_state(void *handle,
         return 0;
 }
 
-static int cik_sdma_set_powergating_state(void *handle,
+static int cik_sdma_set_powergating_state(struct amdgpu_ip_block *ip_block,
                                           enum amd_powergating_state state)
 {
         return 0;
@@ -1195,7 +1205,6 @@ static int cik_sdma_set_powergating_state(void *handle,
 static const struct amd_ip_funcs cik_sdma_ip_funcs = {
         .name = "cik_sdma",
         .early_init = cik_sdma_early_init,
-        .late_init = NULL,
         .sw_init = cik_sdma_sw_init,
         .sw_fini = cik_sdma_sw_fini,
         .hw_init = cik_sdma_hw_init,
@@ -1219,9 +1228,9 @@ static const struct amdgpu_ring_funcs cik_sdma_ring_funcs = {
         .set_wptr = cik_sdma_ring_set_wptr,
         .emit_frame_size =
                 6 + /* cik_sdma_ring_emit_hdp_flush */
-                3 + /* cik_sdma_ring_emit_hdp_invalidate */
+                3 + /* hdp invalidate */
                 6 + /* cik_sdma_ring_emit_pipeline_sync */
-                12 + /* cik_sdma_ring_emit_vm_flush */
+                CIK_FLUSH_GPU_TLB_NUM_WREG * 3 + 6 + /* cik_sdma_ring_emit_vm_flush */
                 9 + 9 + 9, /* cik_sdma_ring_emit_fence x3 for user fence, vm fence */
         .emit_ib_size = 7 + 4, /* cik_sdma_ring_emit_ib */
         .emit_ib = cik_sdma_ring_emit_ib,
@@ -1229,19 +1238,21 @@ static const struct amdgpu_ring_funcs cik_sdma_ring_funcs = {
         .emit_pipeline_sync = cik_sdma_ring_emit_pipeline_sync,
         .emit_vm_flush = cik_sdma_ring_emit_vm_flush,
         .emit_hdp_flush = cik_sdma_ring_emit_hdp_flush,
-        .emit_hdp_invalidate = cik_sdma_ring_emit_hdp_invalidate,
         .test_ring = cik_sdma_ring_test_ring,
         .test_ib = cik_sdma_ring_test_ib,
         .insert_nop = cik_sdma_ring_insert_nop,
         .pad_ib = cik_sdma_ring_pad_ib,
+        .emit_wreg = cik_sdma_ring_emit_wreg,
 };
 
 static void cik_sdma_set_ring_funcs(struct amdgpu_device *adev)
 {
         int i;
 
-        for (i = 0; i < adev->sdma.num_instances; i++)
+        for (i = 0; i < adev->sdma.num_instances; i++) {
                 adev->sdma.instance[i].ring.funcs = &cik_sdma_ring_funcs;
+                adev->sdma.instance[i].ring.me = i;
+        }
 }
 
 static const struct amdgpu_irq_src_funcs cik_sdma_trap_irq_funcs = {
@@ -1263,10 +1274,11 @@ static void cik_sdma_set_irq_funcs(struct amdgpu_device *adev)
 /**
  * cik_sdma_emit_copy_buffer - copy buffer using the sDMA engine
  *
- * @ring: amdgpu_ring structure holding ring information
+ * @ib: indirect buffer to copy to
  * @src_offset: src GPU address
  * @dst_offset: dst GPU address
  * @byte_count: number of bytes to xfer
+ * @copy_flags: unused
  *
  * Copy GPU buffers using the DMA engine (CIK).
  * Used by the amdgpu ttm implementation to move pages if
@@ -1275,7 +1287,8 @@ static void cik_sdma_set_irq_funcs(struct amdgpu_device *adev)
 static void cik_sdma_emit_copy_buffer(struct amdgpu_ib *ib,
                                       uint64_t src_offset,
                                       uint64_t dst_offset,
-                                      uint32_t byte_count)
+                                      uint32_t byte_count,
+                                      uint32_t copy_flags)
 {
         ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_COPY, SDMA_COPY_SUB_OPCODE_LINEAR, 0);
         ib->ptr[ib->length_dw++] = byte_count;
@@ -1289,7 +1302,7 @@ static void cik_sdma_emit_copy_buffer(struct amdgpu_ib *ib,
 /**
  * cik_sdma_emit_fill_buffer - fill buffer using the sDMA engine
  *
- * @ring: amdgpu_ring structure holding ring information
+ * @ib: indirect buffer to fill
  * @src_data: value to write to buffer
  * @dst_offset: dst GPU address
  * @byte_count: number of bytes to xfer
@@ -1320,14 +1333,14 @@ static const struct amdgpu_buffer_funcs cik_sdma_buffer_funcs = {
 
 static void cik_sdma_set_buffer_funcs(struct amdgpu_device *adev)
 {
-        if (adev->mman.buffer_funcs == NULL) {
-                adev->mman.buffer_funcs = &cik_sdma_buffer_funcs;
-                adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
-        }
+        adev->mman.buffer_funcs = &cik_sdma_buffer_funcs;
+        adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
 }
 
 static const struct amdgpu_vm_pte_funcs cik_sdma_vm_pte_funcs = {
+        .copy_pte_num_dw = 7,
         .copy_pte = cik_sdma_vm_copy_pte,
+
         .write_pte = cik_sdma_vm_write_pte,
         .set_pte_pde = cik_sdma_vm_set_pte_pde,
 };
@@ -1336,14 +1349,12 @@ static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev)
 {
         unsigned i;
 
-        if (adev->vm_manager.vm_pte_funcs == NULL) {
-                adev->vm_manager.vm_pte_funcs = &cik_sdma_vm_pte_funcs;
-                for (i = 0; i < adev->sdma.num_instances; i++)
-                        adev->vm_manager.vm_pte_rings[i] =
-                                &adev->sdma.instance[i].ring;
-
-                adev->vm_manager.vm_pte_num_rings = adev->sdma.num_instances;
+        adev->vm_manager.vm_pte_funcs = &cik_sdma_vm_pte_funcs;
+        for (i = 0; i < adev->sdma.num_instances; i++) {
+                adev->vm_manager.vm_pte_scheds[i] =
+                        &adev->sdma.instance[i].ring.sched;
         }
+        adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
 }
 
 const struct amdgpu_ip_block_version cik_sdma_ip_block =
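
Two hunks above replace modulo-based pad computations with masked ones: the nop pad in cik_sdma_ring_emit_ib() changes from (12 - (lower_32_bits(ring->wptr) & 7)) % 8 to (4 - lower_32_bits(ring->wptr)) & 7, and pad_count in cik_sdma_ring_pad_ib() changes from (8 - (ib->length_dw & 0x7)) % 8 to (-ib->length_dw) & 7. The standalone check below (not part of the patch, illustration only) confirms both rewrites are behavior-preserving; each expression depends only on its operand modulo 8, so a small exhaustive loop covers every case, including the unsigned wrap in (4 - x) and -x.

/* Sanity check for the two alignment rewrites in this patch (illustration only). */
#include <assert.h>
#include <stdio.h>

int main(void)
{
        unsigned int x;

        for (x = 0; x < 64; x++) {
                /* nop pad before an INDIRECT_BUFFER packet: old vs new */
                assert((12 - (x & 7)) % 8 == ((4 - x) & 7));
                /* IB padding to a multiple of 8 dwords: old vs new */
                assert((8 - (x & 0x7)) % 8 == (-x & 7));
        }
        printf("both pad rewrites agree for all relevant values\n");
        return 0;
}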

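The new cik_ctx_switch_enable() packs the amdgpu_sdma_phase_quantum module parameter into the PHASE0/PHASE1 quantum registers as a value/unit pair: the requested quantum is halved (rounding up) until it fits the VALUE field, each halving bumping UNIT, and both fields are clamped at their maxima (the WARN_ONCE path). Below is a minimal sketch of that encoding, assuming for illustration an 8-bit VALUE field at bit 8 and a 4-bit UNIT field at bit 0; the real widths and shifts come from the CIK register headers, not from this sketch.

/* Illustrative encoder mirroring the loop in cik_ctx_switch_enable();
 * the field layout here is an assumption, not the CIK register definition.
 */
#include <stdio.h>

#define VALUE_MAX 0xffu         /* assumption: 8-bit VALUE field */
#define UNIT_MAX  0xfu          /* assumption: 4-bit UNIT field */

static unsigned int encode_phase_quantum(unsigned int value)
{
        unsigned int unit = 0;

        /* halve (rounding up) until the value fits, counting shifts in unit */
        while (value > VALUE_MAX) {
                value = (value + 1) >> 1;
                unit++;
        }
        /* clamp, as the WARN_ONCE branch in the patch does */
        if (unit > UNIT_MAX) {
                value = VALUE_MAX;
                unit = UNIT_MAX;
        }
        return value << 8 | unit;       /* assumed shifts: VALUE at bit 8, UNIT at bit 0 */
}

int main(void)
{
        printf("32   -> 0x%04x\n", encode_phase_quantum(32));   /* fits directly */
        printf("1000 -> 0x%04x\n", encode_phase_quantum(1000)); /* halved twice: 250 << 8 | 2 */
        return 0;
}

With this encoding the programmed quantum is roughly value << unit (in K clock cycles, per the WARN_ONCE message), so oversized requests lose low-order precision instead of being rejected.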