Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/cik_sdma.c')
-rw-r--r--   drivers/gpu/drm/amd/amdgpu/cik_sdma.c | 225
1 file changed, 100 insertions(+), 125 deletions(-)
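
The range 45795191de1f..9e8715b4739d folds together several mechanical cleanups that landed upstream between those two commits: the obsolete <drm/drmP.h> include is replaced by <linux/module.h>; firmware loading moves from open-coded request_firmware()/amdgpu_ucode_validate()/release_firmware() to the common amdgpu_ucode_request()/amdgpu_ucode_release() helpers (and from sw_init to early_init); the amd_ip_funcs callbacks take a typed struct amdgpu_ip_block * instead of an opaque void *handle; the SDMA IRQ enumerators are renamed from AMDGPU_SDMA_IRQ_TRAP* to AMDGPU_SDMA_IRQ_INSTANCE*; read-pointer bookkeeping goes through ring->rptr_cpu_addr/ring->rptr_gpu_addr rather than raw writeback-buffer offsets; and VM page-table updates are wired to per-ring drm_gpu_scheduler instances (vm_pte_scheds) instead of scheduler run queues (vm_pte_rqs).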
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index 45795191de1f..9e8715b4739d 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -21,8 +21,10 @@
  *
  * Authors: Alex Deucher
  */
+
 #include <linux/firmware.h>
-#include <drm/drmP.h>
+#include <linux/module.h>
+
 #include "amdgpu.h"
 #include "amdgpu_ucode.h"
 #include "amdgpu_trace.h"
@@ -52,7 +54,9 @@ static void cik_sdma_set_ring_funcs(struct amdgpu_device *adev);
 static void cik_sdma_set_irq_funcs(struct amdgpu_device *adev);
 static void cik_sdma_set_buffer_funcs(struct amdgpu_device *adev);
 static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev);
-static int cik_sdma_soft_reset(void *handle);
+static int cik_sdma_soft_reset(struct amdgpu_ip_block *ip_block);
+
+u32 amdgpu_cik_gpu_check_soft_reset(struct amdgpu_device *adev);
 
 MODULE_FIRMWARE("amdgpu/bonaire_sdma.bin");
 MODULE_FIRMWARE("amdgpu/bonaire_sdma1.bin");
@@ -65,16 +69,12 @@ MODULE_FIRMWARE("amdgpu/kabini_sdma1.bin");
 MODULE_FIRMWARE("amdgpu/mullins_sdma.bin");
 MODULE_FIRMWARE("amdgpu/mullins_sdma1.bin");
 
-u32 amdgpu_cik_gpu_check_soft_reset(struct amdgpu_device *adev);
-
-
 static void cik_sdma_free_microcode(struct amdgpu_device *adev)
 {
         int i;
-        for (i = 0; i < adev->sdma.num_instances; i++) {
-                release_firmware(adev->sdma.instance[i].fw);
-                adev->sdma.instance[i].fw = NULL;
-        }
+
+        for (i = 0; i < adev->sdma.num_instances; i++)
+                amdgpu_ucode_release(&adev->sdma.instance[i].fw);
 }
 
 /*
@@ -106,7 +106,6 @@ static void cik_sdma_free_microcode(struct amdgpu_device *adev)
 static int cik_sdma_init_microcode(struct amdgpu_device *adev)
 {
         const char *chip_name;
-        char fw_name[30];
         int err = 0, i;
 
         DRM_DEBUG("\n");
@@ -132,21 +131,22 @@ static int cik_sdma_init_microcode(struct amdgpu_device *adev)
 
         for (i = 0; i < adev->sdma.num_instances; i++) {
                 if (i == 0)
-                        snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma.bin", chip_name);
+                        err = amdgpu_ucode_request(adev, &adev->sdma.instance[i].fw,
+                                                   AMDGPU_UCODE_REQUIRED,
+                                                   "amdgpu/%s_sdma.bin", chip_name);
                 else
-                        snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma1.bin", chip_name);
-                err = request_firmware(&adev->sdma.instance[i].fw, fw_name, adev->dev);
+                        err = amdgpu_ucode_request(adev, &adev->sdma.instance[i].fw,
+                                                   AMDGPU_UCODE_REQUIRED,
+                                                   "amdgpu/%s_sdma1.bin", chip_name);
                 if (err)
                         goto out;
-                err = amdgpu_ucode_validate(adev->sdma.instance[i].fw);
         }
 out:
         if (err) {
-                pr_err("cik_sdma: Failed to load firmware \"%s\"\n", fw_name);
-                for (i = 0; i < adev->sdma.num_instances; i++) {
-                        release_firmware(adev->sdma.instance[i].fw);
-                        adev->sdma.instance[i].fw = NULL;
-                }
+                pr_err("cik_sdma: Failed to load firmware \"%s_sdma%s.bin\"\n",
+                       chip_name, i == 0 ? "" : "1");
+                for (i = 0; i < adev->sdma.num_instances; i++)
+                        amdgpu_ucode_release(&adev->sdma.instance[i].fw);
         }
         return err;
 }
@@ -162,7 +162,7 @@ static uint64_t cik_sdma_ring_get_rptr(struct amdgpu_ring *ring)
 {
         u32 rptr;
 
-        rptr = ring->adev->wb.wb[ring->rptr_offs];
+        rptr = *ring->rptr_cpu_addr;
 
         return (rptr & 0x3fffc) >> 2;
 }
@@ -193,7 +193,7 @@ static void cik_sdma_ring_set_wptr(struct amdgpu_ring *ring)
         struct amdgpu_device *adev = ring->adev;
 
         WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[ring->me],
-               (lower_32_bits(ring->wptr) << 2) & 0x3fffc);
+               (ring->wptr << 2) & 0x3fffc);
 }
 
 static void cik_sdma_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
@@ -213,20 +213,22 @@
  * cik_sdma_ring_emit_ib - Schedule an IB on the DMA engine
  *
  * @ring: amdgpu ring pointer
+ * @job: job to retrieve vmid from
  * @ib: IB object to schedule
+ * @flags: unused
  *
  * Schedule an IB in the DMA ring (CIK).
  */
 static void cik_sdma_ring_emit_ib(struct amdgpu_ring *ring,
                                   struct amdgpu_job *job,
                                   struct amdgpu_ib *ib,
-                                  bool ctx_switch)
+                                  uint32_t flags)
 {
         unsigned vmid = AMDGPU_JOB_GET_VMID(job);
         u32 extra_bits = vmid & 0xf;
 
         /* IB packet must end on a 8 DW boundary */
-        cik_sdma_ring_insert_nop(ring, (12 - (lower_32_bits(ring->wptr) & 7)) % 8);
+        cik_sdma_ring_insert_nop(ring, (4 - lower_32_bits(ring->wptr)) & 7);
 
         amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_INDIRECT_BUFFER, 0, extra_bits));
         amdgpu_ring_write(ring, ib->gpu_addr & 0xffffffe0); /* base must be 32 byte aligned */
@@ -265,7 +267,9 @@ static void cik_sdma_ring_emit_hdp_flush(struct amdgpu_ring *ring)
  * cik_sdma_ring_emit_fence - emit a fence on the DMA ring
  *
  * @ring: amdgpu ring pointer
- * @fence: amdgpu fence object
+ * @addr: address
+ * @seq: sequence number
+ * @flags: fence related flags
  *
  * Add a DMA fence packet to the ring to write
 * the fence seq number and DMA trap packet to generate
@@ -303,23 +307,15 @@ static void cik_sdma_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq
  */
 static void cik_sdma_gfx_stop(struct amdgpu_device *adev)
 {
-        struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
-        struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
         u32 rb_cntl;
         int i;
 
-        if ((adev->mman.buffer_funcs_ring == sdma0) ||
-            (adev->mman.buffer_funcs_ring == sdma1))
-                amdgpu_ttm_set_buffer_funcs_status(adev, false);
-
         for (i = 0; i < adev->sdma.num_instances; i++) {
                 rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
                 rb_cntl &= ~SDMA0_GFX_RB_CNTL__RB_ENABLE_MASK;
                 WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
                 WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], 0);
         }
-        sdma0->sched.ready = false;
-        sdma1->sched.ready = false;
 }
 
 /**
@@ -432,12 +428,10 @@ static int cik_sdma_gfx_resume(struct amdgpu_device *adev)
         struct amdgpu_ring *ring;
         u32 rb_cntl, ib_cntl;
         u32 rb_bufsz;
-        u32 wb_offset;
         int i, j, r;
 
         for (i = 0; i < adev->sdma.num_instances; i++) {
                 ring = &adev->sdma.instance[i].ring;
-                wb_offset = (ring->rptr_offs * 4);
 
                 mutex_lock(&adev->srbm_mutex);
                 for (j = 0; j < 16; j++) {
@@ -473,9 +467,9 @@
 
                 /* set the wb address whether it's enabled or not */
                 WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i],
-                       upper_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF);
+                       upper_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFF);
                 WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_LO + sdma_offsets[i],
-                       ((adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC));
+                       ((ring->rptr_gpu_addr) & 0xFFFFFFFC));
 
                 rb_cntl |= SDMA0_GFX_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK;
 
@@ -483,7 +477,7 @@
                 WREG32(mmSDMA0_GFX_RB_BASE_HI + sdma_offsets[i], ring->gpu_addr >> 40);
 
                 ring->wptr = 0;
-                WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], lower_32_bits(ring->wptr) << 2);
+                WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], ring->wptr << 2);
 
                 /* enable DMA RB */
                 WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i],
@@ -495,8 +489,6 @@
 #endif
                 /* enable DMA IBs */
                 WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
-
-                ring->sched.ready = true;
         }
 
         cik_sdma_enable(adev, true);
@@ -506,9 +498,6 @@
                 r = amdgpu_ring_test_helper(ring);
                 if (r)
                         return r;
-
-                if (adev->mman.buffer_funcs_ring == ring)
-                        amdgpu_ttm_set_buffer_funcs_status(adev, true);
         }
 
         return 0;
@@ -640,7 +629,7 @@ static int cik_sdma_ring_test_ring(struct amdgpu_ring *ring)
                 tmp = le32_to_cpu(adev->wb.wb[index]);
                 if (tmp == 0xDEADBEEF)
                         break;
-                DRM_UDELAY(1);
+                udelay(1);
         }
 
         if (i >= adev->usec_timeout)
@@ -655,6 +644,7 @@ error_free_wb:
  * cik_sdma_ring_test_ib - test an IB on the DMA engine
  *
  * @ring: amdgpu_ring structure holding ring information
+ * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
  *
  * Test a simple IB in the DMA ring (CIK).
  * Returns 0 on success, error on failure.
@@ -677,7 +667,8 @@
         tmp = 0xCAFEDEAD;
         adev->wb.wb[index] = cpu_to_le32(tmp);
         memset(&ib, 0, sizeof(ib));
-        r = amdgpu_ib_get(adev, NULL, 256, &ib);
+        r = amdgpu_ib_get(adev, NULL, 256,
+                          AMDGPU_IB_POOL_DIRECT, &ib);
         if (r)
                 goto err0;
 
@@ -706,7 +697,7 @@
                 r = -EINVAL;
 
 err1:
-        amdgpu_ib_free(adev, &ib, NULL);
+        amdgpu_ib_free(&ib, NULL);
         dma_fence_put(f);
 err0:
         amdgpu_device_wb_free(adev, index);
@@ -714,7 +705,7 @@ err0:
 }
 
 /**
- * cik_sdma_vm_copy_pages - update PTEs by copying them from the GART
+ * cik_sdma_vm_copy_pte - update PTEs by copying them from the GART
  *
  * @ib: indirect buffer to fill with commands
  * @pe: addr of the page entry
@@ -740,7 +731,7 @@
 }
 
 /**
- * cik_sdma_vm_write_pages - update PTEs by writing them manually
+ * cik_sdma_vm_write_pte - update PTEs by writing them manually
  *
  * @ib: indirect buffer to fill with commands
  * @pe: addr of the page entry
@@ -769,7 +760,7 @@
 }
 
 /**
- * cik_sdma_vm_set_pages - update the page tables using sDMA
+ * cik_sdma_vm_set_pte_pde - update the page tables using sDMA
  *
  * @ib: indirect buffer to fill with commands
  * @pe: addr of the page entry
@@ -798,8 +789,9 @@ static void cik_sdma_vm_set_pte_pde(struct amdgpu_ib *ib, uint64_t pe,
 }
 
 /**
- * cik_sdma_vm_pad_ib - pad the IB to the required number of dw
+ * cik_sdma_ring_pad_ib - pad the IB to the required number of dw
  *
+ * @ring: amdgpu_ring structure holding ring information
  * @ib: indirect buffer to fill with padding
  *
  */
@@ -809,7 +801,7 @@ static void cik_sdma_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
         u32 pad_count;
         int i;
 
-        pad_count = (8 - (ib->length_dw & 0x7)) % 8;
+        pad_count = (-ib->length_dw) & 7;
         for (i = 0; i < pad_count; i++)
                 if (sdma && sdma->burst_nop && (i == 0))
                         ib->ptr[ib->length_dw++] =
@@ -848,7 +840,8 @@ static void cik_sdma_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
  * cik_sdma_ring_emit_vm_flush - cik vm flush using sDMA
  *
  * @ring: amdgpu_ring pointer
- * @vm: amdgpu_vm pointer
+ * @vmid: vmid number to use
+ * @pd_addr: address
  *
  * Update the page table base and flush the VM TLB
 * using sDMA (CIK).
@@ -926,12 +919,17 @@ static void cik_enable_sdma_mgls(struct amdgpu_device *adev,
         }
 }
 
-static int cik_sdma_early_init(void *handle)
+static int cik_sdma_early_init(struct amdgpu_ip_block *ip_block)
 {
-        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+        struct amdgpu_device *adev = ip_block->adev;
+        int r;
 
         adev->sdma.num_instances = SDMA_MAX_INSTANCE;
 
+        r = cik_sdma_init_microcode(adev);
+        if (r)
+                return r;
+
         cik_sdma_set_ring_funcs(adev);
         cik_sdma_set_irq_funcs(adev);
         cik_sdma_set_buffer_funcs(adev);
@@ -940,18 +938,12 @@ static int cik_sdma_early_init(void *handle)
         return 0;
 }
 
-static int cik_sdma_sw_init(void *handle)
+static int cik_sdma_sw_init(struct amdgpu_ip_block *ip_block)
 {
         struct amdgpu_ring *ring;
-        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+        struct amdgpu_device *adev = ip_block->adev;
         int r, i;
 
-        r = cik_sdma_init_microcode(adev);
-        if (r) {
-                DRM_ERROR("Failed to load sdma firmware!\n");
-                return r;
-        }
-
         /* SDMA trap event */
         r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 224,
                               &adev->sdma.trap_irq);
@@ -976,9 +968,9 @@
                 sprintf(ring->name, "sdma%d", i);
                 r = amdgpu_ring_init(adev, ring, 1024,
                                      &adev->sdma.trap_irq,
-                                     (i == 0) ?
-                                     AMDGPU_SDMA_IRQ_TRAP0 :
-                                     AMDGPU_SDMA_IRQ_TRAP1);
+                                     (i == 0) ? AMDGPU_SDMA_IRQ_INSTANCE0 :
+                                     AMDGPU_SDMA_IRQ_INSTANCE1,
+                                     AMDGPU_RING_PRIO_DEFAULT, NULL);
                 if (r)
                         return r;
         }
@@ -986,9 +978,9 @@
         return r;
 }
 
-static int cik_sdma_sw_fini(void *handle)
+static int cik_sdma_sw_fini(struct amdgpu_ip_block *ip_block)
 {
-        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+        struct amdgpu_device *adev = ip_block->adev;
         int i;
 
         for (i = 0; i < adev->sdma.num_instances; i++)
@@ -998,21 +990,16 @@
         return 0;
 }
 
-static int cik_sdma_hw_init(void *handle)
+static int cik_sdma_hw_init(struct amdgpu_ip_block *ip_block)
 {
-        int r;
-        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+        struct amdgpu_device *adev = ip_block->adev;
 
-        r = cik_sdma_start(adev);
-        if (r)
-                return r;
-
-        return r;
+        return cik_sdma_start(adev);
 }
 
-static int cik_sdma_hw_fini(void *handle)
+static int cik_sdma_hw_fini(struct amdgpu_ip_block *ip_block)
 {
-        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+        struct amdgpu_device *adev = ip_block->adev;
 
         cik_ctx_switch_enable(adev, false);
         cik_sdma_enable(adev, false);
@@ -1020,25 +1007,21 @@
         return 0;
 }
 
-static int cik_sdma_suspend(void *handle)
+static int cik_sdma_suspend(struct amdgpu_ip_block *ip_block)
 {
-        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-        return cik_sdma_hw_fini(adev);
+        return cik_sdma_hw_fini(ip_block);
 }
 
-static int cik_sdma_resume(void *handle)
+static int cik_sdma_resume(struct amdgpu_ip_block *ip_block)
 {
-        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-        cik_sdma_soft_reset(handle);
+        cik_sdma_soft_reset(ip_block);
 
-        return cik_sdma_hw_init(adev);
+        return cik_sdma_hw_init(ip_block);
 }
 
-static bool cik_sdma_is_idle(void *handle)
+static bool cik_sdma_is_idle(struct amdgpu_ip_block *ip_block)
 {
-        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+        struct amdgpu_device *adev = ip_block->adev;
         u32 tmp = RREG32(mmSRBM_STATUS2);
 
         if (tmp & (SRBM_STATUS2__SDMA_BUSY_MASK |
@@ -1048,43 +1031,36 @@
         return true;
 }
 
-static int cik_sdma_wait_for_idle(void *handle)
+static int cik_sdma_wait_for_idle(struct amdgpu_ip_block *ip_block)
 {
         unsigned i;
-        u32 tmp;
-        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+        struct amdgpu_device *adev = ip_block->adev;
 
         for (i = 0; i < adev->usec_timeout; i++) {
-                tmp = RREG32(mmSRBM_STATUS2) & (SRBM_STATUS2__SDMA_BUSY_MASK |
-                                SRBM_STATUS2__SDMA1_BUSY_MASK);
-
-                if (!tmp)
+                if (cik_sdma_is_idle(ip_block))
                         return 0;
                 udelay(1);
         }
         return -ETIMEDOUT;
 }
 
-static int cik_sdma_soft_reset(void *handle)
+static int cik_sdma_soft_reset(struct amdgpu_ip_block *ip_block)
 {
         u32 srbm_soft_reset = 0;
-        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-        u32 tmp = RREG32(mmSRBM_STATUS2);
+        struct amdgpu_device *adev = ip_block->adev;
+        u32 tmp;
 
-        if (tmp & SRBM_STATUS2__SDMA_BUSY_MASK) {
-                /* sdma0 */
-                tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET);
-                tmp |= SDMA0_F32_CNTL__HALT_MASK;
-                WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp);
-                srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA_MASK;
-        }
-        if (tmp & SRBM_STATUS2__SDMA1_BUSY_MASK) {
-                /* sdma1 */
-                tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET);
-                tmp |= SDMA0_F32_CNTL__HALT_MASK;
-                WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp);
-                srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA1_MASK;
-        }
+        /* sdma0 */
+        tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET);
+        tmp |= SDMA0_F32_CNTL__HALT_MASK;
+        WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp);
+        srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA_MASK;
+
+        /* sdma1 */
+        tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET);
+        tmp |= SDMA0_F32_CNTL__HALT_MASK;
+        WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp);
+        srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA1_MASK;
 
         if (srbm_soft_reset) {
                 tmp = RREG32(mmSRBM_SOFT_RESET);
@@ -1114,7 +1090,7 @@ static int cik_sdma_set_trap_irq_state(struct amdgpu_device *adev,
         u32 sdma_cntl;
 
         switch (type) {
-        case AMDGPU_SDMA_IRQ_TRAP0:
+        case AMDGPU_SDMA_IRQ_INSTANCE0:
                 switch (state) {
                 case AMDGPU_IRQ_STATE_DISABLE:
                         sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET);
@@ -1130,7 +1106,7 @@
                         break;
                 }
                 break;
-        case AMDGPU_SDMA_IRQ_TRAP1:
+        case AMDGPU_SDMA_IRQ_INSTANCE1:
                 switch (state) {
                 case AMDGPU_IRQ_STATE_DISABLE:
                         sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET);
@@ -1205,11 +1181,11 @@
         return 0;
 }
 
-static int cik_sdma_set_clockgating_state(void *handle,
+static int cik_sdma_set_clockgating_state(struct amdgpu_ip_block *ip_block,
                                           enum amd_clockgating_state state)
 {
         bool gate = false;
-        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+        struct amdgpu_device *adev = ip_block->adev;
 
         if (state == AMD_CG_STATE_GATE)
                 gate = true;
@@ -1220,7 +1196,7 @@
         return 0;
 }
 
-static int cik_sdma_set_powergating_state(void *handle,
+static int cik_sdma_set_powergating_state(struct amdgpu_ip_block *ip_block,
                                           enum amd_powergating_state state)
 {
         return 0;
@@ -1229,7 +1205,6 @@ static int cik_sdma_set_powergating_state(void *handle,
 static const struct amd_ip_funcs cik_sdma_ip_funcs = {
         .name = "cik_sdma",
         .early_init = cik_sdma_early_init,
-        .late_init = NULL,
         .sw_init = cik_sdma_sw_init,
         .sw_fini = cik_sdma_sw_fini,
         .hw_init = cik_sdma_hw_init,
@@ -1299,10 +1274,11 @@ static void cik_sdma_set_irq_funcs(struct amdgpu_device *adev)
 /**
  * cik_sdma_emit_copy_buffer - copy buffer using the sDMA engine
  *
- * @ring: amdgpu_ring structure holding ring information
+ * @ib: indirect buffer to copy to
  * @src_offset: src GPU address
  * @dst_offset: dst GPU address
  * @byte_count: number of bytes to xfer
+ * @copy_flags: unused
  *
  * Copy GPU buffers using the DMA engine (CIK).
  * Used by the amdgpu ttm implementation to move pages if
@@ -1311,7 +1287,8 @@
 static void cik_sdma_emit_copy_buffer(struct amdgpu_ib *ib,
                                       uint64_t src_offset,
                                       uint64_t dst_offset,
-                                      uint32_t byte_count)
+                                      uint32_t byte_count,
+                                      uint32_t copy_flags)
 {
         ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_COPY, SDMA_COPY_SUB_OPCODE_LINEAR, 0);
         ib->ptr[ib->length_dw++] = byte_count;
@@ -1325,7 +1302,7 @@
 /**
  * cik_sdma_emit_fill_buffer - fill buffer using the sDMA engine
  *
- * @ring: amdgpu_ring structure holding ring information
+ * @ib: indirect buffer to fill
  * @src_data: value to write to buffer
  * @dst_offset: dst GPU address
  * @byte_count: number of bytes to xfer
@@ -1370,16 +1347,14 @@ static const struct amdgpu_vm_pte_funcs cik_sdma_vm_pte_funcs = {
 
 static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev)
 {
-        struct drm_gpu_scheduler *sched;
         unsigned i;
 
         adev->vm_manager.vm_pte_funcs = &cik_sdma_vm_pte_funcs;
         for (i = 0; i < adev->sdma.num_instances; i++) {
-                sched = &adev->sdma.instance[i].ring.sched;
-                adev->vm_manager.vm_pte_rqs[i] =
-                        &sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
+                adev->vm_manager.vm_pte_scheds[i] =
+                        &adev->sdma.instance[i].ring.sched;
         }
-        adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances;
+        adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
 }
 
 const struct amdgpu_ip_block_version cik_sdma_ip_block =
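
Two arithmetic rewrites in this range are easy to misread: the NOP padding in cik_sdma_ring_emit_ib() changes from (12 - (wptr & 7)) % 8 to (4 - wptr) & 7, and in cik_sdma_ring_pad_ib() from (8 - (len & 0x7)) % 8 to (-len) & 7. Both pairs are exact equivalents; the new forms just lean on two's-complement wraparound. A minimal standalone check (plain userspace C, not kernel code — written here only to illustrate the identity):

```c
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	for (uint32_t w = 0; w < 64; w++) {
		/* cik_sdma_ring_emit_ib(): old vs. new pad computation */
		uint32_t old_pad = (12 - (w & 7)) % 8;
		uint32_t new_pad = (4 - w) & 7;
		assert(old_pad == new_pad);
		/* padding leaves the write pointer at 4 mod 8, so the 4-DW
		 * INDIRECT_BUFFER packet ends on an 8-DW boundary, as the
		 * "IB packet must end on a 8 DW boundary" comment requires */
		assert(((w + new_pad) & 7) == 4);
	}
	for (uint32_t len = 0; len < 64; len++) {
		/* cik_sdma_ring_pad_ib(): old vs. new pad computation */
		assert(((8 - (len & 0x7)) % 8) == ((-len) & 7));
	}
	printf("padding expressions are equivalent\n");
	return 0;
}
```

Unsigned subtraction is reduced modulo 2^32, so (4 - w) & 7 equals (12 - (w & 7)) % 8 for every w, and (-len) & 7 is the distance to the next multiple of 8 — the same values as before, computed without the intermediate mask and modulo.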
