Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c')
-rw-r--r--   drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c   566
1 file changed, 273 insertions, 293 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c index 1d766ae98dc8..1c076bd1cf73 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c @@ -21,8 +21,11 @@ * * Authors: Alex Deucher */ + +#include <linux/delay.h> #include <linux/firmware.h> -#include <drm/drmP.h> +#include <linux/module.h> + #include "amdgpu.h" #include "amdgpu_ucode.h" #include "amdgpu_trace.h" @@ -44,6 +47,8 @@ #include "tonga_sdma_pkt_open.h" +#include "ivsrcid/ivsrcid_vislands30.h" + static void sdma_v3_0_set_ring_funcs(struct amdgpu_device *adev); static void sdma_v3_0_set_buffer_funcs(struct amdgpu_device *adev); static void sdma_v3_0_set_vm_pte_funcs(struct amdgpu_device *adev); @@ -62,6 +67,8 @@ MODULE_FIRMWARE("amdgpu/polaris11_sdma.bin"); MODULE_FIRMWARE("amdgpu/polaris11_sdma1.bin"); MODULE_FIRMWARE("amdgpu/polaris12_sdma.bin"); MODULE_FIRMWARE("amdgpu/polaris12_sdma1.bin"); +MODULE_FIRMWARE("amdgpu/vegam_sdma.bin"); +MODULE_FIRMWARE("amdgpu/vegam_sdma1.bin"); static const u32 sdma_offsets[SDMA_MAX_INSTANCE] = @@ -192,47 +199,48 @@ static void sdma_v3_0_init_golden_registers(struct amdgpu_device *adev) { switch (adev->asic_type) { case CHIP_FIJI: - amdgpu_program_register_sequence(adev, - fiji_mgcg_cgcg_init, - (const u32)ARRAY_SIZE(fiji_mgcg_cgcg_init)); - amdgpu_program_register_sequence(adev, - golden_settings_fiji_a10, - (const u32)ARRAY_SIZE(golden_settings_fiji_a10)); + amdgpu_device_program_register_sequence(adev, + fiji_mgcg_cgcg_init, + ARRAY_SIZE(fiji_mgcg_cgcg_init)); + amdgpu_device_program_register_sequence(adev, + golden_settings_fiji_a10, + ARRAY_SIZE(golden_settings_fiji_a10)); break; case CHIP_TONGA: - amdgpu_program_register_sequence(adev, - tonga_mgcg_cgcg_init, - (const u32)ARRAY_SIZE(tonga_mgcg_cgcg_init)); - amdgpu_program_register_sequence(adev, - golden_settings_tonga_a11, - (const u32)ARRAY_SIZE(golden_settings_tonga_a11)); + amdgpu_device_program_register_sequence(adev, + tonga_mgcg_cgcg_init, + ARRAY_SIZE(tonga_mgcg_cgcg_init)); + amdgpu_device_program_register_sequence(adev, + golden_settings_tonga_a11, + ARRAY_SIZE(golden_settings_tonga_a11)); break; case CHIP_POLARIS11: case CHIP_POLARIS12: - amdgpu_program_register_sequence(adev, - golden_settings_polaris11_a11, - (const u32)ARRAY_SIZE(golden_settings_polaris11_a11)); + case CHIP_VEGAM: + amdgpu_device_program_register_sequence(adev, + golden_settings_polaris11_a11, + ARRAY_SIZE(golden_settings_polaris11_a11)); break; case CHIP_POLARIS10: - amdgpu_program_register_sequence(adev, - golden_settings_polaris10_a11, - (const u32)ARRAY_SIZE(golden_settings_polaris10_a11)); + amdgpu_device_program_register_sequence(adev, + golden_settings_polaris10_a11, + ARRAY_SIZE(golden_settings_polaris10_a11)); break; case CHIP_CARRIZO: - amdgpu_program_register_sequence(adev, - cz_mgcg_cgcg_init, - (const u32)ARRAY_SIZE(cz_mgcg_cgcg_init)); - amdgpu_program_register_sequence(adev, - cz_golden_settings_a11, - (const u32)ARRAY_SIZE(cz_golden_settings_a11)); + amdgpu_device_program_register_sequence(adev, + cz_mgcg_cgcg_init, + ARRAY_SIZE(cz_mgcg_cgcg_init)); + amdgpu_device_program_register_sequence(adev, + cz_golden_settings_a11, + ARRAY_SIZE(cz_golden_settings_a11)); break; case CHIP_STONEY: - amdgpu_program_register_sequence(adev, - stoney_mgcg_cgcg_init, - (const u32)ARRAY_SIZE(stoney_mgcg_cgcg_init)); - amdgpu_program_register_sequence(adev, - stoney_golden_settings_a11, - (const u32)ARRAY_SIZE(stoney_golden_settings_a11)); + 
amdgpu_device_program_register_sequence(adev, + stoney_mgcg_cgcg_init, + ARRAY_SIZE(stoney_mgcg_cgcg_init)); + amdgpu_device_program_register_sequence(adev, + stoney_golden_settings_a11, + ARRAY_SIZE(stoney_golden_settings_a11)); break; default: break; @@ -242,10 +250,9 @@ static void sdma_v3_0_init_golden_registers(struct amdgpu_device *adev) static void sdma_v3_0_free_microcode(struct amdgpu_device *adev) { int i; - for (i = 0; i < adev->sdma.num_instances; i++) { - release_firmware(adev->sdma.instance[i].fw); - adev->sdma.instance[i].fw = NULL; - } + + for (i = 0; i < adev->sdma.num_instances; i++) + amdgpu_ucode_release(&adev->sdma.instance[i].fw); } /** @@ -260,7 +267,6 @@ static void sdma_v3_0_free_microcode(struct amdgpu_device *adev) static int sdma_v3_0_init_microcode(struct amdgpu_device *adev) { const char *chip_name; - char fw_name[30]; int err = 0, i; struct amdgpu_firmware_info *info = NULL; const struct common_firmware_header *header = NULL; @@ -275,15 +281,18 @@ static int sdma_v3_0_init_microcode(struct amdgpu_device *adev) case CHIP_FIJI: chip_name = "fiji"; break; - case CHIP_POLARIS11: - chip_name = "polaris11"; - break; case CHIP_POLARIS10: chip_name = "polaris10"; break; + case CHIP_POLARIS11: + chip_name = "polaris11"; + break; case CHIP_POLARIS12: chip_name = "polaris12"; break; + case CHIP_VEGAM: + chip_name = "vegam"; + break; case CHIP_CARRIZO: chip_name = "carrizo"; break; @@ -295,13 +304,13 @@ static int sdma_v3_0_init_microcode(struct amdgpu_device *adev) for (i = 0; i < adev->sdma.num_instances; i++) { if (i == 0) - snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma.bin", chip_name); + err = amdgpu_ucode_request(adev, &adev->sdma.instance[i].fw, + AMDGPU_UCODE_REQUIRED, + "amdgpu/%s_sdma.bin", chip_name); else - snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma1.bin", chip_name); - err = request_firmware(&adev->sdma.instance[i].fw, fw_name, adev->dev); - if (err) - goto out; - err = amdgpu_ucode_validate(adev->sdma.instance[i].fw); + err = amdgpu_ucode_request(adev, &adev->sdma.instance[i].fw, + AMDGPU_UCODE_REQUIRED, + "amdgpu/%s_sdma1.bin", chip_name); if (err) goto out; hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data; @@ -310,22 +319,20 @@ static int sdma_v3_0_init_microcode(struct amdgpu_device *adev) if (adev->sdma.instance[i].feature_version >= 20) adev->sdma.instance[i].burst_nop = true; - if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU) { - info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i]; - info->ucode_id = AMDGPU_UCODE_ID_SDMA0 + i; - info->fw = adev->sdma.instance[i].fw; - header = (const struct common_firmware_header *)info->fw->data; - adev->firmware.fw_size += - ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE); - } + info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i]; + info->ucode_id = AMDGPU_UCODE_ID_SDMA0 + i; + info->fw = adev->sdma.instance[i].fw; + header = (const struct common_firmware_header *)info->fw->data; + adev->firmware.fw_size += + ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE); + } out: if (err) { - pr_err("sdma_v3_0: Failed to load firmware \"%s\"\n", fw_name); - for (i = 0; i < adev->sdma.num_instances; i++) { - release_firmware(adev->sdma.instance[i].fw); - adev->sdma.instance[i].fw = NULL; - } + pr_err("sdma_v3_0: Failed to load firmware \"%s_sdma%s.bin\"\n", + chip_name, i == 0 ? 
"" : "1"); + for (i = 0; i < adev->sdma.num_instances; i++) + amdgpu_ucode_release(&adev->sdma.instance[i].fw); } return err; } @@ -340,7 +347,7 @@ out: static uint64_t sdma_v3_0_ring_get_rptr(struct amdgpu_ring *ring) { /* XXX check if swapping is necessary on BE */ - return ring->adev->wb.wb[ring->rptr_offs] >> 2; + return *ring->rptr_cpu_addr >> 2; } /** @@ -355,13 +362,11 @@ static uint64_t sdma_v3_0_ring_get_wptr(struct amdgpu_ring *ring) struct amdgpu_device *adev = ring->adev; u32 wptr; - if (ring->use_doorbell) { + if (ring->use_doorbell || ring->use_pollmem) { /* XXX check if swapping is necessary on BE */ - wptr = ring->adev->wb.wb[ring->wptr_offs] >> 2; + wptr = *ring->wptr_cpu_addr >> 2; } else { - int me = (ring == &ring->adev->sdma.instance[0].ring) ? 0 : 1; - - wptr = RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me]) >> 2; + wptr = RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[ring->me]) >> 2; } return wptr; @@ -379,19 +384,22 @@ static void sdma_v3_0_ring_set_wptr(struct amdgpu_ring *ring) struct amdgpu_device *adev = ring->adev; if (ring->use_doorbell) { + u32 *wb = (u32 *)ring->wptr_cpu_addr; /* XXX check if swapping is necessary on BE */ - adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr) << 2; - WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr) << 2); - } else { - int me = (ring == &ring->adev->sdma.instance[0].ring) ? 0 : 1; + WRITE_ONCE(*wb, ring->wptr << 2); + WDOORBELL32(ring->doorbell_index, ring->wptr << 2); + } else if (ring->use_pollmem) { + u32 *wb = (u32 *)ring->wptr_cpu_addr; - WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me], lower_32_bits(ring->wptr) << 2); + WRITE_ONCE(*wb, ring->wptr << 2); + } else { + WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[ring->me], ring->wptr << 2); } } static void sdma_v3_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count) { - struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring); + struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring); int i; for (i = 0; i < count; i++) @@ -406,21 +414,24 @@ static void sdma_v3_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count) * sdma_v3_0_ring_emit_ib - Schedule an IB on the DMA engine * * @ring: amdgpu ring pointer + * @job: job to retrieve vmid from * @ib: IB object to schedule + * @flags: unused * * Schedule an IB in the DMA ring (VI). 
*/ static void sdma_v3_0_ring_emit_ib(struct amdgpu_ring *ring, + struct amdgpu_job *job, struct amdgpu_ib *ib, - unsigned vm_id, bool ctx_switch) + uint32_t flags) { - u32 vmid = vm_id & 0xf; + unsigned vmid = AMDGPU_JOB_GET_VMID(job); /* IB packet must end on a 8 DW boundary */ - sdma_v3_0_ring_insert_nop(ring, (10 - (lower_32_bits(ring->wptr) & 7)) % 8); + sdma_v3_0_ring_insert_nop(ring, (2 - lower_32_bits(ring->wptr)) & 7); amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) | - SDMA_PKT_INDIRECT_HEADER_VMID(vmid)); + SDMA_PKT_INDIRECT_HEADER_VMID(vmid & 0xf)); /* base must be 32 byte aligned */ amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr) & 0xffffffe0); amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr)); @@ -441,7 +452,7 @@ static void sdma_v3_0_ring_emit_hdp_flush(struct amdgpu_ring *ring) { u32 ref_and_mask = 0; - if (ring == &ring->adev->sdma.instance[0].ring) + if (ring->me == 0) ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA0, 1); else ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA1, 1); @@ -457,19 +468,13 @@ static void sdma_v3_0_ring_emit_hdp_flush(struct amdgpu_ring *ring) SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */ } -static void sdma_v3_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring) -{ - amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) | - SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf)); - amdgpu_ring_write(ring, mmHDP_DEBUG0); - amdgpu_ring_write(ring, 1); -} - /** * sdma_v3_0_ring_emit_fence - emit a fence on the DMA ring * * @ring: amdgpu ring pointer - * @fence: amdgpu fence object + * @addr: address + * @seq: sequence number + * @flags: fence related flags * * Add a DMA fence packet to the ring to write * the fence seq number and DMA trap packet to generate @@ -508,15 +513,9 @@ static void sdma_v3_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se */ static void sdma_v3_0_gfx_stop(struct amdgpu_device *adev) { - struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring; - struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring; u32 rb_cntl, ib_cntl; int i; - if ((adev->mman.buffer_funcs_ring == sdma0) || - (adev->mman.buffer_funcs_ring == sdma1)) - amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size); - for (i = 0; i < adev->sdma.num_instances; i++) { rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]); rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0); @@ -525,8 +524,6 @@ static void sdma_v3_0_gfx_stop(struct amdgpu_device *adev) ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0); WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl); } - sdma0->ready = false; - sdma1->ready = false; } /** @@ -551,17 +548,53 @@ static void sdma_v3_0_rlc_stop(struct amdgpu_device *adev) */ static void sdma_v3_0_ctx_switch_enable(struct amdgpu_device *adev, bool enable) { - u32 f32_cntl; + u32 f32_cntl, phase_quantum = 0; int i; + if (amdgpu_sdma_phase_quantum) { + unsigned value = amdgpu_sdma_phase_quantum; + unsigned unit = 0; + + while (value > (SDMA0_PHASE0_QUANTUM__VALUE_MASK >> + SDMA0_PHASE0_QUANTUM__VALUE__SHIFT)) { + value = (value + 1) >> 1; + unit++; + } + if (unit > (SDMA0_PHASE0_QUANTUM__UNIT_MASK >> + SDMA0_PHASE0_QUANTUM__UNIT__SHIFT)) { + value = (SDMA0_PHASE0_QUANTUM__VALUE_MASK >> + SDMA0_PHASE0_QUANTUM__VALUE__SHIFT); + unit = (SDMA0_PHASE0_QUANTUM__UNIT_MASK >> + SDMA0_PHASE0_QUANTUM__UNIT__SHIFT); + WARN_ONCE(1, + "clamping sdma_phase_quantum to %uK clock cycles\n", + value << unit); + } + phase_quantum = + 
value << SDMA0_PHASE0_QUANTUM__VALUE__SHIFT | + unit << SDMA0_PHASE0_QUANTUM__UNIT__SHIFT; + } + for (i = 0; i < adev->sdma.num_instances; i++) { f32_cntl = RREG32(mmSDMA0_CNTL + sdma_offsets[i]); - if (enable) + if (enable) { f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL, AUTO_CTXSW_ENABLE, 1); - else + f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL, + ATC_L1_ENABLE, 1); + if (amdgpu_sdma_phase_quantum) { + WREG32(mmSDMA0_PHASE0_QUANTUM + sdma_offsets[i], + phase_quantum); + WREG32(mmSDMA0_PHASE1_QUANTUM + sdma_offsets[i], + phase_quantum); + } + } else { f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL, AUTO_CTXSW_ENABLE, 0); + f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL, + ATC_L1_ENABLE, 1); + } + WREG32(mmSDMA0_CNTL + sdma_offsets[i], f32_cntl); } } @@ -605,16 +638,15 @@ static void sdma_v3_0_enable(struct amdgpu_device *adev, bool enable) static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev) { struct amdgpu_ring *ring; - u32 rb_cntl, ib_cntl; + u32 rb_cntl, ib_cntl, wptr_poll_cntl; u32 rb_bufsz; - u32 wb_offset; u32 doorbell; + u64 wptr_gpu_addr; int i, j, r; for (i = 0; i < adev->sdma.num_instances; i++) { ring = &adev->sdma.instance[i].ring; amdgpu_ring_clear_ring(ring); - wb_offset = (ring->rptr_offs * 4); mutex_lock(&adev->srbm_mutex); for (j = 0; j < 16; j++) { @@ -651,9 +683,9 @@ static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev) /* set the wb address whether it's enabled or not */ WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i], - upper_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF); + upper_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFF); WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_LO + sdma_offsets[i], - lower_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC); + lower_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFC); rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1); @@ -671,6 +703,27 @@ static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev) } WREG32(mmSDMA0_GFX_DOORBELL + sdma_offsets[i], doorbell); + /* setup the wptr shadow polling */ + wptr_gpu_addr = ring->wptr_gpu_addr; + + WREG32(mmSDMA0_GFX_RB_WPTR_POLL_ADDR_LO + sdma_offsets[i], + lower_32_bits(wptr_gpu_addr)); + WREG32(mmSDMA0_GFX_RB_WPTR_POLL_ADDR_HI + sdma_offsets[i], + upper_32_bits(wptr_gpu_addr)); + wptr_poll_cntl = RREG32(mmSDMA0_GFX_RB_WPTR_POLL_CNTL + sdma_offsets[i]); + if (ring->use_pollmem) { + /*wptr polling is not enough fast, directly clean the wptr register */ + WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0); + wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl, + SDMA0_GFX_RB_WPTR_POLL_CNTL, + ENABLE, 1); + } else { + wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl, + SDMA0_GFX_RB_WPTR_POLL_CNTL, + ENABLE, 0); + } + WREG32(mmSDMA0_GFX_RB_WPTR_POLL_CNTL + sdma_offsets[i], wptr_poll_cntl); + /* enable DMA RB */ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 1); WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl); @@ -682,8 +735,6 @@ static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev) #endif /* enable DMA IBs */ WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl); - - ring->ready = true; } /* unhalt the MEs */ @@ -693,14 +744,9 @@ static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev) for (i = 0; i < adev->sdma.num_instances; i++) { ring = &adev->sdma.instance[i].ring; - r = amdgpu_ring_test_ring(ring); - if (r) { - ring->ready = false; + r = amdgpu_ring_test_helper(ring); + if (r) return r; - } - - if (adev->mman.buffer_funcs_ring == ring) - amdgpu_ttm_set_active_vram_size(adev, adev->mc.real_vram_size); } return 0; @@ -721,42 +767,6 @@ 
static int sdma_v3_0_rlc_resume(struct amdgpu_device *adev) } /** - * sdma_v3_0_load_microcode - load the sDMA ME ucode - * - * @adev: amdgpu_device pointer - * - * Loads the sDMA0/1 ucode. - * Returns 0 for success, -EINVAL if the ucode is not available. - */ -static int sdma_v3_0_load_microcode(struct amdgpu_device *adev) -{ - const struct sdma_firmware_header_v1_0 *hdr; - const __le32 *fw_data; - u32 fw_size; - int i, j; - - /* halt the MEs */ - sdma_v3_0_enable(adev, false); - - for (i = 0; i < adev->sdma.num_instances; i++) { - if (!adev->sdma.instance[i].fw) - return -EINVAL; - hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data; - amdgpu_ucode_print_sdma_hdr(&hdr->header); - fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4; - fw_data = (const __le32 *) - (adev->sdma.instance[i].fw->data + - le32_to_cpu(hdr->header.ucode_array_offset_bytes)); - WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], 0); - for (j = 0; j < fw_size; j++) - WREG32(mmSDMA0_UCODE_DATA + sdma_offsets[i], le32_to_cpup(fw_data++)); - WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], adev->sdma.instance[i].fw_version); - } - - return 0; -} - -/** * sdma_v3_0_start - setup and start the async dma engines * * @adev: amdgpu_device pointer @@ -766,24 +776,7 @@ static int sdma_v3_0_load_microcode(struct amdgpu_device *adev) */ static int sdma_v3_0_start(struct amdgpu_device *adev) { - int r, i; - - if (!adev->pp_enabled) { - if (adev->firmware.load_type != AMDGPU_FW_LOAD_SMU) { - r = sdma_v3_0_load_microcode(adev); - if (r) - return r; - } else { - for (i = 0; i < adev->sdma.num_instances; i++) { - r = adev->smu.smumgr_funcs->check_fw_load_finish(adev, - (i == 0) ? - AMDGPU_UCODE_ID_SDMA0 : - AMDGPU_UCODE_ID_SDMA1); - if (r) - return -EINVAL; - } - } - } + int r; /* disable sdma engine before programing it */ sdma_v3_0_ctx_switch_enable(adev, false); @@ -818,22 +811,17 @@ static int sdma_v3_0_ring_test_ring(struct amdgpu_ring *ring) u32 tmp; u64 gpu_addr; - r = amdgpu_wb_get(adev, &index); - if (r) { - dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r); + r = amdgpu_device_wb_get(adev, &index); + if (r) return r; - } gpu_addr = adev->wb.gpu_addr + (index * 4); tmp = 0xCAFEDEAD; adev->wb.wb[index] = cpu_to_le32(tmp); r = amdgpu_ring_alloc(ring, 5); - if (r) { - DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r); - amdgpu_wb_free(adev, index); - return r; - } + if (r) + goto error_free_wb; amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) | SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR)); @@ -847,18 +835,14 @@ static int sdma_v3_0_ring_test_ring(struct amdgpu_ring *ring) tmp = le32_to_cpu(adev->wb.wb[index]); if (tmp == 0xDEADBEEF) break; - DRM_UDELAY(1); + udelay(1); } - if (i < adev->usec_timeout) { - DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i); - } else { - DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n", - ring->idx, tmp); - r = -EINVAL; - } - amdgpu_wb_free(adev, index); + if (i >= adev->usec_timeout) + r = -ETIMEDOUT; +error_free_wb: + amdgpu_device_wb_free(adev, index); return r; } @@ -866,6 +850,7 @@ static int sdma_v3_0_ring_test_ring(struct amdgpu_ring *ring) * sdma_v3_0_ring_test_ib - test an IB on the DMA engine * * @ring: amdgpu_ring structure holding ring information + * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT * * Test a simple IB in the DMA ring (VI). * Returns 0 on success, error on failure. 
@@ -880,21 +865,18 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) u64 gpu_addr; long r; - r = amdgpu_wb_get(adev, &index); - if (r) { - dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r); + r = amdgpu_device_wb_get(adev, &index); + if (r) return r; - } gpu_addr = adev->wb.gpu_addr + (index * 4); tmp = 0xCAFEDEAD; adev->wb.wb[index] = cpu_to_le32(tmp); memset(&ib, 0, sizeof(ib)); - r = amdgpu_ib_get(adev, NULL, 256, &ib); - if (r) { - DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r); + r = amdgpu_ib_get(adev, NULL, 256, + AMDGPU_IB_POOL_DIRECT, &ib); + if (r) goto err0; - } ib.ptr[0] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) | SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR); @@ -913,26 +895,21 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) r = dma_fence_wait_timeout(f, false, timeout); if (r == 0) { - DRM_ERROR("amdgpu: IB test timed out\n"); r = -ETIMEDOUT; goto err1; } else if (r < 0) { - DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r); goto err1; } tmp = le32_to_cpu(adev->wb.wb[index]); - if (tmp == 0xDEADBEEF) { - DRM_INFO("ib test on ring %d succeeded\n", ring->idx); + if (tmp == 0xDEADBEEF) r = 0; - } else { - DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp); + else r = -EINVAL; - } err1: - amdgpu_ib_free(adev, &ib, NULL); + amdgpu_ib_free(&ib, NULL); dma_fence_put(f); err0: - amdgpu_wb_free(adev, index); + amdgpu_device_wb_free(adev, index); return r; } @@ -1023,16 +1000,17 @@ static void sdma_v3_0_vm_set_pte_pde(struct amdgpu_ib *ib, uint64_t pe, /** * sdma_v3_0_ring_pad_ib - pad the IB to the required number of dw * + * @ring: amdgpu_ring structure holding ring information * @ib: indirect buffer to fill with padding * */ static void sdma_v3_0_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib) { - struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring); + struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring); u32 pad_count; int i; - pad_count = (8 - (ib->length_dw & 0x7)) % 8; + pad_count = (-ib->length_dw) & 7; for (i = 0; i < pad_count; i++) if (sdma && sdma->burst_nop && (i == 0)) ib->ptr[ib->length_dw++] = @@ -1063,7 +1041,7 @@ static void sdma_v3_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring) amdgpu_ring_write(ring, addr & 0xfffffffc); amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff); amdgpu_ring_write(ring, seq); /* reference */ - amdgpu_ring_write(ring, 0xfffffff); /* mask */ + amdgpu_ring_write(ring, 0xffffffff); /* mask */ amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) | SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(4)); /* retry count, poll interval */ } @@ -1072,28 +1050,16 @@ static void sdma_v3_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring) * sdma_v3_0_ring_emit_vm_flush - cik vm flush using sDMA * * @ring: amdgpu_ring pointer - * @vm: amdgpu_vm pointer + * @vmid: vmid number to use + * @pd_addr: address * * Update the page table base and flush the VM TLB * using sDMA (VI). 
*/ static void sdma_v3_0_ring_emit_vm_flush(struct amdgpu_ring *ring, - unsigned vm_id, uint64_t pd_addr) + unsigned vmid, uint64_t pd_addr) { - amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) | - SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf)); - if (vm_id < 8) { - amdgpu_ring_write(ring, (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id)); - } else { - amdgpu_ring_write(ring, (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vm_id - 8)); - } - amdgpu_ring_write(ring, pd_addr >> 12); - - /* flush TLB */ - amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) | - SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf)); - amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST); - amdgpu_ring_write(ring, 1 << vm_id); + amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr); /* wait for flush */ amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) | @@ -1107,9 +1073,19 @@ static void sdma_v3_0_ring_emit_vm_flush(struct amdgpu_ring *ring, SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */ } -static int sdma_v3_0_early_init(void *handle) +static void sdma_v3_0_ring_emit_wreg(struct amdgpu_ring *ring, + uint32_t reg, uint32_t val) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) | + SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf)); + amdgpu_ring_write(ring, reg); + amdgpu_ring_write(ring, val); +} + +static int sdma_v3_0_early_init(struct amdgpu_ip_block *ip_block) +{ + struct amdgpu_device *adev = ip_block->adev; + int r; switch (adev->asic_type) { case CHIP_STONEY: @@ -1120,6 +1096,10 @@ static int sdma_v3_0_early_init(void *handle) break; } + r = sdma_v3_0_init_microcode(adev); + if (r) + return r; + sdma_v3_0_set_ring_funcs(adev); sdma_v3_0_set_buffer_funcs(adev); sdma_v3_0_set_vm_pte_funcs(adev); @@ -1128,49 +1108,45 @@ static int sdma_v3_0_early_init(void *handle) return 0; } -static int sdma_v3_0_sw_init(void *handle) +static int sdma_v3_0_sw_init(struct amdgpu_ip_block *ip_block) { struct amdgpu_ring *ring; int r, i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; /* SDMA trap event */ - r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 224, + r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SDMA_TRAP, &adev->sdma.trap_irq); if (r) return r; /* SDMA Privileged inst */ - r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 241, + r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 241, &adev->sdma.illegal_inst_irq); if (r) return r; /* SDMA Privileged inst */ - r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 247, + r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SDMA_SRBM_WRITE, &adev->sdma.illegal_inst_irq); if (r) return r; - r = sdma_v3_0_init_microcode(adev); - if (r) { - DRM_ERROR("Failed to load sdma firmware!\n"); - return r; - } - for (i = 0; i < adev->sdma.num_instances; i++) { ring = &adev->sdma.instance[i].ring; ring->ring_obj = NULL; - ring->use_doorbell = true; - ring->doorbell_index = (i == 0) ? - AMDGPU_DOORBELL_sDMA_ENGINE0 : AMDGPU_DOORBELL_sDMA_ENGINE1; + if (!amdgpu_sriov_vf(adev)) { + ring->use_doorbell = true; + ring->doorbell_index = adev->doorbell_index.sdma_engine[i]; + } else { + ring->use_pollmem = true; + } sprintf(ring->name, "sdma%d", i); - r = amdgpu_ring_init(adev, ring, 1024, - &adev->sdma.trap_irq, - (i == 0) ? - AMDGPU_SDMA_IRQ_TRAP0 : - AMDGPU_SDMA_IRQ_TRAP1); + r = amdgpu_ring_init(adev, ring, 1024, &adev->sdma.trap_irq, + (i == 0) ? 
AMDGPU_SDMA_IRQ_INSTANCE0 : + AMDGPU_SDMA_IRQ_INSTANCE1, + AMDGPU_RING_PRIO_DEFAULT, NULL); if (r) return r; } @@ -1178,9 +1154,9 @@ static int sdma_v3_0_sw_init(void *handle) return r; } -static int sdma_v3_0_sw_fini(void *handle) +static int sdma_v3_0_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i; for (i = 0; i < adev->sdma.num_instances; i++) @@ -1190,10 +1166,10 @@ static int sdma_v3_0_sw_fini(void *handle) return 0; } -static int sdma_v3_0_hw_init(void *handle) +static int sdma_v3_0_hw_init(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; sdma_v3_0_init_golden_registers(adev); @@ -1204,9 +1180,9 @@ static int sdma_v3_0_hw_init(void *handle) return r; } -static int sdma_v3_0_hw_fini(void *handle) +static int sdma_v3_0_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; sdma_v3_0_ctx_switch_enable(adev, false); sdma_v3_0_enable(adev, false); @@ -1214,23 +1190,19 @@ static int sdma_v3_0_hw_fini(void *handle) return 0; } -static int sdma_v3_0_suspend(void *handle) +static int sdma_v3_0_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return sdma_v3_0_hw_fini(adev); + return sdma_v3_0_hw_fini(ip_block); } -static int sdma_v3_0_resume(void *handle) +static int sdma_v3_0_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return sdma_v3_0_hw_init(adev); + return sdma_v3_0_hw_init(ip_block); } -static bool sdma_v3_0_is_idle(void *handle) +static bool sdma_v3_0_is_idle(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; u32 tmp = RREG32(mmSRBM_STATUS2); if (tmp & (SRBM_STATUS2__SDMA_BUSY_MASK | @@ -1240,11 +1212,11 @@ static bool sdma_v3_0_is_idle(void *handle) return true; } -static int sdma_v3_0_wait_for_idle(void *handle) +static int sdma_v3_0_wait_for_idle(struct amdgpu_ip_block *ip_block) { unsigned i; u32 tmp; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; for (i = 0; i < adev->usec_timeout; i++) { tmp = RREG32(mmSRBM_STATUS2) & (SRBM_STATUS2__SDMA_BUSY_MASK | @@ -1257,9 +1229,9 @@ static int sdma_v3_0_wait_for_idle(void *handle) return -ETIMEDOUT; } -static bool sdma_v3_0_check_soft_reset(void *handle) +static bool sdma_v3_0_check_soft_reset(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; u32 srbm_soft_reset = 0; u32 tmp = RREG32(mmSRBM_STATUS2); @@ -1278,9 +1250,9 @@ static bool sdma_v3_0_check_soft_reset(void *handle) } } -static int sdma_v3_0_pre_soft_reset(void *handle) +static int sdma_v3_0_pre_soft_reset(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; u32 srbm_soft_reset = 0; if (!adev->sdma.srbm_soft_reset) @@ -1297,9 +1269,9 @@ static int sdma_v3_0_pre_soft_reset(void *handle) return 0; } -static int sdma_v3_0_post_soft_reset(void *handle) +static int sdma_v3_0_post_soft_reset(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device 
*adev = ip_block->adev; u32 srbm_soft_reset = 0; if (!adev->sdma.srbm_soft_reset) @@ -1316,9 +1288,9 @@ static int sdma_v3_0_post_soft_reset(void *handle) return 0; } -static int sdma_v3_0_soft_reset(void *handle) +static int sdma_v3_0_soft_reset(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; u32 srbm_soft_reset = 0; u32 tmp; @@ -1355,7 +1327,7 @@ static int sdma_v3_0_set_trap_irq_state(struct amdgpu_device *adev, u32 sdma_cntl; switch (type) { - case AMDGPU_SDMA_IRQ_TRAP0: + case AMDGPU_SDMA_IRQ_INSTANCE0: switch (state) { case AMDGPU_IRQ_STATE_DISABLE: sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET); @@ -1371,7 +1343,7 @@ static int sdma_v3_0_set_trap_irq_state(struct amdgpu_device *adev, break; } break; - case AMDGPU_SDMA_IRQ_TRAP1: + case AMDGPU_SDMA_IRQ_INSTANCE1: switch (state) { case AMDGPU_IRQ_STATE_DISABLE: sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET); @@ -1437,8 +1409,14 @@ static int sdma_v3_0_process_illegal_inst_irq(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry) { + u8 instance_id, queue_id; + DRM_ERROR("Illegal instruction in SDMA command stream\n"); - schedule_work(&adev->reset_work); + instance_id = (entry->ring_id & 0x3) >> 0; + queue_id = (entry->ring_id & 0xc) >> 2; + + if (instance_id <= 1 && queue_id == 0) + drm_sched_fault(&adev->sdma.instance[instance_id].ring.sched); return 0; } @@ -1507,10 +1485,10 @@ static void sdma_v3_0_update_sdma_medium_grain_light_sleep( } } -static int sdma_v3_0_set_clockgating_state(void *handle, +static int sdma_v3_0_set_clockgating_state(struct amdgpu_ip_block *ip_block, enum amd_clockgating_state state) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (amdgpu_sriov_vf(adev)) return 0; @@ -1530,15 +1508,15 @@ static int sdma_v3_0_set_clockgating_state(void *handle, return 0; } -static int sdma_v3_0_set_powergating_state(void *handle, +static int sdma_v3_0_set_powergating_state(struct amdgpu_ip_block *ip_block, enum amd_powergating_state state) { return 0; } -static void sdma_v3_0_get_clockgating_state(void *handle, u32 *flags) +static void sdma_v3_0_get_clockgating_state(struct amdgpu_ip_block *ip_block, u64 *flags) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int data; if (amdgpu_sriov_vf(adev)) @@ -1558,7 +1536,6 @@ static void sdma_v3_0_get_clockgating_state(void *handle, u32 *flags) static const struct amd_ip_funcs sdma_v3_0_ip_funcs = { .name = "sdma_v3_0", .early_init = sdma_v3_0_early_init, - .late_init = NULL, .sw_init = sdma_v3_0_sw_init, .sw_fini = sdma_v3_0_sw_fini, .hw_init = sdma_v3_0_hw_init, @@ -1581,14 +1558,15 @@ static const struct amdgpu_ring_funcs sdma_v3_0_ring_funcs = { .align_mask = 0xf, .nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP), .support_64bit_ptrs = false, + .secure_submission_supported = true, .get_rptr = sdma_v3_0_ring_get_rptr, .get_wptr = sdma_v3_0_ring_get_wptr, .set_wptr = sdma_v3_0_ring_set_wptr, .emit_frame_size = 6 + /* sdma_v3_0_ring_emit_hdp_flush */ - 3 + /* sdma_v3_0_ring_emit_hdp_invalidate */ + 3 + /* hdp invalidate */ 6 + /* sdma_v3_0_ring_emit_pipeline_sync */ - 12 + /* sdma_v3_0_ring_emit_vm_flush */ + VI_FLUSH_GPU_TLB_NUM_WREG * 3 + 6 + /* sdma_v3_0_ring_emit_vm_flush */ 10 + 10 + 10, /* sdma_v3_0_ring_emit_fence x3 for user fence, vm fence */ .emit_ib_size = 7 + 6, /* sdma_v3_0_ring_emit_ib */ .emit_ib 
= sdma_v3_0_ring_emit_ib, @@ -1596,19 +1574,21 @@ static const struct amdgpu_ring_funcs sdma_v3_0_ring_funcs = { .emit_pipeline_sync = sdma_v3_0_ring_emit_pipeline_sync, .emit_vm_flush = sdma_v3_0_ring_emit_vm_flush, .emit_hdp_flush = sdma_v3_0_ring_emit_hdp_flush, - .emit_hdp_invalidate = sdma_v3_0_ring_emit_hdp_invalidate, .test_ring = sdma_v3_0_ring_test_ring, .test_ib = sdma_v3_0_ring_test_ib, .insert_nop = sdma_v3_0_ring_insert_nop, .pad_ib = sdma_v3_0_ring_pad_ib, + .emit_wreg = sdma_v3_0_ring_emit_wreg, }; static void sdma_v3_0_set_ring_funcs(struct amdgpu_device *adev) { int i; - for (i = 0; i < adev->sdma.num_instances; i++) + for (i = 0; i < adev->sdma.num_instances; i++) { adev->sdma.instance[i].ring.funcs = &sdma_v3_0_ring_funcs; + adev->sdma.instance[i].ring.me = i; + } } static const struct amdgpu_irq_src_funcs sdma_v3_0_trap_irq_funcs = { @@ -1630,10 +1610,11 @@ static void sdma_v3_0_set_irq_funcs(struct amdgpu_device *adev) /** * sdma_v3_0_emit_copy_buffer - copy buffer using the sDMA engine * - * @ring: amdgpu_ring structure holding ring information + * @ib: indirect buffer to copy to * @src_offset: src GPU address * @dst_offset: dst GPU address * @byte_count: number of bytes to xfer + * @copy_flags: unused * * Copy GPU buffers using the DMA engine (VI). * Used by the amdgpu ttm implementation to move pages if @@ -1642,7 +1623,8 @@ static void sdma_v3_0_set_irq_funcs(struct amdgpu_device *adev) static void sdma_v3_0_emit_copy_buffer(struct amdgpu_ib *ib, uint64_t src_offset, uint64_t dst_offset, - uint32_t byte_count) + uint32_t byte_count, + uint32_t copy_flags) { ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) | SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR); @@ -1657,7 +1639,7 @@ static void sdma_v3_0_emit_copy_buffer(struct amdgpu_ib *ib, /** * sdma_v3_0_emit_fill_buffer - fill buffer using the sDMA engine * - * @ring: amdgpu_ring structure holding ring information + * @ib: indirect buffer to copy to * @src_data: value to write to buffer * @dst_offset: dst GPU address * @byte_count: number of bytes to xfer @@ -1677,25 +1659,25 @@ static void sdma_v3_0_emit_fill_buffer(struct amdgpu_ib *ib, } static const struct amdgpu_buffer_funcs sdma_v3_0_buffer_funcs = { - .copy_max_bytes = 0x1fffff, + .copy_max_bytes = 0x3fffe0, /* not 0x3fffff due to HW limitation */ .copy_num_dw = 7, .emit_copy_buffer = sdma_v3_0_emit_copy_buffer, - .fill_max_bytes = 0x1fffff, + .fill_max_bytes = 0x3fffe0, /* not 0x3fffff due to HW limitation */ .fill_num_dw = 5, .emit_fill_buffer = sdma_v3_0_emit_fill_buffer, }; static void sdma_v3_0_set_buffer_funcs(struct amdgpu_device *adev) { - if (adev->mman.buffer_funcs == NULL) { - adev->mman.buffer_funcs = &sdma_v3_0_buffer_funcs; - adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring; - } + adev->mman.buffer_funcs = &sdma_v3_0_buffer_funcs; + adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring; } static const struct amdgpu_vm_pte_funcs sdma_v3_0_vm_pte_funcs = { + .copy_pte_num_dw = 7, .copy_pte = sdma_v3_0_vm_copy_pte, + .write_pte = sdma_v3_0_vm_write_pte, .set_pte_pde = sdma_v3_0_vm_set_pte_pde, }; @@ -1704,14 +1686,12 @@ static void sdma_v3_0_set_vm_pte_funcs(struct amdgpu_device *adev) { unsigned i; - if (adev->vm_manager.vm_pte_funcs == NULL) { - adev->vm_manager.vm_pte_funcs = &sdma_v3_0_vm_pte_funcs; - for (i = 0; i < adev->sdma.num_instances; i++) - adev->vm_manager.vm_pte_rings[i] = - &adev->sdma.instance[i].ring; - - adev->vm_manager.vm_pte_num_rings = adev->sdma.num_instances; + adev->vm_manager.vm_pte_funcs = 
&sdma_v3_0_vm_pte_funcs; + for (i = 0; i < adev->sdma.num_instances; i++) { + adev->vm_manager.vm_pte_scheds[i] = + &adev->sdma.instance[i].ring.sched; } + adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances; } const struct amdgpu_ip_block_version sdma_v3_0_ip_block = |