Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c  686
1 file changed, 306 insertions(+), 380 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 3a9fb6018c16..2b7aba22ecc1 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -20,8 +20,10 @@
  * OTHER DEALINGS IN THE SOFTWARE.
  *
  */
+
 #include <linux/firmware.h>
-#include <drm/drmP.h>
+#include <linux/module.h>
+
 #include "amdgpu.h"
 #include "amdgpu_ih.h"
 #include "amdgpu_gfx.h"
@@ -53,6 +55,9 @@
 #define GFX7_NUM_GFX_RINGS	1
 #define GFX7_MEC_HPD_SIZE	2048
 
+#define BONAIRE_GB_ADDR_CONFIG_GOLDEN 0x12010001
+#define HAWAII_GB_ADDR_CONFIG_GOLDEN 0x12011003
+
 static void gfx_v7_0_set_ring_funcs(struct amdgpu_device *adev);
 static void gfx_v7_0_set_irq_funcs(struct amdgpu_device *adev);
 static void gfx_v7_0_set_gds_init(struct amdgpu_device *adev);
@@ -88,8 +93,7 @@ MODULE_FIRMWARE("amdgpu/mullins_ce.bin");
 MODULE_FIRMWARE("amdgpu/mullins_rlc.bin");
 MODULE_FIRMWARE("amdgpu/mullins_mec.bin");
 
-static const struct amdgpu_gds_reg_offset amdgpu_gds_reg_offset[] =
-{
+static const struct amdgpu_gds_reg_offset amdgpu_gds_reg_offset[] = {
 	{mmGDS_VMID0_BASE, mmGDS_VMID0_SIZE, mmGDS_GWS_VMID0, mmGDS_OA_VMID0},
 	{mmGDS_VMID1_BASE, mmGDS_VMID1_SIZE, mmGDS_GWS_VMID1, mmGDS_OA_VMID1},
 	{mmGDS_VMID2_BASE, mmGDS_VMID2_SIZE, mmGDS_GWS_VMID2, mmGDS_OA_VMID2},
@@ -108,8 +112,7 @@ static const struct amdgpu_gds_reg_offset amdgpu_gds_reg_offset[] =
 	{mmGDS_VMID15_BASE, mmGDS_VMID15_SIZE, mmGDS_GWS_VMID15, mmGDS_OA_VMID15}
 };
 
-static const u32 spectre_rlc_save_restore_register_list[] =
-{
+static const u32 spectre_rlc_save_restore_register_list[] = {
 	(0x0e00 << 16) | (0xc12c >> 2),
 	0x00000000,
 	(0x0e00 << 16) | (0xc140 >> 2),
@@ -555,8 +558,7 @@ static const u32 spectre_rlc_save_restore_register_list[] =
 	(0x0e00 << 16) | (0x9600 >> 2),
 };
 
-static const u32 kalindi_rlc_save_restore_register_list[] =
-{
+static const u32 kalindi_rlc_save_restore_register_list[] = {
 	(0x0e00 << 16) | (0xc12c >> 2),
 	0x00000000,
 	(0x0e00 << 16) | (0xc140 >> 2),
@@ -881,10 +883,20 @@ static const u32 kalindi_rlc_save_restore_register_list[] =
 };
 
 static u32 gfx_v7_0_get_csb_size(struct amdgpu_device *adev);
-static void gfx_v7_0_get_csb_buffer(struct amdgpu_device *adev, volatile u32 *buffer);
+static void gfx_v7_0_get_csb_buffer(struct amdgpu_device *adev, u32 *buffer);
 static void gfx_v7_0_init_pg(struct amdgpu_device *adev);
 static void gfx_v7_0_get_cu_info(struct amdgpu_device *adev);
 
+static void gfx_v7_0_free_microcode(struct amdgpu_device *adev)
+{
+	amdgpu_ucode_release(&adev->gfx.pfp_fw);
+	amdgpu_ucode_release(&adev->gfx.me_fw);
+	amdgpu_ucode_release(&adev->gfx.ce_fw);
+	amdgpu_ucode_release(&adev->gfx.mec_fw);
+	amdgpu_ucode_release(&adev->gfx.mec2_fw);
+	amdgpu_ucode_release(&adev->gfx.rlc_fw);
+}
+
 /*
  * Core functions
  */
@@ -900,7 +912,6 @@ static void gfx_v7_0_get_cu_info(struct amdgpu_device *adev);
 static int gfx_v7_0_init_microcode(struct amdgpu_device *adev)
 {
 	const char *chip_name;
-	char fw_name[30];
 	int err;
 
 	DRM_DEBUG("\n");
@@ -921,92 +932,53 @@ static int gfx_v7_0_init_microcode(struct amdgpu_device *adev)
 	case CHIP_MULLINS:
 		chip_name = "mullins";
 		break;
-	default: BUG();
+	default:
+		BUG();
 	}
 
-	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name);
-	err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
-	if (err)
-		goto out;
-	err = amdgpu_ucode_validate(adev->gfx.pfp_fw);
+	err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw,
+				   AMDGPU_UCODE_REQUIRED,
+				   "amdgpu/%s_pfp.bin", chip_name);
 	if (err)
		goto out;
 
-	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name);
"amdgpu/%s_me.bin", chip_name); - err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev); - if (err) - goto out; - err = amdgpu_ucode_validate(adev->gfx.me_fw); + err = amdgpu_ucode_request(adev, &adev->gfx.me_fw, + AMDGPU_UCODE_REQUIRED, + "amdgpu/%s_me.bin", chip_name); if (err) goto out; - snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name); - err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev); - if (err) - goto out; - err = amdgpu_ucode_validate(adev->gfx.ce_fw); + err = amdgpu_ucode_request(adev, &adev->gfx.ce_fw, + AMDGPU_UCODE_REQUIRED, + "amdgpu/%s_ce.bin", chip_name); if (err) goto out; - snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name); - err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev); - if (err) - goto out; - err = amdgpu_ucode_validate(adev->gfx.mec_fw); + err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw, + AMDGPU_UCODE_REQUIRED, + "amdgpu/%s_mec.bin", chip_name); if (err) goto out; if (adev->asic_type == CHIP_KAVERI) { - snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name); - err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev); - if (err) - goto out; - err = amdgpu_ucode_validate(adev->gfx.mec2_fw); + err = amdgpu_ucode_request(adev, &adev->gfx.mec2_fw, + AMDGPU_UCODE_REQUIRED, + "amdgpu/%s_mec2.bin", chip_name); if (err) goto out; } - snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name); - err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev); - if (err) - goto out; - err = amdgpu_ucode_validate(adev->gfx.rlc_fw); - + err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw, + AMDGPU_UCODE_REQUIRED, + "amdgpu/%s_rlc.bin", chip_name); out: if (err) { - pr_err("gfx7: Failed to load firmware \"%s\"\n", fw_name); - release_firmware(adev->gfx.pfp_fw); - adev->gfx.pfp_fw = NULL; - release_firmware(adev->gfx.me_fw); - adev->gfx.me_fw = NULL; - release_firmware(adev->gfx.ce_fw); - adev->gfx.ce_fw = NULL; - release_firmware(adev->gfx.mec_fw); - adev->gfx.mec_fw = NULL; - release_firmware(adev->gfx.mec2_fw); - adev->gfx.mec2_fw = NULL; - release_firmware(adev->gfx.rlc_fw); - adev->gfx.rlc_fw = NULL; + pr_err("gfx7: Failed to load firmware %s gfx firmware\n", chip_name); + gfx_v7_0_free_microcode(adev); } return err; } -static void gfx_v7_0_free_microcode(struct amdgpu_device *adev) -{ - release_firmware(adev->gfx.pfp_fw); - adev->gfx.pfp_fw = NULL; - release_firmware(adev->gfx.me_fw); - adev->gfx.me_fw = NULL; - release_firmware(adev->gfx.ce_fw); - adev->gfx.ce_fw = NULL; - release_firmware(adev->gfx.mec_fw); - adev->gfx.mec_fw = NULL; - release_firmware(adev->gfx.mec2_fw); - adev->gfx.mec2_fw = NULL; - release_firmware(adev->gfx.rlc_fw); - adev->gfx.rlc_fw = NULL; -} - /** * gfx_v7_0_tiling_mode_table_init - init the hw tiling table * @@ -1578,13 +1550,14 @@ static void gfx_v7_0_tiling_mode_table_init(struct amdgpu_device *adev) * @adev: amdgpu_device pointer * @se_num: shader engine to address * @sh_num: sh block to address - * - * Select which SE, SH combinations to address. Certain - * registers are instanced per SE or SH. 0xffffffff means - * broadcast to all SEs or SHs (CIK). + * @instance: Certain registers are instanced per SE or SH. + * 0xffffffff means broadcast to all SEs or SHs (CIK). + * @xcc_id: xcc accelerated compute core id + * Select which SE, SH combinations to address. 
  */
 static void gfx_v7_0_select_se_sh(struct amdgpu_device *adev,
-				  u32 se_num, u32 sh_num, u32 instance)
+				  u32 se_num, u32 sh_num, u32 instance,
+				  int xcc_id)
 {
 	u32 data;
 
@@ -1764,21 +1737,19 @@ gfx_v7_0_write_harvested_raster_configs(struct amdgpu_device *adev,
 		}
 
 		/* GRBM_GFX_INDEX has a different offset on CI+ */
-		gfx_v7_0_select_se_sh(adev, se, 0xffffffff, 0xffffffff);
+		gfx_v7_0_select_se_sh(adev, se, 0xffffffff, 0xffffffff, 0);
 		WREG32(mmPA_SC_RASTER_CONFIG, raster_config_se);
 		WREG32(mmPA_SC_RASTER_CONFIG_1, raster_config_1);
 	}
 
 	/* GRBM_GFX_INDEX has a different offset on CI+ */
-	gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+	gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
 }
 
 /**
  * gfx_v7_0_setup_rb - setup the RBs on the asic
  *
  * @adev: amdgpu_device pointer
- * @se_num: number of SEs (shader engines) for the asic
- * @sh_per_se: number of SH blocks per SE for the asic
  *
  * Configures per-SE/SH RB registers (CIK).
  */
@@ -1795,13 +1766,13 @@ static void gfx_v7_0_setup_rb(struct amdgpu_device *adev)
 	mutex_lock(&adev->grbm_idx_mutex);
 	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
 		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
-			gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff);
+			gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff, 0);
 			data = gfx_v7_0_get_rb_active_bitmap(adev);
 			active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) *
 					       rb_bitmap_width_per_sh);
 		}
 	}
-	gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+	gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
 
 	adev->gfx.config.backend_enable_mask = active_rbs;
 	adev->gfx.config.num_rbs = hweight32(active_rbs);
@@ -1824,7 +1795,7 @@ static void gfx_v7_0_setup_rb(struct amdgpu_device *adev)
 	/* cache the values for userspace */
 	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
 		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
-			gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff);
+			gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff, 0);
 			adev->gfx.config.rb_config[i][j].rb_backend_disable =
 				RREG32(mmCC_RB_BACKEND_DISABLE);
 			adev->gfx.config.rb_config[i][j].user_rb_backend_disable =
@@ -1835,10 +1806,11 @@ static void gfx_v7_0_setup_rb(struct amdgpu_device *adev)
 					RREG32(mmPA_SC_RASTER_CONFIG_1);
 		}
 	}
-	gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+	gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
 	mutex_unlock(&adev->grbm_idx_mutex);
 }
 
+#define DEFAULT_SH_MEM_BASES	(0x6000)
 /**
  * gfx_v7_0_init_compute_vmid - gart enable
  *
@@ -1847,9 +1819,6 @@ static void gfx_v7_0_setup_rb(struct amdgpu_device *adev)
  * Initialize compute vmid sh_mem registers
  *
  */
-#define DEFAULT_SH_MEM_BASES	(0x6000)
-#define FIRST_COMPUTE_VMID	(8)
-#define LAST_COMPUTE_VMID	(16)
 static void gfx_v7_0_init_compute_vmid(struct amdgpu_device *adev)
 {
 	int i;
@@ -1867,7 +1836,7 @@ static void gfx_v7_0_init_compute_vmid(struct amdgpu_device *adev)
 			SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT;
 	sh_mem_config |= MTYPE_NONCACHED << SH_MEM_CONFIG__DEFAULT_MTYPE__SHIFT;
 	mutex_lock(&adev->srbm_mutex);
-	for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) {
+	for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
 		cik_srbm_select(adev, 0, 0, 0, i);
 		/* CP and shaders */
 		WREG32(mmSH_MEM_CONFIG, sh_mem_config);
@@ -1877,6 +1846,33 @@ static void gfx_v7_0_init_compute_vmid(struct amdgpu_device *adev)
 	}
 	cik_srbm_select(adev, 0, 0, 0, 0);
 	mutex_unlock(&adev->srbm_mutex);
+
+	/* Initialize all compute VMIDs to have no GDS, GWS, or OA
+	   access. These should be enabled by FW for target VMIDs. */
+	for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
+		WREG32(amdgpu_gds_reg_offset[i].mem_base, 0);
+		WREG32(amdgpu_gds_reg_offset[i].mem_size, 0);
+		WREG32(amdgpu_gds_reg_offset[i].gws, 0);
+		WREG32(amdgpu_gds_reg_offset[i].oa, 0);
+	}
+}
+
+static void gfx_v7_0_init_gds_vmid(struct amdgpu_device *adev)
+{
+	int vmid;
+
+	/*
+	 * Initialize all compute and user-gfx VMIDs to have no GDS, GWS, or OA
+	 * access. Compute VMIDs should be enabled by FW for target VMIDs,
+	 * the driver can enable them for graphics. VMID0 should maintain
+	 * access so that HWS firmware can save/restore entries.
+	 */
+	for (vmid = 1; vmid < AMDGPU_NUM_VMID; vmid++) {
+		WREG32(amdgpu_gds_reg_offset[vmid].mem_base, 0);
+		WREG32(amdgpu_gds_reg_offset[vmid].mem_size, 0);
+		WREG32(amdgpu_gds_reg_offset[vmid].gws, 0);
+		WREG32(amdgpu_gds_reg_offset[vmid].oa, 0);
+	}
 }
 
 static void gfx_v7_0_config_init(struct amdgpu_device *adev)
@@ -1920,7 +1916,7 @@ static void gfx_v7_0_constants_init(struct amdgpu_device *adev)
 	 * making sure that the following register writes will be broadcasted
 	 * to all the shaders
 	 */
-	gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+	gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
 
 	/* XXX SH_MEM regs */
 	/* where to put LDS, scratch, GPUVM in FSA64 space */
@@ -1957,6 +1953,7 @@ static void gfx_v7_0_constants_init(struct amdgpu_device *adev)
 	mutex_unlock(&adev->srbm_mutex);
 
 	gfx_v7_0_init_compute_vmid(adev);
+	gfx_v7_0_init_gds_vmid(adev);
 
 	WREG32(mmSX_DEBUG_1, 0x20);
 
@@ -2023,30 +2020,9 @@ static void gfx_v7_0_constants_init(struct amdgpu_device *adev)
 	udelay(50);
 }
 
-/*
- * GPU scratch registers helpers function.
- */
-/**
- * gfx_v7_0_scratch_init - setup driver info for CP scratch regs
- *
- * @adev: amdgpu_device pointer
- *
- * Set up the number and offset of the CP scratch registers.
- * NOTE: use of CP scratch registers is a legacy inferface and
- * is not used by default on newer asics (r6xx+).  On newer asics,
- * memory buffers are used for fences rather than scratch regs.
- */
-static void gfx_v7_0_scratch_init(struct amdgpu_device *adev)
-{
-	adev->gfx.scratch.num_reg = 8;
-	adev->gfx.scratch.reg_base = mmSCRATCH_REG0;
-	adev->gfx.scratch.free_mask = (1u << adev->gfx.scratch.num_reg) - 1;
-}
-
 /**
  * gfx_v7_0_ring_test_ring - basic gfx ring test
  *
- * @adev: amdgpu_device pointer
  * @ring: amdgpu_ring structure holding ring information
 *
 * Allocate a scratch register and write to it using the gfx ring (CIK).
@@ -2057,44 +2033,35 @@ static void gfx_v7_0_scratch_init(struct amdgpu_device *adev)
 static int gfx_v7_0_ring_test_ring(struct amdgpu_ring *ring)
 {
 	struct amdgpu_device *adev = ring->adev;
-	uint32_t scratch;
 	uint32_t tmp = 0;
 	unsigned i;
 	int r;
 
-	r = amdgpu_gfx_scratch_get(adev, &scratch);
-	if (r)
-		return r;
-
-	WREG32(scratch, 0xCAFEDEAD);
+	WREG32(mmSCRATCH_REG0, 0xCAFEDEAD);
 	r = amdgpu_ring_alloc(ring, 3);
 	if (r)
-		goto error_free_scratch;
+		return r;
 
 	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
-	amdgpu_ring_write(ring, (scratch - PACKET3_SET_UCONFIG_REG_START));
+	amdgpu_ring_write(ring, mmSCRATCH_REG0 - PACKET3_SET_UCONFIG_REG_START);
 	amdgpu_ring_write(ring, 0xDEADBEEF);
 	amdgpu_ring_commit(ring);
 
 	for (i = 0; i < adev->usec_timeout; i++) {
-		tmp = RREG32(scratch);
+		tmp = RREG32(mmSCRATCH_REG0);
 		if (tmp == 0xDEADBEEF)
 			break;
-		DRM_UDELAY(1);
+		udelay(1);
 	}
 	if (i >= adev->usec_timeout)
 		r = -ETIMEDOUT;
-
-error_free_scratch:
-	amdgpu_gfx_scratch_free(adev, scratch);
 	return r;
 }
 
 /**
- * gfx_v7_0_ring_emit_hdp - emit an hdp flush on the cp
+ * gfx_v7_0_ring_emit_hdp_flush - emit an hdp flush on the cp
  *
- * @adev: amdgpu_device pointer
- * @ridx: amdgpu ring index
+ * @ring: amdgpu_ring structure holding ring information
 *
 * Emits an hdp flush on the cp.
 */
@@ -2143,10 +2110,12 @@ static void gfx_v7_0_ring_emit_vgt_flush(struct amdgpu_ring *ring)
 /**
  * gfx_v7_0_ring_emit_fence_gfx - emit a fence on the gfx ring
  *
- * @adev: amdgpu_device pointer
- * @fence: amdgpu fence object
+ * @ring: amdgpu_ring structure holding ring information
+ * @addr: address
+ * @seq: sequence number
+ * @flags: fence related flags
 *
- * Emits a fence sequnce number on the gfx ring and flushes
+ * Emits a fence sequence number on the gfx ring and flushes
 * GPU caches.
 */
 static void gfx_v7_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr,
@@ -2154,6 +2123,8 @@ static void gfx_v7_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr,
 {
 	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
 	bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
+	bool exec = flags & AMDGPU_FENCE_FLAG_EXEC;
+
 	/* Workaround for cache flush problems. First send a dummy EOP
 	 * event down the pipe with seq one below.
 	 */
@@ -2173,7 +2144,8 @@ static void gfx_v7_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr,
 	amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
 				 EOP_TC_ACTION_EN |
 				 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
-				 EVENT_INDEX(5)));
+				 EVENT_INDEX(5) |
+				 (exec ? EOP_EXEC : 0)));
 	amdgpu_ring_write(ring, addr & 0xfffffffc);
 	amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xffff) |
 				DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
@@ -2184,10 +2156,12 @@ static void gfx_v7_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr,
 /**
  * gfx_v7_0_ring_emit_fence_compute - emit a fence on the compute ring
  *
- * @adev: amdgpu_device pointer
- * @fence: amdgpu fence object
+ * @ring: amdgpu_ring structure holding ring information
+ * @addr: address
+ * @seq: sequence number
+ * @flags: fence related flags
 *
- * Emits a fence sequnce number on the compute ring and flushes
+ * Emits a fence sequence number on the compute ring and flushes
 * GPU caches.
 */
 static void gfx_v7_0_ring_emit_fence_compute(struct amdgpu_ring *ring,
@@ -2214,27 +2188,29 @@ static void gfx_v7_0_ring_emit_fence_compute(struct amdgpu_ring *ring,
 * IB stuff
 */
 /**
- * gfx_v7_0_ring_emit_ib - emit an IB (Indirect Buffer) on the ring
+ * gfx_v7_0_ring_emit_ib_gfx - emit an IB (Indirect Buffer) on the ring
  *
  * @ring: amdgpu_ring structure holding ring information
+ * @job: job to retrieve vmid from
  * @ib: amdgpu indirect buffer object
+ * @flags: options (AMDGPU_HAVE_CTX_SWITCH)
 *
 * Emits an DE (drawing engine) or CE (constant engine) IB
 * on the gfx ring.  IBs are usually generated by userspace
 * acceleration drivers and submitted to the kernel for
- * sheduling on the ring.  This function schedules the IB
+ * scheduling on the ring.  This function schedules the IB
 * on the gfx ring for execution by the GPU.
 */
 static void gfx_v7_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
					struct amdgpu_job *job,
					struct amdgpu_ib *ib,
-					bool ctx_switch)
+					uint32_t flags)
 {
 	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
 	u32 header, control = 0;
 
 	/* insert SWITCH_BUFFER packet before first IB in the ring frame */
-	if (ctx_switch) {
+	if (flags & AMDGPU_HAVE_CTX_SWITCH) {
 		amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
 		amdgpu_ring_write(ring, 0);
 	}
@@ -2259,11 +2235,27 @@ static void gfx_v7_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
 static void gfx_v7_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
					  struct amdgpu_job *job,
					  struct amdgpu_ib *ib,
-					  bool ctx_switch)
+					  uint32_t flags)
 {
 	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
 	u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
 
+	/* Currently, there is a high possibility to get wave ID mismatch
+	 * between ME and GDS, leading to a hw deadlock, because ME generates
+	 * different wave IDs than the GDS expects. This situation happens
+	 * randomly when at least 5 compute pipes use GDS ordered append.
+	 * The wave IDs generated by ME are also wrong after suspend/resume.
+	 * Those are probably bugs somewhere else in the kernel driver.
+	 *
+	 * Writing GDS_COMPUTE_MAX_WAVE_ID resets wave ID counters in ME and
+	 * GDS to 0 for this ring (me/pipe).
+	 */
+	if (ib->flags & AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID) {
+		amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+		amdgpu_ring_write(ring, mmGDS_COMPUTE_MAX_WAVE_ID - PACKET3_SET_CONFIG_REG_START);
+		amdgpu_ring_write(ring, ring->adev->gds.gds_compute_max_wave_id);
+	}
+
 	amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
 	amdgpu_ring_write(ring,
 #ifdef __BIG_ENDIAN
@@ -2298,6 +2290,7 @@ static void gfx_v7_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
 * gfx_v7_0_ring_test_ib - basic ring IB test
 *
 * @ring: amdgpu_ring structure holding ring information
+ * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 *
 * Allocate an IB and execute it on the gfx ring (CIK).
 * Provides a basic gfx ring test to verify that IBs are working.
@@ -2308,53 +2301,46 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 	struct amdgpu_device *adev = ring->adev;
 	struct amdgpu_ib ib;
 	struct dma_fence *f = NULL;
-	uint32_t scratch;
 	uint32_t tmp = 0;
 	long r;
 
-	r = amdgpu_gfx_scratch_get(adev, &scratch);
-	if (r)
-		return r;
-
-	WREG32(scratch, 0xCAFEDEAD);
+	WREG32(mmSCRATCH_REG0, 0xCAFEDEAD);
 	memset(&ib, 0, sizeof(ib));
-	r = amdgpu_ib_get(adev, NULL, 256, &ib);
+	r = amdgpu_ib_get(adev, NULL, 256, AMDGPU_IB_POOL_DIRECT, &ib);
 	if (r)
-		goto err1;
+		return r;
 
 	ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1);
-	ib.ptr[1] = ((scratch - PACKET3_SET_UCONFIG_REG_START));
+	ib.ptr[1] = mmSCRATCH_REG0 - PACKET3_SET_UCONFIG_REG_START;
 	ib.ptr[2] = 0xDEADBEEF;
 	ib.length_dw = 3;
 
 	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
 	if (r)
-		goto err2;
+		goto error;
 
 	r = dma_fence_wait_timeout(f, false, timeout);
 	if (r == 0) {
 		r = -ETIMEDOUT;
-		goto err2;
+		goto error;
 	} else if (r < 0) {
-		goto err2;
+		goto error;
 	}
-	tmp = RREG32(scratch);
+	tmp = RREG32(mmSCRATCH_REG0);
 	if (tmp == 0xDEADBEEF)
 		r = 0;
 	else
 		r = -EINVAL;
 
-err2:
-	amdgpu_ib_free(adev, &ib, NULL);
+error:
+	amdgpu_ib_free(&ib, NULL);
 	dma_fence_put(f);
-err1:
-	amdgpu_gfx_scratch_free(adev, scratch);
 	return r;
 }
 
 /*
 * CP.
- * On CIK, gfx and compute now have independant command processors.
+ * On CIK, gfx and compute now have independent command processors.
 *
 * GFX
 * Gfx consists of a single ring and can process both gfx jobs and
@@ -2385,15 +2371,12 @@ err1:
 */
 static void gfx_v7_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
 {
-	int i;
-
-	if (enable) {
+	if (enable)
 		WREG32(mmCP_ME_CNTL, 0);
-	} else {
-		WREG32(mmCP_ME_CNTL, (CP_ME_CNTL__ME_HALT_MASK | CP_ME_CNTL__PFP_HALT_MASK | CP_ME_CNTL__CE_HALT_MASK));
-		for (i = 0; i < adev->gfx.num_gfx_rings; i++)
-			adev->gfx.gfx_ring[i].sched.ready = false;
-	}
+	else
+		WREG32(mmCP_ME_CNTL, (CP_ME_CNTL__ME_HALT_MASK |
+				      CP_ME_CNTL__PFP_HALT_MASK |
+				      CP_ME_CNTL__CE_HALT_MASK));
 	udelay(50);
 }
 
@@ -2585,8 +2568,8 @@ static int gfx_v7_0_cp_gfx_resume(struct amdgpu_device *adev)
 	ring->wptr = 0;
 	WREG32(mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
 
-	/* set the wb address wether it's enabled or not */
-	rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
+	/* set the wb address whether it's enabled or not */
+	rptr_addr = ring->rptr_gpu_addr;
 	WREG32(mmCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr));
 	WREG32(mmCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & 0xFF);
 
@@ -2611,7 +2594,7 @@ static u64 gfx_v7_0_ring_get_rptr(struct amdgpu_ring *ring)
 {
-	return ring->adev->wb.wb[ring->rptr_offs];
+	return *ring->rptr_cpu_addr;
 }
 
 static u64 gfx_v7_0_ring_get_wptr_gfx(struct amdgpu_ring *ring)
@@ -2632,7 +2615,7 @@ static u64 gfx_v7_0_ring_get_wptr_compute(struct amdgpu_ring *ring)
 {
 	/* XXX check if swapping is necessary on BE */
-	return ring->adev->wb.wb[ring->wptr_offs];
+	return *ring->wptr_cpu_addr;
 }
 
@@ -2640,7 +2623,7 @@ static void gfx_v7_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
 {
 	struct amdgpu_device *adev = ring->adev;
 
 	/* XXX check if swapping is necessary on BE */
-	adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
+	*ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
 	WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
 }
 
@@ -2654,15 +2637,11 @@ static void gfx_v7_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
 */
 static void gfx_v7_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
 {
-	int i;
-
-	if (enable) {
+	if (enable)
 		WREG32(mmCP_MEC_CNTL, 0);
-	} else {
-		WREG32(mmCP_MEC_CNTL, (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK));
-		for (i = 0; i < adev->gfx.num_compute_rings; i++)
-			adev->gfx.compute_ring[i].sched.ready = false;
-	}
+	else
+		WREG32(mmCP_MEC_CNTL, (CP_MEC_CNTL__MEC_ME1_HALT_MASK |
+				       CP_MEC_CNTL__MEC_ME2_HALT_MASK));
 	udelay(50);
 }
 
@@ -2757,7 +2736,7 @@ static int gfx_v7_0_mec_init(struct amdgpu_device *adev)
 	u32 *hpd;
 	size_t mec_hpd_size;
 
-	bitmap_zero(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
+	bitmap_zero(adev->gfx.mec_bitmap[0].queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
 
 	/* take ownership of the relevant compute queues */
 	amdgpu_gfx_compute_queue_acquire(adev);
@@ -2767,7 +2746,8 @@ static int gfx_v7_0_mec_init(struct amdgpu_device *adev)
 		* GFX7_MEC_HPD_SIZE * 2;
 
 	r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
-				      AMDGPU_GEM_DOMAIN_VRAM,
+				      AMDGPU_GEM_DOMAIN_VRAM |
+				      AMDGPU_GEM_DOMAIN_GTT,
 				      &adev->gfx.mec.hpd_eop_obj,
 				      &adev->gfx.mec.hpd_eop_gpu_addr,
 				      (void **)&hpd);
@@ -2786,45 +2766,6 @@ static int gfx_v7_0_mec_init(struct amdgpu_device *adev)
 	return 0;
 }
 
-struct hqd_registers
-{
-	u32 cp_mqd_base_addr;
-	u32 cp_mqd_base_addr_hi;
-	u32 cp_hqd_active;
-	u32 cp_hqd_vmid;
-	u32 cp_hqd_persistent_state;
-	u32 cp_hqd_pipe_priority;
-	u32 cp_hqd_queue_priority;
-	u32 cp_hqd_quantum;
-	u32 cp_hqd_pq_base;
-	u32 cp_hqd_pq_base_hi;
-	u32 cp_hqd_pq_rptr;
-	u32 cp_hqd_pq_rptr_report_addr;
-	u32 cp_hqd_pq_rptr_report_addr_hi;
-	u32 cp_hqd_pq_wptr_poll_addr;
-	u32 cp_hqd_pq_wptr_poll_addr_hi;
-	u32 cp_hqd_pq_doorbell_control;
-	u32 cp_hqd_pq_wptr;
-	u32 cp_hqd_pq_control;
-	u32 cp_hqd_ib_base_addr;
-	u32 cp_hqd_ib_base_addr_hi;
-	u32 cp_hqd_ib_rptr;
-	u32 cp_hqd_ib_control;
-	u32 cp_hqd_iq_timer;
-	u32 cp_hqd_iq_rptr;
-	u32 cp_hqd_dequeue_request;
-	u32 cp_hqd_dma_offload;
-	u32 cp_hqd_sema_cmd;
-	u32 cp_hqd_msg_type;
-	u32 cp_hqd_atomic0_preop_lo;
-	u32 cp_hqd_atomic0_preop_hi;
-	u32 cp_hqd_atomic1_preop_lo;
-	u32 cp_hqd_atomic1_preop_hi;
-	u32 cp_hqd_hq_scheduler0;
-	u32 cp_hqd_hq_scheduler1;
-	u32 cp_mqd_control;
-};
-
 static void gfx_v7_0_compute_pipe_init(struct amdgpu_device *adev,
				       int mec, int pipe)
@@ -2940,12 +2881,12 @@ static void gfx_v7_0_mqd_init(struct amdgpu_device *adev,
 		CP_HQD_PQ_CONTROL__KMD_QUEUE_MASK; /* assuming kernel queue control */
 
 	/* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
-	wb_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
+	wb_gpu_addr = ring->wptr_gpu_addr;
 	mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
 	mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
 
-	/* set the wb address wether it's enabled or not */
-	wb_gpu_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
+	/* set the wb address whether it's enabled or not */
+	wb_gpu_addr = ring->rptr_gpu_addr;
 	mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc;
 	mqd->cp_hqd_pq_rptr_report_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
@@ -2999,7 +2940,7 @@ static void gfx_v7_0_mqd_init(struct amdgpu_device *adev,
 	mqd->cp_hqd_active = 1;
 }
 
-int gfx_v7_0_mqd_commit(struct amdgpu_device *adev, struct cik_mqd *mqd)
+static int gfx_v7_0_mqd_commit(struct amdgpu_device *adev, struct cik_mqd *mqd)
 {
 	uint32_t tmp;
 	uint32_t mqd_reg;
@@ -3155,9 +3096,9 @@ static int gfx_v7_0_cp_resume(struct amdgpu_device *adev)
 }
 
 /**
- * gfx_v7_0_ring_emit_vm_flush - cik vm flush using the CP
+ * gfx_v7_0_ring_emit_pipeline_sync - cik vm flush using the CP
  *
- * @ring: the ring to emmit the commands to
+ * @ring: the ring to emit the commands to
 *
 * Sync the command pipeline with the PFP. E.g. wait for everything
 * to be completed.
@@ -3179,7 +3120,7 @@ static void gfx_v7_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
 	amdgpu_ring_write(ring, 4); /* poll interval */
 
 	if (usepfp) {
-		/* synce CE with ME to prevent CE fetch CEIB before context switch done */
+		/* sync CE with ME to prevent CE fetch CEIB before context switch done */
 		amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
 		amdgpu_ring_write(ring, 0);
 		amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
@@ -3196,7 +3137,9 @@
 /**
  * gfx_v7_0_ring_emit_vm_flush - cik vm flush using the CP
  *
- * @adev: amdgpu_device pointer
+ * @ring: amdgpu_ring pointer
+ * @vmid: vmid number to use
+ * @pd_addr: address
 *
 * Update the page table base and flush the VM TLB
 * using the CP (CIK).
@@ -3300,6 +3243,10 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
 			return r;
 	}
 
+	/* init spm vmid with 0xf */
+	if (adev->gfx.rlc.funcs->update_spm_vmid)
+		adev->gfx.rlc.funcs->update_spm_vmid(adev, NULL, 0xf);
+
 	return 0;
 }
 
@@ -3323,7 +3270,7 @@ static void gfx_v7_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
 	mutex_lock(&adev->grbm_idx_mutex);
 	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
 		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
-			gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff);
+			gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff, 0);
 			for (k = 0; k < adev->usec_timeout; k++) {
 				if (RREG32(mmRLC_SERDES_CU_MASTER_BUSY) == 0)
 					break;
@@ -3331,7 +3278,7 @@ static void gfx_v7_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
 			}
 		}
 	}
-	gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+	gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
 	mutex_unlock(&adev->grbm_idx_mutex);
 
 	mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK |
@@ -3383,7 +3330,7 @@ static bool gfx_v7_0_is_rlc_enabled(struct amdgpu_device *adev)
 	return true;
 }
 
-static void gfx_v7_0_set_safe_mode(struct amdgpu_device *adev)
+static void gfx_v7_0_set_safe_mode(struct amdgpu_device *adev, int xcc_id)
 {
 	u32 tmp, i, mask;
 
@@ -3405,7 +3352,7 @@ static void gfx_v7_0_set_safe_mode(struct amdgpu_device *adev)
 	}
 }
 
-static void gfx_v7_0_unset_safe_mode(struct amdgpu_device *adev)
+static void gfx_v7_0_unset_safe_mode(struct amdgpu_device *adev, int xcc_id)
 {
 	u32 tmp;
 
@@ -3496,7 +3443,7 @@ static int gfx_v7_0_rlc_resume(struct amdgpu_device *adev)
 	WREG32(mmRLC_LB_CNTR_MAX, 0x00008000);
 
 	mutex_lock(&adev->grbm_idx_mutex);
-	gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+	gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
 	WREG32(mmRLC_LB_INIT_CU_MASK, 0xffffffff);
 	WREG32(mmRLC_LB_PARAMS, 0x00600408);
 	WREG32(mmRLC_LB_CNTL, 0x80000004);
@@ -3524,6 +3471,22 @@ static int gfx_v7_0_rlc_resume(struct amdgpu_device *adev)
 	return 0;
 }
 
+static void gfx_v7_0_update_spm_vmid(struct amdgpu_device *adev, struct amdgpu_ring *ring, unsigned vmid)
+{
+	u32 data;
+
+	amdgpu_gfx_off_ctrl(adev, false);
+
+	data = RREG32(mmRLC_SPM_VMID);
+
+	data &= ~RLC_SPM_VMID__RLC_SPM_VMID_MASK;
+	data |= (vmid & RLC_SPM_VMID__RLC_SPM_VMID_MASK) << RLC_SPM_VMID__RLC_SPM_VMID__SHIFT;
+
+	WREG32(mmRLC_SPM_VMID, data);
+
+	amdgpu_gfx_off_ctrl(adev, true);
+}
+
 static void gfx_v7_0_enable_cgcg(struct amdgpu_device *adev, bool enable)
 {
 	u32 data, orig, tmp, tmp2;
 
@@ -3536,7 +3499,7 @@ static void gfx_v7_0_enable_cgcg(struct amdgpu_device *adev, bool enable)
 		tmp = gfx_v7_0_halt_rlc(adev);
 
 		mutex_lock(&adev->grbm_idx_mutex);
-		gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+		gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
 		WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
 		WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
 		tmp2 = RLC_SERDES_WR_CTRL__BPM_ADDR_MASK |
@@ -3590,7 +3553,7 @@ static void gfx_v7_0_enable_mgcg(struct amdgpu_device *adev, bool enable)
 			tmp = gfx_v7_0_halt_rlc(adev);
 
 			mutex_lock(&adev->grbm_idx_mutex);
-			gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+			gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
 			WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
 			WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
 			data = RLC_SERDES_WR_CTRL__BPM_ADDR_MASK |
@@ -3641,7 +3604,7 @@ static void gfx_v7_0_enable_mgcg(struct amdgpu_device *adev, bool enable)
 		tmp = gfx_v7_0_halt_rlc(adev);
 
 		mutex_lock(&adev->grbm_idx_mutex);
-		gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+		gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
 		WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
 		WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
 		data = RLC_SERDES_WR_CTRL__BPM_ADDR_MASK | RLC_SERDES_WR_CTRL__MGCG_OVERRIDE_1_MASK;
@@ -3919,70 +3882,24 @@ static u32 gfx_v7_0_get_csb_size(struct amdgpu_device *adev)
 	return count;
 }
 
-static void gfx_v7_0_get_csb_buffer(struct amdgpu_device *adev,
-				    volatile u32 *buffer)
+static void gfx_v7_0_get_csb_buffer(struct amdgpu_device *adev, u32 *buffer)
 {
-	u32 count = 0, i;
-	const struct cs_section_def *sect = NULL;
-	const struct cs_extent_def *ext = NULL;
+	u32 count = 0;
 
 	if (adev->gfx.rlc.cs_data == NULL)
 		return;
 	if (buffer == NULL)
 		return;
 
-	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
-	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
-
-	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
-	buffer[count++] = cpu_to_le32(0x80000000);
-	buffer[count++] = cpu_to_le32(0x80000000);
-
-	for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
-		for (ext = sect->section; ext->extent != NULL; ++ext) {
-			if (sect->id == SECT_CONTEXT) {
-				buffer[count++] =
-					cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
-				buffer[count++] = cpu_to_le32(ext->reg_index - PACKET3_SET_CONTEXT_REG_START);
-				for (i = 0; i < ext->reg_count; i++)
-					buffer[count++] = cpu_to_le32(ext->extent[i]);
-			} else {
-				return;
-			}
-		}
-	}
+	count = amdgpu_gfx_csb_preamble_start(buffer);
+	count = amdgpu_gfx_csb_data_parser(adev, buffer, count);
 
 	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 2));
 	buffer[count++] = cpu_to_le32(mmPA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START);
-	switch (adev->asic_type) {
-	case CHIP_BONAIRE:
-		buffer[count++] = cpu_to_le32(0x16000012);
-		buffer[count++] = cpu_to_le32(0x00000000);
-		break;
-	case CHIP_KAVERI:
-		buffer[count++] = cpu_to_le32(0x00000000); /* XXX */
-		buffer[count++] = cpu_to_le32(0x00000000);
-		break;
-	case CHIP_KABINI:
-	case CHIP_MULLINS:
-		buffer[count++] = cpu_to_le32(0x00000000); /* XXX */
-		buffer[count++] = cpu_to_le32(0x00000000);
-		break;
-	case CHIP_HAWAII:
-		buffer[count++] = cpu_to_le32(0x3a00161a);
-		buffer[count++] = cpu_to_le32(0x0000002e);
-		break;
-	default:
-		buffer[count++] = cpu_to_le32(0x00000000);
-		buffer[count++] = cpu_to_le32(0x00000000);
-		break;
-	}
-
-	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
-	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);
+	buffer[count++] = cpu_to_le32(adev->gfx.config.rb_config[0][0].raster_config);
+	buffer[count++] = cpu_to_le32(adev->gfx.config.rb_config[0][0].raster_config_1);
 
-	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
-	buffer[count++] = cpu_to_le32(0);
+	amdgpu_gfx_csb_preamble_end(buffer, count);
 }
 
 static void gfx_v7_0_init_pg(struct amdgpu_device *adev)
@@ -4117,7 +4034,7 @@ static void wave_read_regs(struct amdgpu_device *adev, uint32_t simd,
 		*(out++) = RREG32(mmSQ_IND_DATA);
 }
 
-static void gfx_v7_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields)
+static void gfx_v7_0_read_wave_data(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields)
 {
 	/* type 0 wave data */
 	dst[(*no_fields)++] = 0;
@@ -4139,9 +4056,10 @@ static void gfx_v7_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, u
 	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TMA_HI);
 	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_DBG0);
 	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_M0);
+	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_MODE);
 }
 
-static void gfx_v7_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd,
+static void gfx_v7_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd,
				     uint32_t wave, uint32_t start,
				     uint32_t size, uint32_t *dst)
 {
@@ -4151,9 +4069,9 @@ static void gfx_v7_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd,
 }
 
 static void gfx_v7_0_select_me_pipe_q(struct amdgpu_device *adev,
-				      u32 me, u32 pipe, u32 q)
+				      u32 me, u32 pipe, u32 q, u32 vm, u32 xcc_id)
 {
-	cik_srbm_select(adev, me, pipe, q, 0);
+	cik_srbm_select(adev, me, pipe, q, vm);
 }
 
 static const struct amdgpu_gfx_funcs gfx_v7_0_gfx_funcs = {
@@ -4175,15 +4093,18 @@ static const struct amdgpu_rlc_funcs gfx_v7_0_rlc_funcs = {
 	.resume = gfx_v7_0_rlc_resume,
 	.stop = gfx_v7_0_rlc_stop,
 	.reset = gfx_v7_0_rlc_reset,
-	.start = gfx_v7_0_rlc_start
+	.start = gfx_v7_0_rlc_start,
+	.update_spm_vmid = gfx_v7_0_update_spm_vmid
 };
 
-static int gfx_v7_0_early_init(void *handle)
+static int gfx_v7_0_early_init(struct amdgpu_ip_block *ip_block)
 {
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	struct amdgpu_device *adev = ip_block->adev;
 
+	adev->gfx.xcc_mask = 1;
 	adev->gfx.num_gfx_rings = GFX7_NUM_GFX_RINGS;
-	adev->gfx.num_compute_rings = AMDGPU_MAX_COMPUTE_RINGS;
+	adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev),
+					  AMDGPU_MAX_COMPUTE_RINGS);
 	adev->gfx.funcs = &gfx_v7_0_gfx_funcs;
 	adev->gfx.rlc.funcs = &gfx_v7_0_rlc_funcs;
 	gfx_v7_0_set_ring_funcs(adev);
@@ -4193,9 +4114,9 @@ static int gfx_v7_0_early_init(void *handle)
 	return 0;
 }
 
-static int gfx_v7_0_late_init(void *handle)
+static int gfx_v7_0_late_init(struct amdgpu_ip_block *ip_block)
 {
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	struct amdgpu_device *adev = ip_block->adev;
 	int r;
 
 	r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0);
@@ -4212,7 +4133,7 @@ static void gfx_v7_0_gpu_early_init(struct amdgpu_device *adev)
 {
 	u32 gb_addr_config;
-	u32 mc_shared_chmap, mc_arb_ramcfg;
+	u32 mc_arb_ramcfg;
 	u32 dimm00_addr_map, dimm01_addr_map, dimm10_addr_map, dimm11_addr_map;
 	u32 tmp;
@@ -4289,10 +4210,14 @@ static void gfx_v7_0_gpu_early_init(struct amdgpu_device *adev)
 		break;
 	}
 
-	mc_shared_chmap = RREG32(mmMC_SHARED_CHMAP);
 	adev->gfx.config.mc_arb_ramcfg = RREG32(mmMC_ARB_RAMCFG);
 	mc_arb_ramcfg = adev->gfx.config.mc_arb_ramcfg;
 
+	adev->gfx.config.num_banks = REG_GET_FIELD(mc_arb_ramcfg,
+						   MC_ARB_RAMCFG, NOOFBANK);
+	adev->gfx.config.num_ranks = REG_GET_FIELD(mc_arb_ramcfg,
+						   MC_ARB_RAMCFG, NOOFRANKS);
+
 	adev->gfx.config.num_tile_pipes = adev->gfx.config.max_tile_pipes;
 	adev->gfx.config.mem_max_burst_length_bytes = 256;
 	if (adev->flags & AMD_IS_APU) {
@@ -4372,7 +4297,8 @@ static int gfx_v7_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
 
 	/* type-2 packets are deprecated on MEC, use type-3 instead */
 	r = amdgpu_ring_init(adev, ring, 1024,
-			     &adev->gfx.eop_irq, irq_type);
+			     &adev->gfx.eop_irq, irq_type,
+			     AMDGPU_RING_PRIO_DEFAULT, NULL);
 	if (r)
 		return r;
 
@@ -4380,10 +4306,10 @@ static int gfx_v7_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
 	return 0;
 }
 
-static int gfx_v7_0_sw_init(void *handle)
+static int gfx_v7_0_sw_init(struct amdgpu_ip_block *ip_block)
 {
 	struct amdgpu_ring *ring;
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	struct amdgpu_device *adev = ip_block->adev;
 	int i, j, k, r, ring_id;
 
 	switch (adev->asic_type) {
@@ -4418,8 +4344,6 @@ static int gfx_v7_0_sw_init(void *handle)
 	if (r)
 		return r;
 
-	gfx_v7_0_scratch_init(adev);
-
 	r = gfx_v7_0_init_microcode(adev);
 	if (r) {
 		DRM_ERROR("Failed to load gfx firmware!\n");
@@ -4444,7 +4368,9 @@ static int gfx_v7_0_sw_init(void *handle)
 		ring->ring_obj = NULL;
 		sprintf(ring->name, "gfx");
 		r = amdgpu_ring_init(adev, ring, 1024,
-				     &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_EOP);
+				     &adev->gfx.eop_irq,
+				     AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP,
+				     AMDGPU_RING_PRIO_DEFAULT, NULL);
 		if (r)
 			return r;
 	}
@@ -4454,7 +4380,8 @@ static int gfx_v7_0_sw_init(void *handle)
 	for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
 		for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
 			for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
-				if (!amdgpu_gfx_is_mec_queue_enabled(adev, i, k, j))
+				if (!amdgpu_gfx_is_mec_queue_enabled(adev, 0, i,
+								     k, j))
 					continue;
 
 				r = gfx_v7_0_compute_ring_init(adev,
@@ -4472,17 +4399,18 @@ static int gfx_v7_0_sw_init(void *handle)
 
 	gfx_v7_0_gpu_early_init(adev);
 
+	adev->gfx.gfx_supported_reset =
+		amdgpu_get_soft_full_reset_mask(&adev->gfx.gfx_ring[0]);
+	adev->gfx.compute_supported_reset =
+		amdgpu_get_soft_full_reset_mask(&adev->gfx.compute_ring[0]);
+
 	return r;
 }
 
-static int gfx_v7_0_sw_fini(void *handle)
+static int gfx_v7_0_sw_fini(struct amdgpu_ip_block *ip_block)
 {
+	struct amdgpu_device *adev = ip_block->adev;
 	int i;
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-	amdgpu_bo_free_kernel(&adev->gds.oa_gfx_bo, NULL, NULL);
-	amdgpu_bo_free_kernel(&adev->gds.gws_gfx_bo, NULL, NULL);
-	amdgpu_bo_free_kernel(&adev->gds.gds_gfx_bo, NULL, NULL);
 
 	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
 		amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
@@ -4505,13 +4433,15 @@ static int gfx_v7_0_sw_fini(void *handle)
 	return 0;
 }
 
-static int gfx_v7_0_hw_init(void *handle)
+static int gfx_v7_0_hw_init(struct amdgpu_ip_block *ip_block)
 {
 	int r;
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	struct amdgpu_device *adev = ip_block->adev;
 
 	gfx_v7_0_constants_init(adev);
 
+	/* init CSB */
+	adev->gfx.rlc.funcs->get_csb_buffer(adev, adev->gfx.rlc.cs_ptr);
 	/* init rlc */
 	r = adev->gfx.rlc.funcs->resume(adev);
 	if (r)
@@ -4524,9 +4454,9 @@ static int gfx_v7_0_hw_init(void *handle)
 	return r;
 }
 
-static int gfx_v7_0_hw_fini(void *handle)
+static int gfx_v7_0_hw_fini(struct amdgpu_ip_block *ip_block)
 {
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	struct amdgpu_device *adev = ip_block->adev;
 
 	amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
 	amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
@@ -4537,23 +4467,19 @@ static int gfx_v7_0_hw_fini(void *handle)
 	return 0;
 }
 
-static int gfx_v7_0_suspend(void *handle)
+static int gfx_v7_0_suspend(struct amdgpu_ip_block *ip_block)
 {
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-	return gfx_v7_0_hw_fini(adev);
+	return gfx_v7_0_hw_fini(ip_block);
 }
 
-static int gfx_v7_0_resume(void *handle)
+static int gfx_v7_0_resume(struct amdgpu_ip_block *ip_block)
 {
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-	return gfx_v7_0_hw_init(adev);
+	return gfx_v7_0_hw_init(ip_block);
 }
 
-static bool gfx_v7_0_is_idle(void *handle)
+static bool gfx_v7_0_is_idle(struct amdgpu_ip_block *ip_block)
 {
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	struct amdgpu_device *adev = ip_block->adev;
 
 	if (RREG32(mmGRBM_STATUS) & GRBM_STATUS__GUI_ACTIVE_MASK)
 		return false;
@@ -4561,11 +4487,11 @@ static bool gfx_v7_0_is_idle(void *handle)
 	return true;
 }
 
-static int gfx_v7_0_wait_for_idle(void *handle)
+static int gfx_v7_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
 {
 	unsigned i;
 	u32 tmp;
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	struct amdgpu_device *adev = ip_block->adev;
 
 	for (i = 0; i < adev->usec_timeout; i++) {
 		/* read MC_STATUS */
@@ -4578,11 +4504,11 @@ static int gfx_v7_0_wait_for_idle(void *handle)
 	return -ETIMEDOUT;
 }
 
-static int gfx_v7_0_soft_reset(void *handle)
+static int gfx_v7_0_soft_reset(struct amdgpu_ip_block *ip_block)
 {
 	u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
 	u32 tmp;
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	struct amdgpu_device *adev = ip_block->adev;
 
 	/* GRBM_STATUS */
 	tmp = RREG32(mmGRBM_STATUS);
@@ -4785,7 +4711,7 @@ static int gfx_v7_0_set_eop_interrupt_state(struct amdgpu_device *adev,
			enum amdgpu_interrupt_state state)
 {
 	switch (type) {
-	case AMDGPU_CP_IRQ_GFX_EOP:
+	case AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP:
 		gfx_v7_0_set_gfx_eop_interrupt_state(adev, state);
 		break;
 	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP:
@@ -4888,11 +4814,11 @@ static int gfx_v7_0_priv_inst_irq(struct amdgpu_device *adev,
 	return 0;
 }
 
-static int gfx_v7_0_set_clockgating_state(void *handle,
+static int gfx_v7_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
					  enum amd_clockgating_state state)
 {
 	bool gate = false;
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	struct amdgpu_device *adev = ip_block->adev;
 
 	if (state == AMD_CG_STATE_GATE)
 		gate = true;
@@ -4911,11 +4837,11 @@ static int gfx_v7_0_set_clockgating_state(void *handle,
 	return 0;
 }
 
-static int gfx_v7_0_set_powergating_state(void *handle,
+static int gfx_v7_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
					  enum amd_powergating_state state)
 {
 	bool gate = false;
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	struct amdgpu_device *adev = ip_block->adev;
 
 	if (state == AMD_PG_STATE_GATE)
 		gate = true;
@@ -4936,6 +4862,32 @@ static int gfx_v7_0_set_powergating_state(void *handle,
 	return 0;
 }
 
+static void gfx_v7_0_emit_mem_sync(struct amdgpu_ring *ring)
+{
+	amdgpu_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
+	amdgpu_ring_write(ring, PACKET3_TCL1_ACTION_ENA |
+			  PACKET3_TC_ACTION_ENA |
+			  PACKET3_SH_KCACHE_ACTION_ENA |
+			  PACKET3_SH_ICACHE_ACTION_ENA);	/* CP_COHER_CNTL */
+	amdgpu_ring_write(ring, 0xffffffff);	/* CP_COHER_SIZE */
+	amdgpu_ring_write(ring, 0);		/* CP_COHER_BASE */
+	amdgpu_ring_write(ring, 0x0000000A);	/* poll interval */
+}
+
+static void gfx_v7_0_emit_mem_sync_compute(struct amdgpu_ring *ring)
+{
+	amdgpu_ring_write(ring, PACKET3(PACKET3_ACQUIRE_MEM, 5));
+	amdgpu_ring_write(ring, PACKET3_TCL1_ACTION_ENA |
+			  PACKET3_TC_ACTION_ENA |
+			  PACKET3_SH_KCACHE_ACTION_ENA |
+			  PACKET3_SH_ICACHE_ACTION_ENA);	/* CP_COHER_CNTL */
+	amdgpu_ring_write(ring, 0xffffffff);	/* CP_COHER_SIZE */
+	amdgpu_ring_write(ring, 0xff);		/* CP_COHER_SIZE_HI */
+	amdgpu_ring_write(ring, 0);		/* CP_COHER_BASE */
+	amdgpu_ring_write(ring, 0);		/* CP_COHER_BASE_HI */
+	amdgpu_ring_write(ring, 0x0000000A);	/* poll interval */
+}
+
 static const struct amd_ip_funcs gfx_v7_0_ip_funcs = {
 	.name = "gfx_v7_0",
 	.early_init = gfx_v7_0_early_init,
@@ -4968,7 +4920,8 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = {
 		12 + 12 + 12 + /* gfx_v7_0_ring_emit_fence_gfx x3 for user fence, vm fence */
 		7 + 4 + /* gfx_v7_0_ring_emit_pipeline_sync */
 		CIK_FLUSH_GPU_TLB_NUM_WREG * 5 + 7 + 6 + /* gfx_v7_0_ring_emit_vm_flush */
-		3 + 4, /* gfx_v7_ring_emit_cntxcntl including vgt flush*/
+		3 + 4 + /* gfx_v7_ring_emit_cntxcntl including vgt flush*/
+		5, /* SURFACE_SYNC */
 	.emit_ib_size = 4, /* gfx_v7_0_ring_emit_ib_gfx */
 	.emit_ib = gfx_v7_0_ring_emit_ib_gfx,
 	.emit_fence = gfx_v7_0_ring_emit_fence_gfx,
@@ -4983,6 +4936,7 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = {
 	.emit_cntxcntl = gfx_v7_ring_emit_cntxcntl,
 	.emit_wreg = gfx_v7_0_ring_emit_wreg,
 	.soft_recovery = gfx_v7_0_ring_soft_recovery,
+	.emit_mem_sync = gfx_v7_0_emit_mem_sync,
 };
 
 static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = {
@@ -4999,8 +4953,9 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = {
 		5 + /* hdp invalidate */
 		7 + /* gfx_v7_0_ring_emit_pipeline_sync */
 		CIK_FLUSH_GPU_TLB_NUM_WREG * 5 + 7 + /* gfx_v7_0_ring_emit_vm_flush */
-		7 + 7 + 7, /* gfx_v7_0_ring_emit_fence_compute x3 for user fence, vm fence */
-	.emit_ib_size = 4, /* gfx_v7_0_ring_emit_ib_compute */
+		7 + 7 + 7 + /* gfx_v7_0_ring_emit_fence_compute x3 for user fence, vm fence */
+		7, /* gfx_v7_0_emit_mem_sync_compute */
+	.emit_ib_size = 7, /* gfx_v7_0_ring_emit_ib_compute */
 	.emit_ib = gfx_v7_0_ring_emit_ib_compute,
 	.emit_fence = gfx_v7_0_ring_emit_fence_compute,
 	.emit_pipeline_sync = gfx_v7_0_ring_emit_pipeline_sync,
@@ -5012,6 +4967,8 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = {
 	.insert_nop = amdgpu_ring_insert_nop,
 	.pad_ib = amdgpu_ring_generic_pad_ib,
 	.emit_wreg = gfx_v7_0_ring_emit_wreg,
+	.soft_recovery = gfx_v7_0_ring_soft_recovery,
+	.emit_mem_sync = gfx_v7_0_emit_mem_sync_compute,
 };
 
 static void gfx_v7_0_set_ring_funcs(struct amdgpu_device *adev)
@@ -5054,29 +5011,10 @@ static void gfx_v7_0_set_irq_funcs(struct amdgpu_device *adev)
 static void gfx_v7_0_set_gds_init(struct amdgpu_device *adev)
 {
 	/* init asci gds info */
-	adev->gds.mem.total_size = RREG32(mmGDS_VMID0_SIZE);
-	adev->gds.gws.total_size = 64;
-	adev->gds.oa.total_size = 16;
-
-	if (adev->gds.mem.total_size == 64 * 1024) {
-		adev->gds.mem.gfx_partition_size = 4096;
-		adev->gds.mem.cs_partition_size = 4096;
-
-		adev->gds.gws.gfx_partition_size = 4;
-		adev->gds.gws.cs_partition_size = 4;
-
-		adev->gds.oa.gfx_partition_size = 4;
-		adev->gds.oa.cs_partition_size = 1;
-	} else {
-		adev->gds.mem.gfx_partition_size = 1024;
-		adev->gds.mem.cs_partition_size = 1024;
-
-		adev->gds.gws.gfx_partition_size = 16;
-		adev->gds.gws.cs_partition_size = 16;
-
-		adev->gds.oa.gfx_partition_size = 4;
-		adev->gds.oa.cs_partition_size = 4;
-	}
+	adev->gds.gds_size = RREG32(mmGDS_VMID0_SIZE);
+	adev->gds.gws_size = 64;
+	adev->gds.oa_size = 16;
+	adev->gds.gds_compute_max_wave_id = RREG32(mmGDS_COMPUTE_MAX_WAVE_ID);
 }
 
@@ -5103,18 +5041,18 @@ static void gfx_v7_0_get_cu_info(struct amdgpu_device *adev)
 			mask = 1;
 			ao_bitmap = 0;
 			counter = 0;
-			gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff);
+			gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff, 0);
 			if (i < 4 && j < 2)
 				gfx_v7_0_set_user_cu_inactive_bitmap(
					adev, disable_masks[i * 2 + j]);
 			bitmap = gfx_v7_0_get_cu_active_bitmap(adev);
-			cu_info->bitmap[i][j] = bitmap;
+			cu_info->bitmap[0][i][j] = bitmap;
 
-			for (k = 0; k < adev->gfx.config.max_cu_per_sh; k ++) {
+			for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
 				if (bitmap & mask) {
 					if (counter < ao_cu_num)
						ao_bitmap |= mask;
-					counter ++;
+					counter++;
 				}
 				mask <<= 1;
 			}
@@ -5124,7 +5062,7 @@ static void gfx_v7_0_get_cu_info(struct amdgpu_device *adev)
 			cu_info->ao_cu_bitmap[i][j] = ao_bitmap;
 		}
 	}
-	gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+	gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
 	mutex_unlock(&adev->grbm_idx_mutex);
 
 	cu_info->number = active_cu_number;
@@ -5136,17 +5074,7 @@ static void gfx_v7_0_get_cu_info(struct amdgpu_device *adev)
 	cu_info->lds_size = 64;
 }
 
-const struct amdgpu_ip_block_version gfx_v7_0_ip_block =
-{
-	.type = AMD_IP_BLOCK_TYPE_GFX,
-	.major = 7,
-	.minor = 0,
-	.rev = 0,
-	.funcs = &gfx_v7_0_ip_funcs,
-};
-
-const struct amdgpu_ip_block_version gfx_v7_1_ip_block =
-{
+const struct amdgpu_ip_block_version gfx_v7_1_ip_block = {
 	.type = AMD_IP_BLOCK_TYPE_GFX,
 	.major = 7,
 	.minor = 1,
@@ -5154,8 +5082,7 @@ const struct amdgpu_ip_block_version gfx_v7_1_ip_block =
 	.funcs = &gfx_v7_0_ip_funcs,
 };
 
-const struct amdgpu_ip_block_version gfx_v7_2_ip_block =
-{
+const struct amdgpu_ip_block_version gfx_v7_2_ip_block = {
 	.type = AMD_IP_BLOCK_TYPE_GFX,
 	.major = 7,
 	.minor = 2,
@@ -5163,8 +5090,7 @@ const struct amdgpu_ip_block_version gfx_v7_2_ip_block =
 	.funcs = &gfx_v7_0_ip_funcs,
 };
 
-const struct amdgpu_ip_block_version gfx_v7_3_ip_block =
-{
+const struct amdgpu_ip_block_version gfx_v7_3_ip_block = {
 	.type = AMD_IP_BLOCK_TYPE_GFX,
 	.major = 7,
 	.minor = 3,
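The recurring edit in gfx_v7_0_init_microcode() above collapses each snprintf() + request_firmware() + amdgpu_ucode_validate() sequence into one amdgpu_ucode_request() call, with a single gfx_v7_0_free_microcode() shared by the error path and teardown. Below is a minimal sketch of that consolidation pattern as generic kernel C — the helper name firmware_request_fmt() and the 64-byte name buffer are illustrative assumptions, not amdgpu API (amdgpu_ucode_request() additionally validates the ucode header); older trees spell the stdarg include as <stdarg.h>:

```c
#include <linux/device.h>
#include <linux/firmware.h>
#include <linux/kernel.h>
#include <linux/stdarg.h>

/* Hypothetical helper: format the blob name, then load it, logging and
 * leaving *fw == NULL on failure (request_firmware() guarantees that). */
static int firmware_request_fmt(const struct firmware **fw,
				struct device *dev, const char *fmt, ...)
{
	char name[64];
	va_list args;
	int err;

	va_start(args, fmt);
	vsnprintf(name, sizeof(name), fmt, args);
	va_end(args);

	err = request_firmware(fw, name, dev);
	if (err)
		dev_err(dev, "failed to load firmware \"%s\"\n", name);
	return err;
}
```

With such a helper, each load becomes a single call, e.g. `err = firmware_request_fmt(&adev->gfx.pfp_fw, adev->dev, "amdgpu/%s_pfp.bin", chip_name);`, and any failure funnels into one release-everything cleanup — the same shape the diff gives the new gfx_v7_0_init_microcode().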
