Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c')
| -rw-r--r-- | drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c | 669 |
1 file changed, 307 insertions(+), 362 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
index 4523df2785d6..ce6e04242c52 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
@@ -22,6 +22,9 @@
  */
 #include <linux/firmware.h>
 #include <linux/pci.h>
+
+#include <drm/drm_cache.h>
+
 #include "amdgpu.h"
 #include "amdgpu_atomfirmware.h"
 #include "gmc_v10_0.h"
@@ -48,16 +51,9 @@
 #include "athub_v2_0.h"
 #include "athub_v2_1.h"
 
-#if 0
-static const struct soc15_reg_golden golden_settings_navi10_hdp[] =
-{
-	/* TODO add golden setting for hdp */
-};
-#endif
-
 static int gmc_v10_0_ecc_interrupt_state(struct amdgpu_device *adev,
 					 struct amdgpu_irq_src *src,
-					 unsigned type,
+					 unsigned int type,
 					 enum amdgpu_interrupt_state state)
 {
 	return 0;
@@ -65,21 +61,33 @@ static int gmc_v10_0_ecc_interrupt_state(struct amdgpu_device *adev,
 
 static int
 gmc_v10_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
-				   struct amdgpu_irq_src *src, unsigned type,
+				   struct amdgpu_irq_src *src, unsigned int type,
 				   enum amdgpu_interrupt_state state)
 {
 	switch (state) {
 	case AMDGPU_IRQ_STATE_DISABLE:
 		/* MM HUB */
-		amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB_0, false);
+		amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB0(0), false);
 		/* GFX HUB */
-		amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB_0, false);
+		/* This works because this interrupt is only
+		 * enabled at init/resume and disabled in
+		 * fini/suspend, so the overall state doesn't
+		 * change over the course of suspend/resume.
+		 */
+		if (!adev->in_s0ix)
+			amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB(0), false);
 		break;
 	case AMDGPU_IRQ_STATE_ENABLE:
 		/* MM HUB */
-		amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB_0, true);
+		amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB0(0), true);
 		/* GFX HUB */
-		amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB_0, true);
+		/* This works because this interrupt is only
+		 * enabled at init/resume and disabled in
+		 * fini/suspend, so the overall state doesn't
+		 * change over the course of suspend/resume.
+		 */
+		if (!adev->in_s0ix)
+			amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB(0), true);
 		break;
 	default:
		break;
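For orientation: the two handlers above are interrupt-source callbacks. A minimal sketch of how such callbacks are typically wired together, following the amdgpu_irq_src_funcs convention used by this file (gmc_v10_0_set_irq_funcs shows up in a later hunk); this block is illustrative and not part of the patch:

static const struct amdgpu_irq_src_funcs gmc_v10_0_irq_funcs = {
	.set = gmc_v10_0_vm_fault_interrupt_state,	/* (un)mask the source */
	.process = gmc_v10_0_process_interrupt,		/* decode a fired IV entry */
};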
@@ -92,9 +100,14 @@ static int gmc_v10_0_process_interrupt(struct amdgpu_device *adev,
 				       struct amdgpu_irq_src *source,
 				       struct amdgpu_iv_entry *entry)
 {
-	bool retry_fault = !!(entry->src_data[1] & 0x80);
-	struct amdgpu_vmhub *hub = &adev->vmhub[entry->vmid_src];
-	struct amdgpu_task_info task_info;
+	uint32_t vmhub_index = entry->client_id == SOC15_IH_CLIENTID_VMC ?
+			       AMDGPU_MMHUB0(0) : AMDGPU_GFXHUB(0);
+	struct amdgpu_vmhub *hub = &adev->vmhub[vmhub_index];
+	bool retry_fault = !!(entry->src_data[1] &
+			      AMDGPU_GMC9_FAULT_SOURCE_DATA_RETRY);
+	bool write_fault = !!(entry->src_data[1] &
+			      AMDGPU_GMC9_FAULT_SOURCE_DATA_WRITE);
+	struct amdgpu_task_info *task_info;
 	uint32_t status = 0;
 	u64 addr;
 
@@ -106,7 +119,7 @@ static int gmc_v10_0_process_interrupt(struct amdgpu_device *adev,
 
 	/* Process it only if it's the first fault for this address */
 	if (entry->ih != &adev->irq.ih_soft &&
-	    amdgpu_gmc_filter_faults(adev, addr, entry->pasid,
+	    amdgpu_gmc_filter_faults(adev, entry->ih, addr, entry->pasid,
 				     entry->timestamp))
 		return 1;
 
@@ -121,7 +134,8 @@ static int gmc_v10_0_process_interrupt(struct amdgpu_device *adev,
 		/* Try to handle the recoverable page faults by filling page
 		 * tables
 		 */
-		if (amdgpu_vm_handle_fault(adev, entry->pasid, addr))
+		if (amdgpu_vm_handle_fault(adev, entry->pasid, 0, 0, addr,
+					   entry->timestamp, write_fault))
 			return 1;
 	}
 
@@ -131,32 +145,39 @@ static int gmc_v10_0_process_interrupt(struct amdgpu_device *adev,
 		 * be updated to avoid reading an incorrect value due to
 		 * the new fast GRBM interface.
 		 */
-		if ((entry->vmid_src == AMDGPU_GFXHUB_0) &&
-		    (adev->asic_type < CHIP_SIENNA_CICHLID))
+		if ((entry->vmid_src == AMDGPU_GFXHUB(0)) &&
+		    (amdgpu_ip_version(adev, GC_HWIP, 0) <
+		     IP_VERSION(10, 3, 0)))
 			RREG32(hub->vm_l2_pro_fault_status);
 
 		status = RREG32(hub->vm_l2_pro_fault_status);
 		WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
+
+		amdgpu_vm_update_fault_cache(adev, entry->pasid, addr, status,
+					     entry->vmid_src ? AMDGPU_MMHUB0(0) : AMDGPU_GFXHUB(0));
 	}
 
 	if (!printk_ratelimit())
 		return 0;
 
-	memset(&task_info, 0, sizeof(struct amdgpu_task_info));
-	amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);
-
 	dev_err(adev->dev,
-		"[%s] page fault (src_id:%u ring:%u vmid:%u pasid:%u, "
-		"for process %s pid %d thread %s pid %d)\n",
+		"[%s] page fault (src_id:%u ring:%u vmid:%u pasid:%u)\n",
 		entry->vmid_src ? "mmhub" : "gfxhub",
-		entry->src_id, entry->ring_id, entry->vmid,
-		entry->pasid, task_info.process_name, task_info.tgid,
-		task_info.task_name, task_info.pid);
+		entry->src_id, entry->ring_id, entry->vmid, entry->pasid);
+	task_info = amdgpu_vm_get_task_info_pasid(adev, entry->pasid);
+	if (task_info) {
+		amdgpu_vm_print_task_info(adev, task_info);
+		amdgpu_vm_put_task_info(task_info);
+	}
+
 	dev_err(adev->dev, "  in page starting at address 0x%016llx from client 0x%x (%s)\n",
-		addr, entry->client_id,
-		soc15_ih_clientid_name[entry->client_id]);
+		addr, entry->client_id,
+		soc15_ih_clientid_name[entry->client_id]);
 
-	if (!amdgpu_sriov_vf(adev))
+	/* Only print L2 fault status if the status register could be read and
+	 * contains useful information
+	 */
+	if (status != 0)
 		hub->vmhub_funcs->print_l2_protection_fault_status(adev,
 								   status);
 
@@ -194,8 +215,7 @@ static void gmc_v10_0_set_irq_funcs(struct amdgpu_device *adev)
 static bool gmc_v10_0_use_invalidate_semaphore(struct amdgpu_device *adev,
 					       uint32_t vmhub)
 {
-	return ((vmhub == AMDGPU_MMHUB_0 ||
-		 vmhub == AMDGPU_MMHUB_1) &&
+	return ((vmhub == AMDGPU_MMHUB0(0)) &&
 		(!amdgpu_sriov_vf(adev)));
 }
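The filter_faults call in the hunk above deduplicates retry faults so one bad address does not flood the handler. A simplified, invented sketch of that idea (the real amdgpu_gmc_filter_faults() differs; every name and size below is made up for illustration):

#include <stdbool.h>
#include <stdint.h>

#define FAULT_RING_SIZE 256	/* small per-device fault history */

struct fault_key {
	uint64_t addr;
	uint64_t timestamp;
	uint16_t pasid;
};

static struct fault_key fault_ring[FAULT_RING_SIZE];

/* return true if this (addr, pasid) fault was already seen recently */
static bool fault_seen_recently(uint64_t addr, uint16_t pasid, uint64_t ts)
{
	unsigned int slot = (unsigned int)(addr ^ pasid) % FAULT_RING_SIZE;
	struct fault_key *k = &fault_ring[slot];

	if (k->addr == addr && k->pasid == pasid &&
	    ts - k->timestamp < 100000)	/* arbitrary expiry window */
		return true;

	k->addr = addr;
	k->pasid = pasid;
	k->timestamp = ts;
	return false;
}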
  */
@@ -219,20 +239,48 @@ static bool gmc_v10_0_get_atc_vmid_pasid_mapping_info(
  * by the amdgpu vm/hsa code.
  */
-static void gmc_v10_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
-				   unsigned int vmhub, uint32_t flush_type)
+/**
+ * gmc_v10_0_flush_gpu_tlb - gart tlb flush callback
+ *
+ * @adev: amdgpu_device pointer
+ * @vmid: vm instance to flush
+ * @vmhub: vmhub type
+ * @flush_type: the flush type
+ *
+ * Flush the TLB for the requested page table.
+ */
+static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
+				    uint32_t vmhub, uint32_t flush_type)
 {
 	bool use_semaphore = gmc_v10_0_use_invalidate_semaphore(adev, vmhub);
 	struct amdgpu_vmhub *hub = &adev->vmhub[vmhub];
 	u32 inv_req = hub->vmhub_funcs->get_invalidate_req(vmid, flush_type);
-	u32 tmp;
 	/* Use register 17 for GART */
-	const unsigned eng = 17;
-	unsigned int i;
+	const unsigned int eng = 17;
 	unsigned char hub_ip = 0;
+	u32 sem, req, ack;
+	unsigned int i;
+	u32 tmp;
+
+	sem = hub->vm_inv_eng0_sem + hub->eng_distance * eng;
+	req = hub->vm_inv_eng0_req + hub->eng_distance * eng;
+	ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng;
 
-	hub_ip = (vmhub == AMDGPU_GFXHUB_0) ?
-		 GC_HWIP : MMHUB_HWIP;
+	/* flush hdp cache */
+	amdgpu_device_flush_hdp(adev, NULL);
+
+	/* This is necessary for SRIOV as well as for GFXOFF to function
+	 * properly under bare metal
+	 */
+	if (adev->gfx.kiq[0].ring.sched.ready && !adev->enable_mes &&
+	    (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev))) {
+		amdgpu_gmc_fw_reg_write_reg_wait(adev, req, ack, inv_req,
+						 1 << vmid, GET_INST(GC, 0));
+		return;
+	}
+
+	/* This path is needed before KIQ/MES/GFXOFF are set up */
+	hub_ip = (vmhub == AMDGPU_GFXHUB(0)) ? GC_HWIP : MMHUB_HWIP;
 
 	spin_lock(&adev->gmc.invalidate_lock);
 	/*
@@ -246,9 +294,7 @@ static void gmc_v10_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
 	if (use_semaphore) {
 		for (i = 0; i < adev->usec_timeout; i++) {
 			/* a read return value of 1 means semaphore acquire */
-			tmp = RREG32_RLC_NO_KIQ(hub->vm_inv_eng0_sem +
-					 hub->eng_distance * eng, hub_ip);
-
+			tmp = RREG32_RLC_NO_KIQ(sem, hub_ip);
 			if (tmp & 0x1)
 				break;
 			udelay(1);
@@ -258,24 +304,19 @@ static void gmc_v10_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
 			DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n");
 	}
 
-	WREG32_RLC_NO_KIQ(hub->vm_inv_eng0_req +
-			  hub->eng_distance * eng,
-			  inv_req, hub_ip);
+	WREG32_RLC_NO_KIQ(req, inv_req, hub_ip);
 
 	/*
 	 * Issue a dummy read to wait for the ACK register to be cleared
 	 * to avoid a false ACK due to the new fast GRBM interface.
 	 */
-	if ((vmhub == AMDGPU_GFXHUB_0) &&
-	    (adev->asic_type < CHIP_SIENNA_CICHLID))
-		RREG32_RLC_NO_KIQ(hub->vm_inv_eng0_req +
-				  hub->eng_distance * eng, hub_ip);
+	if ((vmhub == AMDGPU_GFXHUB(0)) &&
+	    (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(10, 3, 0)))
+		RREG32_RLC_NO_KIQ(req, hub_ip);
 
 	/* Wait for ACK with a delay.*/
 	for (i = 0; i < adev->usec_timeout; i++) {
-		tmp = RREG32_RLC_NO_KIQ(hub->vm_inv_eng0_ack +
-				  hub->eng_distance * eng, hub_ip);
-
+		tmp = RREG32_RLC_NO_KIQ(ack, hub_ip);
 		tmp &= 1 << vmid;
 		if (tmp)
 			break;
@@ -285,113 +326,13 @@ static void gmc_v10_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
 	/* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well.
 	 */
 	if (use_semaphore)
-		/*
-		 * add semaphore release after invalidation,
-		 * write with 0 means semaphore release
-		 */
-		WREG32_RLC_NO_KIQ(hub->vm_inv_eng0_sem +
-				  hub->eng_distance * eng, 0, hub_ip);
+		WREG32_RLC_NO_KIQ(sem, 0, hub_ip);
 
 	spin_unlock(&adev->gmc.invalidate_lock);
 
-	if (i < adev->usec_timeout)
-		return;
-
-	DRM_ERROR("Timeout waiting for VM flush hub: %d!\n", vmhub);
-}
-
-/**
- * gmc_v10_0_flush_gpu_tlb - gart tlb flush callback
- *
- * @adev: amdgpu_device pointer
- * @vmid: vm instance to flush
- * @vmhub: vmhub type
- * @flush_type: the flush type
- *
- * Flush the TLB for the requested page table.
- */
-static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
-				    uint32_t vmhub, uint32_t flush_type)
-{
-	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
-	struct dma_fence *fence;
-	struct amdgpu_job *job;
-
-	int r;
-
-	/* flush hdp cache */
-	adev->hdp.funcs->flush_hdp(adev, NULL);
-
-	/* For SRIOV run time, driver shouldn't access the register through MMIO
-	 * Directly use kiq to do the vm invalidation instead
-	 */
-	if (adev->gfx.kiq.ring.sched.ready &&
-	    (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) &&
-	    down_read_trylock(&adev->reset_sem)) {
-		struct amdgpu_vmhub *hub = &adev->vmhub[vmhub];
-		const unsigned eng = 17;
-		u32 inv_req = hub->vmhub_funcs->get_invalidate_req(vmid, flush_type);
-		u32 req = hub->vm_inv_eng0_req + hub->eng_distance * eng;
-		u32 ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng;
-
-		amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, inv_req,
-				1 << vmid);
-
-		up_read(&adev->reset_sem);
-		return;
-	}
-
-	mutex_lock(&adev->mman.gtt_window_lock);
-
-	if (vmhub == AMDGPU_MMHUB_0) {
-		gmc_v10_0_flush_vm_hub(adev, vmid, AMDGPU_MMHUB_0, 0);
-		mutex_unlock(&adev->mman.gtt_window_lock);
-		return;
-	}
-
-	BUG_ON(vmhub != AMDGPU_GFXHUB_0);
-
-	if (!adev->mman.buffer_funcs_enabled ||
-	    !adev->ib_pool_ready ||
-	    amdgpu_in_reset(adev) ||
-	    ring->sched.ready == false) {
-		gmc_v10_0_flush_vm_hub(adev, vmid, AMDGPU_GFXHUB_0, 0);
-		mutex_unlock(&adev->mman.gtt_window_lock);
-		return;
-	}
-
-	/* The SDMA on Navi has a bug which can theoretically result in memory
-	 * corruption if an invalidation happens at the same time as an VA
-	 * translation. Avoid this by doing the invalidation from the SDMA
-	 * itself.
-	 */
-	r = amdgpu_job_alloc_with_ib(adev, 16 * 4, AMDGPU_IB_POOL_IMMEDIATE,
-				     &job);
-	if (r)
-		goto error_alloc;
-
-	job->vm_pd_addr = amdgpu_gmc_pd_addr(adev->gart.bo);
-	job->vm_needs_flush = true;
-	job->ibs->ptr[job->ibs->length_dw++] = ring->funcs->nop;
-	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
-	r = amdgpu_job_submit(job, &adev->mman.entity,
-			      AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
-	if (r)
-		goto error_submit;
-
-	mutex_unlock(&adev->mman.gtt_window_lock);
-
-	dma_fence_wait(fence, false);
-	dma_fence_put(fence);
-
-	return;
-
-error_submit:
-	amdgpu_job_free(job);
-
-error_alloc:
-	mutex_unlock(&adev->mman.gtt_window_lock);
-	DRM_ERROR("Error flushing GPU TLB using the SDMA (%d)!\n", r);
+	if (i >= adev->usec_timeout)
+		dev_err(adev->dev, "Timeout waiting for VM flush hub: %d!\n",
+			vmhub);
 }
 
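The new flush function interleaves three concerns: the HDP flush, the KIQ firmware path, and the direct MMIO path with its hardware semaphore. Here is the MMIO sequence distilled into a self-contained sketch; reg_read()/reg_write()/udelay() stand in for the RLC-aware helpers, and the offsets and timeout are parameters rather than hardware values:

#include <stdbool.h>
#include <stdint.h>

extern uint32_t reg_read(uint32_t reg);
extern void reg_write(uint32_t reg, uint32_t val);
extern void udelay(unsigned int us);

static bool tlb_invalidate(uint32_t sem, uint32_t req, uint32_t ack,
			   uint32_t inv_req, uint32_t vmid,
			   bool use_semaphore, unsigned int timeout_us)
{
	unsigned int i;

	if (use_semaphore) {
		/* a read returning 1 means the semaphore was acquired */
		for (i = 0; i < timeout_us; i++) {
			if (reg_read(sem) & 0x1)
				break;
			udelay(1);
		}
		if (i >= timeout_us)
			return false;	/* never acquired; bail out */
	}

	reg_write(req, inv_req);	/* kick the invalidation request */

	/* wait for this VMID's bit to appear in the ACK register */
	for (i = 0; i < timeout_us; i++) {
		if (reg_read(ack) & (1u << vmid))
			break;
		udelay(1);
	}

	if (use_semaphore)
		reg_write(sem, 0);	/* writing 0 releases the semaphore */

	return i < timeout_us;
}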
 /**
@@ -401,72 +342,44 @@ error_alloc:
  * @pasid: pasid to be flush
  * @flush_type: the flush type
  * @all_hub: Used with PACKET3_INVALIDATE_TLBS_ALL_HUB()
+ * @inst: is used to select which instance of KIQ to use for the invalidation
  *
  * Flush the TLB for the requested pasid.
  */
-static int gmc_v10_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
-					 uint16_t pasid, uint32_t flush_type,
-					 bool all_hub)
+static void gmc_v10_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
+					  uint16_t pasid, uint32_t flush_type,
+					  bool all_hub, uint32_t inst)
 {
+	uint16_t queried;
 	int vmid, i;
-	signed long r;
-	uint32_t seq;
-	uint16_t queried_pasid;
-	bool ret;
-	struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
-	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
-
-	if (amdgpu_emu_mode == 0 && ring->sched.ready) {
-		spin_lock(&adev->gfx.kiq.ring_lock);
-		/* 2 dwords flush + 8 dwords fence */
-		amdgpu_ring_alloc(ring, kiq->pmf->invalidate_tlbs_size + 8);
-		kiq->pmf->kiq_invalidate_tlbs(ring,
-					pasid, flush_type, all_hub);
-		r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
-		if (r) {
-			amdgpu_ring_undo(ring);
-			spin_unlock(&adev->gfx.kiq.ring_lock);
-			return -ETIME;
-		}
-
-		amdgpu_ring_commit(ring);
-		spin_unlock(&adev->gfx.kiq.ring_lock);
-		r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout);
-		if (r < 1) {
-			dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r);
-			return -ETIME;
-		}
-
-		return 0;
-	}
 
 	for (vmid = 1; vmid < AMDGPU_NUM_VMID; vmid++) {
-
-		ret = gmc_v10_0_get_atc_vmid_pasid_mapping_info(adev, vmid,
-				&queried_pasid);
-		if (ret	&& queried_pasid == pasid) {
-			if (all_hub) {
-				for (i = 0; i < adev->num_vmhubs; i++)
-					gmc_v10_0_flush_gpu_tlb(adev, vmid,
-							i, flush_type);
-			} else {
-				gmc_v10_0_flush_gpu_tlb(adev, vmid,
-						AMDGPU_GFXHUB_0, flush_type);
-			}
-			break;
+		bool valid;
+
+		valid = gmc_v10_0_get_atc_vmid_pasid_mapping_info(adev, vmid,
+								  &queried);
+		if (!valid || queried != pasid)
+			continue;
+
+		if (all_hub) {
+			for_each_set_bit(i, adev->vmhubs_mask,
+					 AMDGPU_MAX_VMHUBS)
+				gmc_v10_0_flush_gpu_tlb(adev, vmid, i,
+							flush_type);
+		} else {
+			gmc_v10_0_flush_gpu_tlb(adev, vmid, AMDGPU_GFXHUB(0),
						flush_type);
 		}
 	}
-
-	return 0;
 }
 
 static uint64_t gmc_v10_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
-					     unsigned vmid, uint64_t pd_addr)
+					     unsigned int vmid, uint64_t pd_addr)
 {
-	bool use_semaphore = gmc_v10_0_use_invalidate_semaphore(ring->adev, ring->funcs->vmhub);
-	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
+	bool use_semaphore = gmc_v10_0_use_invalidate_semaphore(ring->adev, ring->vm_hub);
+	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub];
 	uint32_t req = hub->vmhub_funcs->get_invalidate_req(vmid, 0);
-	unsigned eng = ring->vm_inv_eng;
+	unsigned int eng = ring->vm_inv_eng;
 
 	/*
 	 * It may lose gpuvm invalidate acknowledge state across power-gating
@@ -508,13 +421,13 @@ static uint64_t gmc_v10_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
 	return pd_addr;
 }
 
-static void gmc_v10_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
-					 unsigned pasid)
+static void gmc_v10_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned int vmid,
					 unsigned int pasid)
 {
 	struct amdgpu_device *adev = ring->adev;
 	uint32_t reg;
 
-	if (ring->funcs->vmhub == AMDGPU_GFXHUB_0)
+	if (ring->vm_hub == AMDGPU_GFXHUB(0))
 		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid;
 	else
		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT_MM) + vmid;
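With the KIQ path moved into common code, the rewritten flush_gpu_tlb_pasid above is a pure software fallback: walk the hardware VMIDs, compare each VMID->PASID mapping against the target, and flush every hub whose bit is set in vmhubs_mask. A self-contained sketch of that loop, where query_mapping() and flush_hub() are invented stand-ins for the register accessors:

#include <stdbool.h>
#include <stdint.h>

#define NUM_VMID	16	/* hardware VMIDs; 0 is reserved for the kernel */
#define MAX_VMHUBS	8

extern unsigned long vmhubs_mask;			/* one bit per populated hub */
extern bool query_mapping(int vmid, uint16_t *pasid);	/* invented stand-in */
extern void flush_hub(int vmid, int hub, uint32_t flush_type);

static void flush_by_pasid(uint16_t pasid, uint32_t flush_type, bool all_hub)
{
	uint16_t queried;
	int vmid, hub;

	for (vmid = 1; vmid < NUM_VMID; vmid++) {
		if (!query_mapping(vmid, &queried) || queried != pasid)
			continue;

		if (all_hub) {
			for (hub = 0; hub < MAX_VMHUBS; hub++)
				if (vmhubs_mask & (1UL << hub))
					flush_hub(vmid, hub, flush_type);
		} else {
			flush_hub(vmid, 0 /* the GFX hub */, flush_type);
		}
	}
}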
@@ -555,24 +468,6 @@ static void gmc_v10_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid
  * 0 valid
  */
 
-static uint64_t gmc_v10_0_map_mtype(struct amdgpu_device *adev, uint32_t flags)
-{
-	switch (flags) {
-	case AMDGPU_VM_MTYPE_DEFAULT:
-		return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
-	case AMDGPU_VM_MTYPE_NC:
-		return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
-	case AMDGPU_VM_MTYPE_WC:
-		return AMDGPU_PTE_MTYPE_NV10(MTYPE_WC);
-	case AMDGPU_VM_MTYPE_CC:
-		return AMDGPU_PTE_MTYPE_NV10(MTYPE_CC);
-	case AMDGPU_VM_MTYPE_UC:
-		return AMDGPU_PTE_MTYPE_NV10(MTYPE_UC);
-	default:
-		return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
-	}
-}
-
 static void gmc_v10_0_get_vm_pde(struct amdgpu_device *adev, int level,
 				 uint64_t *addr, uint64_t *flags)
 {
@@ -597,28 +492,56 @@ static void gmc_v10_0_get_vm_pde(struct amdgpu_device *adev, int level,
 }
 
 static void gmc_v10_0_get_vm_pte(struct amdgpu_device *adev,
-				 struct amdgpu_bo_va_mapping *mapping,
+				 struct amdgpu_vm *vm,
+				 struct amdgpu_bo *bo,
+				 uint32_t vm_flags,
 				 uint64_t *flags)
 {
-	*flags &= ~AMDGPU_PTE_EXECUTABLE;
-	*flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
+	if (vm_flags & AMDGPU_VM_PAGE_EXECUTABLE)
+		*flags |= AMDGPU_PTE_EXECUTABLE;
+	else
+		*flags &= ~AMDGPU_PTE_EXECUTABLE;
 
-	*flags &= ~AMDGPU_PTE_MTYPE_NV10_MASK;
-	*flags |= (mapping->flags & AMDGPU_PTE_MTYPE_NV10_MASK);
+	switch (vm_flags & AMDGPU_VM_MTYPE_MASK) {
+	case AMDGPU_VM_MTYPE_DEFAULT:
+	case AMDGPU_VM_MTYPE_NC:
+	default:
+		*flags = AMDGPU_PTE_MTYPE_NV10(*flags, MTYPE_NC);
+		break;
+	case AMDGPU_VM_MTYPE_WC:
+		*flags = AMDGPU_PTE_MTYPE_NV10(*flags, MTYPE_WC);
+		break;
+	case AMDGPU_VM_MTYPE_CC:
+		*flags = AMDGPU_PTE_MTYPE_NV10(*flags, MTYPE_CC);
+		break;
+	case AMDGPU_VM_MTYPE_UC:
+		*flags = AMDGPU_PTE_MTYPE_NV10(*flags, MTYPE_UC);
+		break;
+	}
 
-	if (mapping->flags & AMDGPU_PTE_PRT) {
+	if (vm_flags & AMDGPU_VM_PAGE_NOALLOC)
+		*flags |= AMDGPU_PTE_NOALLOC;
+	else
+		*flags &= ~AMDGPU_PTE_NOALLOC;
+
+	if (vm_flags & AMDGPU_VM_PAGE_PRT) {
 		*flags |= AMDGPU_PTE_PRT;
 		*flags |= AMDGPU_PTE_SNOOPED;
 		*flags |= AMDGPU_PTE_LOG;
 		*flags |= AMDGPU_PTE_SYSTEM;
 		*flags &= ~AMDGPU_PTE_VALID;
 	}
+
+	if (bo && bo->flags & (AMDGPU_GEM_CREATE_COHERENT |
+			       AMDGPU_GEM_CREATE_EXT_COHERENT |
			       AMDGPU_GEM_CREATE_UNCACHED))
+		*flags = AMDGPU_PTE_MTYPE_NV10(*flags, MTYPE_UC);
 }
 
-static unsigned gmc_v10_0_get_vbios_fb_size(struct amdgpu_device *adev)
+static unsigned int gmc_v10_0_get_vbios_fb_size(struct amdgpu_device *adev)
 {
 	u32 d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL);
-	unsigned size;
+	unsigned int size;
 
 	if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
 		size = AMDGPU_VBIOS_VGA_ALLOCATION;
@@ -642,7 +565,6 @@ static const struct amdgpu_gmc_funcs gmc_v10_0_gmc_funcs = {
 	.flush_gpu_tlb_pasid = gmc_v10_0_flush_gpu_tlb_pasid,
 	.emit_flush_gpu_tlb = gmc_v10_0_emit_flush_gpu_tlb,
 	.emit_pasid_mapping = gmc_v10_0_emit_pasid_mapping,
-	.map_mtype = gmc_v10_0_map_mtype,
 	.get_vm_pde = gmc_v10_0_get_vm_pde,
 	.get_vm_pte = gmc_v10_0_get_vm_pte,
	.get_vbios_fb_size = gmc_v10_0_get_vbios_fb_size,
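The MTYPE handling above also explains the macro's new shape: the old single-argument AMDGPU_PTE_MTYPE_NV10(mtype) only produced the field value, so callers had to clear the old bits against AMDGPU_PTE_MTYPE_NV10_MASK themselves, while the two-argument form performs the whole read-modify-write. A sketch of the idiom; the shift and width are illustrative, not the exact NV10 bit layout:

#include <stdint.h>

#define PTE_MTYPE_SHIFT	48				/* illustrative */
#define PTE_MTYPE_MASK	(7ULL << PTE_MTYPE_SHIFT)	/* 3-bit field */

static inline uint64_t pte_set_mtype(uint64_t flags, uint64_t mtype)
{
	/* clear the old MTYPE field, then install the new one */
	return (flags & ~PTE_MTYPE_MASK) | (mtype << PTE_MTYPE_SHIFT);
}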
@@ -656,26 +578,27 @@ static void gmc_v10_0_set_gmc_funcs(struct amdgpu_device *adev)
 
 static void gmc_v10_0_set_umc_funcs(struct amdgpu_device *adev)
 {
-	switch (adev->asic_type) {
-	case CHIP_SIENNA_CICHLID:
+	switch (amdgpu_ip_version(adev, UMC_HWIP, 0)) {
+	case IP_VERSION(8, 7, 0):
 		adev->umc.max_ras_err_cnt_per_query = UMC_V8_7_TOTAL_CHANNEL_NUM;
 		adev->umc.channel_inst_num = UMC_V8_7_CHANNEL_INSTANCE_NUM;
 		adev->umc.umc_inst_num = UMC_V8_7_UMC_INSTANCE_NUM;
 		adev->umc.channel_offs = UMC_V8_7_PER_CHANNEL_OFFSET_SIENNA;
+		adev->umc.retire_unit = 1;
 		adev->umc.channel_idx_tbl = &umc_v8_7_channel_idx_tbl[0][0];
-		adev->umc.ras_funcs = &umc_v8_7_ras_funcs;
+		adev->umc.ras = &umc_v8_7_ras;
 		break;
 	default:
 		break;
 	}
 }
 
-
 static void gmc_v10_0_set_mmhub_funcs(struct amdgpu_device *adev)
 {
-	switch (adev->asic_type) {
-	case CHIP_VANGOGH:
-	case CHIP_YELLOW_CARP:
+	switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) {
+	case IP_VERSION(2, 3, 0):
+	case IP_VERSION(2, 4, 0):
+	case IP_VERSION(2, 4, 1):
 		adev->mmhub.funcs = &mmhub_v2_3_funcs;
 		break;
 	default:
@@ -686,13 +609,15 @@ static void gmc_v10_0_set_mmhub_funcs(struct amdgpu_device *adev)
 
 static void gmc_v10_0_set_gfxhub_funcs(struct amdgpu_device *adev)
 {
-	switch (adev->asic_type) {
-	case CHIP_SIENNA_CICHLID:
-	case CHIP_NAVY_FLOUNDER:
-	case CHIP_VANGOGH:
-	case CHIP_DIMGREY_CAVEFISH:
-	case CHIP_BEIGE_GOBY:
-	case CHIP_YELLOW_CARP:
+	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
+	case IP_VERSION(10, 3, 0):
+	case IP_VERSION(10, 3, 2):
+	case IP_VERSION(10, 3, 1):
+	case IP_VERSION(10, 3, 4):
+	case IP_VERSION(10, 3, 5):
+	case IP_VERSION(10, 3, 6):
+	case IP_VERSION(10, 3, 3):
+	case IP_VERSION(10, 3, 7):
 		adev->gfxhub.funcs = &gfxhub_v2_1_funcs;
 		break;
 	default:
@@ -702,9 +627,9 @@ static void gmc_v10_0_set_gfxhub_funcs(struct amdgpu_device *adev)
 }
 
-static int gmc_v10_0_early_init(void *handle)
+static int gmc_v10_0_early_init(struct amdgpu_ip_block *ip_block)
 {
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	struct amdgpu_device *adev = ip_block->adev;
 
 	gmc_v10_0_set_mmhub_funcs(adev);
 	gmc_v10_0_set_gfxhub_funcs(adev);
@@ -718,13 +643,14 @@ static int gmc_v10_0_early_init(void *handle)
 	adev->gmc.private_aperture_start = 0x1000000000000000ULL;
 	adev->gmc.private_aperture_end =
 		adev->gmc.private_aperture_start + (4ULL << 30) - 1;
+	adev->gmc.noretry_flags = AMDGPU_VM_NORETRY_FLAGS_TF;
 
 	return 0;
 }
 
-static int gmc_v10_0_late_init(void *handle)
+static int gmc_v10_0_late_init(struct amdgpu_ip_block *ip_block)
 {
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	struct amdgpu_device *adev = ip_block->adev;
 	int r;
 
 	r = amdgpu_gmc_allocate_vm_inv_eng(adev);
@@ -748,9 +674,11 @@ static void gmc_v10_0_vram_gtt_location(struct amdgpu_device *adev,
 	/* add the xgmi offset of the physical node */
 	base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
 
+	amdgpu_gmc_set_agp_default(adev, mc);
 	amdgpu_gmc_vram_location(adev, &adev->gmc, base);
-	amdgpu_gmc_gart_location(adev, mc);
-	amdgpu_gmc_agp_location(adev, mc);
+	amdgpu_gmc_gart_location(adev, mc, AMDGPU_GART_PLACEMENT_BEST_FIT);
+	if (!amdgpu_sriov_vf(adev) && (amdgpu_agp == 1))
+		amdgpu_gmc_agp_location(adev, mc);
 
 	/* base offset of vram pages */
	adev->vm_manager.vram_base_offset = adev->gfxhub.funcs->get_mc_fb_offset(adev);
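A recurring theme in the hunks above is replacing adev->asic_type switches with per-IP version checks. amdgpu_ip_version() values are totally ordered, so "older than Sienna Cichlid" becomes a plain comparison. The macro below mirrors the kernel's packing of major/minor/revision into one integer and is shown for illustration; the helper name is invented, and amdgpu kernel headers are assumed for the types:

#define IP_VERSION(maj, min, rev) (((maj) << 16) | ((min) << 8) | (rev))

static bool needs_grbm_dummy_read(struct amdgpu_device *adev)
{
	/* pre-10.3.0 GC, i.e. what "asic_type < CHIP_SIENNA_CICHLID" meant */
	return amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(10, 3, 0);
}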
@@ -787,35 +715,30 @@ static int gmc_v10_0_mc_init(struct amdgpu_device *adev)
 	adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);
 
 #ifdef CONFIG_X86_64
-	if (adev->flags & AMD_IS_APU) {
+	if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev)) {
 		adev->gmc.aper_base = adev->gfxhub.funcs->get_mc_fb_offset(adev);
 		adev->gmc.aper_size = adev->gmc.real_vram_size;
 	}
 #endif
 
-	/* In case the PCI BAR is larger than the actual amount of vram */
 	adev->gmc.visible_vram_size = adev->gmc.aper_size;
-	if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
-		adev->gmc.visible_vram_size = adev->gmc.real_vram_size;
 
 	/* set the gart size */
 	if (amdgpu_gart_size == -1) {
-		switch (adev->asic_type) {
-		case CHIP_NAVI10:
-		case CHIP_NAVI14:
-		case CHIP_NAVI12:
-		case CHIP_SIENNA_CICHLID:
-		case CHIP_NAVY_FLOUNDER:
-		case CHIP_VANGOGH:
-		case CHIP_DIMGREY_CAVEFISH:
-		case CHIP_BEIGE_GOBY:
-		case CHIP_YELLOW_CARP:
+		switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
 		default:
 			adev->gmc.gart_size = 512ULL << 20;
 			break;
+		case IP_VERSION(10, 3, 1):   /* DCE SG support */
+		case IP_VERSION(10, 3, 3):   /* DCE SG support */
+		case IP_VERSION(10, 3, 6):   /* DCE SG support */
+		case IP_VERSION(10, 3, 7):   /* DCE SG support */
+			adev->gmc.gart_size = 1024ULL << 20;
+			break;
 		}
-	} else
+	} else {
 		adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
+	}
 
 	gmc_v10_0_vram_gtt_location(adev, &adev->gmc);
 
@@ -837,16 +760,16 @@ static int gmc_v10_0_gart_init(struct amdgpu_device *adev)
 		return r;
 
 	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
-	adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE_NV10(MTYPE_UC) |
+	adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE_NV10(0ULL, MTYPE_UC) |
 				 AMDGPU_PTE_EXECUTABLE;
 
 	return amdgpu_gart_table_vram_alloc(adev);
 }
 
-static int gmc_v10_0_sw_init(void *handle)
+static int gmc_v10_0_sw_init(struct amdgpu_ip_block *ip_block)
 {
 	int r, vram_width = 0, vram_type = 0, vram_vendor = 0;
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	struct amdgpu_device *adev = ip_block->adev;
 
 	adev->gfxhub.funcs->init(adev);
 
@@ -869,17 +792,40 @@ static int gmc_v10_0_sw_init(void *handle)
 		adev->gmc.vram_vendor = vram_vendor;
 	}
 
-	switch (adev->asic_type) {
-	case CHIP_NAVI10:
-	case CHIP_NAVI14:
-	case CHIP_NAVI12:
-	case CHIP_SIENNA_CICHLID:
-	case CHIP_NAVY_FLOUNDER:
-	case CHIP_VANGOGH:
-	case CHIP_DIMGREY_CAVEFISH:
-	case CHIP_BEIGE_GOBY:
-	case CHIP_YELLOW_CARP:
-		adev->num_vmhubs = 2;
+	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
+	case IP_VERSION(10, 3, 0):
+		adev->gmc.mall_size = 128 * 1024 * 1024;
+		break;
+	case IP_VERSION(10, 3, 2):
+		adev->gmc.mall_size = 96 * 1024 * 1024;
+		break;
+	case IP_VERSION(10, 3, 4):
+		adev->gmc.mall_size = 32 * 1024 * 1024;
+		break;
+	case IP_VERSION(10, 3, 5):
+		adev->gmc.mall_size = 16 * 1024 * 1024;
+		break;
+	default:
+		adev->gmc.mall_size = 0;
+		break;
+	}
+
+	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
+	case IP_VERSION(10, 1, 10):
+	case IP_VERSION(10, 1, 1):
+	case IP_VERSION(10, 1, 2):
+	case IP_VERSION(10, 1, 3):
+	case IP_VERSION(10, 1, 4):
+	case IP_VERSION(10, 3, 0):
+	case IP_VERSION(10, 3, 2):
+	case IP_VERSION(10, 3, 1):
+	case IP_VERSION(10, 3, 4):
+	case IP_VERSION(10, 3, 5):
+	case IP_VERSION(10, 3, 6):
+	case IP_VERSION(10, 3, 3):
+	case IP_VERSION(10, 3, 7):
+		set_bit(AMDGPU_GFXHUB(0), adev->vmhubs_mask);
+		set_bit(AMDGPU_MMHUB0(0), adev->vmhubs_mask);
 		/*
 		 * To fulfill 4-level page support,
 		 * vm size is 256TB (48bit), maximum size of Navi10/Navi14/Navi12,
@@ -921,22 +867,17 @@ static int gmc_v10_0_sw_init(void *handle)
 
 	r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44));
 	if (r) {
-		printk(KERN_WARNING "amdgpu: No suitable DMA available.\n");
+		dev_warn(adev->dev, "amdgpu: No suitable DMA available.\n");
 		return r;
 	}
 
-	if (adev->gmc.xgmi.supported) {
-		r = adev->gfxhub.funcs->get_xgmi_info(adev);
-		if (r)
-			return r;
-	}
+	adev->need_swiotlb = drm_need_swiotlb(44);
 
 	r = gmc_v10_0_mc_init(adev);
 	if (r)
 		return r;
 
 	amdgpu_gmc_get_vbios_allocations(adev);
-	amdgpu_gmc_get_reserved_allocation(adev);
 
 	/* Memory manager */
 	r = amdgpu_bo_init(adev);
@@ -957,6 +898,10 @@ static int gmc_v10_0_sw_init(void *handle)
 
 	amdgpu_vm_manager_init(adev);
 
+	r = amdgpu_gmc_ras_sw_init(adev);
+	if (r)
+		return r;
+
 	return 0;
 }
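The need_swiotlb line above is also why this patch adds #include <drm/drm_cache.h> at the top of the file: that header provides drm_need_swiotlb(). The two calls belong together, condensed here into an invented wrapper for illustration (kernel context assumed): the device claims 44-bit DMA addressing, and drm_need_swiotlb(44) reports whether system memory can lie beyond that reach, in which case TTM must fall back to SWIOTLB bounce buffers.

static int gmc_v10_0_setup_dma_addressing(struct amdgpu_device *adev)
{
	int r;

	/* GMC v10 page tables carry 44-bit physical addresses */
	r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44));
	if (r)
		return r;

	adev->need_swiotlb = drm_need_swiotlb(44);
	return 0;
}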
@@ -972,9 +917,9 @@ static void gmc_v10_0_gart_fini(struct amdgpu_device *adev)
 	amdgpu_gart_table_vram_free(adev);
 }
 
-static int gmc_v10_0_sw_fini(void *handle)
+static int gmc_v10_0_sw_fini(struct amdgpu_ip_block *ip_block)
 {
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	struct amdgpu_device *adev = ip_block->adev;
 
 	amdgpu_vm_manager_fini(adev);
 	gmc_v10_0_gart_fini(adev);
@@ -986,20 +931,6 @@ static int gmc_v10_0_sw_fini(void *handle)
 
 static void gmc_v10_0_init_golden_registers(struct amdgpu_device *adev)
 {
-	switch (adev->asic_type) {
-	case CHIP_NAVI10:
-	case CHIP_NAVI14:
-	case CHIP_NAVI12:
-	case CHIP_SIENNA_CICHLID:
-	case CHIP_NAVY_FLOUNDER:
-	case CHIP_VANGOGH:
-	case CHIP_DIMGREY_CAVEFISH:
-	case CHIP_BEIGE_GOBY:
-	case CHIP_YELLOW_CARP:
-		break;
-	default:
-		break;
-	}
 }
 
 /**
@@ -1017,13 +948,13 @@ static int gmc_v10_0_gart_enable(struct amdgpu_device *adev)
 		return -EINVAL;
 	}
 
-	r = amdgpu_gart_table_vram_pin(adev);
-	if (r)
-		return r;
+	amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);
 
-	r = adev->gfxhub.funcs->gart_enable(adev);
-	if (r)
-		return r;
+	if (!adev->in_s0ix) {
+		r = adev->gfxhub.funcs->gart_enable(adev);
+		if (r)
+			return r;
+	}
 
 	r = adev->mmhub.funcs->gart_enable(adev);
 	if (r)
@@ -1032,29 +963,30 @@ static int gmc_v10_0_gart_enable(struct amdgpu_device *adev)
 	adev->hdp.funcs->init_registers(adev);
 
 	/* Flush HDP after it is initialized */
-	adev->hdp.funcs->flush_hdp(adev, NULL);
+	amdgpu_device_flush_hdp(adev, NULL);
 
-	value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ?
-		false : true;
+	value = amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS;
 
-	adev->gfxhub.funcs->set_fault_enable_default(adev, value);
+	if (!adev->in_s0ix)
+		adev->gfxhub.funcs->set_fault_enable_default(adev, value);
 	adev->mmhub.funcs->set_fault_enable_default(adev, value);
-	gmc_v10_0_flush_gpu_tlb(adev, 0, AMDGPU_MMHUB_0, 0);
-	gmc_v10_0_flush_gpu_tlb(adev, 0, AMDGPU_GFXHUB_0, 0);
+	gmc_v10_0_flush_gpu_tlb(adev, 0, AMDGPU_MMHUB0(0), 0);
+	if (!adev->in_s0ix)
+		gmc_v10_0_flush_gpu_tlb(adev, 0, AMDGPU_GFXHUB(0), 0);
 
 	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
-		 (unsigned)(adev->gmc.gart_size >> 20),
+		 (unsigned int)(adev->gmc.gart_size >> 20),
 		 (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));
 
-	adev->gart.ready = true;
-
 	return 0;
 }
 
-static int gmc_v10_0_hw_init(void *handle)
+static int gmc_v10_0_hw_init(struct amdgpu_ip_block *ip_block)
 {
+	struct amdgpu_device *adev = ip_block->adev;
 	int r;
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	adev->gmc.flush_pasid_uses_kiq = !amdgpu_emu_mode;
 
 	/* The sequence of these two function calls matters.*/
	gmc_v10_0_init_golden_registers(adev);
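Note the pattern the gart_enable hunk introduces and that recurs throughout the patch (fault masking, TLB flush, gart disable): when adev->in_s0ix is set, the GFX hub keeps its state across suspend and must not be reprogrammed, while the MM hub is always handled. Condensed into an invented wrapper for illustration (kernel context assumed):

static int gmc_v10_0_enable_hubs(struct amdgpu_device *adev)
{
	int r;

	if (!adev->in_s0ix) {		/* GFX hub state survives S0ix */
		r = adev->gfxhub.funcs->gart_enable(adev);
		if (r)
			return r;
	}

	return adev->mmhub.funcs->gart_enable(adev);	/* MM hub: always */
}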
@@ -1063,13 +995,19 @@ static int gmc_v10_0_hw_init(void *handle)
 	 * harvestable groups in gc_utcl2 need to be programmed before any GFX block
 	 * register setup within GMC, or else system hang when harvesting SA.
 	 */
-	if (adev->gfxhub.funcs && adev->gfxhub.funcs->utcl2_harvest)
+	if (!adev->in_s0ix && adev->gfxhub.funcs && adev->gfxhub.funcs->utcl2_harvest)
 		adev->gfxhub.funcs->utcl2_harvest(adev);
 
 	r = gmc_v10_0_gart_enable(adev);
 	if (r)
 		return r;
 
+	if (amdgpu_emu_mode == 1) {
+		r = amdgpu_gmc_vram_checking(adev);
+		if (r)
+			return r;
+	}
+
 	if (adev->umc.funcs && adev->umc.funcs->init_registers)
 		adev->umc.funcs->init_registers(adev);
 
@@ -1085,14 +1023,16 @@ static int gmc_v10_0_hw_init(void *handle)
  */
 static void gmc_v10_0_gart_disable(struct amdgpu_device *adev)
 {
-	adev->gfxhub.funcs->gart_disable(adev);
+	if (!adev->in_s0ix)
+		adev->gfxhub.funcs->gart_disable(adev);
 	adev->mmhub.funcs->gart_disable(adev);
-	amdgpu_gart_table_vram_unpin(adev);
 }
 
-static int gmc_v10_0_hw_fini(void *handle)
+static int gmc_v10_0_hw_fini(struct amdgpu_ip_block *ip_block)
 {
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	struct amdgpu_device *adev = ip_block->adev;
+
+	gmc_v10_0_gart_disable(adev);
 
 	if (amdgpu_sriov_vf(adev)) {
 		/* full access mode, so don't touch any GMC register */
@@ -1100,84 +1040,91 @@ static int gmc_v10_0_hw_fini(void *handle)
 		return 0;
 	}
 
-	amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
 	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
-	gmc_v10_0_gart_disable(adev);
+
+	if (adev->gmc.ecc_irq.funcs &&
+	    amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC))
+		amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
 
 	return 0;
 }
 
-static int gmc_v10_0_suspend(void *handle)
+static int gmc_v10_0_suspend(struct amdgpu_ip_block *ip_block)
 {
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-	gmc_v10_0_hw_fini(adev);
+	gmc_v10_0_hw_fini(ip_block);
 
 	return 0;
 }
 
-static int gmc_v10_0_resume(void *handle)
+static int gmc_v10_0_resume(struct amdgpu_ip_block *ip_block)
 {
 	int r;
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	r = gmc_v10_0_hw_init(adev);
+	r = gmc_v10_0_hw_init(ip_block);
 	if (r)
 		return r;
 
-	amdgpu_vmid_reset_all(adev);
+	amdgpu_vmid_reset_all(ip_block->adev);
 
 	return 0;
 }
 
-static bool gmc_v10_0_is_idle(void *handle)
+static bool gmc_v10_0_is_idle(struct amdgpu_ip_block *ip_block)
 {
 	/* MC is always ready in GMC v10.*/
 	return true;
 }
 
-static int gmc_v10_0_wait_for_idle(void *handle)
+static int gmc_v10_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
 {
 	/* There is no need to wait for MC idle in GMC v10.*/
 	return 0;
 }
 
-static int gmc_v10_0_soft_reset(void *handle)
-{
-	return 0;
-}
-
-static int gmc_v10_0_set_clockgating_state(void *handle,
+static int gmc_v10_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
 					   enum amd_clockgating_state state)
 {
 	int r;
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	struct amdgpu_device *adev = ip_block->adev;
+
+	/*
+	 * MMHUB failing to disconnect from DF when MMHUB clock gating is
	 * disabled is a new problem observed on DF 3.0.3; the same suspend
	 * sequence shows no issue on DF 3.0.2 series platforms.
+	 */
+	if (adev->in_s0ix &&
+	    amdgpu_ip_version(adev, DF_HWIP, 0) > IP_VERSION(3, 0, 2)) {
+		dev_dbg(adev->dev, "keep mmhub clock gating enabled for s0ix\n");
+		return 0;
+	}
 
 	r = adev->mmhub.funcs->set_clockgating(adev, state);
 	if (r)
 		return r;
 
-	if (adev->asic_type >= CHIP_SIENNA_CICHLID &&
-	    adev->asic_type <= CHIP_YELLOW_CARP)
+	if (amdgpu_ip_version(adev, ATHUB_HWIP, 0) >= IP_VERSION(2, 1, 0))
 		return athub_v2_1_set_clockgating(adev, state);
 	else
 		return athub_v2_0_set_clockgating(adev, state);
 }
 
-static void gmc_v10_0_get_clockgating_state(void *handle, u32 *flags)
+static void gmc_v10_0_get_clockgating_state(struct amdgpu_ip_block *ip_block, u64 *flags)
 {
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	struct amdgpu_device *adev = ip_block->adev;
+
+	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(10, 1, 3) ||
+	    amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(10, 1, 4))
+		return;
 
 	adev->mmhub.funcs->get_clockgating(adev, flags);
 
-	if (adev->asic_type >= CHIP_SIENNA_CICHLID &&
-	    adev->asic_type <= CHIP_YELLOW_CARP)
+	if (amdgpu_ip_version(adev, ATHUB_HWIP, 0) >= IP_VERSION(2, 1, 0))
 		athub_v2_1_get_clockgating(adev, flags);
 	else
 		athub_v2_0_get_clockgating(adev, flags);
 }
 
-static int gmc_v10_0_set_powergating_state(void *handle,
+static int gmc_v10_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
 					   enum amd_powergating_state state)
 {
 	return 0;
@@ -1195,14 +1142,12 @@ const struct amd_ip_funcs gmc_v10_0_ip_funcs = {
 	.resume = gmc_v10_0_resume,
 	.is_idle = gmc_v10_0_is_idle,
 	.wait_for_idle = gmc_v10_0_wait_for_idle,
-	.soft_reset = gmc_v10_0_soft_reset,
 	.set_clockgating_state = gmc_v10_0_set_clockgating_state,
 	.set_powergating_state = gmc_v10_0_set_powergating_state,
 	.get_clockgating_state = gmc_v10_0_get_clockgating_state,
 };
 
-const struct amdgpu_ip_block_version gmc_v10_0_ip_block =
-{
+const struct amdgpu_ip_block_version gmc_v10_0_ip_block = {
 	.type = AMD_IP_BLOCK_TYPE_GMC,
 	.major = 10,
 	.minor = 0,
	.rev = 0,
	.funcs = &gmc_v10_0_ip_funcs,
 };
