From 6f05c4e9d1a1a743eca76ddbad788386d7bb534e Mon Sep 17 00:00:00 2001
From: Christian König
Date: Mon, 22 Jan 2018 11:25:48 +0100
Subject: drm/amdgpu: move static CSA address to top of address space v2
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Move the CSA area to the top of the VA space to avoid clashing with
HMM/ATC in the lower range on GFX9.

v2: wrong sign noticed by Roger, rebase on CSA_VADDR cleanup, handle
    VA hole on GFX9 as well.

Signed-off-by: Christian König
Acked-by: Alex Deucher
Acked-by: Monk Liu
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c | 24 ++++++++++++++++++------
 1 file changed, 18 insertions(+), 6 deletions(-)

(limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c')

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index e7dfb7b44b4b..b832651d2137 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -24,6 +24,18 @@
 #include "amdgpu.h"
 #define MAX_KIQ_REG_WAIT	100000000 /* in usecs */
 
+uint64_t amdgpu_csa_vaddr(struct amdgpu_device *adev)
+{
+	uint64_t addr = adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT;
+
+	addr -= AMDGPU_VA_RESERVED_SIZE;
+
+	if (addr >= AMDGPU_VA_HOLE_START)
+		addr |= AMDGPU_VA_HOLE_END;
+
+	return addr;
+}
+
 bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev)
 {
 	/* By now all MMIO pages except mailbox are blocked */
@@ -55,14 +67,14 @@ void amdgpu_free_static_csa(struct amdgpu_device *adev) {
 
 /*
  * amdgpu_map_static_csa should be called during amdgpu_vm_init
- * it maps virtual address "AMDGPU_VA_RESERVED_SIZE - AMDGPU_CSA_SIZE"
- * to this VM, and each command submission of GFX should use this virtual
- * address within META_DATA init package to support SRIOV gfx preemption.
+ * it maps virtual address amdgpu_csa_vaddr() to this VM, and each command
+ * submission of GFX should use this virtual address within META_DATA init
+ * package to support SRIOV gfx preemption.
  */
-
 int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 			  struct amdgpu_bo_va **bo_va)
 {
+	uint64_t csa_addr = amdgpu_csa_vaddr(adev) & AMDGPU_VA_HOLE_MASK;
 	struct ww_acquire_ctx ticket;
 	struct list_head list;
 	struct amdgpu_bo_list_entry pd;
@@ -90,7 +102,7 @@ int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 		return -ENOMEM;
 	}
 
-	r = amdgpu_vm_alloc_pts(adev, (*bo_va)->base.vm, AMDGPU_CSA_VADDR,
+	r = amdgpu_vm_alloc_pts(adev, (*bo_va)->base.vm, csa_addr,
 				AMDGPU_CSA_SIZE);
 	if (r) {
 		DRM_ERROR("failed to allocate pts for static CSA, err=%d\n", r);
@@ -99,7 +111,7 @@ int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 		return r;
 	}
 
-	r = amdgpu_vm_bo_map(adev, *bo_va, AMDGPU_CSA_VADDR, 0, AMDGPU_CSA_SIZE,
+	r = amdgpu_vm_bo_map(adev, *bo_va, csa_addr, 0, AMDGPU_CSA_SIZE,
 			     AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
 			     AMDGPU_PTE_EXECUTABLE);
 
-- 
cgit

From a22144a58f784265fe8140724a7390443f63ef53 Mon Sep 17 00:00:00 2001
From: Monk Liu
Date: Mon, 25 Dec 2017 15:59:30 +0800
Subject: drm/amdgpu: try again kiq access if not in IRQ(v4)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Sometimes the GPU is switched to other VFs and won't switch back soon,
so the KIQ register access will not signal within a short period.
Instead of busy-waiting for a long time (MAX_KIQ_REG_WAIT) and then
returning a timeout, we can sleep 5ms and try again later (in non-IRQ
context).

And since the wait in kiq_r/wreg is a busy wait, MAX_KIQ_REG_WAIT
shouldn't be set to a long time; 5ms is more appropriate.

If the GPU is already in the reset state, don't retry the KIQ register
access, otherwise it would always hang because the KIQ is usually
already dead at that point.

v2: replace schedule() with msleep() for the wait
v3: use while loop for the wait repeating
    use macros for the sleep period
    more description for it
v4: drop unused variable

Signed-off-by: Monk Liu
Reviewed-by: Christian König
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c | 64 ++++++++++++++++++++++++++------
 1 file changed, 53 insertions(+), 11 deletions(-)

(limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c')

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index b832651d2137..42c140155b70 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -22,7 +22,9 @@
  */
 
 #include "amdgpu.h"
-#define MAX_KIQ_REG_WAIT	100000000 /* in usecs */
+#define MAX_KIQ_REG_WAIT	5000 /* in usecs, 5ms */
+#define MAX_KIQ_REG_BAILOUT_INTERVAL	5 /* in msecs, 5ms */
+#define MAX_KIQ_REG_TRY 20
 
 uint64_t amdgpu_csa_vaddr(struct amdgpu_device *adev)
 {
@@ -137,9 +139,9 @@ void amdgpu_virt_init_setting(struct amdgpu_device *adev)
 
 uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
 {
-	signed long r;
+	signed long r, cnt = 0;
 	unsigned long flags;
-	uint32_t val, seq;
+	uint32_t seq;
 	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
 	struct amdgpu_ring *ring = &kiq->ring;
 
@@ -153,18 +155,36 @@ uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
 	spin_unlock_irqrestore(&kiq->ring_lock, flags);
 
 	r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
-	if (r < 1) {
-		DRM_ERROR("wait for kiq fence error: %ld\n", r);
-		return ~0;
+
+	/* don't wait anymore for gpu reset case because this way may
+	 * block gpu_recover() routine forever, e.g. this virt_kiq_rreg
+	 * is triggered in TTM and ttm_bo_lock_delayed_workqueue() will
+	 * never return if we keep waiting in virt_kiq_rreg, which cause
+	 * gpu_recover() hang there.
+	 *
+	 * also don't wait anymore for IRQ context
+	 * */
+	if (r < 1 && (adev->in_gpu_reset || in_interrupt()))
+		goto failed_kiq_read;
+
+	while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
+		msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
+		r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
 	}
-	val = adev->wb.wb[adev->virt.reg_val_offs];
-	return val;
+
+	if (cnt > MAX_KIQ_REG_TRY)
+		goto failed_kiq_read;
+
+	return adev->wb.wb[adev->virt.reg_val_offs];
+
+failed_kiq_read:
+	pr_err("failed to read reg:%x\n", reg);
+	return ~0;
 }
 
 void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
 {
-	signed long r;
+	signed long r, cnt = 0;
 	unsigned long flags;
 	uint32_t seq;
 	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
@@ -180,8 +200,30 @@ void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
 	spin_unlock_irqrestore(&kiq->ring_lock, flags);
 
 	r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
-	if (r < 1)
-		DRM_ERROR("wait for kiq fence error: %ld\n", r);
+
+	/* don't wait anymore for gpu reset case because this way may
+	 * block gpu_recover() routine forever, e.g. this virt_kiq_rreg
+	 * is triggered in TTM and ttm_bo_lock_delayed_workqueue() will
+	 * never return if we keep waiting in virt_kiq_rreg, which cause
+	 * gpu_recover() hang there.
+	 *
+	 * also don't wait anymore for IRQ context
+	 * */
+	if (r < 1 && (adev->in_gpu_reset || in_interrupt()))
+		goto failed_kiq_write;
+
+	while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
+		msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
+		r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
+	}
+
+	if (cnt > MAX_KIQ_REG_TRY)
+		goto failed_kiq_write;
+
+	return;
+
+failed_kiq_write:
+	pr_err("failed to write reg:%x\n", reg);
 }
 
 /**
-- 
cgit

From dccf1eff352a8803eea4a8785ed3eb7e962d196f Mon Sep 17 00:00:00 2001
From: Monk Liu
Date: Mon, 5 Mar 2018 19:26:36 +0800
Subject: drm/amdgpu: give warning before sleep in kiq_r/wreg
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

To catch, early on, errors that may schedule in atomic context.

Signed-off-by: Monk Liu
Reviewed-by: Christian König
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c | 7 +++++++
 1 file changed, 7 insertions(+)

(limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c')

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index 42c140155b70..21adb1b6e5cb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -167,6 +167,9 @@ uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
 	if (r < 1 && (adev->in_gpu_reset || in_interrupt()))
 		goto failed_kiq_read;
 
+	if (in_interrupt())
+		might_sleep();
+
 	while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
 		msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
 		r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
@@ -212,7 +215,11 @@ void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
 	if (r < 1 && (adev->in_gpu_reset || in_interrupt()))
 		goto failed_kiq_write;
 
+	if (in_interrupt())
+		might_sleep();
+
 	while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
+
 		msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
 		r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
 	}
-- 
cgit
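
The address math in amdgpu_csa_vaddr() from the first patch is easier to follow with a small standalone sketch. Everything below is an illustration, not driver code: the constants assume a 48-bit GPU virtual address space split into a low and a high ("canonical") hemisphere around an unaddressable hole, and the helper name csa_vaddr() is made up for this example; the real values come from the ASIC-specific VM setup.

/*
 * csa_vaddr_demo.c - minimal, self-contained sketch of the CSA placement
 * logic shown in amdgpu_csa_vaddr().  All constants are illustrative
 * assumptions for a 48-bit VA space with a canonical-address hole.
 * Build with: cc -o csa_vaddr_demo csa_vaddr_demo.c
 */
#include <stdint.h>
#include <stdio.h>

#define GPU_PAGE_SHIFT		12			/* 4 KiB GPU pages (assumed) */
#define VA_RESERVED_SIZE	(1ULL << 20)		/* reserved area at the top (assumed) */
#define VA_HOLE_START		0x0000800000000000ULL	/* first unaddressable VA (assumed) */
#define VA_HOLE_END		0xffff800000000000ULL	/* first valid VA above the hole (assumed) */
#define VA_HOLE_MASK		0x0000ffffffffffffULL	/* strips the sign-extension bits (assumed) */

/* Place the CSA just below the top of the VA space and, if that lands in
 * the upper hemisphere, sign-extend it into a canonical address. */
static uint64_t csa_vaddr(uint64_t max_pfn)
{
	uint64_t addr = (max_pfn << GPU_PAGE_SHIFT) - VA_RESERVED_SIZE;

	if (addr >= VA_HOLE_START)
		addr |= VA_HOLE_END;

	return addr;
}

int main(void)
{
	/* 48-bit VA space => 2^36 pages of 4 KiB (assumed) */
	uint64_t max_pfn = 1ULL << 36;
	uint64_t addr = csa_vaddr(max_pfn);

	printf("CSA VA (canonical):     0x%016llx\n",
	       (unsigned long long)addr);
	/* page tables are indexed with the hole bits masked off again,
	 * mirroring the "& AMDGPU_VA_HOLE_MASK" in amdgpu_map_static_csa() */
	printf("CSA VA (page-table VA): 0x%016llx\n",
	       (unsigned long long)(addr & VA_HOLE_MASK));
	return 0;
}

The point of the placement, per the first commit message, is that pinning the CSA to the very top of the VA space keeps the low address range free, so it cannot clash with HMM/ATC mappings on GFX9.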