author | Sean Paul <seanpaul@chromium.org> | 2018-10-24 14:26:04 -0400
committer | Sean Paul <seanpaul@chromium.org> | 2018-10-24 14:26:04 -0400
commit | 6542e9adc0da1e23d81ff9314265a029b961906d (patch)
tree | 35ba360a7150d8b042a9fd75ab54ef83b34a2b95 /drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
parent | 2b02a05bdc3a62d36e0d0b015351897109e25991 (diff)
parent | f2bfc71aee75feff33ca659322b72ffeed5a243d (diff)
Merge drm/drm-next into drm-misc-next
4.19 is out, Lyude asked for a backmerge, and it's been a while. All
very good reasons on their own :-)
Signed-off-by: Sean Paul <seanpaul@chromium.org>
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c')
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c | 166
1 file changed, 67 insertions, 99 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
index 4ed86218cef3..8af67f649660 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
@@ -24,46 +24,21 @@
 #include <drm/drmP.h>
 #include "amdgpu.h"
 #include "amdgpu_ih.h"
-#include "amdgpu_amdkfd.h"
-
-/**
- * amdgpu_ih_ring_alloc - allocate memory for the IH ring
- *
- * @adev: amdgpu_device pointer
- *
- * Allocate a ring buffer for the interrupt controller.
- * Returns 0 for success, errors for failure.
- */
-static int amdgpu_ih_ring_alloc(struct amdgpu_device *adev)
-{
-        int r;
-
-        /* Allocate ring buffer */
-        if (adev->irq.ih.ring_obj == NULL) {
-                r = amdgpu_bo_create_kernel(adev, adev->irq.ih.ring_size,
-                                            PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
-                                            &adev->irq.ih.ring_obj,
-                                            &adev->irq.ih.gpu_addr,
-                                            (void **)&adev->irq.ih.ring);
-                if (r) {
-                        DRM_ERROR("amdgpu: failed to create ih ring buffer (%d).\n", r);
-                        return r;
-                }
-        }
-        return 0;
-}
 
 /**
  * amdgpu_ih_ring_init - initialize the IH state
  *
  * @adev: amdgpu_device pointer
+ * @ih: ih ring to initialize
+ * @ring_size: ring size to allocate
+ * @use_bus_addr: true when we can use dma_alloc_coherent
  *
  * Initializes the IH state and allocates a buffer
  * for the IH ring buffer.
  * Returns 0 for success, errors for failure.
  */
-int amdgpu_ih_ring_init(struct amdgpu_device *adev, unsigned ring_size,
-                        bool use_bus_addr)
+int amdgpu_ih_ring_init(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih,
+                        unsigned ring_size, bool use_bus_addr)
 {
         u32 rb_bufsz;
         int r;
@@ -71,70 +46,76 @@ int amdgpu_ih_ring_init(struct amdgpu_device *adev, unsigned ring_size,
         /* Align ring size */
         rb_bufsz = order_base_2(ring_size / 4);
         ring_size = (1 << rb_bufsz) * 4;
-        adev->irq.ih.ring_size = ring_size;
-        adev->irq.ih.ptr_mask = adev->irq.ih.ring_size - 1;
-        adev->irq.ih.rptr = 0;
-        adev->irq.ih.use_bus_addr = use_bus_addr;
-
-        if (adev->irq.ih.use_bus_addr) {
-                if (!adev->irq.ih.ring) {
-                        /* add 8 bytes for the rptr/wptr shadows and
-                         * add them to the end of the ring allocation.
-                         */
-                        adev->irq.ih.ring = pci_alloc_consistent(adev->pdev,
-                                                                 adev->irq.ih.ring_size + 8,
-                                                                 &adev->irq.ih.rb_dma_addr);
-                        if (adev->irq.ih.ring == NULL)
-                                return -ENOMEM;
-                        memset((void *)adev->irq.ih.ring, 0, adev->irq.ih.ring_size + 8);
-                        adev->irq.ih.wptr_offs = (adev->irq.ih.ring_size / 4) + 0;
-                        adev->irq.ih.rptr_offs = (adev->irq.ih.ring_size / 4) + 1;
-                }
-                return 0;
+        ih->ring_size = ring_size;
+        ih->ptr_mask = ih->ring_size - 1;
+        ih->rptr = 0;
+        ih->use_bus_addr = use_bus_addr;
+
+        if (use_bus_addr) {
+                if (ih->ring)
+                        return 0;
+
+                /* add 8 bytes for the rptr/wptr shadows and
+                 * add them to the end of the ring allocation.
+                 */
+                ih->ring = dma_alloc_coherent(adev->dev, ih->ring_size + 8,
+                                              &ih->rb_dma_addr, GFP_KERNEL);
+                if (ih->ring == NULL)
+                        return -ENOMEM;
+
+                memset((void *)ih->ring, 0, ih->ring_size + 8);
+                ih->wptr_offs = (ih->ring_size / 4) + 0;
+                ih->rptr_offs = (ih->ring_size / 4) + 1;
         } else {
-                r = amdgpu_device_wb_get(adev, &adev->irq.ih.wptr_offs);
+                r = amdgpu_device_wb_get(adev, &ih->wptr_offs);
+                if (r)
+                        return r;
+
+                r = amdgpu_device_wb_get(adev, &ih->rptr_offs);
                 if (r) {
-                        dev_err(adev->dev, "(%d) ih wptr_offs wb alloc failed\n", r);
+                        amdgpu_device_wb_free(adev, ih->wptr_offs);
                         return r;
                 }
 
-                r = amdgpu_device_wb_get(adev, &adev->irq.ih.rptr_offs);
+                r = amdgpu_bo_create_kernel(adev, ih->ring_size, PAGE_SIZE,
+                                            AMDGPU_GEM_DOMAIN_GTT,
+                                            &ih->ring_obj, &ih->gpu_addr,
+                                            (void **)&ih->ring);
                 if (r) {
-                        amdgpu_device_wb_free(adev, adev->irq.ih.wptr_offs);
-                        dev_err(adev->dev, "(%d) ih rptr_offs wb alloc failed\n", r);
+                        amdgpu_device_wb_free(adev, ih->rptr_offs);
+                        amdgpu_device_wb_free(adev, ih->wptr_offs);
                         return r;
                 }
-
-                return amdgpu_ih_ring_alloc(adev);
         }
+
         return 0;
 }
 
 /**
  * amdgpu_ih_ring_fini - tear down the IH state
  *
  * @adev: amdgpu_device pointer
+ * @ih: ih ring to tear down
  *
  * Tears down the IH state and frees buffer
  * used for the IH ring buffer.
  */
-void amdgpu_ih_ring_fini(struct amdgpu_device *adev)
+void amdgpu_ih_ring_fini(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih)
 {
-        if (adev->irq.ih.use_bus_addr) {
-                if (adev->irq.ih.ring) {
-                        /* add 8 bytes for the rptr/wptr shadows and
-                         * add them to the end of the ring allocation.
-                         */
-                        pci_free_consistent(adev->pdev, adev->irq.ih.ring_size + 8,
-                                            (void *)adev->irq.ih.ring,
-                                            adev->irq.ih.rb_dma_addr);
-                        adev->irq.ih.ring = NULL;
-                }
+        if (ih->use_bus_addr) {
+                if (!ih->ring)
+                        return;
+
+                /* add 8 bytes for the rptr/wptr shadows and
+                 * add them to the end of the ring allocation.
+                 */
+                dma_free_coherent(adev->dev, ih->ring_size + 8,
+                                  (void *)ih->ring, ih->rb_dma_addr);
+                ih->ring = NULL;
         } else {
-                amdgpu_bo_free_kernel(&adev->irq.ih.ring_obj,
-                                      &adev->irq.ih.gpu_addr,
-                                      (void **)&adev->irq.ih.ring);
-                amdgpu_device_wb_free(adev, adev->irq.ih.wptr_offs);
-                amdgpu_device_wb_free(adev, adev->irq.ih.rptr_offs);
+                amdgpu_bo_free_kernel(&ih->ring_obj, &ih->gpu_addr,
+                                      (void **)&ih->ring);
+                amdgpu_device_wb_free(adev, ih->wptr_offs);
+                amdgpu_device_wb_free(adev, ih->rptr_offs);
         }
 }
 
@@ -142,56 +123,43 @@ void amdgpu_ih_ring_fini(struct amdgpu_device *adev)
  * amdgpu_ih_process - interrupt handler
  *
  * @adev: amdgpu_device pointer
+ * @ih: ih ring to process
  *
  * Interrupt hander (VI), walk the IH ring.
  * Returns irq process return code.
  */
-int amdgpu_ih_process(struct amdgpu_device *adev)
+int amdgpu_ih_process(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih,
+                      void (*callback)(struct amdgpu_device *adev,
+                                       struct amdgpu_ih_ring *ih))
 {
-        struct amdgpu_iv_entry entry;
         u32 wptr;
 
-        if (!adev->irq.ih.enabled || adev->shutdown)
+        if (!ih->enabled || adev->shutdown)
                 return IRQ_NONE;
 
         wptr = amdgpu_ih_get_wptr(adev);
 
 restart_ih:
         /* is somebody else already processing irqs? */
-        if (atomic_xchg(&adev->irq.ih.lock, 1))
+        if (atomic_xchg(&ih->lock, 1))
                 return IRQ_NONE;
 
-        DRM_DEBUG("%s: rptr %d, wptr %d\n", __func__, adev->irq.ih.rptr, wptr);
+        DRM_DEBUG("%s: rptr %d, wptr %d\n", __func__, ih->rptr, wptr);
 
         /* Order reading of wptr vs. reading of IH ring data */
         rmb();
 
-        while (adev->irq.ih.rptr != wptr) {
-                u32 ring_index = adev->irq.ih.rptr >> 2;
-
-                /* Prescreening of high-frequency interrupts */
-                if (!amdgpu_ih_prescreen_iv(adev)) {
-                        adev->irq.ih.rptr &= adev->irq.ih.ptr_mask;
-                        continue;
-                }
-
-                /* Before dispatching irq to IP blocks, send it to amdkfd */
-                amdgpu_amdkfd_interrupt(adev,
-                                (const void *) &adev->irq.ih.ring[ring_index]);
-
-                entry.iv_entry = (const uint32_t *)
-                        &adev->irq.ih.ring[ring_index];
-                amdgpu_ih_decode_iv(adev, &entry);
-                adev->irq.ih.rptr &= adev->irq.ih.ptr_mask;
-
-                amdgpu_irq_dispatch(adev, &entry);
+        while (ih->rptr != wptr) {
+                callback(adev, ih);
+                ih->rptr &= ih->ptr_mask;
         }
+
         amdgpu_ih_set_rptr(adev);
-        atomic_set(&adev->irq.ih.lock, 0);
+        atomic_set(&ih->lock, 0);
 
         /* make sure wptr hasn't changed while processing */
         wptr = amdgpu_ih_get_wptr(adev);
-        if (wptr != adev->irq.ih.rptr)
+        if (wptr != ih->rptr)
                 goto restart_ih;
 
         return IRQ_HANDLED;
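
The rework merged above makes the IH helpers operate on an explicit struct amdgpu_ih_ring instead of the global adev->irq.ih, replaces the deprecated pci_alloc_consistent()/pci_free_consistent() pair with dma_alloc_coherent()/dma_free_coherent(), and moves the per-entry work (prescreening, amdkfd forwarding, decode, dispatch) out of amdgpu_ih_process() into a caller-supplied callback. Below is a minimal sketch of how a caller might drive the new interface; the example_ih_* names are hypothetical and chosen only for illustration, and the real wiring lives in amdgpu_irq.c and the SOC-specific *_ih.c files rather than in code like this.

```c
/*
 * Hypothetical caller of the reworked IH API shown in the diff above.
 * The example_ih_* names do not exist in the tree; only the amdgpu_ih_*
 * functions and the struct amdgpu_ih_ring fields come from the patch.
 */
#include "amdgpu.h"
#include "amdgpu_ih.h"
#include "amdgpu_irq.h"

/* Per-entry callback: roughly the work the old amdgpu_ih_process() loop
 * did inline - decode the IV entry at the current rptr and dispatch it.
 * The IP-specific decode_iv hook also advances the read pointer.
 */
static void example_ih_callback(struct amdgpu_device *adev,
				struct amdgpu_ih_ring *ih)
{
	u32 ring_index = ih->rptr >> 2;
	struct amdgpu_iv_entry entry;

	entry.iv_entry = (const uint32_t *)&ih->ring[ring_index];
	amdgpu_ih_decode_iv(adev, &entry);
	amdgpu_irq_dispatch(adev, &entry);
}

static int example_ih_sw_init(struct amdgpu_device *adev)
{
	/* 64KB ring backed by dma_alloc_coherent (use_bus_addr == true) */
	return amdgpu_ih_ring_init(adev, &adev->irq.ih, 64 * 1024, true);
}

static void example_ih_sw_fini(struct amdgpu_device *adev)
{
	amdgpu_ih_ring_fini(adev, &adev->irq.ih);
}

/* Interrupt handler: walk the ring, calling the callback once per entry. */
static int example_ih_irq(struct amdgpu_device *adev)
{
	return amdgpu_ih_process(adev, &adev->irq.ih, example_ih_callback);
}
```

Passing both the ring and the per-entry handler explicitly is what allows the same ring walker to service more than one IH ring per device instead of being tied to adev->irq.ih.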