Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c | 130
1 file changed, 85 insertions, 45 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index fafebec5b7b6..8112ffc85995 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -99,6 +99,21 @@ const char *soc15_ih_clientid_name[] = {
 	"MP1"
 };
 
+const int node_id_to_phys_map[NODEID_MAX] = {
+	[AID0_NODEID] = 0,
+	[XCD0_NODEID] = 0,
+	[XCD1_NODEID] = 1,
+	[AID1_NODEID] = 1,
+	[XCD2_NODEID] = 2,
+	[XCD3_NODEID] = 3,
+	[AID2_NODEID] = 2,
+	[XCD4_NODEID] = 4,
+	[XCD5_NODEID] = 5,
+	[AID3_NODEID] = 3,
+	[XCD6_NODEID] = 6,
+	[XCD7_NODEID] = 7,
+};
+
 /**
  * amdgpu_irq_disable_all - disable *all* interrupts
  *
@@ -109,7 +124,7 @@ const char *soc15_ih_clientid_name[] = {
 void amdgpu_irq_disable_all(struct amdgpu_device *adev)
 {
 	unsigned long irqflags;
-	unsigned i, j, k;
+	unsigned int i, j, k;
 	int r;
 
 	spin_lock_irqsave(&adev->irq.lock, irqflags);
@@ -124,12 +139,12 @@ void amdgpu_irq_disable_all(struct amdgpu_device *adev)
 				continue;
 
 			for (k = 0; k < src->num_types; ++k) {
-				atomic_set(&src->enabled_types[k], 0);
 				r = src->funcs->set(adev, src, k,
 						    AMDGPU_IRQ_STATE_DISABLE);
 				if (r)
-					DRM_ERROR("error disabling interrupt (%d)\n",
-						  r);
+					dev_err(adev->dev,
+						"error disabling interrupt (%d)\n",
+						r);
 			}
 		}
 	}
@@ -228,7 +243,7 @@ static bool amdgpu_msi_ok(struct amdgpu_device *adev)
 	return true;
 }
 
-static void amdgpu_restore_msix(struct amdgpu_device *adev)
+void amdgpu_restore_msix(struct amdgpu_device *adev)
 {
 	u16 ctrl;
 
@@ -256,29 +271,29 @@ static void amdgpu_restore_msix(struct amdgpu_device *adev)
  */
 int amdgpu_irq_init(struct amdgpu_device *adev)
 {
-	int r = 0;
-	unsigned int irq;
+	unsigned int irq, flags;
+	int r;
 
 	spin_lock_init(&adev->irq.lock);
 
 	/* Enable MSI if not disabled by module parameter */
 	adev->irq.msi_enabled = false;
 
-	if (amdgpu_msi_ok(adev)) {
-		int nvec = pci_msix_vec_count(adev->pdev);
-		unsigned int flags;
+	if (!amdgpu_msi_ok(adev))
+		flags = PCI_IRQ_INTX;
+	else
+		flags = PCI_IRQ_ALL_TYPES;
 
-		if (nvec <= 0) {
-			flags = PCI_IRQ_MSI;
-		} else {
-			flags = PCI_IRQ_MSI | PCI_IRQ_MSIX;
-		}
-		/* we only need one vector */
-		nvec = pci_alloc_irq_vectors(adev->pdev, 1, 1, flags);
-		if (nvec > 0) {
-			adev->irq.msi_enabled = true;
-			dev_dbg(adev->dev, "using MSI/MSI-X.\n");
-		}
+	/* we only need one vector */
+	r = pci_alloc_irq_vectors(adev->pdev, 1, 1, flags);
+	if (r < 0) {
+		dev_err(adev->dev, "Failed to alloc msi vectors\n");
+		return r;
+	}
+
+	if (amdgpu_msi_ok(adev)) {
+		adev->irq.msi_enabled = true;
+		dev_dbg(adev->dev, "using MSI/MSI-X.\n");
 	}
 
 	INIT_WORK(&adev->irq.ih1_work, amdgpu_irq_handle_ih1);
@@ -288,22 +303,29 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
 	/* Use vector 0 for MSI-X. */
 	r = pci_irq_vector(adev->pdev, 0);
 	if (r < 0)
-		return r;
+		goto free_vectors;
 	irq = r;
 
 	/* PCI devices require shared interrupts. */
 	r = request_irq(irq, amdgpu_irq_handler, IRQF_SHARED,
 			adev_to_drm(adev)->driver->name, adev_to_drm(adev));
 	if (r)
-		return r;
+		goto free_vectors;
+
 	adev->irq.installed = true;
 	adev->irq.irq = irq;
 	adev_to_drm(adev)->max_vblank_count = 0x00ffffff;
 
-	DRM_DEBUG("amdgpu: irq initialized.\n");
+	dev_dbg(adev->dev, "amdgpu: irq initialized.\n");
 	return 0;
-}
 
+free_vectors:
+	if (adev->irq.msi_enabled)
+		pci_free_irq_vectors(adev->pdev);
+
+	adev->irq.msi_enabled = false;
+	return r;
+}
 
 void amdgpu_irq_fini_hw(struct amdgpu_device *adev)
 {
@@ -331,7 +353,7 @@ void amdgpu_irq_fini_hw(struct amdgpu_device *adev)
  */
 void amdgpu_irq_fini_sw(struct amdgpu_device *adev)
 {
-	unsigned i, j;
+	unsigned int i, j;
 
 	for (i = 0; i < AMDGPU_IRQ_CLIENTID_MAX; ++i) {
 		if (!adev->irq.client[i].sources)
@@ -365,7 +387,7 @@ void amdgpu_irq_fini_sw(struct amdgpu_device *adev)
  * 0 on success or error code otherwise
  */
 int amdgpu_irq_add_id(struct amdgpu_device *adev,
-		      unsigned client_id, unsigned src_id,
+		      unsigned int client_id, unsigned int src_id,
 		      struct amdgpu_irq_src *source)
 {
 	if (client_id >= AMDGPU_IRQ_CLIENTID_MAX)
@@ -417,13 +439,21 @@ void amdgpu_irq_dispatch(struct amdgpu_device *adev,
 {
 	u32 ring_index = ih->rptr >> 2;
 	struct amdgpu_iv_entry entry;
-	unsigned client_id, src_id;
+	unsigned int client_id, src_id;
 	struct amdgpu_irq_src *src;
 	bool handled = false;
 	int r;
 
 	entry.ih = ih;
 	entry.iv_entry = (const uint32_t *)&ih->ring[ring_index];
+
+	/*
+	 * timestamp is not supported on some legacy SOCs (cik, cz, iceland,
+	 * si and tonga), so initialize timestamp and timestamp_src to 0
+	 */
+	entry.timestamp = 0;
+	entry.timestamp_src = 0;
+
 	amdgpu_ih_decode_iv(adev, &entry);
 
 	trace_amdgpu_iv(ih - &adev->irq.ih, &entry);
@@ -432,28 +462,33 @@ void amdgpu_irq_dispatch(struct amdgpu_device *adev,
 	src_id = entry.src_id;
 
 	if (client_id >= AMDGPU_IRQ_CLIENTID_MAX) {
-		DRM_DEBUG("Invalid client_id in IV: %d\n", client_id);
+		dev_dbg(adev->dev, "Invalid client_id in IV: %d\n", client_id);
 
 	} else if (src_id >= AMDGPU_MAX_IRQ_SRC_ID) {
-		DRM_DEBUG("Invalid src_id in IV: %d\n", src_id);
+		dev_dbg(adev->dev, "Invalid src_id in IV: %d\n", src_id);
 
-	} else if ((client_id == AMDGPU_IRQ_CLIENTID_LEGACY) &&
+	} else if (((client_id == AMDGPU_IRQ_CLIENTID_LEGACY) ||
+		    (client_id == SOC15_IH_CLIENTID_ISP)) &&
 		   adev->irq.virq[src_id]) {
 		generic_handle_domain_irq(adev->irq.domain, src_id);
 
 	} else if (!adev->irq.client[client_id].sources) {
-		DRM_DEBUG("Unregistered interrupt client_id: %d src_id: %d\n",
-			  client_id, src_id);
+		dev_dbg(adev->dev,
+			"Unregistered interrupt client_id: %d src_id: %d\n",
+			client_id, src_id);
 
 	} else if ((src = adev->irq.client[client_id].sources[src_id])) {
 		r = src->funcs->process(adev, src, &entry);
 		if (r < 0)
-			DRM_ERROR("error processing interrupt (%d)\n", r);
+			dev_err(adev->dev, "error processing interrupt (%d)\n",
+				r);
 		else if (r)
 			handled = true;
 
 	} else {
-		DRM_DEBUG("Unhandled interrupt src_id: %d\n", src_id);
+		dev_dbg(adev->dev,
+			"Unregistered interrupt src_id: %d of client_id:%d\n",
+			src_id, client_id);
 	}
 
 	/* Send it to amdkfd as well if it isn't already handled */
@@ -478,7 +513,7 @@ void amdgpu_irq_delegate(struct amdgpu_device *adev,
 			 struct amdgpu_iv_entry *entry,
 			 unsigned int num_dw)
 {
-	amdgpu_ih_ring_write(&adev->irq.ih_soft, entry->iv_entry, num_dw);
+	amdgpu_ih_ring_write(adev, &adev->irq.ih_soft, entry->iv_entry, num_dw);
 	schedule_work(&adev->irq.ih_soft_work);
 }
 
@@ -492,7 +527,7 @@ void amdgpu_irq_delegate(struct amdgpu_device *adev,
  * Updates interrupt state for the specific source (all ASICs).
  */
 int amdgpu_irq_update(struct amdgpu_device *adev,
-		      struct amdgpu_irq_src *src, unsigned type)
+		      struct amdgpu_irq_src *src, unsigned int type)
 {
 	unsigned long irqflags;
 	enum amdgpu_interrupt_state state;
@@ -501,7 +536,8 @@ int amdgpu_irq_update(struct amdgpu_device *adev,
 	spin_lock_irqsave(&adev->irq.lock, irqflags);
 
 	/* We need to determine after taking the lock, otherwise
-	   we might disable just enabled interrupts again */
+	 * we might disable just enabled interrupts again
+	 */
 	if (amdgpu_irq_enabled(adev, src, type))
 		state = AMDGPU_IRQ_STATE_ENABLE;
 	else
@@ -555,7 +591,7 @@ void amdgpu_irq_gpu_reset_resume_helper(struct amdgpu_device *adev)
  * 0 on success or error code otherwise
  */
 int amdgpu_irq_get(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
-		   unsigned type)
+		   unsigned int type)
 {
 	if (!adev->irq.installed)
 		return -ENOENT;
@@ -585,8 +621,12 @@ int amdgpu_irq_get(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
  * 0 on success or error code otherwise
  */
 int amdgpu_irq_put(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
-		   unsigned type)
+		   unsigned int type)
 {
+	/* When the threshold is reached,the interrupt source may not be enabled.return -EINVAL */
+	if (amdgpu_ras_is_rma(adev) && !amdgpu_irq_enabled(adev, src, type))
+		return -EINVAL;
+
 	if (!adev->irq.installed)
 		return -ENOENT;
 
@@ -619,7 +659,7 @@ int amdgpu_irq_put(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
  * invalid parameters
  */
 bool amdgpu_irq_enabled(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
-			unsigned type)
+			unsigned int type)
 {
 	if (!adev->irq.installed)
 		return false;
@@ -693,10 +733,10 @@ static const struct irq_domain_ops amdgpu_hw_irqdomain_ops = {
  */
 int amdgpu_irq_add_domain(struct amdgpu_device *adev)
 {
-	adev->irq.domain = irq_domain_add_linear(NULL, AMDGPU_MAX_IRQ_SRC_ID,
-						 &amdgpu_hw_irqdomain_ops, adev);
+	adev->irq.domain = irq_domain_create_linear(NULL, AMDGPU_MAX_IRQ_SRC_ID,
+						    &amdgpu_hw_irqdomain_ops, adev);
 	if (!adev->irq.domain) {
-		DRM_ERROR("GPU irq add domain failed\n");
+		dev_err(adev->dev, "GPU irq add domain failed\n");
 		return -ENODEV;
 	}
 
@@ -732,7 +772,7 @@ void amdgpu_irq_remove_domain(struct amdgpu_device *adev)
  * Returns:
  * Linux IRQ
  */
-unsigned amdgpu_irq_create_mapping(struct amdgpu_device *adev, unsigned src_id)
+unsigned int amdgpu_irq_create_mapping(struct amdgpu_device *adev, unsigned int src_id)
 {
 	adev->irq.virq[src_id] = irq_create_mapping(adev->irq.domain, src_id);
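The reworked amdgpu_irq_init() hunk above drops the pci_msix_vec_count() probing and relies on a single pci_alloc_irq_vectors() call plus a free_vectors unwind path. As a reading aid only, here is a minimal, self-contained sketch of that allocation pattern for a hypothetical PCI driver; the function name, label and "example" string are illustrative and not part of amdgpu:

#include <linux/interrupt.h>
#include <linux/pci.h>

/*
 * Hypothetical helper mirroring the single-vector pattern: ask for exactly
 * one vector of any type (MSI-X, MSI or INTx), or force plain INTx when MSI
 * is known to be unusable, and release the vectors again on failure.
 */
static int example_request_one_irq(struct pci_dev *pdev, bool msi_ok,
				   irq_handler_t handler, void *ctx)
{
	unsigned int flags = msi_ok ? PCI_IRQ_ALL_TYPES : PCI_IRQ_INTX;
	int irq, ret;

	/* we only need one vector */
	ret = pci_alloc_irq_vectors(pdev, 1, 1, flags);
	if (ret < 0)
		return ret;

	/* Vector 0 is the only one that was allocated. */
	irq = pci_irq_vector(pdev, 0);
	if (irq < 0) {
		ret = irq;
		goto free_vectors;
	}

	ret = request_irq(irq, handler, IRQF_SHARED, "example", ctx);
	if (ret)
		goto free_vectors;

	return 0;

free_vectors:
	pci_free_irq_vectors(pdev);
	return ret;
}

pci_alloc_irq_vectors() returns the number of vectors actually allocated (at least min_vecs) or a negative errno, which is why the patch can drop the old nvec bookkeeping and simply check for a negative return.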

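The final hunks also switch the virtual IRQ domain allocation from irq_domain_add_linear() to irq_domain_create_linear(), while amdgpu_irq_dispatch() keeps routing legacy (and now ISP) source ids into that domain via generic_handle_domain_irq(). A rough standalone sketch of that linear-domain pattern follows, assuming illustrative "example" names and a stand-in size constant rather than amdgpu's real amdgpu_hw_irqdomain_ops:

#include <linux/errno.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>

#define EXAMPLE_MAX_SRC_ID 256	/* stand-in for AMDGPU_MAX_IRQ_SRC_ID */

/* Map a hardware source id onto a Linux virq with a trivial flow handler. */
static int example_irqdomain_map(struct irq_domain *d, unsigned int irq,
				 irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq);
	return 0;
}

static const struct irq_domain_ops example_irqdomain_ops = {
	.map = example_irqdomain_map,
};

static struct irq_domain *example_domain;

/* Create one linear domain covering every possible hardware source id. */
static int example_add_domain(void *host_data)
{
	example_domain = irq_domain_create_linear(NULL, EXAMPLE_MAX_SRC_ID,
						  &example_irqdomain_ops,
						  host_data);
	return example_domain ? 0 : -ENODEV;
}

/* A client registers interest by creating a mapping for its source id... */
static unsigned int example_create_mapping(unsigned int src_id)
{
	return irq_create_mapping(example_domain, src_id);
}

/* ...and the top-level handler forwards decoded source ids into the domain. */
static void example_dispatch(unsigned int src_id)
{
	generic_handle_domain_irq(example_domain, src_id);
}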