author		Dave Airlie <airlied@redhat.com>	2022-04-28 14:33:20 +1000
committer	Dave Airlie <airlied@redhat.com>	2022-04-28 14:33:20 +1000
commit		dbe946287e0825f0e9cd4cbeacfcde9d9b2dd168 (patch)
tree		3ac3e33cfbb6670827e27cd22faf2ba03ddc4209 /drivers/gpu/drm/amd/amdkfd
parent		19df0cfa258cd42f7f106f6085f1e625f26283db (diff)
parent		d68cf992ded575928cf4ddf7c64faff0d8dcce14 (diff)
Merge tag 'amd-drm-next-5.19-2022-04-15' of https://gitlab.freedesktop.org/agd5f/linux into drm-next
amd-drm-next-5.19-2022-04-15:
amdgpu:
- USB-C updates
- GPUVM updates
- TMZ fixes for RV
- DCN 3.1 pstate fixes
- Display z state fixes
- RAS fixes
- Misc code cleanups and spelling fixes
- More DC FP rework
- GPUVM TLB handling rework
- Power management sysfs code cleanup
- Add RAS support for VCN
- Backlight fix
- Add unique id support for more asics
- Misc display updates
- SR-IOV fixes
- Extend CG and PG flags to 64 bits
- Enable VCN clk sysfs nodes for navi12
amdkfd:
- Fix IO link cleanup during device removal
- RAS fixes
- Retry fault fixes
- Asynchronously free events
- SVM fixes
radeon:
- Drop some dead code
- Misc code cleanups
From: Alex Deucher <alexander.deucher@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20220415135144.5700-1-alexander.deucher@amd.com
Signed-off-by: Dave Airlie <airlied@redhat.com>
Diffstat (limited to 'drivers/gpu/drm/amd/amdkfd')
-rw-r--r--	drivers/gpu/drm/amd/amdkfd/kfd_chardev.c		|  19
-rw-r--r--	drivers/gpu/drm/amd/amdkfd/kfd_crat.c			|   4
-rw-r--r--	drivers/gpu/drm/amd/amdkfd/kfd_events.c			| 152
-rw-r--r--	drivers/gpu/drm/amd/amdkfd/kfd_events.h			|   3
-rw-r--r--	drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c		|  49
-rw-r--r--	drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c		|  11
-rw-r--r--	drivers/gpu/drm/amd/amdkfd/kfd_priv.h			|   3
-rw-r--r--	drivers/gpu/drm/amd/amdkfd/kfd_process.c		|  13
-rw-r--r--	drivers/gpu/drm/amd/amdkfd/kfd_svm.c			|  37
-rw-r--r--	drivers/gpu/drm/amd/amdkfd/kfd_topology.c		|  83
10 files changed, 254 insertions, 120 deletions
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index 607f65ab39ac..60438193c0c1 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -1146,7 +1146,6 @@ static int kfd_ioctl_map_memory_to_gpu(struct file *filep,
 	long err = 0;
 	int i;
 	uint32_t *devices_arr = NULL;
-	bool table_freed = false;
 
 	if (!args->n_devices) {
 		pr_debug("Device IDs array empty\n");
@@ -1208,7 +1207,7 @@ static int kfd_ioctl_map_memory_to_gpu(struct file *filep,
 		err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
 			peer_pdd->dev->adev, (struct kgd_mem *)mem,
-			peer_pdd->drm_priv, &table_freed);
+			peer_pdd->drm_priv);
 		if (err) {
 			struct pci_dev *pdev = peer_pdd->dev->adev->pdev;
@@ -1233,13 +1232,11 @@ static int kfd_ioctl_map_memory_to_gpu(struct file *filep,
 	}
 
 	/* Flush TLBs after waiting for the page table updates to complete */
-	if (table_freed || !kfd_flush_tlb_after_unmap(dev)) {
-		for (i = 0; i < args->n_devices; i++) {
-			peer_pdd = kfd_process_device_data_by_id(p, devices_arr[i]);
-			if (WARN_ON_ONCE(!peer_pdd))
-				continue;
-			kfd_flush_tlb(peer_pdd, TLB_FLUSH_LEGACY);
-		}
+	for (i = 0; i < args->n_devices; i++) {
+		peer_pdd = kfd_process_device_data_by_id(p, devices_arr[i]);
+		if (WARN_ON_ONCE(!peer_pdd))
+			continue;
+		kfd_flush_tlb(peer_pdd, TLB_FLUSH_LEGACY);
 	}
 
 	kfree(devices_arr);
@@ -2206,8 +2203,8 @@ static int criu_restore_bo(struct kfd_process *p,
 		if (IS_ERR(peer_pdd))
 			return PTR_ERR(peer_pdd);
 
-		ret = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(peer->adev, kgd_mem, peer_pdd->drm_priv,
-							    NULL);
+		ret = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(peer->adev, kgd_mem,
+							    peer_pdd->drm_priv);
 		if (ret) {
 			pr_err("Failed to map to gpu %d/%d\n", j, p->n_pdds);
 			return ret;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
index 1eaabd2cb41b..afc8a7fcdad8 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
@@ -1056,7 +1056,7 @@ static int kfd_parse_subtype_iolink(struct crat_subtype_iolink *iolink,
 	 * table, add corresponded reversed direction link now.
 	 */
 	if (props && (iolink->flags & CRAT_IOLINK_FLAGS_BI_DIRECTIONAL)) {
-		to_dev = kfd_topology_device_by_proximity_domain(id_to);
+		to_dev = kfd_topology_device_by_proximity_domain_no_lock(id_to);
 		if (!to_dev)
 			return -ENODEV;
 		/* same everything but the other direction */
@@ -2225,7 +2225,7 @@ static int kfd_create_vcrat_image_gpu(void *pcrat_image,
 	 */
 	if (kdev->hive_id) {
 		for (nid = 0; nid < proximity_domain; ++nid) {
-			peer_dev = kfd_topology_device_by_proximity_domain(nid);
+			peer_dev = kfd_topology_device_by_proximity_domain_no_lock(nid);
 			if (!peer_dev->gpu)
 				continue;
 			if (peer_dev->gpu->hive_id != kdev->hive_id)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
index 64f4a51cc880..6e5e8d637f48 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
@@ -128,8 +128,8 @@ static int allocate_event_notification_slot(struct kfd_process *p,
 }
 
 /*
- * Assumes that p->event_mutex is held and of course that p is not going
- * away (current or locked).
+ * Assumes that p->event_mutex or rcu_readlock is held and of course that p is
+ * not going away.
  */
 static struct kfd_event *lookup_event_by_id(struct kfd_process *p, uint32_t id)
 {
@@ -251,16 +251,18 @@ static void destroy_event(struct kfd_process *p, struct kfd_event *ev)
 	struct kfd_event_waiter *waiter;
 
 	/* Wake up pending waiters. They will return failure */
+	spin_lock(&ev->lock);
 	list_for_each_entry(waiter, &ev->wq.head, wait.entry)
-		waiter->event = NULL;
+		WRITE_ONCE(waiter->event, NULL);
 	wake_up_all(&ev->wq);
+	spin_unlock(&ev->lock);
 
 	if (ev->type == KFD_EVENT_TYPE_SIGNAL ||
 	    ev->type == KFD_EVENT_TYPE_DEBUG)
 		p->signal_event_count--;
 
 	idr_remove(&p->event_idr, ev->event_id);
-	kfree(ev);
+	kfree_rcu(ev, rcu);
 }
 
 static void destroy_events(struct kfd_process *p)
@@ -392,6 +394,7 @@ int kfd_event_create(struct file *devkfd, struct kfd_process *p,
 	ev->auto_reset = auto_reset;
 	ev->signaled = false;
 
+	spin_lock_init(&ev->lock);
 	init_waitqueue_head(&ev->wq);
 
 	*event_page_offset = 0;
@@ -466,6 +469,7 @@ int kfd_criu_restore_event(struct file *devkfd,
 	ev->auto_reset = ev_priv->auto_reset;
 	ev->signaled = ev_priv->signaled;
 
+	spin_lock_init(&ev->lock);
 	init_waitqueue_head(&ev->wq);
 
 	mutex_lock(&p->event_mutex);
@@ -609,13 +613,13 @@ static void set_event(struct kfd_event *ev)
 
 	/* Auto reset if the list is non-empty and we're waking
 	 * someone. waitqueue_active is safe here because we're
-	 * protected by the p->event_mutex, which is also held when
+	 * protected by the ev->lock, which is also held when
 	 * updating the wait queues in kfd_wait_on_events.
 	 */
 	ev->signaled = !ev->auto_reset || !waitqueue_active(&ev->wq);
 
 	list_for_each_entry(waiter, &ev->wq.head, wait.entry)
-		waiter->activated = true;
+		WRITE_ONCE(waiter->activated, true);
 
 	wake_up_all(&ev->wq);
 }
@@ -626,16 +630,23 @@ int kfd_set_event(struct kfd_process *p, uint32_t event_id)
 	int ret = 0;
 	struct kfd_event *ev;
 
-	mutex_lock(&p->event_mutex);
+	rcu_read_lock();
 
 	ev = lookup_event_by_id(p, event_id);
+	if (!ev) {
+		ret = -EINVAL;
+		goto unlock_rcu;
+	}
+	spin_lock(&ev->lock);
 
-	if (ev && event_can_be_cpu_signaled(ev))
+	if (event_can_be_cpu_signaled(ev))
 		set_event(ev);
 	else
 		ret = -EINVAL;
 
-	mutex_unlock(&p->event_mutex);
+	spin_unlock(&ev->lock);
+unlock_rcu:
+	rcu_read_unlock();
 	return ret;
 }
@@ -650,23 +661,30 @@ int kfd_reset_event(struct kfd_process *p, uint32_t event_id)
 	int ret = 0;
 	struct kfd_event *ev;
 
-	mutex_lock(&p->event_mutex);
+	rcu_read_lock();
 
 	ev = lookup_event_by_id(p, event_id);
+	if (!ev) {
+		ret = -EINVAL;
+		goto unlock_rcu;
+	}
+	spin_lock(&ev->lock);
 
-	if (ev && event_can_be_cpu_signaled(ev))
+	if (event_can_be_cpu_signaled(ev))
 		reset_event(ev);
 	else
 		ret = -EINVAL;
 
-	mutex_unlock(&p->event_mutex);
+	spin_unlock(&ev->lock);
+unlock_rcu:
+	rcu_read_unlock();
 	return ret;
 }
 
 static void acknowledge_signal(struct kfd_process *p, struct kfd_event *ev)
 {
-	page_slots(p->signal_page)[ev->event_id] = UNSIGNALED_EVENT_SLOT;
+	WRITE_ONCE(page_slots(p->signal_page)[ev->event_id], UNSIGNALED_EVENT_SLOT);
 }
 
 static void set_event_from_interrupt(struct kfd_process *p,
@@ -674,7 +692,9 @@ static void set_event_from_interrupt(struct kfd_process *p,
 {
 	if (ev && event_can_be_gpu_signaled(ev)) {
 		acknowledge_signal(p, ev);
+		spin_lock(&ev->lock);
 		set_event(ev);
+		spin_unlock(&ev->lock);
 	}
 }
 
@@ -693,7 +713,7 @@ void kfd_signal_event_interrupt(u32 pasid, uint32_t partial_id,
 	if (!p)
 		return; /* Presumably process exited. */
 
-	mutex_lock(&p->event_mutex);
+	rcu_read_lock();
 
 	if (valid_id_bits)
 		ev = lookup_signaled_event_by_partial_id(p, partial_id,
@@ -721,7 +741,7 @@ void kfd_signal_event_interrupt(u32 pasid, uint32_t partial_id,
 				if (id >= KFD_SIGNAL_EVENT_LIMIT)
 					break;
 
-				if (slots[id] != UNSIGNALED_EVENT_SLOT)
+				if (READ_ONCE(slots[id]) != UNSIGNALED_EVENT_SLOT)
 					set_event_from_interrupt(p, ev);
 			}
 		} else {
@@ -730,14 +750,14 @@ void kfd_signal_event_interrupt(u32 pasid, uint32_t partial_id,
 			 * only signaled events from the IDR.
 			 */
 			for (id = 0; id < KFD_SIGNAL_EVENT_LIMIT; id++)
-				if (slots[id] != UNSIGNALED_EVENT_SLOT) {
+				if (READ_ONCE(slots[id]) != UNSIGNALED_EVENT_SLOT) {
 					ev = lookup_event_by_id(p, id);
 					set_event_from_interrupt(p, ev);
 				}
 		}
 	}
 
-	mutex_unlock(&p->event_mutex);
+	rcu_read_unlock();
 	kfd_unref_process(p);
 }
 
@@ -760,7 +780,7 @@ static struct kfd_event_waiter *alloc_event_waiters(uint32_t num_events)
 	return event_waiters;
 }
 
-static int init_event_waiter_get_status(struct kfd_process *p,
+static int init_event_waiter(struct kfd_process *p,
 		struct kfd_event_waiter *waiter,
 		uint32_t event_id)
 {
@@ -769,22 +789,15 @@ static int init_event_waiter_get_status(struct kfd_process *p,
 	if (!ev)
 		return -EINVAL;
 
+	spin_lock(&ev->lock);
 	waiter->event = ev;
 	waiter->activated = ev->signaled;
 	ev->signaled = ev->signaled && !ev->auto_reset;
-
-	return 0;
-}
-
-static void init_event_waiter_add_to_waitlist(struct kfd_event_waiter *waiter)
-{
-	struct kfd_event *ev = waiter->event;
-
-	/* Only add to the wait list if we actually need to
-	 * wait on this event.
-	 */
 	if (!waiter->activated)
 		add_wait_queue(&ev->wq, &waiter->wait);
+	spin_unlock(&ev->lock);
+
+	return 0;
 }
 
 /* test_event_condition - Test condition of events being waited for
@@ -804,10 +817,10 @@ static uint32_t test_event_condition(bool all, uint32_t num_events,
 	uint32_t activated_count = 0;
 
 	for (i = 0; i < num_events; i++) {
-		if (!event_waiters[i].event)
+		if (!READ_ONCE(event_waiters[i].event))
 			return KFD_IOC_WAIT_RESULT_FAIL;
 
-		if (event_waiters[i].activated) {
+		if (READ_ONCE(event_waiters[i].activated)) {
 			if (!all)
 				return KFD_IOC_WAIT_RESULT_COMPLETE;
 
@@ -836,6 +849,8 @@ static int copy_signaled_event_data(uint32_t num_events,
 	for (i = 0; i < num_events; i++) {
 		waiter = &event_waiters[i];
 		event = waiter->event;
+		if (!event)
+			return -EINVAL; /* event was destroyed */
 		if (waiter->activated && event->type == KFD_EVENT_TYPE_MEMORY) {
 			dst = &data[i].memory_exception_data;
 			src = &event->memory_exception_data;
@@ -846,11 +861,8 @@ static int copy_signaled_event_data(uint32_t num_events,
 	}
 
 	return 0;
-
 }
 
-
-
 static long user_timeout_to_jiffies(uint32_t user_timeout_ms)
 {
 	if (user_timeout_ms == KFD_EVENT_TIMEOUT_IMMEDIATE)
@@ -874,9 +886,12 @@ static void free_waiters(uint32_t num_events, struct kfd_event_waiter *waiters)
 	uint32_t i;
 
 	for (i = 0; i < num_events; i++)
-		if (waiters[i].event)
+		if (waiters[i].event) {
+			spin_lock(&waiters[i].event->lock);
 			remove_wait_queue(&waiters[i].event->wq, &waiters[i].wait);
+			spin_unlock(&waiters[i].event->lock);
+		}
 
 	kfree(waiters);
 }
@@ -900,6 +915,9 @@ int kfd_wait_on_events(struct kfd_process *p,
 		goto out;
 	}
 
+	/* Use p->event_mutex here to protect against concurrent creation and
+	 * destruction of events while we initialize event_waiters.
+	 */
 	mutex_lock(&p->event_mutex);
 
 	for (i = 0; i < num_events; i++) {
@@ -911,8 +929,8 @@ int kfd_wait_on_events(struct kfd_process *p,
 			goto out_unlock;
 		}
 
-		ret = init_event_waiter_get_status(p, &event_waiters[i],
-				event_data.event_id);
+		ret = init_event_waiter(p, &event_waiters[i],
+				event_data.event_id);
 		if (ret)
 			goto out_unlock;
 	}
@@ -930,10 +948,6 @@ int kfd_wait_on_events(struct kfd_process *p,
 		goto out_unlock;
 	}
 
-	/* Add to wait lists if we need to wait. */
-	for (i = 0; i < num_events; i++)
-		init_event_waiter_add_to_waitlist(&event_waiters[i]);
-
 	mutex_unlock(&p->event_mutex);
 
 	while (true) {
@@ -978,14 +992,19 @@ int kfd_wait_on_events(struct kfd_process *p,
 	}
 	__set_current_state(TASK_RUNNING);
 
+	mutex_lock(&p->event_mutex);
 	/* copy_signaled_event_data may sleep. So this has to happen
 	 * after the task state is set back to RUNNING.
+	 *
+	 * The event may also have been destroyed after signaling. So
+	 * copy_signaled_event_data also must confirm that the event
+	 * still exists. Therefore this must be under the p->event_mutex
+	 * which is also held when events are destroyed.
 	 */
 	if (!ret && *wait_result == KFD_IOC_WAIT_RESULT_COMPLETE)
 		ret = copy_signaled_event_data(num_events,
 					       event_waiters, events);
 
-	mutex_lock(&p->event_mutex);
 out_unlock:
 	free_waiters(num_events, event_waiters);
 	mutex_unlock(&p->event_mutex);
@@ -1044,8 +1063,7 @@ int kfd_event_mmap(struct kfd_process *p, struct vm_area_struct *vma)
 }
 
 /*
- * Assumes that p->event_mutex is held and of course
- * that p is not going away (current or locked).
+ * Assumes that p is not going away.
  */
 static void lookup_events_by_type_and_signal(struct kfd_process *p,
 		int type, void *event_data)
@@ -1057,6 +1075,8 @@ static void lookup_events_by_type_and_signal(struct kfd_process *p,
 
 	ev_data = (struct kfd_hsa_memory_exception_data *) event_data;
 
+	rcu_read_lock();
+
 	id = KFD_FIRST_NONSIGNAL_EVENT_ID;
 	idr_for_each_entry_continue(&p->event_idr, ev, id)
 		if (ev->type == type) {
@@ -1064,9 +1084,11 @@ static void lookup_events_by_type_and_signal(struct kfd_process *p,
 			dev_dbg(kfd_device,
 					"Event found: id %X type %d",
 					ev->event_id, ev->type);
+			spin_lock(&ev->lock);
 			set_event(ev);
 			if (ev->type == KFD_EVENT_TYPE_MEMORY && ev_data)
 				ev->memory_exception_data = *ev_data;
+			spin_unlock(&ev->lock);
 		}
 
 	if (type == KFD_EVENT_TYPE_MEMORY) {
@@ -1089,6 +1111,8 @@ static void lookup_events_by_type_and_signal(struct kfd_process *p,
 				p->lead_thread->pid, p->pasid);
 		}
 	}
+
+	rcu_read_unlock();
 }
 
 #ifdef KFD_SUPPORT_IOMMU_V2
@@ -1164,16 +1188,10 @@ void kfd_signal_iommu_event(struct kfd_dev *dev, u32 pasid,
 
 	if (KFD_GC_VERSION(dev) != IP_VERSION(9, 1, 0) &&
 	    KFD_GC_VERSION(dev) != IP_VERSION(9, 2, 2) &&
-	    KFD_GC_VERSION(dev) != IP_VERSION(9, 3, 0)) {
-		mutex_lock(&p->event_mutex);
-
-		/* Lookup events by type and signal them */
+	    KFD_GC_VERSION(dev) != IP_VERSION(9, 3, 0))
 		lookup_events_by_type_and_signal(p, KFD_EVENT_TYPE_MEMORY,
 				&memory_exception_data);
 
-		mutex_unlock(&p->event_mutex);
-	}
-
 	kfd_unref_process(p);
 }
 #endif /* KFD_SUPPORT_IOMMU_V2 */
@@ -1190,12 +1208,7 @@ void kfd_signal_hw_exception_event(u32 pasid)
 	if (!p)
 		return; /* Presumably process exited. */
 
-	mutex_lock(&p->event_mutex);
-
-	/* Lookup events by type and signal them */
 	lookup_events_by_type_and_signal(p, KFD_EVENT_TYPE_HW_EXCEPTION, NULL);
-
-	mutex_unlock(&p->event_mutex);
 	kfd_unref_process(p);
 }
 
@@ -1231,16 +1244,19 @@ void kfd_signal_vm_fault_event(struct kfd_dev *dev, u32 pasid,
 			info->prot_write ? 1 : 0;
 		memory_exception_data.failure.imprecise = 0;
 	}
-	mutex_lock(&p->event_mutex);
+
+	rcu_read_lock();
 
 	id = KFD_FIRST_NONSIGNAL_EVENT_ID;
 	idr_for_each_entry_continue(&p->event_idr, ev, id)
 		if (ev->type == KFD_EVENT_TYPE_MEMORY) {
+			spin_lock(&ev->lock);
 			ev->memory_exception_data = memory_exception_data;
 			set_event(ev);
+			spin_unlock(&ev->lock);
 		}
 
-	mutex_unlock(&p->event_mutex);
+	rcu_read_unlock();
 	kfd_unref_process(p);
 }
 
@@ -1274,22 +1290,28 @@ void kfd_signal_reset_event(struct kfd_dev *dev)
 			continue;
 		}
 
-		mutex_lock(&p->event_mutex);
+		rcu_read_lock();
+
 		id = KFD_FIRST_NONSIGNAL_EVENT_ID;
 		idr_for_each_entry_continue(&p->event_idr, ev, id) {
 			if (ev->type == KFD_EVENT_TYPE_HW_EXCEPTION) {
+				spin_lock(&ev->lock);
 				ev->hw_exception_data = hw_exception_data;
 				ev->hw_exception_data.gpu_id = user_gpu_id;
 				set_event(ev);
+				spin_unlock(&ev->lock);
 			}
 			if (ev->type == KFD_EVENT_TYPE_MEMORY &&
 			    reset_cause == KFD_HW_EXCEPTION_ECC) {
+				spin_lock(&ev->lock);
 				ev->memory_exception_data = memory_exception_data;
 				ev->memory_exception_data.gpu_id = user_gpu_id;
 				set_event(ev);
+				spin_unlock(&ev->lock);
 			}
 		}
-		mutex_unlock(&p->event_mutex);
+
+		rcu_read_unlock();
 	}
 	srcu_read_unlock(&kfd_processes_srcu, idx);
 }
@@ -1322,19 +1344,25 @@ void kfd_signal_poison_consumed_event(struct kfd_dev *dev, u32 pasid)
 	memory_exception_data.gpu_id = user_gpu_id;
 	memory_exception_data.failure.imprecise = true;
 
-	mutex_lock(&p->event_mutex);
+	rcu_read_lock();
+
 	idr_for_each_entry_continue(&p->event_idr, ev, id) {
 		if (ev->type == KFD_EVENT_TYPE_HW_EXCEPTION) {
+			spin_lock(&ev->lock);
 			ev->hw_exception_data = hw_exception_data;
 			set_event(ev);
+			spin_unlock(&ev->lock);
 		}
 
 		if (ev->type == KFD_EVENT_TYPE_MEMORY) {
+			spin_lock(&ev->lock);
 			ev->memory_exception_data = memory_exception_data;
 			set_event(ev);
+			spin_unlock(&ev->lock);
 		}
 	}
-	mutex_unlock(&p->event_mutex);
+
+	rcu_read_unlock();
 
 	/* user application will handle SIGBUS signal */
 	send_sig(SIGBUS, p->lead_thread, 0);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.h b/drivers/gpu/drm/amd/amdkfd/kfd_events.h
index 1238af11916e..1c62c8dd6460 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_events.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.h
@@ -59,6 +59,7 @@ struct kfd_event {
 
 	int type;
 
+	spinlock_t lock;
 	wait_queue_head_t wq; /* List of event waiters. */
 
 	/* Only for signal events. */
@@ -69,6 +70,8 @@ struct kfd_event {
 		struct kfd_hsa_memory_exception_data memory_exception_data;
 		struct kfd_hsa_hw_exception_data hw_exception_data;
 	};
+
+	struct rcu_head rcu; /* for asynchronous kfree_rcu */
 };
 
 #define KFD_EVENT_TIMEOUT_IMMEDIATE 0
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
index 7eedbcd14828..03c29bdd89a1 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
@@ -91,28 +91,34 @@ enum SQ_INTERRUPT_ERROR_TYPE {
 #define KFD_SQ_INT_DATA__ERR_TYPE__SHIFT 20
 
 static void event_interrupt_poison_consumption(struct kfd_dev *dev,
-				uint16_t pasid, uint16_t source_id)
+				uint16_t pasid, uint16_t client_id)
 {
-	int ret = -EINVAL;
+	int old_poison, ret = -EINVAL;
 	struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
 
 	if (!p)
 		return;
 
 	/* all queues of a process will be unmapped in one time */
-	if (atomic_read(&p->poison)) {
-		kfd_unref_process(p);
-		return;
-	}
-
-	atomic_set(&p->poison, 1);
+	old_poison = atomic_cmpxchg(&p->poison, 0, 1);
 	kfd_unref_process(p);
+	if (old_poison)
+		return;
 
-	switch (source_id) {
-	case SOC15_INTSRC_SQ_INTERRUPT_MSG:
+	switch (client_id) {
+	case SOC15_IH_CLIENTID_SE0SH:
+	case SOC15_IH_CLIENTID_SE1SH:
+	case SOC15_IH_CLIENTID_SE2SH:
+	case SOC15_IH_CLIENTID_SE3SH:
+	case SOC15_IH_CLIENTID_UTCL2:
 		ret = kfd_dqm_evict_pasid(dev->dqm, pasid);
 		break;
-	case SOC15_INTSRC_SDMA_ECC:
+	case SOC15_IH_CLIENTID_SDMA0:
+	case SOC15_IH_CLIENTID_SDMA1:
+	case SOC15_IH_CLIENTID_SDMA2:
+	case SOC15_IH_CLIENTID_SDMA3:
+	case SOC15_IH_CLIENTID_SDMA4:
+		break;
 	default:
 		break;
 	}
@@ -122,10 +128,17 @@ static void event_interrupt_poison_consumption(struct kfd_dev *dev,
 	/* resetting queue passes, do page retirement without gpu reset
 	 * resetting queue fails, fallback to gpu reset solution
 	 */
-	if (!ret)
+	if (!ret) {
+		dev_warn(dev->adev->dev,
+			"RAS poison consumption, unmap queue flow succeeded: client id %d\n",
+			client_id);
 		amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, false);
-	else
+	} else {
+		dev_warn(dev->adev->dev,
+			"RAS poison consumption, fall back to gpu reset flow: client id %d\n",
+			client_id);
 		amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, true);
+	}
 }
 
 static bool event_interrupt_isr_v9(struct kfd_dev *dev,
@@ -270,7 +283,7 @@ static void event_interrupt_wq_v9(struct kfd_dev *dev,
 					sq_intr_err);
 				if (sq_intr_err != SQ_INTERRUPT_ERROR_TYPE_ILLEGAL_INST &&
 					sq_intr_err != SQ_INTERRUPT_ERROR_TYPE_MEMVIOL) {
-					event_interrupt_poison_consumption(dev, pasid, source_id);
+					event_interrupt_poison_consumption(dev, pasid, client_id);
 					return;
 				}
 				break;
@@ -291,7 +304,7 @@ static void event_interrupt_wq_v9(struct kfd_dev *dev,
 		if (source_id == SOC15_INTSRC_SDMA_TRAP) {
 			kfd_signal_event_interrupt(pasid, context_id0 & 0xfffffff, 28);
 		} else if (source_id == SOC15_INTSRC_SDMA_ECC) {
-			event_interrupt_poison_consumption(dev, pasid, source_id);
+			event_interrupt_poison_consumption(dev, pasid, client_id);
 			return;
 		}
 	} else if (client_id == SOC15_IH_CLIENTID_VMC ||
@@ -300,6 +313,12 @@ static void event_interrupt_wq_v9(struct kfd_dev *dev,
 		struct kfd_vm_fault_info info = {0};
 		uint16_t ring_id = SOC15_RING_ID_FROM_IH_ENTRY(ih_ring_entry);
 
+		if (client_id == SOC15_IH_CLIENTID_UTCL2 &&
+		    amdgpu_amdkfd_ras_query_utcl2_poison_status(dev->adev)) {
+			event_interrupt_poison_consumption(dev, pasid, client_id);
+			return;
+		}
+
 		info.vmid = vmid;
 		info.mc_id = client_id;
 		info.page_addr = ih_ring_entry[4] |
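The event_interrupt_poison_consumption() change above replaces a check-then-set pair (atomic_read() followed by atomic_set()) with a single atomic_cmpxchg(), so exactly one interrupt claims poison handling for the process. A minimal standalone sketch of the same pattern, with C11 atomics standing in for the kernel's atomic_t helpers (all names here are illustrative, not the kernel's):

/* Sketch: "first claimant wins" via compare-and-exchange. With the old
 * read-then-set sequence, two concurrent interrupts could both observe
 * poison == 0 and both proceed; the single atomic step closes that window.
 */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int poison;	/* models p->poison, initially 0 */

static int claim_poison_handling(void)
{
	int expected = 0;

	/* Succeeds only for the one caller that flips 0 -> 1, mirroring
	 * atomic_cmpxchg(&p->poison, 0, 1) returning the old value 0.
	 */
	return atomic_compare_exchange_strong(&poison, &expected, 1);
}

int main(void)
{
	printf("first claim: %d\n", claim_poison_handling());	/* prints 1 */
	printf("second claim: %d\n", claim_poison_handling());	/* prints 0 */
	return 0;
}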
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c b/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
index 9178cfe34f20..a9466d154395 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
@@ -146,15 +146,24 @@ static void interrupt_wq(struct work_struct *work)
 	struct kfd_dev *dev = container_of(work, struct kfd_dev,
 						interrupt_work);
 	uint32_t ih_ring_entry[KFD_MAX_RING_ENTRY_SIZE];
+	long start_jiffies = jiffies;
 
 	if (dev->device_info.ih_ring_entry_size > sizeof(ih_ring_entry)) {
 		dev_err_once(dev->adev->dev, "Ring entry too small\n");
 		return;
 	}
 
-	while (dequeue_ih_ring_entry(dev, ih_ring_entry))
+	while (dequeue_ih_ring_entry(dev, ih_ring_entry)) {
 		dev->device_info.event_interrupt_class->interrupt_wq(dev,
 								ih_ring_entry);
+		if (jiffies - start_jiffies > HZ) {
+			/* If we spent more than a second processing signals,
+			 * reschedule the worker to avoid soft-lockup warnings
+			 */
+			queue_work(dev->ih_wq, &dev->interrupt_work);
+			break;
+		}
+	}
 }
 
 bool interrupt_is_wanted(struct kfd_dev *dev,
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index 9967a73d5b0f..644f616b776f 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -706,6 +706,7 @@ struct kfd_process_device {
 	/* VM context for GPUVM allocations */
 	struct file *drm_file;
 	void *drm_priv;
+	atomic64_t tlb_seq;
 
 	/* GPUVM allocations storage */
 	struct idr alloc_idr;
@@ -1016,6 +1017,8 @@ int kfd_topology_add_device(struct kfd_dev *gpu);
 int kfd_topology_remove_device(struct kfd_dev *gpu);
 struct kfd_topology_device *kfd_topology_device_by_proximity_domain(
 		uint32_t proximity_domain);
+struct kfd_topology_device *kfd_topology_device_by_proximity_domain_no_lock(
+		uint32_t proximity_domain);
 struct kfd_topology_device *kfd_topology_device_by_id(uint32_t gpu_id);
 struct kfd_dev *kfd_device_by_id(uint32_t gpu_id);
 struct kfd_dev *kfd_device_by_pci_dev(const struct pci_dev *pdev);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index 59c04b2d383b..9e82d7aa67fa 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -722,7 +722,7 @@ static int kfd_process_alloc_gpuvm(struct kfd_process_device *pdd,
 		goto err_alloc_mem;
 
 	err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(kdev->adev, *mem,
-			pdd->drm_priv, NULL);
+			pdd->drm_priv);
 	if (err)
 		goto err_map_mem;
 
@@ -1560,6 +1560,7 @@ int kfd_process_device_init_vm(struct kfd_process_device *pdd,
 		return ret;
 	}
 	pdd->drm_priv = drm_file->private_data;
+	atomic64_set(&pdd->tlb_seq, 0);
 
 	ret = kfd_process_device_reserve_ib_mem(pdd);
 	if (ret)
@@ -1949,8 +1950,18 @@ int kfd_reserved_mem_mmap(struct kfd_dev *dev, struct kfd_process *process,
 
 void kfd_flush_tlb(struct kfd_process_device *pdd, enum TLB_FLUSH_TYPE type)
 {
+	struct amdgpu_vm *vm = drm_priv_to_vm(pdd->drm_priv);
+	uint64_t tlb_seq = amdgpu_vm_tlb_seq(vm);
 	struct kfd_dev *dev = pdd->dev;
 
+	/*
+	 * It can be that we race and lose here, but that is extremely unlikely
+	 * and the worst thing which could happen is that we flush the changes
+	 * into the TLB once more which is harmless.
+	 */
+	if (atomic64_xchg(&pdd->tlb_seq, tlb_seq) == tlb_seq)
+		return;
+
 	if (dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) {
 		/* Nothing to flush until a VMID is assigned, which
 		 * only happens when the first queue is created.
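The kfd_flush_tlb() hunk above is the core of the GPUVM TLB handling rework: the VM bumps a sequence number whenever its page tables change, each device remembers the last value it flushed for in pdd->tlb_seq, and atomic64_xchg() lets redundant flushes be skipped. A standalone sketch of that deduplication, using C11 atomics in place of amdgpu_vm_tlb_seq() and atomic64_xchg() (stand-in names, not the kernel API):

/* Sketch: sequence-number based TLB-flush deduplication. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint64_t vm_tlb_seq;	/* bumped on page-table updates */
static _Atomic uint64_t pdd_tlb_seq;	/* last value this device flushed for */

static bool flush_tlb_if_needed(void)
{
	uint64_t seq = atomic_load(&vm_tlb_seq);

	/* Swap in the current sequence number; if it did not change since
	 * the last flush, skip. Racing callers may each flush once, which
	 * is harmless, as the patch comment notes.
	 */
	if (atomic_exchange(&pdd_tlb_seq, seq) == seq)
		return false;
	return true;	/* the real code issues the flush here */
}

int main(void)
{
	atomic_fetch_add(&vm_tlb_seq, 1);		/* page tables changed */
	printf("flush: %d\n", flush_tlb_if_needed());	/* 1: seq moved */
	printf("flush: %d\n", flush_tlb_if_needed());	/* 0: deduplicated */
	return 0;
}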
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index b3fc3e958227..11b395b90a3d 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -1188,9 +1188,9 @@ svm_range_unmap_from_gpu(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 
 	pr_debug("[0x%llx 0x%llx]\n", start, last);
 
-	return amdgpu_vm_bo_update_mapping(adev, adev, vm, false, true, NULL,
-					   start, last, init_pte_value, 0,
-					   NULL, NULL, fence, NULL);
+	return amdgpu_vm_update_range(adev, vm, false, true, true, NULL, start,
+				      last, init_pte_value, 0, 0, NULL, NULL,
+				      fence);
 }
 
 static int
@@ -1243,7 +1243,6 @@ svm_range_map_to_gpu(struct kfd_process_device *pdd, struct svm_range *prange,
 {
 	struct amdgpu_device *adev = pdd->dev->adev;
 	struct amdgpu_vm *vm = drm_priv_to_vm(pdd->drm_priv);
-	bool table_freed = false;
 	uint64_t pte_flags;
 	unsigned long last_start;
 	int last_domain;
@@ -1278,13 +1277,12 @@ svm_range_map_to_gpu(struct kfd_process_device *pdd, struct svm_range *prange,
 			 (last_domain == SVM_RANGE_VRAM_DOMAIN) ? 1 : 0,
 			 pte_flags);
 
-		r = amdgpu_vm_bo_update_mapping(adev, bo_adev, vm, false, false,
-						NULL, last_start,
-						prange->start + i, pte_flags,
-						last_start - prange->start,
-						NULL, dma_addr,
-						&vm->last_update,
-						&table_freed);
+		r = amdgpu_vm_update_range(adev, vm, false, false, false, NULL,
+					   last_start, prange->start + i,
+					   pte_flags,
+					   last_start - prange->start,
+					   bo_adev ? bo_adev->vm_manager.vram_base_offset : 0,
+					   NULL, dma_addr, &vm->last_update);
 
 		for (j = last_start - prange->start; j <= i; j++)
 			dma_addr[j] |= last_domain;
@@ -1306,8 +1304,6 @@ svm_range_map_to_gpu(struct kfd_process_device *pdd, struct svm_range *prange,
 	if (fence)
 		*fence = dma_fence_get(vm->last_update);
 
-	if (table_freed)
-		kfd_flush_tlb(pdd, TLB_FLUSH_LEGACY);
 out:
 	return r;
 }
@@ -1363,6 +1359,8 @@ svm_range_map_to_gpus(struct svm_range *prange, unsigned long offset,
 				break;
 			}
 		}
+
+		kfd_flush_tlb(pdd, TLB_FLUSH_LEGACY);
 	}
 
 	return r;
 }
@@ -1372,7 +1370,7 @@ struct svm_validate_context {
 	struct kfd_process *process;
 	struct svm_range *prange;
 	bool intr;
-	unsigned long bitmap[MAX_GPU_INSTANCE];
+	DECLARE_BITMAP(bitmap, MAX_GPU_INSTANCE);
 	struct ttm_validate_buffer tv[MAX_GPU_INSTANCE];
 	struct list_head validate_list;
 	struct ww_acquire_ctx ticket;
@@ -2687,11 +2685,6 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
 		pr_debug("kfd process not founded pasid 0x%x\n", pasid);
 		return 0;
 	}
-	if (!p->xnack_enabled) {
-		pr_debug("XNACK not enabled for pasid 0x%x\n", pasid);
-		r = -EFAULT;
-		goto out;
-	}
 	svms = &p->svms;
 
 	pr_debug("restoring svms 0x%p fault address 0x%llx\n", svms, addr);
@@ -2702,6 +2695,12 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
 		goto out;
 	}
 
+	if (!p->xnack_enabled) {
+		pr_debug("XNACK not enabled for pasid 0x%x\n", pasid);
+		r = -EFAULT;
+		goto out;
+	}
+
 	/* p->lead_thread is available as kfd_process_wq_release flush the work
 	 * before releasing task ref.
 	 */
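With the table_freed plumbing removed, svm_range_map_to_gpus() above no longer guesses when a flush is needed: it requests one after mapping on every GPU and relies on the tlb_seq check in kfd_flush_tlb() to turn redundant requests into no-ops. A toy sketch of that control flow, with stub functions standing in for the real per-device map and flush calls (illustrative only):

#include <stdio.h>

/* Stubs for the per-device operations. */
static int map_on_gpu(int gpuidx)
{
	printf("map range on GPU %d\n", gpuidx);
	return 0;
}

static void request_tlb_flush(int gpuidx)
{
	/* In the kernel this is kfd_flush_tlb(), which dedups via tlb_seq. */
	printf("request TLB flush on GPU %d\n", gpuidx);
}

static int map_to_gpus(const int *gpus, int n)
{
	for (int i = 0; i < n; i++) {
		int r = map_on_gpu(gpus[i]);

		if (r)
			return r;	/* stop on the first mapping failure */
		request_tlb_flush(gpus[i]);
	}
	return 0;
}

int main(void)
{
	int gpus[] = { 0, 1 };

	return map_to_gpus(gpus, 2);
}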
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index 3bdcae239bc0..8b7710b4d3ed 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -46,22 +46,32 @@ static struct list_head topology_device_list;
 static struct kfd_system_properties sys_props;
 
 static DECLARE_RWSEM(topology_lock);
-static atomic_t topology_crat_proximity_domain;
+static uint32_t topology_crat_proximity_domain;
 
-struct kfd_topology_device *kfd_topology_device_by_proximity_domain(
+struct kfd_topology_device *kfd_topology_device_by_proximity_domain_no_lock(
 						uint32_t proximity_domain)
 {
 	struct kfd_topology_device *top_dev;
 	struct kfd_topology_device *device = NULL;
 
-	down_read(&topology_lock);
-
 	list_for_each_entry(top_dev, &topology_device_list, list)
 		if (top_dev->proximity_domain == proximity_domain) {
 			device = top_dev;
 			break;
 		}
 
+	return device;
+}
+
+struct kfd_topology_device *kfd_topology_device_by_proximity_domain(
+						uint32_t proximity_domain)
+{
+	struct kfd_topology_device *device = NULL;
+
+	down_read(&topology_lock);
+
+	device = kfd_topology_device_by_proximity_domain_no_lock(
+							proximity_domain);
 	up_read(&topology_lock);
 
 	return device;
@@ -1060,7 +1070,7 @@ int kfd_topology_init(void)
 	down_write(&topology_lock);
 	kfd_topology_update_device_list(&temp_topology_device_list,
 					&topology_device_list);
-	atomic_set(&topology_crat_proximity_domain, sys_props.num_devices-1);
+	topology_crat_proximity_domain = sys_props.num_devices-1;
 	ret = kfd_topology_update_sysfs();
 	up_write(&topology_lock);
 
@@ -1295,8 +1305,6 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
 
 	pr_debug("Adding new GPU (ID: 0x%x) to topology\n", gpu_id);
 
-	proximity_domain = atomic_inc_return(&topology_crat_proximity_domain);
-
 	/* Include the CPU in xGMI hive if xGMI connected by assigning it the hive ID. */
 	if (gpu->hive_id && gpu->adev->gmc.xgmi.connected_to_cpu) {
 		struct kfd_topology_device *top_dev;
@@ -1321,12 +1329,16 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
 	 */
 	dev = kfd_assign_gpu(gpu);
 	if (!dev) {
+		down_write(&topology_lock);
+		proximity_domain = ++topology_crat_proximity_domain;
+
 		res = kfd_create_crat_image_virtual(&crat_image, &image_size,
 						    COMPUTE_UNIT_GPU, gpu,
 						    proximity_domain);
 		if (res) {
 			pr_err("Error creating VCRAT for GPU (ID: 0x%x)\n",
 			       gpu_id);
+			topology_crat_proximity_domain--;
 			return res;
 		}
 		res = kfd_parse_crat_table(crat_image,
@@ -1335,10 +1347,10 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
 		if (res) {
 			pr_err("Error parsing VCRAT for GPU (ID: 0x%x)\n",
 			       gpu_id);
+			topology_crat_proximity_domain--;
 			goto err;
 		}
 
-		down_write(&topology_lock);
 		kfd_topology_update_device_list(&temp_topology_device_list,
 			&topology_device_list);
 
@@ -1485,25 +1497,78 @@ err:
 	return res;
 }
 
+/**
+ * kfd_topology_update_io_links() - Update IO links after device removal.
+ * @proximity_domain: Proximity domain value of the dev being removed.
+ *
+ * The topology list currently is arranged in increasing order of
+ * proximity domain.
+ *
+ * Two things need to be done when a device is removed:
+ * 1. All the IO links to this device need to be removed.
+ * 2. All nodes after the current device node need to move
+ *    up once this device node is removed from the topology
+ *    list. As a result, the proximity domain values for
+ *    all nodes after the node being deleted reduce by 1.
+ *    This would also cause the proximity domain values for
+ *    io links to be updated based on new proximity domain
+ *    values.
+ *
+ * Context: The caller must hold write topology_lock.
+ */
+static void kfd_topology_update_io_links(int proximity_domain)
+{
+	struct kfd_topology_device *dev;
+	struct kfd_iolink_properties *iolink, *tmp;
+
+	list_for_each_entry(dev, &topology_device_list, list) {
+		if (dev->proximity_domain > proximity_domain)
+			dev->proximity_domain--;
+
+		list_for_each_entry_safe(iolink, tmp, &dev->io_link_props, list) {
+			/*
+			 * If there is an io link to the dev being deleted
+			 * then remove that IO link also.
+			 */
+			if (iolink->node_to == proximity_domain) {
+				list_del(&iolink->list);
+				dev->io_link_count--;
+				dev->node_props.io_links_count--;
+			} else if (iolink->node_from > proximity_domain) {
+				iolink->node_from--;
+			} else if (iolink->node_to > proximity_domain) {
+				iolink->node_to--;
+			}
+		}
+	}
+}
+
 int kfd_topology_remove_device(struct kfd_dev *gpu)
 {
 	struct kfd_topology_device *dev, *tmp;
 	uint32_t gpu_id;
 	int res = -ENODEV;
+	int i = 0;
 
 	down_write(&topology_lock);
 
-	list_for_each_entry_safe(dev, tmp, &topology_device_list, list)
+	list_for_each_entry_safe(dev, tmp, &topology_device_list, list) {
 		if (dev->gpu == gpu) {
 			gpu_id = dev->gpu_id;
 			kfd_remove_sysfs_node_entry(dev);
 			kfd_release_topology_device(dev);
 			sys_props.num_devices--;
+			kfd_topology_update_io_links(i);
+			topology_crat_proximity_domain = sys_props.num_devices-1;
+			sys_props.generation_count++;
 			res = 0;
 			if (kfd_topology_update_sysfs() < 0)
 				kfd_topology_release_sysfs();
 			break;
 		}
+		i++;
+	}
 
 	up_write(&topology_lock);
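kfd_topology_update_io_links() above compacts proximity domains when a node leaves the topology: IO links pointing at the removed node are dropped, and every node or link endpoint numbered above it shifts down by one. A small standalone illustration of that renumbering rule (types and names are made up for the sketch, and it adjusts both endpoints of a link independently, where the patch walks them in an else-if chain):

#include <stdio.h>

struct io_link {
	int node_from;
	int node_to;
	int valid;
};

/* Renumber after removing the node at proximity domain 'removed'. */
static void update_io_links(struct io_link *links, int n, int removed)
{
	for (int i = 0; i < n; i++) {
		if (!links[i].valid)
			continue;
		if (links[i].node_to == removed) {
			links[i].valid = 0;	/* link to the dead node: drop it */
			continue;
		}
		if (links[i].node_from > removed)
			links[i].node_from--;	/* later nodes move up by one */
		if (links[i].node_to > removed)
			links[i].node_to--;
	}
}

int main(void)
{
	struct io_link links[] = { { 0, 2, 1 }, { 3, 1, 1 }, { 1, 3, 1 } };

	update_io_links(links, 3, 2);	/* remove proximity domain 2 */
	for (int i = 0; i < 3; i++)
		if (links[i].valid)
			printf("link %d -> %d\n", links[i].node_from, links[i].node_to);
	return 0;
}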