Diffstat (limited to 'drivers/gpu/drm/amd/amdkfd/kfd_process.c')
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_process.c | 786 +++++++++++-------
1 file changed, 523 insertions(+), 263 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index 72df6286e240..a085faac9fe1 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -28,7 +28,6 @@
#include <linux/sched/task.h>
#include <linux/mmu_context.h>
#include <linux/slab.h>
-#include <linux/amd-iommu.h>
#include <linux/notifier.h>
#include <linux/compat.h>
#include <linux/mman.h>
@@ -36,21 +35,22 @@
#include <linux/pm_runtime.h>
#include "amdgpu_amdkfd.h"
#include "amdgpu.h"
+#include "amdgpu_reset.h"
struct mm_struct;
#include "kfd_priv.h"
#include "kfd_device_queue_manager.h"
-#include "kfd_iommu.h"
#include "kfd_svm.h"
#include "kfd_smi_events.h"
+#include "kfd_debug.h"
/*
* List of struct kfd_process (field kfd_process).
* Unique/indexed by mm_struct*
*/
DEFINE_HASHTABLE(kfd_processes_table, KFD_PROCESS_TABLE_SIZE);
-static DEFINE_MUTEX(kfd_processes_mutex);
+DEFINE_MUTEX(kfd_processes_mutex);
DEFINE_SRCU(kfd_processes_srcu);
@@ -69,7 +69,6 @@ static struct kfd_process *find_process(const struct task_struct *thread,
bool ref);
static void kfd_process_ref_release(struct kref *ref);
static struct kfd_process *create_process(const struct task_struct *thread);
-static int kfd_process_init_cwsr_apu(struct kfd_process *p, struct file *filep);
static void evict_process_worker(struct work_struct *work);
static void restore_process_worker(struct work_struct *work);
@@ -269,9 +268,12 @@ static int kfd_get_cu_occupancy(struct attribute *attr, char *buffer)
int cu_cnt;
int wave_cnt;
int max_waves_per_cu;
- struct kfd_dev *dev = NULL;
+ struct kfd_node *dev = NULL;
struct kfd_process *proc = NULL;
struct kfd_process_device *pdd = NULL;
+ int i;
+ struct kfd_cu_occupancy *cu_occupancy;
+ u32 queue_format;
pdd = container_of(attr, struct kfd_process_device, attr_cu_occupancy);
dev = pdd->dev;
@@ -281,41 +283,64 @@ static int kfd_get_cu_occupancy(struct attribute *attr, char *buffer)
cu_cnt = 0;
proc = pdd->process;
if (pdd->qpd.queue_count == 0) {
- pr_debug("Gpu-Id: %d has no active queues for process %d\n",
- dev->id, proc->pasid);
+ pr_debug("Gpu-Id: %d has no active queues for process pid %d\n",
+ dev->id, (int)proc->lead_thread->pid);
return snprintf(buffer, PAGE_SIZE, "%d\n", cu_cnt);
}
/* Collect wave count from device if it supports */
wave_cnt = 0;
max_waves_per_cu = 0;
- dev->kfd2kgd->get_cu_occupancy(dev->adev, proc->pasid, &wave_cnt,
- &max_waves_per_cu);
+
+ cu_occupancy = kcalloc(AMDGPU_MAX_QUEUES, sizeof(*cu_occupancy), GFP_KERNEL);
+ if (!cu_occupancy)
+ return -ENOMEM;
+
+ /*
+ * For GFX 9.4.3, fetch the CU occupancy from the first XCC in the partition.
+ * For AQL queues, because of cooperative dispatch we multiply the wave count
+ * by the number of XCCs in the partition to get the total wave count across
+ * all XCCs in the partition.
+ * For PM4 queues, there is no cooperative dispatch, so wave_cnt stays as it is.
+ */
+ dev->kfd2kgd->get_cu_occupancy(dev->adev, cu_occupancy,
+ &max_waves_per_cu, ffs(dev->xcc_mask) - 1);
+
+ for (i = 0; i < AMDGPU_MAX_QUEUES; i++) {
+ if (cu_occupancy[i].wave_cnt != 0 &&
+ kfd_dqm_is_queue_in_process(dev->dqm, &pdd->qpd,
+ cu_occupancy[i].doorbell_off,
+ &queue_format)) {
+ if (unlikely(queue_format == KFD_QUEUE_FORMAT_PM4))
+ wave_cnt += cu_occupancy[i].wave_cnt;
+ else
+ wave_cnt += (NUM_XCC(dev->xcc_mask) *
+ cu_occupancy[i].wave_cnt);
+ }
+ }
/* Translate wave count to number of compute units */
cu_cnt = (wave_cnt + (max_waves_per_cu - 1)) / max_waves_per_cu;
+ kfree(cu_occupancy);
return snprintf(buffer, PAGE_SIZE, "%d\n", cu_cnt);
}
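/*
 * [Editorial aside, not part of the diff] The wave-to-CU translation above
 * is an open-coded ceiling division. A minimal sketch of the same math,
 * assuming max_waves_per_cu > 0; the kernel's DIV_ROUND_UP() from
 * <linux/math.h> expresses it directly:
 */
static int waves_to_cus(int wave_cnt, int max_waves_per_cu)
{
	/* e.g. 33 waves at 32 waves/CU rounds up to ceil(33/32) = 2 CUs */
	return DIV_ROUND_UP(wave_cnt, max_waves_per_cu);
}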
static ssize_t kfd_procfs_show(struct kobject *kobj, struct attribute *attr,
char *buffer)
{
- if (strcmp(attr->name, "pasid") == 0) {
- struct kfd_process *p = container_of(attr, struct kfd_process,
- attr_pasid);
-
- return snprintf(buffer, PAGE_SIZE, "%d\n", p->pasid);
- } else if (strncmp(attr->name, "vram_", 5) == 0) {
+ if (strcmp(attr->name, "pasid") == 0)
+ return snprintf(buffer, PAGE_SIZE, "%d\n", 0);
+ else if (strncmp(attr->name, "vram_", 5) == 0) {
struct kfd_process_device *pdd = container_of(attr, struct kfd_process_device,
attr_vram);
- return snprintf(buffer, PAGE_SIZE, "%llu\n", READ_ONCE(pdd->vram_usage));
+ return snprintf(buffer, PAGE_SIZE, "%llu\n", atomic64_read(&pdd->vram_usage));
} else if (strncmp(attr->name, "sdma_", 5) == 0) {
struct kfd_process_device *pdd = container_of(attr, struct kfd_process_device,
attr_sdma);
struct kfd_sdma_activity_handler_workarea sdma_activity_work_handler;
- INIT_WORK(&sdma_activity_work_handler.sdma_activity_work,
- kfd_sdma_activity_worker);
+ INIT_WORK_ONSTACK(&sdma_activity_work_handler.sdma_activity_work,
+ kfd_sdma_activity_worker);
sdma_activity_work_handler.pdd = pdd;
sdma_activity_work_handler.sdma_activity_counter = 0;
@@ -323,6 +348,7 @@ static ssize_t kfd_procfs_show(struct kobject *kobj, struct attribute *attr,
schedule_work(&sdma_activity_work_handler.sdma_activity_work);
flush_work(&sdma_activity_work_handler.sdma_activity_work);
+ destroy_work_on_stack(&sdma_activity_work_handler.sdma_activity_work);
return snprintf(buffer, PAGE_SIZE, "%llu\n",
(sdma_activity_work_handler.sdma_activity_counter)/
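/*
 * [Editorial aside] The sysfs handler above uses the on-stack work item
 * pattern: INIT_WORK_ONSTACK() must be paired with destroy_work_on_stack()
 * so that CONFIG_DEBUG_OBJECTS_WORK does not flag the stack object as
 * leaked, and flush_work() must complete before the frame unwinds. A
 * minimal sketch of that lifecycle with a hypothetical payload:
 */
struct onstack_ctx {
	struct work_struct work;
	u64 result;
};

static void onstack_fn(struct work_struct *work)
{
	struct onstack_ctx *ctx = container_of(work, struct onstack_ctx, work);

	ctx->result = 42;	/* stand-in for the real computation */
}

static u64 run_onstack_work(void)
{
	struct onstack_ctx ctx = { .result = 0 };

	INIT_WORK_ONSTACK(&ctx.work, onstack_fn);
	schedule_work(&ctx.work);
	flush_work(&ctx.work);	/* must finish before ctx goes out of scope */
	destroy_work_on_stack(&ctx.work);
	return ctx.result;
}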
@@ -344,7 +370,7 @@ static const struct sysfs_ops kfd_procfs_ops = {
.show = kfd_procfs_show,
};
-static struct kobj_type procfs_type = {
+static const struct kobj_type procfs_type = {
.release = kfd_procfs_kobj_release,
.sysfs_ops = &kfd_procfs_ops,
};
@@ -469,7 +495,7 @@ static const struct sysfs_ops procfs_queue_ops = {
.show = kfd_procfs_queue_show,
};
-static struct kobj_type procfs_queue_type = {
+static const struct kobj_type procfs_queue_type = {
.sysfs_ops = &procfs_queue_ops,
.default_groups = procfs_queue_groups,
};
@@ -478,7 +504,7 @@ static const struct sysfs_ops procfs_stats_ops = {
.show = kfd_procfs_stats_show,
};
-static struct kobj_type procfs_stats_type = {
+static const struct kobj_type procfs_stats_type = {
.sysfs_ops = &procfs_stats_ops,
.release = kfd_procfs_kobj_release,
};
@@ -487,7 +513,7 @@ static const struct sysfs_ops sysfs_counters_ops = {
.show = kfd_sysfs_counters_show,
};
-static struct kobj_type sysfs_counters_type = {
+static const struct kobj_type sysfs_counters_type = {
.sysfs_ops = &sysfs_counters_ops,
.release = kfd_procfs_kobj_release,
};
@@ -666,7 +692,8 @@ int kfd_process_create_wq(void)
if (!kfd_process_wq)
kfd_process_wq = alloc_workqueue("kfd_process_wq", 0, 0);
if (!kfd_restore_wq)
- kfd_restore_wq = alloc_ordered_workqueue("kfd_restore_wq", 0);
+ kfd_restore_wq = alloc_ordered_workqueue("kfd_restore_wq",
+ WQ_FREEZABLE);
if (!kfd_process_wq || !kfd_restore_wq) {
kfd_process_destroy_wq();
@@ -691,7 +718,7 @@ void kfd_process_destroy_wq(void)
static void kfd_process_free_gpuvm(struct kgd_mem *mem,
struct kfd_process_device *pdd, void **kptr)
{
- struct kfd_dev *dev = pdd->dev;
+ struct kfd_node *dev = pdd->dev;
if (kptr && *kptr) {
amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(mem);
@@ -713,7 +740,7 @@ static int kfd_process_alloc_gpuvm(struct kfd_process_device *pdd,
uint64_t gpu_va, uint32_t size,
uint32_t flags, struct kgd_mem **mem, void **kptr)
{
- struct kfd_dev *kdev = pdd->dev;
+ struct kfd_node *kdev = pdd->dev;
int err;
err = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(kdev->adev, gpu_va, size,
@@ -798,18 +825,27 @@ static void kfd_process_device_destroy_ib_mem(struct kfd_process_device *pdd)
kfd_process_free_gpuvm(qpd->ib_mem, pdd, &qpd->ib_kaddr);
}
-struct kfd_process *kfd_create_process(struct file *filep)
+struct kfd_process *kfd_create_process(struct task_struct *thread)
{
struct kfd_process *process;
- struct task_struct *thread = current;
int ret;
- if (!thread->mm)
+ if (!(thread->mm && mmget_not_zero(thread->mm)))
return ERR_PTR(-EINVAL);
/* Only the pthreads threading model is supported. */
- if (thread->group_leader->mm != thread->mm)
+ if (thread->group_leader->mm != thread->mm) {
+ mmput(thread->mm);
return ERR_PTR(-EINVAL);
+ }
+
+ /* If the process just called exec(3), it is possible that the
+ * cleanup of the kfd_process (following the release of the mm
+ * of the old process image) is still in the cleanup work queue.
+ * Make sure to drain any pending jobs before trying to recreate
+ * any resources for this process.
+ */
+ flush_workqueue(kfd_process_wq);
/*
* take kfd processes mutex before starting of process creation
@@ -818,8 +854,16 @@ struct kfd_process *kfd_create_process(struct file *filep)
*/
mutex_lock(&kfd_processes_mutex);
- /* A prior open of /dev/kfd could have already created the process. */
- process = find_process(thread, false);
+ if (kfd_is_locked(NULL)) {
+ pr_debug("KFD is locked! Cannot create process\n");
+ process = ERR_PTR(-EINVAL);
+ goto out;
+ }
+
+ /* A prior open of /dev/kfd could have already created the process.
+ * find_process will increase process kref in this case
+ */
+ process = find_process(thread, true);
if (process) {
pr_debug("Process already found\n");
} else {
@@ -827,10 +871,6 @@ struct kfd_process *kfd_create_process(struct file *filep)
if (IS_ERR(process))
goto out;
- ret = kfd_process_init_cwsr_apu(process, filep);
- if (ret)
- goto out_destroy;
-
if (!procfs.kobj)
goto out;
@@ -859,21 +899,16 @@ struct kfd_process *kfd_create_process(struct file *filep)
kfd_procfs_add_sysfs_stats(process);
kfd_procfs_add_sysfs_files(process);
kfd_procfs_add_sysfs_counters(process);
+
+ kfd_debugfs_add_process(process);
+
+ init_waitqueue_head(&process->wait_irq_drain);
}
out:
- if (!IS_ERR(process))
- kref_get(&process->ref);
mutex_unlock(&kfd_processes_mutex);
+ mmput(thread->mm);
return process;
-
-out_destroy:
- hash_del_rcu(&process->kfd_processes);
- mutex_unlock(&kfd_processes_mutex);
- synchronize_srcu(&kfd_processes_srcu);
- /* kfd_process_free_notifier will trigger the cleanup */
- mmu_notifier_put(&process->mmu_notifier);
- return ERR_PTR(ret);
}
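/*
 * [Editorial aside] With the signature change above, kfd_create_process()
 * takes the task explicitly instead of assuming current, and the returned
 * process carries a counted reference (taken by find_process(thread, true)
 * or by create_process()) that the caller must eventually drop with
 * kfd_unref_process(). A hedged sketch of a caller, with hypothetical
 * error handling; the real call site is the /dev/kfd open path:
 */
static int example_open(struct inode *inode, struct file *filep)
{
	struct kfd_process *process = kfd_create_process(current);

	if (IS_ERR(process))
		return PTR_ERR(process);

	/* ... stash process; drop the ref with kfd_unref_process() on release ... */
	return 0;
}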
struct kfd_process *kfd_get_process(const struct task_struct *thread)
@@ -982,7 +1017,7 @@ static void kfd_process_device_free_bos(struct kfd_process_device *pdd)
static void kfd_process_kunmap_signal_bo(struct kfd_process *p)
{
struct kfd_process_device *pdd;
- struct kfd_dev *kdev;
+ struct kfd_node *kdev;
void *mem;
kdev = kfd_device_by_id(GET_GPU_ID(p->signal_handle));
@@ -1021,40 +1056,39 @@ static void kfd_process_destroy_pdds(struct kfd_process *p)
for (i = 0; i < p->n_pdds; i++) {
struct kfd_process_device *pdd = p->pdds[i];
- pr_debug("Releasing pdd (topology id %d) for process (pasid 0x%x)\n",
- pdd->dev->id, p->pasid);
+ kfd_smi_event_process(pdd, false);
+ pr_debug("Releasing pdd (topology id %d, for pid %d)\n",
+ pdd->dev->id, p->lead_thread->pid);
kfd_process_device_destroy_cwsr_dgpu(pdd);
kfd_process_device_destroy_ib_mem(pdd);
- if (pdd->drm_file) {
- amdgpu_amdkfd_gpuvm_release_process_vm(
- pdd->dev->adev, pdd->drm_priv);
+ if (pdd->drm_file)
fput(pdd->drm_file);
- }
if (pdd->qpd.cwsr_kaddr && !pdd->qpd.cwsr_base)
free_pages((unsigned long)pdd->qpd.cwsr_kaddr,
get_order(KFD_CWSR_TBA_TMA_SIZE));
- bitmap_free(pdd->qpd.doorbell_bitmap);
idr_destroy(&pdd->alloc_idr);
- kfd_free_process_doorbells(pdd->dev, pdd->doorbell_index);
+ kfd_free_process_doorbells(pdd->dev->kfd, pdd);
- if (pdd->dev->shared_resources.enable_mes)
+ if (pdd->dev->kfd->shared_resources.enable_mes &&
+ pdd->proc_ctx_cpu_ptr)
amdgpu_amdkfd_free_gtt_mem(pdd->dev->adev,
- pdd->proc_ctx_bo);
+ &pdd->proc_ctx_bo);
/*
* before destroying pdd, make sure to report availability
* for auto suspend
*/
if (pdd->runtime_inuse) {
- pm_runtime_mark_last_busy(adev_to_drm(pdd->dev->adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(pdd->dev->adev)->dev);
pdd->runtime_inuse = false;
}
+ atomic_dec(&pdd->dev->kfd->kfd_processes_count);
+
kfree(pdd);
p->pdds[i] = NULL;
}
@@ -1105,6 +1139,17 @@ static void kfd_process_remove_sysfs(struct kfd_process *p)
p->kobj = NULL;
}
+/*
+ * If any GPU is undergoing a reset, wait for the reset to complete.
+ */
+static void kfd_process_wait_gpu_reset_complete(struct kfd_process *p)
+{
+ int i;
+
+ for (i = 0; i < p->n_pdds; i++)
+ flush_workqueue(p->pdds[i]->dev->adev->reset_domain->wq);
+}
+
/* No process locking is needed in this function, because the process
* is not findable any more. We must assume that no other thread is
* using it any more, otherwise we couldn't safely free the process
@@ -1114,29 +1159,34 @@ static void kfd_process_wq_release(struct work_struct *work)
{
struct kfd_process *p = container_of(work, struct kfd_process,
release_work);
+ struct dma_fence *ef;
- kfd_process_dequeue_from_all_devices(p);
- pqm_uninit(&p->pqm);
+ /*
+ * If a GPU is in reset, user queues may still be running; wait for the reset to complete.
+ */
+ kfd_process_wait_gpu_reset_complete(p);
/* Signal the eviction fence after user mode queues are
* destroyed. This allows any BOs to be freed without
* triggering pointless evictions or waiting for fences.
*/
- dma_fence_signal(p->ef);
+ synchronize_rcu();
+ ef = rcu_access_pointer(p->ef);
+ if (ef)
+ dma_fence_signal(ef);
kfd_process_remove_sysfs(p);
- kfd_iommu_unbind_process(p);
+ kfd_debugfs_remove_process(p);
kfd_process_kunmap_signal_bo(p);
kfd_process_free_outstanding_kfd_bos(p);
svm_range_list_fini(p);
kfd_process_destroy_pdds(p);
- dma_fence_put(p->ef);
+ dma_fence_put(ef);
kfd_event_free_process(p);
- kfd_pasid_free(p->pasid);
mutex_destroy(&p->mutex);
put_task_struct(p->lead_thread);
@@ -1154,10 +1204,8 @@ static void kfd_process_ref_release(struct kref *ref)
static struct mmu_notifier *kfd_process_alloc_notifier(struct mm_struct *mm)
{
- int idx = srcu_read_lock(&kfd_processes_srcu);
- struct kfd_process *p = find_process_by_mm(mm);
-
- srcu_read_unlock(&kfd_processes_srcu, idx);
+ /* This increments p->ref counter if kfd process p exists */
+ struct kfd_process *p = kfd_lookup_process_by_mm(mm);
return p ? &p->mmu_notifier : ERR_PTR(-ESRCH);
}
@@ -1167,6 +1215,54 @@ static void kfd_process_free_notifier(struct mmu_notifier *mn)
kfd_unref_process(container_of(mn, struct kfd_process, mmu_notifier));
}
+static void kfd_process_notifier_release_internal(struct kfd_process *p)
+{
+ int i;
+
+ cancel_delayed_work_sync(&p->eviction_work);
+ cancel_delayed_work_sync(&p->restore_work);
+
+ /*
+ * Dequeue and destroy user queues. It is not safe for the GPU to access
+ * system memory after the mmu release notifier callback returns, because
+ * exit_mmap frees process memory afterwards.
+ */
+ kfd_process_dequeue_from_all_devices(p);
+ pqm_uninit(&p->pqm);
+
+ for (i = 0; i < p->n_pdds; i++) {
+ struct kfd_process_device *pdd = p->pdds[i];
+
+ /* re-enable GFX OFF since runtime enable with ttmp setup disabled it. */
+ if (!kfd_dbg_is_rlc_restore_supported(pdd->dev) && p->runtime_info.ttmp_setup)
+ amdgpu_gfx_off_ctrl(pdd->dev->adev, true);
+ }
+
+ /* Indicate to other users that MM is no longer valid */
+ p->mm = NULL;
+ kfd_dbg_trap_disable(p);
+
+ if (atomic_read(&p->debugged_process_count) > 0) {
+ struct kfd_process *target;
+ unsigned int temp;
+ int idx = srcu_read_lock(&kfd_processes_srcu);
+
+ hash_for_each_rcu(kfd_processes_table, temp, target, kfd_processes) {
+ if (target->debugger_process && target->debugger_process == p) {
+ mutex_lock_nested(&target->mutex, 1);
+ kfd_dbg_trap_disable(target);
+ mutex_unlock(&target->mutex);
+ if (atomic_read(&p->debugged_process_count) == 0)
+ break;
+ }
+ }
+
+ srcu_read_unlock(&kfd_processes_srcu, idx);
+ }
+
+ mmu_notifier_put(&p->mmu_notifier);
+}
+
static void kfd_process_notifier_release(struct mmu_notifier *mn,
struct mm_struct *mm)
{
@@ -1181,17 +1277,22 @@ static void kfd_process_notifier_release(struct mmu_notifier *mn,
return;
mutex_lock(&kfd_processes_mutex);
+ /*
+ * Do an early return if the table is empty.
+ *
+ * This could potentially happen if this function is called concurrently
+ * by mmu_notifier and by kfd_cleanup_processes.
+ */
+ if (hash_empty(kfd_processes_table)) {
+ mutex_unlock(&kfd_processes_mutex);
+ return;
+ }
hash_del_rcu(&p->kfd_processes);
mutex_unlock(&kfd_processes_mutex);
synchronize_srcu(&kfd_processes_srcu);
- cancel_delayed_work_sync(&p->eviction_work);
- cancel_delayed_work_sync(&p->restore_work);
-
- /* Indicate to other users that MM is no longer valid */
- p->mm = NULL;
-
- mmu_notifier_put(&p->mmu_notifier);
+ kfd_process_notifier_release_internal(p);
}
static const struct mmu_notifier_ops kfd_process_mmu_notifier_ops = {
@@ -1200,16 +1301,56 @@ static const struct mmu_notifier_ops kfd_process_mmu_notifier_ops = {
.free_notifier = kfd_process_free_notifier,
};
-static int kfd_process_init_cwsr_apu(struct kfd_process *p, struct file *filep)
+/*
+ * This code handles the case when the driver is being unloaded before all
+ * mm_structs are released. We need to safely free the kfd_processes and
+ * avoid race conditions with the mmu_notifier that might try to free them.
+ */
+void kfd_cleanup_processes(void)
+{
+ struct kfd_process *p;
+ struct hlist_node *p_temp;
+ unsigned int temp;
+ HLIST_HEAD(cleanup_list);
+
+ /*
+ * Move all remaining kfd_process from the process table to a
+ * temp list for processing. Once done, the callback from mmu_notifier
+ * release will not see the kfd_process in the table and will return
+ * early, avoiding double-free issues.
+ */
+ mutex_lock(&kfd_processes_mutex);
+ hash_for_each_safe(kfd_processes_table, temp, p_temp, p, kfd_processes) {
+ hash_del_rcu(&p->kfd_processes);
+ synchronize_srcu(&kfd_processes_srcu);
+ hlist_add_head(&p->kfd_processes, &cleanup_list);
+ }
+ mutex_unlock(&kfd_processes_mutex);
+
+ hlist_for_each_entry_safe(p, p_temp, &cleanup_list, kfd_processes)
+ kfd_process_notifier_release_internal(p);
+
+ /*
+ * Ensure that all outstanding free_notifier callbacks get called, triggering
+ * the release of the kfd_process struct.
+ */
+ mmu_notifier_synchronize();
+}
+
+int kfd_process_init_cwsr_apu(struct kfd_process *p, struct file *filep)
{
unsigned long offset;
int i;
+ if (p->has_cwsr)
+ return 0;
+
for (i = 0; i < p->n_pdds; i++) {
- struct kfd_dev *dev = p->pdds[i]->dev;
+ struct kfd_node *dev = p->pdds[i]->dev;
struct qcm_process_device *qpd = &p->pdds[i]->qpd;
- if (!dev->cwsr_enabled || qpd->cwsr_kaddr || qpd->cwsr_base)
+ if (!dev->kfd->cwsr_enabled || qpd->cwsr_kaddr || qpd->cwsr_base)
continue;
offset = KFD_MMAP_TYPE_RESERVED_MEM | KFD_MMAP_GPU_ID(dev->id);
@@ -1220,25 +1361,30 @@ static int kfd_process_init_cwsr_apu(struct kfd_process *p, struct file *filep)
if (IS_ERR_VALUE(qpd->tba_addr)) {
int err = qpd->tba_addr;
- pr_err("Failure to set tba address. error %d.\n", err);
+ dev_err(dev->adev->dev,
+ "Failure to set tba address. error %d.\n", err);
qpd->tba_addr = 0;
qpd->cwsr_kaddr = NULL;
return err;
}
- memcpy(qpd->cwsr_kaddr, dev->cwsr_isa, dev->cwsr_isa_size);
+ memcpy(qpd->cwsr_kaddr, dev->kfd->cwsr_isa, dev->kfd->cwsr_isa_size);
+
+ kfd_process_set_trap_debug_flag(qpd, p->debug_trap_enabled);
qpd->tma_addr = qpd->tba_addr + KFD_CWSR_TMA_OFFSET;
pr_debug("set tba :0x%llx, tma:0x%llx, cwsr_kaddr:%p for pqm.\n",
qpd->tba_addr, qpd->tma_addr, qpd->cwsr_kaddr);
}
+ p->has_cwsr = true;
+
return 0;
}
static int kfd_process_device_init_cwsr_dgpu(struct kfd_process_device *pdd)
{
- struct kfd_dev *dev = pdd->dev;
+ struct kfd_node *dev = pdd->dev;
struct qcm_process_device *qpd = &pdd->qpd;
uint32_t flags = KFD_IOC_ALLOC_MEM_FLAGS_GTT
| KFD_IOC_ALLOC_MEM_FLAGS_NO_SUBSTITUTE
@@ -1247,7 +1393,7 @@ static int kfd_process_device_init_cwsr_dgpu(struct kfd_process_device *pdd)
void *kaddr;
int ret;
- if (!dev->cwsr_enabled || qpd->cwsr_kaddr || !qpd->cwsr_base)
+ if (!dev->kfd->cwsr_enabled || qpd->cwsr_kaddr || !qpd->cwsr_base)
return 0;
/* cwsr_base is only set for dGPU */
@@ -1260,7 +1406,10 @@ static int kfd_process_device_init_cwsr_dgpu(struct kfd_process_device *pdd)
qpd->cwsr_kaddr = kaddr;
qpd->tba_addr = qpd->cwsr_base;
- memcpy(qpd->cwsr_kaddr, dev->cwsr_isa, dev->cwsr_isa_size);
+ memcpy(qpd->cwsr_kaddr, dev->kfd->cwsr_isa, dev->kfd->cwsr_isa_size);
+
+ kfd_process_set_trap_debug_flag(&pdd->qpd,
+ pdd->process->debug_trap_enabled);
qpd->tma_addr = qpd->tba_addr + KFD_CWSR_TMA_OFFSET;
pr_debug("set tba :0x%llx, tma:0x%llx, cwsr_kaddr:%p for pqm.\n",
@@ -1271,10 +1420,10 @@ static int kfd_process_device_init_cwsr_dgpu(struct kfd_process_device *pdd)
static void kfd_process_device_destroy_cwsr_dgpu(struct kfd_process_device *pdd)
{
- struct kfd_dev *dev = pdd->dev;
+ struct kfd_node *dev = pdd->dev;
struct qcm_process_device *qpd = &pdd->qpd;
- if (!dev->cwsr_enabled || !qpd->cwsr_kaddr || !qpd->cwsr_base)
+ if (!dev->kfd->cwsr_enabled || !qpd->cwsr_kaddr || !qpd->cwsr_base)
return;
kfd_process_free_gpuvm(qpd->cwsr_mem, pdd, &qpd->cwsr_kaddr);
@@ -1318,7 +1467,7 @@ bool kfd_process_xnack_mode(struct kfd_process *p, bool supported)
* support retry.
*/
for (i = 0; i < p->n_pdds; i++) {
- struct kfd_dev *dev = p->pdds[i]->dev;
+ struct kfd_node *dev = p->pdds[i]->dev;
/* Only consider GFXv9 and higher GPUs. Older GPUs don't
* support the SVM APIs and don't need to be considered
@@ -1330,8 +1479,13 @@ bool kfd_process_xnack_mode(struct kfd_process *p, bool supported)
* per-process XNACK mode selection. But let the dev->noretry
* setting still influence the default XNACK mode.
*/
- if (supported && KFD_SUPPORT_XNACK_PER_PROCESS(dev))
+ if (supported && KFD_SUPPORT_XNACK_PER_PROCESS(dev)) {
+ if (!amdgpu_sriov_xnack_support(dev->kfd->adev)) {
+ pr_debug("SRIOV platform xnack not supported\n");
+ return false;
+ }
continue;
+ }
/* GFXv10 and later GPUs do not support shader preemption
* during page faults. This can lead to poor QoS for queue
@@ -1341,13 +1495,23 @@ bool kfd_process_xnack_mode(struct kfd_process *p, bool supported)
if (KFD_GC_VERSION(dev) >= IP_VERSION(10, 1, 1))
return false;
- if (dev->noretry)
+ if (dev->kfd->noretry)
return false;
}
return true;
}
+void kfd_process_set_trap_debug_flag(struct qcm_process_device *qpd,
+ bool enabled)
+{
+ if (qpd->cwsr_kaddr) {
+ uint64_t *tma =
+ (uint64_t *)(qpd->cwsr_kaddr + KFD_CWSR_TMA_OFFSET);
+ tma[2] = enabled;
+ }
+}
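/*
 * [Editorial aside] The helper above writes the third 64-bit word of the
 * trap memory area (TMA), which sits KFD_CWSR_TMA_OFFSET bytes into the
 * CWSR page, after the trap handler code at the trap base address (TBA).
 * A hedged sketch of the corresponding read side (hypothetical helper,
 * mirroring the layout the code above implies rather than a documented ABI):
 */
static bool trap_debug_flag_is_set(struct qcm_process_device *qpd)
{
	const uint64_t *tma;

	if (!qpd->cwsr_kaddr)
		return false;
	tma = (uint64_t *)(qpd->cwsr_kaddr + KFD_CWSR_TMA_OFFSET);
	return tma[2] != 0;	/* same slot kfd_process_set_trap_debug_flag() writes */
}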
+
/*
* On return the kfd_process is fully operational and will be freed when the
* mm is released
@@ -1375,12 +1539,11 @@ static struct kfd_process *create_process(const struct task_struct *thread)
if (err)
goto err_event_init;
process->is_32bit_user_mode = in_compat_syscall();
-
- process->pasid = kfd_pasid_alloc();
- if (process->pasid == 0) {
- err = -ENOSPC;
- goto err_alloc_pasid;
- }
+ process->debug_trap_enabled = false;
+ process->debugger_process = NULL;
+ process->exception_enable_mask = 0;
+ atomic_set(&process->debugged_process_count, 0);
+ sema_init(&process->runtime_enable_sema, 0);
err = pqm_init(&process->pqm, process);
if (err != 0)
@@ -1422,6 +1585,8 @@ static struct kfd_process *create_process(const struct task_struct *thread)
kfd_unref_process(process);
get_task_struct(process->lead_thread);
+ INIT_WORK(&process->debug_event_workarea, debug_event_write_work_handler);
+
return process;
err_register_notifier:
@@ -1433,8 +1598,6 @@ err_init_svm_range_list:
err_init_apertures:
pqm_uninit(&process->pqm);
err_process_pqm_init:
- kfd_pasid_free(process->pasid);
-err_alloc_pasid:
kfd_event_free_process(process);
err_event_init:
mutex_destroy(&process->mutex);
@@ -1443,39 +1606,7 @@ err_alloc_process:
return ERR_PTR(err);
}
-static int init_doorbell_bitmap(struct qcm_process_device *qpd,
- struct kfd_dev *dev)
-{
- unsigned int i;
- int range_start = dev->shared_resources.non_cp_doorbells_start;
- int range_end = dev->shared_resources.non_cp_doorbells_end;
-
- if (!KFD_IS_SOC15(dev))
- return 0;
-
- qpd->doorbell_bitmap = bitmap_zalloc(KFD_MAX_NUM_OF_QUEUES_PER_PROCESS,
- GFP_KERNEL);
- if (!qpd->doorbell_bitmap)
- return -ENOMEM;
-
- /* Mask out doorbells reserved for SDMA, IH, and VCN on SOC15. */
- pr_debug("reserved doorbell 0x%03x - 0x%03x\n", range_start, range_end);
- pr_debug("reserved doorbell 0x%03x - 0x%03x\n",
- range_start + KFD_QUEUE_DOORBELL_MIRROR_OFFSET,
- range_end + KFD_QUEUE_DOORBELL_MIRROR_OFFSET);
-
- for (i = 0; i < KFD_MAX_NUM_OF_QUEUES_PER_PROCESS / 2; i++) {
- if (i >= range_start && i <= range_end) {
- __set_bit(i, qpd->doorbell_bitmap);
- __set_bit(i + KFD_QUEUE_DOORBELL_MIRROR_OFFSET,
- qpd->doorbell_bitmap);
- }
- }
-
- return 0;
-}
-
-struct kfd_process_device *kfd_get_process_device_data(struct kfd_dev *dev,
+struct kfd_process_device *kfd_get_process_device_data(struct kfd_node *dev,
struct kfd_process *p)
{
int i;
@@ -1487,11 +1618,10 @@ struct kfd_process_device *kfd_get_process_device_data(struct kfd_dev *dev,
return NULL;
}
-struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
+struct kfd_process_device *kfd_create_process_device_data(struct kfd_node *dev,
struct kfd_process *p)
{
struct kfd_process_device *pdd = NULL;
- int retval = 0;
if (WARN_ON_ONCE(p->n_pdds >= MAX_GPU_INSTANCE))
return NULL;
@@ -1499,11 +1629,6 @@ struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
if (!pdd)
return NULL;
- if (init_doorbell_bitmap(&pdd->qpd, dev)) {
- pr_err("Failed to init doorbell for process\n");
- goto err_free_pdd;
- }
-
pdd->dev = dev;
INIT_LIST_HEAD(&pdd->qpd.queues_list);
INIT_LIST_HEAD(&pdd->qpd.priv_queue_list);
@@ -1515,35 +1640,24 @@ struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
pdd->bound = PDD_UNBOUND;
pdd->already_dequeued = false;
pdd->runtime_inuse = false;
- pdd->vram_usage = 0;
+ atomic64_set(&pdd->vram_usage, 0);
pdd->sdma_past_activity_counter = 0;
pdd->user_gpu_id = dev->id;
atomic64_set(&pdd->evict_duration_counter, 0);
- if (dev->shared_resources.enable_mes) {
- retval = amdgpu_amdkfd_alloc_gtt_mem(dev->adev,
- AMDGPU_MES_PROC_CTX_SIZE,
- &pdd->proc_ctx_bo,
- &pdd->proc_ctx_gpu_addr,
- &pdd->proc_ctx_cpu_ptr,
- false);
- if (retval) {
- pr_err("failed to allocate process context bo\n");
- goto err_free_pdd;
- }
- memset(pdd->proc_ctx_cpu_ptr, 0, AMDGPU_MES_PROC_CTX_SIZE);
- }
-
p->pdds[p->n_pdds++] = pdd;
+ if (kfd_dbg_is_per_vmid_supported(pdd->dev))
+ pdd->spi_dbg_override = pdd->dev->kfd2kgd->disable_debug_trap(
+ pdd->dev->adev,
+ false,
+ 0);
/* Init idr used for memory handle translation */
idr_init(&pdd->alloc_idr);
- return pdd;
+ atomic_inc(&dev->kfd->kfd_processes_count);
-err_free_pdd:
- kfree(pdd);
- return NULL;
+ return pdd;
}
/**
@@ -1566,7 +1680,8 @@ int kfd_process_device_init_vm(struct kfd_process_device *pdd,
struct amdgpu_fpriv *drv_priv;
struct amdgpu_vm *avm;
struct kfd_process *p;
- struct kfd_dev *dev;
+ struct dma_fence *ef;
+ struct kfd_node *dev;
int ret;
if (!drm_file)
@@ -1585,13 +1700,16 @@ int kfd_process_device_init_vm(struct kfd_process_device *pdd,
ret = amdgpu_amdkfd_gpuvm_acquire_process_vm(dev->adev, avm,
&p->kgd_process_info,
- &p->ef);
+ p->ef ? NULL : &ef);
if (ret) {
- pr_err("Failed to create process VM object\n");
+ dev_err(dev->adev->dev, "Failed to create process VM object\n");
return ret;
}
+
+ if (!p->ef)
+ RCU_INIT_POINTER(p->ef, ef);
+
pdd->drm_priv = drm_file->private_data;
- atomic64_set(&pdd->tlb_seq, 0);
ret = kfd_process_device_reserve_ib_mem(pdd);
if (ret)
@@ -1600,15 +1718,21 @@ int kfd_process_device_init_vm(struct kfd_process_device *pdd,
if (ret)
goto err_init_cwsr;
- ret = amdgpu_amdkfd_gpuvm_set_vm_pasid(dev->adev, avm, p->pasid);
- if (ret)
- goto err_set_pasid;
+ if (unlikely(!avm->pasid)) {
+ dev_warn(pdd->dev->adev->dev, "WARN: vm %p has no pasid associated\n",
+ avm);
+ ret = -EINVAL;
+ goto err_get_pasid;
+ }
+ pdd->pasid = avm->pasid;
pdd->drm_file = drm_file;
+ kfd_smi_event_process(pdd, true);
+
return 0;
-err_set_pasid:
+err_get_pasid:
kfd_process_device_destroy_cwsr_dgpu(pdd);
err_init_cwsr:
kfd_process_device_destroy_ib_mem(pdd);
@@ -1626,7 +1750,7 @@ err_reserve_ib_mem:
*
* Assumes that the process lock is held.
*/
-struct kfd_process_device *kfd_bind_process_to_device(struct kfd_dev *dev,
+struct kfd_process_device *kfd_bind_process_to_device(struct kfd_node *dev,
struct kfd_process *p)
{
struct kfd_process_device *pdd;
@@ -1634,7 +1758,7 @@ struct kfd_process_device *kfd_bind_process_to_device(struct kfd_dev *dev,
pdd = kfd_get_process_device_data(dev, p);
if (!pdd) {
- pr_err("Process device data doesn't exist\n");
+ dev_err(dev->adev->dev, "Process device data doesn't exist\n");
return ERR_PTR(-ENOMEM);
}
@@ -1654,10 +1778,6 @@ struct kfd_process_device *kfd_bind_process_to_device(struct kfd_dev *dev,
}
}
- err = kfd_iommu_bind_process_to_device(pdd);
- if (err)
- goto out;
-
/*
* make sure that runtime_usage counter is incremented just once
* per pdd
@@ -1665,15 +1785,6 @@ struct kfd_process_device *kfd_bind_process_to_device(struct kfd_dev *dev,
pdd->runtime_inuse = true;
return pdd;
-
-out:
- /* balance runpm reference count and exit with error */
- if (!pdd->runtime_inuse) {
- pm_runtime_mark_last_busy(adev_to_drm(dev->adev)->dev);
- pm_runtime_put_autosuspend(adev_to_drm(dev->adev)->dev);
- }
-
- return ERR_PTR(err);
}
/* Create specific handle mapped to mem from process local memory idr
@@ -1707,25 +1818,50 @@ void kfd_process_device_remove_obj_handle(struct kfd_process_device *pdd,
idr_remove(&pdd->alloc_idr, handle);
}
-/* This increments the process->ref counter. */
-struct kfd_process *kfd_lookup_process_by_pasid(u32 pasid)
+static struct kfd_process_device *kfd_lookup_process_device_by_pasid(u32 pasid)
{
- struct kfd_process *p, *ret_p = NULL;
+ struct kfd_process_device *ret_p = NULL;
+ struct kfd_process *p;
unsigned int temp;
-
- int idx = srcu_read_lock(&kfd_processes_srcu);
+ int i;
hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
- if (p->pasid == pasid) {
- kref_get(&p->ref);
- ret_p = p;
- break;
+ for (i = 0; i < p->n_pdds; i++) {
+ if (p->pdds[i]->pasid == pasid) {
+ ret_p = p->pdds[i];
+ break;
+ }
}
+ if (ret_p)
+ break;
+ }
+ return ret_p;
+}
+
+/* This increments the process->ref counter. */
+struct kfd_process *kfd_lookup_process_by_pasid(u32 pasid,
+ struct kfd_process_device **pdd)
+{
+ struct kfd_process_device *ret_p;
+
+ int idx = srcu_read_lock(&kfd_processes_srcu);
+
+ ret_p = kfd_lookup_process_device_by_pasid(pasid);
+ if (ret_p) {
+ if (pdd)
+ *pdd = ret_p;
+ kref_get(&ret_p->process->ref);
+
+ srcu_read_unlock(&kfd_processes_srcu, idx);
+ return ret_p->process;
}
srcu_read_unlock(&kfd_processes_srcu, idx);
- return ret_p;
+ if (pdd)
+ *pdd = NULL;
+
+ return NULL;
}
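/*
 * [Editorial aside] The lookup above now resolves a PASID to a specific
 * kfd_process_device and takes a reference on the owning process, so every
 * successful call must be paired with kfd_unref_process(). A minimal usage
 * sketch (hypothetical caller):
 */
static void example_handle_pasid_event(u32 pasid)
{
	struct kfd_process_device *pdd;
	struct kfd_process *p = kfd_lookup_process_by_pasid(pasid, &pdd);

	if (!p)
		return;		/* stale PASID; the process is already gone */

	/* ... act on p and pdd while holding the reference ... */

	kfd_unref_process(p);
}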
/* This increments the process->ref counter. */
@@ -1757,6 +1893,7 @@ int kfd_process_evict_queues(struct kfd_process *p, uint32_t trigger)
for (i = 0; i < p->n_pdds; i++) {
struct kfd_process_device *pdd = p->pdds[i];
+ struct device *dev = pdd->dev->adev->dev;
kfd_smi_event_queue_eviction(pdd->dev, p->lead_thread->pid,
trigger);
@@ -1768,10 +1905,12 @@ int kfd_process_evict_queues(struct kfd_process *p, uint32_t trigger)
* them from being added back since they are actually not saved right now.
*/
if (r && r != -EIO) {
- pr_err("Failed to evict process queues\n");
+ dev_err(dev, "Failed to evict process queues\n");
goto fail;
}
n_evicted++;
+
+ pdd->dev->dqm->is_hws_hang = false;
}
return r;
@@ -1790,7 +1929,8 @@ fail:
if (pdd->dev->dqm->ops.restore_process_queues(pdd->dev->dqm,
&pdd->qpd))
- pr_err("Failed to restore queues\n");
+ dev_err(pdd->dev->adev->dev,
+ "Failed to restore queues\n");
n_evicted--;
}
@@ -1806,13 +1946,14 @@ int kfd_process_restore_queues(struct kfd_process *p)
for (i = 0; i < p->n_pdds; i++) {
struct kfd_process_device *pdd = p->pdds[i];
+ struct device *dev = pdd->dev->adev->dev;
kfd_smi_event_queue_restore(pdd->dev, p->lead_thread->pid);
r = pdd->dev->dqm->ops.restore_process_queues(pdd->dev->dqm,
&pdd->qpd);
if (r) {
- pr_err("Failed to restore process queues\n");
+ dev_err(dev, "Failed to restore process queues\n");
if (!ret)
ret = r;
}
@@ -1832,13 +1973,13 @@ int kfd_process_gpuidx_from_gpuid(struct kfd_process *p, uint32_t gpu_id)
}
int
-kfd_process_gpuid_from_adev(struct kfd_process *p, struct amdgpu_device *adev,
- uint32_t *gpuid, uint32_t *gpuidx)
+kfd_process_gpuid_from_node(struct kfd_process *p, struct kfd_node *node,
+ uint32_t *gpuid, uint32_t *gpuidx)
{
int i;
for (i = 0; i < p->n_pdds; i++)
- if (p->pdds[i] && p->pdds[i]->dev->adev == adev) {
+ if (p->pdds[i] && p->pdds[i]->dev == node) {
*gpuid = p->pdds[i]->user_gpu_id;
*gpuidx = i;
return 0;
@@ -1846,6 +1987,23 @@ kfd_process_gpuid_from_adev(struct kfd_process *p, struct amdgpu_device *adev,
return -EINVAL;
}
+static int signal_eviction_fence(struct kfd_process *p)
+{
+ struct dma_fence *ef;
+ int ret;
+
+ rcu_read_lock();
+ ef = dma_fence_get_rcu_safe(&p->ef);
+ rcu_read_unlock();
+ if (!ef)
+ return -EINVAL;
+
+ ret = dma_fence_signal(ef);
+ dma_fence_put(ef);
+
+ return ret;
+}
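/*
 * [Editorial aside] p->ef is now treated as an RCU-protected pointer:
 * writers publish it with RCU_INIT_POINTER() (see kfd_process_device_init_vm
 * above), and readers use dma_fence_get_rcu_safe(), which retries if the
 * pointer is swapped concurrently and only returns a fence whose refcount
 * it successfully elevated. Condensed sketch of the two sides, assuming a
 * struct dma_fence __rcu *ef member:
 */
/* writer: publish a new eviction fence */
rcu_assign_pointer(p->ef, new_fence);

/* reader: take a safe reference, then use it outside the RCU section */
rcu_read_lock();
ef = dma_fence_get_rcu_safe(&p->ef);
rcu_read_unlock();
if (ef) {
	dma_fence_signal(ef);
	dma_fence_put(ef);
}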
+
static void evict_process_worker(struct work_struct *work)
{
int ret;
@@ -1858,29 +2016,45 @@ static void evict_process_worker(struct work_struct *work)
* lifetime of this thread, kfd_process p will be valid
*/
p = container_of(dwork, struct kfd_process, eviction_work);
- WARN_ONCE(p->last_eviction_seqno != p->ef->seqno,
- "Eviction fence mismatch\n");
-
- /* Narrow window of overlap between restore and evict work
- * item is possible. Once amdgpu_amdkfd_gpuvm_restore_process_bos
- * unreserves KFD BOs, it is possible to evicted again. But
- * restore has few more steps of finish. So lets wait for any
- * previous restore work to complete
- */
- flush_delayed_work(&p->restore_work);
- pr_debug("Started evicting pasid 0x%x\n", p->pasid);
+ pr_debug("Started evicting process pid %d\n", p->lead_thread->pid);
ret = kfd_process_evict_queues(p, KFD_QUEUE_EVICTION_TRIGGER_TTM);
if (!ret) {
- dma_fence_signal(p->ef);
- dma_fence_put(p->ef);
- p->ef = NULL;
- queue_delayed_work(kfd_restore_wq, &p->restore_work,
- msecs_to_jiffies(PROCESS_RESTORE_TIME_MS));
+ /* If another thread already signaled the eviction fence,
+ * they are responsible for stopping the queues and scheduling
+ * the restore work.
+ */
+ if (signal_eviction_fence(p) ||
+ mod_delayed_work(kfd_restore_wq, &p->restore_work,
+ msecs_to_jiffies(PROCESS_RESTORE_TIME_MS)))
+ kfd_process_restore_queues(p);
- pr_debug("Finished evicting pasid 0x%x\n", p->pasid);
+ pr_debug("Finished evicting process pid %d\n", p->lead_thread->pid);
} else
- pr_err("Failed to evict queues of pasid 0x%x\n", p->pasid);
+ pr_err("Failed to evict queues of process pid %d\n", p->lead_thread->pid);
+}
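/*
 * [Editorial aside] The condition above is dense. Under the documented
 * return conventions, dma_fence_signal() returns -EINVAL when the fence was
 * already signaled, and mod_delayed_work() returns true when the work item
 * was already pending. One reading of the intent: if this worker lost the
 * race to signal the fence, or a restore was already queued, it does not
 * own the eviction/restore cycle and must undo its own eviction right away:
 */
if (signal_eviction_fence(p) ||		/* another thread signaled first... */
    mod_delayed_work(kfd_restore_wq, &p->restore_work,
		     msecs_to_jiffies(PROCESS_RESTORE_TIME_MS)))
					/* ...or a restore was already pending */
	kfd_process_restore_queues(p);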
+
+static int restore_process_helper(struct kfd_process *p)
+{
+ int ret = 0;
+
+ /* VMs may not have been acquired yet during debugging. */
+ if (p->kgd_process_info) {
+ ret = amdgpu_amdkfd_gpuvm_restore_process_bos(
+ p->kgd_process_info, &p->ef);
+ if (ret)
+ return ret;
+ }
+
+ ret = kfd_process_restore_queues(p);
+ if (!ret)
+ pr_debug("Finished restoring process pid %d\n",
+ p->lead_thread->pid);
+ else
+ pr_err("Failed to restore queues of process pid %d\n",
+ p->lead_thread->pid);
+
+ return ret;
}
static void restore_process_worker(struct work_struct *work)
@@ -1895,7 +2069,7 @@ static void restore_process_worker(struct work_struct *work)
* lifetime of this thread, kfd_process p will be valid
*/
p = container_of(dwork, struct kfd_process, restore_work);
- pr_debug("Started restoring pasid 0x%x\n", p->pasid);
+ pr_debug("Started restoring process pid %d\n", (int)p->lead_thread->pid);
/* Setting last_restore_timestamp before successful restoration.
* Otherwise this would have to be set by KGD (restore_process_bos)
@@ -1908,22 +2082,15 @@ static void restore_process_worker(struct work_struct *work)
*/
p->last_restore_timestamp = get_jiffies_64();
- ret = amdgpu_amdkfd_gpuvm_restore_process_bos(p->kgd_process_info,
- &p->ef);
+
+ ret = restore_process_helper(p);
if (ret) {
- pr_debug("Failed to restore BOs of pasid 0x%x, retry after %d ms\n",
- p->pasid, PROCESS_BACK_OFF_TIME_MS);
- ret = queue_delayed_work(kfd_restore_wq, &p->restore_work,
- msecs_to_jiffies(PROCESS_BACK_OFF_TIME_MS));
- WARN(!ret, "reschedule restore work failed\n");
- return;
+ pr_debug("Failed to restore BOs of process pid %d, retry after %d ms\n",
+ p->lead_thread->pid, PROCESS_BACK_OFF_TIME_MS);
+ if (mod_delayed_work(kfd_restore_wq, &p->restore_work,
+ msecs_to_jiffies(PROCESS_RESTORE_TIME_MS)))
+ kfd_process_restore_queues(p);
}
-
- ret = kfd_process_restore_queues(p);
- if (!ret)
- pr_debug("Finished restoring pasid 0x%x\n", p->pasid);
- else
- pr_err("Failed to restore queues of pasid 0x%x\n", p->pasid);
}
void kfd_suspend_all_processes(void)
@@ -1934,14 +2101,9 @@ void kfd_suspend_all_processes(void)
WARN(debug_evictions, "Evicting all processes");
hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
- cancel_delayed_work_sync(&p->eviction_work);
- cancel_delayed_work_sync(&p->restore_work);
-
if (kfd_process_evict_queues(p, KFD_QUEUE_EVICTION_TRIGGER_SUSPEND))
- pr_err("Failed to suspend process 0x%x\n", p->pasid);
- dma_fence_signal(p->ef);
- dma_fence_put(p->ef);
- p->ef = NULL;
+ pr_err("Failed to suspend process pid %d\n", p->lead_thread->pid);
+ signal_eviction_fence(p);
}
srcu_read_unlock(&kfd_processes_srcu, idx);
}
@@ -1953,9 +2115,9 @@ int kfd_resume_all_processes(void)
int ret = 0, idx = srcu_read_lock(&kfd_processes_srcu);
hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
- if (!queue_delayed_work(kfd_restore_wq, &p->restore_work, 0)) {
- pr_err("Restore process %d failed during resume\n",
- p->pasid);
+ if (restore_process_helper(p)) {
+ pr_err("Restore process pid %d failed during resume\n",
+ p->lead_thread->pid);
ret = -EFAULT;
}
}
@@ -1963,14 +2125,14 @@ int kfd_resume_all_processes(void)
return ret;
}
-int kfd_reserved_mem_mmap(struct kfd_dev *dev, struct kfd_process *process,
+int kfd_reserved_mem_mmap(struct kfd_node *dev, struct kfd_process *process,
struct vm_area_struct *vma)
{
struct kfd_process_device *pdd;
struct qcm_process_device *qpd;
if ((vma->vm_end - vma->vm_start) != KFD_CWSR_TBA_TMA_SIZE) {
- pr_err("Incorrect CWSR mapping size.\n");
+ dev_err(dev->adev->dev, "Incorrect CWSR mapping size.\n");
return -EINVAL;
}
@@ -1982,43 +2144,142 @@ int kfd_reserved_mem_mmap(struct kfd_dev *dev, struct kfd_process *process,
qpd->cwsr_kaddr = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
get_order(KFD_CWSR_TBA_TMA_SIZE));
if (!qpd->cwsr_kaddr) {
- pr_err("Error allocating per process CWSR buffer.\n");
+ dev_err(dev->adev->dev,
+ "Error allocating per process CWSR buffer.\n");
return -ENOMEM;
}
- vma->vm_flags |= VM_IO | VM_DONTCOPY | VM_DONTEXPAND
- | VM_NORESERVE | VM_DONTDUMP | VM_PFNMAP;
+ vm_flags_set(vma, VM_IO | VM_DONTCOPY | VM_DONTEXPAND
+ | VM_NORESERVE | VM_DONTDUMP | VM_PFNMAP);
/* Mapping pages to user process */
return remap_pfn_range(vma, vma->vm_start,
PFN_DOWN(__pa(qpd->cwsr_kaddr)),
KFD_CWSR_TBA_TMA_SIZE, vma->vm_page_prot);
}
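/*
 * [Editorial aside] The vm_flags_set() conversion above tracks the mm API
 * change in Linux 6.3 that made direct writes to vma->vm_flags invalid;
 * the new accessors also assert that the mmap write lock is held. Sketch
 * of the before/after:
 *
 *	vma->vm_flags |= VM_IO | VM_PFNMAP;	(old, no longer compiles)
 */
vm_flags_set(vma, VM_IO | VM_PFNMAP);	/* set flag bits */
vm_flags_clear(vma, VM_MAYWRITE);	/* and the matching clear helper */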
-void kfd_flush_tlb(struct kfd_process_device *pdd, enum TLB_FLUSH_TYPE type)
+/* assumes caller holds process lock. */
+int kfd_process_drain_interrupts(struct kfd_process_device *pdd)
{
- struct amdgpu_vm *vm = drm_priv_to_vm(pdd->drm_priv);
- uint64_t tlb_seq = amdgpu_vm_tlb_seq(vm);
- struct kfd_dev *dev = pdd->dev;
+ uint32_t irq_drain_fence[8];
+ uint8_t node_id = 0;
+ int r = 0;
+
+ if (!KFD_IS_SOC15(pdd->dev))
+ return 0;
+
+ pdd->process->irq_drain_is_open = true;
+
+ memset(irq_drain_fence, 0, sizeof(irq_drain_fence));
+ irq_drain_fence[0] = (KFD_IRQ_FENCE_SOURCEID << 8) |
+ KFD_IRQ_FENCE_CLIENTID;
+ irq_drain_fence[3] = pdd->pasid;
/*
- * It can be that we race and lose here, but that is extremely unlikely
- * and the worst thing which could happen is that we flush the changes
- * into the TLB once more which is harmless.
+ * For GFX 9.4.3/9.5.0, send the NodeId also in IH cookie DW[3]
*/
- if (atomic64_xchg(&pdd->tlb_seq, tlb_seq) == tlb_seq)
- return;
+ if (KFD_GC_VERSION(pdd->dev->kfd) == IP_VERSION(9, 4, 3) ||
+ KFD_GC_VERSION(pdd->dev->kfd) == IP_VERSION(9, 4, 4) ||
+ KFD_GC_VERSION(pdd->dev->kfd) == IP_VERSION(9, 5, 0)) {
+ node_id = ffs(pdd->dev->interrupt_bitmap) - 1;
+ irq_drain_fence[3] |= node_id << 16;
+ }
- if (dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) {
- /* Nothing to flush until a VMID is assigned, which
- * only happens when the first queue is created.
- */
- if (pdd->qpd.vmid)
- amdgpu_amdkfd_flush_gpu_tlb_vmid(dev->adev,
- pdd->qpd.vmid);
- } else {
- amdgpu_amdkfd_flush_gpu_tlb_pasid(dev->adev,
- pdd->process->pasid, type);
+ /* Ensure stale irqs have scheduled their KFD interrupts, then send the drain fence. */
+ if (amdgpu_amdkfd_send_close_event_drain_irq(pdd->dev->adev,
+ irq_drain_fence)) {
+ pdd->process->irq_drain_is_open = false;
+ return 0;
}
+
+ r = wait_event_interruptible(pdd->process->wait_irq_drain,
+ !READ_ONCE(pdd->process->irq_drain_is_open));
+ if (r)
+ pdd->process->irq_drain_is_open = false;
+
+ return r;
+}
+
+void kfd_process_close_interrupt_drain(unsigned int pasid)
+{
+ struct kfd_process *p;
+
+ p = kfd_lookup_process_by_pasid(pasid, NULL);
+
+ if (!p)
+ return;
+
+ WRITE_ONCE(p->irq_drain_is_open, false);
+ wake_up_all(&p->wait_irq_drain);
+ kfd_unref_process(p);
+}
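/*
 * [Editorial aside] The two functions above form an open/close handshake
 * over the interrupt ring: the draining side sets irq_drain_is_open and
 * sleeps; the interrupt path, once it sees the drain fence come back,
 * clears the flag and wakes the waiter. Condensed sketch of the pairing:
 */
/* drainer (process context) */
pdd->process->irq_drain_is_open = true;
/* ... inject the drain fence into the IH ring ... */
r = wait_event_interruptible(pdd->process->wait_irq_drain,
			     !READ_ONCE(pdd->process->irq_drain_is_open));

/* interrupt side, on observing the fence */
WRITE_ONCE(p->irq_drain_is_open, false);
wake_up_all(&p->wait_irq_drain);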
+
+struct send_exception_work_handler_workarea {
+ struct work_struct work;
+ struct kfd_process *p;
+ unsigned int queue_id;
+ uint64_t error_reason;
+};
+
+static void send_exception_work_handler(struct work_struct *work)
+{
+ struct send_exception_work_handler_workarea *workarea;
+ struct kfd_process *p;
+ struct queue *q;
+ struct mm_struct *mm;
+ struct kfd_context_save_area_header __user *csa_header;
+ uint64_t __user *err_payload_ptr;
+ uint64_t cur_err;
+ uint32_t ev_id;
+
+ workarea = container_of(work,
+ struct send_exception_work_handler_workarea,
+ work);
+ p = workarea->p;
+
+ mm = get_task_mm(p->lead_thread);
+
+ if (!mm)
+ return;
+
+ kthread_use_mm(mm);
+
+ q = pqm_get_user_queue(&p->pqm, workarea->queue_id);
+
+ if (!q)
+ goto out;
+
+ csa_header = (void __user *)q->properties.ctx_save_restore_area_address;
+
+ get_user(err_payload_ptr, (uint64_t __user **)&csa_header->err_payload_addr);
+ get_user(cur_err, err_payload_ptr);
+ cur_err |= workarea->error_reason;
+ put_user(cur_err, err_payload_ptr);
+ get_user(ev_id, &csa_header->err_event_id);
+
+ kfd_set_event(p, ev_id);
+
+out:
+ kthread_unuse_mm(mm);
+ mmput(mm);
+}
+
+int kfd_send_exception_to_runtime(struct kfd_process *p,
+ unsigned int queue_id,
+ uint64_t error_reason)
+{
+ struct send_exception_work_handler_workarea worker;
+
+ INIT_WORK_ONSTACK(&worker.work, send_exception_work_handler);
+
+ worker.p = p;
+ worker.queue_id = queue_id;
+ worker.error_reason = error_reason;
+
+ schedule_work(&worker.work);
+ flush_work(&worker.work);
+ destroy_work_on_stack(&worker.work);
+
+ return 0;
}
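/*
 * [Editorial aside] send_exception_work_handler() runs in a kernel worker,
 * which has no user address space of its own; kthread_use_mm() temporarily
 * adopts the target process's mm so get_user()/put_user() on the context
 * save area can work. A hedged sketch of the required bracketing
 * (hypothetical helper):
 */
static int poke_user_u64(struct task_struct *tsk, u64 __user *ptr, u64 val)
{
	struct mm_struct *mm = get_task_mm(tsk);
	int ret;

	if (!mm)
		return -ESRCH;
	kthread_use_mm(mm);	/* adopt the target address space */
	ret = put_user(val, ptr);
	kthread_unuse_mm(mm);
	mmput(mm);
	return ret;
}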
struct kfd_process_device *kfd_process_device_data_by_id(struct kfd_process *p, uint32_t gpu_id)
@@ -2063,8 +2324,8 @@ int kfd_debugfs_mqds_by_process(struct seq_file *m, void *data)
int idx = srcu_read_lock(&kfd_processes_srcu);
hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
- seq_printf(m, "Process %d PASID 0x%x:\n",
- p->lead_thread->tgid, p->pasid);
+ seq_printf(m, "Process %d pid %d:\n",
+ p->lead_thread->tgid, p->lead_thread->pid);
mutex_lock(&p->mutex);
r = pqm_debugfs_mqds(m, &p->pqm);
@@ -2080,4 +2341,3 @@ int kfd_debugfs_mqds_by_process(struct seq_file *m, void *data)
}
#endif
-