Diffstat (limited to 'drivers/gpu/drm/amd/amdkfd/kfd_process.c')
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_process.c  |  62
1 file changed, 53 insertions(+), 9 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index fe0cd49d4ea7..d27221ddcdeb 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -79,18 +79,22 @@ static struct kfd_procfs_tree procfs;
static ssize_t kfd_procfs_show(struct kobject *kobj, struct attribute *attr,
char *buffer)
{
- int val = 0;
-
if (strcmp(attr->name, "pasid") == 0) {
struct kfd_process *p = container_of(attr, struct kfd_process,
attr_pasid);
- val = p->pasid;
+
+ return snprintf(buffer, PAGE_SIZE, "%d\n", p->pasid);
+ } else if (strncmp(attr->name, "vram_", 5) == 0) {
+ struct kfd_process_device *pdd = container_of(attr, struct kfd_process_device,
+ attr_vram);
+ if (pdd)
+ return snprintf(buffer, PAGE_SIZE, "%llu\n", READ_ONCE(pdd->vram_usage));
} else {
pr_err("Invalid attribute");
return -EINVAL;
}
- return snprintf(buffer, PAGE_SIZE, "%d\n", val);
+ return 0;
}
static void kfd_procfs_kobj_release(struct kobject *kobj)
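The show handler above is a raw kobject attribute callback: sysfs passes in the struct attribute pointer, and container_of() recovers either the owning kfd_process (for "pasid") or the owning kfd_process_device (for the new "vram_<gpuid>" files). Reads reach this function through the usual sysfs_ops/kobj_type wiring elsewhere in this file; a generic sketch of that pattern, with the names kfd_procfs_ops and procfs_type assumed rather than taken from this diff:

/* Illustrative kobject wiring: every attribute under the per-process
 * kobject funnels through a single ->show callback, which is why
 * kfd_procfs_show() dispatches on attr->name. Names are assumptions. */
static const struct sysfs_ops kfd_procfs_ops = {
	.show = kfd_procfs_show,
};

static struct kobj_type procfs_type = {
	.release = kfd_procfs_kobj_release,
	.sysfs_ops = &kfd_procfs_ops,
};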
@@ -206,6 +210,34 @@ int kfd_procfs_add_queue(struct queue *q)
return 0;
}
+int kfd_procfs_add_vram_usage(struct kfd_process *p)
+{
+ int ret = 0;
+ struct kfd_process_device *pdd;
+
+ if (!p)
+ return -EINVAL;
+
+ if (!p->kobj)
+ return -EFAULT;
+
+ /* Create proc/<pid>/vram_<gpuid> file for each GPU */
+ list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
+ snprintf(pdd->vram_filename, MAX_VRAM_FILENAME_LEN, "vram_%u",
+ pdd->dev->id);
+ pdd->attr_vram.name = pdd->vram_filename;
+ pdd->attr_vram.mode = KFD_SYSFS_FILE_MODE;
+ sysfs_attr_init(&pdd->attr_vram);
+ ret = sysfs_create_file(p->kobj, &pdd->attr_vram);
+ if (ret)
+ pr_warn("Creating vram usage for gpu id %d failed",
+ (int)pdd->dev->id);
+ }
+
+ return ret;
+}
+
+
void kfd_procfs_del_queue(struct queue *q)
{
if (!q)
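The files created by kfd_procfs_add_vram_usage() sit next to the existing pasid entry in the per-process KFD procfs directory (typically /sys/class/kfd/kfd/proc/<pid>/, though this diff does not spell the path out). A minimal userspace sketch for reading one of the new counters; the PID, GPU id and path below are illustrative assumptions, and the printed value is simply the raw vram_usage counter emitted by kfd_procfs_show() as %llu:

/* Hypothetical reader for a per-GPU VRAM counter; path, pid and gpu id
 * are examples only. */
#include <stdio.h>

int main(void)
{
	const char *path = "/sys/class/kfd/kfd/proc/1234/vram_1002";
	unsigned long long vram_usage = 0;
	FILE *f = fopen(path, "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fscanf(f, "%llu", &vram_usage) == 1)
		printf("VRAM in use: %llu\n", vram_usage);
	fclose(f);
	return 0;
}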
@@ -248,7 +280,7 @@ static void kfd_process_free_gpuvm(struct kgd_mem *mem,
struct kfd_dev *dev = pdd->dev;
amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(dev->kgd, mem, pdd->vm);
- amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd, mem);
+ amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd, mem, NULL);
}
/* kfd_process_alloc_gpuvm - Allocate GPU VM for the KFD process
@@ -312,7 +344,7 @@ sync_memory_failed:
return err;
err_map_mem:
- amdgpu_amdkfd_gpuvm_free_memory_of_gpu(kdev->kgd, mem);
+ amdgpu_amdkfd_gpuvm_free_memory_of_gpu(kdev->kgd, mem, NULL);
err_alloc_mem:
*kptr = NULL;
return err;
@@ -411,6 +443,11 @@ struct kfd_process *kfd_create_process(struct file *filep)
process->kobj);
if (!process->kobj_queues)
pr_warn("Creating KFD proc/queues folder failed");
+
+ ret = kfd_procfs_add_vram_usage(process);
+ if (ret)
+ pr_warn("Creating vram usage file for pid %d failed",
+ (int)process->lead_thread->pid);
}
out:
if (!IS_ERR(process))
@@ -488,7 +525,7 @@ static void kfd_process_device_free_bos(struct kfd_process_device *pdd)
peer_pdd->dev->kgd, mem, peer_pdd->vm);
}
- amdgpu_amdkfd_gpuvm_free_memory_of_gpu(pdd->dev->kgd, mem);
+ amdgpu_amdkfd_gpuvm_free_memory_of_gpu(pdd->dev->kgd, mem, NULL);
kfd_process_device_remove_obj_handle(pdd, id);
}
}
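The extra NULL passed to amdgpu_amdkfd_gpuvm_free_memory_of_gpu() in this and the earlier hunks reflects a change to that function's signature; the new third argument appears to be an optional out-parameter reporting how much memory was released, and the callers in this file simply do not need it. A hedged sketch of how a caller that does track usage might use it (the pointer type of the parameter and the helper name are assumptions, not taken from this diff):

/* Sketch only: assumes the new third argument is an optional pointer
 * that receives the amount of memory freed, so the caller can adjust
 * its per-process accounting. The callers in this file pass NULL. */
static void kfd_free_mem_and_account(struct kfd_process_device *pdd,
				     struct kgd_mem *mem)
{
	uint64_t size = 0;

	amdgpu_amdkfd_gpuvm_free_memory_of_gpu(pdd->dev->kgd, mem, &size);
	WRITE_ONCE(pdd->vram_usage, pdd->vram_usage - size);
}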
@@ -551,6 +588,7 @@ static void kfd_process_wq_release(struct work_struct *work)
{
struct kfd_process *p = container_of(work, struct kfd_process,
release_work);
+ struct kfd_process_device *pdd;
/* Remove the procfs files */
if (p->kobj) {
@@ -558,6 +596,10 @@ static void kfd_process_wq_release(struct work_struct *work)
kobject_del(p->kobj_queues);
kobject_put(p->kobj_queues);
p->kobj_queues = NULL;
+
+ list_for_each_entry(pdd, &p->per_device_data, per_device_list)
+ sysfs_remove_file(p->kobj, &pdd->attr_vram);
+
kobject_del(p->kobj);
kobject_put(p->kobj);
p->kobj = NULL;
@@ -858,10 +900,12 @@ struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
pdd->qpd.dqm = dev->dqm;
pdd->qpd.pqm = &p->pqm;
pdd->qpd.evicted = 0;
+ pdd->qpd.mapped_gws_queue = false;
pdd->process = p;
pdd->bound = PDD_UNBOUND;
pdd->already_dequeued = false;
pdd->runtime_inuse = false;
+ pdd->vram_usage = 0;
list_add(&pdd->per_device_list, &p->per_device_data);
/* Init idr used for memory handle translation */
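pdd->vram_usage is initialized to zero here and is only read in this file (via READ_ONCE() in kfd_procfs_show()); the counter is presumably maintained from the memory allocation and free paths outside this file. A sketch of that expected producer side, where the helper name and the KFD_IOC_ALLOC_MEM_FLAGS_VRAM check are assumptions:

/* Illustrative producer side (would live outside this file): bump the
 * per-device counter when VRAM is allocated on behalf of the process,
 * using WRITE_ONCE() to pair with the READ_ONCE() in kfd_procfs_show(). */
static void kfd_account_vram_alloc(struct kfd_process_device *pdd,
				   uint64_t size, uint32_t flags)
{
	if (flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM)
		WRITE_ONCE(pdd->vram_usage, pdd->vram_usage + size);
}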
@@ -1078,7 +1122,7 @@ struct kfd_process *kfd_lookup_process_by_mm(const struct mm_struct *mm)
return p;
}
-/* process_evict_queues - Evict all user queues of a process
+/* kfd_process_evict_queues - Evict all user queues of a process
*
* Eviction is reference-counted per process-device. This means multiple
* evictions from different sources can be nested safely.
@@ -1118,7 +1162,7 @@ fail:
return r;
}
-/* process_restore_queues - Restore all user queues of a process */
+/* kfd_process_restore_queues - Restore all user queues of a process */
int kfd_process_restore_queues(struct kfd_process *p)
{
struct kfd_process_device *pdd;