Diffstat (limited to 'drivers/gpu/drm/msm/msm_gpu.c')
 drivers/gpu/drm/msm/msm_gpu.c | 254 +++++++++++++++++++++++++++++++++---------
 1 file changed, 191 insertions(+), 63 deletions(-)
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index cd185b9636d2..995549d0bbbc 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -148,7 +148,7 @@ int msm_gpu_pm_suspend(struct msm_gpu *gpu)
return 0;
}
-void msm_gpu_show_fdinfo(struct msm_gpu *gpu, struct msm_file_private *ctx,
+void msm_gpu_show_fdinfo(struct msm_gpu *gpu, struct msm_context *ctx,
struct drm_printer *p)
{
drm_printf(p, "drm-engine-gpu:\t%llu ns\n", ctx->elapsed_ns);
@@ -197,8 +197,7 @@ static ssize_t msm_gpu_devcoredump_read(char *buffer, loff_t offset,
drm_printf(&p, "---\n");
drm_printf(&p, "kernel: " UTS_RELEASE "\n");
drm_printf(&p, "module: " KBUILD_MODNAME "\n");
- drm_printf(&p, "time: %lld.%09ld\n",
- state->time.tv_sec, state->time.tv_nsec);
+ drm_printf(&p, "time: %ptSp\n", &state->time);
if (state->comm)
drm_printf(&p, "comm: %s\n", state->comm);
if (state->cmd)
@@ -219,43 +218,152 @@ static void msm_gpu_devcoredump_free(void *data)
}
static void msm_gpu_crashstate_get_bo(struct msm_gpu_state *state,
- struct drm_gem_object *obj, u64 iova, bool full)
+ struct drm_gem_object *obj, u64 iova,
+ bool full, size_t offset, size_t size)
{
struct msm_gpu_state_bo *state_bo = &state->bos[state->nr_bos];
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
/* Don't record write only objects */
- state_bo->size = obj->size;
+ state_bo->size = size;
+ state_bo->flags = msm_obj->flags;
state_bo->iova = iova;
- BUILD_BUG_ON(sizeof(state_bo->name) != sizeof(to_msm_bo(obj)->name));
+ BUILD_BUG_ON(sizeof(state_bo->name) != sizeof(msm_obj->name));
- memcpy(state_bo->name, to_msm_bo(obj)->name, sizeof(state_bo->name));
+ memcpy(state_bo->name, msm_obj->name, sizeof(state_bo->name));
if (full) {
void *ptr;
- state_bo->data = kvmalloc(obj->size, GFP_KERNEL);
+ state_bo->data = kvmalloc(size, GFP_KERNEL);
if (!state_bo->data)
goto out;
- msm_gem_lock(obj);
ptr = msm_gem_get_vaddr_active(obj);
- msm_gem_unlock(obj);
if (IS_ERR(ptr)) {
kvfree(state_bo->data);
state_bo->data = NULL;
goto out;
}
- memcpy(state_bo->data, ptr, obj->size);
- msm_gem_put_vaddr(obj);
+ memcpy(state_bo->data, ptr + offset, size);
+ msm_gem_put_vaddr_locked(obj);
}
out:
state->nr_bos++;
}
+static void crashstate_get_bos(struct msm_gpu_state *state, struct msm_gem_submit *submit)
+{
+ extern bool rd_full;
+
+ if (msm_context_is_vmbind(submit->queue->ctx)) {
+ struct drm_exec exec;
+ struct drm_gpuva *vma;
+ unsigned cnt = 0;
+
+ drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES, 0);
+ drm_exec_until_all_locked(&exec) {
+ cnt = 0;
+
+ drm_exec_lock_obj(&exec, drm_gpuvm_resv_obj(submit->vm));
+ drm_exec_retry_on_contention(&exec);
+
+ drm_gpuvm_for_each_va (vma, submit->vm) {
+ if (!vma->gem.obj)
+ continue;
+
+ cnt++;
+ drm_exec_lock_obj(&exec, vma->gem.obj);
+ drm_exec_retry_on_contention(&exec);
+ }
+
+ }
+
+ drm_gpuvm_for_each_va (vma, submit->vm)
+ cnt++;
+
+ state->bos = kcalloc(cnt, sizeof(struct msm_gpu_state_bo), GFP_KERNEL);
+
+ if (state->bos)
+ drm_gpuvm_for_each_va(vma, submit->vm) {
+ bool dump = rd_full || (vma->flags & MSM_VMA_DUMP);
+
+ /* Skip MAP_NULL/PRR VMAs: */
+ if (!vma->gem.obj)
+ continue;
+
+ msm_gpu_crashstate_get_bo(state, vma->gem.obj, vma->va.addr,
+ dump, vma->gem.offset, vma->va.range);
+ }
+
+ drm_exec_fini(&exec);
+ } else {
+ state->bos = kcalloc(submit->nr_bos,
+ sizeof(struct msm_gpu_state_bo), GFP_KERNEL);
+
+ for (int i = 0; state->bos && i < submit->nr_bos; i++) {
+ struct drm_gem_object *obj = submit->bos[i].obj;
+ bool dump = rd_full || (submit->bos[i].flags & MSM_SUBMIT_BO_DUMP);
+
+ msm_gem_lock(obj);
+ msm_gpu_crashstate_get_bo(state, obj, submit->bos[i].iova,
+ dump, 0, obj->size);
+ msm_gem_unlock(obj);
+ }
+ }
+}
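
[Editor's note, not part of the patch: drm_exec_until_all_locked() in crashstate_get_bos() above is the standard DRM retry idiom. When drm_exec_retry_on_contention() sees a contended lock, all locks taken so far are dropped and the block re-runs from the top, which is why the patch resets cnt on every attempt. A minimal sketch of the idiom follows; the helper is illustrative only, and, as in the patch, non-contention errors from drm_exec_lock_obj() are not checked.]

#include <drm/drm_exec.h>
#include <drm/drm_gpuvm.h>

/* Illustrative sketch: lock the VM's common resv object plus every
 * VA-backing GEM object.  On contention the whole block restarts, so
 * any bookkeeping done inside it must be re-initialized at the top.
 */
static void lock_all_vm_objects(struct drm_gpuvm *vm, struct drm_exec *exec)
{
	struct drm_gpuva *vma;

	drm_exec_init(exec, DRM_EXEC_IGNORE_DUPLICATES, 0);
	drm_exec_until_all_locked(exec) {
		drm_exec_lock_obj(exec, drm_gpuvm_resv_obj(vm));
		drm_exec_retry_on_contention(exec);

		drm_gpuvm_for_each_va (vma, vm) {
			if (!vma->gem.obj)
				continue;
			drm_exec_lock_obj(exec, vma->gem.obj);
			drm_exec_retry_on_contention(exec);
		}
	}
	/* ... work under the locks; the caller ends with drm_exec_fini(exec). */
}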
+
+static void crashstate_get_vm_logs(struct msm_gpu_state *state, struct msm_gem_vm *vm)
+{
+ uint32_t vm_log_len = (1 << vm->log_shift);
+ uint32_t vm_log_mask = vm_log_len - 1;
+ int first;
+
+ /* Bail if no log, or empty log: */
+ if (!vm->log || !vm->log[0].op)
+ return;
+
+ mutex_lock(&vm->mmu_lock);
+
+ /*
+ * log_idx is the next entry to overwrite, meaning it is the oldest, or
+ * first, entry (other than the special case handled below where the
+ * log hasn't wrapped around yet)
+ */
+ first = vm->log_idx;
+
+ if (!vm->log[first].op) {
+ /*
+ * If the next log entry has not been written yet, then only
+ * entries 0 to idx-1 are valid (ie. we haven't wrapped around
+ * yet)
+ */
+ state->nr_vm_logs = MAX(0, first - 1);
+ first = 0;
+ } else {
+ state->nr_vm_logs = vm_log_len;
+ }
+
+ state->vm_logs = kmalloc_array(
+ state->nr_vm_logs, sizeof(vm->log[0]), GFP_KERNEL);
+ if (!state->vm_logs) {
+ state->nr_vm_logs = 0;
+ }
+
+ for (int i = 0; i < state->nr_vm_logs; i++) {
+ int idx = (i + first) & vm_log_mask;
+
+ state->vm_logs[i] = vm->log[idx];
+ }
+
+ mutex_unlock(&vm->mmu_lock);
+}
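
[Editor's note, not part of the patch: the VM log is a power-of-two ring, so the `(i + first) & vm_log_mask` indexing above walks entries oldest-to-newest without a modulo. A self-contained userspace sketch of the same indexing, with a hypothetical 16-entry log:]

#include <stdio.h>

#define LOG_SHIFT 4u                 /* hypothetical log_shift */
#define LOG_LEN   (1u << LOG_SHIFT)
#define LOG_MASK  (LOG_LEN - 1)

int main(void)
{
	unsigned log[LOG_LEN] = { 0 }, idx = 0;

	/* Writer: overwrite the slot at idx, then advance with the mask. */
	for (unsigned v = 1; v <= 20; v++) {
		log[idx] = v;
		idx = (idx + 1) & LOG_MASK;
	}

	/* Reader: once the log has wrapped, idx (the next slot to be
	 * overwritten) is the oldest entry, exactly like vm->log_idx. */
	unsigned first = idx;
	for (unsigned i = 0; i < LOG_LEN; i++)
		printf("%u ", log[(i + first) & LOG_MASK]);
	printf("\n");                /* prints 5 .. 20, oldest first */
	return 0;
}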
+
static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
- struct msm_gem_submit *submit, char *comm, char *cmd)
+ struct msm_gem_submit *submit, struct msm_gpu_fault_info *fault_info,
+ char *comm, char *cmd)
{
struct msm_gpu_state *state;
@@ -274,19 +382,21 @@ static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
/* Fill in the additional crash state information */
state->comm = kstrdup(comm, GFP_KERNEL);
state->cmd = kstrdup(cmd, GFP_KERNEL);
- state->fault_info = gpu->fault_info;
+ if (fault_info)
+ state->fault_info = *fault_info;
- if (submit) {
- int i;
+ if (submit && state->fault_info.ttbr0) {
+ struct msm_gpu_fault_info *info = &state->fault_info;
+ struct msm_mmu *mmu = to_msm_vm(submit->vm)->mmu;
- state->bos = kcalloc(submit->nr_bos,
- sizeof(struct msm_gpu_state_bo), GFP_KERNEL);
+ msm_iommu_pagetable_params(mmu, &info->pgtbl_ttbr0,
+ &info->asid);
+ msm_iommu_pagetable_walk(mmu, info->iova, info->ptes);
+ }
- for (i = 0; state->bos && i < submit->nr_bos; i++) {
- msm_gpu_crashstate_get_bo(state, submit->bos[i].obj,
- submit->bos[i].iova,
- should_dump(submit, i));
- }
+ if (submit) {
+ crashstate_get_vm_logs(state, to_msm_vm(submit->vm));
+ crashstate_get_bos(state, submit);
}
/* Set the active crash state to be dumped on failure */
@@ -297,7 +407,8 @@ static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
}
#else
static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
- struct msm_gem_submit *submit, char *comm, char *cmd)
+ struct msm_gem_submit *submit, struct msm_gpu_fault_info *fault_info,
+ char *comm, char *cmd)
{
}
#endif
@@ -328,7 +439,7 @@ static void retire_submits(struct msm_gpu *gpu);
static void get_comm_cmdline(struct msm_gem_submit *submit, char **comm, char **cmd)
{
- struct msm_file_private *ctx = submit->queue->ctx;
+ struct msm_context *ctx = submit->queue->ctx;
struct task_struct *task;
WARN_ON(!mutex_is_locked(&submit->gpu->lock));
@@ -358,6 +469,7 @@ static void recover_worker(struct kthread_work *work)
struct msm_gem_submit *submit;
struct msm_ringbuffer *cur_ring = gpu->funcs->active_ring(gpu);
char *comm = NULL, *cmd = NULL;
+ struct task_struct *task;
int i;
mutex_lock(&gpu->lock);
@@ -375,8 +487,24 @@ static void recover_worker(struct kthread_work *work)
/* Increment the fault counts */
submit->queue->faults++;
- if (submit->aspace)
- submit->aspace->faults++;
+
+ task = get_pid_task(submit->pid, PIDTYPE_PID);
+ if (!task)
+ gpu->global_faults++;
+ else {
+ struct msm_gem_vm *vm = to_msm_vm(submit->vm);
+
+ vm->faults++;
+
+ /*
+ * If userspace has opted-in to VM_BIND (and therefore userspace
+ * management of the VM), faults mark the VM as unusable. This
+ * matches vulkan expectations (vulkan is the main target for
+ * VM_BIND).
+ */
+ if (!vm->managed)
+ msm_gem_vm_unusable(submit->vm);
+ }
get_comm_cmdline(submit, &comm, &cmd);
@@ -394,7 +522,7 @@ static void recover_worker(struct kthread_work *work)
/* Record the crash state */
pm_runtime_get_sync(&gpu->pdev->dev);
- msm_gpu_crashstate_capture(gpu, submit, comm, cmd);
+ msm_gpu_crashstate_capture(gpu, submit, NULL, comm, cmd);
kfree(cmd);
kfree(comm);
@@ -434,8 +562,15 @@ static void recover_worker(struct kthread_work *work)
unsigned long flags;
spin_lock_irqsave(&ring->submit_lock, flags);
- list_for_each_entry(submit, &ring->submits, node)
+ list_for_each_entry(submit, &ring->submits, node) {
+ /*
+ * If the submit uses an unusable vm make sure
+ * we don't actually run it
+ */
+ if (to_msm_vm(submit->vm)->unusable)
+ submit->nr_cmds = 0;
gpu->funcs->submit(gpu, submit);
+ }
spin_unlock_irqrestore(&ring->submit_lock, flags);
}
}
@@ -448,9 +583,8 @@ out_unlock:
msm_gpu_retire(gpu);
}
-static void fault_worker(struct kthread_work *work)
+void msm_gpu_fault_crashstate_capture(struct msm_gpu *gpu, struct msm_gpu_fault_info *fault_info)
{
- struct msm_gpu *gpu = container_of(work, struct msm_gpu, fault_work);
struct msm_gem_submit *submit;
struct msm_ringbuffer *cur_ring = gpu->funcs->active_ring(gpu);
char *comm = NULL, *cmd = NULL;
@@ -473,16 +607,13 @@ static void fault_worker(struct kthread_work *work)
/* Record the crash state */
pm_runtime_get_sync(&gpu->pdev->dev);
- msm_gpu_crashstate_capture(gpu, submit, comm, cmd);
+ msm_gpu_crashstate_capture(gpu, submit, fault_info, comm, cmd);
pm_runtime_put_sync(&gpu->pdev->dev);
kfree(cmd);
kfree(comm);
resume_smmu:
- memset(&gpu->fault_info, 0, sizeof(gpu->fault_info));
- gpu->aspace->mmu->funcs->resume_translation(gpu->aspace->mmu);
-
mutex_unlock(&gpu->lock);
}
@@ -510,7 +641,7 @@ static bool made_progress(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
static void hangcheck_handler(struct timer_list *t)
{
- struct msm_gpu *gpu = from_timer(gpu, t, hangcheck_timer);
+ struct msm_gpu *gpu = timer_container_of(gpu, t, hangcheck_timer);
struct drm_device *dev = gpu->dev;
struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu);
uint32_t fence = ring->memptrs->fence;
@@ -781,7 +912,7 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
mutex_unlock(&gpu->active_lock);
gpu->funcs->submit(gpu, submit);
- gpu->cur_ctx_seqno = submit->queue->ctx->seqno;
+ submit->ring->cur_ctx_seqno = submit->queue->ctx->seqno;
pm_runtime_put(&gpu->pdev->dev);
hangcheck_timer_reset(gpu);
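
[Editor's note, not part of the patch: moving cur_ctx_seqno from struct msm_gpu to struct msm_ringbuffer makes context-switch detection per ring rather than global. A hedged sketch of how a consumer could use it; the helper name is hypothetical:]

/* Hypothetical helper: a context-switch preamble is needed iff the last
 * submit on *this ring* came from a different context; activity on
 * other rings no longer influences the answer. */
static bool ring_needs_ctx_switch(struct msm_ringbuffer *ring,
				  struct msm_gem_submit *submit)
{
	return ring->cur_ctx_seqno != submit->queue->ctx->seqno;
}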
@@ -818,10 +949,12 @@ static int get_clocks(struct platform_device *pdev, struct msm_gpu *gpu)
}
/* Return a new address space for a msm_drm_private instance */
-struct msm_gem_address_space *
-msm_gpu_create_private_address_space(struct msm_gpu *gpu, struct task_struct *task)
+struct drm_gpuvm *
+msm_gpu_create_private_vm(struct msm_gpu *gpu, struct task_struct *task,
+ bool kernel_managed)
{
- struct msm_gem_address_space *aspace = NULL;
+ struct drm_gpuvm *vm = NULL;
+
if (!gpu)
return NULL;
@@ -829,16 +962,16 @@ msm_gpu_create_private_address_space(struct msm_gpu *gpu, struct task_struct *ta
* If the target doesn't support private address spaces then return
* the global one
*/
- if (gpu->funcs->create_private_address_space) {
- aspace = gpu->funcs->create_private_address_space(gpu);
- if (!IS_ERR(aspace))
- aspace->pid = get_pid(task_pid(task));
+ if (gpu->funcs->create_private_vm) {
+ vm = gpu->funcs->create_private_vm(gpu, kernel_managed);
+ if (!IS_ERR(vm))
+ to_msm_vm(vm)->pid = get_pid(task_pid(task));
}
- if (IS_ERR_OR_NULL(aspace))
- aspace = msm_gem_address_space_get(gpu->aspace);
+ if (IS_ERR_OR_NULL(vm))
+ vm = drm_gpuvm_get(gpu->vm);
- return aspace;
+ return vm;
}
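
[Editor's note, a usage sketch under stated assumptions: a per-file open path would typically call msm_gpu_create_private_vm() and rely on its fallback to the shared gpu->vm on targets without per-process pagetables. The caller below and its ctx->vm field are illustrative, not taken from this patch:]

/* Illustrative caller: create a kernel-managed per-process VM at open
 * time (userspace can later opt in to a userspace-managed VM via
 * VM_BIND).  NULL is only returned when no GPU has probed. */
static int context_init_vm(struct msm_context *ctx, struct msm_gpu *gpu)
{
	ctx->vm = msm_gpu_create_private_vm(gpu, current, true);
	if (!ctx->vm)
		return -ENODEV;
	return 0;
}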
int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
@@ -857,7 +990,7 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
gpu->funcs = funcs;
gpu->name = name;
- gpu->worker = kthread_create_worker(0, "gpu-worker");
+ gpu->worker = kthread_run_worker(0, "gpu-worker");
if (IS_ERR(gpu->worker)) {
ret = PTR_ERR(gpu->worker);
gpu->worker = NULL;
@@ -871,7 +1004,6 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
init_waitqueue_head(&gpu->retire_event);
kthread_init_work(&gpu->retire_work, retire_worker);
kthread_init_work(&gpu->recover_work, recover_worker);
- kthread_init_work(&gpu->fault_work, fault_worker);
priv->hangcheck_period = DRM_MSM_HANGCHECK_DEFAULT_PERIOD;
@@ -929,24 +1061,19 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
if (IS_ERR(gpu->gpu_cx))
gpu->gpu_cx = NULL;
- gpu->pdev = pdev;
platform_set_drvdata(pdev, &gpu->adreno_smmu);
msm_devfreq_init(gpu);
-
- gpu->aspace = gpu->funcs->create_address_space(gpu, pdev);
-
- if (gpu->aspace == NULL)
- DRM_DEV_INFO(drm->dev, "%s: no IOMMU, fallback to VRAM carveout!\n", name);
- else if (IS_ERR(gpu->aspace)) {
- ret = PTR_ERR(gpu->aspace);
+ gpu->vm = gpu->funcs->create_vm(gpu, pdev);
+ if (IS_ERR(gpu->vm)) {
+ ret = PTR_ERR(gpu->vm);
goto fail;
}
memptrs = msm_gem_kernel_new(drm,
sizeof(struct msm_rbmemptrs) * nr_rings,
- check_apriv(gpu, MSM_BO_WC), gpu->aspace, &gpu->memptrs_bo,
+ check_apriv(gpu, MSM_BO_WC), gpu->vm, &gpu->memptrs_bo,
&memptrs_iova);
if (IS_ERR(memptrs)) {
@@ -990,7 +1117,7 @@ fail:
gpu->rb[i] = NULL;
}
- msm_gem_kernel_put(gpu->memptrs_bo, gpu->aspace);
+ msm_gem_kernel_put(gpu->memptrs_bo, gpu->vm);
platform_set_drvdata(pdev, NULL);
return ret;
@@ -1007,11 +1134,12 @@ void msm_gpu_cleanup(struct msm_gpu *gpu)
gpu->rb[i] = NULL;
}
- msm_gem_kernel_put(gpu->memptrs_bo, gpu->aspace);
+ msm_gem_kernel_put(gpu->memptrs_bo, gpu->vm);
- if (!IS_ERR_OR_NULL(gpu->aspace)) {
- gpu->aspace->mmu->funcs->detach(gpu->aspace->mmu);
- msm_gem_address_space_put(gpu->aspace);
+ if (!IS_ERR_OR_NULL(gpu->vm)) {
+ struct msm_mmu *mmu = to_msm_vm(gpu->vm)->mmu;
+ mmu->funcs->detach(mmu);
+ drm_gpuvm_put(gpu->vm);
}
if (gpu->worker) {