Diffstat (limited to 'drivers/gpu/drm/msm/msm_gpu.c')
-rw-r--r--  drivers/gpu/drm/msm/msm_gpu.c  678
 1 file changed, 359 insertions(+), 319 deletions(-)
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 0ebf7bc6ad09..995549d0bbbc 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -4,17 +4,17 @@
* Author: Rob Clark <robdclark@gmail.com>
*/
+#include "drm/drm_drv.h"
+
#include "msm_gpu.h"
#include "msm_gem.h"
#include "msm_mmu.h"
#include "msm_fence.h"
#include "msm_gpu_trace.h"
-#include "adreno/adreno_gpu.h"
#include <generated/utsrelease.h>
#include <linux/string_helpers.h>
-#include <linux/devfreq.h>
-#include <linux/devfreq_cooling.h>
#include <linux/devcoredump.h>
#include <linux/sched/task.h>
@@ -22,106 +22,6 @@
* Power Management:
*/
-static int msm_devfreq_target(struct device *dev, unsigned long *freq,
- u32 flags)
-{
- struct msm_gpu *gpu = dev_to_gpu(dev);
- struct dev_pm_opp *opp;
-
- opp = devfreq_recommended_opp(dev, freq, flags);
-
- if (IS_ERR(opp))
- return PTR_ERR(opp);
-
- trace_msm_gpu_freq_change(dev_pm_opp_get_freq(opp));
-
- if (gpu->funcs->gpu_set_freq)
- gpu->funcs->gpu_set_freq(gpu, opp);
- else
- clk_set_rate(gpu->core_clk, *freq);
-
- dev_pm_opp_put(opp);
-
- return 0;
-}
-
-static int msm_devfreq_get_dev_status(struct device *dev,
- struct devfreq_dev_status *status)
-{
- struct msm_gpu *gpu = dev_to_gpu(dev);
- ktime_t time;
-
- if (gpu->funcs->gpu_get_freq)
- status->current_frequency = gpu->funcs->gpu_get_freq(gpu);
- else
- status->current_frequency = clk_get_rate(gpu->core_clk);
-
- status->busy_time = gpu->funcs->gpu_busy(gpu);
-
- time = ktime_get();
- status->total_time = ktime_us_delta(time, gpu->devfreq.time);
- gpu->devfreq.time = time;
-
- return 0;
-}
-
-static int msm_devfreq_get_cur_freq(struct device *dev, unsigned long *freq)
-{
- struct msm_gpu *gpu = dev_to_gpu(dev);
-
- if (gpu->funcs->gpu_get_freq)
- *freq = gpu->funcs->gpu_get_freq(gpu);
- else
- *freq = clk_get_rate(gpu->core_clk);
-
- return 0;
-}
-
-static struct devfreq_dev_profile msm_devfreq_profile = {
- .polling_ms = 10,
- .target = msm_devfreq_target,
- .get_dev_status = msm_devfreq_get_dev_status,
- .get_cur_freq = msm_devfreq_get_cur_freq,
-};
-
-static void msm_devfreq_init(struct msm_gpu *gpu)
-{
- /* We need target support to do devfreq */
- if (!gpu->funcs->gpu_busy)
- return;
-
- msm_devfreq_profile.initial_freq = gpu->fast_rate;
-
- /*
- * Don't set the freq_table or max_state and let devfreq build the table
- * from OPP
- * After a deferred probe, these may have be left to non-zero values,
- * so set them back to zero before creating the devfreq device
- */
- msm_devfreq_profile.freq_table = NULL;
- msm_devfreq_profile.max_state = 0;
-
- gpu->devfreq.devfreq = devm_devfreq_add_device(&gpu->pdev->dev,
- &msm_devfreq_profile, DEVFREQ_GOV_SIMPLE_ONDEMAND,
- NULL);
-
- if (IS_ERR(gpu->devfreq.devfreq)) {
- DRM_DEV_ERROR(&gpu->pdev->dev, "Couldn't initialize GPU devfreq\n");
- gpu->devfreq.devfreq = NULL;
- return;
- }
-
- devfreq_suspend_device(gpu->devfreq.devfreq);
-
- gpu->cooling = of_devfreq_cooling_register(gpu->pdev->dev.of_node,
- gpu->devfreq.devfreq);
- if (IS_ERR(gpu->cooling)) {
- DRM_DEV_ERROR(&gpu->pdev->dev,
- "Couldn't register GPU cooling device\n");
- gpu->cooling = NULL;
- }
-}
-
static int enable_pwrrail(struct msm_gpu *gpu)
{
struct drm_device *dev = gpu->dev;
@@ -158,7 +58,7 @@ static int disable_pwrrail(struct msm_gpu *gpu)
static int enable_clk(struct msm_gpu *gpu)
{
if (gpu->core_clk && gpu->fast_rate)
- clk_set_rate(gpu->core_clk, gpu->fast_rate);
+ dev_pm_opp_set_rate(&gpu->pdev->dev, gpu->fast_rate);
/* Set the RBBM timer rate to 19.2Mhz */
if (gpu->rbbmtimer_clk)
@@ -177,7 +77,7 @@ static int disable_clk(struct msm_gpu *gpu)
* will be rounded down to zero anyway so it all works out.
*/
if (gpu->core_clk)
- clk_set_rate(gpu->core_clk, 27000000);
+ dev_pm_opp_set_rate(&gpu->pdev->dev, 27000000);
if (gpu->rbbmtimer_clk)
clk_set_rate(gpu->rbbmtimer_clk, 0);
@@ -196,14 +96,6 @@ static int disable_axi(struct msm_gpu *gpu)
return 0;
}
-void msm_gpu_resume_devfreq(struct msm_gpu *gpu)
-{
- gpu->devfreq.busy_cycles = 0;
- gpu->devfreq.time = ktime_get();
-
- devfreq_resume_device(gpu->devfreq.devfreq);
-}
-
int msm_gpu_pm_resume(struct msm_gpu *gpu)
{
int ret;
@@ -223,7 +115,7 @@ int msm_gpu_pm_resume(struct msm_gpu *gpu)
if (ret)
return ret;
- msm_gpu_resume_devfreq(gpu);
+ msm_devfreq_resume(gpu);
gpu->needs_hw_init = true;
@@ -237,7 +129,7 @@ int msm_gpu_pm_suspend(struct msm_gpu *gpu)
DBG("%s", gpu->name);
trace_msm_gpu_suspend(0);
- devfreq_suspend_device(gpu->devfreq.devfreq);
+ msm_devfreq_suspend(gpu);
ret = disable_axi(gpu);
if (ret)
@@ -256,11 +148,19 @@ int msm_gpu_pm_suspend(struct msm_gpu *gpu)
return 0;
}
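+
+/*
+ * Report per-context GPU utilization via the standard fdinfo keys
+ * (drm-engine-, drm-cycles-, drm-maxfreq-) documented in
+ * Documentation/gpu/drm-usage-stats.rst, so generic tools can
+ * attribute GPU time to processes.
+ */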
+void msm_gpu_show_fdinfo(struct msm_gpu *gpu, struct msm_context *ctx,
+ struct drm_printer *p)
+{
+ drm_printf(p, "drm-engine-gpu:\t%llu ns\n", ctx->elapsed_ns);
+ drm_printf(p, "drm-cycles-gpu:\t%llu\n", ctx->cycles);
+ drm_printf(p, "drm-maxfreq-gpu:\t%u Hz\n", gpu->fast_rate);
+}
+
int msm_gpu_hw_init(struct msm_gpu *gpu)
{
int ret;
- WARN_ON(!mutex_is_locked(&gpu->dev->struct_mutex));
+ WARN_ON(!mutex_is_locked(&gpu->lock));
if (!gpu->needs_hw_init)
return 0;
@@ -274,22 +174,6 @@ int msm_gpu_hw_init(struct msm_gpu *gpu)
return ret;
}
-static void update_fences(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
- uint32_t fence)
-{
- struct msm_gem_submit *submit;
-
- spin_lock(&ring->submit_lock);
- list_for_each_entry(submit, &ring->submits, node) {
- if (submit->seqno > fence)
- break;
-
- msm_update_fence(submit->ring->fctx,
- submit->fence->seqno);
- }
- spin_unlock(&ring->submit_lock);
-}
-
#ifdef CONFIG_DEV_COREDUMP
static ssize_t msm_gpu_devcoredump_read(char *buffer, loff_t offset,
size_t count, void *data, size_t datalen)
@@ -313,8 +197,7 @@ static ssize_t msm_gpu_devcoredump_read(char *buffer, loff_t offset,
drm_printf(&p, "---\n");
drm_printf(&p, "kernel: " UTS_RELEASE "\n");
drm_printf(&p, "module: " KBUILD_MODNAME "\n");
- drm_printf(&p, "time: %lld.%09ld\n",
- state->time.tv_sec, state->time.tv_nsec);
+ drm_printf(&p, "time: %ptSp\n", &state->time);
if (state->comm)
drm_printf(&p, "comm: %s\n", state->comm);
if (state->cmd)
@@ -335,40 +218,152 @@ static void msm_gpu_devcoredump_free(void *data)
}
static void msm_gpu_crashstate_get_bo(struct msm_gpu_state *state,
- struct msm_gem_object *obj, u64 iova, u32 flags)
+ struct drm_gem_object *obj, u64 iova,
+ bool full, size_t offset, size_t size)
{
struct msm_gpu_state_bo *state_bo = &state->bos[state->nr_bos];
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
/* Don't record write only objects */
- state_bo->size = obj->base.size;
+ state_bo->size = size;
+ state_bo->flags = msm_obj->flags;
state_bo->iova = iova;
- /* Only store data for non imported buffer objects marked for read */
- if ((flags & MSM_SUBMIT_BO_READ) && !obj->base.import_attach) {
+ BUILD_BUG_ON(sizeof(state_bo->name) != sizeof(msm_obj->name));
+
+ memcpy(state_bo->name, msm_obj->name, sizeof(state_bo->name));
+
+ if (full) {
void *ptr;
- state_bo->data = kvmalloc(obj->base.size, GFP_KERNEL);
+ state_bo->data = kvmalloc(size, GFP_KERNEL);
if (!state_bo->data)
goto out;
- msm_gem_lock(&obj->base);
- ptr = msm_gem_get_vaddr_active(&obj->base);
- msm_gem_unlock(&obj->base);
+ ptr = msm_gem_get_vaddr_active(obj);
if (IS_ERR(ptr)) {
kvfree(state_bo->data);
state_bo->data = NULL;
goto out;
}
- memcpy(state_bo->data, ptr, obj->base.size);
- msm_gem_put_vaddr(&obj->base);
+ memcpy(state_bo->data, ptr + offset, size);
+ msm_gem_put_vaddr_locked(obj);
}
out:
state->nr_bos++;
}
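+
+/*
+ * Collect the BOs to record in the crash dump.  With VM_BIND userspace
+ * the submit does not carry a BO table, so walk the VM's mappings
+ * (locking their objects via drm_exec); otherwise walk the submit's BO
+ * list.  Contents are dumped only for buffers marked for dumping,
+ * unless rd_full is set.
+ */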
+static void crashstate_get_bos(struct msm_gpu_state *state, struct msm_gem_submit *submit)
+{
+ extern bool rd_full;
+
+ if (msm_context_is_vmbind(submit->queue->ctx)) {
+ struct drm_exec exec;
+ struct drm_gpuva *vma;
+ unsigned cnt = 0;
+
+ drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES, 0);
+ drm_exec_until_all_locked(&exec) {
+ cnt = 0;
+
+ drm_exec_lock_obj(&exec, drm_gpuvm_resv_obj(submit->vm));
+ drm_exec_retry_on_contention(&exec);
+
+ drm_gpuvm_for_each_va (vma, submit->vm) {
+ if (!vma->gem.obj)
+ continue;
+
+ cnt++;
+ drm_exec_lock_obj(&exec, vma->gem.obj);
+ drm_exec_retry_on_contention(&exec);
+			}
+		}
+
+ drm_gpuvm_for_each_va (vma, submit->vm)
+ cnt++;
+
+ state->bos = kcalloc(cnt, sizeof(struct msm_gpu_state_bo), GFP_KERNEL);
+
+ if (state->bos)
+ drm_gpuvm_for_each_va(vma, submit->vm) {
+ bool dump = rd_full || (vma->flags & MSM_VMA_DUMP);
+
+ /* Skip MAP_NULL/PRR VMAs: */
+ if (!vma->gem.obj)
+ continue;
+
+ msm_gpu_crashstate_get_bo(state, vma->gem.obj, vma->va.addr,
+ dump, vma->gem.offset, vma->va.range);
+ }
+
+ drm_exec_fini(&exec);
+ } else {
+ state->bos = kcalloc(submit->nr_bos,
+ sizeof(struct msm_gpu_state_bo), GFP_KERNEL);
+
+ for (int i = 0; state->bos && i < submit->nr_bos; i++) {
+ struct drm_gem_object *obj = submit->bos[i].obj;
+ bool dump = rd_full || (submit->bos[i].flags & MSM_SUBMIT_BO_DUMP);
+
+ msm_gem_lock(obj);
+ msm_gpu_crashstate_get_bo(state, obj, submit->bos[i].iova,
+ dump, 0, obj->size);
+ msm_gem_unlock(obj);
+ }
+ }
+}
+
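+/*
+ * Snapshot the VM's MAP/UNMAP log, a power-of-two ring buffer indexed
+ * by log_idx, into the crash state, oldest entry first.
+ */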
+static void crashstate_get_vm_logs(struct msm_gpu_state *state, struct msm_gem_vm *vm)
+{
+ uint32_t vm_log_len = (1 << vm->log_shift);
+ uint32_t vm_log_mask = vm_log_len - 1;
+ int first;
+
+ /* Bail if no log, or empty log: */
+ if (!vm->log || !vm->log[0].op)
+ return;
+
+ mutex_lock(&vm->mmu_lock);
+
+ /*
+ * log_idx is the next entry to overwrite, meaning it is the oldest, or
+ * first, entry (other than the special case handled below where the
+ * log hasn't wrapped around yet)
+ */
+ first = vm->log_idx;
+
+ if (!vm->log[first].op) {
+ /*
+ * If the next log entry has not been written yet, then only
+ * entries 0 to idx-1 are valid (ie. we haven't wrapped around
+ * yet)
+ */
+ state->nr_vm_logs = MAX(0, first - 1);
+ first = 0;
+ } else {
+ state->nr_vm_logs = vm_log_len;
+ }
+
+ state->vm_logs = kmalloc_array(
+ state->nr_vm_logs, sizeof(vm->log[0]), GFP_KERNEL);
+ if (!state->vm_logs) {
+ state->nr_vm_logs = 0;
+ }
+
+ for (int i = 0; i < state->nr_vm_logs; i++) {
+ int idx = (i + first) & vm_log_mask;
+
+ state->vm_logs[i] = vm->log[idx];
+ }
+
+ mutex_unlock(&vm->mmu_lock);
+}
+
static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
- struct msm_gem_submit *submit, char *comm, char *cmd)
+ struct msm_gem_submit *submit, struct msm_gpu_fault_info *fault_info,
+ char *comm, char *cmd)
{
struct msm_gpu_state *state;
@@ -387,50 +382,33 @@ static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
/* Fill in the additional crash state information */
state->comm = kstrdup(comm, GFP_KERNEL);
state->cmd = kstrdup(cmd, GFP_KERNEL);
- state->fault_info = gpu->fault_info;
-
- if (submit) {
- int i, nr = 0;
-
- /* count # of buffers to dump: */
- for (i = 0; i < submit->nr_bos; i++)
- if (should_dump(submit, i))
- nr++;
- /* always dump cmd bo's, but don't double count them: */
- for (i = 0; i < submit->nr_cmds; i++)
- if (!should_dump(submit, submit->cmd[i].idx))
- nr++;
-
- state->bos = kcalloc(nr,
- sizeof(struct msm_gpu_state_bo), GFP_KERNEL);
+ if (fault_info)
+ state->fault_info = *fault_info;
- for (i = 0; i < submit->nr_bos; i++) {
- if (should_dump(submit, i)) {
- msm_gpu_crashstate_get_bo(state, submit->bos[i].obj,
- submit->bos[i].iova, submit->bos[i].flags);
- }
- }
+ if (submit && state->fault_info.ttbr0) {
+ struct msm_gpu_fault_info *info = &state->fault_info;
+ struct msm_mmu *mmu = to_msm_vm(submit->vm)->mmu;
- for (i = 0; state->bos && i < submit->nr_cmds; i++) {
- int idx = submit->cmd[i].idx;
+ msm_iommu_pagetable_params(mmu, &info->pgtbl_ttbr0,
+ &info->asid);
+ msm_iommu_pagetable_walk(mmu, info->iova, info->ptes);
+ }
- if (!should_dump(submit, submit->cmd[i].idx)) {
- msm_gpu_crashstate_get_bo(state, submit->bos[idx].obj,
- submit->bos[idx].iova, submit->bos[idx].flags);
- }
- }
+ if (submit) {
+ crashstate_get_vm_logs(state, to_msm_vm(submit->vm));
+ crashstate_get_bos(state, submit);
}
/* Set the active crash state to be dumped on failure */
gpu->crashstate = state;
- /* FIXME: Release the crashstate if this errors out? */
- dev_coredumpm(gpu->dev->dev, THIS_MODULE, gpu, 0, GFP_KERNEL,
+ dev_coredumpm(&gpu->pdev->dev, THIS_MODULE, gpu, 0, GFP_KERNEL,
msm_gpu_devcoredump_read, msm_gpu_devcoredump_free);
}
#else
static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
- struct msm_gem_submit *submit, char *comm, char *cmd)
+ struct msm_gem_submit *submit, struct msm_gpu_fault_info *fault_info,
+ char *comm, char *cmd)
{
}
#endif
@@ -443,21 +421,46 @@ static struct msm_gem_submit *
find_submit(struct msm_ringbuffer *ring, uint32_t fence)
{
struct msm_gem_submit *submit;
+ unsigned long flags;
- spin_lock(&ring->submit_lock);
+ spin_lock_irqsave(&ring->submit_lock, flags);
list_for_each_entry(submit, &ring->submits, node) {
if (submit->seqno == fence) {
- spin_unlock(&ring->submit_lock);
+ spin_unlock_irqrestore(&ring->submit_lock, flags);
return submit;
}
}
- spin_unlock(&ring->submit_lock);
+ spin_unlock_irqrestore(&ring->submit_lock, flags);
return NULL;
}
static void retire_submits(struct msm_gpu *gpu);
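+
+/*
+ * Prefer the comm/cmdline that userspace set on the context (via
+ * MSM_PARAM_COMM / MSM_PARAM_CMDLINE), falling back to the submitting
+ * task's comm/cmdline when no override was provided.
+ */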
+static void get_comm_cmdline(struct msm_gem_submit *submit, char **comm, char **cmd)
+{
+ struct msm_context *ctx = submit->queue->ctx;
+ struct task_struct *task;
+
+ WARN_ON(!mutex_is_locked(&submit->gpu->lock));
+
+ /* Note that kstrdup will return NULL if argument is NULL: */
+ *comm = kstrdup(ctx->comm, GFP_KERNEL);
+ *cmd = kstrdup(ctx->cmdline, GFP_KERNEL);
+
+ task = get_pid_task(submit->pid, PIDTYPE_PID);
+ if (!task)
+ return;
+
+ if (!*comm)
+ *comm = kstrdup(task->comm, GFP_KERNEL);
+
+ if (!*cmd)
+ *cmd = kstrdup_quotable_cmdline(task, GFP_KERNEL);
+
+ put_task_struct(task);
+}
+
static void recover_worker(struct kthread_work *work)
{
struct msm_gpu *gpu = container_of(work, struct msm_gpu, recover_work);
@@ -466,49 +469,60 @@ static void recover_worker(struct kthread_work *work)
struct msm_gem_submit *submit;
struct msm_ringbuffer *cur_ring = gpu->funcs->active_ring(gpu);
char *comm = NULL, *cmd = NULL;
+ struct task_struct *task;
int i;
- mutex_lock(&dev->struct_mutex);
+ mutex_lock(&gpu->lock);
DRM_DEV_ERROR(dev->dev, "%s: hangcheck recover!\n", gpu->name);
submit = find_submit(cur_ring, cur_ring->memptrs->fence + 1);
- if (submit) {
- struct task_struct *task;
- /* Increment the fault counts */
+ /*
+ * If the submit retired while we were waiting for the worker to run,
+ * or waiting to acquire the gpu lock, then nothing more to do.
+ */
+ if (!submit)
+ goto out_unlock;
+
+ /* Increment the fault counts */
+ submit->queue->faults++;
+
+ task = get_pid_task(submit->pid, PIDTYPE_PID);
+ if (!task)
gpu->global_faults++;
- submit->queue->faults++;
+ else {
+ struct msm_gem_vm *vm = to_msm_vm(submit->vm);
- task = get_pid_task(submit->pid, PIDTYPE_PID);
- if (task) {
- comm = kstrdup(task->comm, GFP_KERNEL);
- cmd = kstrdup_quotable_cmdline(task, GFP_KERNEL);
- put_task_struct(task);
- }
+ vm->faults++;
- /* msm_rd_dump_submit() needs bo locked to dump: */
- for (i = 0; i < submit->nr_bos; i++)
- msm_gem_lock(&submit->bos[i].obj->base);
+ /*
+ * If userspace has opted-in to VM_BIND (and therefore userspace
+ * management of the VM), faults mark the VM as unusable. This
+ * matches vulkan expectations (vulkan is the main target for
+ * VM_BIND).
+ */
+ if (!vm->managed)
+ msm_gem_vm_unusable(submit->vm);
+ }
- if (comm && cmd) {
- DRM_DEV_ERROR(dev->dev, "%s: offending task: %s (%s)\n",
- gpu->name, comm, cmd);
+ get_comm_cmdline(submit, &comm, &cmd);
- msm_rd_dump_submit(priv->hangrd, submit,
- "offending task: %s (%s)", comm, cmd);
- } else {
- msm_rd_dump_submit(priv->hangrd, submit, NULL);
- }
+ if (comm && cmd) {
+ DRM_DEV_ERROR(dev->dev, "%s: offending task: %s (%s)\n",
+ gpu->name, comm, cmd);
+
+ msm_rd_dump_submit(priv->hangrd, submit,
+ "offending task: %s (%s)", comm, cmd);
+ } else {
+ DRM_DEV_ERROR(dev->dev, "%s: offending task: unknown\n", gpu->name);
- for (i = 0; i < submit->nr_bos; i++)
- msm_gem_unlock(&submit->bos[i].obj->base);
+ msm_rd_dump_submit(priv->hangrd, submit, NULL);
}
/* Record the crash state */
pm_runtime_get_sync(&gpu->pdev->dev);
- msm_gpu_crashstate_capture(gpu, submit, comm, cmd);
- pm_runtime_put_sync(&gpu->pdev->dev);
+ msm_gpu_crashstate_capture(gpu, submit, NULL, comm, cmd);
kfree(cmd);
kfree(comm);
@@ -528,18 +542,16 @@ static void recover_worker(struct kthread_work *work)
* one more to clear the faulting submit
*/
if (ring == cur_ring)
- fence++;
+ ring->memptrs->fence = ++fence;
- update_fences(gpu, ring, fence);
+ msm_update_fence(ring->fctx, fence);
}
if (msm_gpu_active(gpu)) {
/* retire completed submits, plus the one that hung: */
retire_submits(gpu);
- pm_runtime_get_sync(&gpu->pdev->dev);
gpu->funcs->recover(gpu);
- pm_runtime_put_sync(&gpu->pdev->dev);
/*
* Replay all remaining submits starting with highest priority
@@ -547,42 +559,44 @@ static void recover_worker(struct kthread_work *work)
*/
for (i = 0; i < gpu->nr_rings; i++) {
struct msm_ringbuffer *ring = gpu->rb[i];
-
- spin_lock(&ring->submit_lock);
- list_for_each_entry(submit, &ring->submits, node)
+ unsigned long flags;
+
+ spin_lock_irqsave(&ring->submit_lock, flags);
+ list_for_each_entry(submit, &ring->submits, node) {
+ /*
+ * If the submit uses an unusable vm make sure
+ * we don't actually run it
+ */
+ if (to_msm_vm(submit->vm)->unusable)
+ submit->nr_cmds = 0;
gpu->funcs->submit(gpu, submit);
- spin_unlock(&ring->submit_lock);
+ }
+ spin_unlock_irqrestore(&ring->submit_lock, flags);
}
}
- mutex_unlock(&dev->struct_mutex);
+ pm_runtime_put(&gpu->pdev->dev);
+
+out_unlock:
+ mutex_unlock(&gpu->lock);
msm_gpu_retire(gpu);
}
-static void fault_worker(struct kthread_work *work)
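+/*
+ * Capture crash state for an iova fault.  This replaces the old
+ * fault_worker: it is called from the fault handling path, and
+ * resuming stalled SMMU translation is left to the caller.
+ */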
+void msm_gpu_fault_crashstate_capture(struct msm_gpu *gpu, struct msm_gpu_fault_info *fault_info)
{
- struct msm_gpu *gpu = container_of(work, struct msm_gpu, fault_work);
- struct drm_device *dev = gpu->dev;
struct msm_gem_submit *submit;
struct msm_ringbuffer *cur_ring = gpu->funcs->active_ring(gpu);
char *comm = NULL, *cmd = NULL;
- mutex_lock(&dev->struct_mutex);
+ mutex_lock(&gpu->lock);
submit = find_submit(cur_ring, cur_ring->memptrs->fence + 1);
if (submit && submit->fault_dumped)
goto resume_smmu;
if (submit) {
- struct task_struct *task;
-
- task = get_pid_task(submit->pid, PIDTYPE_PID);
- if (task) {
- comm = kstrdup(task->comm, GFP_KERNEL);
- cmd = kstrdup_quotable_cmdline(task, GFP_KERNEL);
- put_task_struct(task);
- }
+ get_comm_cmdline(submit, &comm, &cmd);
/*
* When we get GPU iova faults, we can get 1000s of them,
@@ -593,17 +607,14 @@ static void fault_worker(struct kthread_work *work)
/* Record the crash state */
pm_runtime_get_sync(&gpu->pdev->dev);
- msm_gpu_crashstate_capture(gpu, submit, comm, cmd);
+ msm_gpu_crashstate_capture(gpu, submit, fault_info, comm, cmd);
pm_runtime_put_sync(&gpu->pdev->dev);
kfree(cmd);
kfree(comm);
resume_smmu:
- memset(&gpu->fault_info, 0, sizeof(gpu->fault_info));
- gpu->aspace->mmu->funcs->resume_translation(gpu->aspace->mmu);
-
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&gpu->lock);
}
static void hangcheck_timer_reset(struct msm_gpu *gpu)
@@ -613,9 +624,24 @@ static void hangcheck_timer_reset(struct msm_gpu *gpu)
round_jiffies_up(jiffies + msecs_to_jiffies(priv->hangcheck_period)));
}
+static bool made_progress(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
+{
+ if (ring->hangcheck_progress_retries >= DRM_MSM_HANGCHECK_PROGRESS_RETRIES)
+ return false;
+
+ if (!gpu->funcs->progress)
+ return false;
+
+ if (!gpu->funcs->progress(gpu, ring))
+ return false;
+
+ ring->hangcheck_progress_retries++;
+ return true;
+}
+
static void hangcheck_handler(struct timer_list *t)
{
- struct msm_gpu *gpu = from_timer(gpu, t, hangcheck_timer);
+ struct msm_gpu *gpu = timer_container_of(gpu, t, hangcheck_timer);
struct drm_device *dev = gpu->dev;
struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu);
uint32_t fence = ring->memptrs->fence;
@@ -623,25 +649,28 @@ static void hangcheck_handler(struct timer_list *t)
if (fence != ring->hangcheck_fence) {
/* some progress has been made.. ya! */
ring->hangcheck_fence = fence;
- } else if (fence < ring->seqno) {
+ ring->hangcheck_progress_retries = 0;
+ } else if (fence_before(fence, ring->fctx->last_fence) &&
+ !made_progress(gpu, ring)) {
/* no progress and not done.. hung! */
ring->hangcheck_fence = fence;
+ ring->hangcheck_progress_retries = 0;
DRM_DEV_ERROR(dev->dev, "%s: hangcheck detected gpu lockup rb %d!\n",
gpu->name, ring->id);
DRM_DEV_ERROR(dev->dev, "%s: completed fence: %u\n",
gpu->name, fence);
DRM_DEV_ERROR(dev->dev, "%s: submitted fence: %u\n",
- gpu->name, ring->seqno);
+ gpu->name, ring->fctx->last_fence);
kthread_queue_work(gpu->worker, &gpu->recover_work);
}
/* if still more pending work, reset the hangcheck timer: */
- if (ring->seqno > ring->hangcheck_fence)
+ if (fence_after(ring->fctx->last_fence, ring->hangcheck_fence))
hangcheck_timer_reset(gpu);
/* workaround for missing irq: */
- kthread_queue_work(gpu->worker, &gpu->retire_work);
+ msm_gpu_retire(gpu);
}
/*
@@ -751,39 +780,46 @@ static void retire_submit(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
{
int index = submit->seqno % MSM_GPU_SUBMIT_STATS_COUNT;
volatile struct msm_gpu_submit_stats *stats;
- u64 elapsed, clock = 0;
- int i;
+ u64 elapsed, clock = 0, cycles;
+ unsigned long flags;
stats = &ring->memptrs->stats[index];
/* Convert 19.2Mhz alwayson ticks to nanoseconds for elapsed time */
elapsed = (stats->alwayson_end - stats->alwayson_start) * 10000;
do_div(elapsed, 192);
+ cycles = stats->cpcycles_end - stats->cpcycles_start;
+
/* Calculate the clock frequency from the number of CP cycles */
if (elapsed) {
- clock = (stats->cpcycles_end - stats->cpcycles_start) * 1000;
+ clock = cycles * 1000;
do_div(clock, elapsed);
}
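+	/* Accumulate per-context stats reported by msm_gpu_show_fdinfo() */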
+ submit->queue->ctx->elapsed_ns += elapsed;
+ submit->queue->ctx->cycles += cycles;
+
trace_msm_gpu_submit_retired(submit, elapsed, clock,
stats->alwayson_start, stats->alwayson_end);
- for (i = 0; i < submit->nr_bos; i++) {
- struct drm_gem_object *obj = &submit->bos[i].obj->base;
-
- msm_gem_lock(obj);
- msm_gem_active_put(obj);
- msm_gem_unpin_iova_locked(obj, submit->aspace);
- msm_gem_unlock(obj);
- drm_gem_object_put(obj);
- }
+ msm_submit_retire(submit);
pm_runtime_mark_last_busy(&gpu->pdev->dev);
- pm_runtime_put_autosuspend(&gpu->pdev->dev);
- spin_lock(&ring->submit_lock);
+ spin_lock_irqsave(&ring->submit_lock, flags);
list_del(&submit->node);
- spin_unlock(&ring->submit_lock);
+ spin_unlock_irqrestore(&ring->submit_lock, flags);
+
+ /* Update devfreq on transition from active->idle: */
+ mutex_lock(&gpu->active_lock);
+ gpu->active_submits--;
+ WARN_ON(gpu->active_submits < 0);
+ if (!gpu->active_submits) {
+ msm_devfreq_idle(gpu);
+ pm_runtime_put_autosuspend(&gpu->pdev->dev);
+ }
+
+ mutex_unlock(&gpu->active_lock);
msm_gem_submit_put(submit);
}
@@ -798,33 +834,32 @@ static void retire_submits(struct msm_gpu *gpu)
while (true) {
struct msm_gem_submit *submit = NULL;
+ unsigned long flags;
- spin_lock(&ring->submit_lock);
+ spin_lock_irqsave(&ring->submit_lock, flags);
submit = list_first_entry_or_null(&ring->submits,
struct msm_gem_submit, node);
- spin_unlock(&ring->submit_lock);
+ spin_unlock_irqrestore(&ring->submit_lock, flags);
/*
* If no submit, we are done. If submit->fence hasn't
* been signalled, then later submits are not signalled
* either, so we are also done.
*/
- if (submit && dma_fence_is_signaled(submit->fence)) {
+ if (submit && dma_fence_is_signaled(submit->hw_fence)) {
retire_submit(gpu, ring, submit);
} else {
break;
}
}
}
+
+ wake_up_all(&gpu->retire_event);
}
static void retire_worker(struct kthread_work *work)
{
struct msm_gpu *gpu = container_of(work, struct msm_gpu, retire_work);
- int i;
-
- for (i = 0; i < gpu->nr_rings; i++)
- update_fences(gpu, gpu->rb[i], gpu->rb[i]->memptrs->fence);
retire_submits(gpu);
}
@@ -832,6 +867,11 @@ static void retire_worker(struct kthread_work *work)
/* call from irq handler to schedule work to retire bo's */
void msm_gpu_retire(struct msm_gpu *gpu)
{
+ int i;
+
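+	/*
+	 * Propagate the fence values the GPU wrote to memptrs into each
+	 * ring's fence context before queueing the worker to retire the
+	 * now-signaled submits.
+	 */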
+ for (i = 0; i < gpu->nr_rings; i++)
+ msm_update_fence(gpu->rb[i]->fctx, gpu->rb[i]->memptrs->fence);
+
kthread_queue_work(gpu->worker, &gpu->retire_work);
update_sw_cntrs(gpu);
}
@@ -839,53 +879,42 @@ void msm_gpu_retire(struct msm_gpu *gpu)
/* add bo's to gpu's ring, and kick gpu: */
void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
{
- struct drm_device *dev = gpu->dev;
- struct msm_drm_private *priv = dev->dev_private;
struct msm_ringbuffer *ring = submit->ring;
- int i;
+ unsigned long flags;
- WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+ WARN_ON(!mutex_is_locked(&gpu->lock));
pm_runtime_get_sync(&gpu->pdev->dev);
msm_gpu_hw_init(gpu);
- submit->seqno = ++ring->seqno;
-
- msm_rd_dump_submit(priv->rd, submit, NULL);
+ submit->seqno = submit->hw_fence->seqno;
update_sw_cntrs(gpu);
- for (i = 0; i < submit->nr_bos; i++) {
- struct msm_gem_object *msm_obj = submit->bos[i].obj;
- struct drm_gem_object *drm_obj = &msm_obj->base;
- uint64_t iova;
-
- /* submit takes a reference to the bo and iova until retired: */
- drm_gem_object_get(&msm_obj->base);
- msm_gem_get_and_pin_iova_locked(&msm_obj->base, submit->aspace, &iova);
-
- if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE)
- dma_resv_add_excl_fence(drm_obj->resv, submit->fence);
- else if (submit->bos[i].flags & MSM_SUBMIT_BO_READ)
- dma_resv_add_shared_fence(drm_obj->resv, submit->fence);
-
- msm_gem_active_get(drm_obj, gpu);
- }
-
/*
* ring->submits holds a ref to the submit, to deal with the case
* that a submit completes before msm_ioctl_gem_submit() returns.
*/
msm_gem_submit_get(submit);
- spin_lock(&ring->submit_lock);
+ spin_lock_irqsave(&ring->submit_lock, flags);
list_add_tail(&submit->node, &ring->submits);
- spin_unlock(&ring->submit_lock);
+ spin_unlock_irqrestore(&ring->submit_lock, flags);
+
+ /* Update devfreq on transition from idle->active: */
+ mutex_lock(&gpu->active_lock);
+ if (!gpu->active_submits) {
+ pm_runtime_get(&gpu->pdev->dev);
+ msm_devfreq_active(gpu);
+ }
+ gpu->active_submits++;
+ mutex_unlock(&gpu->active_lock);
gpu->funcs->submit(gpu, submit);
- priv->lastctx = submit->queue->ctx;
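+	/* Record the most recent ctx seen on this ring, so the backend can
+	 * detect context switches (e.g. to switch pagetables):
+	 */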
+ submit->ring->cur_ctx_seqno = submit->queue->ctx->seqno;
+ pm_runtime_put(&gpu->pdev->dev);
hangcheck_timer_reset(gpu);
}
@@ -920,10 +949,12 @@ static int get_clocks(struct platform_device *pdev, struct msm_gpu *gpu)
}
/* Return a new address space for a msm_drm_private instance */
-struct msm_gem_address_space *
-msm_gpu_create_private_address_space(struct msm_gpu *gpu, struct task_struct *task)
+struct drm_gpuvm *
+msm_gpu_create_private_vm(struct msm_gpu *gpu, struct task_struct *task,
+ bool kernel_managed)
{
- struct msm_gem_address_space *aspace = NULL;
+ struct drm_gpuvm *vm = NULL;
+
if (!gpu)
return NULL;
@@ -931,22 +962,23 @@ msm_gpu_create_private_address_space(struct msm_gpu *gpu, struct task_struct *ta
* If the target doesn't support private address spaces then return
* the global one
*/
- if (gpu->funcs->create_private_address_space) {
- aspace = gpu->funcs->create_private_address_space(gpu);
- if (!IS_ERR(aspace))
- aspace->pid = get_pid(task_pid(task));
+ if (gpu->funcs->create_private_vm) {
+ vm = gpu->funcs->create_private_vm(gpu, kernel_managed);
+ if (!IS_ERR(vm))
+ to_msm_vm(vm)->pid = get_pid(task_pid(task));
}
- if (IS_ERR_OR_NULL(aspace))
- aspace = msm_gem_address_space_get(gpu->aspace);
+ if (IS_ERR_OR_NULL(vm))
+ vm = drm_gpuvm_get(gpu->vm);
- return aspace;
+ return vm;
}
int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
const char *name, struct msm_gpu_config *config)
{
+ struct msm_drm_private *priv = drm->dev_private;
int i, ret, nr_rings = config->nr_rings;
void *memptrs;
uint64_t memptrs_iova;
@@ -958,7 +990,7 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
gpu->funcs = funcs;
gpu->name = name;
- gpu->worker = kthread_create_worker(0, "%s-worker", gpu->name);
+ gpu->worker = kthread_run_worker(0, "gpu-worker");
if (IS_ERR(gpu->worker)) {
ret = PTR_ERR(gpu->worker);
gpu->worker = NULL;
@@ -967,10 +999,21 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
sched_set_fifo_low(gpu->worker->task);
- INIT_LIST_HEAD(&gpu->active_list);
+ mutex_init(&gpu->active_lock);
+ mutex_init(&gpu->lock);
+ init_waitqueue_head(&gpu->retire_event);
kthread_init_work(&gpu->retire_work, retire_worker);
kthread_init_work(&gpu->recover_work, recover_worker);
- kthread_init_work(&gpu->fault_work, fault_worker);
+
+ priv->hangcheck_period = DRM_MSM_HANGCHECK_DEFAULT_PERIOD;
+
+ /*
+ * If progress detection is supported, halve the hangcheck timer
+ * duration, as it takes two iterations of the hangcheck handler
+ * to detect a hang.
+ */
+ if (funcs->progress)
+ priv->hangcheck_period /= 2;
timer_setup(&gpu->hangcheck_timer, hangcheck_handler, 0);
@@ -978,7 +1021,7 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
/* Map registers: */
- gpu->mmio = msm_ioremap(pdev, config->ioname, name);
+ gpu->mmio = msm_ioremap(pdev, config->ioname);
if (IS_ERR(gpu->mmio)) {
ret = PTR_ERR(gpu->mmio);
goto fail;
@@ -988,12 +1031,11 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
gpu->irq = platform_get_irq(pdev, 0);
if (gpu->irq < 0) {
ret = gpu->irq;
- DRM_DEV_ERROR(drm->dev, "failed to get irq: %d\n", ret);
goto fail;
}
ret = devm_request_irq(&pdev->dev, gpu->irq, irq_handler,
- IRQF_TRIGGER_HIGH, gpu->name, gpu);
+ IRQF_TRIGGER_HIGH, "gpu-irq", gpu);
if (ret) {
DRM_DEV_ERROR(drm->dev, "failed to request IRQ%u: %d\n", gpu->irq, ret);
goto fail;
@@ -1019,24 +1061,19 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
if (IS_ERR(gpu->gpu_cx))
gpu->gpu_cx = NULL;
- gpu->pdev = pdev;
platform_set_drvdata(pdev, &gpu->adreno_smmu);
msm_devfreq_init(gpu);
-
- gpu->aspace = gpu->funcs->create_address_space(gpu, pdev);
-
- if (gpu->aspace == NULL)
- DRM_DEV_INFO(drm->dev, "%s: no IOMMU, fallback to VRAM carveout!\n", name);
- else if (IS_ERR(gpu->aspace)) {
- ret = PTR_ERR(gpu->aspace);
+ gpu->vm = gpu->funcs->create_vm(gpu, pdev);
+ if (IS_ERR(gpu->vm)) {
+ ret = PTR_ERR(gpu->vm);
goto fail;
}
memptrs = msm_gem_kernel_new(drm,
sizeof(struct msm_rbmemptrs) * nr_rings,
- check_apriv(gpu, MSM_BO_UNCACHED), gpu->aspace, &gpu->memptrs_bo,
+ check_apriv(gpu, MSM_BO_WC), gpu->vm, &gpu->memptrs_bo,
&memptrs_iova);
if (IS_ERR(memptrs)) {
@@ -1070,6 +1107,8 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
gpu->nr_rings = nr_rings;
+ refcount_set(&gpu->sysprof_active, 1);
+
return 0;
fail:
@@ -1078,7 +1117,7 @@ fail:
gpu->rb[i] = NULL;
}
- msm_gem_kernel_put(gpu->memptrs_bo, gpu->aspace, false);
+ msm_gem_kernel_put(gpu->memptrs_bo, gpu->vm);
platform_set_drvdata(pdev, NULL);
return ret;
@@ -1090,23 +1129,24 @@ void msm_gpu_cleanup(struct msm_gpu *gpu)
DBG("%s", gpu->name);
- WARN_ON(!list_empty(&gpu->active_list));
-
for (i = 0; i < ARRAY_SIZE(gpu->rb); i++) {
msm_ringbuffer_destroy(gpu->rb[i]);
gpu->rb[i] = NULL;
}
- msm_gem_kernel_put(gpu->memptrs_bo, gpu->aspace, false);
+ msm_gem_kernel_put(gpu->memptrs_bo, gpu->vm);
- if (!IS_ERR_OR_NULL(gpu->aspace)) {
- gpu->aspace->mmu->funcs->detach(gpu->aspace->mmu);
- msm_gem_address_space_put(gpu->aspace);
+ if (!IS_ERR_OR_NULL(gpu->vm)) {
+ struct msm_mmu *mmu = to_msm_vm(gpu->vm)->mmu;
+ mmu->funcs->detach(mmu);
+ drm_gpuvm_put(gpu->vm);
}
if (gpu->worker) {
kthread_destroy_worker(gpu->worker);
}
- devfreq_cooling_unregister(gpu->cooling);
+ msm_devfreq_cleanup(gpu);
+
+ platform_set_drvdata(gpu->pdev, NULL);
}