author     Dave Airlie <airlied@redhat.com>    2019-05-28 08:25:46 +1000
committer  Dave Airlie <airlied@redhat.com>    2019-05-28 08:59:11 +1000
commit     88cd7a2c1b29f61a2a3fab76216a43f3b779e0cd (patch)
tree       b8a4311c8f7edfc92c1055b6a5a86b1a5552878e /drivers/gpu/drm/v3d/v3d_sched.c
parent     cd6c84d8f0cdc911df435bb075ba22ce3c605b07 (diff)
parent     909fa3321d348ef69366aad9e84e1dd9ee0bd060 (diff)
Merge tag 'drm-misc-next-2019-05-24' of git://anongit.freedesktop.org/drm/drm-misc into drm-next
drm-misc-next for v5.3, try #2:
UAPI Changes:
- Add HDR source metadata property.
- Make drm.h compile on GNU/kFreeBSD by including stdint.h
- Clarify how userspace review of new kernel UAPI has to be done.
- Clarify that for using new UAPI, merging to drm-next or drm-misc-next should be enough.
Cross-subsystem Changes:
- video/hdmi: Add unpack function for DRM infoframes.
- Device tree bindings:
* Updating a property for Mali Midgard GPUs
* Updating a property for STM32 DSI panel
* Adding support for FriendlyELEC HD702E 800x1280 panel
* Adding support for Evervision VGG804821 800x480 5.0" WVGA TFT panel
* Adding support for the EDT ET035012DM6 3.5" 320x240 QVGA 24-bit RGB TFT.
* Adding support for Three Five displays TFC S9700RTWV43TR-01B 800x480 panel
with resistive touch found on TI's AM335X-EVM.
* Adding support for EDT ETM0430G0DH6 480x272 panel.
- Add OSD101T2587-53TS driver with DT bindings.
- Add Samsung S6E63M0 panel driver with DT bindings.
- Add VXT VL050-8048NT-C01 800x480 panel with DT bindings.
- Dma-buf:
  * Make mmap callback actually optional.
  * Documentation updates.
  * Fix debugfs refcount imbalance.
  * Remove unused sync_dump function.
- Fix device tree bindings in drm-misc-next after a botched merge.
Core Changes:
- Add support for HDR infoframes and related EDID parsing.
- Remove prime sg_table caching, now done inside dma-buf.
- Add shiny new drm_gem_vram helpers for simple VRAM drivers,
  with some fixes to the new API on top.
- Small fix to job cleanup without timeout handler.
- Documentation fixes to drm_fourcc.
- Replace lookups of drm_format with struct drm_format_info;
remove functions that become obsolete by this conversion.
- Remove double include in bridge/panel.c and some drivers.
- Remove drmP.h include from drm/edid and drm/dp.
- Fix null pointer deref in drm_fb_helper_hotplug_event().
- Remove most members from drm_fb_helper_crtc, only mode_set is kept.
- Remove race of fb helpers with userspace; only restore mode
when userspace is not master.
- Move legacy setup from drm_file.c to drm_legacy_misc.c
- Rework scheduler job destruction.
- drm/bus was removed, remove from TODO.
- Add __drm_atomic_helper_crtc_reset() to subclass crtc_state,
  and convert some drivers to use it (conversion is not complete yet);
  a rough sketch of the subclassing pattern follows this list.
- Bump vblank timeout wait to 100 ms for atomic.
- Docbook fix for drm_hdmi_infoframe_set_hdr_metadata.
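To illustrate the __drm_atomic_helper_crtc_reset() item above: a driver
that embeds drm_crtc_state in a private state struct can now hand its
subclassed state to the helper instead of open-coding the reset hook.
A minimal sketch, assuming a hypothetical "foo" driver -- the struct,
field, and callback names below are illustrative, not from this series:

    #include <drm/drm_atomic_state_helper.h>
    #include <drm/drm_crtc.h>
    #include <linux/slab.h>

    /* Hypothetical driver-private CRTC state wrapping drm_crtc_state. */
    struct foo_crtc_state {
            struct drm_crtc_state base;
            u32 scaler_config;      /* illustrative private field */
    };

    static void foo_crtc_reset(struct drm_crtc *crtc)
    {
            struct foo_crtc_state *state = kzalloc(sizeof(*state), GFP_KERNEL);

            /* Free any previously subclassed state first. */
            if (crtc->state)
                    crtc->funcs->atomic_destroy_state(crtc, crtc->state);

            /* The helper links the embedded base state to the CRTC
             * (or leaves crtc->state NULL if the allocation failed).
             */
            __drm_atomic_helper_crtc_reset(crtc, state ? &state->base : NULL);
    }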
Driver Changes:
- sun4i: Use DRM_GEM_CMA_VMAP_DRIVER_OPS instead of defining manually.
- v3d: Small cleanups, support for compute shaders,
  reservation/synchronization fixes, job management refactoring,
  and fixes to the MMU and debugfs.
- lima: Fix null pointer in irq handler on startup, set default timeout for scheduled jobs.
- stm/ltdc: Assorted fixes and adding FB modifier support.
- amdgpu: Avoid hw reset if guilty job was already signaled.
- virtio: Add seqno to fences, add trace events, use correct flags for fence allocation.
- Convert AST, bochs, mgag200, vboxvideo, hisilicon to the new drm_gem_vram API.
- sun6i_mipi_dsi: Support DSI GENERIC_SHORT_WRITE_2 transfers.
- bochs: Small fixes to use PTR_RET_OR_ZERO and to driver unload.
- gma500: Header fixes.
- cirrus: Remove unused files.
- mediatek: Fix compiler warning after merging the HDR series.
- vc4: Rework binner bo handling.
Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/052875a5-27ba-3832-60c2-193d950afdff@linux.intel.com
Diffstat (limited to 'drivers/gpu/drm/v3d/v3d_sched.c')
-rw-r--r--    drivers/gpu/drm/v3d/v3d_sched.c    382
1 file changed, 250 insertions, 132 deletions
diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c
index e740f3b99aa5..8c2df6d95283 100644
--- a/drivers/gpu/drm/v3d/v3d_sched.c
+++ b/drivers/gpu/drm/v3d/v3d_sched.c
@@ -30,158 +30,152 @@ to_v3d_job(struct drm_sched_job *sched_job)
 	return container_of(sched_job, struct v3d_job, base);
 }
 
-static struct v3d_tfu_job *
-to_tfu_job(struct drm_sched_job *sched_job)
+static struct v3d_bin_job *
+to_bin_job(struct drm_sched_job *sched_job)
 {
-	return container_of(sched_job, struct v3d_tfu_job, base);
+	return container_of(sched_job, struct v3d_bin_job, base.base);
 }
 
-static void
-v3d_job_free(struct drm_sched_job *sched_job)
+static struct v3d_render_job *
+to_render_job(struct drm_sched_job *sched_job)
 {
-	struct v3d_job *job = to_v3d_job(sched_job);
+	return container_of(sched_job, struct v3d_render_job, base.base);
+}
 
-	drm_sched_job_cleanup(sched_job);
+static struct v3d_tfu_job *
+to_tfu_job(struct drm_sched_job *sched_job)
+{
+	return container_of(sched_job, struct v3d_tfu_job, base.base);
+}
 
-	v3d_exec_put(job->exec);
+static struct v3d_csd_job *
+to_csd_job(struct drm_sched_job *sched_job)
+{
+	return container_of(sched_job, struct v3d_csd_job, base.base);
 }
 
 static void
-v3d_tfu_job_free(struct drm_sched_job *sched_job)
+v3d_job_free(struct drm_sched_job *sched_job)
 {
-	struct v3d_tfu_job *job = to_tfu_job(sched_job);
+	struct v3d_job *job = to_v3d_job(sched_job);
 
 	drm_sched_job_cleanup(sched_job);
-
-	v3d_tfu_job_put(job);
+	v3d_job_put(job);
 }
 
 /**
- * Returns the fences that the bin or render job depends on, one by one.
- * v3d_job_run() won't be called until all of them have been signaled.
+ * Returns the fences that the job depends on, one by one.
+ *
+ * If placed in the scheduler's .dependency method, the corresponding
+ * .run_job won't be called until all of them have been signaled.
  */
 static struct dma_fence *
 v3d_job_dependency(struct drm_sched_job *sched_job,
 		   struct drm_sched_entity *s_entity)
 {
 	struct v3d_job *job = to_v3d_job(sched_job);
-	struct v3d_exec_info *exec = job->exec;
-	enum v3d_queue q = job == &exec->bin ? V3D_BIN : V3D_RENDER;
-	struct dma_fence *fence;
-
-	fence = job->in_fence;
-	if (fence) {
-		job->in_fence = NULL;
-		return fence;
-	}
-
-	if (q == V3D_RENDER) {
-		/* If we had a bin job, the render job definitely depends on
-		 * it. We first have to wait for bin to be scheduled, so that
-		 * its done_fence is created.
-		 */
-		fence = exec->bin_done_fence;
-		if (fence) {
-			exec->bin_done_fence = NULL;
-			return fence;
-		}
-	}
 
 	/* XXX: Wait on a fence for switching the GMP if necessary,
 	 * and then do so.
 	 */
 
-	return fence;
-}
-
-/**
- * Returns the fences that the TFU job depends on, one by one.
- * v3d_tfu_job_run() won't be called until all of them have been
- * signaled.
- */
-static struct dma_fence *
-v3d_tfu_job_dependency(struct drm_sched_job *sched_job,
-		       struct drm_sched_entity *s_entity)
-{
-	struct v3d_tfu_job *job = to_tfu_job(sched_job);
-	struct dma_fence *fence;
-
-	fence = job->in_fence;
-	if (fence) {
-		job->in_fence = NULL;
-		return fence;
-	}
+	if (!xa_empty(&job->deps))
+		return xa_erase(&job->deps, job->last_dep++);
 
 	return NULL;
 }
 
-static struct dma_fence *v3d_job_run(struct drm_sched_job *sched_job)
+static struct dma_fence *v3d_bin_job_run(struct drm_sched_job *sched_job)
 {
-	struct v3d_job *job = to_v3d_job(sched_job);
-	struct v3d_exec_info *exec = job->exec;
-	enum v3d_queue q = job == &exec->bin ? V3D_BIN : V3D_RENDER;
-	struct v3d_dev *v3d = exec->v3d;
+	struct v3d_bin_job *job = to_bin_job(sched_job);
+	struct v3d_dev *v3d = job->base.v3d;
 	struct drm_device *dev = &v3d->drm;
 	struct dma_fence *fence;
 	unsigned long irqflags;
 
-	if (unlikely(job->base.s_fence->finished.error))
+	if (unlikely(job->base.base.s_fence->finished.error))
 		return NULL;
 
 	/* Lock required around bin_job update vs
 	 * v3d_overflow_mem_work().
 	 */
 	spin_lock_irqsave(&v3d->job_lock, irqflags);
-	if (q == V3D_BIN) {
-		v3d->bin_job = job->exec;
-
-		/* Clear out the overflow allocation, so we don't
-		 * reuse the overflow attached to a previous job.
-		 */
-		V3D_CORE_WRITE(0, V3D_PTB_BPOS, 0);
-	} else {
-		v3d->render_job = job->exec;
-	}
+	v3d->bin_job = job;
+	/* Clear out the overflow allocation, so we don't
+	 * reuse the overflow attached to a previous job.
	 */
+	V3D_CORE_WRITE(0, V3D_PTB_BPOS, 0);
 	spin_unlock_irqrestore(&v3d->job_lock, irqflags);
 
-	/* Can we avoid this flush when q==RENDER?  We need to be
-	 * careful of scheduling, though -- imagine job0 rendering to
-	 * texture and job1 reading, and them being executed as bin0,
-	 * bin1, render0, render1, so that render1's flush at bin time
+	v3d_invalidate_caches(v3d);
+
+	fence = v3d_fence_create(v3d, V3D_BIN);
+	if (IS_ERR(fence))
+		return NULL;
+
+	if (job->base.irq_fence)
+		dma_fence_put(job->base.irq_fence);
+	job->base.irq_fence = dma_fence_get(fence);
+
+	trace_v3d_submit_cl(dev, false, to_v3d_fence(fence)->seqno,
+			    job->start, job->end);
+
+	/* Set the current and end address of the control list.
+	 * Writing the end register is what starts the job.
+	 */
+	if (job->qma) {
+		V3D_CORE_WRITE(0, V3D_CLE_CT0QMA, job->qma);
+		V3D_CORE_WRITE(0, V3D_CLE_CT0QMS, job->qms);
+	}
+	if (job->qts) {
+		V3D_CORE_WRITE(0, V3D_CLE_CT0QTS,
+			       V3D_CLE_CT0QTS_ENABLE |
+			       job->qts);
+	}
+	V3D_CORE_WRITE(0, V3D_CLE_CT0QBA, job->start);
+	V3D_CORE_WRITE(0, V3D_CLE_CT0QEA, job->end);
+
+	return fence;
+}
+
+static struct dma_fence *v3d_render_job_run(struct drm_sched_job *sched_job)
+{
+	struct v3d_render_job *job = to_render_job(sched_job);
+	struct v3d_dev *v3d = job->base.v3d;
+	struct drm_device *dev = &v3d->drm;
+	struct dma_fence *fence;
+
+	if (unlikely(job->base.base.s_fence->finished.error))
+		return NULL;
+
+	v3d->render_job = job;
+
+	/* Can we avoid this flush?  We need to be careful of
+	 * scheduling, though -- imagine job0 rendering to texture and
+	 * job1 reading, and them being executed as bin0, bin1,
+	 * render0, render1, so that render1's flush at bin time
 	 * wasn't enough.
 	 */
 	v3d_invalidate_caches(v3d);
 
-	fence = v3d_fence_create(v3d, q);
+	fence = v3d_fence_create(v3d, V3D_RENDER);
 	if (IS_ERR(fence))
 		return NULL;
 
-	if (job->irq_fence)
-		dma_fence_put(job->irq_fence);
-	job->irq_fence = dma_fence_get(fence);
+	if (job->base.irq_fence)
+		dma_fence_put(job->base.irq_fence);
+	job->base.irq_fence = dma_fence_get(fence);
 
-	trace_v3d_submit_cl(dev, q == V3D_RENDER, to_v3d_fence(fence)->seqno,
+	trace_v3d_submit_cl(dev, true, to_v3d_fence(fence)->seqno,
 			    job->start, job->end);
 
-	if (q == V3D_BIN) {
-		if (exec->qma) {
-			V3D_CORE_WRITE(0, V3D_CLE_CT0QMA, exec->qma);
-			V3D_CORE_WRITE(0, V3D_CLE_CT0QMS, exec->qms);
-		}
-		if (exec->qts) {
-			V3D_CORE_WRITE(0, V3D_CLE_CT0QTS,
-				       V3D_CLE_CT0QTS_ENABLE |
-				       exec->qts);
-		}
-	} else {
-		/* XXX: Set the QCFG */
-	}
+	/* XXX: Set the QCFG */
 
 	/* Set the current and end address of the control list.
 	 * Writing the end register is what starts the job.
 	 */
-	V3D_CORE_WRITE(0, V3D_CLE_CTNQBA(q), job->start);
-	V3D_CORE_WRITE(0, V3D_CLE_CTNQEA(q), job->end);
+	V3D_CORE_WRITE(0, V3D_CLE_CT1QBA, job->start);
+	V3D_CORE_WRITE(0, V3D_CLE_CT1QEA, job->end);
 
 	return fence;
 }
@@ -190,7 +184,7 @@ static struct dma_fence *
 v3d_tfu_job_run(struct drm_sched_job *sched_job)
 {
 	struct v3d_tfu_job *job = to_tfu_job(sched_job);
-	struct v3d_dev *v3d = job->v3d;
+	struct v3d_dev *v3d = job->base.v3d;
 	struct drm_device *dev = &v3d->drm;
 	struct dma_fence *fence;
 
@@ -199,9 +193,9 @@ v3d_tfu_job_run(struct drm_sched_job *sched_job)
 		return NULL;
 
 	v3d->tfu_job = job;
-	if (job->irq_fence)
-		dma_fence_put(job->irq_fence);
-	job->irq_fence = dma_fence_get(fence);
+	if (job->base.irq_fence)
+		dma_fence_put(job->base.irq_fence);
+	job->base.irq_fence = dma_fence_get(fence);
 
 	trace_v3d_submit_tfu(dev, to_v3d_fence(fence)->seqno);
 
@@ -223,6 +217,48 @@ v3d_tfu_job_run(struct drm_sched_job *sched_job)
 	return fence;
 }
 
+static struct dma_fence *
+v3d_csd_job_run(struct drm_sched_job *sched_job)
+{
+	struct v3d_csd_job *job = to_csd_job(sched_job);
+	struct v3d_dev *v3d = job->base.v3d;
+	struct drm_device *dev = &v3d->drm;
+	struct dma_fence *fence;
+	int i;
+
+	v3d->csd_job = job;
+
+	v3d_invalidate_caches(v3d);
+
+	fence = v3d_fence_create(v3d, V3D_CSD);
+	if (IS_ERR(fence))
+		return NULL;
+
+	if (job->base.irq_fence)
+		dma_fence_put(job->base.irq_fence);
+	job->base.irq_fence = dma_fence_get(fence);
+
+	trace_v3d_submit_csd(dev, to_v3d_fence(fence)->seqno);
+
+	for (i = 1; i <= 6; i++)
+		V3D_CORE_WRITE(0, V3D_CSD_QUEUED_CFG0 + 4 * i, job->args.cfg[i]);
+	/* CFG0 write kicks off the job. */
+	V3D_CORE_WRITE(0, V3D_CSD_QUEUED_CFG0, job->args.cfg[0]);
+
+	return fence;
+}
+
+static struct dma_fence *
+v3d_cache_clean_job_run(struct drm_sched_job *sched_job)
+{
+	struct v3d_job *job = to_v3d_job(sched_job);
+	struct v3d_dev *v3d = job->v3d;
+
+	v3d_clean_caches(v3d);
+
+	return NULL;
+}
+
 static void
 v3d_gpu_reset_for_timeout(struct v3d_dev *v3d, struct drm_sched_job *sched_job)
 {
@@ -232,7 +268,7 @@ v3d_gpu_reset_for_timeout(struct v3d_dev *v3d, struct drm_sched_job *sched_job)
 
 	/* block scheduler */
 	for (q = 0; q < V3D_MAX_QUEUES; q++)
-		drm_sched_stop(&v3d->queue[q].sched);
+		drm_sched_stop(&v3d->queue[q].sched, sched_job);
 
 	if (sched_job)
 		drm_sched_increase_karma(sched_job);
@@ -251,25 +287,23 @@ v3d_gpu_reset_for_timeout(struct v3d_dev *v3d, struct drm_sched_job *sched_job)
 	mutex_unlock(&v3d->reset_lock);
 }
 
+/* If the current address or return address have changed, then the GPU
+ * has probably made progress and we should delay the reset.  This
+ * could fail if the GPU got in an infinite loop in the CL, but that
+ * is pretty unlikely outside of an i-g-t testcase.
+ */
 static void
-v3d_job_timedout(struct drm_sched_job *sched_job)
+v3d_cl_job_timedout(struct drm_sched_job *sched_job, enum v3d_queue q,
+		    u32 *timedout_ctca, u32 *timedout_ctra)
 {
 	struct v3d_job *job = to_v3d_job(sched_job);
-	struct v3d_exec_info *exec = job->exec;
-	struct v3d_dev *v3d = exec->v3d;
-	enum v3d_queue job_q = job == &exec->bin ? V3D_BIN : V3D_RENDER;
-	u32 ctca = V3D_CORE_READ(0, V3D_CLE_CTNCA(job_q));
-	u32 ctra = V3D_CORE_READ(0, V3D_CLE_CTNRA(job_q));
-
-	/* If the current address or return address have changed, then
-	 * the GPU has probably made progress and we should delay the
-	 * reset.  This could fail if the GPU got in an infinite loop
-	 * in the CL, but that is pretty unlikely outside of an i-g-t
-	 * testcase.
-	 */
-	if (job->timedout_ctca != ctca || job->timedout_ctra != ctra) {
-		job->timedout_ctca = ctca;
-		job->timedout_ctra = ctra;
+	struct v3d_dev *v3d = job->v3d;
+	u32 ctca = V3D_CORE_READ(0, V3D_CLE_CTNCA(q));
+	u32 ctra = V3D_CORE_READ(0, V3D_CLE_CTNRA(q));
+
+	if (*timedout_ctca != ctca || *timedout_ctra != ctra) {
+		*timedout_ctca = ctca;
+		*timedout_ctra = ctra;
 		return;
 	}
 
@@ -277,25 +311,82 @@ v3d_job_timedout(struct drm_sched_job *sched_job)
 }
 
 static void
-v3d_tfu_job_timedout(struct drm_sched_job *sched_job)
+v3d_bin_job_timedout(struct drm_sched_job *sched_job)
 {
-	struct v3d_tfu_job *job = to_tfu_job(sched_job);
+	struct v3d_bin_job *job = to_bin_job(sched_job);
+
+	v3d_cl_job_timedout(sched_job, V3D_BIN,
+			    &job->timedout_ctca, &job->timedout_ctra);
+}
+
+static void
+v3d_render_job_timedout(struct drm_sched_job *sched_job)
+{
+	struct v3d_render_job *job = to_render_job(sched_job);
+
+	v3d_cl_job_timedout(sched_job, V3D_RENDER,
+			    &job->timedout_ctca, &job->timedout_ctra);
+}
+
+static void
+v3d_generic_job_timedout(struct drm_sched_job *sched_job)
+{
+	struct v3d_job *job = to_v3d_job(sched_job);
 
 	v3d_gpu_reset_for_timeout(job->v3d, sched_job);
 }
 
-static const struct drm_sched_backend_ops v3d_sched_ops = {
+static void
+v3d_csd_job_timedout(struct drm_sched_job *sched_job)
+{
+	struct v3d_csd_job *job = to_csd_job(sched_job);
+	struct v3d_dev *v3d = job->base.v3d;
+	u32 batches = V3D_CORE_READ(0, V3D_CSD_CURRENT_CFG4);
+
+	/* If we've made progress, skip reset and let the timer get
+	 * rearmed.
+	 */
+	if (job->timedout_batches != batches) {
+		job->timedout_batches = batches;
+		return;
+	}
+
+	v3d_gpu_reset_for_timeout(v3d, sched_job);
+}
+
+static const struct drm_sched_backend_ops v3d_bin_sched_ops = {
 	.dependency = v3d_job_dependency,
-	.run_job = v3d_job_run,
-	.timedout_job = v3d_job_timedout,
-	.free_job = v3d_job_free
+	.run_job = v3d_bin_job_run,
+	.timedout_job = v3d_bin_job_timedout,
+	.free_job = v3d_job_free,
+};
+
+static const struct drm_sched_backend_ops v3d_render_sched_ops = {
+	.dependency = v3d_job_dependency,
+	.run_job = v3d_render_job_run,
+	.timedout_job = v3d_render_job_timedout,
+	.free_job = v3d_job_free,
 };
 
 static const struct drm_sched_backend_ops v3d_tfu_sched_ops = {
-	.dependency = v3d_tfu_job_dependency,
+	.dependency = v3d_job_dependency,
 	.run_job = v3d_tfu_job_run,
-	.timedout_job = v3d_tfu_job_timedout,
-	.free_job = v3d_tfu_job_free
+	.timedout_job = v3d_generic_job_timedout,
+	.free_job = v3d_job_free,
+};
+
+static const struct drm_sched_backend_ops v3d_csd_sched_ops = {
+	.dependency = v3d_job_dependency,
+	.run_job = v3d_csd_job_run,
+	.timedout_job = v3d_csd_job_timedout,
+	.free_job = v3d_job_free
+};
+
+static const struct drm_sched_backend_ops v3d_cache_clean_sched_ops = {
+	.dependency = v3d_job_dependency,
+	.run_job = v3d_cache_clean_job_run,
+	.timedout_job = v3d_generic_job_timedout,
+	.free_job = v3d_job_free
 };
 
 int
@@ -307,7 +398,7 @@ v3d_sched_init(struct v3d_dev *v3d)
 	int ret;
 
 	ret = drm_sched_init(&v3d->queue[V3D_BIN].sched,
-			     &v3d_sched_ops,
+			     &v3d_bin_sched_ops,
 			     hw_jobs_limit, job_hang_limit,
 			     msecs_to_jiffies(hang_limit_ms),
 			     "v3d_bin");
@@ -317,14 +408,14 @@ v3d_sched_init(struct v3d_dev *v3d)
 	}
 
 	ret = drm_sched_init(&v3d->queue[V3D_RENDER].sched,
-			     &v3d_sched_ops,
+			     &v3d_render_sched_ops,
 			     hw_jobs_limit, job_hang_limit,
 			     msecs_to_jiffies(hang_limit_ms),
 			     "v3d_render");
 	if (ret) {
 		dev_err(v3d->dev, "Failed to create render scheduler: %d.",
 			ret);
-		drm_sched_fini(&v3d->queue[V3D_BIN].sched);
+		v3d_sched_fini(v3d);
 		return ret;
 	}
 
@@ -336,11 +427,36 @@ v3d_sched_init(struct v3d_dev *v3d)
 	if (ret) {
 		dev_err(v3d->dev, "Failed to create TFU scheduler: %d.",
 			ret);
-		drm_sched_fini(&v3d->queue[V3D_RENDER].sched);
-		drm_sched_fini(&v3d->queue[V3D_BIN].sched);
+		v3d_sched_fini(v3d);
 		return ret;
 	}
 
+	if (v3d_has_csd(v3d)) {
+		ret = drm_sched_init(&v3d->queue[V3D_CSD].sched,
+				     &v3d_csd_sched_ops,
+				     hw_jobs_limit, job_hang_limit,
+				     msecs_to_jiffies(hang_limit_ms),
+				     "v3d_csd");
+		if (ret) {
+			dev_err(v3d->dev, "Failed to create CSD scheduler: %d.",
+				ret);
+			v3d_sched_fini(v3d);
+			return ret;
+		}
+
+		ret = drm_sched_init(&v3d->queue[V3D_CACHE_CLEAN].sched,
+				     &v3d_cache_clean_sched_ops,
+				     hw_jobs_limit, job_hang_limit,
+				     msecs_to_jiffies(hang_limit_ms),
+				     "v3d_cache_clean");
+		if (ret) {
+			dev_err(v3d->dev, "Failed to create CACHE_CLEAN scheduler: %d.",
+				ret);
+			v3d_sched_fini(v3d);
+			return ret;
+		}
+	}
+
 	return 0;
 }
 
@@ -349,6 +465,8 @@ v3d_sched_fini(struct v3d_dev *v3d)
 {
 	enum v3d_queue q;
 
-	for (q = 0; q < V3D_MAX_QUEUES; q++)
-		drm_sched_fini(&v3d->queue[q].sched);
+	for (q = 0; q < V3D_MAX_QUEUES; q++) {
+		if (v3d->queue[q].sched.ready)
+			drm_sched_fini(&v3d->queue[q].sched);
+	}
 }