author    Varad Gautam <varadgautam@gmail.com>  2016-02-17 19:08:21 +0530
committer Eric Anholt <eric@anholt.net>  2016-03-13 17:05:05 -0700
commit    ca26d28bbaa39f31d5e7e4812603b015c8d54207 (patch)
tree      25e4b6777ecfb6844739479949dac3c32479ea4a /drivers/gpu/drm/vc4/vc4_gem.c
parent    67d1c0a25c05e2105d12abd9c0172d2d5c0e7654 (diff)
drm/vc4: improve throughput by pipelining binning and rendering jobs
The hardware provides us with separate threads for binning and rendering,
and the existing model waits for them both to complete before submitting
the next job.

Splitting the binning and rendering submissions reduces idle time and
gives us approx 20-30% speedup with some x11perf tests such as -line10
and -tilerect1. Improves openarena performance by 1.01897% +/- 0.247857%
(n=16).

Thanks to anholt for suggesting this.

v2: Rebase on the spurious resets fix (change by anholt).

Signed-off-by: Varad Gautam <varadgautam@gmail.com>
Reviewed-by: Eric Anholt <eric@anholt.net>
Signed-off-by: Eric Anholt <eric@anholt.net>
Diffstat (limited to 'drivers/gpu/drm/vc4/vc4_gem.c')
-rw-r--r--  drivers/gpu/drm/vc4/vc4_gem.c | 123
1 file changed, 89 insertions(+), 34 deletions(-)
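The scheduling model the patch introduces is easy to prototype outside the
kernel. Below is a minimal, stand-alone C sketch of the two-queue flow: jobs
enter a bin queue, a finished bin job moves to the render queue, and the
binner immediately starts the next job, so binning of job N+1 overlaps
rendering of job N. The struct job, the queue helpers, and the *_irq()
functions are simplified stand-ins invented for illustration; they are not
the vc4 driver's API.

/*
 * Stand-alone sketch of the pipelined two-queue model (not kernel code).
 */
#include <stdbool.h>
#include <stdio.h>

struct job { int id; bool uses_binner; };

#define QLEN 8
struct queue { struct job *slot[QLEN]; unsigned head, tail; };

static void push(struct queue *q, struct job *j) { q->slot[q->tail++ % QLEN] = j; }
static struct job *first(struct queue *q) { return q->head == q->tail ? NULL : q->slot[q->head % QLEN]; }
static void pop(struct queue *q) { q->head++; }

static struct queue bin_q, render_q;

static void submit_next_render_job(void)
{
        struct job *j = first(&render_q);

        if (j)
                printf("renderer: start job %d\n", j->id);
}

/* Counterpart of vc4_move_job_to_render(): only kick the renderer if it
 * was idle; otherwise the frame-done path picks the job up later. */
static void move_job_to_render(void)
{
        bool was_empty = !first(&render_q);
        struct job *j = first(&bin_q);

        pop(&bin_q);
        push(&render_q, j);
        if (was_empty)
                submit_next_render_job();
}

/* Counterpart of vc4_submit_next_bin_job(): jobs with no binner work
 * skip straight to the render queue (the "goto again" in the patch). */
static void submit_next_bin_job(void)
{
        struct job *j;

        while ((j = first(&bin_q)) != NULL) {
                if (j->uses_binner) {
                        printf("binner:   start job %d\n", j->id);
                        return;
                }
                move_job_to_render();
        }
}

/* What the binner's flush-done interrupt would do: hand the finished job
 * to the renderer and immediately refill the binner. */
static void bin_flush_done_irq(void)
{
        if (first(&bin_q)) {
                move_job_to_render();
                submit_next_bin_job();
        }
}

/* What the renderer's frame-done interrupt would do. */
static void render_frame_done_irq(void)
{
        struct job *j = first(&render_q);

        if (j) {
                printf("renderer: done  job %d\n", j->id);
                pop(&render_q);
                submit_next_render_job();
        }
}

/* Counterpart of vc4_queue_submit(): kick the binner only if it was idle. */
static void submit_job(struct job *j)
{
        bool was_idle = !first(&bin_q);

        push(&bin_q, j);
        if (was_idle)
                submit_next_bin_job();
}

int main(void)
{
        struct job jobs[] = { { 1, true }, { 2, true }, { 3, true } };

        for (int i = 0; i < 3; i++)
                submit_job(&jobs[i]);

        bin_flush_done_irq();    /* job 1 renders while job 2 bins */
        bin_flush_done_irq();    /* job 2 queued behind job 1, job 3 bins */
        render_frame_done_irq(); /* job 1 done, job 2 starts rendering */
        bin_flush_done_irq();    /* job 3 queued behind job 2 */
        render_frame_done_irq();
        render_frame_done_irq();
        return 0;
}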
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
index 202aa1544acc..8d4384f8b78d 100644
--- a/drivers/gpu/drm/vc4/vc4_gem.c
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
@@ -141,10 +141,10 @@ vc4_save_hang_state(struct drm_device *dev)
         struct vc4_dev *vc4 = to_vc4_dev(dev);
         struct drm_vc4_get_hang_state *state;
         struct vc4_hang_state *kernel_state;
-        struct vc4_exec_info *exec;
+        struct vc4_exec_info *exec[2];
         struct vc4_bo *bo;
         unsigned long irqflags;
-        unsigned int i, unref_list_count;
+        unsigned int i, j, unref_list_count, prev_idx;
 
         kernel_state = kcalloc(1, sizeof(*kernel_state), GFP_KERNEL);
         if (!kernel_state)
@@ -153,37 +153,55 @@ vc4_save_hang_state(struct drm_device *dev)
         state = &kernel_state->user_state;
 
         spin_lock_irqsave(&vc4->job_lock, irqflags);
-        exec = vc4_first_job(vc4);
-        if (!exec) {
+        exec[0] = vc4_first_bin_job(vc4);
+        exec[1] = vc4_first_render_job(vc4);
+        if (!exec[0] && !exec[1]) {
                 spin_unlock_irqrestore(&vc4->job_lock, irqflags);
                 return;
         }
 
-        unref_list_count = 0;
-        list_for_each_entry(bo, &exec->unref_list, unref_head)
-                unref_list_count++;
+        /* Get the bos from both binner and renderer into hang state. */
+        state->bo_count = 0;
+        for (i = 0; i < 2; i++) {
+                if (!exec[i])
+                        continue;
+
+                unref_list_count = 0;
+                list_for_each_entry(bo, &exec[i]->unref_list, unref_head)
+                        unref_list_count++;
+                state->bo_count += exec[i]->bo_count + unref_list_count;
+        }
+
+        kernel_state->bo = kcalloc(state->bo_count,
+                                   sizeof(*kernel_state->bo), GFP_ATOMIC);
 
-        state->bo_count = exec->bo_count + unref_list_count;
-        kernel_state->bo = kcalloc(state->bo_count, sizeof(*kernel_state->bo),
-                                   GFP_ATOMIC);
         if (!kernel_state->bo) {
                 spin_unlock_irqrestore(&vc4->job_lock, irqflags);
                 return;
         }
 
-        for (i = 0; i < exec->bo_count; i++) {
-                drm_gem_object_reference(&exec->bo[i]->base);
-                kernel_state->bo[i] = &exec->bo[i]->base;
-        }
+        prev_idx = 0;
+        for (i = 0; i < 2; i++) {
+                if (!exec[i])
+                        continue;
 
-        list_for_each_entry(bo, &exec->unref_list, unref_head) {
-                drm_gem_object_reference(&bo->base.base);
-                kernel_state->bo[i] = &bo->base.base;
-                i++;
+                for (j = 0; j < exec[i]->bo_count; j++) {
+                        drm_gem_object_reference(&exec[i]->bo[j]->base);
+                        kernel_state->bo[j + prev_idx] = &exec[i]->bo[j]->base;
+                }
+
+                list_for_each_entry(bo, &exec[i]->unref_list, unref_head) {
+                        drm_gem_object_reference(&bo->base.base);
+                        kernel_state->bo[j + prev_idx] = &bo->base.base;
+                        j++;
+                }
+                prev_idx = j + 1;
         }
 
-        state->start_bin = exec->ct0ca;
-        state->start_render = exec->ct1ca;
+        if (exec[0])
+                state->start_bin = exec[0]->ct0ca;
+        if (exec[1])
+                state->start_render = exec[1]->ct1ca;
 
         spin_unlock_irqrestore(&vc4->job_lock, irqflags);
@@ -267,13 +285,15 @@ vc4_hangcheck_elapsed(unsigned long data)
         struct vc4_dev *vc4 = to_vc4_dev(dev);
         uint32_t ct0ca, ct1ca;
         unsigned long irqflags;
-        struct vc4_exec_info *exec;
+        struct vc4_exec_info *bin_exec, *render_exec;
 
         spin_lock_irqsave(&vc4->job_lock, irqflags);
-        exec = vc4_first_job(vc4);
+
+        bin_exec = vc4_first_bin_job(vc4);
+        render_exec = vc4_first_render_job(vc4);
 
         /* If idle, we can stop watching for hangs. */
-        if (!exec) {
+        if (!bin_exec && !render_exec) {
                 spin_unlock_irqrestore(&vc4->job_lock, irqflags);
                 return;
         }
@@ -284,9 +304,12 @@ vc4_hangcheck_elapsed(unsigned long data)
         /* If we've made any progress in execution, rearm the timer
          * and wait.
          */
-        if (ct0ca != exec->last_ct0ca || ct1ca != exec->last_ct1ca) {
-                exec->last_ct0ca = ct0ca;
-                exec->last_ct1ca = ct1ca;
+        if ((bin_exec && ct0ca != bin_exec->last_ct0ca) ||
+            (render_exec && ct1ca != render_exec->last_ct1ca)) {
+                if (bin_exec)
+                        bin_exec->last_ct0ca = ct0ca;
+                if (render_exec)
+                        render_exec->last_ct1ca = ct1ca;
                 spin_unlock_irqrestore(&vc4->job_lock, irqflags);
                 vc4_queue_hangcheck(dev);
                 return;
@@ -386,11 +409,13 @@ vc4_flush_caches(struct drm_device *dev)
  * The job_lock should be held during this.
  */
 void
-vc4_submit_next_job(struct drm_device *dev)
+vc4_submit_next_bin_job(struct drm_device *dev)
 {
         struct vc4_dev *vc4 = to_vc4_dev(dev);
-        struct vc4_exec_info *exec = vc4_first_job(vc4);
+        struct vc4_exec_info *exec;
 
+again:
+        exec = vc4_first_bin_job(vc4);
         if (!exec)
                 return;
@@ -400,11 +425,40 @@ vc4_submit_next_job(struct drm_device *dev)
         V3D_WRITE(V3D_BPOA, 0);
         V3D_WRITE(V3D_BPOS, 0);
 
-        if (exec->ct0ca != exec->ct0ea)
+        /* Either put the job in the binner if it uses the binner, or
+         * immediately move it to the to-be-rendered queue.
+         */
+        if (exec->ct0ca != exec->ct0ea) {
                 submit_cl(dev, 0, exec->ct0ca, exec->ct0ea);
+        } else {
+                vc4_move_job_to_render(dev, exec);
+                goto again;
+        }
+}
+
+void
+vc4_submit_next_render_job(struct drm_device *dev)
+{
+        struct vc4_dev *vc4 = to_vc4_dev(dev);
+        struct vc4_exec_info *exec = vc4_first_render_job(vc4);
+
+        if (!exec)
+                return;
+
         submit_cl(dev, 1, exec->ct1ca, exec->ct1ea);
 }
 
+void
+vc4_move_job_to_render(struct drm_device *dev, struct vc4_exec_info *exec)
+{
+        struct vc4_dev *vc4 = to_vc4_dev(dev);
+        bool was_empty = list_empty(&vc4->render_job_list);
+
+        list_move_tail(&exec->head, &vc4->render_job_list);
+        if (was_empty)
+                vc4_submit_next_render_job(dev);
+}
+
 static void
 vc4_update_bo_seqnos(struct vc4_exec_info *exec, uint64_t seqno)
 {
@@ -443,14 +497,14 @@ vc4_queue_submit(struct drm_device *dev, struct vc4_exec_info *exec)
         exec->seqno = seqno;
         vc4_update_bo_seqnos(exec, seqno);
 
-        list_add_tail(&exec->head, &vc4->job_list);
+        list_add_tail(&exec->head, &vc4->bin_job_list);
 
         /* If no job was executing, kick ours off. Otherwise, it'll
-         * get started when the previous job's frame done interrupt
+         * get started when the previous job's flush done interrupt
          * occurs.
          */
-        if (vc4_first_job(vc4) == exec) {
-                vc4_submit_next_job(dev);
+        if (vc4_first_bin_job(vc4) == exec) {
+                vc4_submit_next_bin_job(dev);
                 vc4_queue_hangcheck(dev);
         }
@@ -859,7 +913,8 @@ vc4_gem_init(struct drm_device *dev)
 {
         struct vc4_dev *vc4 = to_vc4_dev(dev);
 
-        INIT_LIST_HEAD(&vc4->job_list);
+        INIT_LIST_HEAD(&vc4->bin_job_list);
+        INIT_LIST_HEAD(&vc4->render_job_list);
         INIT_LIST_HEAD(&vc4->job_done_list);
         INIT_LIST_HEAD(&vc4->seqno_cb_list);
         spin_lock_init(&vc4->job_lock);
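For reference, the per-thread hangcheck change above boils down to a single
predicate. A minimal restatement with simplified types (struct exec_model is
a stand-in for struct vc4_exec_info; ct0ca/ct1ca are the binner's and
renderer's current control-list addresses as read from the V3D registers):

#include <stdbool.h>
#include <stdint.h>

struct exec_model { uint32_t last_ct0ca, last_ct1ca; };

/* Returns true if either active thread advanced since the last timer
 * tick; in that case the driver rearms the hangcheck timer instead of
 * resetting the GPU. NULL means that thread has no job queued. */
static bool made_progress(struct exec_model *bin, struct exec_model *render,
                          uint32_t ct0ca, uint32_t ct1ca)
{
        bool advanced = (bin && ct0ca != bin->last_ct0ca) ||
                        (render && ct1ca != render->last_ct1ca);

        if (advanced) {
                /* Record the new positions so a newly stalled thread is
                 * caught the next time the timer fires. */
                if (bin)
                        bin->last_ct0ca = ct0ca;
                if (render)
                        render->last_ct1ca = ct1ca;
        }
        return advanced;
}

As in the patch, progress by either thread rearms the timer, so the GPU is
only reset once both active threads have stopped advancing.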