author    Dave Airlie <airlied@redhat.com>    2019-03-14 11:56:59 +1000
committer Dave Airlie <airlied@redhat.com>    2019-03-14 11:57:11 +1000
commit    ad7ad48e09871e7bf6d75db80243f2abb30b4db9
tree      1ce55ec72e0d0e5fd8cdd9b48d426a24fc3fc447
parent    74cd45fa90a234de92a3f3ce8bdabeb643b546a5
parent    ca22f32a6296cbfa29de56328c8505560a18cfa8
Merge tag 'drm-intel-next-fixes-2019-03-12' of git://anongit.freedesktop.org/drm/drm-intel into drm-next
- HDCP state handling in ddi_update_pipe
- Protect i915_active iterators from the shrinker
- Reacquire priolist cache after dropping the engine lock
- (Selftest) Always free spinner on __sseu_prepare error
- Acquire breadcrumb ref before canceling
- Fix atomic state leak on HDMI link reset
- Relax mmap VMA check

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Rodrigo Vivi <rodrigo.vivi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190312205551.GA7701@intel.com
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/gpu/drm/i915/i915_active.c                 | 36
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c                    |  3
-rw-r--r--  drivers/gpu/drm/i915/i915_scheduler.c              | 27
-rw-r--r--  drivers/gpu/drm/i915/intel_breadcrumbs.c           | 18
-rw-r--r--  drivers/gpu/drm/i915/intel_ddi.c                   | 14
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_gem_context.c  | 69
6 files changed, 94 insertions(+), 73 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c
index 215b6ff8aa73..db7bb5bd5add 100644
--- a/drivers/gpu/drm/i915/i915_active.c
+++ b/drivers/gpu/drm/i915/i915_active.c
@@ -163,17 +163,25 @@ int i915_active_ref(struct i915_active *ref,
struct i915_request *rq)
{
struct i915_active_request *active;
+ int err = 0;
+
+ /* Prevent reaping in case we malloc/wait while building the tree */
+ i915_active_acquire(ref);
active = active_instance(ref, timeline);
- if (IS_ERR(active))
- return PTR_ERR(active);
+ if (IS_ERR(active)) {
+ err = PTR_ERR(active);
+ goto out;
+ }
if (!i915_active_request_isset(active))
ref->count++;
__i915_active_request_set(active, rq);
GEM_BUG_ON(!ref->count);
- return 0;
+out:
+ i915_active_release(ref);
+ return err;
}
bool i915_active_acquire(struct i915_active *ref)
@@ -223,19 +231,25 @@ int i915_request_await_active_request(struct i915_request *rq,
int i915_request_await_active(struct i915_request *rq, struct i915_active *ref)
{
struct active_node *it, *n;
- int ret;
+ int err = 0;
- ret = i915_request_await_active_request(rq, &ref->last);
- if (ret)
- return ret;
+ /* await allocates and so we need to avoid hitting the shrinker */
+ if (i915_active_acquire(ref))
+ goto out; /* was idle */
+
+ err = i915_request_await_active_request(rq, &ref->last);
+ if (err)
+ goto out;
rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
- ret = i915_request_await_active_request(rq, &it->base);
- if (ret)
- return ret;
+ err = i915_request_await_active_request(rq, &it->base);
+ if (err)
+ goto out;
}
- return 0;
+out:
+ i915_active_release(ref);
+ return err;
}
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 6728ea5c71d4..30d516e975c6 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1688,7 +1688,8 @@ __vma_matches(struct vm_area_struct *vma, struct file *filp,
if (vma->vm_file != filp)
return false;
- return vma->vm_start == addr && (vma->vm_end - vma->vm_start) == size;
+ return vma->vm_start == addr &&
+ (vma->vm_end - vma->vm_start) == PAGE_ALIGN(size);
}
/**
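
A minimal userspace sketch (an illustration, not part of this patch) of why the __vma_matches() check above is relaxed to PAGE_ALIGN(size): mmap() lengths are rounded up to whole pages, so the VMA backing an unaligned-size mapping spans PAGE_ALIGN(size) bytes and a strict "== size" comparison can spuriously fail. The local rounding below stands in for the kernel's PAGE_ALIGN macro.

/* Hypothetical demo, not kernel code: the VMA created by mmap() spans
 * whole pages, so its length is the page-aligned request size.
 */
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	size_t page = (size_t)sysconf(_SC_PAGESIZE);
	size_t size = 1000;               /* deliberately not page aligned */
	size_t aligned = (size + page - 1) & ~(page - 1);
	void *p;

	p = mmap(NULL, size, PROT_READ | PROT_WRITE,
		 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED)
		return 1;

	/* The kernel's VMA for this mapping covers 'aligned' bytes, not
	 * 'size', which is why the i915 check compares against
	 * PAGE_ALIGN(size).
	 */
	printf("requested %zu bytes, VMA covers %zu bytes\n", size, aligned);

	munmap(p, size);
	return 0;
}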
diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
index d01683167c77..8bc042551692 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -223,8 +223,14 @@ out:
return &p->requests[idx];
}
+struct sched_cache {
+ struct list_head *priolist;
+};
+
static struct intel_engine_cs *
-sched_lock_engine(struct i915_sched_node *node, struct intel_engine_cs *locked)
+sched_lock_engine(const struct i915_sched_node *node,
+ struct intel_engine_cs *locked,
+ struct sched_cache *cache)
{
struct intel_engine_cs *engine = node_to_request(node)->engine;
@@ -232,6 +238,7 @@ sched_lock_engine(struct i915_sched_node *node, struct intel_engine_cs *locked)
if (engine != locked) {
spin_unlock(&locked->timeline.lock);
+ memset(cache, 0, sizeof(*cache));
spin_lock(&engine->timeline.lock);
}
@@ -253,11 +260,11 @@ static bool inflight(const struct i915_request *rq,
static void __i915_schedule(struct i915_request *rq,
const struct i915_sched_attr *attr)
{
- struct list_head *uninitialized_var(pl);
- struct intel_engine_cs *engine, *last;
+ struct intel_engine_cs *engine;
struct i915_dependency *dep, *p;
struct i915_dependency stack;
const int prio = attr->priority;
+ struct sched_cache cache;
LIST_HEAD(dfs);
/* Needed in order to use the temporary link inside i915_dependency */
@@ -328,7 +335,7 @@ static void __i915_schedule(struct i915_request *rq,
__list_del_entry(&stack.dfs_link);
}
- last = NULL;
+ memset(&cache, 0, sizeof(cache));
engine = rq->engine;
spin_lock_irq(&engine->timeline.lock);
@@ -338,7 +345,7 @@ static void __i915_schedule(struct i915_request *rq,
INIT_LIST_HEAD(&dep->dfs_link);
- engine = sched_lock_engine(node, engine);
+ engine = sched_lock_engine(node, engine, &cache);
lockdep_assert_held(&engine->timeline.lock);
/* Recheck after acquiring the engine->timeline.lock */
@@ -347,11 +354,11 @@ static void __i915_schedule(struct i915_request *rq,
node->attr.priority = prio;
if (!list_empty(&node->link)) {
- if (last != engine) {
- pl = i915_sched_lookup_priolist(engine, prio);
- last = engine;
- }
- list_move_tail(&node->link, pl);
+ if (!cache.priolist)
+ cache.priolist =
+ i915_sched_lookup_priolist(engine,
+ prio);
+ list_move_tail(&node->link, cache.priolist);
} else {
/*
* If the request is not in the priolist queue because
diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c
index cacaa1d04d17..09ed90c0ba00 100644
--- a/drivers/gpu/drm/i915/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c
@@ -106,16 +106,6 @@ bool intel_engine_breadcrumbs_irq(struct intel_engine_cs *engine)
GEM_BUG_ON(!test_bit(I915_FENCE_FLAG_SIGNAL,
&rq->fence.flags));
- clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags);
-
- /*
- * We may race with direct invocation of
- * dma_fence_signal(), e.g. i915_request_retire(),
- * in which case we can skip processing it ourselves.
- */
- if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
- &rq->fence.flags))
- continue;
/*
* Queue for execution after dropping the signaling
@@ -123,6 +113,14 @@ bool intel_engine_breadcrumbs_irq(struct intel_engine_cs *engine)
* more signalers to the same context or engine.
*/
i915_request_get(rq);
+
+ /*
+ * We may race with direct invocation of
+ * dma_fence_signal(), e.g. i915_request_retire(),
+ * so we need to acquire our reference to the request
+ * before we cancel the breadcrumb.
+ */
+ clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags);
list_add_tail(&rq->signal_link, &signal);
}
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index ca705546a0ab..14d580cdefd3 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -3568,6 +3568,13 @@ static void intel_ddi_update_pipe(struct intel_encoder *encoder,
{
if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
intel_ddi_update_pipe_dp(encoder, crtc_state, conn_state);
+
+ if (conn_state->content_protection ==
+ DRM_MODE_CONTENT_PROTECTION_DESIRED)
+ intel_hdcp_enable(to_intel_connector(conn_state->connector));
+ else if (conn_state->content_protection ==
+ DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
+ intel_hdcp_disable(to_intel_connector(conn_state->connector));
}
static void intel_ddi_set_fia_lane_count(struct intel_encoder *encoder,
@@ -3962,12 +3969,7 @@ static int modeset_pipe(struct drm_crtc *crtc,
goto out;
ret = drm_atomic_commit(state);
- if (ret)
- goto out;
-
- return 0;
-
- out:
+out:
drm_atomic_state_put(state);
return ret;
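
The intel_ddi_update_pipe() hunk above acts on the connector's content-protection state during a fastset. A hedged sketch of the userspace side that produces that state through libdrm follows; connector_id is assumed to be known, and the value 1 corresponds to "Desired" in the standard "Content Protection" connector property.

/* Hypothetical libdrm helper: request HDCP on a connector by setting the
 * "Content Protection" property to "Desired" (1). The driver then enables
 * or disables HDCP when the connector state is applied.
 */
#include <string.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

static int set_content_protection(int fd, uint32_t connector_id, uint64_t value)
{
	drmModeObjectProperties *props;
	int ret = -1;
	uint32_t i;

	props = drmModeObjectGetProperties(fd, connector_id,
					   DRM_MODE_OBJECT_CONNECTOR);
	if (!props)
		return -1;

	for (i = 0; i < props->count_props; i++) {
		drmModePropertyRes *prop =
			drmModeGetProperty(fd, props->props[i]);

		if (!prop)
			continue;
		if (!strcmp(prop->name, "Content Protection"))
			ret = drmModeObjectSetProperty(fd, connector_id,
						       DRM_MODE_OBJECT_CONNECTOR,
						       prop->prop_id, value);
		drmModeFreeProperty(prop);
	}

	drmModeFreeObjectProperties(props);
	return ret;
}

/* Usage: set_content_protection(drm_fd, connector_id, 1) requests HDCP;
 * passing 0 ("Undesired") asks the driver to tear it down again.
 */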
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
index d00d0bb07784..7eb58a9d1319 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
@@ -710,47 +710,45 @@ __sseu_prepare(struct drm_i915_private *i915,
unsigned int flags,
struct i915_gem_context *ctx,
struct intel_engine_cs *engine,
- struct igt_spinner **spin_out)
+ struct igt_spinner **spin)
{
- int ret = 0;
-
- if (flags & (TEST_BUSY | TEST_RESET)) {
- struct igt_spinner *spin;
- struct i915_request *rq;
+ struct i915_request *rq;
+ int ret;
- spin = kzalloc(sizeof(*spin), GFP_KERNEL);
- if (!spin) {
- ret = -ENOMEM;
- goto out;
- }
+ *spin = NULL;
+ if (!(flags & (TEST_BUSY | TEST_RESET)))
+ return 0;
- ret = igt_spinner_init(spin, i915);
- if (ret)
- return ret;
+ *spin = kzalloc(sizeof(**spin), GFP_KERNEL);
+ if (!*spin)
+ return -ENOMEM;
- rq = igt_spinner_create_request(spin, ctx, engine, MI_NOOP);
- if (IS_ERR(rq)) {
- ret = PTR_ERR(rq);
- igt_spinner_fini(spin);
- kfree(spin);
- goto out;
- }
+ ret = igt_spinner_init(*spin, i915);
+ if (ret)
+ goto err_free;
- i915_request_add(rq);
+ rq = igt_spinner_create_request(*spin, ctx, engine, MI_NOOP);
+ if (IS_ERR(rq)) {
+ ret = PTR_ERR(rq);
+ goto err_fini;
+ }
- if (!igt_wait_for_spinner(spin, rq)) {
- pr_err("%s: Spinner failed to start!\n", name);
- igt_spinner_end(spin);
- igt_spinner_fini(spin);
- kfree(spin);
- ret = -ETIMEDOUT;
- goto out;
- }
+ i915_request_add(rq);
- *spin_out = spin;
+ if (!igt_wait_for_spinner(*spin, rq)) {
+ pr_err("%s: Spinner failed to start!\n", name);
+ ret = -ETIMEDOUT;
+ goto err_end;
}
-out:
+ return 0;
+
+err_end:
+ igt_spinner_end(*spin);
+err_fini:
+ igt_spinner_fini(*spin);
+err_free:
+ kfree(fetch_and_zero(spin));
return ret;
}
@@ -897,22 +895,23 @@ __sseu_test(struct drm_i915_private *i915,
ret = __sseu_prepare(i915, name, flags, ctx, engine, &spin);
if (ret)
- goto out;
+ goto out_context;
ret = __i915_gem_context_reconfigure_sseu(ctx, engine, sseu);
if (ret)
- goto out;
+ goto out_spin;
ret = __sseu_finish(i915, name, flags, ctx, kctx, engine, obj,
hweight32(sseu.slice_mask), spin);
-out:
+out_spin:
if (spin) {
igt_spinner_end(spin);
igt_spinner_fini(spin);
kfree(spin);
}
+out_context:
kernel_context_close(kctx);
return ret;