Diffstat (limited to 'drivers/gpu/drm/i915/i915_request.c')
-rw-r--r--  drivers/gpu/drm/i915/i915_request.c | 844
 1 file changed, 450 insertions(+), 394 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index ca95ab2f4cfa..82094b9f5ba7 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -22,13 +22,30 @@
*
*/
-#include <linux/prefetch.h>
#include <linux/dma-fence-array.h>
+#include <linux/irq_work.h>
+#include <linux/prefetch.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/sched/signal.h>
#include "i915_drv.h"
+#include "i915_active.h"
+#include "i915_globals.h"
+#include "i915_reset.h"
+
+struct execute_cb {
+ struct list_head link;
+ struct irq_work work;
+ struct i915_sw_fence *fence;
+};
+
+static struct i915_global_request {
+ struct i915_global base;
+ struct kmem_cache *slab_requests;
+ struct kmem_cache *slab_dependencies;
+ struct kmem_cache *slab_execute_cbs;
+} global;
static const char *i915_fence_get_driver_name(struct dma_fence *fence)
{
@@ -49,7 +66,7 @@ static const char *i915_fence_get_timeline_name(struct dma_fence *fence)
if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
return "signaled";
- return to_request(fence)->timeline->name;
+ return to_request(fence)->gem_context->name ?: "[i915]";
}
static bool i915_fence_signaled(struct dma_fence *fence)
@@ -59,14 +76,16 @@ static bool i915_fence_signaled(struct dma_fence *fence)
static bool i915_fence_enable_signaling(struct dma_fence *fence)
{
- return intel_engine_enable_signaling(to_request(fence), true);
+ return i915_request_enable_breadcrumb(to_request(fence));
}
static signed long i915_fence_wait(struct dma_fence *fence,
bool interruptible,
signed long timeout)
{
- return i915_request_wait(to_request(fence), interruptible, timeout);
+ return i915_request_wait(to_request(fence),
+ interruptible | I915_WAIT_PRIORITY,
+ timeout);
}
static void i915_fence_release(struct dma_fence *fence)
@@ -82,7 +101,7 @@ static void i915_fence_release(struct dma_fence *fence)
*/
i915_sw_fence_fini(&rq->submit);
- kmem_cache_free(rq->i915->requests, rq);
+ kmem_cache_free(global.slab_requests, rq);
}
const struct dma_fence_ops i915_fence_ops = {
@@ -111,99 +130,10 @@ i915_request_remove_from_client(struct i915_request *request)
spin_unlock(&file_priv->mm.lock);
}
-static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno)
+static void reserve_gt(struct drm_i915_private *i915)
{
- struct intel_engine_cs *engine;
- struct i915_timeline *timeline;
- enum intel_engine_id id;
- int ret;
-
- /* Carefully retire all requests without writing to the rings */
- ret = i915_gem_wait_for_idle(i915,
- I915_WAIT_INTERRUPTIBLE |
- I915_WAIT_LOCKED,
- MAX_SCHEDULE_TIMEOUT);
- if (ret)
- return ret;
-
- GEM_BUG_ON(i915->gt.active_requests);
-
- /* If the seqno wraps around, we need to clear the breadcrumb rbtree */
- for_each_engine(engine, i915, id) {
- GEM_TRACE("%s seqno %d (current %d) -> %d\n",
- engine->name,
- engine->timeline.seqno,
- intel_engine_get_seqno(engine),
- seqno);
-
- if (seqno == engine->timeline.seqno)
- continue;
-
- kthread_park(engine->breadcrumbs.signaler);
-
- if (!i915_seqno_passed(seqno, engine->timeline.seqno)) {
- /* Flush any waiters before we reuse the seqno */
- intel_engine_disarm_breadcrumbs(engine);
- intel_engine_init_hangcheck(engine);
- GEM_BUG_ON(!list_empty(&engine->breadcrumbs.signals));
- }
-
- /* Check we are idle before we fiddle with hw state! */
- GEM_BUG_ON(!intel_engine_is_idle(engine));
- GEM_BUG_ON(i915_gem_active_isset(&engine->timeline.last_request));
-
- /* Finally reset hw state */
- intel_engine_init_global_seqno(engine, seqno);
- engine->timeline.seqno = seqno;
-
- kthread_unpark(engine->breadcrumbs.signaler);
- }
-
- list_for_each_entry(timeline, &i915->gt.timelines, link)
- memset(timeline->global_sync, 0, sizeof(timeline->global_sync));
-
- i915->gt.request_serial = seqno;
-
- return 0;
-}
-
-int i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno)
-{
- struct drm_i915_private *i915 = to_i915(dev);
-
- lockdep_assert_held(&i915->drm.struct_mutex);
-
- if (seqno == 0)
- return -EINVAL;
-
- /* HWS page needs to be set less than what we will inject to ring */
- return reset_all_global_seqno(i915, seqno - 1);
-}
-
-static int reserve_gt(struct drm_i915_private *i915)
-{
- int ret;
-
- /*
- * Reservation is fine until we may need to wrap around
- *
- * By incrementing the serial for every request, we know that no
- * individual engine may exceed that serial (as each is reset to 0
- * on any wrap). This protects even the most pessimistic of migrations
- * of every request from all engines onto just one.
- */
- while (unlikely(++i915->gt.request_serial == 0)) {
- ret = reset_all_global_seqno(i915, 0);
- if (ret) {
- i915->gt.request_serial--;
- return ret;
- }
- }
-
if (!i915->gt.active_requests++)
i915_gem_unpark(i915);
-
- return 0;
}
static void unreserve_gt(struct drm_i915_private *i915)
@@ -213,12 +143,6 @@ static void unreserve_gt(struct drm_i915_private *i915)
i915_gem_park(i915);
}
-void i915_gem_retire_noop(struct i915_gem_active *active,
- struct i915_request *request)
-{
- /* Space left intentionally blank */
-}
-
static void advance_ring(struct i915_request *request)
{
struct intel_ring *ring = request->ring;
@@ -243,7 +167,6 @@ static void advance_ring(struct i915_request *request)
* is just about to be. Either works, if we miss the last two
* noops - they are safe to be replayed on a reset.
*/
- GEM_TRACE("marking %s as inactive\n", ring->timeline->name);
tail = READ_ONCE(request->tail);
list_del(&ring->active_link);
} else {
@@ -270,11 +193,10 @@ static void free_capture_list(struct i915_request *request)
static void __retire_engine_request(struct intel_engine_cs *engine,
struct i915_request *rq)
{
- GEM_TRACE("%s(%s) fence %llx:%d, global=%d, current %d\n",
+ GEM_TRACE("%s(%s) fence %llx:%lld, current %d\n",
__func__, engine->name,
rq->fence.context, rq->fence.seqno,
- rq->global_seqno,
- intel_engine_get_seqno(engine));
+ hwsp_seqno(rq));
GEM_BUG_ON(!i915_request_completed(rq));
@@ -286,10 +208,11 @@ static void __retire_engine_request(struct intel_engine_cs *engine,
spin_unlock(&engine->timeline.lock);
spin_lock(&rq->lock);
- if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags))
+ i915_request_mark_complete(rq);
+ if (!i915_request_signaled(rq))
dma_fence_signal_locked(&rq->fence);
if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &rq->fence.flags))
- intel_engine_cancel_signaling(rq);
+ i915_request_cancel_breadcrumb(rq);
if (rq->waitboost) {
GEM_BUG_ON(!atomic_read(&rq->i915->gt_pm.rps.num_waiters));
atomic_dec(&rq->i915->gt_pm.rps.num_waiters);
@@ -330,13 +253,12 @@ static void __retire_engine_upto(struct intel_engine_cs *engine,
static void i915_request_retire(struct i915_request *request)
{
- struct i915_gem_active *active, *next;
+ struct i915_active_request *active, *next;
- GEM_TRACE("%s fence %llx:%d, global=%d, current %d\n",
+ GEM_TRACE("%s fence %llx:%lld, current %d\n",
request->engine->name,
request->fence.context, request->fence.seqno,
- request->global_seqno,
- intel_engine_get_seqno(request->engine));
+ hwsp_seqno(request));
lockdep_assert_held(&request->i915->drm.struct_mutex);
GEM_BUG_ON(!i915_sw_fence_signaled(&request->submit));
@@ -363,10 +285,10 @@ static void i915_request_retire(struct i915_request *request)
* we may spend an inordinate amount of time simply handling
* the retirement of requests and processing their callbacks.
* Of which, this loop itself is particularly hot due to the
- * cache misses when jumping around the list of i915_gem_active.
- * So we try to keep this loop as streamlined as possible and
- * also prefetch the next i915_gem_active to try and hide
- * the likely cache miss.
+ * cache misses when jumping around the list of
+ * i915_active_request. So we try to keep this loop as
+ * streamlined as possible and also prefetch the next
+ * i915_active_request to try and hide the likely cache miss.
*/
prefetchw(next);
@@ -378,15 +300,13 @@ static void i915_request_retire(struct i915_request *request)
i915_request_remove_from_client(request);
- /* Retirement decays the ban score as it is a sign of ctx progress */
- atomic_dec_if_positive(&request->gem_context->ban_score);
intel_context_unpin(request->hw_context);
__retire_engine_upto(request->engine, request);
unreserve_gt(request->i915);
- i915_sched_node_fini(request->i915, &request->sched);
+ i915_sched_node_fini(&request->sched);
i915_request_put(request);
}
@@ -395,11 +315,10 @@ void i915_request_retire_upto(struct i915_request *rq)
struct intel_ring *ring = rq->ring;
struct i915_request *tmp;
- GEM_TRACE("%s fence %llx:%d, global=%d, current %d\n",
+ GEM_TRACE("%s fence %llx:%lld, current %d\n",
rq->engine->name,
rq->fence.context, rq->fence.seqno,
- rq->global_seqno,
- intel_engine_get_seqno(rq->engine));
+ hwsp_seqno(rq));
lockdep_assert_held(&rq->i915->drm.struct_mutex);
GEM_BUG_ON(!i915_request_completed(rq));
@@ -415,9 +334,67 @@ void i915_request_retire_upto(struct i915_request *rq)
} while (tmp != rq);
}
-static u32 timeline_get_seqno(struct i915_timeline *tl)
+static void irq_execute_cb(struct irq_work *wrk)
+{
+ struct execute_cb *cb = container_of(wrk, typeof(*cb), work);
+
+ i915_sw_fence_complete(cb->fence);
+ kmem_cache_free(global.slab_execute_cbs, cb);
+}
+
+static void __notify_execute_cb(struct i915_request *rq)
{
- return ++tl->seqno;
+ struct execute_cb *cb;
+
+ lockdep_assert_held(&rq->lock);
+
+ if (list_empty(&rq->execute_cb))
+ return;
+
+ list_for_each_entry(cb, &rq->execute_cb, link)
+ irq_work_queue(&cb->work);
+
+ /*
+ * XXX Rollback on __i915_request_unsubmit()
+ *
+ * In the future, perhaps when we have an active time-slicing scheduler,
+ * it will be interesting to unsubmit parallel execution and remove
+ * busywaits from the GPU until their master is restarted. This is
+ * quite hairy: we have to carefully roll back the fence and do a
+ * preempt-to-idle cycle on the target engine, all the while the
+ * master execute_cb may refire.
+ */
+ INIT_LIST_HEAD(&rq->execute_cb);
+}
+
+static int
+i915_request_await_execution(struct i915_request *rq,
+ struct i915_request *signal,
+ gfp_t gfp)
+{
+ struct execute_cb *cb;
+
+ if (i915_request_is_active(signal))
+ return 0;
+
+ cb = kmem_cache_alloc(global.slab_execute_cbs, gfp);
+ if (!cb)
+ return -ENOMEM;
+
+ cb->fence = &rq->submit;
+ i915_sw_fence_await(cb->fence);
+ init_irq_work(&cb->work, irq_execute_cb);
+
+ spin_lock_irq(&signal->lock);
+ if (i915_request_is_active(signal)) {
+ i915_sw_fence_complete(cb->fence);
+ kmem_cache_free(global.slab_execute_cbs, cb);
+ } else {
+ list_add_tail(&cb->link, &signal->execute_cb);
+ }
+ spin_unlock_irq(&signal->lock);
+
+ return 0;
}
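/*
 * Editorial sketch (not part of the patch): the execute_cb deferral pattern
 * above, in isolation. __notify_execute_cb() runs under rq->lock with
 * interrupts off, so completing the waiter's i915_sw_fence directly there
 * could recurse into further fence and lock work; instead each callback is
 * packaged as an irq_work, which may be queued from that context and whose
 * handler runs shortly afterwards outside the caller's lock. All names
 * below are illustrative only.
 */
#include <linux/irq_work.h>
#include <linux/kernel.h>
#include <linux/slab.h>

struct deferred_notify {
	struct irq_work work;
	void (*complete)(void *data);	/* e.g. i915_sw_fence_complete() */
	void *data;
};

static void deferred_notify_fn(struct irq_work *wrk)
{
	struct deferred_notify *cb = container_of(wrk, typeof(*cb), work);

	cb->complete(cb->data);		/* runs outside the signaler's spinlock */
	kfree(cb);
}

static int queue_deferred_notify(void (*complete)(void *), void *data)
{
	struct deferred_notify *cb;

	cb = kmalloc(sizeof(*cb), GFP_ATOMIC);
	if (!cb)
		return -ENOMEM;

	cb->complete = complete;
	cb->data = data;
	init_irq_work(&cb->work, deferred_notify_fn);
	irq_work_queue(&cb->work);
	return 0;
}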
static void move_to_timeline(struct i915_request *request,
@@ -434,39 +411,39 @@ static void move_to_timeline(struct i915_request *request,
void __i915_request_submit(struct i915_request *request)
{
struct intel_engine_cs *engine = request->engine;
- u32 seqno;
- GEM_TRACE("%s fence %llx:%d -> global=%d, current %d\n",
+ GEM_TRACE("%s fence %llx:%lld -> current %d\n",
engine->name,
request->fence.context, request->fence.seqno,
- engine->timeline.seqno + 1,
- intel_engine_get_seqno(engine));
+ hwsp_seqno(request));
GEM_BUG_ON(!irqs_disabled());
lockdep_assert_held(&engine->timeline.lock);
- GEM_BUG_ON(request->global_seqno);
-
- seqno = timeline_get_seqno(&engine->timeline);
- GEM_BUG_ON(!seqno);
- GEM_BUG_ON(intel_engine_signaled(engine, seqno));
+ if (i915_gem_context_is_banned(request->gem_context))
+ i915_request_skip(request, -EIO);
/* We may be recursing from the signal callback of another i915 fence */
spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
- request->global_seqno = seqno;
- if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags))
- intel_engine_enable_signaling(request, false);
+
+ GEM_BUG_ON(test_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags));
+ set_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags);
+
+ if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags) &&
+ !i915_request_enable_breadcrumb(request))
+ intel_engine_queue_breadcrumbs(engine);
+
+ __notify_execute_cb(request);
+
spin_unlock(&request->lock);
- engine->emit_breadcrumb(request,
- request->ring->vaddr + request->postfix);
+ engine->emit_fini_breadcrumb(request,
+ request->ring->vaddr + request->postfix);
/* Transfer from per-context onto the global per-engine timeline */
move_to_timeline(request, &engine->timeline);
trace_i915_request_execute(request);
-
- wake_up_all(&request->execute);
}
void i915_request_submit(struct i915_request *request)
@@ -486,11 +463,10 @@ void __i915_request_unsubmit(struct i915_request *request)
{
struct intel_engine_cs *engine = request->engine;
- GEM_TRACE("%s fence %llx:%d <- global=%d, current %d\n",
+ GEM_TRACE("%s fence %llx:%lld, current %d\n",
engine->name,
request->fence.context, request->fence.seqno,
- request->global_seqno,
- intel_engine_get_seqno(engine));
+ hwsp_seqno(request));
GEM_BUG_ON(!irqs_disabled());
lockdep_assert_held(&engine->timeline.lock);
@@ -499,16 +475,25 @@ void __i915_request_unsubmit(struct i915_request *request)
* Only unwind in reverse order, required so that the per-context list
* is kept in seqno/ring order.
*/
- GEM_BUG_ON(!request->global_seqno);
- GEM_BUG_ON(request->global_seqno != engine->timeline.seqno);
- GEM_BUG_ON(intel_engine_has_completed(engine, request->global_seqno));
- engine->timeline.seqno--;
/* We may be recursing from the signal callback of another i915 fence */
spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
- request->global_seqno = 0;
+
+ /*
+ * As we do not allow WAIT to preempt inflight requests,
+ * once we have executed a request, along with triggering
+ * any execution callbacks, we must preserve its ordering
+ * within the non-preemptible FIFO.
+ */
+ BUILD_BUG_ON(__NO_PREEMPTION & ~I915_PRIORITY_MASK); /* only internal */
+ request->sched.attr.priority |= __NO_PREEMPTION;
+
if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags))
- intel_engine_cancel_signaling(request);
+ i915_request_cancel_breadcrumb(request);
+
+ GEM_BUG_ON(!test_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags));
+ clear_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags);
+
spin_unlock(&request->lock);
/* Transfer back from the global per-engine timeline to per-context */
@@ -566,6 +551,43 @@ submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
return NOTIFY_DONE;
}
+static void ring_retire_requests(struct intel_ring *ring)
+{
+ struct i915_request *rq, *rn;
+
+ list_for_each_entry_safe(rq, rn, &ring->request_list, ring_link) {
+ if (!i915_request_completed(rq))
+ break;
+
+ i915_request_retire(rq);
+ }
+}
+
+static noinline struct i915_request *
+i915_request_alloc_slow(struct intel_context *ce)
+{
+ struct intel_ring *ring = ce->ring;
+ struct i915_request *rq;
+
+ if (list_empty(&ring->request_list))
+ goto out;
+
+ /* Ratelimit ourselves to prevent oom from malicious clients */
+ rq = list_last_entry(&ring->request_list, typeof(*rq), ring_link);
+ cond_synchronize_rcu(rq->rcustate);
+
+ /* Retire our old requests in the hope that we free some */
+ ring_retire_requests(ring);
+
+out:
+ return kmem_cache_alloc(global.slab_requests, GFP_KERNEL);
+}
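/*
 * Editorial sketch (not part of the patch): the RCU cookie pairing behind
 * the ratelimit above. rq->rcustate is snapshotted with
 * get_state_synchronize_rcu() when the request is allocated (see
 * i915_request_alloc() below); cond_synchronize_rcu() then blocks only if a
 * full grace period has not yet elapsed since that snapshot, so well-behaved
 * clients rarely sleep here. Simplified, illustrative types.
 */
#include <linux/rcupdate.h>

struct tracked_object {
	unsigned long rcustate;
};

static void tracked_object_init(struct tracked_object *obj)
{
	/* snapshot the current RCU grace-period state */
	obj->rcustate = get_state_synchronize_rcu();
}

static void tracked_object_throttle(const struct tracked_object *obj)
{
	/* sleep only if that grace period is still pending */
	cond_synchronize_rcu(obj->rcustate);
}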
+
+static int add_timeline_barrier(struct i915_request *rq)
+{
+ return i915_request_await_active_request(rq, &rq->timeline->barrier);
+}
+
/**
* i915_request_alloc - allocate a request structure
*
@@ -579,8 +601,10 @@ struct i915_request *
i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
{
struct drm_i915_private *i915 = engine->i915;
- struct i915_request *rq;
struct intel_context *ce;
+ struct i915_timeline *tl;
+ struct i915_request *rq;
+ u32 seqno;
int ret;
lockdep_assert_held(&i915->drm.struct_mutex);
@@ -596,8 +620,9 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
* ABI: Before userspace accesses the GPU (e.g. execbuffer), report
* EIO if the GPU is already wedged.
*/
- if (i915_terminally_wedged(&i915->gpu_error))
- return ERR_PTR(-EIO);
+ ret = i915_terminally_wedged(i915);
+ if (ret)
+ return ERR_PTR(ret);
/*
* Pinning the contexts may generate requests in order to acquire
@@ -608,13 +633,8 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
if (IS_ERR(ce))
return ERR_CAST(ce);
- ret = reserve_gt(i915);
- if (ret)
- goto err_unpin;
-
- ret = intel_ring_wait_for_space(ce->ring, MIN_SPACE_FOR_ADD_REQUEST);
- if (ret)
- goto err_unreserve;
+ reserve_gt(i915);
+ mutex_lock(&ce->ring->timeline->mutex);
/* Move our oldest request to the slab-cache (if not in use!) */
rq = list_first_entry(&ce->ring->request_list, typeof(*rq), ring_link);
@@ -628,7 +648,7 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
* We use RCU to look up requests in flight. The lookups may
* race with the request being allocated from the slab freelist.
* That is, the request we are writing to here may be in the process
- * of being read by __i915_gem_active_get_rcu(). As such,
+ * of being read by __i915_active_request_get_rcu(). As such,
* we have to be very careful when overwriting the contents. During
* the RCU lookup, we chase the request->engine pointer,
* read the request->global_seqno and increment the reference count.
@@ -651,51 +671,45 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
*
* Do not use kmem_cache_zalloc() here!
*/
- rq = kmem_cache_alloc(i915->requests,
+ rq = kmem_cache_alloc(global.slab_requests,
GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
if (unlikely(!rq)) {
- i915_retire_requests(i915);
-
- /* Ratelimit ourselves to prevent oom from malicious clients */
- rq = i915_gem_active_raw(&ce->ring->timeline->last_request,
- &i915->drm.struct_mutex);
- if (rq)
- cond_synchronize_rcu(rq->rcustate);
-
- rq = kmem_cache_alloc(i915->requests, GFP_KERNEL);
+ rq = i915_request_alloc_slow(ce);
if (!rq) {
ret = -ENOMEM;
goto err_unreserve;
}
}
- rq->rcustate = get_state_synchronize_rcu();
-
INIT_LIST_HEAD(&rq->active_list);
+ INIT_LIST_HEAD(&rq->execute_cb);
+
+ tl = ce->ring->timeline;
+ ret = i915_timeline_get_seqno(tl, rq, &seqno);
+ if (ret)
+ goto err_free;
+
rq->i915 = i915;
rq->engine = engine;
rq->gem_context = ctx;
rq->hw_context = ce;
rq->ring = ce->ring;
- rq->timeline = ce->ring->timeline;
+ rq->timeline = tl;
GEM_BUG_ON(rq->timeline == &engine->timeline);
+ rq->hwsp_seqno = tl->hwsp_seqno;
+ rq->hwsp_cacheline = tl->hwsp_cacheline;
+ rq->rcustate = get_state_synchronize_rcu(); /* acts as smp_mb() */
spin_lock_init(&rq->lock);
- dma_fence_init(&rq->fence,
- &i915_fence_ops,
- &rq->lock,
- rq->timeline->fence_context,
- timeline_get_seqno(rq->timeline));
+ dma_fence_init(&rq->fence, &i915_fence_ops, &rq->lock,
+ tl->fence_context, seqno);
/* We bump the ref for the fence chain */
i915_sw_fence_init(&i915_request_get(rq)->submit, submit_notify);
- init_waitqueue_head(&rq->execute);
i915_sched_node_init(&rq->sched);
/* No zalloc, must clear what we need by hand */
- rq->global_seqno = 0;
- rq->signaling.wait.seqno = 0;
rq->file_priv = NULL;
rq->batch = NULL;
rq->capture_list = NULL;
@@ -707,9 +721,13 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
* i915_request_add() call can't fail. Note that the reserve may need
* to be redone if the request is not actually submitted straight
* away, e.g. because a GPU scheduler has deferred it.
+ *
+ * Note that due to how we add reserved_space to intel_ring_begin()
+ * we need to double our reservation to ensure that if we need to wrap
+ * around inside i915_request_add() there is sufficient space at
+ * the beginning of the ring as well.
*/
- rq->reserved_space = MIN_SPACE_FOR_ADD_REQUEST;
- GEM_BUG_ON(rq->reserved_space < engine->emit_breadcrumb_sz);
+ rq->reserved_space = 2 * engine->emit_fini_breadcrumb_dw * sizeof(u32);
/*
* Record the position of the start of the request so that
@@ -719,8 +737,7 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
*/
rq->head = rq->ring->emit;
- /* Unconditionally invalidate GPU caches and TLBs. */
- ret = engine->emit_flush(rq, EMIT_INVALIDATE);
+ ret = add_timeline_barrier(rq);
if (ret)
goto err_unwind;
@@ -745,15 +762,70 @@ err_unwind:
GEM_BUG_ON(!list_empty(&rq->sched.signalers_list));
GEM_BUG_ON(!list_empty(&rq->sched.waiters_list));
- kmem_cache_free(i915->requests, rq);
+err_free:
+ kmem_cache_free(global.slab_requests, rq);
err_unreserve:
+ mutex_unlock(&ce->ring->timeline->mutex);
unreserve_gt(i915);
-err_unpin:
intel_context_unpin(ce);
return ERR_PTR(ret);
}
static int
+emit_semaphore_wait(struct i915_request *to,
+ struct i915_request *from,
+ gfp_t gfp)
+{
+ u32 hwsp_offset;
+ u32 *cs;
+ int err;
+
+ GEM_BUG_ON(!from->timeline->has_initial_breadcrumb);
+ GEM_BUG_ON(INTEL_GEN(to->i915) < 8);
+
+ /* Just emit the first semaphore we see as request space is limited. */
+ if (to->sched.semaphores & from->engine->mask)
+ return i915_sw_fence_await_dma_fence(&to->submit,
+ &from->fence, 0,
+ I915_FENCE_GFP);
+
+ /* We need to pin the signaler's HWSP until we are finished reading. */
+ err = i915_timeline_read_hwsp(from, to, &hwsp_offset);
+ if (err)
+ return err;
+
+ /* Only submit our spinner after the signaler is running! */
+ err = i915_request_await_execution(to, from, gfp);
+ if (err)
+ return err;
+
+ cs = intel_ring_begin(to, 4);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ /*
+ * Using greater-than-or-equal here means we have to worry
+ * about seqno wraparound. To side step that issue, we swap
+ * the timeline HWSP upon wrapping, so that everyone listening
+ * for the old (pre-wrap) values do not see the much smaller
+ * (post-wrap) values than they were expecting (and so wait
+ * forever).
+ */
+ *cs++ = MI_SEMAPHORE_WAIT |
+ MI_SEMAPHORE_GLOBAL_GTT |
+ MI_SEMAPHORE_POLL |
+ MI_SEMAPHORE_SAD_GTE_SDD;
+ *cs++ = from->fence.seqno;
+ *cs++ = hwsp_offset;
+ *cs++ = 0;
+
+ intel_ring_advance(to, cs);
+ to->sched.semaphores |= from->engine->mask;
+ to->sched.flags |= I915_SCHED_HAS_SEMAPHORE_CHAIN;
+ return 0;
+}
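/*
 * Editorial sketch (not part of the patch): a CPU-side model of the poll
 * condition programmed above. As the emitted dwords suggest,
 * MI_SEMAPHORE_SAD_GTE_SDD asks the command streamer to wait until the
 * dword at the given GGTT address is greater-than-or-equal to the inline
 * operand, as an unsigned compare. Because the compare is unsigned, a
 * post-wrap (numerically smaller) seqno written back to the same cacheline
 * would never satisfy an old waiter -- hence the timeline switches to a
 * fresh HWSP cacheline on wraparound instead of reusing the old one.
 */
#include <linux/types.h>

static bool semaphore_sad_gte_sdd(u32 semaphore_address_data, u32 semaphore_data)
{
	/* true once the signaler has written a seqno at or beyond the target */
	return semaphore_address_data >= semaphore_data;
}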
+
+static int
i915_request_await_request(struct i915_request *to, struct i915_request *from)
{
int ret;
@@ -765,9 +837,7 @@ i915_request_await_request(struct i915_request *to, struct i915_request *from)
return 0;
if (to->engine->schedule) {
- ret = i915_sched_node_add_dependency(to->i915,
- &to->sched,
- &from->sched);
+ ret = i915_sched_node_add_dependency(&to->sched, &from->sched);
if (ret < 0)
return ret;
}
@@ -776,34 +846,15 @@ i915_request_await_request(struct i915_request *to, struct i915_request *from)
ret = i915_sw_fence_await_sw_fence_gfp(&to->submit,
&from->submit,
I915_FENCE_GFP);
- return ret < 0 ? ret : 0;
- }
-
- if (to->engine->semaphore.sync_to) {
- u32 seqno;
-
- GEM_BUG_ON(!from->engine->semaphore.signal);
-
- seqno = i915_request_global_seqno(from);
- if (!seqno)
- goto await_dma_fence;
-
- if (seqno <= to->timeline->global_sync[from->engine->id])
- return 0;
-
- trace_i915_gem_ring_sync_to(to, from);
- ret = to->engine->semaphore.sync_to(to, from);
- if (ret)
- return ret;
-
- to->timeline->global_sync[from->engine->id] = seqno;
- return 0;
+ } else if (intel_engine_has_semaphores(to->engine) &&
+ to->gem_context->sched.priority >= I915_PRIORITY_NORMAL) {
+ ret = emit_semaphore_wait(to, from, I915_FENCE_GFP);
+ } else {
+ ret = i915_sw_fence_await_dma_fence(&to->submit,
+ &from->fence, 0,
+ I915_FENCE_GFP);
}
-await_dma_fence:
- ret = i915_sw_fence_await_dma_fence(&to->submit,
- &from->fence, 0,
- I915_FENCE_GFP);
return ret < 0 ? ret : 0;
}
@@ -948,6 +999,60 @@ void i915_request_skip(struct i915_request *rq, int error)
memset(vaddr + head, 0, rq->postfix - head);
}
+static struct i915_request *
+__i915_request_add_to_timeline(struct i915_request *rq)
+{
+ struct i915_timeline *timeline = rq->timeline;
+ struct i915_request *prev;
+
+ /*
+ * Dependency tracking and request ordering along the timeline
+ * is special cased so that we can eliminate redundant ordering
+ * operations while building the request (we know that the timeline
+ * itself is ordered, and here we guarantee it).
+ *
+ * As we know we will need to emit tracking along the timeline,
+ * we embed the hooks into our request struct -- at the cost of
+ * having to have specialised no-allocation interfaces (which will
+ * be beneficial elsewhere).
+ *
+ * A second benefit to open-coding i915_request_await_request is
+ * that we can apply a slight variant of the rules specialised
+ * for timelines that jump between engines (such as virtual engines).
+ * If we consider the case of a virtual engine, we must emit a dma-fence
+ * to prevent scheduling of the second request until the first is
+ * complete (to maximise our greedy late load balancing) and this
+ * precludes optimising to use semaphore serialisation of a single
+ * timeline across engines.
+ */
+ prev = i915_active_request_raw(&timeline->last_request,
+ &rq->i915->drm.struct_mutex);
+ if (prev && !i915_request_completed(prev)) {
+ if (is_power_of_2(prev->engine->mask | rq->engine->mask))
+ i915_sw_fence_await_sw_fence(&rq->submit,
+ &prev->submit,
+ &rq->submitq);
+ else
+ __i915_sw_fence_await_dma_fence(&rq->submit,
+ &prev->fence,
+ &rq->dmaq);
+ if (rq->engine->schedule)
+ __i915_sched_node_add_dependency(&rq->sched,
+ &prev->sched,
+ &rq->dep,
+ 0);
+ }
+
+ spin_lock_irq(&timeline->lock);
+ list_add_tail(&rq->link, &timeline->requests);
+ spin_unlock_irq(&timeline->lock);
+
+ GEM_BUG_ON(timeline->seqno != rq->fence.seqno);
+ __i915_active_request_set(&timeline->last_request, rq);
+
+ return prev;
+}
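/*
 * Editorial note (not part of the patch): the is_power_of_2() test above is
 * a compact way of asking "do prev and rq target the same single physical
 * engine?". Each physical engine owns one bit in ->mask, so the union of the
 * two masks has exactly one bit set (i.e. is a power of two) only when both
 * masks are that same single bit. A request that may run on several engines
 * (such as on a virtual engine) carries several bits and therefore falls
 * through to the dma-fence ordering path. Helper name is illustrative.
 */
#include <linux/log2.h>

static bool same_single_engine(unsigned int prev_mask, unsigned int rq_mask)
{
	return is_power_of_2(prev_mask | rq_mask);
}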
+
/*
* NB: This function is not allowed to fail. Doing so would mean the
* request is not being tracked for completion but the work itself is
@@ -961,10 +1066,10 @@ void i915_request_add(struct i915_request *request)
struct i915_request *prev;
u32 *cs;
- GEM_TRACE("%s fence %llx:%d\n",
+ GEM_TRACE("%s fence %llx:%lld\n",
engine->name, request->fence.context, request->fence.seqno);
- lockdep_assert_held(&request->i915->drm.struct_mutex);
+ lockdep_assert_held(&request->timeline->mutex);
trace_i915_request_add(request);
/*
@@ -979,8 +1084,8 @@ void i915_request_add(struct i915_request *request)
* should already have been reserved in the ring buffer. Let the ring
* know that it is time to use that space up.
*/
+ GEM_BUG_ON(request->reserved_space > request->ring->space);
request->reserved_space = 0;
- engine->emit_flush(request, EMIT_FLUSH);
/*
* Record the position of the start of the breadcrumb so that
@@ -988,41 +1093,16 @@ void i915_request_add(struct i915_request *request)
* GPU processing the request, we never over-estimate the
* position of the ring's HEAD.
*/
- cs = intel_ring_begin(request, engine->emit_breadcrumb_sz);
+ cs = intel_ring_begin(request, engine->emit_fini_breadcrumb_dw);
GEM_BUG_ON(IS_ERR(cs));
request->postfix = intel_ring_offset(request, cs);
- /*
- * Seal the request and mark it as pending execution. Note that
- * we may inspect this state, without holding any locks, during
- * hangcheck. Hence we apply the barrier to ensure that we do not
- * see a more recent value in the hws than we are tracking.
- */
-
- prev = i915_gem_active_raw(&timeline->last_request,
- &request->i915->drm.struct_mutex);
- if (prev && !i915_request_completed(prev)) {
- i915_sw_fence_await_sw_fence(&request->submit, &prev->submit,
- &request->submitq);
- if (engine->schedule)
- __i915_sched_node_add_dependency(&request->sched,
- &prev->sched,
- &request->dep,
- 0);
- }
-
- spin_lock_irq(&timeline->lock);
- list_add_tail(&request->link, &timeline->requests);
- spin_unlock_irq(&timeline->lock);
-
- GEM_BUG_ON(timeline->seqno != request->fence.seqno);
- i915_gem_active_set(&timeline->last_request, request);
+ prev = __i915_request_add_to_timeline(request);
list_add_tail(&request->ring_link, &ring->request_list);
- if (list_is_first(&request->ring_link, &ring->request_list)) {
- GEM_TRACE("marking %s as active\n", ring->timeline->name);
+ if (list_is_first(&request->ring_link, &ring->request_list))
list_add(&ring->active_link, &request->i915->gt.active_rings);
- }
+ request->i915->gt.active_engines |= request->engine->mask;
request->emitted_jiffies = jiffies;
/*
@@ -1042,12 +1122,27 @@ void i915_request_add(struct i915_request *request)
struct i915_sched_attr attr = request->gem_context->sched;
/*
+ * Boost actual workloads past semaphores!
+ *
+ * With semaphores we spin on one engine waiting for another,
+ * simply to reduce the latency of starting our work when
+ * the signaler completes. However, if there is any other
+ * work that we could be doing on this engine instead, that
+ * is better utilisation and will reduce the overall duration
+ * of the current work. To avoid PI boosting a semaphore
+ * far in the distance ahead of useful work, we keep a history
+ * of any semaphore use along our dependency chain.
+ */
+ if (!(request->sched.flags & I915_SCHED_HAS_SEMAPHORE_CHAIN))
+ attr.priority |= I915_PRIORITY_NOSEMAPHORE;
+
+ /*
* Boost priorities to new clients (new request flows).
*
* Allow interactive/synchronous clients to jump ahead of
* the bulk clients. (FQ_CODEL)
*/
- if (!prev || i915_request_completed(prev))
+ if (list_empty(&request->sched.signalers_list))
attr.priority |= I915_PRIORITY_NEWCLIENT;
engine->schedule(request, &attr);
@@ -1075,6 +1170,8 @@ void i915_request_add(struct i915_request *request)
*/
if (prev && i915_request_completed(prev))
i915_request_retire_upto(prev);
+
+ mutex_unlock(&request->timeline->mutex);
}
static unsigned long local_clock_us(unsigned int *cpu)
@@ -1110,13 +1207,10 @@ static bool busywait_stop(unsigned long timeout, unsigned int cpu)
return this_cpu != cpu;
}
-static bool __i915_spin_request(const struct i915_request *rq,
- u32 seqno, int state, unsigned long timeout_us)
+static bool __i915_spin_request(const struct i915_request * const rq,
+ int state, unsigned long timeout_us)
{
- struct intel_engine_cs *engine = rq->engine;
- unsigned int irq, cpu;
-
- GEM_BUG_ON(!seqno);
+ unsigned int cpu;
/*
* Only wait for the request if we know it is likely to complete.
@@ -1124,12 +1218,12 @@ static bool __i915_spin_request(const struct i915_request *rq,
* We don't track the timestamps around requests, nor the average
* request length, so we do not have a good indicator that this
* request will complete within the timeout. What we do know is the
- * order in which requests are executed by the engine and so we can
- * tell if the request has started. If the request hasn't started yet,
- * it is a fair assumption that it will not complete within our
- * relatively short timeout.
+ * order in which requests are executed by the context and so we can
+ * tell if the request has been started. If the request is not even
+ * running yet, it is a fair assumption that it will not complete
+ * within our relatively short timeout.
*/
- if (!intel_engine_has_started(engine, seqno))
+ if (!i915_request_is_running(rq))
return false;
/*
@@ -1143,20 +1237,10 @@ static bool __i915_spin_request(const struct i915_request *rq,
* takes to sleep on a request, on the order of a microsecond.
*/
- irq = READ_ONCE(engine->breadcrumbs.irq_count);
timeout_us += local_clock_us(&cpu);
do {
- if (intel_engine_has_completed(engine, seqno))
- return seqno == i915_request_global_seqno(rq);
-
- /*
- * Seqno are meant to be ordered *before* the interrupt. If
- * we see an interrupt without a corresponding seqno advance,
- * assume we won't see one in the near future but require
- * the engine->seqno_barrier() to fixup coherency.
- */
- if (READ_ONCE(engine->breadcrumbs.irq_count) != irq)
- break;
+ if (i915_request_completed(rq))
+ return true;
if (signal_pending_state(state, current))
break;
@@ -1170,16 +1254,16 @@ static bool __i915_spin_request(const struct i915_request *rq,
return false;
}
-static bool __i915_wait_request_check_and_reset(struct i915_request *request)
-{
- struct i915_gpu_error *error = &request->i915->gpu_error;
+struct request_wait {
+ struct dma_fence_cb cb;
+ struct task_struct *tsk;
+};
- if (likely(!i915_reset_handoff(error)))
- return false;
+static void request_wait_wake(struct dma_fence *fence, struct dma_fence_cb *cb)
+{
+ struct request_wait *wait = container_of(cb, typeof(*wait), cb);
- __set_current_state(TASK_RUNNING);
- i915_reset(request->i915, error->stalled_mask, error->reason);
- return true;
+ wake_up_process(wait->tsk);
}
/**
@@ -1207,17 +1291,9 @@ long i915_request_wait(struct i915_request *rq,
{
const int state = flags & I915_WAIT_INTERRUPTIBLE ?
TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
- wait_queue_head_t *errq = &rq->i915->gpu_error.wait_queue;
- DEFINE_WAIT_FUNC(reset, default_wake_function);
- DEFINE_WAIT_FUNC(exec, default_wake_function);
- struct intel_wait wait;
+ struct request_wait wait;
might_sleep();
-#if IS_ENABLED(CONFIG_LOCKDEP)
- GEM_BUG_ON(debug_locks &&
- !!lockdep_is_held(&rq->i915->drm.struct_mutex) !=
- !!(flags & I915_WAIT_LOCKED));
-#endif
GEM_BUG_ON(timeout < 0);
if (i915_request_completed(rq))
@@ -1228,57 +1304,38 @@ long i915_request_wait(struct i915_request *rq,
trace_i915_request_wait_begin(rq, flags);
- add_wait_queue(&rq->execute, &exec);
- if (flags & I915_WAIT_LOCKED)
- add_wait_queue(errq, &reset);
+ /* Optimistic short spin before touching IRQs */
+ if (__i915_spin_request(rq, state, 5))
+ goto out;
- intel_wait_init(&wait);
- if (flags & I915_WAIT_PRIORITY)
+ /*
+ * This client is about to stall waiting for the GPU. In many cases
+ * this is undesirable and limits the throughput of the system, as
+ * many clients cannot continue processing user input/output whilst
+ * blocked. RPS autotuning may take tens of milliseconds to respond
+ * to the GPU load and thus incurs additional latency for the client.
+ * We can circumvent that by promoting the GPU frequency to maximum
+ * before we sleep. This makes the GPU throttle up much more quickly
+ * (good for benchmarks and user experience, e.g. window animations),
+ * but at a cost of spending more power processing the workload
+ * (bad for battery).
+ */
+ if (flags & I915_WAIT_PRIORITY) {
+ if (!i915_request_started(rq) && INTEL_GEN(rq->i915) >= 6)
+ gen6_rps_boost(rq);
i915_schedule_bump_priority(rq, I915_PRIORITY_WAIT);
+ }
-restart:
- do {
- set_current_state(state);
- if (intel_wait_update_request(&wait, rq))
- break;
-
- if (flags & I915_WAIT_LOCKED &&
- __i915_wait_request_check_and_reset(rq))
- continue;
-
- if (signal_pending_state(state, current)) {
- timeout = -ERESTARTSYS;
- goto complete;
- }
-
- if (!timeout) {
- timeout = -ETIME;
- goto complete;
- }
-
- timeout = io_schedule_timeout(timeout);
- } while (1);
-
- GEM_BUG_ON(!intel_wait_has_seqno(&wait));
- GEM_BUG_ON(!i915_sw_fence_signaled(&rq->submit));
-
- /* Optimistic short spin before touching IRQs */
- if (__i915_spin_request(rq, wait.seqno, state, 5))
- goto complete;
+ wait.tsk = current;
+ if (dma_fence_add_callback(&rq->fence, &wait.cb, request_wait_wake))
+ goto out;
- set_current_state(state);
- if (intel_engine_add_wait(rq->engine, &wait))
- /*
- * In order to check that we haven't missed the interrupt
- * as we enabled it, we need to kick ourselves to do a
- * coherent check on the seqno before we sleep.
- */
- goto wakeup;
+ for (;;) {
+ set_current_state(state);
- if (flags & I915_WAIT_LOCKED)
- __i915_wait_request_check_and_reset(rq);
+ if (i915_request_completed(rq))
+ break;
- for (;;) {
if (signal_pending_state(state, current)) {
timeout = -ERESTARTSYS;
break;
@@ -1290,70 +1347,14 @@ restart:
}
timeout = io_schedule_timeout(timeout);
-
- if (intel_wait_complete(&wait) &&
- intel_wait_check_request(&wait, rq))
- break;
-
- set_current_state(state);
-
-wakeup:
- /*
- * Carefully check if the request is complete, giving time
- * for the seqno to be visible following the interrupt.
- * We also have to check in case we are kicked by the GPU
- * reset in order to drop the struct_mutex.
- */
- if (__i915_request_irq_complete(rq))
- break;
-
- /*
- * If the GPU is hung, and we hold the lock, reset the GPU
- * and then check for completion. On a full reset, the engine's
- * HW seqno will be advanced passed us and we are complete.
- * If we do a partial reset, we have to wait for the GPU to
- * resume and update the breadcrumb.
- *
- * If we don't hold the mutex, we can just wait for the worker
- * to come along and update the breadcrumb (either directly
- * itself, or indirectly by recovering the GPU).
- */
- if (flags & I915_WAIT_LOCKED &&
- __i915_wait_request_check_and_reset(rq))
- continue;
-
- /* Only spin if we know the GPU is processing this request */
- if (__i915_spin_request(rq, wait.seqno, state, 2))
- break;
-
- if (!intel_wait_check_request(&wait, rq)) {
- intel_engine_remove_wait(rq->engine, &wait);
- goto restart;
- }
}
-
- intel_engine_remove_wait(rq->engine, &wait);
-complete:
__set_current_state(TASK_RUNNING);
- if (flags & I915_WAIT_LOCKED)
- remove_wait_queue(errq, &reset);
- remove_wait_queue(&rq->execute, &exec);
- trace_i915_request_wait_end(rq);
-
- return timeout;
-}
-static void ring_retire_requests(struct intel_ring *ring)
-{
- struct i915_request *request, *next;
-
- list_for_each_entry_safe(request, next,
- &ring->request_list, ring_link) {
- if (!i915_request_completed(request))
- break;
+ dma_fence_remove_callback(&rq->fence, &wait.cb);
- i915_request_retire(request);
- }
+out:
+ trace_i915_request_wait_end(rq);
+ return timeout;
}
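/*
 * Editorial sketch (not part of the patch): the generic shape of the
 * callback-driven wait used above, written against the plain dma-fence API.
 * dma_fence_add_callback() returns -ENOENT when the fence has already
 * signalled, which is why the fast path above can skip the sleep loop
 * entirely. All names below are illustrative.
 */
#include <linux/dma-fence.h>
#include <linux/errno.h>
#include <linux/sched.h>

struct fence_waiter {
	struct dma_fence_cb cb;
	struct task_struct *tsk;
};

static void fence_waiter_wake(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct fence_waiter *w = container_of(cb, typeof(*w), cb);

	wake_up_process(w->tsk);
}

static long wait_for_fence(struct dma_fence *fence, long timeout)
{
	struct fence_waiter w = { .tsk = current };

	if (dma_fence_add_callback(fence, &w.cb, fence_waiter_wake))
		return timeout;	/* already signalled, nothing to wait for */

	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);

		if (dma_fence_is_signaled(fence))
			break;

		if (!timeout) {
			timeout = -ETIME;
			break;
		}

		timeout = io_schedule_timeout(timeout);
	}
	__set_current_state(TASK_RUNNING);

	dma_fence_remove_callback(fence, &w.cb);
	return timeout;
}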
void i915_retire_requests(struct drm_i915_private *i915)
@@ -1365,11 +1366,66 @@ void i915_retire_requests(struct drm_i915_private *i915)
if (!i915->gt.active_requests)
return;
- list_for_each_entry_safe(ring, tmp, &i915->gt.active_rings, active_link)
+ list_for_each_entry_safe(ring, tmp,
+ &i915->gt.active_rings, active_link) {
+ intel_ring_get(ring); /* last rq holds reference! */
ring_retire_requests(ring);
+ intel_ring_put(ring);
+ }
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_request.c"
#include "selftests/i915_request.c"
#endif
+
+static void i915_global_request_shrink(void)
+{
+ kmem_cache_shrink(global.slab_dependencies);
+ kmem_cache_shrink(global.slab_execute_cbs);
+ kmem_cache_shrink(global.slab_requests);
+}
+
+static void i915_global_request_exit(void)
+{
+ kmem_cache_destroy(global.slab_dependencies);
+ kmem_cache_destroy(global.slab_execute_cbs);
+ kmem_cache_destroy(global.slab_requests);
+}
+
+static struct i915_global_request global = { {
+ .shrink = i915_global_request_shrink,
+ .exit = i915_global_request_exit,
+} };
+
+int __init i915_global_request_init(void)
+{
+ global.slab_requests = KMEM_CACHE(i915_request,
+ SLAB_HWCACHE_ALIGN |
+ SLAB_RECLAIM_ACCOUNT |
+ SLAB_TYPESAFE_BY_RCU);
+ if (!global.slab_requests)
+ return -ENOMEM;
+
+ global.slab_execute_cbs = KMEM_CACHE(execute_cb,
+ SLAB_HWCACHE_ALIGN |
+ SLAB_RECLAIM_ACCOUNT |
+ SLAB_TYPESAFE_BY_RCU);
+ if (!global.slab_execute_cbs)
+ goto err_requests;
+
+ global.slab_dependencies = KMEM_CACHE(i915_dependency,
+ SLAB_HWCACHE_ALIGN |
+ SLAB_RECLAIM_ACCOUNT);
+ if (!global.slab_dependencies)
+ goto err_execute_cbs;
+
+ i915_global_register(&global.base);
+ return 0;
+
+err_execute_cbs:
+ kmem_cache_destroy(global.slab_execute_cbs);
+err_requests:
+ kmem_cache_destroy(global.slab_requests);
+ return -ENOMEM;
+}
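/*
 * Editorial note (not part of the patch): KMEM_CACHE(type, flags) is
 * shorthand for kmem_cache_create() keyed on the struct's name, size and
 * alignment; the slab_requests cache above is equivalent to the open-coded
 * sketch below (assuming struct i915_request is in scope, as it is in this
 * file). SLAB_TYPESAFE_BY_RCU keeps freed objects type-stable until an RCU
 * grace period has elapsed, which is what permits the lockless request
 * lookup race described in i915_request_alloc().
 */
#include <linux/slab.h>

static struct kmem_cache *example_requests_cache;

static int __init example_requests_init(void)
{
	example_requests_cache =
		kmem_cache_create("i915_request",
				  sizeof(struct i915_request),
				  __alignof__(struct i915_request),
				  SLAB_HWCACHE_ALIGN |
				  SLAB_RECLAIM_ACCOUNT |
				  SLAB_TYPESAFE_BY_RCU,
				  NULL);
	return example_requests_cache ? 0 : -ENOMEM;
}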