Diffstat (limited to 'drivers/gpu/drm/i915/i915_request.h')
-rw-r--r-- drivers/gpu/drm/i915/i915_request.h | 49
1 file changed, 37 insertions(+), 12 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index 1bfe214a47e9..270f6cd37650 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -26,7 +26,9 @@
#define I915_REQUEST_H
#include <linux/dma-fence.h>
+#include <linux/hrtimer.h>
#include <linux/irq_work.h>
+#include <linux/llist.h>
#include <linux/lockdep.h>
#include "gem/i915_gem_context_types.h"
@@ -237,16 +239,6 @@ struct i915_request {
*/
const u32 *hwsp_seqno;
- /*
- * If we need to access the timeline's seqno for this request in
- * another request, we need to keep a read reference to this associated
- * cacheline, so that we do not free and recycle it before the foreign
- * observers have completed. Hence, we keep a pointer to the cacheline
- * inside the timeline's HWSP vma, but it is only valid while this
- * request has not completed and guarded by the timeline mutex.
- */
- struct intel_timeline_cacheline __rcu *hwsp_cacheline;
-
/** Position in the ring of the start of the request */
u32 head;
@@ -287,6 +279,12 @@ struct i915_request {
/** timeline->request entry for this request */
struct list_head link;
+ /** Watchdog support fields. */
+ struct i915_request_watchdog {
+ struct llist_node link;
+ struct hrtimer timer;
+ } watchdog;
+
I915_SELFTEST_DECLARE(struct {
struct list_head link;
unsigned long delay;
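
The new watchdog fields (and the hrtimer/llist includes above) pair a timer, armed for the request's deadline, with a lock-free list node, so an expired request can be handed from hardirq context to process context without taking locks. A minimal sketch of what the expiry path might look like, assuming a per-GT gt->watchdog.list/gt->watchdog.work pair that is not part of this header:

        static enum hrtimer_restart rq_watchdog_expired(struct hrtimer *hrtimer)
        {
                struct i915_request *rq =
                        container_of(hrtimer, struct i915_request, watchdog.timer);
                struct intel_gt *gt = rq->engine->gt;

                /*
                 * hrtimer callbacks run in hardirq context, so defer the
                 * actual cancellation to a worker via the lock-free llist
                 * node. gt->watchdog.{list,work} are assumptions here.
                 */
                if (!i915_request_completed(rq)) {
                        if (llist_add(&rq->watchdog.link, &gt->watchdog.list))
                                schedule_work(&gt->watchdog.work);
                } else {
                        i915_request_put(rq);
                }

                return HRTIMER_NORESTART;
        }
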
@@ -310,8 +308,8 @@ struct i915_request * __must_check
i915_request_create(struct intel_context *ce);
void __i915_request_skip(struct i915_request *rq);
-void i915_request_set_error_once(struct i915_request *rq, int error);
-void i915_request_mark_eio(struct i915_request *rq);
+bool i915_request_set_error_once(struct i915_request *rq, int error);
+struct i915_request *i915_request_mark_eio(struct i915_request *rq);
struct i915_request *__i915_request_commit(struct i915_request *request);
void __i915_request_queue(struct i915_request *rq,
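
Both helpers now report back to the caller: i915_request_set_error_once() returns whether this call was the one that actually installed the error, and i915_request_mark_eio() returns a request pointer, plausibly a referenced request on success and NULL if the request was already complete (the exact semantics live in i915_request.c, not in this header). A hedged usage sketch:

        /* Only act on the transition: true means we installed the error. */
        if (i915_request_set_error_once(rq, -EIO))
                pr_debug("request %llx:%lld flagged with -EIO\n",
                         rq->fence.context, rq->fence.seqno);

        /* Assumption: mark_eio hands back a reference, or NULL if too late. */
        rq = i915_request_mark_eio(rq);
        if (rq) {
                /* ... finish tearing down the now-failed request ... */
                i915_request_put(rq);
        }
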
@@ -366,6 +364,8 @@ void i915_request_submit(struct i915_request *request);
void __i915_request_unsubmit(struct i915_request *request);
void i915_request_unsubmit(struct i915_request *request);
+void i915_request_cancel(struct i915_request *rq, int error);
+
long i915_request_wait(struct i915_request *rq,
unsigned int flags,
long timeout)
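
i915_request_cancel() gives the watchdog (and anyone else) a way to terminate an in-flight request with a chosen errno. A sketch of the reaper half of the watchdog, reusing the hypothetical per-GT expired llist from the earlier expiry sketch:

        static void reap_expired_requests(struct llist_head *expired)
        {
                struct i915_request *rq, *rn;

                /* Detach the whole lock-free list in one atomic exchange. */
                llist_for_each_entry_safe(rq, rn, llist_del_all(expired),
                                          watchdog.link) {
                        i915_request_cancel(rq, -EINTR);
                        i915_request_put(rq); /* drop the timer's reference */
                }
        }
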
@@ -616,4 +616,29 @@ i915_request_active_timeline(const struct i915_request *rq)
lockdep_is_held(&rq->engine->active.lock));
}
+static inline u32
+i915_request_active_seqno(const struct i915_request *rq)
+{
+ u32 hwsp_phys_base =
+ page_mask_bits(i915_request_active_timeline(rq)->hwsp_offset);
+ u32 hwsp_relative_offset = offset_in_page(rq->hwsp_seqno);
+
+ /*
+ * Because of wraparound, we cannot simply take tl->hwsp_offset,
+ * but instead rely on the fact that the in-page offset of the
+ * seqno's vaddr is the same as the in-page offset of hwsp_offset.
+ * Take the top (page) bits from tl->hwsp_offset and combine them
+ * with the relative offset of rq->hwsp_seqno within its page.
+ *
+ * As rq->hwsp_seqno is rewritten when the request is signaled, this
+ * only works while the request has not yet been signaled; but at
+ * that point the offset is no longer needed.
+ */
+
+ return hwsp_phys_base + hwsp_relative_offset;
+}
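
The returned value is the GGTT offset of the seqno slot inside the timeline's HWSP page, which is what command streamer instructions such as MI_SEMAPHORE_WAIT consume. A sketch of how an inter-request semaphore wait might be emitted against it (cs, seqno and from are assumed to come from the caller; this mirrors the usual shape of a semaphore wait rather than the exact i915 emitter):

        u32 hwsp = i915_request_active_seqno(from);

        *cs++ = MI_SEMAPHORE_WAIT |
                MI_SEMAPHORE_GLOBAL_GTT |
                MI_SEMAPHORE_POLL |
                MI_SEMAPHORE_SAD_GTE_SDD;
        *cs++ = seqno;  /* value to compare against */
        *cs++ = hwsp;   /* GGTT address of the seqno to poll */
        *cs++ = 0;      /* upper dword of the address */
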
+
+bool
+i915_request_active_engine(struct i915_request *rq,
+ struct intel_engine_cs **active);
+
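
i915_request_active_engine() presumably reports whether the request is currently executing and, if so, on which physical engine; this is useful when a hang or a cancellation needs to target a specific engine. A hedged sketch, with the return value assumed to mean "rq is active on *active":

        struct intel_engine_cs *engine;

        if (i915_request_active_engine(rq, &engine))
                pr_info("request %llx:%lld active on %s\n",
                        rq->fence.context, rq->fence.seqno, engine->name);
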
#endif /* I915_REQUEST_H */