Diffstat (limited to 'drivers/gpu/drm/i915')
35 files changed, 560 insertions, 315 deletions
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index 31337d2a2cde..aabf09f89cad 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -12878,10 +12878,11 @@ compute_sink_pipe_bpp(const struct drm_connector_state *conn_state,
 	case 10 ... 11:
 		bpp = 10 * 3;
 		break;
-	case 12:
+	case 12 ... 16:
 		bpp = 12 * 3;
 		break;
 	default:
+		MISSING_CASE(conn_state->max_bpc);
 		return -EINVAL;
 	}
 
@@ -18020,16 +18021,6 @@ int intel_modeset_init_nogem(struct drm_i915_private *i915)
 	if (!HAS_GMCH(i915))
 		sanitize_watermarks(i915);
 
-	/*
-	 * Force all active planes to recompute their states. So that on
-	 * mode_setcrtc after probe, all the intel_plane_state variables
-	 * are already calculated and there is no assert_plane warnings
-	 * during bootup.
-	 */
-	ret = intel_initial_commit(dev);
-	if (ret)
-		drm_dbg_kms(&i915->drm, "Initial commit in probe failed.\n");
-
 	return 0;
 }
 
@@ -18038,11 +18029,21 @@ int intel_modeset_init(struct drm_i915_private *i915)
 {
 	int ret;
 
-	intel_overlay_setup(i915);
-
 	if (!HAS_DISPLAY(i915))
 		return 0;
 
+	/*
+	 * Force all active planes to recompute their states. So that on
+	 * mode_setcrtc after probe, all the intel_plane_state variables
+	 * are already calculated and there is no assert_plane warnings
+	 * during bootup.
+	 */
+	ret = intel_initial_commit(&i915->drm);
+	if (ret)
+		drm_dbg_kms(&i915->drm, "Initial modeset failed, %d\n", ret);
+
+	intel_overlay_setup(i915);
+
 	ret = intel_fbdev_init(&i915->drm);
 	if (ret)
 		return ret;
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index bf1e9cf1c0f3..9bc59fd2f95f 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -573,7 +573,7 @@ static u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp,
 		return 0;
 	}
 	/* Also take into account max slice width */
-	min_slice_count = min_t(u8, min_slice_count,
+	min_slice_count = max_t(u8, min_slice_count,
 				DIV_ROUND_UP(mode_hdisplay,
 					     max_slice_width));
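The intel_dp.c hunk above swaps min_t() for max_t() because both operands are lower bounds on the DSC slice count — one derived from throughput, one from the maximum slice width — and a valid configuration must satisfy both, so they combine with a maximum. A minimal userspace sketch of the corrected logic, with illustrative names and constants rather than the driver's:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/*
 * Combine two lower bounds on the slice count. Taking the min (as the
 * old code did) could select a slice count that violates the width limit.
 */
static unsigned int min_slice_count(unsigned int throughput_bound,
				    unsigned int hdisplay,
				    unsigned int max_slice_width)
{
	unsigned int width_bound = DIV_ROUND_UP(hdisplay, max_slice_width);

	return throughput_bound > width_bound ? throughput_bound : width_bound;
}

int main(void)
{
	/*
	 * 3840 pixels across, slices at most 1280 wide: at least 3 slices
	 * are needed even if throughput alone would allow 2.
	 */
	printf("%u\n", min_slice_count(2, 3840, 1280)); /* prints 3 */
	return 0;
}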
diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
index 8a9d0bdde1bf..40e9cb29233d 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.c
+++ b/drivers/gpu/drm/i915/display/intel_psr.c
@@ -1754,7 +1754,7 @@ void intel_psr_atomic_check(struct drm_connector *connector,
 		return;
 
 	intel_connector = to_intel_connector(connector);
-	dig_port = enc_to_dig_port(intel_attached_encoder(intel_connector));
+	dig_port = enc_to_dig_port(to_intel_encoder(new_state->best_encoder));
 	if (dev_priv->psr.dp != &dig_port->dp)
 		return;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_domain.c b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
index 7c90a63c273d..fcce6909f201 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_domain.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
@@ -509,21 +509,6 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
 		return -ENOENT;
 
 	/*
-	 * Already in the desired write domain? Nothing for us to do!
-	 *
-	 * We apply a little bit of cunning here to catch a broader set of
-	 * no-ops. If obj->write_domain is set, we must be in the same
-	 * obj->read_domains, and only that domain. Therefore, if that
-	 * obj->write_domain matches the request read_domains, we are
-	 * already in the same read/write domain and can skip the operation,
-	 * without having to further check the requested write_domain.
-	 */
-	if (READ_ONCE(obj->write_domain) == read_domains) {
-		err = 0;
-		goto out;
-	}
-
-	/*
 	 * Try to flush the object off the GPU without holding the lock.
 	 * We will repeat the flush holding the lock in the normal manner
 	 * to catch cases where we are gazumped.
@@ -560,6 +545,19 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
 	if (err)
 		goto out;
 
+	/*
+	 * Already in the desired write domain? Nothing for us to do!
+	 *
+	 * We apply a little bit of cunning here to catch a broader set of
+	 * no-ops. If obj->write_domain is set, we must be in the same
+	 * obj->read_domains, and only that domain. Therefore, if that
+	 * obj->write_domain matches the request read_domains, we are
+	 * already in the same read/write domain and can skip the operation,
+	 * without having to further check the requested write_domain.
+	 */
+	if (READ_ONCE(obj->write_domain) == read_domains)
+		goto out_unpin;
+
 	err = i915_gem_object_lock_interruptible(obj, NULL);
 	if (err)
 		goto out_unpin;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index 1904e6e5ea64..b07dc1156a0e 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -3097,7 +3097,7 @@ static void retire_requests(struct intel_timeline *tl, struct i915_request *end)
 			break;
 }
 
-static void eb_request_add(struct i915_execbuffer *eb)
+static int eb_request_add(struct i915_execbuffer *eb, int err)
 {
 	struct i915_request *rq = eb->request;
 	struct intel_timeline * const tl = i915_request_timeline(rq);
@@ -3118,6 +3118,7 @@ static void eb_request_add(struct i915_execbuffer *eb)
 		/* Serialise with context_close via the add_to_timeline */
 		i915_request_set_error_once(rq, -ENOENT);
 		__i915_request_skip(rq);
+		err = -ENOENT; /* override any transient errors */
 	}
 
 	__i915_request_queue(rq, &attr);
@@ -3127,6 +3128,8 @@ static void eb_request_add(struct i915_execbuffer *eb)
 		retire_requests(tl, prev);
 
 	mutex_unlock(&tl->mutex);
+
+	return err;
 }
 
 static const i915_user_extension_fn execbuf_extensions[] = {
@@ -3332,7 +3335,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
 	err = eb_submit(&eb, batch);
 err_request:
 	i915_request_get(eb.request);
-	eb_request_add(&eb);
+	err = eb_request_add(&eb, err);
 
 	if (eb.fences)
 		signal_fence_array(&eb);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index b5c15557cc87..d6711caa7f39 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -56,6 +56,8 @@ struct drm_i915_gem_object_ops {
 	void (*truncate)(struct drm_i915_gem_object *obj);
 	void (*writeback)(struct drm_i915_gem_object *obj);
 
+	int (*pread)(struct drm_i915_gem_object *obj,
+		     const struct drm_i915_gem_pread *arg);
 	int (*pwrite)(struct drm_i915_gem_object *obj,
 		      const struct drm_i915_gem_pwrite *arg);
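The new ->pread hook is optional per backing-store type; the ioctl layer (see the i915_gem.c hunk further down) primes its return value with -ENODEV so that "no backend implementation" falls through to the generic path, while any other result, success or failure, is final. A self-contained sketch of that convention with hypothetical types:

#include <errno.h>
#include <stddef.h>

struct object;

struct object_ops {
	/* optional fast path; NULL means "use the generic code" */
	int (*pread)(struct object *obj, void *buf, size_t len);
};

struct object {
	const struct object_ops *ops;
};

static int generic_pread(struct object *obj, void *buf, size_t len)
{
	return 0; /* slow, always-available path */
}

static int object_pread(struct object *obj, void *buf, size_t len)
{
	int ret = -ENODEV;

	if (obj->ops->pread)
		ret = obj->ops->pread(obj, buf, len);
	if (ret != -ENODEV)
		return ret; /* the backend handled it (or really failed) */

	return generic_pread(obj, buf, len);
}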
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_phys.c b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
index 28147aab47b9..3a4dfe2ef1da 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_phys.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
@@ -134,6 +134,58 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
 			  vaddr, dma);
 }
 
+static int
+phys_pwrite(struct drm_i915_gem_object *obj,
+	    const struct drm_i915_gem_pwrite *args)
+{
+	void *vaddr = sg_page(obj->mm.pages->sgl) + args->offset;
+	char __user *user_data = u64_to_user_ptr(args->data_ptr);
+	int err;
+
+	err = i915_gem_object_wait(obj,
+				   I915_WAIT_INTERRUPTIBLE |
+				   I915_WAIT_ALL,
+				   MAX_SCHEDULE_TIMEOUT);
+	if (err)
+		return err;
+
+	/*
+	 * We manually control the domain here and pretend that it
+	 * remains coherent i.e. in the GTT domain, like shmem_pwrite.
+	 */
+	i915_gem_object_invalidate_frontbuffer(obj, ORIGIN_CPU);
+
+	if (copy_from_user(vaddr, user_data, args->size))
+		return -EFAULT;
+
+	drm_clflush_virt_range(vaddr, args->size);
+	intel_gt_chipset_flush(&to_i915(obj->base.dev)->gt);
+
+	i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
+	return 0;
+}
+
+static int
+phys_pread(struct drm_i915_gem_object *obj,
+	   const struct drm_i915_gem_pread *args)
+{
+	void *vaddr = sg_page(obj->mm.pages->sgl) + args->offset;
+	char __user *user_data = u64_to_user_ptr(args->data_ptr);
+	int err;
+
+	err = i915_gem_object_wait(obj,
+				   I915_WAIT_INTERRUPTIBLE,
+				   MAX_SCHEDULE_TIMEOUT);
+	if (err)
+		return err;
+
+	drm_clflush_virt_range(vaddr, args->size);
+	if (copy_to_user(user_data, vaddr, args->size))
+		return -EFAULT;
+
+	return 0;
+}
+
 static void phys_release(struct drm_i915_gem_object *obj)
 {
 	fput(obj->base.filp);
@@ -144,6 +196,9 @@ static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
 	.get_pages = i915_gem_object_get_pages_phys,
 	.put_pages = i915_gem_object_put_pages_phys,
 
+	.pread = phys_pread,
+	.pwrite = phys_pwrite,
+
 	.release = phys_release,
 };
diff --git a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
index d8b206e53660..a24cc1ff08a0 100644
--- a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
@@ -30,18 +30,21 @@
 #include "i915_trace.h"
 #include "intel_breadcrumbs.h"
 #include "intel_context.h"
+#include "intel_engine_pm.h"
 #include "intel_gt_pm.h"
 #include "intel_gt_requests.h"
 
-static void irq_enable(struct intel_engine_cs *engine)
+static bool irq_enable(struct intel_engine_cs *engine)
 {
 	if (!engine->irq_enable)
-		return;
+		return false;
 
 	/* Caller disables interrupts */
 	spin_lock(&engine->gt->irq_lock);
 	engine->irq_enable(engine);
 	spin_unlock(&engine->gt->irq_lock);
+
+	return true;
 }
 
 static void irq_disable(struct intel_engine_cs *engine)
@@ -57,12 +60,11 @@ static void irq_disable(struct intel_engine_cs *engine)
 
 static void __intel_breadcrumbs_arm_irq(struct intel_breadcrumbs *b)
 {
-	lockdep_assert_held(&b->irq_lock);
-
-	if (!b->irq_engine || b->irq_armed)
-		return;
-
-	if (!intel_gt_pm_get_if_awake(b->irq_engine->gt))
+	/*
+	 * Since we are waiting on a request, the GPU should be busy
+	 * and should have its own rpm reference.
+	 */
+	if (GEM_WARN_ON(!intel_gt_pm_get_if_awake(b->irq_engine->gt)))
 		return;
 
 	/*
@@ -73,25 +75,24 @@ static void __intel_breadcrumbs_arm_irq(struct intel_breadcrumbs *b)
 	 */
 	WRITE_ONCE(b->irq_armed, true);
 
-	/*
-	 * Since we are waiting on a request, the GPU should be busy
-	 * and should have its own rpm reference. This is tracked
-	 * by i915->gt.awake, we can forgo holding our own wakref
-	 * for the interrupt as before i915->gt.awake is released (when
-	 * the driver is idle) we disarm the breadcrumbs.
-	 */
-
-	if (!b->irq_enabled++)
-		irq_enable(b->irq_engine);
+	/* Requests may have completed before we could enable the interrupt. */
+	if (!b->irq_enabled++ && irq_enable(b->irq_engine))
+		irq_work_queue(&b->irq_work);
 }
 
-static void __intel_breadcrumbs_disarm_irq(struct intel_breadcrumbs *b)
+static void intel_breadcrumbs_arm_irq(struct intel_breadcrumbs *b)
 {
-	lockdep_assert_held(&b->irq_lock);
-
-	if (!b->irq_engine || !b->irq_armed)
+	if (!b->irq_engine)
 		return;
 
+	spin_lock(&b->irq_lock);
+	if (!b->irq_armed)
+		__intel_breadcrumbs_arm_irq(b);
+	spin_unlock(&b->irq_lock);
+}
+
+static void __intel_breadcrumbs_disarm_irq(struct intel_breadcrumbs *b)
+{
 	GEM_BUG_ON(!b->irq_enabled);
 	if (!--b->irq_enabled)
 		irq_disable(b->irq_engine);
@@ -100,20 +101,37 @@ static void __intel_breadcrumbs_disarm_irq(struct intel_breadcrumbs *b)
 	intel_gt_pm_put_async(b->irq_engine->gt);
 }
 
+static void intel_breadcrumbs_disarm_irq(struct intel_breadcrumbs *b)
+{
+	spin_lock(&b->irq_lock);
+	if (b->irq_armed)
+		__intel_breadcrumbs_disarm_irq(b);
+	spin_unlock(&b->irq_lock);
+}
+
 static void add_signaling_context(struct intel_breadcrumbs *b,
 				  struct intel_context *ce)
 {
-	intel_context_get(ce);
-	list_add_tail(&ce->signal_link, &b->signalers);
-	if (list_is_first(&ce->signal_link, &b->signalers))
-		__intel_breadcrumbs_arm_irq(b);
+	lockdep_assert_held(&ce->signal_lock);
+
+	spin_lock(&b->signalers_lock);
+	list_add_rcu(&ce->signal_link, &b->signalers);
+	spin_unlock(&b->signalers_lock);
 }
 
-static void remove_signaling_context(struct intel_breadcrumbs *b,
+static bool remove_signaling_context(struct intel_breadcrumbs *b,
 				     struct intel_context *ce)
 {
-	list_del(&ce->signal_link);
-	intel_context_put(ce);
+	lockdep_assert_held(&ce->signal_lock);
+
+	if (!list_empty(&ce->signals))
+		return false;
+
+	spin_lock(&b->signalers_lock);
+	list_del_rcu(&ce->signal_link);
+	spin_unlock(&b->signalers_lock);
+
+	return true;
 }
 
 static inline bool __request_completed(const struct i915_request *rq)
@@ -174,73 +192,103 @@ static void add_retire(struct intel_breadcrumbs *b, struct intel_timeline *tl)
 		intel_engine_add_retire(b->irq_engine, tl);
 }
 
-static bool __signal_request(struct i915_request *rq, struct list_head *signals)
+static bool __signal_request(struct i915_request *rq)
 {
-	clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags);
+	GEM_BUG_ON(test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags));
 
 	if (!__dma_fence_signal(&rq->fence)) {
 		i915_request_put(rq);
 		return false;
 	}
 
-	list_add_tail(&rq->signal_link, signals);
 	return true;
 }
 
+static struct llist_node *
+slist_add(struct llist_node *node, struct llist_node *head)
+{
+	node->next = head;
+	return node;
+}
+
 static void signal_irq_work(struct irq_work *work)
 {
 	struct intel_breadcrumbs *b = container_of(work, typeof(*b), irq_work);
 	const ktime_t timestamp = ktime_get();
-	struct intel_context *ce, *cn;
-	struct list_head *pos, *next;
-	LIST_HEAD(signal);
-
-	spin_lock(&b->irq_lock);
+	struct llist_node *signal, *sn;
+	struct intel_context *ce;
 
-	if (list_empty(&b->signalers))
-		__intel_breadcrumbs_disarm_irq(b);
+	signal = NULL;
+	if (unlikely(!llist_empty(&b->signaled_requests)))
+		signal = llist_del_all(&b->signaled_requests);
 
-	list_splice_init(&b->signaled_requests, &signal);
+	/*
+	 * Keep the irq armed until the interrupt after all listeners are gone.
+	 *
+	 * Enabling/disabling the interrupt is rather costly, roughly a couple
+	 * of hundred microseconds. If we are proactive and enable/disable
+	 * the interrupt around every request that wants a breadcrumb, we
+	 * quickly drown in the extra orders of magnitude of latency imposed
+	 * on request submission.
+	 *
+	 * So we try to be lazy, and keep the interrupts enabled until no
+	 * more listeners appear within a breadcrumb interrupt interval (that
+	 * is until a request completes that no one cares about). The
+	 * observation is that listeners come in batches, and will often
+	 * listen to a bunch of requests in succession. Though note on icl+,
+	 * interrupts are always enabled due to concerns with rc6 being
+	 * dysfunctional with per-engine interrupt masking.
+	 *
+	 * We also try to avoid raising too many interrupts, as they may
+	 * be generated by userspace batches and it is unfortunately rather
+	 * too easy to drown the CPU under a flood of GPU interrupts. Thus
+	 * whenever no one appears to be listening, we turn off the interrupts.
+	 * Fewer interrupts should conserve power -- at the very least, fewer
+	 * interrupt draw less ire from other users of the system and tools
+	 * like powertop.
+	 */
+	if (!signal && READ_ONCE(b->irq_armed) && list_empty(&b->signalers))
+		intel_breadcrumbs_disarm_irq(b);
 
-	list_for_each_entry_safe(ce, cn, &b->signalers, signal_link) {
-		GEM_BUG_ON(list_empty(&ce->signals));
+	rcu_read_lock();
+	list_for_each_entry_rcu(ce, &b->signalers, signal_link) {
+		struct i915_request *rq;
 
-		list_for_each_safe(pos, next, &ce->signals) {
-			struct i915_request *rq =
-				list_entry(pos, typeof(*rq), signal_link);
+		list_for_each_entry_rcu(rq, &ce->signals, signal_link) {
+			bool release;
 
-			GEM_BUG_ON(!check_signal_order(ce, rq));
 			if (!__request_completed(rq))
 				break;
 
+			if (!test_and_clear_bit(I915_FENCE_FLAG_SIGNAL,
+						&rq->fence.flags))
+				break;
+
 			/*
 			 * Queue for execution after dropping the signaling
 			 * spinlock as the callback chain may end up adding
 			 * more signalers to the same context or engine.
 			 */
-			__signal_request(rq, &signal);
-		}
+			spin_lock(&ce->signal_lock);
+			list_del_rcu(&rq->signal_link);
+			release = remove_signaling_context(b, ce);
+			spin_unlock(&ce->signal_lock);
 
-		/*
-		 * We process the list deletion in bulk, only using a list_add
-		 * (not list_move) above but keeping the status of
-		 * rq->signal_link known with the I915_FENCE_FLAG_SIGNAL bit.
-		 */
-		if (!list_is_first(pos, &ce->signals)) {
-			/* Advance the list to the first incomplete request */
-			__list_del_many(&ce->signals, pos);
-			if (&ce->signals == pos) { /* now empty */
+			if (__signal_request(rq))
+				/* We own signal_node now, xfer to local list */
+				signal = slist_add(&rq->signal_node, signal);
+
+			if (release) {
 				add_retire(b, ce->timeline);
-				remove_signaling_context(b, ce);
+				intel_context_put(ce);
 			}
 		}
 	}
+	rcu_read_unlock();
 
-	spin_unlock(&b->irq_lock);
-
-	list_for_each_safe(pos, next, &signal) {
+	llist_for_each_safe(signal, sn, signal) {
 		struct i915_request *rq =
-			list_entry(pos, typeof(*rq), signal_link);
+			llist_entry(signal, typeof(*rq), signal_node);
 		struct list_head cb_list;
 
 		spin_lock(&rq->lock);
@@ -251,6 +299,9 @@ static void signal_irq_work(struct irq_work *work)
 
 		i915_request_put(rq);
 	}
+
+	if (!READ_ONCE(b->irq_armed) && !list_empty(&b->signalers))
+		intel_breadcrumbs_arm_irq(b);
 }
 
 struct intel_breadcrumbs *
@@ -262,14 +313,15 @@ intel_breadcrumbs_create(struct intel_engine_cs *irq_engine)
 	if (!b)
 		return NULL;
 
-	spin_lock_init(&b->irq_lock);
+	b->irq_engine = irq_engine;
+
+	spin_lock_init(&b->signalers_lock);
 	INIT_LIST_HEAD(&b->signalers);
-	INIT_LIST_HEAD(&b->signaled_requests);
+	init_llist_head(&b->signaled_requests);
 
+	spin_lock_init(&b->irq_lock);
 	init_irq_work(&b->irq_work, signal_irq_work);
 
-	b->irq_engine = irq_engine;
-
 	return b;
 }
@@ -292,27 +344,28 @@ void intel_breadcrumbs_reset(struct intel_breadcrumbs *b)
 
 void intel_breadcrumbs_park(struct intel_breadcrumbs *b)
 {
-	unsigned long flags;
-
-	if (!READ_ONCE(b->irq_armed))
-		return;
-
-	spin_lock_irqsave(&b->irq_lock, flags);
-	__intel_breadcrumbs_disarm_irq(b);
-	spin_unlock_irqrestore(&b->irq_lock, flags);
-
-	if (!list_empty(&b->signalers))
-		irq_work_queue(&b->irq_work);
+	/* Kick the work once more to drain the signalers */
+	irq_work_sync(&b->irq_work);
+	while (unlikely(READ_ONCE(b->irq_armed))) {
+		local_irq_disable();
+		signal_irq_work(&b->irq_work);
+		local_irq_enable();
+		cond_resched();
+	}
+	GEM_BUG_ON(!list_empty(&b->signalers));
 }
 
 void intel_breadcrumbs_free(struct intel_breadcrumbs *b)
 {
+	irq_work_sync(&b->irq_work);
+	GEM_BUG_ON(!list_empty(&b->signalers));
+	GEM_BUG_ON(b->irq_armed);
+
 	kfree(b);
 }
 
-static void insert_breadcrumb(struct i915_request *rq,
-			      struct intel_breadcrumbs *b)
+static void insert_breadcrumb(struct i915_request *rq)
 {
+	struct intel_breadcrumbs *b = READ_ONCE(rq->engine)->breadcrumbs;
 	struct intel_context *ce = rq->context;
 	struct list_head *pos;
 
@@ -327,12 +380,14 @@ static void insert_breadcrumb(struct i915_request *rq,
 	 * its signal completion.
 	 */
 	if (__request_completed(rq)) {
-		if (__signal_request(rq, &b->signaled_requests))
+		if (__signal_request(rq) &&
+		    llist_add(&rq->signal_node, &b->signaled_requests))
 			irq_work_queue(&b->irq_work);
 		return;
 	}
 
 	if (list_empty(&ce->signals)) {
+		intel_context_get(ce);
 		add_signaling_context(b, ce);
 		pos = &ce->signals;
 	} else {
@@ -358,18 +413,22 @@ static void insert_breadcrumb(struct i915_request *rq,
 				break;
 		}
 	}
-	list_add(&rq->signal_link, pos);
+	list_add_rcu(&rq->signal_link, pos);
 	GEM_BUG_ON(!check_signal_order(ce, rq));
+	GEM_BUG_ON(test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags));
 	set_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags);
 
-	/* Check after attaching to irq, interrupt may have already fired. */
-	if (__request_completed(rq))
-		irq_work_queue(&b->irq_work);
+	/*
+	 * Defer enabling the interrupt to after HW submission and recheck
+	 * the request as it may have completed and raised the interrupt as
+	 * we were attaching it into the lists.
+	 */
+	irq_work_queue(&b->irq_work);
 }
 
 bool i915_request_enable_breadcrumb(struct i915_request *rq)
 {
-	struct intel_breadcrumbs *b;
+	struct intel_context *ce = rq->context;
 
 	/* Serialises with i915_request_retire() using rq->lock */
 	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags))
@@ -384,67 +443,30 @@ bool i915_request_enable_breadcrumb(struct i915_request *rq)
 	if (!test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags))
 		return true;
 
-	/*
-	 * rq->engine is locked by rq->engine->active.lock. That however
-	 * is not known until after rq->engine has been dereferenced and
-	 * the lock acquired. Hence we acquire the lock and then validate
-	 * that rq->engine still matches the lock we hold for it.
-	 *
-	 * Here, we are using the breadcrumb lock as a proxy for the
-	 * rq->engine->active.lock, and we know that since the breadcrumb
-	 * will be serialised within i915_request_submit/i915_request_unsubmit,
-	 * the engine cannot change while active as long as we hold the
-	 * breadcrumb lock on that engine.
-	 *
-	 * From the dma_fence_enable_signaling() path, we are outside of the
-	 * request submit/unsubmit path, and so we must be more careful to
-	 * acquire the right lock.
-	 */
-	b = READ_ONCE(rq->engine)->breadcrumbs;
-	spin_lock(&b->irq_lock);
-	while (unlikely(b != READ_ONCE(rq->engine)->breadcrumbs)) {
-		spin_unlock(&b->irq_lock);
-		b = READ_ONCE(rq->engine)->breadcrumbs;
-		spin_lock(&b->irq_lock);
-	}
-
-	/*
-	 * Now that we are finally serialised with request submit/unsubmit,
-	 * [with b->irq_lock] and with i915_request_retire() [via checking
-	 * SIGNALED with rq->lock] confirm the request is indeed active. If
-	 * it is no longer active, the breadcrumb will be attached upon
-	 * i915_request_submit().
-	 */
+	spin_lock(&ce->signal_lock);
 	if (test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags))
-		insert_breadcrumb(rq, b);
-
-	spin_unlock(&b->irq_lock);
+		insert_breadcrumb(rq);
+	spin_unlock(&ce->signal_lock);
 
 	return true;
 }
 
 void i915_request_cancel_breadcrumb(struct i915_request *rq)
 {
-	struct intel_breadcrumbs *b = rq->engine->breadcrumbs;
+	struct intel_context *ce = rq->context;
+	bool release;
 
-	/*
-	 * We must wait for b->irq_lock so that we know the interrupt handler
-	 * has released its reference to the intel_context and has completed
-	 * the DMA_FENCE_FLAG_SIGNALED_BIT/I915_FENCE_FLAG_SIGNAL dance (if
-	 * required).
-	 */
-	spin_lock(&b->irq_lock);
-	if (test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags)) {
-		struct intel_context *ce = rq->context;
+	if (!test_and_clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags))
+		return;
 
-		list_del(&rq->signal_link);
-		if (list_empty(&ce->signals))
-			remove_signaling_context(b, ce);
+	spin_lock(&ce->signal_lock);
+	list_del_rcu(&rq->signal_link);
+	release = remove_signaling_context(rq->engine->breadcrumbs, ce);
+	spin_unlock(&ce->signal_lock);
+	if (release)
+		intel_context_put(ce);
 
-		clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags);
-		i915_request_put(rq);
-	}
-	spin_unlock(&b->irq_lock);
+	i915_request_put(rq);
 }
 
 static void print_signals(struct intel_breadcrumbs *b, struct drm_printer *p)
@@ -454,18 +476,17 @@ static void print_signals(struct intel_breadcrumbs *b, struct drm_printer *p)
 
 	drm_printf(p, "Signals:\n");
 
-	spin_lock_irq(&b->irq_lock);
-	list_for_each_entry(ce, &b->signalers, signal_link) {
-		list_for_each_entry(rq, &ce->signals, signal_link) {
+	rcu_read_lock();
+	list_for_each_entry_rcu(ce, &b->signalers, signal_link) {
+		list_for_each_entry_rcu(rq, &ce->signals, signal_link)
 			drm_printf(p, "\t[%llx:%llx%s] @ %dms\n",
 				   rq->fence.context, rq->fence.seqno,
 				   i915_request_completed(rq) ? "!" :
 				   i915_request_started(rq) ? "*" :
 				   "",
 				   jiffies_to_msecs(jiffies - rq->emitted_jiffies));
-		}
 	}
-	spin_unlock_irq(&b->irq_lock);
+	rcu_read_unlock();
 }
 
 void intel_engine_print_breadcrumbs(struct intel_engine_cs *engine,
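The rewritten signal_irq_work() replaces a spinlock-guarded list of already-signaled requests with an llist: producers push nodes lock-free from any context (llist_add), and the single consumer detaches the whole chain at once (llist_del_all). A userspace analogue of that pattern using C11 atomics — the node and payload types here are illustrative, not the kernel's:

#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

struct node {
	struct node *next;
	int payload;
};

static _Atomic(struct node *) head;

static void push(struct node *n) /* cf. llist_add() */
{
	struct node *old = atomic_load(&head);

	do {
		n->next = old;
	} while (!atomic_compare_exchange_weak(&head, &old, n));
}

static struct node *take_all(void) /* cf. llist_del_all() */
{
	return atomic_exchange(&head, NULL);
}

int main(void)
{
	struct node a = { .payload = 1 }, b = { .payload = 2 };

	push(&a);
	push(&b);

	/* The consumer drains in LIFO order: 2 then 1. */
	for (struct node *n = take_all(); n; n = n->next)
		printf("%d\n", n->payload);

	return 0;
}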
diff --git a/drivers/gpu/drm/i915/gt/intel_breadcrumbs_types.h b/drivers/gpu/drm/i915/gt/intel_breadcrumbs_types.h
index 8e53b9942695..a74bb3062bd8 100644
--- a/drivers/gpu/drm/i915/gt/intel_breadcrumbs_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_breadcrumbs_types.h
@@ -29,18 +29,16 @@
  * the overhead of waking that client is much preferred.
  */
 struct intel_breadcrumbs {
-	spinlock_t irq_lock; /* protects the lists used in hardirq context */
-
 	/* Not all breadcrumbs are attached to physical HW */
 	struct intel_engine_cs *irq_engine;
 
+	spinlock_t signalers_lock; /* protects the list of signalers */
 	struct list_head signalers;
-	struct list_head signaled_requests;
+	struct llist_head signaled_requests;
 
+	spinlock_t irq_lock; /* protects the interrupt from hardirq context */
 	struct irq_work irq_work; /* for use from inside irq_lock */
-
 	unsigned int irq_enabled;
-
 	bool irq_armed;
 };
diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c
index 92a3f25c4006..349e7fa1488d 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.c
+++ b/drivers/gpu/drm/i915/gt/intel_context.c
@@ -25,11 +25,18 @@ static struct intel_context *intel_context_alloc(void)
 	return kmem_cache_zalloc(global.slab_ce, GFP_KERNEL);
 }
 
-void intel_context_free(struct intel_context *ce)
+static void rcu_context_free(struct rcu_head *rcu)
 {
+	struct intel_context *ce = container_of(rcu, typeof(*ce), rcu);
+
 	kmem_cache_free(global.slab_ce, ce);
 }
 
+void intel_context_free(struct intel_context *ce)
+{
+	call_rcu(&ce->rcu, rcu_context_free);
+}
+
 struct intel_context *
 intel_context_create(struct intel_engine_cs *engine)
 {
@@ -356,8 +363,7 @@ static int __intel_context_active(struct i915_active *active)
 }
 
 void
-intel_context_init(struct intel_context *ce,
-		   struct intel_engine_cs *engine)
+intel_context_init(struct intel_context *ce, struct intel_engine_cs *engine)
 {
 	GEM_BUG_ON(!engine->cops);
 	GEM_BUG_ON(!engine->gt->vm);
@@ -373,7 +379,8 @@ intel_context_init(struct intel_context *ce,
 
 	ce->vm = i915_vm_get(engine->gt->vm);
 
-	INIT_LIST_HEAD(&ce->signal_link);
+	/* NB ce->signal_link/lock is used under RCU */
+	spin_lock_init(&ce->signal_lock);
 	INIT_LIST_HEAD(&ce->signals);
 
 	mutex_init(&ce->pin_mutex);
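intel_context_free() now defers the kmem_cache_free() through call_rcu(), and (per the intel_context_types.h hunk below) the context shares storage between its kref and rcu_head. A kernel-style sketch of that pattern on a hypothetical "item" type — not the driver's code:

#include <linux/kref.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

/*
 * The refcount and the RCU callback head share storage: once the last
 * reference is dropped the kref can never be used again, so its bytes
 * are free to carry the rcu_head. The corollary, noted in the hunk
 * below, is that kref_get_unless_zero() is forbidden -- a zero-ref
 * object may already have a live rcu_head in that union. RCU readers
 * that found the object through an RCU-protected list may still
 * dereference it until the grace period ends, which is exactly what
 * call_rcu() waits for before freeing.
 */
struct item {
	union {
		struct kref ref;	/* no kref_get_unless_zero()! */
		struct rcu_head rcu;
	};
	int payload;
};

static void item_free_rcu(struct rcu_head *rcu)
{
	kfree(container_of(rcu, struct item, rcu));
}

static void item_release(struct kref *ref)
{
	struct item *it = container_of(ref, struct item, ref);

	/* Defer the actual kfree() until all RCU readers are done. */
	call_rcu(&it->rcu, item_free_rcu);
}

static void item_put(struct item *it)
{
	kref_put(&it->ref, item_release);
}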
diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h
index 552cb57a2e8c..52fa9c132746 100644
--- a/drivers/gpu/drm/i915/gt/intel_context_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_context_types.h
@@ -25,6 +25,7 @@ DECLARE_EWMA(runtime, 3, 8);
 struct i915_gem_context;
 struct i915_gem_ww_ctx;
 struct i915_vma;
+struct intel_breadcrumbs;
 struct intel_context;
 struct intel_ring;
 
@@ -44,7 +45,16 @@ struct intel_context_ops {
 };
 
 struct intel_context {
-	struct kref ref;
+	/*
+	 * Note: Some fields may be accessed under RCU.
+	 *
+	 * Unless otherwise noted a field can safely be assumed to be protected
+	 * by strong reference counting.
+	 */
+	union {
+		struct kref ref; /* no kref_get_unless_zero()! */
+		struct rcu_head rcu;
+	};
 
 	struct intel_engine_cs *engine;
 	struct intel_engine_cs *inflight;
@@ -54,8 +64,15 @@ struct intel_context {
 	struct i915_address_space *vm;
 	struct i915_gem_context __rcu *gem_context;
 
-	struct list_head signal_link;
-	struct list_head signals;
+	/*
+	 * @signal_lock protects the list of requests that need signaling,
+	 * @signals. While there are any requests that need signaling,
+	 * we add the context to the breadcrumbs worker, and remove it
+	 * upon completion/cancellation of the last request.
+	 */
+	struct list_head signal_link; /* Accessed under RCU */
+	struct list_head signals; /* Guarded by signal_lock */
+	spinlock_t signal_lock; /* protects signals, the list of requests */
 
 	struct i915_vma *state;
 	struct intel_ring *ring;
diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h
index 7c3a1012e702..760fefdfe392 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine.h
@@ -245,22 +245,14 @@ static inline u32 *gen12_emit_pipe_control(u32 *batch, u32 flags0, u32 flags1, u
 }
 
 static inline u32 *
-__gen8_emit_ggtt_write_rcs(u32 *cs, u32 value, u32 gtt_offset, u32 flags0, u32 flags1)
+__gen8_emit_write_rcs(u32 *cs, u32 value, u32 offset, u32 flags0, u32 flags1)
 {
-	/* We're using qword write, offset should be aligned to 8 bytes. */
-	GEM_BUG_ON(!IS_ALIGNED(gtt_offset, 8));
-
-	/* w/a for post sync ops following a GPGPU operation we
-	 * need a prior CS_STALL, which is emitted by the flush
-	 * following the batch.
-	 */
 	*cs++ = GFX_OP_PIPE_CONTROL(6) | flags0;
-	*cs++ = flags1 | PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_GLOBAL_GTT_IVB;
-	*cs++ = gtt_offset;
+	*cs++ = flags1 | PIPE_CONTROL_QW_WRITE;
+	*cs++ = offset;
 	*cs++ = 0;
 	*cs++ = value;
-	/* We're thrashing one dword of HWS. */
-	*cs++ = 0;
+	*cs++ = 0; /* We're thrashing one extra dword. */
 
 	return cs;
 }
@@ -268,13 +260,38 @@ __gen8_emit_ggtt_write_rcs(u32 *cs, u32 value, u32 gtt_offset, u32 flags0, u32 f
 static inline u32*
 gen8_emit_ggtt_write_rcs(u32 *cs, u32 value, u32 gtt_offset, u32 flags)
 {
-	return __gen8_emit_ggtt_write_rcs(cs, value, gtt_offset, 0, flags);
+	/* We're using qword write, offset should be aligned to 8 bytes. */
+	GEM_BUG_ON(!IS_ALIGNED(gtt_offset, 8));
+
+	return __gen8_emit_write_rcs(cs,
+				     value,
+				     gtt_offset,
+				     0,
+				     flags | PIPE_CONTROL_GLOBAL_GTT_IVB);
 }
 
 static inline u32*
 gen12_emit_ggtt_write_rcs(u32 *cs, u32 value, u32 gtt_offset, u32 flags0, u32 flags1)
 {
-	return __gen8_emit_ggtt_write_rcs(cs, value, gtt_offset, flags0, flags1);
+	/* We're using qword write, offset should be aligned to 8 bytes. */
+	GEM_BUG_ON(!IS_ALIGNED(gtt_offset, 8));
+
+	return __gen8_emit_write_rcs(cs,
+				     value,
+				     gtt_offset,
+				     flags0,
+				     flags1 | PIPE_CONTROL_GLOBAL_GTT_IVB);
+}
+
+static inline u32 *
+__gen8_emit_flush_dw(u32 *cs, u32 value, u32 gtt_offset, u32 flags)
+{
+	*cs++ = (MI_FLUSH_DW + 1) | flags;
+	*cs++ = gtt_offset;
+	*cs++ = 0;
+	*cs++ = value;
+
+	return cs;
+}
 
 static inline u32 *
@@ -285,12 +302,10 @@ gen8_emit_ggtt_write(u32 *cs, u32 value, u32 gtt_offset, u32 flags)
 	/* Offset should be aligned to 8 bytes for both (QW/DW) write types */
 	GEM_BUG_ON(!IS_ALIGNED(gtt_offset, 8));
 
-	*cs++ = (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW | flags;
-	*cs++ = gtt_offset | MI_FLUSH_DW_USE_GTT;
-	*cs++ = 0;
-	*cs++ = value;
-
-	return cs;
+	return __gen8_emit_flush_dw(cs,
+				    value,
+				    gtt_offset | MI_FLUSH_DW_USE_GTT,
+				    flags | MI_FLUSH_DW_OP_STOREDW);
 }
 
 static inline void __intel_engine_reset(struct intel_engine_cs *engine,
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index 5bfb5f7ed02c..efdeb7b7b2a0 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -371,7 +371,8 @@ static void __setup_engine_capabilities(struct intel_engine_cs *engine)
 		 * instances.
 		 */
 		if ((INTEL_GEN(i915) >= 11 &&
-		     engine->gt->info.vdbox_sfc_access & engine->mask) ||
+		     (engine->gt->info.vdbox_sfc_access &
+		      BIT(engine->instance))) ||
 		    (INTEL_GEN(i915) >= 9 && engine->instance == 0))
 			engine->uabi_capabilities |=
 				I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC;
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index a32aabce7901..724b2cb897d3 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -182,6 +182,7 @@ struct virtual_engine {
 	struct intel_engine_cs base;
 	struct intel_context context;
+	struct rcu_work rcu;
 
 	/*
 	 * We allow only a single request through the virtual engine at a time
@@ -2787,6 +2788,9 @@ static void __execlists_hold(struct i915_request *rq)
 static bool execlists_hold(struct intel_engine_cs *engine,
 			   struct i915_request *rq)
 {
+	if (i915_request_on_hold(rq))
+		return false;
+
 	spin_lock_irq(&engine->active.lock);
 
 	if (i915_request_completed(rq)) { /* too late! */
@@ -3168,8 +3172,10 @@ static void execlists_submission_tasklet(unsigned long data)
 		spin_unlock_irqrestore(&engine->active.lock, flags);
 
 		/* Recheck after serialising with direct-submission */
-		if (unlikely(timeout && preempt_timeout(engine)))
+		if (unlikely(timeout && preempt_timeout(engine))) {
+			cancel_timer(&engine->execlists.preempt);
 			execlists_reset(engine, "preemption time out");
+		}
 	}
 }
 
@@ -3547,6 +3553,19 @@ static const struct intel_context_ops execlists_context_ops = {
 	.destroy = execlists_context_destroy,
 };
 
+static u32 hwsp_offset(const struct i915_request *rq)
+{
+	const struct intel_timeline_cacheline *cl;
+
+	/* Before the request is executed, the timeline/cachline is fixed */
+
+	cl = rcu_dereference_protected(rq->hwsp_cacheline, 1);
+	if (cl)
+		return cl->ggtt_offset;
+
+	return rcu_dereference_protected(rq->timeline, 1)->hwsp_offset;
+}
+
 static int gen8_emit_init_breadcrumb(struct i915_request *rq)
 {
 	u32 *cs;
@@ -3569,7 +3588,7 @@ static int gen8_emit_init_breadcrumb(struct i915_request *rq)
 	*cs++ = MI_NOOP;
 
 	*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
-	*cs++ = i915_request_timeline(rq)->hwsp_offset;
+	*cs++ = hwsp_offset(rq);
 	*cs++ = 0;
 	*cs++ = rq->fence.seqno - 1;
@@ -4886,11 +4905,9 @@ gen8_emit_fini_breadcrumb_tail(struct i915_request *request, u32 *cs)
 	return gen8_emit_wa_tail(request, cs);
 }
 
-static u32 *emit_xcs_breadcrumb(struct i915_request *request, u32 *cs)
+static u32 *emit_xcs_breadcrumb(struct i915_request *rq, u32 *cs)
 {
-	u32 addr = i915_request_active_timeline(request)->hwsp_offset;
-
-	return gen8_emit_ggtt_write(cs, request->fence.seqno, addr, 0);
+	return gen8_emit_ggtt_write(cs, rq->fence.seqno, hwsp_offset(rq), 0);
 }
 
 static u32 *gen8_emit_fini_breadcrumb(struct i915_request *rq, u32 *cs)
@@ -4909,7 +4926,7 @@ static u32 *gen8_emit_fini_breadcrumb_rcs(struct i915_request *request, u32 *cs)
 	/* XXX flush+write+CS_STALL all in one upsets gem_concurrent_blt:kbl */
 	cs = gen8_emit_ggtt_write_rcs(cs,
 				      request->fence.seqno,
-				      i915_request_active_timeline(request)->hwsp_offset,
+				      hwsp_offset(request),
 				      PIPE_CONTROL_FLUSH_ENABLE |
 				      PIPE_CONTROL_CS_STALL);
 
@@ -4921,7 +4938,7 @@ gen11_emit_fini_breadcrumb_rcs(struct i915_request *request, u32 *cs)
 {
 	cs = gen8_emit_ggtt_write_rcs(cs,
 				      request->fence.seqno,
-				      i915_request_active_timeline(request)->hwsp_offset,
+				      hwsp_offset(request),
 				      PIPE_CONTROL_CS_STALL |
 				      PIPE_CONTROL_TILE_CACHE_FLUSH |
 				      PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
@@ -4983,7 +5000,9 @@ gen12_emit_fini_breadcrumb_tail(struct i915_request *request, u32 *cs)
 static u32 *gen12_emit_fini_breadcrumb(struct i915_request *rq, u32 *cs)
 {
-	return gen12_emit_fini_breadcrumb_tail(rq, emit_xcs_breadcrumb(rq, cs));
+	/* XXX Stalling flush before seqno write; post-sync not */
+	cs = emit_xcs_breadcrumb(rq, __gen8_emit_flush_dw(cs, 0, 0, 0));
+	return gen12_emit_fini_breadcrumb_tail(rq, cs);
 }
 
 static u32 *
@@ -4991,7 +5010,7 @@ gen12_emit_fini_breadcrumb_rcs(struct i915_request *request, u32 *cs)
 {
 	cs = gen12_emit_ggtt_write_rcs(cs,
 				       request->fence.seqno,
-				       i915_request_active_timeline(request)->hwsp_offset,
+				       hwsp_offset(request),
 				       PIPE_CONTROL0_HDC_PIPELINE_FLUSH,
 				       PIPE_CONTROL_CS_STALL |
 				       PIPE_CONTROL_TILE_CACHE_FLUSH |
@@ -5412,44 +5431,90 @@ static struct list_head *virtual_queue(struct virtual_engine *ve)
 	return &ve->base.execlists.default_priolist.requests[0];
 }
 
-static void virtual_context_destroy(struct kref *kref)
+static void rcu_virtual_context_destroy(struct work_struct *wrk)
 {
 	struct virtual_engine *ve =
-		container_of(kref, typeof(*ve), context.ref);
+		container_of(wrk, typeof(*ve), rcu.work);
 	unsigned int n;
 
-	GEM_BUG_ON(!list_empty(virtual_queue(ve)));
-	GEM_BUG_ON(ve->request);
 	GEM_BUG_ON(ve->context.inflight);
 
+	/* Preempt-to-busy may leave a stale request behind. */
+	if (unlikely(ve->request)) {
+		struct i915_request *old;
+
+		spin_lock_irq(&ve->base.active.lock);
+
+		old = fetch_and_zero(&ve->request);
+		if (old) {
+			GEM_BUG_ON(!i915_request_completed(old));
+			__i915_request_submit(old);
+			i915_request_put(old);
+		}
+
+		spin_unlock_irq(&ve->base.active.lock);
+	}
+
+	/*
+	 * Flush the tasklet in case it is still running on another core.
+	 *
+	 * This needs to be done before we remove ourselves from the siblings'
+	 * rbtrees as in the case it is running in parallel, it may reinsert
+	 * the rb_node into a sibling.
+	 */
+	tasklet_kill(&ve->base.execlists.tasklet);
+
+	/* Decouple ourselves from the siblings, no more access allowed. */
 	for (n = 0; n < ve->num_siblings; n++) {
 		struct intel_engine_cs *sibling = ve->siblings[n];
 		struct rb_node *node = &ve->nodes[sibling->id].rb;
-		unsigned long flags;
 
 		if (RB_EMPTY_NODE(node))
 			continue;
 
-		spin_lock_irqsave(&sibling->active.lock, flags);
+		spin_lock_irq(&sibling->active.lock);
 
 		/* Detachment is lazily performed in the execlists tasklet */
 		if (!RB_EMPTY_NODE(node))
 			rb_erase_cached(node, &sibling->execlists.virtual);
 
-		spin_unlock_irqrestore(&sibling->active.lock, flags);
+		spin_unlock_irq(&sibling->active.lock);
 	}
 	GEM_BUG_ON(__tasklet_is_scheduled(&ve->base.execlists.tasklet));
+	GEM_BUG_ON(!list_empty(virtual_queue(ve)));
 
 	if (ve->context.state)
 		__execlists_context_fini(&ve->context);
 	intel_context_fini(&ve->context);
 
+	intel_breadcrumbs_free(ve->base.breadcrumbs);
 	intel_engine_free_request_pool(&ve->base);
 
 	kfree(ve->bonds);
 	kfree(ve);
 }
 
+static void virtual_context_destroy(struct kref *kref)
+{
+	struct virtual_engine *ve =
+		container_of(kref, typeof(*ve), context.ref);
+
+	GEM_BUG_ON(!list_empty(&ve->context.signals));
+
+	/*
+	 * When destroying the virtual engine, we have to be aware that
+	 * it may still be in use from an hardirq/softirq context causing
+	 * the resubmission of a completed request (background completion
+	 * due to preempt-to-busy). Before we can free the engine, we need
+	 * to flush the submission code and tasklets that are still potentially
+	 * accessing the engine. Flushing the tasklets requires process context,
+	 * and since we can guard the resubmit onto the engine with an RCU read
+	 * lock, we can delegate the free of the engine to an RCU worker.
+	 */
+	INIT_RCU_WORK(&ve->rcu, rcu_virtual_context_destroy);
+	queue_rcu_work(system_wq, &ve->rcu);
+}
+
 static void virtual_engine_initial_hint(struct virtual_engine *ve)
 {
 	int swp;
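virtual_context_destroy() may be the final kref_put() from hardirq/softirq context, yet the teardown needs to sleep (tasklet_kill()), so it is punted to an RCU worker: queue_rcu_work() waits out a grace period first, then runs the handler in process context. A kernel-style sketch of that pattern on a hypothetical "widget" type:

#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct widget {
	struct rcu_work rcu;
	/* ... resources that need process-context teardown ... */
};

static void widget_destroy_worker(struct work_struct *wrk)
{
	struct widget *w = container_of(wrk, typeof(*w), rcu.work);

	/*
	 * Runs in process context after an RCU grace period: lock-free
	 * readers under rcu_read_lock() can no longer see the widget,
	 * and it is now safe to sleep while flushing tasklets/works.
	 */
	kfree(w);
}

static void widget_destroy(struct widget *w)
{
	/* Callable from any context, including hardirq. */
	INIT_RCU_WORK(&w->rcu, widget_destroy_worker);
	queue_rcu_work(system_wq, &w->rcu);
}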
diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.c b/drivers/gpu/drm/i915/gt/intel_mocs.c
index b8f56e62158e..413dadfac2d1 100644
--- a/drivers/gpu/drm/i915/gt/intel_mocs.c
+++ b/drivers/gpu/drm/i915/gt/intel_mocs.c
@@ -59,8 +59,7 @@ struct drm_i915_mocs_table {
 #define _L3_CACHEABILITY(value)	((value) << 4)
 
 /* Helper defines */
-#define GEN9_NUM_MOCS_ENTRIES	62 /* 62 out of 64 - 63 & 64 are reserved. */
-#define GEN11_NUM_MOCS_ENTRIES	64 /* 63-64 are reserved, but configured. */
+#define GEN9_NUM_MOCS_ENTRIES	64 /* 63-64 are reserved, but configured. */
 
 /* (e)LLC caching options */
 /*
@@ -131,7 +130,19 @@ static const struct drm_i915_mocs_entry skl_mocs_table[] = {
 	GEN9_MOCS_ENTRIES,
 	MOCS_ENTRY(I915_MOCS_CACHED,
 		   LE_3_WB | LE_TC_2_LLC_ELLC | LE_LRUM(3),
-		   L3_3_WB)
+		   L3_3_WB),
+
+	/*
+	 * mocs:63
+	 * - used by the L3 for all of its evictions.
+	 *   Thus it is expected to allow LLC cacheability to enable coherent
+	 *   flows to be maintained.
+	 * - used to force L3 uncachable cycles.
+	 *   Thus it is expected to make the surface L3 uncacheable.
+	 */
+	MOCS_ENTRY(63,
+		   LE_3_WB | LE_TC_1_LLC | LE_LRUM(3),
+		   L3_1_UC)
 };
 
 /* NOTE: the LE_TGT_CACHE is not used on Broxton */
@@ -243,8 +254,9 @@ static const struct drm_i915_mocs_entry tgl_mocs_table[] = {
 	 * only, __init_mocs_table() take care to program unused index with
 	 * this entry.
 	 */
-	MOCS_ENTRY(1, LE_3_WB | LE_TC_1_LLC | LE_LRUM(3),
-		   L3_3_WB),
+	MOCS_ENTRY(I915_MOCS_PTE,
+		   LE_0_PAGETABLE | LE_TC_0_PAGETABLE,
+		   L3_1_UC),
 	GEN11_MOCS_ENTRIES,
 
 	/* Implicitly enable L1 - HDC:L1 + L3 + LLC */
@@ -315,11 +327,11 @@ static unsigned int get_mocs_settings(const struct drm_i915_private *i915,
 	if (INTEL_GEN(i915) >= 12) {
 		table->size  = ARRAY_SIZE(tgl_mocs_table);
 		table->table = tgl_mocs_table;
-		table->n_entries = GEN11_NUM_MOCS_ENTRIES;
+		table->n_entries = GEN9_NUM_MOCS_ENTRIES;
 	} else if (IS_GEN(i915, 11)) {
 		table->size  = ARRAY_SIZE(icl_mocs_table);
 		table->table = icl_mocs_table;
-		table->n_entries = GEN11_NUM_MOCS_ENTRIES;
+		table->n_entries = GEN9_NUM_MOCS_ENTRIES;
 	} else if (IS_GEN9_BC(i915) || IS_CANNONLAKE(i915)) {
 		table->size  = ARRAY_SIZE(skl_mocs_table);
 		table->n_entries = GEN9_NUM_MOCS_ENTRIES;
diff --git a/drivers/gpu/drm/i915/gt/intel_rc6.c b/drivers/gpu/drm/i915/gt/intel_rc6.c
index ab675d35030d..d7b8e4457fc2 100644
--- a/drivers/gpu/drm/i915/gt/intel_rc6.c
+++ b/drivers/gpu/drm/i915/gt/intel_rc6.c
@@ -56,9 +56,12 @@ static inline void set(struct intel_uncore *uncore, i915_reg_t reg, u32 val)
 
 static void gen11_rc6_enable(struct intel_rc6 *rc6)
 {
-	struct intel_uncore *uncore = rc6_to_uncore(rc6);
+	struct intel_gt *gt = rc6_to_gt(rc6);
+	struct intel_uncore *uncore = gt->uncore;
 	struct intel_engine_cs *engine;
 	enum intel_engine_id id;
+	u32 pg_enable;
+	int i;
 
 	/* 2b: Program RC6 thresholds.*/
 	set(uncore, GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16 | 85);
@@ -102,10 +105,19 @@ static void gen11_rc6_enable(struct intel_rc6 *rc6)
 		GEN6_RC_CTL_RC6_ENABLE |
 		GEN6_RC_CTL_EI_MODE(1);
 
-	set(uncore, GEN9_PG_ENABLE,
-	    GEN9_RENDER_PG_ENABLE |
-	    GEN9_MEDIA_PG_ENABLE |
-	    GEN11_MEDIA_SAMPLER_PG_ENABLE);
+	pg_enable =
+		GEN9_RENDER_PG_ENABLE |
+		GEN9_MEDIA_PG_ENABLE |
+		GEN11_MEDIA_SAMPLER_PG_ENABLE;
+
+	if (INTEL_GEN(gt->i915) >= 12) {
+		for (i = 0; i < I915_MAX_VCS; i++)
+			if (HAS_ENGINE(gt, _VCS(i)))
+				pg_enable |= (VDN_HCP_POWERGATE_ENABLE(i) |
+					      VDN_MFX_POWERGATE_ENABLE(i));
+	}
+
+	set(uncore, GEN9_PG_ENABLE, pg_enable);
 }
 
 static void gen9_rc6_enable(struct intel_rc6 *rc6)
diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c
index e6a00eea0631..c1c9cc0ad3b9 100644
--- a/drivers/gpu/drm/i915/gt/intel_rps.c
+++ b/drivers/gpu/drm/i915/gt/intel_rps.c
@@ -883,6 +883,10 @@ void intel_rps_park(struct intel_rps *rps)
 		adj = -2;
 	rps->last_adj = adj;
 	rps->cur_freq = max_t(int, rps->cur_freq + adj, rps->min_freq);
+	if (rps->cur_freq < rps->efficient_freq) {
+		rps->cur_freq = rps->efficient_freq;
+		rps->last_adj = 0;
+	}
 
 	GT_TRACE(rps_to_gt(rps), "park:%x\n", rps->cur_freq);
 }
diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.c b/drivers/gpu/drm/i915/gt/intel_timeline.c
index a2f74cefe4c3..7ea94d201fe6 100644
--- a/drivers/gpu/drm/i915/gt/intel_timeline.c
+++ b/drivers/gpu/drm/i915/gt/intel_timeline.c
@@ -188,10 +188,14 @@ cacheline_alloc(struct intel_timeline_hwsp *hwsp, unsigned int cacheline)
 	return cl;
 }
 
-static void cacheline_acquire(struct intel_timeline_cacheline *cl)
+static void cacheline_acquire(struct intel_timeline_cacheline *cl,
+			      u32 ggtt_offset)
 {
-	if (cl)
-		i915_active_acquire(&cl->active);
+	if (!cl)
+		return;
+
+	cl->ggtt_offset = ggtt_offset;
+	i915_active_acquire(&cl->active);
 }
 
 static void cacheline_release(struct intel_timeline_cacheline *cl)
@@ -340,7 +344,7 @@ int intel_timeline_pin(struct intel_timeline *tl, struct i915_gem_ww_ctx *ww)
 	GT_TRACE(tl->gt, "timeline:%llx using HWSP offset:%x\n",
 		 tl->fence_context, tl->hwsp_offset);
 
-	cacheline_acquire(tl->hwsp_cacheline);
+	cacheline_acquire(tl->hwsp_cacheline, tl->hwsp_offset);
 	if (atomic_fetch_inc(&tl->pin_count)) {
 		cacheline_release(tl->hwsp_cacheline);
 		__i915_vma_unpin(tl->hwsp_ggtt);
@@ -515,7 +519,7 @@ __intel_timeline_get_seqno(struct intel_timeline *tl,
 	GT_TRACE(tl->gt, "timeline:%llx using HWSP offset:%x\n",
 		 tl->fence_context, tl->hwsp_offset);
 
-	cacheline_acquire(cl);
+	cacheline_acquire(cl, tl->hwsp_offset);
 	tl->hwsp_cacheline = cl;
 
 	*seqno = timeline_advance(tl);
@@ -573,9 +577,7 @@ int intel_timeline_read_hwsp(struct i915_request *from,
 	if (err)
 		goto out;
 
-	*hwsp = i915_ggtt_offset(cl->hwsp->vma) +
-		ptr_unmask_bits(cl->vaddr, CACHELINE_BITS) * CACHELINE_BYTES;
-
+	*hwsp = cl->ggtt_offset;
 out:
 	i915_active_release(&cl->active);
 	return err;
diff --git a/drivers/gpu/drm/i915/gt/intel_timeline_types.h b/drivers/gpu/drm/i915/gt/intel_timeline_types.h
index 02181c5020db..4474f487f589 100644
--- a/drivers/gpu/drm/i915/gt/intel_timeline_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_timeline_types.h
@@ -94,6 +94,8 @@ struct intel_timeline_cacheline {
 	struct intel_timeline_hwsp *hwsp;
 	void *vaddr;
 
+	u32 ggtt_offset;
+
 	struct rcu_head rcu;
 };
diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c
index 6c580d0d9ea8..4a3bde7c9f21 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
@@ -131,8 +131,10 @@ static void _wa_add(struct i915_wa_list *wal, const struct i915_wa *wa)
 			return;
 		}
 
-		if (wal->list)
+		if (wal->list) {
 			memcpy(list, wal->list, sizeof(*wa) * wal->count);
+			kfree(wal->list);
+		}
 
 		wal->list = list;
 	}
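The intel_workarounds.c hunk plugs a leak in a grow-on-demand array: the old buffer was copied but never freed on reallocation. A kernel-style sketch of the corrected pattern, with a hypothetical element type rather than struct i915_wa:

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>

struct elem {	/* hypothetical payload */
	u32 reg;
	u32 val;
};

struct elem_list {
	struct elem *list;
	unsigned int count;
	unsigned int size;
};

static int elem_list_grow(struct elem_list *el, unsigned int new_size)
{
	struct elem *list;

	list = kmalloc_array(new_size, sizeof(*list), GFP_KERNEL);
	if (!list)
		return -ENOMEM;

	if (el->list) {
		memcpy(list, el->list, sizeof(*list) * el->count);
		kfree(el->list);	/* the fix: release the old buffer */
	}

	el->list = list;
	el->size = new_size;
	return 0;
}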
diff --git a/drivers/gpu/drm/i915/gt/shmem_utils.c b/drivers/gpu/drm/i915/gt/shmem_utils.c
index f011ea42487e..5982b62f913d 100644
--- a/drivers/gpu/drm/i915/gt/shmem_utils.c
+++ b/drivers/gpu/drm/i915/gt/shmem_utils.c
@@ -73,7 +73,7 @@ void *shmem_pin_map(struct file *file)
 	mapping_set_unevictable(file->f_mapping);
 	return vaddr;
 err_page:
-	while (--i >= 0)
+	while (i--)
 		put_page(pages[i]);
 	kvfree(pages);
 	return NULL;
@@ -103,10 +103,13 @@ static int __shmem_rw(struct file *file, loff_t off,
 			return PTR_ERR(page);
 
 		vaddr = kmap(page);
-		if (write)
+		if (write) {
 			memcpy(vaddr + offset_in_page(off), ptr, this);
-		else
+			set_page_dirty(page);
+		} else {
 			memcpy(ptr, vaddr + offset_in_page(off), this);
+		}
+		mark_page_accessed(page);
 		kunmap(page);
 		put_page(page);
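The shmem_pin_map() fix above swaps "while (--i >= 0)" for "while (i--)". The former is a trap whenever the index is (or later becomes) unsigned: the comparison is always true and the loop never terminates. The post-decrement form works for any integer type and releases exactly the entries that were successfully set up, i-1 down to 0. A tiny, runnable demonstration of the unwind idiom:

#include <stdio.h>

static void unwind(unsigned int i)
{
	/* With unsigned i, `--i >= 0` would loop forever; `i--` stops at 0. */
	while (i--)
		printf("release %u\n", i);
}

int main(void)
{
	unwind(3); /* release 2, release 1, release 0 */
	return 0;
}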
+ */ +static int bxt_ppat_low_write(struct intel_vgpu *vgpu, unsigned int offset, + void *p_data, unsigned int bytes) +{ + u64 pat = + GEN8_PPAT(0, CHV_PPAT_SNOOP) | + GEN8_PPAT(1, 0) | + GEN8_PPAT(2, 0) | + GEN8_PPAT(3, CHV_PPAT_SNOOP) | + GEN8_PPAT(4, CHV_PPAT_SNOOP) | + GEN8_PPAT(5, CHV_PPAT_SNOOP) | + GEN8_PPAT(6, CHV_PPAT_SNOOP) | + GEN8_PPAT(7, CHV_PPAT_SNOOP); + + vgpu_vreg(vgpu, offset) = lower_32_bits(pat); + + return 0; +} + static int guc_status_read(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) @@ -2812,7 +2841,7 @@ static int init_bdw_mmio_info(struct intel_gvt *gvt) MMIO_DH(GEN6_PCODE_MAILBOX, D_BDW_PLUS, NULL, mailbox_write); - MMIO_D(GEN8_PRIVATE_PAT_LO, D_BDW_PLUS); + MMIO_D(GEN8_PRIVATE_PAT_LO, D_BDW_PLUS & ~D_BXT); MMIO_D(GEN8_PRIVATE_PAT_HI, D_BDW_PLUS); MMIO_D(GAMTARBMODE, D_BDW_PLUS); @@ -3139,7 +3168,7 @@ static int init_skl_mmio_info(struct intel_gvt *gvt) NULL, NULL); MMIO_DFH(GAMT_CHKN_BIT_REG, D_KBL | D_CFL, F_CMD_ACCESS, NULL, NULL); - MMIO_D(GEN9_CTX_PREEMPT_REG, D_SKL_PLUS); + MMIO_D(GEN9_CTX_PREEMPT_REG, D_SKL_PLUS & ~D_BXT); return 0; } @@ -3313,9 +3342,21 @@ static int init_bxt_mmio_info(struct intel_gvt *gvt) MMIO_D(GEN8_PUSHBUS_SHIFT, D_BXT); MMIO_D(GEN6_GFXPAUSE, D_BXT); MMIO_DFH(GEN8_L3SQCREG1, D_BXT, F_CMD_ACCESS, NULL, NULL); + MMIO_DFH(GEN8_L3CNTLREG, D_BXT, F_CMD_ACCESS, NULL, NULL); + MMIO_DFH(_MMIO(0x20D8), D_BXT, F_CMD_ACCESS, NULL, NULL); + MMIO_F(GEN8_RING_CS_GPR(RENDER_RING_BASE, 0), 0x40, F_CMD_ACCESS, + 0, 0, D_BXT, NULL, NULL); + MMIO_F(GEN8_RING_CS_GPR(GEN6_BSD_RING_BASE, 0), 0x40, F_CMD_ACCESS, + 0, 0, D_BXT, NULL, NULL); + MMIO_F(GEN8_RING_CS_GPR(BLT_RING_BASE, 0), 0x40, F_CMD_ACCESS, + 0, 0, D_BXT, NULL, NULL); + MMIO_F(GEN8_RING_CS_GPR(VEBOX_RING_BASE, 0), 0x40, F_CMD_ACCESS, + 0, 0, D_BXT, NULL, NULL); MMIO_DFH(GEN9_CTX_PREEMPT_REG, D_BXT, F_CMD_ACCESS, NULL, NULL); + MMIO_DH(GEN8_PRIVATE_PAT_LO, D_BXT, NULL, bxt_ppat_low_write); + return 0; } diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index ad8a9df49f29..778eb8cab610 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -829,8 +829,10 @@ static int intel_vgpu_open(struct mdev_device *mdev) /* Take a module reference as mdev core doesn't take * a reference for vendor driver. 
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index ad8a9df49f29..778eb8cab610 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -829,8 +829,10 @@ static int intel_vgpu_open(struct mdev_device *mdev)
 	/* Take a module reference as mdev core doesn't take
 	 * a reference for vendor driver.
 	 */
-	if (!try_module_get(THIS_MODULE))
+	if (!try_module_get(THIS_MODULE)) {
+		ret = -ENODEV;
 		goto undo_group;
+	}
 
 	ret = kvmgt_guest_init(mdev);
 	if (ret)
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 1570eb8aa978..aed2ef6466a2 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -1277,7 +1277,7 @@ void intel_vgpu_clean_submission(struct intel_vgpu *vgpu)
 
 	i915_context_ppgtt_root_restore(s, i915_vm_to_ppgtt(s->shadow[0]->vm));
 	for_each_engine(engine, vgpu->gvt->gt, id)
-		intel_context_unpin(s->shadow[id]);
+		intel_context_put(s->shadow[id]);
 
 	kmem_cache_destroy(s->workloads);
 }
@@ -1369,11 +1369,6 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
 			ce->ring = __intel_context_ring_size(ring_size);
 		}
 
-		ret = intel_context_pin(ce);
-		intel_context_put(ce);
-		if (ret)
-			goto out_shadow_ctx;
-
 		s->shadow[i] = ce;
 	}
 
@@ -1405,7 +1400,6 @@ out_shadow_ctx:
 		if (IS_ERR(s->shadow[i]))
 			break;
 
-		intel_context_unpin(s->shadow[i]);
 		intel_context_put(s->shadow[i]);
 	}
 	i915_vm_put(&ppgtt->vm);
@@ -1479,6 +1473,7 @@ void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload)
 {
 	struct intel_vgpu_submission *s = &workload->vgpu->submission;
 
+	intel_context_unpin(s->shadow[workload->engine->id]);
 	release_shadow_batch_buffer(workload);
 	release_shadow_wa_ctx(&workload->wa_ctx);
 
@@ -1724,6 +1719,12 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu,
 		return ERR_PTR(ret);
 	}
 
+	ret = intel_context_pin(s->shadow[engine->id]);
+	if (ret) {
+		intel_vgpu_destroy_workload(workload);
+		return ERR_PTR(ret);
+	}
+
 	return workload;
 }
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index f6d7e33c7099..399582aeeefb 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -439,7 +439,8 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
 
 	if (IS_BROADWELL(dev_priv))
 		ret = intel_gvt_hypervisor_set_edid(vgpu, PORT_B);
-	else
+	/* FixMe: Re-enable APL/BXT once vfio_edid enabled */
+	else if (!IS_BROXTON(dev_priv))
 		ret = intel_gvt_hypervisor_set_edid(vgpu, PORT_D);
 	if (ret)
 		goto out_clean_sched_policy;
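The kvmgt.c hunk fixes a classic goto-unwind bug: jumping to a shared error label without setting the error code first silently returns whatever happened to be in the variable, often 0 (success). A runnable userspace sketch with illustrative names:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static bool acquire_module_ref(void) { return false; /* pretend failure */ }
static void release_group(void) { }

static int open_device(void)
{
	int ret = 0;

	/* ... earlier setup left ret == 0 ... */

	if (!acquire_module_ref()) {
		ret = -ENODEV;	/* the fix: pick the error before the goto */
		goto undo_group;
	}

	return 0;

undo_group:
	release_group();
	return ret;
}

int main(void)
{
	printf("open_device() = %d\n", open_device()); /* -19 on Linux */
	return 0;
}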
- */ - i915_gem_object_invalidate_frontbuffer(obj, ORIGIN_CPU); - - if (copy_from_user(vaddr, user_data, args->size)) - return -EFAULT; - - drm_clflush_virt_range(vaddr, args->size); - intel_gt_chipset_flush(&to_i915(obj->base.dev)->gt); - - i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU); - return 0; -} - -static int i915_gem_create(struct drm_file *file, struct intel_memory_region *mr, u64 *size_p, @@ -527,6 +503,12 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data, trace_i915_gem_object_pread(obj, args->offset, args->size); + ret = -ENODEV; + if (obj->ops->pread) + ret = obj->ops->pread(obj, args); + if (ret != -ENODEV) + goto out; + ret = i915_gem_object_wait(obj, I915_WAIT_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT); @@ -866,8 +848,6 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, if (ret == -EFAULT || ret == -ENOSPC) { if (i915_gem_object_has_struct_page(obj)) ret = i915_gem_shmem_pwrite(obj, args); - else - ret = i915_gem_phys_pwrite(obj, args, file); } i915_gem_object_unpin_pages(obj); diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c index e94976976571..3640d0e229d2 100644 --- a/drivers/gpu/drm/i915/i915_perf.c +++ b/drivers/gpu/drm/i915/i915_perf.c @@ -909,8 +909,13 @@ static int gen8_oa_read(struct i915_perf_stream *stream, DRM_I915_PERF_RECORD_OA_REPORT_LOST); if (ret) return ret; - intel_uncore_write(uncore, oastatus_reg, - oastatus & ~GEN8_OASTATUS_REPORT_LOST); + + intel_uncore_rmw(uncore, oastatus_reg, + GEN8_OASTATUS_COUNTER_OVERFLOW | + GEN8_OASTATUS_REPORT_LOST, + IS_GEN_RANGE(uncore->i915, 8, 10) ? + (GEN8_OASTATUS_HEAD_POINTER_WRAP | + GEN8_OASTATUS_TAIL_POINTER_WRAP) : 0); } return gen8_append_oa_reports(stream, buf, count, offset); diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index d805d4da6181..5cd83eac940c 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -676,6 +676,8 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define GEN7_OASTATUS2_MEM_SELECT_GGTT (1 << 0) /* 0: PPGTT, 1: GGTT */ #define GEN8_OASTATUS _MMIO(0x2b08) +#define GEN8_OASTATUS_TAIL_POINTER_WRAP (1 << 17) +#define GEN8_OASTATUS_HEAD_POINTER_WRAP (1 << 16) #define GEN8_OASTATUS_OVERRUN_STATUS (1 << 3) #define GEN8_OASTATUS_COUNTER_OVERFLOW (1 << 2) #define GEN8_OASTATUS_OABUFFER_OVERFLOW (1 << 1) @@ -8971,10 +8973,6 @@ enum { #define GEN9_PWRGT_MEDIA_STATUS_MASK (1 << 0) #define GEN9_PWRGT_RENDER_STATUS_MASK (1 << 1) -#define POWERGATE_ENABLE _MMIO(0xa210) -#define VDN_HCP_POWERGATE_ENABLE(n) BIT(((n) * 2) + 3) -#define VDN_MFX_POWERGATE_ENABLE(n) BIT(((n) * 2) + 4) - #define GTFIFODBG _MMIO(0x120000) #define GT_FIFO_SBDEDICATE_FREE_ENTRY_CHV (0x1f << 20) #define GT_FIFO_FREE_ENTRIES_CHV (0x7f << 13) @@ -9114,9 +9112,11 @@ enum { #define GEN9_MEDIA_PG_IDLE_HYSTERESIS _MMIO(0xA0C4) #define GEN9_RENDER_PG_IDLE_HYSTERESIS _MMIO(0xA0C8) #define GEN9_PG_ENABLE _MMIO(0xA210) -#define GEN9_RENDER_PG_ENABLE REG_BIT(0) -#define GEN9_MEDIA_PG_ENABLE REG_BIT(1) -#define GEN11_MEDIA_SAMPLER_PG_ENABLE REG_BIT(2) +#define GEN9_RENDER_PG_ENABLE REG_BIT(0) +#define GEN9_MEDIA_PG_ENABLE REG_BIT(1) +#define GEN11_MEDIA_SAMPLER_PG_ENABLE REG_BIT(2) +#define VDN_HCP_POWERGATE_ENABLE(n) REG_BIT(3 + 2 * (n)) +#define VDN_MFX_POWERGATE_ENABLE(n) REG_BIT(4 + 2 * (n)) #define GEN8_PUSHBUS_CONTROL _MMIO(0xA248) #define GEN8_PUSHBUS_ENABLE _MMIO(0xA250) #define GEN8_PUSHBUS_SHIFT _MMIO(0xA25C) diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h index 
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index 16b721080195..620b6fab2c5c 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -176,7 +176,9 @@ struct i915_request {
 	struct intel_context *context;
 	struct intel_ring *ring;
 	struct intel_timeline __rcu *timeline;
+
 	struct list_head signal_link;
+	struct llist_node signal_node;
 
 	/*
 	 * The rcu epoch of when this request was allocated. Used to judiciously
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index ffb5287e055a..caa9b041616b 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -314,8 +314,10 @@ static void __vma_release(struct dma_fence_work *work)
 {
 	struct i915_vma_work *vw = container_of(work, typeof(*vw), base);
 
-	if (vw->pinned)
+	if (vw->pinned) {
 		__i915_gem_object_unpin_pages(vw->pinned);
+		i915_gem_object_put(vw->pinned);
+	}
 
 	i915_vm_free_pt_stash(vw->vm, &vw->stash);
 	i915_vm_put(vw->vm);
@@ -431,7 +433,7 @@ int i915_vma_bind(struct i915_vma *vma,
 
 		if (vma->obj) {
 			__i915_gem_object_pin_pages(vma->obj);
-			work->pinned = vma->obj;
+			work->pinned = i915_gem_object_get(vma->obj);
 		}
 	} else {
 		vma->ops->bind_vma(vma->vm, NULL, vma, cache_level, bind_flags);
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 34e0d22d456b..cfb806767fc5 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -7118,23 +7118,10 @@ static void icl_init_clock_gating(struct drm_i915_private *dev_priv)
 
 static void tgl_init_clock_gating(struct drm_i915_private *dev_priv)
 {
-	u32 vd_pg_enable = 0;
-	unsigned int i;
-
 	/* Wa_1409120013:tgl */
 	I915_WRITE(ILK_DPFC_CHICKEN,
 		   ILK_DPFC_CHICKEN_COMP_DUMMY_PIXEL);
 
-	/* This is not a WA. Enable VD HCP & MFX_ENC powergate */
-	for (i = 0; i < I915_MAX_VCS; i++) {
-		if (HAS_ENGINE(&dev_priv->gt, _VCS(i)))
-			vd_pg_enable |= VDN_HCP_POWERGATE_ENABLE(i) |
-					VDN_MFX_POWERGATE_ENABLE(i);
-	}
-
-	I915_WRITE(POWERGATE_ENABLE,
-		   I915_READ(POWERGATE_ENABLE) | vd_pg_enable);
-
 	/* Wa_1409825376:tgl (pre-prod)*/
 	if (IS_TGL_DISP_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_B1))
 		I915_WRITE(GEN9_CLKGATE_DIS_3, I915_READ(GEN9_CLKGATE_DIS_3) |
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem.c b/drivers/gpu/drm/i915/selftests/i915_gem.c
index 23a6132c5f4e..412e21604a05 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem.c
@@ -211,8 +211,8 @@ static int igt_gem_ww_ctx(void *arg)
 		return PTR_ERR(obj);
 
 	obj2 = i915_gem_object_create_internal(i915, PAGE_SIZE);
-	if (IS_ERR(obj)) {
-		err = PTR_ERR(obj);
+	if (IS_ERR(obj2)) {
+		err = PTR_ERR(obj2);
 		goto put1;
 	}
diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c
index 64bbb8288249..e424a6d1a68c 100644
--- a/drivers/gpu/drm/i915/selftests/i915_request.c
+++ b/drivers/gpu/drm/i915/selftests/i915_request.c
@@ -2293,8 +2293,10 @@ static int perf_request_latency(void *arg)
 		struct intel_context *ce;
 
 		ce = intel_context_create(engine);
-		if (IS_ERR(ce))
+		if (IS_ERR(ce)) {
+			err = PTR_ERR(ce);
 			goto out;
+		}
 
 		err = intel_context_pin(ce);
 		if (err) {
@@ -2467,8 +2469,10 @@ static int perf_series_engines(void *arg)
 		struct intel_context *ce;
 
 		ce = intel_context_create(engine);
-		if (IS_ERR(ce))
+		if (IS_ERR(ce)) {
+			err = PTR_ERR(ce);
 			goto out;
+		}
 
 		err = intel_context_pin(ce);
 		if (err) {
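The selftest hunks close out two instances of the same error-pointer bug class: checking the wrong variable after a create call (obj instead of obj2), and jumping to an unwind label without converting the error pointer into an errno first. A runnable userspace sketch that emulates the kernel's ERR_PTR/PTR_ERR/IS_ERR helpers to show both fixes:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)(uintptr_t)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
}

static void *create_object(int fail)
{
	return fail ? ERR_PTR(-ENOMEM) : (void *)0x1000;
}

static int test(void)
{
	void *obj, *obj2;
	int err = 0;

	obj = create_object(0);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	obj2 = create_object(1);
	if (IS_ERR(obj2)) {		/* check obj2, not obj */
		err = PTR_ERR(obj2);	/* capture before the goto */
		goto put1;
	}

put1:
	/* release obj here */
	return err;
}

int main(void)
{
	printf("test() = %d\n", test()); /* -12 (ENOMEM) */
	return 0;
}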