Diffstat (limited to 'drivers/gpu/drm/i915/gt/intel_gt.c')
-rw-r--r--	drivers/gpu/drm/i915/gt/intel_gt.c	425
1 file changed, 145 insertions, 280 deletions
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c
index f0dbfc434e07..3d3b1ba76e2b 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt.c
@@ -4,7 +4,7 @@
  */
 
 #include <drm/drm_managed.h>
-#include <drm/intel-gtt.h>
+#include <drm/intel/intel-gtt.h>
 
 #include "gem/i915_gem_internal.h"
 #include "gem/i915_gem_lmem.h"
@@ -28,12 +28,12 @@
 #include "intel_migrate.h"
 #include "intel_mocs.h"
 #include "intel_pci_config.h"
-#include "intel_pm.h"
 #include "intel_rc6.h"
 #include "intel_renderstate.h"
 #include "intel_rps.h"
 #include "intel_sa_media.h"
 #include "intel_gt_sysfs.h"
+#include "intel_tlb.h"
 #include "intel_uncore.h"
 #include "shmem_utils.h"
@@ -51,8 +51,7 @@ void intel_gt_common_init_early(struct intel_gt *gt)
 	intel_gt_init_reset(gt);
 	intel_gt_init_requests(gt);
 	intel_gt_init_timelines(gt);
-	mutex_init(&gt->tlb.invalidate_lock);
-	seqcount_mutex_init(&gt->tlb.seqno, &gt->tlb.invalidate_lock);
+	intel_gt_init_tlb(gt);
 	intel_gt_pm_init_early(gt);
 
 	intel_wopcm_init_early(&gt->wopcm);
@@ -63,7 +62,13 @@ void intel_gt_common_init_early(struct intel_gt *gt)
 /* Preliminary initialization of Tile 0 */
 int intel_root_gt_init_early(struct drm_i915_private *i915)
 {
-	struct intel_gt *gt = to_gt(i915);
+	struct intel_gt *gt;
+
+	gt = drmm_kzalloc(&i915->drm, sizeof(*gt), GFP_KERNEL);
+	if (!gt)
+		return -ENOMEM;
+
+	i915->gt[0] = gt;
 
 	gt->i915 = i915;
 	gt->uncore = &i915->uncore;
@@ -180,7 +185,7 @@ int intel_gt_init_hw(struct intel_gt *gt)
 
 	if (IS_HASWELL(i915))
 		intel_uncore_write(uncore,
 				   HSW_MI_PREDICATE_RESULT_2,
-				   IS_HSW_GT3(i915) ?
+				   INTEL_INFO(i915)->gt == 3 ?
 				   LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
 
 	/* Apply the GT workarounds... */
@@ -263,10 +268,21 @@ intel_gt_clear_error_registers(struct intel_gt *gt,
 				   I915_MASTER_ERROR_INTERRUPT);
 	}
 
-	if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) {
+	/*
+	 * For the media GT, this ring fault register is not replicated,
+	 * so don't do multicast/replicated register read/write operation on it.
+	 */
+	if (MEDIA_VER(i915) >= 13 && gt->type == GT_MEDIA) {
+		intel_uncore_rmw(uncore, XELPMP_RING_FAULT_REG,
+				 RING_FAULT_VALID, 0);
+		intel_uncore_posting_read(uncore,
+					  XELPMP_RING_FAULT_REG);
+
+	} else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55)) {
 		intel_gt_mcr_multicast_rmw(gt, XEHP_RING_FAULT_REG,
 					   RING_FAULT_VALID, 0);
 		intel_gt_mcr_read_any(gt, XEHP_RING_FAULT_REG);
+
 	} else if (GRAPHICS_VER(i915) >= 12) {
 		intel_uncore_rmw(uncore, GEN12_RING_FAULT_REG, RING_FAULT_VALID, 0);
 		intel_uncore_posting_read(uncore, GEN12_RING_FAULT_REG);
@@ -286,25 +302,48 @@ static void gen6_check_faults(struct intel_gt *gt)
 {
 	struct intel_engine_cs *engine;
 	enum intel_engine_id id;
-	u32 fault;
 
 	for_each_engine(engine, gt, id) {
+		u32 fault;
+
 		fault = GEN6_RING_FAULT_REG_READ(engine);
+
 		if (fault & RING_FAULT_VALID) {
 			gt_dbg(gt, "Unexpected fault\n"
-			       "\tAddr: 0x%08lx\n"
+			       "\tAddr: 0x%08x\n"
 			       "\tAddress space: %s\n"
 			       "\tSource ID: %d\n"
 			       "\tType: %d\n",
-			       fault & PAGE_MASK,
+			       fault & RING_FAULT_VADDR_MASK,
 			       fault & RING_FAULT_GTTSEL_MASK ?
 			       "GGTT" : "PPGTT",
-			       RING_FAULT_SRCID(fault),
-			       RING_FAULT_FAULT_TYPE(fault));
+			       REG_FIELD_GET(RING_FAULT_SRCID_MASK, fault),
+			       REG_FIELD_GET(RING_FAULT_FAULT_TYPE_MASK, fault));
 		}
 	}
 }
 
+static void gen8_report_fault(struct intel_gt *gt, u32 fault,
+			      u32 fault_data0, u32 fault_data1)
+{
+	u64 fault_addr;
+
+	fault_addr = ((u64)(fault_data1 & FAULT_VA_HIGH_BITS) << 44) |
+		     ((u64)fault_data0 << 12);
+
+	gt_dbg(gt, "Unexpected fault\n"
+	       "\tAddr: 0x%08x_%08x\n"
+	       "\tAddress space: %s\n"
+	       "\tEngine ID: %d\n"
+	       "\tSource ID: %d\n"
+	       "\tType: %d\n",
+	       upper_32_bits(fault_addr), lower_32_bits(fault_addr),
+	       fault_data1 & FAULT_GTT_SEL ? "GGTT" : "PPGTT",
+	       REG_FIELD_GET(RING_FAULT_ENGINE_ID_MASK, fault),
+	       REG_FIELD_GET(RING_FAULT_SRCID_MASK, fault),
+	       REG_FIELD_GET(RING_FAULT_FAULT_TYPE_MASK, fault));
+}
+
 static void xehp_check_faults(struct intel_gt *gt)
 {
 	u32 fault;
@@ -317,28 +356,10 @@ static void xehp_check_faults(struct intel_gt *gt)
 	 * toward the primary instance.
 	 */
 	fault = intel_gt_mcr_read_any(gt, XEHP_RING_FAULT_REG);
-	if (fault & RING_FAULT_VALID) {
-		u32 fault_data0, fault_data1;
-		u64 fault_addr;
-
-		fault_data0 = intel_gt_mcr_read_any(gt, XEHP_FAULT_TLB_DATA0);
-		fault_data1 = intel_gt_mcr_read_any(gt, XEHP_FAULT_TLB_DATA1);
-
-		fault_addr = ((u64)(fault_data1 & FAULT_VA_HIGH_BITS) << 44) |
-			     ((u64)fault_data0 << 12);
-
-		gt_dbg(gt, "Unexpected fault\n"
-		       "\tAddr: 0x%08x_%08x\n"
-		       "\tAddress space: %s\n"
-		       "\tEngine ID: %d\n"
-		       "\tSource ID: %d\n"
-		       "\tType: %d\n",
-		       upper_32_bits(fault_addr), lower_32_bits(fault_addr),
-		       fault_data1 & FAULT_GTT_SEL ? "GGTT" : "PPGTT",
-		       GEN8_RING_FAULT_ENGINE_ID(fault),
-		       RING_FAULT_SRCID(fault),
-		       RING_FAULT_FAULT_TYPE(fault));
-	}
+	if (fault & RING_FAULT_VALID)
+		gen8_report_fault(gt, fault,
+				  intel_gt_mcr_read_any(gt, XEHP_FAULT_TLB_DATA0),
+				  intel_gt_mcr_read_any(gt, XEHP_FAULT_TLB_DATA1));
 }
 
 static void gen8_check_faults(struct intel_gt *gt)
@@ -358,28 +379,10 @@ static void gen8_check_faults(struct intel_gt *gt)
 	}
 
 	fault = intel_uncore_read(uncore, fault_reg);
-	if (fault & RING_FAULT_VALID) {
-		u32 fault_data0, fault_data1;
-		u64 fault_addr;
-
-		fault_data0 = intel_uncore_read(uncore, fault_data0_reg);
-		fault_data1 = intel_uncore_read(uncore, fault_data1_reg);
-
-		fault_addr = ((u64)(fault_data1 & FAULT_VA_HIGH_BITS) << 44) |
-			     ((u64)fault_data0 << 12);
-
-		gt_dbg(gt, "Unexpected fault\n"
-		       "\tAddr: 0x%08x_%08x\n"
-		       "\tAddress space: %s\n"
-		       "\tEngine ID: %d\n"
-		       "\tSource ID: %d\n"
-		       "\tType: %d\n",
-		       upper_32_bits(fault_addr), lower_32_bits(fault_addr),
-		       fault_data1 & FAULT_GTT_SEL ? "GGTT" : "PPGTT",
-		       GEN8_RING_FAULT_ENGINE_ID(fault),
-		       RING_FAULT_SRCID(fault),
-		       RING_FAULT_FAULT_TYPE(fault));
-	}
+	if (fault & RING_FAULT_VALID)
+		gen8_report_fault(gt, fault,
+				  intel_uncore_read(uncore, fault_data0_reg),
+				  intel_uncore_read(uncore, fault_data1_reg));
 }
 
 void intel_gt_check_and_clear_faults(struct intel_gt *gt)
@@ -387,7 +390,7 @@ void intel_gt_check_and_clear_faults(struct intel_gt *gt)
 	struct drm_i915_private *i915 = gt->i915;
 
 	/* From GEN8 onwards we only have one 'All Engine Fault Register' */
-	if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50))
+	if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55))
 		xehp_check_faults(gt);
 	else if (GRAPHICS_VER(i915) >= 8)
 		gen8_check_faults(gt);
@@ -435,7 +438,7 @@ void intel_gt_flush_ggtt_writes(struct intel_gt *gt)
 
 		spin_lock_irqsave(&uncore->lock, flags);
 		intel_uncore_posting_read_fw(uncore,
-					     RING_HEAD(RENDER_RING_BASE));
+					     RING_TAIL(RENDER_RING_BASE));
 		spin_unlock_irqrestore(&uncore->lock, flags);
 	}
 }
@@ -467,7 +470,7 @@ static int intel_gt_init_scratch(struct intel_gt *gt, unsigned int size)
 		obj = i915_gem_object_create_lmem(i915, size,
 						  I915_BO_ALLOC_VOLATILE |
 						  I915_BO_ALLOC_GPU_ONLY);
-	if (IS_ERR(obj))
+	if (IS_ERR(obj) && !IS_METEORLAKE(i915)) /* Wa_22018444074 */
 		obj = i915_gem_object_create_stolen(i915, size);
 	if (IS_ERR(obj))
 		obj = i915_gem_object_create_internal(i915, size);
@@ -737,12 +740,12 @@ int intel_gt_init(struct intel_gt *gt)
 	if (err)
 		goto err_gt;
 
-	intel_uc_init_late(&gt->uc);
-
 	err = i915_inject_probe_error(gt->i915, -EIO);
 	if (err)
 		goto err_gt;
 
+	intel_uc_init_late(&gt->uc);
+
 	intel_migrate_init(&gt->migrate, gt);
 
 	goto out_fw;
@@ -785,6 +788,29 @@ void intel_gt_driver_unregister(struct intel_gt *gt)
 	intel_gsc_fini(&gt->gsc);
 
 	/*
+	 * If we unload the driver and wedge before the GSC worker is complete,
+	 * the worker will hit an error on its submission to the GSC engine and
+	 * then exit. This is hard to hit for a user, but it is reproducible
+	 * with skipping selftests. The error is handled gracefully by the
+	 * worker, so there are no functional issues, but we still end up with
+	 * an error message in dmesg, which is something we want to avoid as
+	 * this is a supported scenario. We could modify the worker to better
+	 * handle a wedging occurring during its execution, but that gets
+	 * complicated for a couple of reasons:
+	 * - We do want the error on runtime wedging, because there are
+	 *   implications for subsystems outside of GT (i.e., PXP, HDCP), it's
+	 *   only the error on driver unload that we want to silence.
+	 * - The worker is responsible for multiple submissions (GSC FW load,
+	 *   HuC auth, SW proxy), so all of those will have to be adapted to
+	 *   handle the wedged_on_fini scenario.
+	 * Therefore, it's much simpler to just wait for the worker to be done
+	 * before wedging on driver removal, also considering that the worker
+	 * will likely already be idle in the great majority of non-selftest
+	 * scenarios.
+	 */
+	intel_gsc_uc_flush_work(&gt->uc.gsc);
+
+	/*
 	 * Upon unregistering the device to prevent any new users, cancel
 	 * all in-flight requests so that we can quickly unbind the active
 	 * resources.
@@ -793,7 +819,7 @@ void intel_gt_driver_unregister(struct intel_gt *gt)
 
 	/* Scrub all HW state upon release */
 	with_intel_runtime_pm(gt->uncore->rpm, wakeref)
-		__intel_gt_reset(gt, ALL_ENGINES);
+		intel_gt_reset_all_engines(gt);
 }
 
 void intel_gt_driver_release(struct intel_gt *gt)
@@ -824,7 +850,7 @@ void intel_gt_driver_late_release_all(struct drm_i915_private *i915)
 		intel_gt_fini_requests(gt);
 		intel_gt_fini_reset(gt);
 		intel_gt_fini_timelines(gt);
-		mutex_destroy(&gt->tlb.invalidate_lock);
+		intel_gt_fini_tlb(gt);
 		intel_engines_free(gt);
 	}
 }
@@ -865,7 +891,7 @@ static int intel_gt_tile_setup(struct intel_gt *gt, phys_addr_t phys_addr)
 int intel_gt_probe_all(struct drm_i915_private *i915)
 {
 	struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
-	struct intel_gt *gt = &i915->gt0;
+	struct intel_gt *gt = to_gt(i915);
 	const struct intel_gt_definition *gtdef;
 	phys_addr_t phys_addr;
 	unsigned int mmio_bar;
@@ -882,15 +908,13 @@ int intel_gt_probe_all(struct drm_i915_private *i915)
 	 */
 	gt->i915 = i915;
 	gt->name = "Primary GT";
-	gt->info.engine_mask = RUNTIME_INFO(i915)->platform_engine_mask;
+	gt->info.engine_mask = INTEL_INFO(i915)->platform_engine_mask;
 
 	gt_dbg(gt, "Setting up %s\n", gt->name);
 	ret = intel_gt_tile_setup(gt, phys_addr);
 	if (ret)
 		return ret;
 
-	i915->gt[0] = gt;
-
 	if (!HAS_EXTRA_GT_LIST(i915))
 		return 0;
@@ -945,8 +969,6 @@ int intel_gt_probe_all(struct drm_i915_private *i915)
 err:
 	i915_probe_error(i915, "Failed to initialize %s! (%d)\n", gtdef->name, ret);
-	intel_gt_release_all(i915);
-
 	return ret;
 }
@@ -965,15 +987,6 @@ int intel_gt_tiles_init(struct drm_i915_private *i915)
 	return 0;
 }
 
-void intel_gt_release_all(struct drm_i915_private *i915)
-{
-	struct intel_gt *gt;
-	unsigned int id;
-
-	for_each_gt(gt, i915, id)
-		i915->gt[id] = NULL;
-}
-
 void intel_gt_info_print(const struct intel_gt_info *info,
 			 struct drm_printer *p)
 {
@@ -982,226 +995,78 @@ void intel_gt_info_print(const struct intel_gt_info *info,
 	intel_sseu_dump(&info->sseu, p);
 }
 
-struct reg_and_bit {
-	union {
-		i915_reg_t reg;
-		i915_mcr_reg_t mcr_reg;
-	};
-	u32 bit;
-};
-
-static struct reg_and_bit
-get_reg_and_bit(const struct intel_engine_cs *engine, const bool gen8,
-		const i915_reg_t *regs, const unsigned int num)
+enum i915_map_type intel_gt_coherent_map_type(struct intel_gt *gt,
+					      struct drm_i915_gem_object *obj,
+					      bool always_coherent)
 {
-	const unsigned int class = engine->class;
-	struct reg_and_bit rb = { };
-
-	if (gt_WARN_ON_ONCE(engine->gt, class >= num || !regs[class].reg))
-		return rb;
-
-	rb.reg = regs[class];
-	if (gen8 && class == VIDEO_DECODE_CLASS)
-		rb.reg.reg += 4 * engine->instance; /* GEN8_M2TCR */
+	/*
+	 * Wa_22016122933: always return I915_MAP_WC for Media
+	 * version 13.0 when the object is on the Media GT
+	 */
+	if (i915_gem_object_is_lmem(obj) || intel_gt_needs_wa_22016122933(gt))
+		return I915_MAP_WC;
+	if (HAS_LLC(gt->i915) || always_coherent)
+		return I915_MAP_WB;
 	else
-		rb.bit = engine->instance;
-
-	rb.bit = BIT(rb.bit);
-
-	return rb;
+		return I915_MAP_WC;
 }
 
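The new intel_gt_coherent_map_type() above is a small decision table: write-combined for device-local objects or when Wa_22016122933 applies, write-back when the CPU cache is coherent (LLC) or coherence is explicitly requested, write-combined otherwise. A runnable sketch of the same chain, with stand-in booleans for the i915 queries:

#include <stdbool.h>
#include <stdio.h>

enum map_type { MAP_WB, MAP_WC };

static enum map_type coherent_map_type(bool is_lmem, bool needs_wa_22016122933,
				       bool has_llc, bool always_coherent)
{
	if (is_lmem || needs_wa_22016122933)
		return MAP_WC;	/* device memory / Wa: write-combined */
	if (has_llc || always_coherent)
		return MAP_WB;	/* CPU-coherent: write-back */
	return MAP_WC;
}

int main(void)
{
	/* LLC platform, ordinary object: expect WB */
	printf("%s\n", coherent_map_type(false, false, true, false) == MAP_WB ?
	       "WB" : "WC");
	return 0;
}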
-/*
- * HW architecture suggest typical invalidation time at 40us,
- * with pessimistic cases up to 100us and a recommendation to
- * cap at 1ms. We go a bit higher just in case.
- */
-#define TLB_INVAL_TIMEOUT_US 100
-#define TLB_INVAL_TIMEOUT_MS 4
-
-/*
- * On Xe_HP the TLB invalidation registers are located at the same MMIO offsets
- * but are now considered MCR registers. Since they exist within a GAM range,
- * the primary instance of the register rolls up the status from each unit.
- */
-static int wait_for_invalidate(struct intel_gt *gt, struct reg_and_bit rb)
+bool intel_gt_needs_wa_16018031267(struct intel_gt *gt)
 {
-	if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 50))
-		return intel_gt_mcr_wait_for_reg(gt, rb.mcr_reg, rb.bit, 0,
-						 TLB_INVAL_TIMEOUT_US,
-						 TLB_INVAL_TIMEOUT_MS);
-	else
-		return __intel_wait_for_register_fw(gt->uncore, rb.reg, rb.bit, 0,
-						    TLB_INVAL_TIMEOUT_US,
-						    TLB_INVAL_TIMEOUT_MS,
-						    NULL);
+	/* Wa_16018031267, Wa_16018063123 */
+	return IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 55), IP_VER(12, 71));
 }
 
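intel_gt_needs_wa_16018031267() gates on a graphics-IP range. i915 packs IP versions with the major version in the high byte and the release in the low byte (IP_VER(ver, rel) is ((ver) << 8 | (rel))), so a plain integer comparison orders versions correctly and IS_GFX_GT_IP_RANGE() reduces to a bounded compare. A standalone sketch of that packing:

#include <stdio.h>

#define IP_VER(ver, rel) ((ver) << 8 | (rel))

static int ip_in_range(int ip, int from, int until)
{
	return ip >= from && ip <= until;
}

int main(void)
{
	int gfx_ip = IP_VER(12, 70);	/* made-up GT IP version */

	/* prints 1: 12.70 falls inside [12.55, 12.71] */
	printf("%d\n", ip_in_range(gfx_ip, IP_VER(12, 55), IP_VER(12, 71)));
	return 0;
}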
-static void mmio_invalidate_full(struct intel_gt *gt)
+bool intel_gt_needs_wa_22016122933(struct intel_gt *gt)
 {
-	static const i915_reg_t gen8_regs[] = {
-		[RENDER_CLASS]			= GEN8_RTCR,
-		[VIDEO_DECODE_CLASS]		= GEN8_M1TCR, /* , GEN8_M2TCR */
-		[VIDEO_ENHANCEMENT_CLASS]	= GEN8_VTCR,
-		[COPY_ENGINE_CLASS]		= GEN8_BTCR,
-	};
-	static const i915_reg_t gen12_regs[] = {
-		[RENDER_CLASS]			= GEN12_GFX_TLB_INV_CR,
-		[VIDEO_DECODE_CLASS]		= GEN12_VD_TLB_INV_CR,
-		[VIDEO_ENHANCEMENT_CLASS]	= GEN12_VE_TLB_INV_CR,
-		[COPY_ENGINE_CLASS]		= GEN12_BLT_TLB_INV_CR,
-		[COMPUTE_CLASS]			= GEN12_COMPCTX_TLB_INV_CR,
-	};
-	static const i915_mcr_reg_t xehp_regs[] = {
-		[RENDER_CLASS]			= XEHP_GFX_TLB_INV_CR,
-		[VIDEO_DECODE_CLASS]		= XEHP_VD_TLB_INV_CR,
-		[VIDEO_ENHANCEMENT_CLASS]	= XEHP_VE_TLB_INV_CR,
-		[COPY_ENGINE_CLASS]		= XEHP_BLT_TLB_INV_CR,
-		[COMPUTE_CLASS]			= XEHP_COMPCTX_TLB_INV_CR,
-	};
-	struct drm_i915_private *i915 = gt->i915;
-	struct intel_uncore *uncore = gt->uncore;
-	struct intel_engine_cs *engine;
-	intel_engine_mask_t awake, tmp;
-	enum intel_engine_id id;
-	const i915_reg_t *regs;
-	unsigned int num = 0;
-	unsigned long flags;
-
-	/*
-	 * New platforms should not be added with catch-all-newer (>=)
-	 * condition so that any later platform added triggers the below warning
-	 * and in turn mandates a human cross-check of whether the invalidation
-	 * flows have compatible semantics.
-	 *
-	 * For instance with the 11.00 -> 12.00 transition three out of five
-	 * respective engine registers were moved to masked type. Then after the
-	 * 12.00 -> 12.50 transition multi cast handling is required too.
-	 */
-
-	if (GRAPHICS_VER_FULL(i915) == IP_VER(12, 50) ||
-	    GRAPHICS_VER_FULL(i915) == IP_VER(12, 55)) {
-		regs = NULL;
-		num = ARRAY_SIZE(xehp_regs);
-	} else if (GRAPHICS_VER_FULL(i915) == IP_VER(12, 0) ||
-		   GRAPHICS_VER_FULL(i915) == IP_VER(12, 10)) {
-		regs = gen12_regs;
-		num = ARRAY_SIZE(gen12_regs);
-	} else if (GRAPHICS_VER(i915) >= 8 && GRAPHICS_VER(i915) <= 11) {
-		regs = gen8_regs;
-		num = ARRAY_SIZE(gen8_regs);
-	} else if (GRAPHICS_VER(i915) < 8) {
-		return;
-	}
-
-	if (gt_WARN_ONCE(gt, !num, "Platform does not implement TLB invalidation!"))
-		return;
-
-	intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
-
-	intel_gt_mcr_lock(gt, &flags);
-	spin_lock(&uncore->lock); /* serialise invalidate with GT reset */
-
-	awake = 0;
-	for_each_engine(engine, gt, id) {
-		struct reg_and_bit rb;
-
-		if (!intel_engine_pm_is_awake(engine))
-			continue;
-
-		if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) {
-			u32 val = BIT(engine->instance);
-
-			if (engine->class == VIDEO_DECODE_CLASS ||
-			    engine->class == VIDEO_ENHANCEMENT_CLASS ||
-			    engine->class == COMPUTE_CLASS)
-				val = _MASKED_BIT_ENABLE(val);
-			intel_gt_mcr_multicast_write_fw(gt,
-							xehp_regs[engine->class],
-							val);
-		} else {
-			rb = get_reg_and_bit(engine, regs == gen8_regs, regs, num);
-			if (!i915_mmio_reg_offset(rb.reg))
-				continue;
-
-			if (GRAPHICS_VER(i915) == 12 && (engine->class == VIDEO_DECODE_CLASS ||
-							 engine->class == VIDEO_ENHANCEMENT_CLASS ||
-							 engine->class == COMPUTE_CLASS))
-				rb.bit = _MASKED_BIT_ENABLE(rb.bit);
-
-			intel_uncore_write_fw(uncore, rb.reg, rb.bit);
-		}
-		awake |= engine->mask;
-	}
-
-	GT_TRACE(gt, "invalidated engines %08x\n", awake);
-
-	/* Wa_2207587034:tgl,dg1,rkl,adl-s,adl-p */
-	if (awake &&
-	    (IS_TIGERLAKE(i915) ||
-	     IS_DG1(i915) ||
-	     IS_ROCKETLAKE(i915) ||
-	     IS_ALDERLAKE_S(i915) ||
-	     IS_ALDERLAKE_P(i915)))
-		intel_uncore_write_fw(uncore, GEN12_OA_TLB_INV_CR, 1);
-
-	spin_unlock(&uncore->lock);
-	intel_gt_mcr_unlock(gt, flags);
-
-	for_each_engine_masked(engine, gt, awake, tmp) {
-		struct reg_and_bit rb;
-
-		if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) {
-			rb.mcr_reg = xehp_regs[engine->class];
-			rb.bit = BIT(engine->instance);
-		} else {
-			rb = get_reg_and_bit(engine, regs == gen8_regs, regs, num);
-		}
-
-		if (wait_for_invalidate(gt, rb))
-			gt_err_ratelimited(gt, "%s TLB invalidation did not complete in %ums!\n",
-					   engine->name, TLB_INVAL_TIMEOUT_MS);
-	}
-
-	/*
-	 * Use delayed put since a) we mostly expect a flurry of TLB
-	 * invalidations so it is good to avoid paying the forcewake cost and
-	 * b) it works around a bug in Icelake which cannot cope with too rapid
-	 * transitions.
-	 */
-	intel_uncore_forcewake_put_delayed(uncore, FORCEWAKE_ALL);
+	return MEDIA_VER_FULL(gt->i915) == IP_VER(13, 0) && gt->type == GT_MEDIA;
 }
 
-static bool tlb_seqno_passed(const struct intel_gt *gt, u32 seqno)
+static void __intel_gt_bind_context_set_ready(struct intel_gt *gt, bool ready)
 {
-	u32 cur = intel_gt_tlb_seqno(gt);
+	struct intel_engine_cs *engine = gt->engine[BCS0];
 
-	/* Only skip if a *full* TLB invalidate barrier has passed */
-	return (s32)(cur - ALIGN(seqno, 2)) > 0;
+	if (engine && engine->bind_context)
+		engine->bind_context_ready = ready;
 }
 
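The removed tlb_seqno_passed() — this logic moved out of intel_gt.c along with the rest of the TLB code (note the new intel_tlb.h include at the top of the diff) — relied on wraparound-safe sequence arithmetic: subtracting in u32 and casting to s32 keeps the comparison valid across the 32-bit wrap, and rounding the seqno up to even means only a completed *full*-barrier invalidation counts. A standalone sketch of that test:

#include <stdint.h>
#include <stdio.h>

#define ALIGN2(x) (((x) + 1u) & ~1u)	/* ALIGN(x, 2): round up to even */

static int tlb_seqno_passed(uint32_t cur, uint32_t seqno)
{
	/* Only skip if a *full* TLB invalidate barrier has passed */
	return (int32_t)(cur - ALIGN2(seqno)) > 0;
}

int main(void)
{
	printf("%d\n", tlb_seqno_passed(5, 3));			/* 1: barrier done */
	printf("%d\n", tlb_seqno_passed(4, 4));			/* 0: not yet */
	printf("%d\n", tlb_seqno_passed(2, 0xfffffffe));	/* 1: safe across wrap */
	return 0;
}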
-void intel_gt_invalidate_tlb(struct intel_gt *gt, u32 seqno)
+/**
+ * intel_gt_bind_context_set_ready - Set the context binding as ready
+ *
+ * @gt: GT structure
+ *
+ * This function marks the binder context as ready.
+ */
+void intel_gt_bind_context_set_ready(struct intel_gt *gt)
 {
-	intel_wakeref_t wakeref;
-
-	if (I915_SELFTEST_ONLY(gt->awake == -ENODEV))
-		return;
+	__intel_gt_bind_context_set_ready(gt, true);
+}
 
-	if (intel_gt_is_wedged(gt))
-		return;
+/**
+ * intel_gt_bind_context_set_unready - Set the context binding as not ready
+ * @gt: GT structure
+ *
+ * This function marks the binder context as not ready.
+ */
-	if (tlb_seqno_passed(gt, seqno))
-		return;
+void intel_gt_bind_context_set_unready(struct intel_gt *gt)
+{
+	__intel_gt_bind_context_set_ready(gt, false);
+}
 
-	with_intel_gt_pm_if_awake(gt, wakeref) {
-		mutex_lock(&gt->tlb.invalidate_lock);
-		if (tlb_seqno_passed(gt, seqno))
-			goto unlock;
+/**
+ * intel_gt_is_bind_context_ready - Check if context binding is ready
+ *
+ * @gt: GT structure
+ *
+ * This function returns the binder context's ready status.
+ */
+bool intel_gt_is_bind_context_ready(struct intel_gt *gt)
+{
+	struct intel_engine_cs *engine = gt->engine[BCS0];
 
-		mmio_invalidate_full(gt);
+	if (engine)
+		return engine->bind_context_ready;
 
-		write_seqcount_invalidate(&gt->tlb.seqno);
-unlock:
-		mutex_unlock(&gt->tlb.invalidate_lock);
-	}
+	return false;
 }
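Looking back at the gen8_report_fault() helper added earlier in this diff: its 48-bit address reconstruction is self-contained bit math and can be checked in isolation. A userspace sketch, assuming FAULT_VA_HIGH_BITS masks the low nibble of fault_data1 (the shift counts come from the helper itself; the mask value here is an assumption):

#include <stdint.h>
#include <stdio.h>

#define FAULT_VA_HIGH_BITS 0xf	/* assumed mask for VA[47:44] in fault_data1 */

/* fault_data0 carries VA[43:12]; fault_data1 carries VA[47:44] */
static uint64_t fault_va(uint32_t fault_data0, uint32_t fault_data1)
{
	return ((uint64_t)(fault_data1 & FAULT_VA_HIGH_BITS) << 44) |
	       ((uint64_t)fault_data0 << 12);
}

int main(void)
{
	uint64_t va = fault_va(0x12345, 0x3);	/* made-up register values */

	/* prints "Addr: 0x00003000_12345000", matching the gt_dbg() layout */
	printf("Addr: 0x%08x_%08x\n",
	       (unsigned int)(va >> 32), (unsigned int)va);
	return 0;
}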
