Diffstat (limited to 'drivers/gpu/drm/i915/gt/intel_ggtt.c')
| -rw-r--r-- | drivers/gpu/drm/i915/gt/intel_ggtt.c | 747 |
1 file changed, 555 insertions, 192 deletions
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c index 2049a00417af..08c4e735481b 100644 --- a/drivers/gpu/drm/i915/gt/intel_ggtt.c +++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c @@ -8,30 +8,31 @@ #include <linux/types.h> #include <linux/stop_machine.h> -#include <drm/i915_drm.h> -#include <drm/intel-gtt.h> +#include <drm/drm_managed.h> +#include <drm/drm_print.h> +#include <drm/intel/i915_drm.h> +#include <drm/intel/intel-gtt.h> #include "gem/i915_gem_lmem.h" +#include "intel_context.h" #include "intel_ggtt_gmch.h" +#include "intel_gpu_commands.h" #include "intel_gt.h" #include "intel_gt_regs.h" #include "intel_pci_config.h" +#include "intel_ring.h" #include "i915_drv.h" #include "i915_pci.h" +#include "i915_reg.h" +#include "i915_request.h" #include "i915_scatterlist.h" #include "i915_utils.h" #include "i915_vgpu.h" #include "intel_gtt.h" #include "gen8_ppgtt.h" - -static inline bool suspend_retains_ptes(struct i915_address_space *vm) -{ - return GRAPHICS_VER(vm->i915) >= 8 && - !HAS_LMEM(vm->i915) && - vm->is_ggtt; -} +#include "intel_engine_pm.h" static void i915_ggtt_color_adjust(const struct drm_mm_node *node, unsigned long color, @@ -104,31 +105,15 @@ int i915_ggtt_init_hw(struct drm_i915_private *i915) return 0; } -/* - * Return the value of the last GGTT pte cast to an u64, if - * the system is supposed to retain ptes across resume. 0 otherwise. - */ -static u64 read_last_pte(struct i915_address_space *vm) -{ - struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); - gen8_pte_t __iomem *ptep; - - if (!suspend_retains_ptes(vm)) - return 0; - - GEM_BUG_ON(GRAPHICS_VER(vm->i915) < 8); - ptep = (typeof(ptep))ggtt->gsm + (ggtt_total_entries(ggtt) - 1); - return readq(ptep); -} - /** * i915_ggtt_suspend_vm - Suspend the memory mappings for a GGTT or DPT VM * @vm: The VM to suspend the mappings for + * @evict_all: Evict all VMAs * * Suspend the memory mappings for all objects mapped to HW via the GGTT or a * DPT page table. */ -void i915_ggtt_suspend_vm(struct i915_address_space *vm) +void i915_ggtt_suspend_vm(struct i915_address_space *vm, bool evict_all) { struct i915_vma *vma, *vn; int save_skip_rewrite; @@ -174,7 +159,7 @@ retry: goto retry; } - if (!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND)) { + if (evict_all || !i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND)) { i915_vma_wait_for_bind(vma); __i915_vma_evict(vma, false); @@ -184,22 +169,24 @@ retry: i915_gem_object_unlock(obj); } - if (!suspend_retains_ptes(vm)) - vm->clear_range(vm, 0, vm->total); - else - i915_vm_to_ggtt(vm)->probed_pte = read_last_pte(vm); + vm->clear_range(vm, 0, vm->total); vm->skip_pte_rewrite = save_skip_rewrite; mutex_unlock(&vm->mutex); + + drm_WARN_ON(&vm->i915->drm, evict_all && !list_empty(&vm->bound_list)); } void i915_ggtt_suspend(struct i915_ggtt *ggtt) { - i915_ggtt_suspend_vm(&ggtt->vm); + struct intel_gt *gt; + + i915_ggtt_suspend_vm(&ggtt->vm, false); ggtt->invalidate(ggtt); - intel_gt_check_and_clear_faults(ggtt->vm.gt); + list_for_each_entry(gt, &ggtt->gt_list, ggtt_link) + intel_gt_check_and_clear_faults(gt); } void gen6_ggtt_invalidate(struct i915_ggtt *ggtt) @@ -212,6 +199,21 @@ void gen6_ggtt_invalidate(struct i915_ggtt *ggtt) spin_unlock_irq(&uncore->lock); } +static bool needs_wc_ggtt_mapping(struct drm_i915_private *i915) +{ + /* + * On BXT+/ICL+ writes larger than 64 bit to the GTT pagetable range + * will be dropped. 
For WC mappings in general we have 64 byte burst + * writes when the WC buffer is flushed, so we can't use it, but have to + * resort to an uncached mapping. The WC issue is easily caught by the + * readback check when writing GTT PTE entries. + */ + if (!IS_GEN9_LP(i915) && GRAPHICS_VER(i915) < 11) + return true; + + return false; +} + static void gen8_ggtt_invalidate(struct i915_ggtt *ggtt) { struct intel_uncore *uncore = ggtt->vm.gt->uncore; @@ -219,26 +221,65 @@ static void gen8_ggtt_invalidate(struct i915_ggtt *ggtt) /* * Note that as an uncached mmio write, this will flush the * WCB of the writes into the GGTT before it triggers the invalidate. + * + * Only perform this when GGTT is mapped as WC, see ggtt_probe_common(). */ - intel_uncore_write_fw(uncore, GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN); + if (needs_wc_ggtt_mapping(ggtt->vm.i915)) + intel_uncore_write_fw(uncore, GFX_FLSH_CNTL_GEN6, + GFX_FLSH_CNTL_EN); +} + +static void guc_ggtt_ct_invalidate(struct intel_gt *gt) +{ + struct intel_uncore *uncore = gt->uncore; + intel_wakeref_t wakeref; + + with_intel_runtime_pm_if_active(uncore->rpm, wakeref) + intel_guc_invalidate_tlb_guc(gt_to_guc(gt)); } static void guc_ggtt_invalidate(struct i915_ggtt *ggtt) { - struct intel_uncore *uncore = ggtt->vm.gt->uncore; struct drm_i915_private *i915 = ggtt->vm.i915; + struct intel_gt *gt; gen8_ggtt_invalidate(ggtt); - if (GRAPHICS_VER(i915) >= 12) - intel_uncore_write_fw(uncore, GEN12_GUC_TLB_INV_CR, - GEN12_GUC_TLB_INV_CR_INVALIDATE); - else - intel_uncore_write_fw(uncore, GEN8_GTCR, GEN8_GTCR_INVALIDATE); + list_for_each_entry(gt, &ggtt->gt_list, ggtt_link) { + if (intel_guc_tlb_invalidation_is_available(gt_to_guc(gt))) + guc_ggtt_ct_invalidate(gt); + else if (GRAPHICS_VER(i915) >= 12) + intel_uncore_write_fw(gt->uncore, + GEN12_GUC_TLB_INV_CR, + GEN12_GUC_TLB_INV_CR_INVALIDATE); + else + intel_uncore_write_fw(gt->uncore, + GEN8_GTCR, GEN8_GTCR_INVALIDATE); + } +} + +static u64 mtl_ggtt_pte_encode(dma_addr_t addr, + unsigned int pat_index, + u32 flags) +{ + gen8_pte_t pte = addr | GEN8_PAGE_PRESENT; + + WARN_ON_ONCE(addr & ~GEN12_GGTT_PTE_ADDR_MASK); + + if (flags & PTE_LM) + pte |= GEN12_GGTT_PTE_LM; + + if (pat_index & BIT(0)) + pte |= MTL_GGTT_PTE_PAT0; + + if (pat_index & BIT(1)) + pte |= MTL_GGTT_PTE_PAT1; + + return pte; } u64 gen8_ggtt_pte_encode(dma_addr_t addr, - enum i915_cache_level level, + unsigned int pat_index, u32 flags) { gen8_pte_t pte = addr | GEN8_PAGE_PRESENT; @@ -249,33 +290,212 @@ u64 gen8_ggtt_pte_encode(dma_addr_t addr, return pte; } +static dma_addr_t gen8_ggtt_pte_decode(u64 pte, bool *is_present, bool *is_local) +{ + *is_present = pte & GEN8_PAGE_PRESENT; + *is_local = pte & GEN12_GGTT_PTE_LM; + + return pte & GEN12_GGTT_PTE_ADDR_MASK; +} + +static bool should_update_ggtt_with_bind(struct i915_ggtt *ggtt) +{ + struct intel_gt *gt = ggtt->vm.gt; + + return intel_gt_is_bind_context_ready(gt); +} + +static struct intel_context *gen8_ggtt_bind_get_ce(struct i915_ggtt *ggtt, intel_wakeref_t *wakeref) +{ + struct intel_context *ce; + struct intel_gt *gt = ggtt->vm.gt; + + if (intel_gt_is_wedged(gt)) + return NULL; + + ce = gt->engine[BCS0]->bind_context; + GEM_BUG_ON(!ce); + + /* + * If the GT is not awake already at this stage then fallback + * to pci based GGTT update otherwise __intel_wakeref_get_first() + * would conflict with fs_reclaim trying to allocate memory while + * doing rpm_resume(). 
+ */ + *wakeref = intel_gt_pm_get_if_awake(gt); + if (!*wakeref) + return NULL; + + intel_engine_pm_get(ce->engine); + + return ce; +} + +static void gen8_ggtt_bind_put_ce(struct intel_context *ce, intel_wakeref_t wakeref) +{ + intel_engine_pm_put(ce->engine); + intel_gt_pm_put(ce->engine->gt, wakeref); +} + +static bool gen8_ggtt_bind_ptes(struct i915_ggtt *ggtt, u32 offset, + struct sg_table *pages, u32 num_entries, + const gen8_pte_t pte) +{ + struct i915_sched_attr attr = {}; + struct intel_gt *gt = ggtt->vm.gt; + const gen8_pte_t scratch_pte = ggtt->vm.scratch[0]->encode; + struct sgt_iter iter; + struct i915_request *rq; + struct intel_context *ce; + intel_wakeref_t wakeref; + u32 *cs; + + if (!num_entries) + return true; + + ce = gen8_ggtt_bind_get_ce(ggtt, &wakeref); + if (!ce) + return false; + + if (pages) + iter = __sgt_iter(pages->sgl, true); + + while (num_entries) { + int count = 0; + dma_addr_t addr; + /* + * MI_UPDATE_GTT can update 512 entries in a single command but + * that end up with engine reset, 511 works. + */ + u32 n_ptes = min_t(u32, 511, num_entries); + + if (mutex_lock_interruptible(&ce->timeline->mutex)) + goto put_ce; + + intel_context_enter(ce); + rq = __i915_request_create(ce, GFP_NOWAIT | GFP_ATOMIC); + intel_context_exit(ce); + if (IS_ERR(rq)) { + GT_TRACE(gt, "Failed to get bind request\n"); + mutex_unlock(&ce->timeline->mutex); + goto put_ce; + } + + cs = intel_ring_begin(rq, 2 * n_ptes + 2); + if (IS_ERR(cs)) { + GT_TRACE(gt, "Failed to ring space for GGTT bind\n"); + i915_request_set_error_once(rq, PTR_ERR(cs)); + /* once a request is created, it must be queued */ + goto queue_err_rq; + } + + *cs++ = MI_UPDATE_GTT | (2 * n_ptes); + *cs++ = offset << 12; + + if (pages) { + for_each_sgt_daddr_next(addr, iter) { + if (count == n_ptes) + break; + *cs++ = lower_32_bits(pte | addr); + *cs++ = upper_32_bits(pte | addr); + count++; + } + /* fill remaining with scratch pte, if any */ + if (count < n_ptes) { + memset64((u64 *)cs, scratch_pte, + n_ptes - count); + cs += (n_ptes - count) * 2; + } + } else { + memset64((u64 *)cs, pte, n_ptes); + cs += n_ptes * 2; + } + + intel_ring_advance(rq, cs); +queue_err_rq: + i915_request_get(rq); + __i915_request_commit(rq); + __i915_request_queue(rq, &attr); + + mutex_unlock(&ce->timeline->mutex); + /* This will break if the request is complete or after engine reset */ + i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT); + if (rq->fence.error) + goto err_rq; + + i915_request_put(rq); + + num_entries -= n_ptes; + offset += n_ptes; + } + + gen8_ggtt_bind_put_ce(ce, wakeref); + return true; + +err_rq: + i915_request_put(rq); +put_ce: + gen8_ggtt_bind_put_ce(ce, wakeref); + return false; +} + static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte) { writeq(pte, addr); } +static gen8_pte_t gen8_get_pte(void __iomem *addr) +{ + return readq(addr); +} + static void gen8_ggtt_insert_page(struct i915_address_space *vm, dma_addr_t addr, u64 offset, - enum i915_cache_level level, + unsigned int pat_index, u32 flags) { struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); gen8_pte_t __iomem *pte = (gen8_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE; - gen8_set_pte(pte, gen8_ggtt_pte_encode(addr, level, flags)); + gen8_set_pte(pte, ggtt->vm.pte_encode(addr, pat_index, flags)); ggtt->invalidate(ggtt); } +static dma_addr_t gen8_ggtt_read_entry(struct i915_address_space *vm, + u64 offset, bool *is_present, bool *is_local) +{ + struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); + gen8_pte_t __iomem *pte = + (gen8_pte_t __iomem *)ggtt->gsm + 
offset / I915_GTT_PAGE_SIZE; + + return ggtt->vm.pte_decode(gen8_get_pte(pte), is_present, is_local); +} + +static void gen8_ggtt_insert_page_bind(struct i915_address_space *vm, + dma_addr_t addr, u64 offset, + unsigned int pat_index, u32 flags) +{ + struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); + gen8_pte_t pte; + + pte = ggtt->vm.pte_encode(addr, pat_index, flags); + if (should_update_ggtt_with_bind(i915_vm_to_ggtt(vm)) && + gen8_ggtt_bind_ptes(ggtt, offset, NULL, 1, pte)) + return ggtt->invalidate(ggtt); + + gen8_ggtt_insert_page(vm, addr, offset, pat_index, flags); +} + static void gen8_ggtt_insert_entries(struct i915_address_space *vm, struct i915_vma_resource *vma_res, - enum i915_cache_level level, + unsigned int pat_index, u32 flags) { - const gen8_pte_t pte_encode = gen8_ggtt_pte_encode(0, level, flags); struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); + const gen8_pte_t pte_encode = ggtt->vm.pte_encode(0, pat_index, flags); gen8_pte_t __iomem *gte; gen8_pte_t __iomem *end; struct sgt_iter iter; @@ -287,8 +507,11 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm, */ gte = (gen8_pte_t __iomem *)ggtt->gsm; - gte += vma_res->start / I915_GTT_PAGE_SIZE; - end = gte + vma_res->node_size / I915_GTT_PAGE_SIZE; + gte += (vma_res->start - vma_res->guard) / I915_GTT_PAGE_SIZE; + end = gte + vma_res->guard / I915_GTT_PAGE_SIZE; + while (gte < end) + gen8_set_pte(gte++, vm->scratch[0]->encode); + end += (vma_res->node_size + vma_res->guard) / I915_GTT_PAGE_SIZE; for_each_sgt_daddr(addr, iter, vma_res->bi.pages) gen8_set_pte(gte++, pte_encode | addr); @@ -305,21 +528,118 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm, ggtt->invalidate(ggtt); } +static bool __gen8_ggtt_insert_entries_bind(struct i915_address_space *vm, + struct i915_vma_resource *vma_res, + unsigned int pat_index, u32 flags) +{ + struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); + gen8_pte_t scratch_pte = vm->scratch[0]->encode; + gen8_pte_t pte_encode; + u64 start, end; + + pte_encode = ggtt->vm.pte_encode(0, pat_index, flags); + start = (vma_res->start - vma_res->guard) / I915_GTT_PAGE_SIZE; + end = start + vma_res->guard / I915_GTT_PAGE_SIZE; + if (!gen8_ggtt_bind_ptes(ggtt, start, NULL, end - start, scratch_pte)) + goto err; + + start = end; + end += (vma_res->node_size + vma_res->guard) / I915_GTT_PAGE_SIZE; + if (!gen8_ggtt_bind_ptes(ggtt, start, vma_res->bi.pages, + vma_res->node_size / I915_GTT_PAGE_SIZE, pte_encode)) + goto err; + + start += vma_res->node_size / I915_GTT_PAGE_SIZE; + if (!gen8_ggtt_bind_ptes(ggtt, start, NULL, end - start, scratch_pte)) + goto err; + + return true; + +err: + return false; +} + +static void gen8_ggtt_insert_entries_bind(struct i915_address_space *vm, + struct i915_vma_resource *vma_res, + unsigned int pat_index, u32 flags) +{ + struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); + + if (should_update_ggtt_with_bind(i915_vm_to_ggtt(vm)) && + __gen8_ggtt_insert_entries_bind(vm, vma_res, pat_index, flags)) + return ggtt->invalidate(ggtt); + + gen8_ggtt_insert_entries(vm, vma_res, pat_index, flags); +} + +static void gen8_ggtt_clear_range(struct i915_address_space *vm, + u64 start, u64 length) +{ + struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); + unsigned int first_entry = start / I915_GTT_PAGE_SIZE; + unsigned int num_entries = length / I915_GTT_PAGE_SIZE; + const gen8_pte_t scratch_pte = vm->scratch[0]->encode; + gen8_pte_t __iomem *gtt_base = + (gen8_pte_t __iomem *)ggtt->gsm + first_entry; + const int max_entries = ggtt_total_entries(ggtt) - first_entry; + 
int i; + + if (WARN(num_entries > max_entries, + "First entry = %d; Num entries = %d (max=%d)\n", + first_entry, num_entries, max_entries)) + num_entries = max_entries; + + for (i = 0; i < num_entries; i++) + gen8_set_pte(&gtt_base[i], scratch_pte); +} + +static void gen8_ggtt_scratch_range_bind(struct i915_address_space *vm, + u64 start, u64 length) +{ + struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); + unsigned int first_entry = start / I915_GTT_PAGE_SIZE; + unsigned int num_entries = length / I915_GTT_PAGE_SIZE; + const gen8_pte_t scratch_pte = vm->scratch[0]->encode; + const int max_entries = ggtt_total_entries(ggtt) - first_entry; + + if (WARN(num_entries > max_entries, + "First entry = %d; Num entries = %d (max=%d)\n", + first_entry, num_entries, max_entries)) + num_entries = max_entries; + + if (should_update_ggtt_with_bind(ggtt) && gen8_ggtt_bind_ptes(ggtt, first_entry, + NULL, num_entries, scratch_pte)) + return ggtt->invalidate(ggtt); + + gen8_ggtt_clear_range(vm, start, length); +} + static void gen6_ggtt_insert_page(struct i915_address_space *vm, dma_addr_t addr, u64 offset, - enum i915_cache_level level, + unsigned int pat_index, u32 flags) { struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); gen6_pte_t __iomem *pte = (gen6_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE; - iowrite32(vm->pte_encode(addr, level, flags), pte); + iowrite32(vm->pte_encode(addr, pat_index, flags), pte); ggtt->invalidate(ggtt); } +static dma_addr_t gen6_ggtt_read_entry(struct i915_address_space *vm, + u64 offset, + bool *is_present, bool *is_local) +{ + struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); + gen6_pte_t __iomem *pte = + (gen6_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE; + + return vm->pte_decode(ioread32(pte), is_present, is_local); +} + /* * Binds an object into the global gtt with the specified cache level.
* The object will be accessible to the GPU via commands whose operands @@ -328,7 +648,7 @@ static void gen6_ggtt_insert_page(struct i915_address_space *vm, */ static void gen6_ggtt_insert_entries(struct i915_address_space *vm, struct i915_vma_resource *vma_res, - enum i915_cache_level level, + unsigned int pat_index, u32 flags) { struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); @@ -338,11 +658,14 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm, dma_addr_t addr; gte = (gen6_pte_t __iomem *)ggtt->gsm; - gte += vma_res->start / I915_GTT_PAGE_SIZE; - end = gte + vma_res->node_size / I915_GTT_PAGE_SIZE; + gte += (vma_res->start - vma_res->guard) / I915_GTT_PAGE_SIZE; + end = gte + vma_res->guard / I915_GTT_PAGE_SIZE; + while (gte < end) + iowrite32(vm->scratch[0]->encode, gte++); + end += (vma_res->node_size + vma_res->guard) / I915_GTT_PAGE_SIZE; for_each_sgt_daddr(addr, iter, vma_res->bi.pages) - iowrite32(vm->pte_encode(addr, level, flags), gte++); + iowrite32(vm->pte_encode(addr, pat_index, flags), gte++); GEM_BUG_ON(gte > end); /* Fill the allocated but "unused" space beyond the end of the buffer */ @@ -361,27 +684,6 @@ static void nop_clear_range(struct i915_address_space *vm, { } -static void gen8_ggtt_clear_range(struct i915_address_space *vm, - u64 start, u64 length) -{ - struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); - unsigned int first_entry = start / I915_GTT_PAGE_SIZE; - unsigned int num_entries = length / I915_GTT_PAGE_SIZE; - const gen8_pte_t scratch_pte = vm->scratch[0]->encode; - gen8_pte_t __iomem *gtt_base = - (gen8_pte_t __iomem *)ggtt->gsm + first_entry; - const int max_entries = ggtt_total_entries(ggtt) - first_entry; - int i; - - if (WARN(num_entries > max_entries, - "First entry = %d; Num entries = %d (max=%d)\n", - first_entry, num_entries, max_entries)) - num_entries = max_entries; - - for (i = 0; i < num_entries; i++) - gen8_set_pte(&gtt_base[i], scratch_pte); -} - static void bxt_vtd_ggtt_wa(struct i915_address_space *vm) { /* @@ -398,14 +700,15 @@ struct insert_page { struct i915_address_space *vm; dma_addr_t addr; u64 offset; - enum i915_cache_level level; + unsigned int pat_index; }; static int bxt_vtd_ggtt_insert_page__cb(void *_arg) { struct insert_page *arg = _arg; - gen8_ggtt_insert_page(arg->vm, arg->addr, arg->offset, arg->level, 0); + gen8_ggtt_insert_page(arg->vm, arg->addr, arg->offset, + arg->pat_index, 0); bxt_vtd_ggtt_wa(arg->vm); return 0; @@ -414,10 +717,10 @@ static int bxt_vtd_ggtt_insert_page__cb(void *_arg) static void bxt_vtd_ggtt_insert_page__BKL(struct i915_address_space *vm, dma_addr_t addr, u64 offset, - enum i915_cache_level level, + unsigned int pat_index, u32 unused) { - struct insert_page arg = { vm, addr, offset, level }; + struct insert_page arg = { vm, addr, offset, pat_index }; stop_machine(bxt_vtd_ggtt_insert_page__cb, &arg, NULL); } @@ -425,7 +728,7 @@ static void bxt_vtd_ggtt_insert_page__BKL(struct i915_address_space *vm, struct insert_entries { struct i915_address_space *vm; struct i915_vma_resource *vma_res; - enum i915_cache_level level; + unsigned int pat_index; u32 flags; }; @@ -433,7 +736,8 @@ static int bxt_vtd_ggtt_insert_entries__cb(void *_arg) { struct insert_entries *arg = _arg; - gen8_ggtt_insert_entries(arg->vm, arg->vma_res, arg->level, arg->flags); + gen8_ggtt_insert_entries(arg->vm, arg->vma_res, + arg->pat_index, arg->flags); bxt_vtd_ggtt_wa(arg->vm); return 0; @@ -441,10 +745,10 @@ static void bxt_vtd_ggtt_insert_entries__BKL(struct 
i915_address_space *vm, struct i915_vma_resource *vma_res, - enum i915_cache_level level, + unsigned int pat_index, u32 flags) { - struct insert_entries arg = { vm, vma_res, level, flags }; + struct insert_entries arg = { vm, vma_res, pat_index, flags }; stop_machine(bxt_vtd_ggtt_insert_entries__cb, &arg, NULL); } @@ -473,7 +777,7 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm, void intel_ggtt_bind_vma(struct i915_address_space *vm, struct i915_vm_pt_stash *stash, struct i915_vma_resource *vma_res, - enum i915_cache_level cache_level, + unsigned int pat_index, u32 flags) { u32 pte_flags; @@ -490,7 +794,7 @@ void intel_ggtt_bind_vma(struct i915_address_space *vm, if (vma_res->bi.lmem) pte_flags |= PTE_LM; - vm->insert_entries(vm, vma_res, cache_level, pte_flags); + vm->insert_entries(vm, vma_res, pat_index, pte_flags); vma_res->page_sizes_gtt = I915_GTT_PAGE_SIZE; } @@ -500,20 +804,39 @@ void intel_ggtt_unbind_vma(struct i915_address_space *vm, vm->clear_range(vm, vma_res->start, vma_res->vma_size); } +dma_addr_t intel_ggtt_read_entry(struct i915_address_space *vm, + u64 offset, bool *is_present, bool *is_local) +{ + struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); + + return ggtt->vm.read_entry(vm, offset, is_present, is_local); +} + +/* + * Reserve the top of the GuC address space for firmware images. Addresses + * beyond GUC_GGTT_TOP in the GuC address space are inaccessible by GuC, + * which makes for a suitable range to hold GuC/HuC firmware images if the + * size of the GGTT is 4G. However, on a 32-bit platform the size of the GGTT + * is limited to 2G, which is less than GUC_GGTT_TOP, but we reserve a chunk + * of the same size anyway, which is far more than needed, to keep the logic + * in uc_fw_ggtt_offset() simple. + */ +#define GUC_TOP_RESERVE_SIZE (SZ_4G - GUC_GGTT_TOP) + static int ggtt_reserve_guc_top(struct i915_ggtt *ggtt) { - u64 size; + u64 offset; int ret; if (!intel_uc_uses_guc(&ggtt->vm.gt->uc)) return 0; - GEM_BUG_ON(ggtt->vm.total <= GUC_GGTT_TOP); - size = ggtt->vm.total - GUC_GGTT_TOP; + GEM_BUG_ON(ggtt->vm.total <= GUC_TOP_RESERVE_SIZE); + offset = ggtt->vm.total - GUC_TOP_RESERVE_SIZE; - ret = i915_gem_gtt_reserve(&ggtt->vm, NULL, &ggtt->uc_fw, size, - GUC_GGTT_TOP, I915_COLOR_UNEVICTABLE, - PIN_NOEVICT); + ret = i915_gem_gtt_reserve(&ggtt->vm, NULL, &ggtt->uc_fw, + GUC_TOP_RESERVE_SIZE, offset, + I915_COLOR_UNEVICTABLE, PIN_NOEVICT); if (ret) drm_dbg(&ggtt->vm.i915->drm, "Failed to reserve top of GGTT for GuC\n"); @@ -551,8 +874,6 @@ static int init_ggtt(struct i915_ggtt *ggtt) struct drm_mm_node *entry; int ret; - ggtt->pte_lost = true; - /* * GuC requires all resources that we're sharing with it to be placed in * non-WOPCM memory. If GuC is not present or not in use we still need a @@ -560,7 +881,7 @@ static int init_ggtt(struct i915_ggtt *ggtt) * why. */ ggtt->pin_bias = max_t(u32, I915_GTT_PAGE_SIZE, - intel_wopcm_guc_size(&ggtt->vm.i915->wopcm)); + intel_wopcm_guc_size(&ggtt->vm.gt->wopcm)); ret = intel_vgt_balloon(ggtt); if (ret) @@ -585,8 +906,12 @@ static int init_ggtt(struct i915_ggtt *ggtt) * paths, and we trust that 0 will remain reserved. However, * the only likely reason for failure to insert is a driver * bug, which we expect to cause other failures... + * + * Since CPU can perform speculative reads on error capture + * (write-combining allows it) add scratch page after error + * capture to avoid DMAR errors. 
*/ - ggtt->error_capture.size = I915_GTT_PAGE_SIZE; + ggtt->error_capture.size = 2 * I915_GTT_PAGE_SIZE; ggtt->error_capture.color = I915_COLOR_UNEVICTABLE; if (drm_mm_reserve_node(&ggtt->vm.mm, &ggtt->error_capture)) drm_mm_insert_node_in_range(&ggtt->vm.mm, @@ -596,11 +921,15 @@ static int init_ggtt(struct i915_ggtt *ggtt) 0, ggtt->mappable_end, DRM_MM_INSERT_LOW); } - if (drm_mm_node_allocated(&ggtt->error_capture)) + if (drm_mm_node_allocated(&ggtt->error_capture)) { + u64 start = ggtt->error_capture.start; + u64 size = ggtt->error_capture.size; + + ggtt->vm.scratch_range(&ggtt->vm, start, size); drm_dbg(&ggtt->vm.i915->drm, "Reserved GGTT:[%llx, %llx] for use by error capture\n", - ggtt->error_capture.start, - ggtt->error_capture.start + ggtt->error_capture.size); + start, start + size); + } /* * The upper portion of the GuC address space has a sizeable hole @@ -633,7 +962,7 @@ err: static void aliasing_gtt_bind_vma(struct i915_address_space *vm, struct i915_vm_pt_stash *stash, struct i915_vma_resource *vma_res, - enum i915_cache_level cache_level, + unsigned int pat_index, u32 flags) { u32 pte_flags; @@ -645,10 +974,10 @@ static void aliasing_gtt_bind_vma(struct i915_address_space *vm, if (flags & I915_VMA_LOCAL_BIND) ppgtt_bind_vma(&i915_vm_to_ggtt(vm)->alias->vm, - stash, vma_res, cache_level, flags); + stash, vma_res, pat_index, flags); if (flags & I915_VMA_GLOBAL_BIND) - vm->insert_entries(vm, vma_res, cache_level, pte_flags); + vm->insert_entries(vm, vma_res, pat_index, pte_flags); vma_res->bound_flags |= flags; } @@ -866,25 +1195,26 @@ static unsigned int gen6_gttadr_offset(struct drm_i915_private *i915) static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size) { struct drm_i915_private *i915 = ggtt->vm.i915; + struct intel_uncore *uncore = ggtt->vm.gt->uncore; struct pci_dev *pdev = to_pci_dev(i915->drm.dev); phys_addr_t phys_addr; u32 pte_flags; int ret; - GEM_WARN_ON(pci_resource_len(pdev, GTTMMADR_BAR) != gen6_gttmmadr_size(i915)); - phys_addr = pci_resource_start(pdev, GTTMMADR_BAR) + gen6_gttadr_offset(i915); + GEM_WARN_ON(pci_resource_len(pdev, GEN4_GTTMMADR_BAR) != gen6_gttmmadr_size(i915)); - /* - * On BXT+/ICL+ writes larger than 64 bit to the GTT pagetable range - * will be dropped. For WC mappings in general we have 64 byte burst - * writes when the WC buffer is flushed, so we can't use it, but have to - * resort to an uncached mapping. The WC issue is easily caught by the - * readback check when writing GTT PTE entries. 
- */ - if (IS_GEN9_LP(i915) || GRAPHICS_VER(i915) >= 11) - ggtt->gsm = ioremap(phys_addr, size); - else + if (i915_direct_stolen_access(i915)) { + drm_dbg(&i915->drm, "Using direct GSM access\n"); + phys_addr = intel_uncore_read64(uncore, GEN6_GSMBASE) & GEN11_BDSM_MASK; + } else { + phys_addr = pci_resource_start(pdev, GEN4_GTTMMADR_BAR) + gen6_gttadr_offset(i915); + } + + if (needs_wc_ggtt_mapping(i915)) ggtt->gsm = ioremap_wc(phys_addr, size); + else + ggtt->gsm = ioremap(phys_addr, size); + if (!ggtt->gsm) { drm_err(&i915->drm, "Failed to map the ggtt page table\n"); return -ENOMEM; @@ -905,7 +1235,9 @@ static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size) ggtt->vm.scratch[0]->encode = ggtt->vm.pte_encode(px_dma(ggtt->vm.scratch[0]), - I915_CACHE_NONE, pte_flags); + i915_gem_get_pat_index(i915, + I915_CACHE_NONE), + pte_flags); return 0; } @@ -920,8 +1252,8 @@ static void gen6_gmch_remove(struct i915_address_space *vm) static struct resource pci_resource(struct pci_dev *pdev, int bar) { - return (struct resource)DEFINE_RES_MEM(pci_resource_start(pdev, bar), - pci_resource_len(pdev, bar)); + return DEFINE_RES_MEM(pci_resource_start(pdev, bar), + pci_resource_len(pdev, bar)); } static int gen8_gmch_probe(struct i915_ggtt *ggtt) @@ -931,11 +1263,11 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt) unsigned int size; u16 snb_gmch_ctl; - if (!HAS_LMEM(i915)) { - if (!i915_pci_resource_valid(pdev, GTT_APERTURE_BAR)) + if (!HAS_LMEM(i915) && !HAS_LMEMBAR_SMEM_STOLEN(i915)) { + if (!i915_pci_resource_valid(pdev, GEN4_GMADR_BAR)) return -ENXIO; - ggtt->gmadr = pci_resource(pdev, GTT_APERTURE_BAR); + ggtt->gmadr = pci_resource(pdev, GEN4_GMADR_BAR); ggtt->mappable_end = resource_size(&ggtt->gmadr); } @@ -953,10 +1285,10 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt) ggtt->vm.cleanup = gen6_gmch_remove; ggtt->vm.insert_page = gen8_ggtt_insert_page; ggtt->vm.clear_range = nop_clear_range; - if (intel_scanout_needs_vtd_wa(i915)) - ggtt->vm.clear_range = gen8_ggtt_clear_range; + ggtt->vm.scratch_range = gen8_ggtt_clear_range; ggtt->vm.insert_entries = gen8_ggtt_insert_entries; + ggtt->vm.read_entry = gen8_ggtt_read_entry; /* * Serialize GTT updates with aperture access on BXT if VT-d is on, @@ -979,25 +1311,47 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt) I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND; } - ggtt->invalidate = gen8_ggtt_invalidate; + if (i915_ggtt_require_binder(i915)) { + ggtt->vm.scratch_range = gen8_ggtt_scratch_range_bind; + ggtt->vm.insert_page = gen8_ggtt_insert_page_bind; + ggtt->vm.insert_entries = gen8_ggtt_insert_entries_bind; + /* + * On GPU is hung, we might bind VMAs for error capture. + * Fallback to CPU GGTT updates in that case. + */ + ggtt->vm.raw_insert_page = gen8_ggtt_insert_page; + } + + if (intel_uc_wants_guc_submission(&ggtt->vm.gt->uc)) + ggtt->invalidate = guc_ggtt_invalidate; + else + ggtt->invalidate = gen8_ggtt_invalidate; ggtt->vm.vma_ops.bind_vma = intel_ggtt_bind_vma; ggtt->vm.vma_ops.unbind_vma = intel_ggtt_unbind_vma; - ggtt->vm.pte_encode = gen8_ggtt_pte_encode; + if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 70)) + ggtt->vm.pte_encode = mtl_ggtt_pte_encode; + else + ggtt->vm.pte_encode = gen8_ggtt_pte_encode; - setup_private_pat(ggtt->vm.gt->uncore); + ggtt->vm.pte_decode = gen8_ggtt_pte_decode; return ggtt_probe_common(ggtt, size); } +/* + * For pre-gen8 platforms pat_index is the same as enum i915_cache_level, + * so the switch-case statements in these PTE encode functions are still valid. 
+ * See translation table LEGACY_CACHELEVEL. + */ static u64 snb_pte_encode(dma_addr_t addr, - enum i915_cache_level level, + unsigned int pat_index, u32 flags) { gen6_pte_t pte = GEN6_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID; - switch (level) { + switch (pat_index) { case I915_CACHE_L3_LLC: case I915_CACHE_LLC: pte |= GEN6_PTE_CACHE_LLC; @@ -1006,19 +1360,19 @@ static u64 snb_pte_encode(dma_addr_t addr, pte |= GEN6_PTE_UNCACHED; break; default: - MISSING_CASE(level); + MISSING_CASE(pat_index); } return pte; } static u64 ivb_pte_encode(dma_addr_t addr, - enum i915_cache_level level, + unsigned int pat_index, u32 flags) { gen6_pte_t pte = GEN6_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID; - switch (level) { + switch (pat_index) { case I915_CACHE_L3_LLC: pte |= GEN7_PTE_CACHE_L3_LLC; break; @@ -1029,14 +1383,14 @@ static u64 ivb_pte_encode(dma_addr_t addr, pte |= GEN6_PTE_UNCACHED; break; default: - MISSING_CASE(level); + MISSING_CASE(pat_index); } return pte; } static u64 byt_pte_encode(dma_addr_t addr, - enum i915_cache_level level, + unsigned int pat_index, u32 flags) { gen6_pte_t pte = GEN6_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID; @@ -1044,31 +1398,31 @@ static u64 byt_pte_encode(dma_addr_t addr, if (!(flags & PTE_READ_ONLY)) pte |= BYT_PTE_WRITEABLE; - if (level != I915_CACHE_NONE) + if (pat_index != I915_CACHE_NONE) pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES; return pte; } static u64 hsw_pte_encode(dma_addr_t addr, - enum i915_cache_level level, + unsigned int pat_index, u32 flags) { gen6_pte_t pte = HSW_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID; - if (level != I915_CACHE_NONE) + if (pat_index != I915_CACHE_NONE) pte |= HSW_WB_LLC_AGE3; return pte; } static u64 iris_pte_encode(dma_addr_t addr, - enum i915_cache_level level, + unsigned int pat_index, u32 flags) { gen6_pte_t pte = HSW_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID; - switch (level) { + switch (pat_index) { case I915_CACHE_NONE: break; case I915_CACHE_WT: @@ -1082,6 +1436,14 @@ static u64 iris_pte_encode(dma_addr_t addr, return pte; } +static dma_addr_t gen6_pte_decode(u64 pte, bool *is_present, bool *is_local) +{ + *is_present = pte & GEN6_PTE_VALID; + *is_local = false; + + return ((pte & 0xff0) << 28) | (pte & ~0xfff); +} + static int gen6_gmch_probe(struct i915_ggtt *ggtt) { struct drm_i915_private *i915 = ggtt->vm.i915; @@ -1089,10 +1451,10 @@ static int gen6_gmch_probe(struct i915_ggtt *ggtt) unsigned int size; u16 snb_gmch_ctl; - if (!i915_pci_resource_valid(pdev, GTT_APERTURE_BAR)) + if (!i915_pci_resource_valid(pdev, GEN4_GMADR_BAR)) return -ENXIO; - ggtt->gmadr = pci_resource(pdev, GTT_APERTURE_BAR); + ggtt->gmadr = pci_resource(pdev, GEN4_GMADR_BAR); ggtt->mappable_end = resource_size(&ggtt->gmadr); /* @@ -1115,10 +1477,12 @@ static int gen6_gmch_probe(struct i915_ggtt *ggtt) ggtt->vm.alloc_scratch_dma = alloc_pt_dma; ggtt->vm.clear_range = nop_clear_range; - if (!HAS_FULL_PPGTT(i915) || intel_scanout_needs_vtd_wa(i915)) + if (!HAS_FULL_PPGTT(i915)) ggtt->vm.clear_range = gen6_ggtt_clear_range; + ggtt->vm.scratch_range = gen6_ggtt_clear_range; ggtt->vm.insert_page = gen6_ggtt_insert_page; ggtt->vm.insert_entries = gen6_ggtt_insert_entries; + ggtt->vm.read_entry = gen6_ggtt_read_entry; ggtt->vm.cleanup = gen6_gmch_remove; ggtt->invalidate = gen6_ggtt_invalidate; @@ -1134,6 +1498,8 @@ static int gen6_gmch_probe(struct i915_ggtt *ggtt) else ggtt->vm.pte_encode = snb_pte_encode; + ggtt->vm.pte_decode = gen6_pte_decode; + ggtt->vm.vma_ops.bind_vma = intel_ggtt_bind_vma; ggtt->vm.vma_ops.unbind_vma = intel_ggtt_unbind_vma; @@ -1196,7 +1562,14 
@@ static int ggtt_probe_hw(struct i915_ggtt *ggtt, struct intel_gt *gt) */ int i915_ggtt_probe_hw(struct drm_i915_private *i915) { - int ret; + struct intel_gt *gt; + int ret, i; + + for_each_gt(gt, i915, i) { + ret = intel_gt_assign_ggtt(gt); + if (ret) + return ret; + } ret = ggtt_probe_hw(to_gt(i915)->ggtt, to_gt(i915)); if (ret) @@ -1208,40 +1581,31 @@ int i915_ggtt_probe_hw(struct drm_i915_private *i915) return 0; } -int i915_ggtt_enable_hw(struct drm_i915_private *i915) +struct i915_ggtt *i915_ggtt_create(struct drm_i915_private *i915) { - if (GRAPHICS_VER(i915) < 6) - return intel_ggtt_gmch_enable_hw(i915); + struct i915_ggtt *ggtt; - return 0; -} - -void i915_ggtt_enable_guc(struct i915_ggtt *ggtt) -{ - GEM_BUG_ON(ggtt->invalidate != gen8_ggtt_invalidate); + ggtt = drmm_kzalloc(&i915->drm, sizeof(*ggtt), GFP_KERNEL); + if (!ggtt) + return ERR_PTR(-ENOMEM); - ggtt->invalidate = guc_ggtt_invalidate; + INIT_LIST_HEAD(&ggtt->gt_list); - ggtt->invalidate(ggtt); + return ggtt; } -void i915_ggtt_disable_guc(struct i915_ggtt *ggtt) +int i915_ggtt_enable_hw(struct drm_i915_private *i915) { - /* XXX Temporary pardon for error unload */ - if (ggtt->invalidate == gen8_ggtt_invalidate) - return; - - /* We should only be called after i915_ggtt_enable_guc() */ - GEM_BUG_ON(ggtt->invalidate != guc_ggtt_invalidate); - - ggtt->invalidate = gen8_ggtt_invalidate; + if (GRAPHICS_VER(i915) < 6) + return intel_ggtt_gmch_enable_hw(i915); - ggtt->invalidate(ggtt); + return 0; } /** * i915_ggtt_resume_vm - Restore the memory mappings for a GGTT or DPT VM * @vm: The VM to restore the mappings for + * @all_evicted: Were all VMAs expected to be evicted on suspend? * * Restore the memory mappings for all objects mapped to HW via the GGTT or a * DPT page table. @@ -1249,24 +1613,20 @@ void i915_ggtt_disable_guc(struct i915_ggtt *ggtt) * Returns %true if restoring the mapping for any object that was in a write * domain before suspend. */ -bool i915_ggtt_resume_vm(struct i915_address_space *vm) +bool i915_ggtt_resume_vm(struct i915_address_space *vm, bool all_evicted) { struct i915_vma *vma; bool write_domain_objs = false; - bool retained_ptes; drm_WARN_ON(&vm->i915->drm, !vm->is_ggtt && !vm->is_dpt); - /* - * First fill our portion of the GTT with scratch pages if - * they were not retained across suspend. - */ - retained_ptes = suspend_retains_ptes(vm) && - !i915_vm_to_ggtt(vm)->pte_lost && - !GEM_WARN_ON(i915_vm_to_ggtt(vm)->probed_pte != read_last_pte(vm)); + if (all_evicted) { + drm_WARN_ON(&vm->i915->drm, !list_empty(&vm->bound_list)); + return false; + } - if (!retained_ptes) - vm->clear_range(vm, 0, vm->total); + /* First fill our portion of the GTT with scratch pages */ + vm->clear_range(vm, 0, vm->total); /* clflush objects bound into the GGTT and rebind them. */ list_for_each_entry(vma, &vm->bound_list, vm_link) { @@ -1275,16 +1635,18 @@ bool i915_ggtt_resume_vm(struct i915_address_space *vm) atomic_read(&vma->flags) & I915_VMA_BIND_MASK; GEM_BUG_ON(!was_bound); - if (!retained_ptes) { - /* - * Clear the bound flags of the vma resource to allow - * ptes to be repopulated. - */ - vma->resource->bound_flags = 0; - vma->ops->bind_vma(vm, NULL, vma->resource, - obj ? obj->cache_level : 0, - was_bound); - } + + /* + * Clear the bound flags of the vma resource to allow + * ptes to be repopulated. + */ + vma->resource->bound_flags = 0; + vma->ops->bind_vma(vm, NULL, vma->resource, + obj ? 
obj->pat_index : + i915_gem_get_pat_index(vm->i915, + I915_CACHE_NONE), + was_bound); + if (obj) { /* only used during resume => exclusive access */ write_domain_objs |= fetch_and_zero(&obj->write_domain); obj->read_domains |= I915_GEM_DOMAIN_GTT; @@ -1296,24 +1658,25 @@ bool i915_ggtt_resume_vm(struct i915_address_space *vm) void i915_ggtt_resume(struct i915_ggtt *ggtt) { + struct intel_gt *gt; bool flush; - intel_gt_check_and_clear_faults(ggtt->vm.gt); + list_for_each_entry(gt, &ggtt->gt_list, ggtt_link) + intel_gt_check_and_clear_faults(gt); + + flush = i915_ggtt_resume_vm(&ggtt->vm, false); + + if (drm_mm_node_allocated(&ggtt->error_capture)) + ggtt->vm.scratch_range(&ggtt->vm, ggtt->error_capture.start, + ggtt->error_capture.size); - flush = i915_ggtt_resume_vm(&ggtt->vm); + list_for_each_entry(gt, &ggtt->gt_list, ggtt_link) + intel_uc_resume_mappings(&gt->uc); ggtt->invalidate(ggtt); if (flush) wbinvd_on_all_cpus(); - if (GRAPHICS_VER(ggtt->vm.i915) >= 8) - setup_private_pat(ggtt->vm.gt->uncore); intel_ggtt_restore_fences(ggtt); } - -void i915_ggtt_mark_pte_lost(struct drm_i915_private *i915, bool val) -{ - to_gt(i915)->ggtt->pte_lost = val; -}
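
The new GGTT bind path (gen8_ggtt_bind_ptes) chunks each update into at most 511 PTEs per MI_UPDATE_GTT and reserves 2 dwords per PTE plus a 2-dword command header of ring space. Below is a minimal standalone sketch of that chunking arithmetic; the helper name and the user-space setting are illustrative only, not the driver's API. If no bind context is usable (GT wedged, not awake, or the request fails), the diff falls back to the CPU/MMIO insert paths.

#include <stdint.h>
#include <stdio.h>

/*
 * Mirror of the chunking in gen8_ggtt_bind_ptes(): MI_UPDATE_GTT could
 * carry 512 entries, but 511 per command is used to avoid engine resets.
 * Each PTE is emitted as two dwords, after a two-dword command header.
 */
#define MAX_PTES_PER_MI_UPDATE_GTT 511u

static uint32_t ring_dwords_for_chunk(uint32_t n_ptes)
{
	return 2 * n_ptes + 2; /* matches intel_ring_begin(rq, 2 * n_ptes + 2) */
}

int main(void)
{
	uint32_t num_entries = 2048;	/* example: 8 MiB worth of 4 KiB PTEs */
	uint32_t offset = 0;		/* GGTT offset, in PTE units */

	while (num_entries) {
		uint32_t n_ptes = num_entries < MAX_PTES_PER_MI_UPDATE_GTT ?
				  num_entries : MAX_PTES_PER_MI_UPDATE_GTT;

		printf("chunk at PTE %u: %u PTEs, %u ring dwords\n",
		       offset, n_ptes, ring_dwords_for_chunk(n_ptes));

		num_entries -= n_ptes;
		offset += n_ptes;
	}
	return 0;
}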
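
gen8_ggtt_pte_decode() and the new vm->read_entry hooks recover the DMA address plus the present/local flags from a raw PTE. The round-trip sketch below is self-contained; the bit positions (present = bit 0, local memory = bit 1, address = bits 12..45) are written out locally as assumptions for illustration rather than taken from the driver headers.

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

typedef uint64_t gen8_pte_t;

/* Assumed layout: present bit, local-memory bit, page-aligned address field. */
#define PAGE_PRESENT	(1ull << 0)
#define PTE_LM_BIT	(1ull << 1)
#define PTE_ADDR_MASK	(((1ull << 46) - 1) & ~((1ull << 12) - 1))

static gen8_pte_t pte_encode(uint64_t addr, bool lm)
{
	gen8_pte_t pte = (addr & PTE_ADDR_MASK) | PAGE_PRESENT;

	if (lm)
		pte |= PTE_LM_BIT;
	return pte;
}

static uint64_t pte_decode(gen8_pte_t pte, bool *is_present, bool *is_local)
{
	*is_present = pte & PAGE_PRESENT;
	*is_local = pte & PTE_LM_BIT;
	return pte & PTE_ADDR_MASK;
}

int main(void)
{
	bool present, local;
	uint64_t addr = 0x1234ull << 12;	/* page-aligned DMA address */
	uint64_t got = pte_decode(pte_encode(addr, true), &present, &local);

	/* Decoding must give back exactly what was encoded. */
	assert(got == addr && present && local);
	return 0;
}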
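
gen8/gen6_ggtt_insert_entries() now start writing at vma_res->start - vma_res->guard, fill the leading guard with scratch PTEs, write the object's pages, then pad out to the trailing guard. A small sketch of that index arithmetic, assuming 4 KiB GTT pages and made-up example sizes:

#include <stdint.h>
#include <stdio.h>

#define GTT_PAGE_SIZE 4096ull

int main(void)
{
	/* Example values only: node of 0x8000 bytes with 0x2000 of guard on each side. */
	uint64_t start = 0x100000, node_size = 0x8000, guard = 0x2000;

	uint64_t first = (start - guard) / GTT_PAGE_SIZE;		/* first PTE touched */
	uint64_t lead_end = first + guard / GTT_PAGE_SIZE;		/* end of leading guard */
	uint64_t body_end = lead_end + node_size / GTT_PAGE_SIZE;	/* end of object PTEs */
	uint64_t last = lead_end + (node_size + guard) / GTT_PAGE_SIZE;	/* end of trailing guard */

	printf("scratch PTEs: [%llu, %llu) and [%llu, %llu), object PTEs: [%llu, %llu)\n",
	       (unsigned long long)first, (unsigned long long)lead_end,
	       (unsigned long long)body_end, (unsigned long long)last,
	       (unsigned long long)lead_end, (unsigned long long)body_end);
	return 0;
}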
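
ggtt_reserve_guc_top() switches from reserving everything above GUC_GGTT_TOP to reserving a fixed GUC_TOP_RESERVE_SIZE = SZ_4G - GUC_GGTT_TOP chunk at the top of whatever the GGTT size is, so the same math also works when the GGTT is only 2 GiB. A tiny sketch of that computation; the numeric value used for GUC_GGTT_TOP is an assumption here.

#include <stdint.h>
#include <stdio.h>

#define SZ_4G		(1ull << 32)
#define GUC_GGTT_TOP	0xFEE00000ull	/* assumed value of the driver constant */
#define GUC_TOP_RESERVE_SIZE (SZ_4G - GUC_GGTT_TOP)

int main(void)
{
	/* Both a 4 GiB and a 2 GiB GGTT reserve the same fixed-size chunk at the top. */
	uint64_t ggtt_sizes[] = { SZ_4G, 1ull << 31 };

	for (int i = 0; i < 2; i++) {
		uint64_t offset = ggtt_sizes[i] - GUC_TOP_RESERVE_SIZE;

		printf("GGTT %llu MiB: reserve %llu MiB at offset 0x%llx\n",
		       (unsigned long long)(ggtt_sizes[i] >> 20),
		       (unsigned long long)(GUC_TOP_RESERVE_SIZE >> 20),
		       (unsigned long long)offset);
	}
	return 0;
}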
