From 90007bca61627bb2fbd63bc3da0277abe4a43550 Mon Sep 17 00:00:00 2001
From: Rodrigo Vivi
Date: Tue, 15 Aug 2017 16:16:48 -0700
Subject: drm/i915/cnl: Introduce initial Cannonlake Workarounds.

Let's inherit workarounds from previous platforms that, according to
wa_database and BSpec, are still valid for Cannonlake.

v2: Add missed workarounds.
v3: Rebase.
v4: Remove bad chunk that was added to rc6 disable. (Ander)
    Also remove A0 W/a that are not needed anymore.
v5: Rebase on top of CFL.
v6: Remove empty gen9_init_perctx_bb and gen9_init_indirectctx_bb since
    they don't carry any gen10 related W/a. (by Oscar)
    Also remove A0 exclusive workaround.
v7: Remove more A0 exclusive workarounds. As pointed out by Oscar, many
    workarounds were changed to be A0 only, so let's remove them.

Cc: Oscar Mateo
Cc: Mika Kuoppala
Signed-off-by: Rodrigo Vivi
Reviewed-by: Oscar Mateo
Link: https://patchwork.freedesktop.org/patch/msgid/20170815231651.975-1-rodrigo.vivi@intel.com
---
 drivers/gpu/drm/i915/i915_gem_gtt.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'drivers/gpu/drm/i915/i915_gem_gtt.c')

diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index d60f38adc4c4..0f7399801398 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -1885,12 +1885,12 @@ static void gtt_write_workarounds(struct drm_i915_private *dev_priv)
	 * called on driver load and after a GPU reset, so you can place
	 * workarounds here even if they get overwritten by GPU reset.
	 */
-	/* WaIncreaseDefaultTLBEntries:chv,bdw,skl,bxt,kbl,glk,cfl */
+	/* WaIncreaseDefaultTLBEntries:chv,bdw,skl,bxt,kbl,glk,cfl,cnl */
 	if (IS_BROADWELL(dev_priv))
 		I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW);
 	else if (IS_CHERRYVIEW(dev_priv))
 		I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV);
-	else if (IS_GEN9_BC(dev_priv))
+	else if (IS_GEN9_BC(dev_priv) || IS_GEN10(dev_priv))
 		I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL);
 	else if (IS_GEN9_LP(dev_priv))
 		I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT);
--
cgit

From 66df1014efbadbdd2a550c6c67a815db9b4d05bd Mon Sep 17 00:00:00 2001
From: Chris Wilson
Date: Tue, 22 Aug 2017 18:38:28 +0100
Subject: drm/i915: Keep a small stash of preallocated WC pages

We use WC pages for coherent writes into the ppGTT on !llc architectures.
However, creating a WC page requires a stop_machine(), i.e. it is very
slow. To compensate we currently keep a per-vm cache of recently freed
pages, but we still see the slow startup of new contexts. We can amortize
that cost slightly by allocating WC pages in small batches
(PAGEVEC_SIZE == 14), and since creating a WC page implies a
stop_machine(), there is no penalty for keeping that stash global.

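For readers who have not met the pagevec trick before, the shape of the
change is sketched below as a small stand-alone user-space program. Every
name in it (page_stash, alloc_wc_page, make_pages_wc) is invented for the
illustration, and the expensive batch conversion merely stands in for
set_pages_array_wc() and its stop_machine(); the authoritative version is
the pagevec-based code in the diff that follows.

/*
 * Toy model of the stash-and-batch idea: a per-VM free list backed by
 * one global stash, refilled a batch at a time so the expensive "make
 * these pages WC" step is paid once per batch rather than once per page.
 */
#include <stdio.h>
#include <stdlib.h>

#define STASH_SIZE 14                      /* mirrors PAGEVEC_SIZE == 14 */

struct page_stash {
	void *pages[STASH_SIZE];
	unsigned int nr;
};

/* One global stash: a WC page is reusable by any VM. */
static struct page_stash wc_stash;

/* Stand-in for set_pages_array_wc() + stop_machine(): costly per call. */
static void make_pages_wc(void **pages, unsigned int nr)
{
	printf("expensive WC conversion for a batch of %u page(s)\n", nr);
}

static void *alloc_wc_page(struct page_stash *vm_free_pages)
{
	struct page_stash *pvec = vm_free_pages;

	/* Fast path: reuse a page this VM freed recently. */
	if (pvec->nr)
		return pvec->pages[--pvec->nr];

	/* Next, look in the global stash of ready-made WC pages. */
	pvec = &wc_stash;
	if (pvec->nr)
		return pvec->pages[--pvec->nr];

	/* Otherwise allocate a whole batch and convert it in one go. */
	while (pvec->nr < STASH_SIZE) {
		void *page = malloc(4096);

		if (!page)
			break;
		pvec->pages[pvec->nr++] = page;
	}
	if (!pvec->nr)
		return NULL;

	make_pages_wc(pvec->pages, pvec->nr);
	return pvec->pages[--pvec->nr];
}

int main(void)
{
	struct page_stash vm_free_pages = { .nr = 0 };
	void *a = alloc_wc_page(&vm_free_pages); /* converts one batch */
	void *b = alloc_wc_page(&vm_free_pages); /* served from the stash */

	printf("got %p and %p; %u page(s) left in the stash\n",
	       a, b, wc_stash.nr);
	free(a);
	free(b);
	while (wc_stash.nr)
		free(wc_stash.pages[--wc_stash.nr]);
	return 0;
}
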
Signed-off-by: Chris Wilson
Reviewed-by: Matthew Auld
Link: https://patchwork.freedesktop.org/patch/msgid/20170822173828.5932-1-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/i915/i915_gem_gtt.c | 122 +++++++++++++++++++++++++++++-------
 1 file changed, 98 insertions(+), 24 deletions(-)

(limited to 'drivers/gpu/drm/i915/i915_gem_gtt.c')

diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 0f7399801398..708b95cd8c30 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -356,39 +356,86 @@ static gen6_pte_t iris_pte_encode(dma_addr_t addr,
 static struct page *vm_alloc_page(struct i915_address_space *vm, gfp_t gfp)
 {
-	struct page *page;
+	struct pagevec *pvec = &vm->free_pages;
 
 	if (I915_SELFTEST_ONLY(should_fail(&vm->fault_attr, 1)))
 		i915_gem_shrink_all(vm->i915);
 
-	if (vm->free_pages.nr)
-		return vm->free_pages.pages[--vm->free_pages.nr];
+	if (likely(pvec->nr))
+		return pvec->pages[--pvec->nr];
+
+	if (!vm->pt_kmap_wc)
+		return alloc_page(gfp);
+
+	/* A placeholder for a specific mutex to guard the WC stash */
+	lockdep_assert_held(&vm->i915->drm.struct_mutex);
+
+	/* Look in our global stash of WC pages... */
+	pvec = &vm->i915->mm.wc_stash;
+	if (likely(pvec->nr))
+		return pvec->pages[--pvec->nr];
 
-	page = alloc_page(gfp);
-	if (!page)
+	/* Otherwise batch allocate pages to amortize cost of set_pages_wc. */
+	do {
+		struct page *page;
+
+		page = alloc_page(gfp);
+		if (unlikely(!page))
+			break;
+
+		pvec->pages[pvec->nr++] = page;
+	} while (pagevec_space(pvec));
+
+	if (unlikely(!pvec->nr))
 		return NULL;
 
-	if (vm->pt_kmap_wc)
-		set_pages_array_wc(&page, 1);
+	set_pages_array_wc(pvec->pages, pvec->nr);
 
-	return page;
+	return pvec->pages[--pvec->nr];
 }
 
-static void vm_free_pages_release(struct i915_address_space *vm)
+static void vm_free_pages_release(struct i915_address_space *vm,
+				  bool immediate)
 {
-	GEM_BUG_ON(!pagevec_count(&vm->free_pages));
+	struct pagevec *pvec = &vm->free_pages;
+
+	GEM_BUG_ON(!pagevec_count(pvec));
+
+	if (vm->pt_kmap_wc) {
+		struct pagevec *stash = &vm->i915->mm.wc_stash;
 
-	if (vm->pt_kmap_wc)
-		set_pages_array_wb(vm->free_pages.pages,
-				   pagevec_count(&vm->free_pages));
+		/* When we use WC, first fill up the global stash and then
+		 * only if full immediately free the overflow.
+		 */
+
+		lockdep_assert_held(&vm->i915->drm.struct_mutex);
+		if (pagevec_space(stash)) {
+			do {
+				stash->pages[stash->nr++] =
+					pvec->pages[--pvec->nr];
+				if (!pvec->nr)
+					return;
+			} while (pagevec_space(stash));
+
+			/* As we have made some room in the VM's free_pages,
+			 * we can wait for it to fill again. Unless we are
+			 * inside i915_address_space_fini() and must
+			 * immediately release the pages!
+			 */
+			if (!immediate)
+				return;
+		}
+
+		set_pages_array_wb(pvec->pages, pvec->nr);
+	}
 
-	__pagevec_release(&vm->free_pages);
+	__pagevec_release(pvec);
 }
 
 static void vm_free_page(struct i915_address_space *vm, struct page *page)
 {
 	if (!pagevec_add(&vm->free_pages, page))
-		vm_free_pages_release(vm);
+		vm_free_pages_release(vm, false);
 }
 
 static int __setup_page_dma(struct i915_address_space *vm,
@@ -452,12 +499,31 @@ static void fill_page_dma_32(struct i915_address_space *vm,
 static int setup_scratch_page(struct i915_address_space *vm, gfp_t gfp)
 {
-	return __setup_page_dma(vm, &vm->scratch_page, gfp | __GFP_ZERO);
+	struct page *page;
+	dma_addr_t addr;
+
+	page = alloc_page(gfp | __GFP_ZERO);
+	if (unlikely(!page))
+		return -ENOMEM;
+
+	addr = dma_map_page(vm->dma, page, 0, PAGE_SIZE,
+			    PCI_DMA_BIDIRECTIONAL);
+	if (unlikely(dma_mapping_error(vm->dma, addr))) {
+		__free_page(page);
+		return -ENOMEM;
+	}
+
+	vm->scratch_page.page = page;
+	vm->scratch_page.daddr = addr;
+	return 0;
 }
 
 static void cleanup_scratch_page(struct i915_address_space *vm)
 {
-	cleanup_page_dma(vm, &vm->scratch_page);
+	struct i915_page_dma *p = &vm->scratch_page;
+
+	dma_unmap_page(vm->dma, p->daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+	__free_page(p->page);
 }
 
 static struct i915_page_table *alloc_pt(struct i915_address_space *vm)
@@ -1337,18 +1403,18 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
 		1ULL << 48 :
 		1ULL << 32;
 
-	ret = gen8_init_scratch(&ppgtt->base);
-	if (ret) {
-		ppgtt->base.total = 0;
-		return ret;
-	}
-
 	/* There are only few exceptions for gen >=6. chv and bxt.
 	 * And we are not sure about the latter so play safe for now.
 	 */
 	if (IS_CHERRYVIEW(dev_priv) || IS_BROXTON(dev_priv))
 		ppgtt->base.pt_kmap_wc = true;
 
+	ret = gen8_init_scratch(&ppgtt->base);
+	if (ret) {
+		ppgtt->base.total = 0;
+		return ret;
+	}
+
 	if (use_4lvl(vm)) {
 		ret = setup_px(&ppgtt->base, &ppgtt->pml4);
 		if (ret)
@@ -1872,7 +1938,7 @@ static void i915_address_space_init(struct i915_address_space *vm,
 static void i915_address_space_fini(struct i915_address_space *vm)
 {
 	if (pagevec_count(&vm->free_pages))
-		vm_free_pages_release(vm);
+		vm_free_pages_release(vm, true);
 
 	i915_gem_timeline_fini(&vm->timeline);
 	drm_mm_takedown(&vm->mm);
@@ -2598,6 +2664,7 @@ void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv)
 {
 	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 	struct i915_vma *vma, *vn;
+	struct pagevec *pvec;
 
 	ggtt->base.closed = true;
 
@@ -2621,6 +2688,13 @@ void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv)
 	}
 
 	ggtt->base.cleanup(&ggtt->base);
+
+	pvec = &dev_priv->mm.wc_stash;
+	if (pvec->nr) {
+		set_pages_array_wb(pvec->pages, pvec->nr);
+		__pagevec_release(pvec);
+	}
+
 	mutex_unlock(&dev_priv->drm.struct_mutex);
 
 	arch_phys_wc_del(ggtt->mtrr);
--
cgit

From 385db982b277a1eb5998b6316a500a2d6d9592b1 Mon Sep 17 00:00:00 2001
From: Rodrigo Vivi
Date: Tue, 29 Aug 2017 16:09:07 -0700
Subject: drm/i915/cnl: Avoid ioremap_wc on Cannonlake as well.
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The driver's CPU access to the GTT is via the GTTMMADR BAR. The current
HW implementation of that BAR only supports <= DW (and maybe QW) writes,
not the 16/32/64B writes that could occur with WC and/or SSE/AVX moves.
GTTMMADR must therefore be marked uncacheable (UC), and accesses to
GTTMMADR (GTT) must be 64 bits or less (i.e. one GTT entry).

v2: Got clarification on the reasons; the spec is being updated to
    reflect them now.

Cc: Joonas Lahtinen
Suggested-by: Ben Widawsky
Signed-off-by: Rodrigo Vivi
Reviewed-by: Joonas Lahtinen
Link: https://patchwork.freedesktop.org/patch/msgid/20170829230907.21363-1-rodrigo.vivi@intel.com
---
 drivers/gpu/drm/i915/i915_gem_gtt.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

(limited to 'drivers/gpu/drm/i915/i915_gem_gtt.c')

diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 708b95cd8c30..7da9621d2c60 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -2790,13 +2790,13 @@ static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
 	phys_addr = pci_resource_start(pdev, 0) + pci_resource_len(pdev, 0) / 2;
 
 	/*
-	 * On BXT writes larger than 64 bit to the GTT pagetable range will be
-	 * dropped. For WC mappings in general we have 64 byte burst writes
-	 * when the WC buffer is flushed, so we can't use it, but have to
+	 * On BXT+/CNL+ writes larger than 64 bit to the GTT pagetable range
+	 * will be dropped. For WC mappings in general we have 64 byte burst
+	 * writes when the WC buffer is flushed, so we can't use it, but have to
 	 * resort to an uncached mapping. The WC issue is easily caught by the
 	 * readback check when writing GTT PTE entries.
 	 */
-	if (IS_GEN9_LP(dev_priv))
+	if (IS_GEN9_LP(dev_priv) || INTEL_GEN(dev_priv) >= 10)
 		ggtt->gsm = ioremap_nocache(phys_addr, size);
 	else
 		ggtt->gsm = ioremap_wc(phys_addr, size);
--
cgit

From 6e31cdcfe17d8a25530924183d4a843602baebb1 Mon Sep 17 00:00:00 2001
From: Zhi Wang
Date: Fri, 1 Sep 2017 03:36:14 +0800
Subject: drm/i915: Fix the missing PPAT cache attributes on CNL

Add back the GEN8_PPAT_WB cache attributes in cnl_setup_private_ppat(),
which were missing on CNL.

Fixes: 4e34935fcf69 ("drm/i915/cnl: Setup PAT Index.")
Cc: Ben Widawsky
Cc: Rodrigo Vivi
Cc: Chris Wilson
Suggested-by: Joonas Lahtinen
Signed-off-by: Zhi Wang
Reviewed-by: Rodrigo Vivi
Signed-off-by: Rodrigo Vivi
Link: https://patchwork.freedesktop.org/patch/msgid/1504208177-27784-1-git-send-email-zhi.a.wang@intel.com
---
 drivers/gpu/drm/i915/i915_gem_gtt.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

(limited to 'drivers/gpu/drm/i915/i915_gem_gtt.c')

diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 7da9621d2c60..c33c2f97c814 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -2828,10 +2828,10 @@ static void cnl_setup_private_ppat(struct drm_i915_private *dev_priv)
 	I915_WRITE(GEN10_PAT_INDEX(1), GEN8_PPAT_WC | GEN8_PPAT_LLCELLC);
 	I915_WRITE(GEN10_PAT_INDEX(2), GEN8_PPAT_WT | GEN8_PPAT_LLCELLC);
 	I915_WRITE(GEN10_PAT_INDEX(3), GEN8_PPAT_UC);
-	I915_WRITE(GEN10_PAT_INDEX(4), GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0));
-	I915_WRITE(GEN10_PAT_INDEX(5), GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1));
-	I915_WRITE(GEN10_PAT_INDEX(6), GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2));
-	I915_WRITE(GEN10_PAT_INDEX(7), GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
+	I915_WRITE(GEN10_PAT_INDEX(4), GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0));
+	I915_WRITE(GEN10_PAT_INDEX(5), GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1));
+	I915_WRITE(GEN10_PAT_INDEX(6), GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2));
+	I915_WRITE(GEN10_PAT_INDEX(7), GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
 }
 
 /* The GGTT and PPGTT need a private PPAT setup in order to handle cacheability
--
cgit
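
A note on why the missing GEN8_PPAT_WB in the last patch matters: the
memory type of a gen8-style private PAT entry lives in the low two bits,
so leaving it out makes PAT indices 4-7 decode as uncacheable (UC == 0)
instead of write-back. The small stand-alone program below illustrates the
decode; the bit layout and the PPAT_* values here mirror the i915
GEN8_PPAT_* macros from memory and should be verified against the driver
headers (i915_reg.h / i915_gem_gtt.h) before being relied on.

#include <stdio.h>

/*
 * Illustrative gen8-style private PAT entry layout. The values are
 * assumptions made for this example, not copied from the headers.
 */
#define PPAT_UC         0x0u                     /* memory type, bits 1:0 */
#define PPAT_WC         0x1u
#define PPAT_WT         0x2u
#define PPAT_WB         0x3u
#define PPAT_LLCELLC    (0x2u << 2)              /* target cache, bits 3:2 */
#define PPAT_AGE(x)     ((unsigned int)(x) << 4) /* LRU age hint, bits 5:4 */

static const char *memory_type(unsigned int entry)
{
	static const char * const names[] = { "UC", "WC", "WT", "WB" };

	return names[entry & 0x3u];
}

int main(void)
{
	/* PAT index 4 as programmed before and after the fix above. */
	unsigned int before = PPAT_LLCELLC | PPAT_AGE(0);
	unsigned int after = PPAT_WB | PPAT_LLCELLC | PPAT_AGE(0);

	/* Without the WB bits, the memory-type field decodes as UC (0). */
	printf("before: 0x%02x -> %s\n", before, memory_type(before));
	printf("after : 0x%02x -> %s\n", after, memory_type(after));
	return 0;
}

Compiled and run, this prints "before: 0x08 -> UC" and "after : 0x0b -> WB",
which is the practical effect of the one-line-per-index change above.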