author     Chris Wilson <chris@chris-wilson.co.uk>    2018-01-21 17:31:43 +0000
committer  Rodrigo Vivi <rodrigo.vivi@intel.com>      2018-02-01 07:32:46 -0800
commit     124804c4c4b2e84e956cedd17878562e230a907f (patch)
tree       ac7d8efe51b78395d3cc4bb059c0d34102fde8b6 /drivers/gpu/drm/i915/i915_gem_gtt.c
parent     c5bd1fc9a6c843c85a5cea5765cdc997f832df3c (diff)
drm/i915: Protect WC stash allocation against direct reclaim
As we attempt to allocate pages for use in a new WC stash, direct
reclaim may run underneath us and fill up the WC stash. We have to be
careful then not to overflow the pvec.

Fixes: 66df1014efba ("drm/i915: Keep a small stash of preallocated WC pages")
Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=103109
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Matthew Auld <matthew.auld@intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180121173143.17090-1-chris@chris-wilson.co.uk
(cherry picked from commit 073cd7816685ac77c6d8b4d321a5586c9177b76a)
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
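To make the failure mode concrete, here is a minimal, self-contained C sketch of the pattern the patch adopts. It is an illustration, not the kernel code, and every identifier in it (STASH_CAP, wc_stash, alloc_may_refill, fill_stash) is made up for the example. Each allocation may reenter and grow the shared stash (standing in for direct reclaim running the shrinker), so the filler batches into a private stash and clamps the merge to the space that remains at merge time:

/* Illustrative only: models a fixed-capacity stash that can be refilled
 * "underneath" the code trying to fill it, and the clamp-on-merge fix. */
#include <stdio.h>
#include <string.h>

#define STASH_CAP 15			/* stands in for PAGEVEC_SIZE */

struct stash { int nr; void *slots[STASH_CAP]; };

static struct stash wc_stash;		/* the shared stash (vm->free_pages) */
static char pool[4096];			/* fake page-allocator backing store */
static int used;

/* Each allocation may "reclaim" and push a page into the shared stash,
 * just as direct reclaim can via the shrinker in the real driver. */
static void *alloc_may_refill(void)
{
	if (wc_stash.nr < STASH_CAP)
		wc_stash.slots[wc_stash.nr++] = &pool[used++];
	return &pool[used++];
}

static void fill_stash(void)
{
	struct stash local = { .nr = 0 };
	int space, nr;

	/* Batch into a private stash; the shared one may grow meanwhile. */
	while (local.nr < STASH_CAP - wc_stash.nr)
		local.slots[local.nr++] = alloc_may_refill();

	/* Clamp the merge to the space left *now*, not when we started. */
	space = STASH_CAP - wc_stash.nr;
	nr = local.nr < space ? local.nr : space;
	memcpy(&wc_stash.slots[wc_stash.nr], local.slots,
	       sizeof(local.slots[0]) * nr);
	wc_stash.nr += nr;
	/* The local.nr - nr leftover entries would be released here. */
}

int main(void)
{
	fill_stash();
	printf("stash holds %d/%d entries; no overflow\n",
	       wc_stash.nr, STASH_CAP);
	return 0;
}

Without the private stash, the buggy pattern writes wc_stash.slots[wc_stash.nr++] while the reentrant path is also appending, and the array overflows; the patch below applies the same batch-then-clamp idea to vm->free_pages.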
Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem_gtt.c')
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c  32
1 file changed, 25 insertions(+), 7 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 0de4f3f13fc4..133cb9590003 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -377,6 +377,7 @@ static gen6_pte_t iris_pte_encode(dma_addr_t addr,
 static struct page *vm_alloc_page(struct i915_address_space *vm, gfp_t gfp)
 {
 	struct pagevec *pvec = &vm->free_pages;
+	struct pagevec stash;
 
 	if (I915_SELFTEST_ONLY(should_fail(&vm->fault_attr, 1)))
 		i915_gem_shrink_all(vm->i915);
@@ -395,7 +396,15 @@ static struct page *vm_alloc_page(struct i915_address_space *vm, gfp_t gfp)
 	if (likely(pvec->nr))
 		return pvec->pages[--pvec->nr];
 
-	/* Otherwise batch allocate pages to amoritize cost of set_pages_wc. */
+	/*
+	 * Otherwise batch allocate pages to amoritize cost of set_pages_wc.
+	 *
+	 * We have to be careful as page allocation may trigger the shrinker
+	 * (via direct reclaim) which will fill up the WC stash underneath us.
+	 * So we add our WB pages into a temporary pvec on the stack and merge
+	 * them into the WC stash after all the allocations are complete.
+	 */
+	pagevec_init(&stash);
 	do {
 		struct page *page;
 
@@ -403,15 +412,24 @@ static struct page *vm_alloc_page(struct i915_address_space *vm, gfp_t gfp)
 		if (unlikely(!page))
 			break;
 
-		pvec->pages[pvec->nr++] = page;
-	} while (pagevec_space(pvec));
+		stash.pages[stash.nr++] = page;
+	} while (stash.nr < pagevec_space(pvec));
 
-	if (unlikely(!pvec->nr))
-		return NULL;
+	if (stash.nr) {
+		int nr = min_t(int, stash.nr, pagevec_space(pvec));
+		struct page **pages = stash.pages + stash.nr - nr;
 
-	set_pages_array_wc(pvec->pages, pvec->nr);
+		if (nr && !set_pages_array_wc(pages, nr)) {
+			memcpy(pvec->pages + pvec->nr,
+			       pages, sizeof(pages[0]) * nr);
+			pvec->nr += nr;
+			stash.nr -= nr;
+		}
+
+		pagevec_release(&stash);
+	}
 
-	return pvec->pages[--pvec->nr];
+	return likely(pvec->nr) ? pvec->pages[--pvec->nr] : NULL;
 }
 
 static void vm_free_pages_release(struct i915_address_space *vm,
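A note on the design, as the diff itself shows: the allocation loop already bounds stash.nr by pagevec_space(pvec), but reclaim may still have added pages to pvec in the meantime, so the merge clamps again with min_t() at merge time. Pages that no longer fit, or that fail set_pages_array_wc(), remain in the on-stack stash and are handed back by pagevec_release(). The final return also re-checks pvec->nr, since in the worst case the stash contributes nothing and the function must still return NULL.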