Diffstat (limited to 'drivers/gpu/drm/i915/gt/intel_ppgtt.c')
 -rw-r--r--  drivers/gpu/drm/i915/gt/intel_ppgtt.c  209
 1 file changed, 157 insertions, 52 deletions
diff --git a/drivers/gpu/drm/i915/gt/intel_ppgtt.c b/drivers/gpu/drm/i915/gt/intel_ppgtt.c
index f86f7e68ce5e..d07a4f97b943 100644
--- a/drivers/gpu/drm/i915/gt/intel_ppgtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_ppgtt.c
@@ -5,12 +5,15 @@
 
 #include <linux/slab.h>
 
+#include "gem/i915_gem_lmem.h"
+
 #include "i915_trace.h"
+#include "intel_gt.h"
 #include "intel_gtt.h"
 #include "gen6_ppgtt.h"
 #include "gen8_ppgtt.h"
 
-struct i915_page_table *alloc_pt(struct i915_address_space *vm)
+struct i915_page_table *alloc_pt(struct i915_address_space *vm, int sz)
 {
 	struct i915_page_table *pt;
 
@@ -18,23 +21,31 @@ struct i915_page_table *alloc_pt(struct i915_address_space *vm)
 	if (unlikely(!pt))
 		return ERR_PTR(-ENOMEM);
 
-	if (unlikely(setup_page_dma(vm, &pt->base))) {
+	pt->base = vm->alloc_pt_dma(vm, sz);
+	if (IS_ERR(pt->base)) {
 		kfree(pt);
 		return ERR_PTR(-ENOMEM);
 	}
 
+	pt->is_compact = false;
 	atomic_set(&pt->used, 0);
 	return pt;
 }
 
-struct i915_page_directory *__alloc_pd(size_t sz)
+struct i915_page_directory *__alloc_pd(int count)
 {
 	struct i915_page_directory *pd;
 
-	pd = kzalloc(sz, I915_GFP_ALLOW_FAIL);
+	pd = kzalloc(sizeof(*pd), I915_GFP_ALLOW_FAIL);
 	if (unlikely(!pd))
 		return NULL;
 
+	pd->entry = kcalloc(count, sizeof(*pd->entry), I915_GFP_ALLOW_FAIL);
+	if (unlikely(!pd->entry)) {
+		kfree(pd);
+		return NULL;
+	}
+
 	spin_lock_init(&pd->lock);
 	return pd;
 }
@@ -43,11 +54,13 @@ struct i915_page_directory *alloc_pd(struct i915_address_space *vm)
 {
 	struct i915_page_directory *pd;
 
-	pd = __alloc_pd(sizeof(*pd));
+	pd = __alloc_pd(I915_PDES);
 	if (unlikely(!pd))
 		return ERR_PTR(-ENOMEM);
 
-	if (unlikely(setup_page_dma(vm, px_base(pd)))) {
+	pd->pt.base = vm->alloc_pt_dma(vm, I915_GTT_PAGE_SIZE_4K);
+	if (IS_ERR(pd->pt.base)) {
+		kfree(pd->entry);
 		kfree(pd);
 		return ERR_PTR(-ENOMEM);
 	}
@@ -55,41 +68,51 @@ struct i915_page_directory *alloc_pd(struct i915_address_space *vm)
 	return pd;
 }
 
-void free_pd(struct i915_address_space *vm, struct i915_page_dma *pd)
+void free_px(struct i915_address_space *vm, struct i915_page_table *pt, int lvl)
 {
-	cleanup_page_dma(vm, pd);
-	kfree(pd);
+	BUILD_BUG_ON(offsetof(struct i915_page_directory, pt));
+
+	if (lvl) {
+		struct i915_page_directory *pd =
+			container_of(pt, typeof(*pd), pt);
+		kfree(pd->entry);
+	}
+
+	if (pt->base)
+		i915_gem_object_put(pt->base);
+
+	kfree(pt);
 }
 
-static inline void
-write_dma_entry(struct i915_page_dma * const pdma,
+static void
+write_dma_entry(struct drm_i915_gem_object * const pdma,
 		const unsigned short idx,
 		const u64 encoded_entry)
 {
-	u64 * const vaddr = kmap_atomic(pdma->page);
+	u64 * const vaddr = __px_vaddr(pdma);
 
 	vaddr[idx] = encoded_entry;
-	kunmap_atomic(vaddr);
+	drm_clflush_virt_range(&vaddr[idx], sizeof(u64));
 }
 
 void
 __set_pd_entry(struct i915_page_directory * const pd,
 	       const unsigned short idx,
-	       struct i915_page_dma * const to,
+	       struct i915_page_table * const to,
 	       u64 (*encode)(const dma_addr_t,
 			     const enum i915_cache_level))
 {
 	/* Each thread pre-pins the pd, and we may have a thread per pde. */
-	GEM_BUG_ON(atomic_read(px_used(pd)) > NALLOC * ARRAY_SIZE(pd->entry));
+	GEM_BUG_ON(atomic_read(px_used(pd)) > NALLOC * I915_PDES);
 
 	atomic_inc(px_used(pd));
 	pd->entry[idx] = to;
-	write_dma_entry(px_base(pd), idx, encode(to->daddr, I915_CACHE_LLC));
+	write_dma_entry(px_base(pd), idx, encode(px_dma(to), I915_CACHE_LLC));
 }
 
 void
 clear_pd_entry(struct i915_page_directory * const pd,
 	       const unsigned short idx,
-	       const struct i915_page_scratch * const scratch)
+	       const struct drm_i915_gem_object * const scratch)
 {
 	GEM_BUG_ON(atomic_read(px_used(pd)) == 0);
 
@@ -102,7 +125,7 @@ bool
 release_pd_entry(struct i915_page_directory * const pd,
 		 const unsigned short idx,
 		 struct i915_page_table * const pt,
-		 const struct i915_page_scratch * const scratch)
+		 const struct drm_i915_gem_object * const scratch)
 {
 	bool free = false;
 
@@ -125,28 +148,29 @@ int i915_ppgtt_init_hw(struct intel_gt *gt)
 
 	gtt_write_workarounds(gt);
 
-	if (IS_GEN(i915, 6))
+	if (GRAPHICS_VER(i915) == 6)
 		gen6_ppgtt_enable(gt);
-	else if (IS_GEN(i915, 7))
+	else if (GRAPHICS_VER(i915) == 7)
 		gen7_ppgtt_enable(gt);
 
 	return 0;
 }
 
 static struct i915_ppgtt *
-__ppgtt_create(struct intel_gt *gt)
+__ppgtt_create(struct intel_gt *gt, unsigned long lmem_pt_obj_flags)
 {
-	if (INTEL_GEN(gt->i915) < 8)
+	if (GRAPHICS_VER(gt->i915) < 8)
 		return gen6_ppgtt_create(gt);
 	else
-		return gen8_ppgtt_create(gt);
+		return gen8_ppgtt_create(gt, lmem_pt_obj_flags);
 }
 
-struct i915_ppgtt *i915_ppgtt_create(struct intel_gt *gt)
+struct i915_ppgtt *i915_ppgtt_create(struct intel_gt *gt,
+				     unsigned long lmem_pt_obj_flags)
 {
 	struct i915_ppgtt *ppgtt;
 
-	ppgtt = __ppgtt_create(gt);
+	ppgtt = __ppgtt_create(gt, lmem_pt_obj_flags);
 	if (IS_ERR(ppgtt))
 		return ppgtt;
 
@@ -155,64 +179,145 @@ struct i915_ppgtt *i915_ppgtt_create(struct intel_gt *gt)
 	return ppgtt;
 }
 
-static int ppgtt_bind_vma(struct i915_vma *vma,
-			  enum i915_cache_level cache_level,
-			  u32 flags)
+void ppgtt_bind_vma(struct i915_address_space *vm,
+		    struct i915_vm_pt_stash *stash,
+		    struct i915_vma_resource *vma_res,
+		    unsigned int pat_index,
+		    u32 flags)
 {
 	u32 pte_flags;
-	int err;
 
-	if (flags & I915_VMA_ALLOC) {
-		err = vma->vm->allocate_va_range(vma->vm,
-						 vma->node.start, vma->size);
-		if (err)
-			return err;
-
-		set_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma));
+	if (!vma_res->allocated) {
+		vm->allocate_va_range(vm, stash, vma_res->start,
+				      vma_res->vma_size);
+		vma_res->allocated = true;
 	}
 
 	/* Applicable to VLV, and gen8+ */
 	pte_flags = 0;
-	if (i915_gem_object_is_readonly(vma->obj))
+	if (vma_res->bi.readonly)
 		pte_flags |= PTE_READ_ONLY;
+	if (vma_res->bi.lmem)
+		pte_flags |= PTE_LM;
 
-	GEM_BUG_ON(!test_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma)));
-	vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
+	vm->insert_entries(vm, vma_res, pat_index, pte_flags);
 	wmb();
+}
 
-	return 0;
+void ppgtt_unbind_vma(struct i915_address_space *vm,
+		      struct i915_vma_resource *vma_res)
+{
+	if (!vma_res->allocated)
+		return;
+
+	vm->clear_range(vm, vma_res->start, vma_res->vma_size);
+	vma_invalidate_tlb(vm, vma_res->tlb);
 }
 
-static void ppgtt_unbind_vma(struct i915_vma *vma)
+static unsigned long pd_count(u64 size, int shift)
 {
-	if (test_and_clear_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma)))
-		vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
+	/* Beware later misalignment */
+	return (size + 2 * (BIT_ULL(shift) - 1)) >> shift;
 }
 
-int ppgtt_set_pages(struct i915_vma *vma)
+int i915_vm_alloc_pt_stash(struct i915_address_space *vm,
+			   struct i915_vm_pt_stash *stash,
+			   u64 size)
 {
-	GEM_BUG_ON(vma->pages);
+	unsigned long count;
+	int shift, n, pt_sz;
+
+	shift = vm->pd_shift;
+	if (!shift)
+		return 0;
+
+	pt_sz = stash->pt_sz;
+	if (!pt_sz)
+		pt_sz = I915_GTT_PAGE_SIZE_4K;
+	else
+		GEM_BUG_ON(!IS_DGFX(vm->i915));
+
+	GEM_BUG_ON(!is_power_of_2(pt_sz));
 
-	vma->pages = vma->obj->mm.pages;
+	count = pd_count(size, shift);
+	while (count--) {
+		struct i915_page_table *pt;
 
-	vma->page_sizes = vma->obj->mm.page_sizes;
+		pt = alloc_pt(vm, pt_sz);
+		if (IS_ERR(pt)) {
+			i915_vm_free_pt_stash(vm, stash);
+			return PTR_ERR(pt);
+		}
+
+		pt->stash = stash->pt[0];
+		stash->pt[0] = pt;
+	}
+
+	for (n = 1; n < vm->top; n++) {
+		shift += ilog2(I915_PDES); /* Each PD holds 512 entries */
+		count = pd_count(size, shift);
+		while (count--) {
+			struct i915_page_directory *pd;
+
+			pd = alloc_pd(vm);
+			if (IS_ERR(pd)) {
+				i915_vm_free_pt_stash(vm, stash);
+				return PTR_ERR(pd);
+			}
+
+			pd->pt.stash = stash->pt[1];
+			stash->pt[1] = &pd->pt;
+		}
+	}
 
 	return 0;
 }
 
-void ppgtt_init(struct i915_ppgtt *ppgtt, struct intel_gt *gt)
+int i915_vm_map_pt_stash(struct i915_address_space *vm,
+			 struct i915_vm_pt_stash *stash)
+{
+	struct i915_page_table *pt;
+	int n, err;
+
+	for (n = 0; n < ARRAY_SIZE(stash->pt); n++) {
+		for (pt = stash->pt[n]; pt; pt = pt->stash) {
+			err = map_pt_dma_locked(vm, pt->base);
+			if (err)
+				return err;
+		}
+	}
+
+	return 0;
+}
+
+void i915_vm_free_pt_stash(struct i915_address_space *vm,
+			   struct i915_vm_pt_stash *stash)
+{
+	struct i915_page_table *pt;
+	int n;
+
+	for (n = 0; n < ARRAY_SIZE(stash->pt); n++) {
+		while ((pt = stash->pt[n])) {
+			stash->pt[n] = pt->stash;
+			free_px(vm, pt, n);
		}
+	}
+}
+
+void ppgtt_init(struct i915_ppgtt *ppgtt, struct intel_gt *gt,
+		unsigned long lmem_pt_obj_flags)
 {
 	struct drm_i915_private *i915 = gt->i915;
 
 	ppgtt->vm.gt = gt;
 	ppgtt->vm.i915 = i915;
-	ppgtt->vm.dma = &i915->drm.pdev->dev;
-	ppgtt->vm.total = BIT_ULL(INTEL_INFO(i915)->ppgtt_size);
+	ppgtt->vm.dma = i915->drm.dev;
+	ppgtt->vm.total = BIT_ULL(RUNTIME_INFO(i915)->ppgtt_size);
+	ppgtt->vm.lmem_pt_obj_flags = lmem_pt_obj_flags;
+	dma_resv_init(&ppgtt->vm._resv);
 
 	i915_address_space_init(&ppgtt->vm, VM_CLASS_PPGTT);
 
 	ppgtt->vm.vma_ops.bind_vma    = ppgtt_bind_vma;
 	ppgtt->vm.vma_ops.unbind_vma  = ppgtt_unbind_vma;
-	ppgtt->vm.vma_ops.set_pages   = ppgtt_set_pages;
-	ppgtt->vm.vma_ops.clear_pages = clear_pages;
 }
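
Usage note: the diff moves page-table allocation out of the bind path. Instead of allocating on demand inside allocate_va_range(), a caller now pre-sizes a stash of page tables for the VA range (i915_vm_alloc_pt_stash), maps the backing pages (i915_vm_map_pt_stash), and only then binds; ppgtt_bind_vma() consumes stash entries while building the tree, and anything left over is returned via i915_vm_free_pt_stash(). The following is a minimal caller-side sketch of that lifecycle using only the functions shown above; it is not code from this file. The helper name bind_with_stash is hypothetical, and the zero-initialized stash and the assumption that any required vm locking is already held mirror how the real vma bind path is structured.

/*
 * Hypothetical caller sketch (not part of this diff): pre-allocate
 * and map page tables for a VA range, then bind. Assumes a
 * zero-initialized stash and that any locking required by the
 * map/bind steps is already held by the caller.
 */
static int bind_with_stash(struct i915_address_space *vm,
			   struct i915_vma_resource *vma_res,
			   unsigned int pat_index, u32 flags)
{
	struct i915_vm_pt_stash stash = {};
	int err;

	/* Upper bound of PTs/PDs for the range; may over-allocate. */
	err = i915_vm_alloc_pt_stash(vm, &stash, vma_res->vma_size);
	if (err)
		return err;

	/* Map the backing pages so the PTEs can be written. */
	err = i915_vm_map_pt_stash(vm, &stash);
	if (err)
		goto out_free;

	/* allocate_va_range() inside the bind consumes stash entries. */
	ppgtt_bind_vma(vm, &stash, vma_res, pat_index, flags);

out_free:
	/* Returns whatever the bind did not consume via free_px(). */
	i915_vm_free_pt_stash(vm, &stash);
	return err;
}

The over-allocation in pd_count() is deliberate: the range may be misaligned at both ends, so the size is padded by 2 * (BIT_ULL(shift) - 1) before shifting. For example, with a pd_shift of 21 on gen8 (each 4K page table holds 512 PTEs covering 2M), an unaligned 3M range such as [1.5M, 4.5M) straddles three page tables even though 3M is only one and a half PT spans; the surplus entries are simply handed back to free_px() at the end.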
