Diffstat (limited to 'drivers/gpu/drm/i915/gt/intel_gtt.h')
-rw-r--r--	drivers/gpu/drm/i915/gt/intel_gtt.h	196
1 file changed, 133 insertions(+), 63 deletions(-)
diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.h b/drivers/gpu/drm/i915/gt/intel_gtt.h
index 177b42b935a1..9d3a3ad567a0 100644
--- a/drivers/gpu/drm/i915/gt/intel_gtt.h
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.h
@@ -27,14 +27,17 @@
 #include "gt/intel_reset.h"
 #include "i915_selftest.h"
+#include "i915_vma_resource.h"
 #include "i915_vma_types.h"
+#include "i915_params.h"
+#include "intel_memory_region.h"
 
 #define I915_GFP_ALLOW_FAIL (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)
 
 #if IS_ENABLED(CONFIG_DRM_I915_TRACE_GTT)
-#define DBG(...) trace_printk(__VA_ARGS__)
+#define GTT_TRACE(...) trace_printk(__VA_ARGS__)
 #else
-#define DBG(...)
+#define GTT_TRACE(...)
 #endif
 
 #define NALLOC 3 /* 1 normal, 1 for concurrent threads, 1 for preallocation */
@@ -85,9 +88,20 @@ typedef u64 gen8_pte_t;
 #define BYT_PTE_SNOOPED_BY_CPU_CACHES	REG_BIT(2)
 #define BYT_PTE_WRITEABLE		REG_BIT(1)
 
+#define MTL_PPGTT_PTE_PAT3	BIT_ULL(62)
 #define GEN12_PPGTT_PTE_LM	BIT_ULL(11)
+#define GEN12_PPGTT_PTE_PAT2	BIT_ULL(7)
+#define GEN12_PPGTT_PTE_PAT1	BIT_ULL(4)
+#define GEN12_PPGTT_PTE_PAT0	BIT_ULL(3)
 
-#define GEN12_GGTT_PTE_LM	BIT_ULL(1)
+#define GEN12_GGTT_PTE_LM		BIT_ULL(1)
+#define MTL_GGTT_PTE_PAT0		BIT_ULL(52)
+#define MTL_GGTT_PTE_PAT1		BIT_ULL(53)
+#define GEN12_GGTT_PTE_ADDR_MASK	GENMASK_ULL(45, 12)
+#define MTL_GGTT_PTE_PAT_MASK		GENMASK_ULL(53, 52)
+
+#define GEN12_PDE_64K BIT(6)
+#define GEN12_PTE_PS64 BIT(8)
 
 /*
  * Cacheability Control is a 4-bit value. The low three bits are stored in bits
@@ -141,7 +155,13 @@ typedef u64 gen8_pte_t;
 #define GEN8_PDE_IPS_64K BIT(11)
 #define GEN8_PDE_PS_2M   BIT(7)
 
-enum i915_cache_level;
+#define MTL_PPAT_L4_CACHE_POLICY_MASK	REG_GENMASK(3, 2)
+#define MTL_PAT_INDEX_COH_MODE_MASK	REG_GENMASK(1, 0)
+#define MTL_PPAT_L4_3_UC	REG_FIELD_PREP(MTL_PPAT_L4_CACHE_POLICY_MASK, 3)
+#define MTL_PPAT_L4_1_WT	REG_FIELD_PREP(MTL_PPAT_L4_CACHE_POLICY_MASK, 1)
+#define MTL_PPAT_L4_0_WB	REG_FIELD_PREP(MTL_PPAT_L4_CACHE_POLICY_MASK, 0)
+#define MTL_3_COH_2W	REG_FIELD_PREP(MTL_PAT_INDEX_COH_MODE_MASK, 3)
+#define MTL_2_COH_1W	REG_FIELD_PREP(MTL_PAT_INDEX_COH_MODE_MASK, 2)
 
 struct drm_i915_gem_object;
 struct i915_fence_reg;
@@ -151,12 +171,16 @@ struct intel_gt;
 #define for_each_sgt_daddr(__dp, __iter, __sgt) \
 	__for_each_sgt_daddr(__dp, __iter, __sgt, I915_GTT_PAGE_SIZE)
 
+#define for_each_sgt_daddr_next(__dp, __iter) \
+	__for_each_daddr_next(__dp, __iter, I915_GTT_PAGE_SIZE)
+
 struct i915_page_table {
 	struct drm_i915_gem_object *base;
 	union {
 		atomic_t used;
 		struct i915_page_table *stash;
 	};
+	bool is_compact;
 };
 
 struct i915_page_directory {
@@ -194,21 +218,30 @@ void *__px_vaddr(struct drm_i915_gem_object *p);
 struct i915_vm_pt_stash {
 	/* preallocated chains of page tables/directories */
 	struct i915_page_table *pt[2];
+	/*
+	 * Optionally override the alignment/size of the physical page that
+	 * contains each PT. If not set defaults back to the usual
+	 * I915_GTT_PAGE_SIZE_4K. This does not influence the other paging
+	 * structures. MUST be a power-of-two. ONLY applicable on discrete
+	 * platforms.
+	 */
+	int pt_sz;
 };
 
 struct i915_vma_ops {
 	/* Map an object into an address space with the given cache flags. */
 	void (*bind_vma)(struct i915_address_space *vm,
 			 struct i915_vm_pt_stash *stash,
-			 struct i915_vma *vma,
-			 enum i915_cache_level cache_level,
+			 struct i915_vma_resource *vma_res,
+			 unsigned int pat_index,
 			 u32 flags);
 	/*
 	 * Unmap an object from an address space. This usually consists of
 	 * setting the valid PTE entries to a reserved scratch page.
 	 */
 	void (*unbind_vma)(struct i915_address_space *vm,
-			   struct i915_vma *vma);
+			   struct i915_vma_resource *vma_res);
+
 };
 
 struct i915_address_space {
@@ -216,23 +249,20 @@ struct i915_address_space {
 	struct work_struct release_work;
 
 	struct drm_mm mm;
+	struct {
+		struct drm_i915_gem_object *obj;
+		struct i915_vma *vma;
+	} rsvd;
 	struct intel_gt *gt;
 	struct drm_i915_private *i915;
+	struct drm_i915_file_private *fpriv;
 	struct device *dma;
 	u64 total;		/* size addr space maps (ex. 2GB for ggtt) */
 	u64 reserved;		/* size addr space reserved */
+	u64 min_alignment[INTEL_MEMORY_STOLEN_LOCAL + 1];
 
 	unsigned int bind_async_flags;
 
-	/*
-	 * Each active user context has its own address space (in full-ppgtt).
-	 * Since the vm may be shared between multiple contexts, we count how
-	 * many contexts keep us "open". Once open hits zero, we are closed
-	 * and do not allow any new attachments, and proceed to shutdown our
-	 * vma and page directories.
-	 */
-	atomic_t open;
-
 	struct mutex mutex; /* protects vma and our lists */
 
 	struct kref resv_ref; /* kref to keep the reservation lock alive. */
@@ -247,6 +277,11 @@ struct i915_address_space {
 	 */
 	struct list_head bound_list;
 
+	/**
+	 * List of vmas not yet bound or evicted.
+	 */
+	struct list_head unbound_list;
+
 	/* Global GTT */
 	bool is_ggtt:1;
 
@@ -256,6 +291,9 @@ struct i915_address_space {
 	/* Some systems support read-only mappings for GGTT and/or PPGTT */
 	bool has_read_only:1;
 
+	/* Skip pte rewrite on unbind for suspend. Protected by @mutex */
+	bool skip_pte_rewrite:1;
+
 	u8 top;
 	u8 pd_shift;
 	u8 scratch_order;
@@ -263,14 +301,18 @@ struct i915_address_space {
 	/* Flags used when creating page-table objects for this vm */
 	unsigned long lmem_pt_obj_flags;
 
+	/* Interval tree for pending unbind vma resources */
+	struct rb_root_cached pending_unbind;
+
 	struct drm_i915_gem_object *
 		(*alloc_pt_dma)(struct i915_address_space *vm, int sz);
 	struct drm_i915_gem_object *
 		(*alloc_scratch_dma)(struct i915_address_space *vm, int sz);
 
 	u64 (*pte_encode)(dma_addr_t addr,
-			  enum i915_cache_level level,
+			  unsigned int pat_index,
 			  u32 flags); /* Create a valid PTE */
+	dma_addr_t (*pte_decode)(u64 pte, bool *is_present, bool *is_local);
 #define PTE_READ_ONLY	BIT(0)
 #define PTE_LM		BIT(1)
 
@@ -279,15 +321,28 @@ struct i915_address_space {
 			      u64 start, u64 length);
 	void (*clear_range)(struct i915_address_space *vm,
 			    u64 start, u64 length);
+	void (*scratch_range)(struct i915_address_space *vm,
+			      u64 start, u64 length);
 	void (*insert_page)(struct i915_address_space *vm,
 			    dma_addr_t addr,
 			    u64 offset,
-			    enum i915_cache_level cache_level,
+			    unsigned int pat_index,
 			    u32 flags);
 	void (*insert_entries)(struct i915_address_space *vm,
-			       struct i915_vma *vma,
-			       enum i915_cache_level cache_level,
+			       struct i915_vma_resource *vma_res,
+			       unsigned int pat_index,
 			       u32 flags);
+	void (*raw_insert_page)(struct i915_address_space *vm,
+				dma_addr_t addr,
+				u64 offset,
+				unsigned int pat_index,
+				u32 flags);
+	void (*raw_insert_entries)(struct i915_address_space *vm,
+				   struct i915_vma_resource *vma_res,
+				   unsigned int pat_index,
+				   u32 flags);
+	dma_addr_t (*read_entry)(struct i915_address_space *vm,
+				 u64 offset, bool *is_present, bool *is_local);
 	void (*cleanup)(struct i915_address_space *vm);
 
 	void (*foreach)(struct i915_address_space *vm,
@@ -346,12 +401,12 @@ struct i915_ggtt {
 	 */
 	struct list_head userfault_list;
 
-	/* Manual runtime pm autosuspend delay for user GGTT mmaps */
-	struct intel_wakeref_auto userfault_wakeref;
-
 	struct mutex error_mutex;
 	struct drm_mm_node error_capture;
 	struct drm_mm_node uc_fw;
+
+	/** List of GTs mapping this GGTT */
+	struct list_head gt_list;
 };
 
 struct i915_ppgtt {
@@ -364,6 +419,8 @@ struct i915_ppgtt {
 #define i915_is_dpt(vm) ((vm)->is_dpt)
 #define i915_is_ggtt_or_dpt(vm) (i915_is_ggtt(vm) || i915_is_dpt(vm))
 
+bool intel_vm_no_concurrent_access_wa(struct drm_i915_private *i915);
+
 int __must_check
 i915_vm_lock_objects(struct i915_address_space *vm, struct i915_gem_ww_ctx *ww);
 
@@ -379,6 +436,25 @@ i915_vm_has_scratch_64K(struct i915_address_space *vm)
 	return vm->scratch_order == get_order(I915_GTT_PAGE_SIZE_64K);
 }
 
+static inline u64 i915_vm_min_alignment(struct i915_address_space *vm,
+					enum intel_memory_type type)
+{
+	/* avoid INTEL_MEMORY_MOCK overflow */
+	if ((int)type >= ARRAY_SIZE(vm->min_alignment))
+		type = INTEL_MEMORY_SYSTEM;
+
+	return vm->min_alignment[type];
+}
+
+static inline u64 i915_vm_obj_min_alignment(struct i915_address_space *vm,
+					    struct drm_i915_gem_object *obj)
+{
+	struct intel_memory_region *mr = READ_ONCE(obj->mm.region);
+	enum intel_memory_type type = mr ? mr->type : INTEL_MEMORY_SYSTEM;
+
+	return i915_vm_min_alignment(vm, type);
+}
+
 static inline bool
 i915_vm_has_cache_coloring(struct i915_address_space *vm)
 {
@@ -408,6 +484,17 @@ i915_vm_get(struct i915_address_space *vm)
 	return vm;
 }
 
+static inline struct i915_address_space *
+i915_vm_tryget(struct i915_address_space *vm)
+{
+	return kref_get_unless_zero(&vm->ref) ? vm : NULL;
+}
+
+static inline void assert_vm_alive(struct i915_address_space *vm)
+{
+	GEM_BUG_ON(!kref_read(&vm->ref));
+}
+
 /**
  * i915_vm_resv_get - Obtain a reference on the vm's reservation lock
  * @vm: The vm whose reservation lock we want to share.
@@ -431,41 +518,13 @@ static inline void i915_vm_put(struct i915_address_space *vm)
 
 /**
  * i915_vm_resv_put - Release a reference on the vm's reservation lock
- * @resv: Pointer to a reservation lock obtained from i915_vm_resv_get()
+ * @vm: The vm whose reservation lock reference we want to release
  */
 static inline void i915_vm_resv_put(struct i915_address_space *vm)
 {
 	kref_put(&vm->resv_ref, i915_vm_resv_release);
 }
 
-static inline struct i915_address_space *
-i915_vm_open(struct i915_address_space *vm)
-{
-	GEM_BUG_ON(!atomic_read(&vm->open));
-	atomic_inc(&vm->open);
-	return i915_vm_get(vm);
-}
-
-static inline bool
-i915_vm_tryopen(struct i915_address_space *vm)
-{
-	if (atomic_add_unless(&vm->open, 1, 0))
-		return i915_vm_get(vm);
-
-	return false;
-}
-
-void __i915_vm_close(struct i915_address_space *vm);
-
-static inline void
-i915_vm_close(struct i915_address_space *vm)
-{
-	GEM_BUG_ON(!atomic_read(&vm->open));
-	__i915_vm_close(vm);
-
-	i915_vm_put(vm);
-}
-
 void i915_address_space_init(struct i915_address_space *vm, int subclass);
 void i915_address_space_fini(struct i915_address_space *vm);
 
@@ -526,15 +585,24 @@ i915_page_dir_dma_addr(const struct i915_ppgtt *ppgtt, const unsigned int n)
 void ppgtt_init(struct i915_ppgtt *ppgtt, struct intel_gt *gt,
 		unsigned long lmem_pt_obj_flags);
+void intel_ggtt_bind_vma(struct i915_address_space *vm,
+			 struct i915_vm_pt_stash *stash,
+			 struct i915_vma_resource *vma_res,
+			 unsigned int pat_index,
+			 u32 flags);
+void intel_ggtt_unbind_vma(struct i915_address_space *vm,
+			   struct i915_vma_resource *vma_res);
+
+dma_addr_t intel_ggtt_read_entry(struct i915_address_space *vm,
+				 u64 offset, bool *is_present, bool *is_local);
 
 int i915_ggtt_probe_hw(struct drm_i915_private *i915);
 int i915_ggtt_init_hw(struct drm_i915_private *i915);
 int i915_ggtt_enable_hw(struct drm_i915_private *i915);
-void i915_ggtt_enable_guc(struct i915_ggtt *ggtt);
-void i915_ggtt_disable_guc(struct i915_ggtt *ggtt);
 int i915_init_ggtt(struct drm_i915_private *i915);
 void i915_ggtt_driver_release(struct drm_i915_private *i915);
 void i915_ggtt_driver_late_release(struct drm_i915_private *i915);
+struct i915_ggtt *i915_ggtt_create(struct drm_i915_private *i915);
 
 static inline bool i915_ggtt_has_aperture(const struct i915_ggtt *ggtt)
 {
@@ -546,8 +614,8 @@ int i915_ppgtt_init_hw(struct intel_gt *gt);
 struct i915_ppgtt *i915_ppgtt_create(struct intel_gt *gt,
 				     unsigned long lmem_pt_obj_flags);
 
-void i915_ggtt_suspend_vm(struct i915_address_space *vm);
-bool i915_ggtt_resume_vm(struct i915_address_space *vm);
+void i915_ggtt_suspend_vm(struct i915_address_space *vm, bool evict_all);
+bool i915_ggtt_resume_vm(struct i915_address_space *vm, bool all_evicted);
 void i915_ggtt_suspend(struct i915_ggtt *gtt);
 void i915_ggtt_resume(struct i915_ggtt *ggtt);
 
@@ -565,7 +633,7 @@ void free_scratch(struct i915_address_space *vm);
 struct drm_i915_gem_object *alloc_pt_dma(struct i915_address_space *vm, int sz);
 struct drm_i915_gem_object *alloc_pt_lmem(struct i915_address_space *vm, int sz);
 
-struct i915_page_table *alloc_pt(struct i915_address_space *vm);
+struct i915_page_table *alloc_pt(struct i915_address_space *vm, int sz);
 struct i915_page_directory *alloc_pd(struct i915_address_space *vm);
 struct i915_page_directory *__alloc_pd(int npde);
 
@@ -600,15 +668,15 @@ void gen6_ggtt_invalidate(struct i915_ggtt *ggtt);
 
 void ppgtt_bind_vma(struct i915_address_space *vm,
 		    struct i915_vm_pt_stash *stash,
-		    struct i915_vma *vma,
-		    enum i915_cache_level cache_level,
+		    struct i915_vma_resource *vma_res,
+		    unsigned int pat_index,
 		    u32 flags);
 void ppgtt_unbind_vma(struct i915_address_space *vm,
-		      struct i915_vma *vma);
+		      struct i915_vma_resource *vma_res);
 
 void gtt_write_workarounds(struct intel_gt *gt);
 
-void setup_private_pat(struct intel_uncore *uncore);
+void setup_private_pat(struct intel_gt *gt);
 
 int i915_vm_alloc_pt_stash(struct i915_address_space *vm,
 			   struct i915_vm_pt_stash *stash,
@@ -627,11 +695,13 @@ __vm_create_scratch_for_read_pinned(struct i915_address_space *vm, unsigned long
 static inline struct sgt_dma {
 	struct scatterlist *sg;
 	dma_addr_t dma, max;
-} sgt_dma(struct i915_vma *vma) {
-	struct scatterlist *sg = vma->pages->sgl;
+} sgt_dma(struct i915_vma_resource *vma_res) {
+	struct scatterlist *sg = vma_res->bi.pages->sgl;
 	dma_addr_t addr = sg_dma_address(sg);
 
 	return (struct sgt_dma){ sg, addr, addr + sg_dma_len(sg) };
 }
 
+bool i915_ggtt_require_binder(struct drm_i915_private *i915);
+
 #endif
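
Notes on the main API changes above, each with a small illustrative sketch.

The vm "open" count is gone from this header: i915_vm_open(), i915_vm_tryopen() and i915_vm_close() are deleted, and lifetime is carried by the plain kref alone. A minimal caller sketch (hypothetical code, not from the driver) of the replacement pattern, using the new i915_vm_tryget() together with the existing i915_vm_put():

/*
 * Hypothetical caller, for illustration only: where code previously took a
 * "use" reference with i915_vm_tryopen(), it now races only against final
 * kref release via i915_vm_tryget() and drops it with i915_vm_put().
 */
static int example_vm_user(struct i915_address_space *vm)
{
	vm = i915_vm_tryget(vm);
	if (!vm)
		return -ENOENT;	/* vm already on its way to destruction */

	assert_vm_alive(vm);
	/* ... operate on the vm ... */

	i915_vm_put(vm);
	return 0;
}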
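The bind and insert paths now take an abstract PAT index (unsigned int pat_index) instead of enum i915_cache_level, and Meteor Lake PTE bits (MTL_GGTT_PTE_PAT0/PAT1, MTL_PPGTT_PTE_PAT3, GEN12_PPGTT_PTE_PAT0..2) are added to carry it. An illustrative sketch, not the driver's actual encoder, of how a GGTT pte_encode implementation could fold the low PAT index bits into those fields; GEN8_PAGE_PRESENT is assumed from the unmodified part of this header:

/*
 * Sketch only: place the low two bits of a PAT index into the Meteor Lake
 * GGTT PTE PAT fields defined above, plus the local-memory bit.
 */
static u64 example_mtl_ggtt_pte_encode(dma_addr_t addr,
				       unsigned int pat_index,
				       u32 flags)
{
	gen8_pte_t pte = addr | GEN8_PAGE_PRESENT;

	if (flags & PTE_LM)
		pte |= GEN12_GGTT_PTE_LM;

	if (pat_index & BIT(0))
		pte |= MTL_GGTT_PTE_PAT0;
	if (pat_index & BIT(1))
		pte |= MTL_GGTT_PTE_PAT1;

	return pte;
}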
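The new pte_decode and read_entry hooks (and intel_ggtt_read_entry()) make GGTT entries readable back, reporting whether an entry is present and whether it points at local memory. A hypothetical decoder for a gen12-style GGTT PTE, built only from the masks defined above; the exact semantics of is_present/is_local are inferred from the parameter names:

/*
 * Illustrative decoder for the read path: recover the address and the
 * present/local flags from a gen12-style GGTT PTE.
 */
static dma_addr_t example_gen12_pte_decode(u64 pte, bool *is_present,
					   bool *is_local)
{
	*is_present = pte & GEN8_PAGE_PRESENT;
	*is_local = pte & GEN12_GGTT_PTE_LM;

	return pte & GEN12_GGTT_PTE_ADDR_MASK;
}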
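i915_address_space gains a per-memory-type min_alignment[] table, with i915_vm_min_alignment() clamping out-of-range types (the comment calls out INTEL_MEMORY_MOCK) and i915_vm_obj_min_alignment() resolving an object's backing region first. A sketch of the intended use when choosing a GTT offset (hypothetical helper; round_up() is the standard kernel macro, valid here because the alignments are powers of two):

/*
 * Hypothetical placement helper: round a proposed GTT offset up to the
 * vm's minimum alignment for the object's backing region.
 */
static u64 example_aligned_offset(struct i915_address_space *vm,
				  struct drm_i915_gem_object *obj,
				  u64 offset)
{
	return round_up(offset, i915_vm_obj_min_alignment(vm, obj));
}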
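alloc_pt() now takes an explicit size so that the new i915_vm_pt_stash.pt_sz override can reach the page-table allocator; per the comment it only applies on discrete platforms and otherwise defaults to I915_GTT_PAGE_SIZE_4K. An illustrative caller, assuming pt_sz == 0 means "not set":

/*
 * Sketch only: honour the optional stash override when allocating a page
 * table, falling back to the usual 4K backing when pt_sz is unset.
 */
static struct i915_page_table *
example_stash_alloc_pt(struct i915_address_space *vm,
		       struct i915_vm_pt_stash *stash)
{
	int sz = stash->pt_sz ?: I915_GTT_PAGE_SIZE_4K;

	return alloc_pt(vm, sz);
}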
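Finally, sgt_dma() now starts from the vma resource's backing store (vma_res->bi.pages) rather than vma->pages. The usual consumption pattern, sketched for illustration with the PTE write elided; __sg_next() is the scatterlist helper the driver already uses elsewhere:

/*
 * Sketch of walking a vma resource's backing pages one 4K page at a time,
 * the pattern an insert_entries() implementation typically follows.
 */
static void example_walk_pages(struct i915_vma_resource *vma_res)
{
	struct sgt_dma iter = sgt_dma(vma_res);

	do {
		/* ... write a PTE for iter.dma ... */
		iter.dma += I915_GTT_PAGE_SIZE;
		if (iter.dma >= iter.max) {
			iter.sg = __sg_next(iter.sg);
			if (!iter.sg || !sg_dma_len(iter.sg))
				break;
			iter.dma = sg_dma_address(iter.sg);
			iter.max = iter.dma + sg_dma_len(iter.sg);
		}
	} while (1);
}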
