diff options
author | Matt Roper <matthew.d.roper@intel.com> | 2023-06-01 14:52:25 -0700 |
---|---|---|
committer | Rodrigo Vivi <rodrigo.vivi@intel.com> | 2023-12-19 18:34:14 -0500 |
commit | 876611c2b75689c6bea43bdbbbef9b358f71526a (patch) | |
tree | 9a5ac2da4dd0a51b434b21713c38796c25631f74 /drivers/gpu/drm/xe/xe_pt.h | |
parent | ebd288cba7db7097ad50a4736ded94cb0d92fadf (diff) |
drm/xe: Memory allocations are tile-based, not GT-based
Since memory and address spaces are a tile concept rather than a GT
concept, we need to plumb tile-based handling through lots of
memory-related code.
Note one remaining shortcoming that will need to be addressed before
media GT support can be re-enabled: although the address space is
shared between a tile's GTs, each GT caches the PTEs independently in
its own TLB, and thus TLB invalidation should be handled at the GT
level.
v2:
- Fix kunit test build.
Reviewed-by: Lucas De Marchi <lucas.demarchi@intel.com>
Link: https://lore.kernel.org/r/20230601215244.678611-13-matthew.d.roper@intel.com
Signed-off-by: Matt Roper <matthew.d.roper@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Diffstat (limited to 'drivers/gpu/drm/xe/xe_pt.h')
-rw-r--r-- | drivers/gpu/drm/xe/xe_pt.h | 14 |
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/drivers/gpu/drm/xe/xe_pt.h b/drivers/gpu/drm/xe/xe_pt.h index 1152043e5c63..10f334b9c004 100644 --- a/drivers/gpu/drm/xe/xe_pt.h +++ b/drivers/gpu/drm/xe/xe_pt.h @@ -13,8 +13,8 @@ struct dma_fence; struct xe_bo; struct xe_device; struct xe_engine; -struct xe_gt; struct xe_sync_entry; +struct xe_tile; struct xe_vm; struct xe_vma; @@ -23,27 +23,27 @@ struct xe_vma; unsigned int xe_pt_shift(unsigned int level); -struct xe_pt *xe_pt_create(struct xe_vm *vm, struct xe_gt *gt, +struct xe_pt *xe_pt_create(struct xe_vm *vm, struct xe_tile *tile, unsigned int level); -int xe_pt_create_scratch(struct xe_device *xe, struct xe_gt *gt, +int xe_pt_create_scratch(struct xe_device *xe, struct xe_tile *tile, struct xe_vm *vm); -void xe_pt_populate_empty(struct xe_gt *gt, struct xe_vm *vm, +void xe_pt_populate_empty(struct xe_tile *tile, struct xe_vm *vm, struct xe_pt *pt); void xe_pt_destroy(struct xe_pt *pt, u32 flags, struct llist_head *deferred); struct dma_fence * -__xe_pt_bind_vma(struct xe_gt *gt, struct xe_vma *vma, struct xe_engine *e, +__xe_pt_bind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_engine *e, struct xe_sync_entry *syncs, u32 num_syncs, bool rebind); struct dma_fence * -__xe_pt_unbind_vma(struct xe_gt *gt, struct xe_vma *vma, struct xe_engine *e, +__xe_pt_unbind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_engine *e, struct xe_sync_entry *syncs, u32 num_syncs); -bool xe_pt_zap_ptes(struct xe_gt *gt, struct xe_vma *vma); +bool xe_pt_zap_ptes(struct xe_tile *tile, struct xe_vma *vma); u64 gen8_pde_encode(struct xe_bo *bo, u64 bo_offset, const enum xe_cache_level level); |