Diffstat (limited to 'drivers/gpu/drm/xe/xe_svm.h')
-rw-r--r-- | drivers/gpu/drm/xe/xe_svm.h | 136 |
1 file changed, 136 insertions, 0 deletions
diff --git a/drivers/gpu/drm/xe/xe_svm.h b/drivers/gpu/drm/xe/xe_svm.h
index 30fc78b85b30..da9a69ea0bb1 100644
--- a/drivers/gpu/drm/xe/xe_svm.h
+++ b/drivers/gpu/drm/xe/xe_svm.h
@@ -70,6 +70,26 @@ int xe_svm_bo_evict(struct xe_bo *bo);
 
 void xe_svm_range_debug(struct xe_svm_range *range, const char *operation);
 
+int xe_svm_alloc_vram(struct xe_tile *tile, struct xe_svm_range *range,
+		      const struct drm_gpusvm_ctx *ctx);
+
+struct xe_svm_range *xe_svm_range_find_or_insert(struct xe_vm *vm, u64 addr,
+						 struct xe_vma *vma, struct drm_gpusvm_ctx *ctx);
+
+int xe_svm_range_get_pages(struct xe_vm *vm, struct xe_svm_range *range,
+			   struct drm_gpusvm_ctx *ctx);
+
+bool xe_svm_range_needs_migrate_to_vram(struct xe_svm_range *range, struct xe_vma *vma,
+					bool preferred_region_is_vram);
+
+void xe_svm_range_migrate_to_smem(struct xe_vm *vm, struct xe_svm_range *range);
+
+bool xe_svm_range_validate(struct xe_vm *vm,
+			   struct xe_svm_range *range,
+			   u8 tile_mask, bool devmem_preferred);
+
+u64 xe_svm_find_vma_start(struct xe_vm *vm, u64 addr, u64 end, struct xe_vma *vma);
+
 /**
  * xe_svm_range_has_dma_mapping() - SVM range has DMA mapping
  * @range: SVM range
@@ -82,6 +102,53 @@ static inline bool xe_svm_range_has_dma_mapping(struct xe_svm_range *range)
 	return range->base.flags.has_dma_mapping;
 }
 
+/**
+ * to_xe_range - Convert a drm_gpusvm_range pointer to a xe_svm_range
+ * @r: Pointer to the drm_gpusvm_range structure
+ *
+ * This function takes a pointer to a drm_gpusvm_range structure and
+ * converts it to a pointer to the containing xe_svm_range structure.
+ *
+ * Return: Pointer to the xe_svm_range structure
+ */
+static inline struct xe_svm_range *to_xe_range(struct drm_gpusvm_range *r)
+{
+	return container_of(r, struct xe_svm_range, base);
+}
+
+/**
+ * xe_svm_range_start() - SVM range start address
+ * @range: SVM range
+ *
+ * Return: start address of range.
+ */
+static inline unsigned long xe_svm_range_start(struct xe_svm_range *range)
+{
+	return drm_gpusvm_range_start(&range->base);
+}
+
+/**
+ * xe_svm_range_end() - SVM range end address
+ * @range: SVM range
+ *
+ * Return: end address of range.
+ */
+static inline unsigned long xe_svm_range_end(struct xe_svm_range *range)
+{
+	return drm_gpusvm_range_end(&range->base);
+}
+
+/**
+ * xe_svm_range_size() - SVM range size
+ * @range: SVM range
+ *
+ * Return: Size of range.
+ */
+static inline unsigned long xe_svm_range_size(struct xe_svm_range *range)
+{
+	return drm_gpusvm_range_size(&range->base);
+}
+
 #define xe_svm_assert_in_notifier(vm__) \
 	lockdep_assert_held_write(&(vm__)->svm.gpusvm.notifier_lock)
 
@@ -97,6 +164,8 @@ void xe_svm_flush(struct xe_vm *vm);
 #include <linux/interval_tree.h>
 
 struct drm_pagemap_device_addr;
+struct drm_gpusvm_ctx;
+struct drm_gpusvm_range;
 struct xe_bo;
 struct xe_gt;
 struct xe_vm;
@@ -167,6 +236,73 @@ void xe_svm_range_debug(struct xe_svm_range *range, const char *operation)
 {
 }
 
+static inline int
+xe_svm_alloc_vram(struct xe_tile *tile, struct xe_svm_range *range,
+		  const struct drm_gpusvm_ctx *ctx)
+{
+	return -EOPNOTSUPP;
+}
+
+static inline
+struct xe_svm_range *xe_svm_range_find_or_insert(struct xe_vm *vm, u64 addr,
+						 struct xe_vma *vma, struct drm_gpusvm_ctx *ctx)
+{
+	return ERR_PTR(-EINVAL);
+}
+
+static inline
+int xe_svm_range_get_pages(struct xe_vm *vm, struct xe_svm_range *range,
+			   struct drm_gpusvm_ctx *ctx)
+{
+	return -EINVAL;
+}
+
+static inline struct xe_svm_range *to_xe_range(struct drm_gpusvm_range *r)
+{
+	return NULL;
+}
+
+static inline unsigned long xe_svm_range_start(struct xe_svm_range *range)
+{
+	return 0;
+}
+
+static inline unsigned long xe_svm_range_end(struct xe_svm_range *range)
+{
+	return 0;
+}
+
+static inline unsigned long xe_svm_range_size(struct xe_svm_range *range)
+{
+	return 0;
+}
+
+static inline
+bool xe_svm_range_needs_migrate_to_vram(struct xe_svm_range *range, struct xe_vma *vma,
+					u32 region)
+{
+	return false;
+}
+
+static inline
+void xe_svm_range_migrate_to_smem(struct xe_vm *vm, struct xe_svm_range *range)
+{
+}
+
+static inline
+bool xe_svm_range_validate(struct xe_vm *vm,
+			   struct xe_svm_range *range,
+			   u8 tile_mask, bool devmem_preferred)
+{
+	return false;
+}
+
+static inline
+u64 xe_svm_find_vma_start(struct xe_vm *vm, u64 addr, u64 end, struct xe_vma *vma)
+{
+	return ULONG_MAX;
+}
+
 #define xe_svm_assert_in_notifier(...) do {} while (0)
 #define xe_svm_range_has_dma_mapping(...) false
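The hunks above export the SVM range helpers from xe_svm.c and add matching stubs for builds without SVM support, so callers compile either way (the stubs fail with -EOPNOTSUPP/-EINVAL or return neutral values). As a rough orientation only, the sketch below shows one plausible way a caller could chain the new helpers when servicing a fault; the wrapper name xe_svm_handle_fault_sketch, the prefer_vram policy flag and the error handling are illustrative assumptions, not code from this patch (IS_ERR()/PTR_ERR() come from <linux/err.h>).

/*
 * Illustrative sketch only -- not part of the patch. One plausible ordering
 * of the newly exported helpers for a fault on 'fault_addr': look up (or
 * create) the backing SVM range, migrate it to VRAM when the policy asks for
 * that, then populate its pages. Names and policy flag are assumptions.
 */
static int xe_svm_handle_fault_sketch(struct xe_vm *vm, struct xe_tile *tile,
				      struct xe_vma *vma, u64 fault_addr,
				      bool prefer_vram,
				      struct drm_gpusvm_ctx *ctx)
{
	struct xe_svm_range *range;
	int err;

	/* Find the SVM range covering fault_addr, inserting one if none exists. */
	range = xe_svm_range_find_or_insert(vm, fault_addr, vma, ctx);
	if (IS_ERR(range))
		return PTR_ERR(range);

	/* Move the range's backing store to VRAM when preferred and not already there. */
	if (xe_svm_range_needs_migrate_to_vram(range, vma, prefer_vram)) {
		err = xe_svm_alloc_vram(tile, range, ctx);
		if (err)
			return err;
	}

	/* Populate and DMA-map the pages so the range can be bound in the GPU VM. */
	return xe_svm_range_get_pages(vm, range, ctx);
}

A prefetch-style caller would presumably also use xe_svm_range_validate() and xe_svm_find_vma_start() to skip ranges that are already in the right placement for the tiles of interest; the sketch leaves that out.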