Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_object.h')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.h  245
1 file changed, 191 insertions(+), 54 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index 382485115b06..52c2d1731aab 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -30,8 +30,120 @@
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
+#include "amdgpu_res_cursor.h"
+
+#ifdef CONFIG_MMU_NOTIFIER
+#include <linux/mmu_notifier.h>
+#endif
#define AMDGPU_BO_INVALID_OFFSET LONG_MAX
+#define AMDGPU_BO_MAX_PLACEMENTS 3
+
+/* BO flag to indicate a KFD userptr BO */
+#define AMDGPU_AMDKFD_CREATE_USERPTR_BO (1ULL << 63)
+
+#define to_amdgpu_bo_user(abo) container_of((abo), struct amdgpu_bo_user, bo)
+#define to_amdgpu_bo_vm(abo) container_of((abo), struct amdgpu_bo_vm, bo)
+
+struct amdgpu_bo_param {
+ unsigned long size;
+ int byte_align;
+ u32 bo_ptr_size;
+ u32 domain;
+ u32 preferred_domain;
+ u64 flags;
+ enum ttm_bo_type type;
+ bool no_wait_gpu;
+ struct dma_resv *resv;
+ void (*destroy)(struct ttm_buffer_object *bo);
+ /* xcp partition number plus 1, 0 means any partition */
+ int8_t xcp_id_plus1;
+};
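
The parameter struct above replaces the long argument list of the old amdgpu_bo_create() (see the prototype change further down). A minimal sketch of how a caller might fill it, with illustrative values and adev assumed in scope:

	struct amdgpu_bo_param bp = {
		.size = PAGE_SIZE,
		.byte_align = PAGE_SIZE,
		.bo_ptr_size = sizeof(struct amdgpu_bo),
		.domain = AMDGPU_GEM_DOMAIN_GTT,
		.type = ttm_bo_type_kernel,
		.resv = NULL,	/* no shared reservation object */
	};
	struct amdgpu_bo *bo;
	int r = amdgpu_bo_create(adev, &bp, &bo);
	if (r)
		return r;
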
+
+/* bo virtual addresses in a vm */
+struct amdgpu_bo_va_mapping {
+ struct amdgpu_bo_va *bo_va;
+ struct list_head list;
+ struct rb_node rb;
+ uint64_t start;
+ uint64_t last;
+ uint64_t __subtree_last;
+ uint64_t offset;
+ uint32_t flags;
+};
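
The start/last/__subtree_last fields follow the layout expected by the kernel's generic interval tree; amdgpu instantiates the tree over this struct roughly as below (a sketch of the INTERVAL_TREE_DEFINE use in amdgpu_vm.c):

	#include <linux/interval_tree_generic.h>

	#define START(node) ((node)->start)
	#define LAST(node)  ((node)->last)

	INTERVAL_TREE_DEFINE(struct amdgpu_bo_va_mapping, rb, uint64_t,
			     __subtree_last, START, LAST, static,
			     amdgpu_vm_it)
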
+
+/* User space allocated BO in a VM */
+struct amdgpu_bo_va {
+ struct amdgpu_vm_bo_base base;
+
+ /* protected by bo being reserved */
+ unsigned ref_count;
+
+ /* all other members protected by the VM PD being reserved */
+ struct dma_fence *last_pt_update;
+
+ /* mappings for this bo_va */
+ struct list_head invalids;
+ struct list_head valids;
+
+ /* If the mappings are cleared or filled */
+ bool cleared;
+
+ bool is_xgmi;
+
+ /*
+ * protected by vm reservation lock
+ * if non-zero, cannot unmap from GPU because user queues may still access it
+ */
+ unsigned int queue_refcount;
+ atomic_t userq_va_mapped;
+};
+
+struct amdgpu_bo {
+ /* Protected by tbo.reserved */
+ u32 preferred_domains;
+ u32 allowed_domains;
+ struct ttm_place placements[AMDGPU_BO_MAX_PLACEMENTS];
+ struct ttm_placement placement;
+ struct ttm_buffer_object tbo;
+ struct ttm_bo_kmap_obj kmap;
+ u64 flags;
+ /* per VM structure for page tables and with virtual addresses */
+ struct amdgpu_vm_bo_base *vm_bo;
+ /* Constant after initialization */
+ struct amdgpu_bo *parent;
+
+#ifdef CONFIG_MMU_NOTIFIER
+ struct mmu_interval_notifier notifier;
+#endif
+ struct kgd_mem *kfd_bo;
+
+	/*
+	 * On GPUs with spatial partitioning, the xcp partition number; -1
+	 * means any partition. On ASICs without spatial partitioning, always
+	 * 0 for memory accounting.
+	 */
+ int8_t xcp_id;
+};
+
+struct amdgpu_bo_user {
+ struct amdgpu_bo bo;
+ u64 tiling_flags;
+ u64 metadata_flags;
+ void *metadata;
+	u32 metadata_size;
+};
+
+struct amdgpu_bo_vm {
+ struct amdgpu_bo bo;
+ struct amdgpu_vm_bo_base entries[];
+};
+
+static inline struct amdgpu_bo *ttm_to_amdgpu_bo(struct ttm_buffer_object *tbo)
+{
+ return container_of(tbo, struct amdgpu_bo, tbo);
+}
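
This is the same container_of() downcast pattern as the to_amdgpu_bo_user()/to_amdgpu_bo_vm() macros near the top of the file; a TTM callback can walk from the embedded object back to its wrapper (a sketch, assuming tbo is known to embed an amdgpu_bo_user):

	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(tbo);
	struct amdgpu_bo_user *ubo = to_amdgpu_bo_user(abo);
	/* ubo->tiling_flags, ubo->metadata, ... are now reachable */
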
/**
* amdgpu_mem_type_to_domain - return domain corresponding to mem_type
@@ -54,6 +166,10 @@ static inline unsigned amdgpu_mem_type_to_domain(u32 mem_type)
return AMDGPU_GEM_DOMAIN_GWS;
case AMDGPU_PL_OA:
return AMDGPU_GEM_DOMAIN_OA;
+ case AMDGPU_PL_DOORBELL:
+ return AMDGPU_GEM_DOMAIN_DOORBELL;
+ case AMDGPU_PL_MMIO_REMAP:
+ return AMDGPU_GEM_DOMAIN_MMIO_REMAP;
default:
break;
}
@@ -90,17 +206,17 @@ static inline void amdgpu_bo_unreserve(struct amdgpu_bo *bo)
static inline unsigned long amdgpu_bo_size(struct amdgpu_bo *bo)
{
- return bo->tbo.num_pages << PAGE_SHIFT;
+ return bo->tbo.base.size;
}
static inline unsigned amdgpu_bo_ngpu_pages(struct amdgpu_bo *bo)
{
- return (bo->tbo.num_pages << PAGE_SHIFT) / AMDGPU_GPU_PAGE_SIZE;
+ return bo->tbo.base.size / AMDGPU_GPU_PAGE_SIZE;
}
static inline unsigned amdgpu_bo_gpu_page_alignment(struct amdgpu_bo *bo)
{
- return (bo->tbo.mem.page_alignment << PAGE_SHIFT) / AMDGPU_GPU_PAGE_SIZE;
+ return (bo->tbo.page_alignment << PAGE_SHIFT) / AMDGPU_GPU_PAGE_SIZE;
}
/**
@@ -111,51 +227,67 @@ static inline unsigned amdgpu_bo_gpu_page_alignment(struct amdgpu_bo *bo)
*/
static inline u64 amdgpu_bo_mmap_offset(struct amdgpu_bo *bo)
{
- return drm_vma_node_offset_addr(&bo->tbo.vma_node);
+ return drm_vma_node_offset_addr(&bo->tbo.base.vma_node);
}
/**
- * amdgpu_bo_gpu_accessible - return whether the bo is currently in memory that
- * is accessible to the GPU.
+ * amdgpu_bo_explicit_sync - return whether the bo is explicitly synced
*/
-static inline bool amdgpu_bo_gpu_accessible(struct amdgpu_bo *bo)
+static inline bool amdgpu_bo_explicit_sync(struct amdgpu_bo *bo)
{
- return bo->tbo.mem.mem_type != TTM_PL_SYSTEM;
+ return bo->flags & AMDGPU_GEM_CREATE_EXPLICIT_SYNC;
}
+/**
+ * amdgpu_bo_encrypted - test if the BO is encrypted
+ * @bo: pointer to a buffer object
+ *
+ * Return true if the buffer object is encrypted, false otherwise.
+ */
+static inline bool amdgpu_bo_encrypted(struct amdgpu_bo *bo)
+{
+ return bo->flags & AMDGPU_GEM_CREATE_ENCRYPTED;
+}
+
+bool amdgpu_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
+void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain);
+
int amdgpu_bo_create(struct amdgpu_device *adev,
- unsigned long size, int byte_align,
- bool kernel, u32 domain, u64 flags,
- struct sg_table *sg,
- struct reservation_object *resv,
- struct amdgpu_bo **bo_ptr);
-int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
- unsigned long size, int byte_align,
- bool kernel, u32 domain, u64 flags,
- struct sg_table *sg,
- struct ttm_placement *placement,
- struct reservation_object *resv,
- struct amdgpu_bo **bo_ptr);
+ struct amdgpu_bo_param *bp,
+ struct amdgpu_bo **bo_ptr);
+int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
+ unsigned long size, int align,
+ u32 domain, struct amdgpu_bo **bo_ptr,
+ u64 *gpu_addr, void **cpu_addr);
int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
unsigned long size, int align,
u32 domain, struct amdgpu_bo **bo_ptr,
u64 *gpu_addr, void **cpu_addr);
+int amdgpu_bo_create_isp_user(struct amdgpu_device *adev,
+ struct dma_buf *dbuf, u32 domain,
+ struct amdgpu_bo **bo,
+ u64 *gpu_addr);
+int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev,
+ uint64_t offset, uint64_t size,
+ struct amdgpu_bo **bo_ptr, void **cpu_addr);
+int amdgpu_bo_create_user(struct amdgpu_device *adev,
+ struct amdgpu_bo_param *bp,
+ struct amdgpu_bo_user **ubo_ptr);
+int amdgpu_bo_create_vm(struct amdgpu_device *adev,
+ struct amdgpu_bo_param *bp,
+ struct amdgpu_bo_vm **vmbo_ptr);
void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
void **cpu_addr);
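
amdgpu_bo_create_kernel() and amdgpu_bo_free_kernel() are used as a pair; a typical lifecycle for a pinned, kernel-mapped BO looks roughly like this (a sketch, adev assumed in scope):

	struct amdgpu_bo *bo = NULL;
	u64 gpu_addr;
	void *cpu_addr;
	int r;

	r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM,
				    &bo, &gpu_addr, &cpu_addr);
	if (r)
		return r;
	/* ... use cpu_addr and gpu_addr ... */
	amdgpu_bo_free_kernel(&bo, &gpu_addr, &cpu_addr);
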
+void amdgpu_bo_free_isp_user(struct amdgpu_bo *bo);
int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr);
+void *amdgpu_bo_kptr(struct amdgpu_bo *bo);
void amdgpu_bo_kunmap(struct amdgpu_bo *bo);
struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo);
void amdgpu_bo_unref(struct amdgpu_bo **bo);
-int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain, u64 *gpu_addr);
-int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
- u64 min_offset, u64 max_offset,
- u64 *gpu_addr);
-int amdgpu_bo_unpin(struct amdgpu_bo *bo);
-int amdgpu_bo_evict_vram(struct amdgpu_device *adev);
+int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain);
+void amdgpu_bo_unpin(struct amdgpu_bo *bo);
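
With this change amdgpu_bo_pin() no longer reports the GPU address and amdgpu_bo_unpin() cannot fail; callers fetch the address separately via amdgpu_bo_gpu_offset() while holding the reservation (a sketch):

	r = amdgpu_bo_reserve(bo, false);
	if (r)
		return r;
	r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_VRAM);
	if (!r)
		gpu_addr = amdgpu_bo_gpu_offset(bo);
	amdgpu_bo_unreserve(bo);
	/* later, again under reservation: amdgpu_bo_unpin(bo); */
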
int amdgpu_bo_init(struct amdgpu_device *adev);
void amdgpu_bo_fini(struct amdgpu_device *adev);
-int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
- struct vm_area_struct *vma);
int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags);
void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags);
int amdgpu_bo_set_metadata (struct amdgpu_bo *bo, void *metadata,
@@ -165,37 +297,41 @@ int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
uint64_t *flags);
void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
bool evict,
- struct ttm_mem_reg *new_mem);
-int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
+ struct ttm_resource *new_mem);
+void amdgpu_bo_release_notify(struct ttm_buffer_object *bo);
+vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
bool shared);
+int amdgpu_bo_sync_wait_resv(struct amdgpu_device *adev, struct dma_resv *resv,
+ enum amdgpu_sync_mode sync_mode, void *owner,
+ bool intr);
+int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr);
u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo);
-int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev,
- struct amdgpu_ring *ring,
- struct amdgpu_bo *bo,
- struct reservation_object *resv,
- struct dma_fence **fence, bool direct);
-int amdgpu_bo_validate(struct amdgpu_bo *bo);
-int amdgpu_bo_restore_from_shadow(struct amdgpu_device *adev,
- struct amdgpu_ring *ring,
- struct amdgpu_bo *bo,
- struct reservation_object *resv,
- struct dma_fence **fence,
- bool direct);
-
+u64 amdgpu_bo_fb_aper_addr(struct amdgpu_bo *bo);
+u64 amdgpu_bo_gpu_offset_no_check(struct amdgpu_bo *bo);
+uint32_t amdgpu_bo_mem_stats_placement(struct amdgpu_bo *bo);
+uint32_t amdgpu_bo_get_preferred_domain(struct amdgpu_device *adev,
+ uint32_t domain);
/*
* sub allocation
*/
+static inline struct amdgpu_sa_manager *
+to_amdgpu_sa_manager(struct drm_suballoc_manager *manager)
+{
+ return container_of(manager, struct amdgpu_sa_manager, base);
+}
-static inline uint64_t amdgpu_sa_bo_gpu_addr(struct amdgpu_sa_bo *sa_bo)
+static inline uint64_t amdgpu_sa_bo_gpu_addr(struct drm_suballoc *sa_bo)
{
- return sa_bo->manager->gpu_addr + sa_bo->soffset;
+ return to_amdgpu_sa_manager(sa_bo->manager)->gpu_addr +
+ drm_suballoc_soffset(sa_bo);
}
-static inline void * amdgpu_sa_bo_cpu_addr(struct amdgpu_sa_bo *sa_bo)
+static inline void *amdgpu_sa_bo_cpu_addr(struct drm_suballoc *sa_bo)
{
- return sa_bo->manager->cpu_ptr + sa_bo->soffset;
+ return to_amdgpu_sa_manager(sa_bo->manager)->cpu_ptr +
+ drm_suballoc_soffset(sa_bo);
}
int amdgpu_sa_bo_manager_init(struct amdgpu_device *adev,
@@ -205,18 +341,19 @@ void amdgpu_sa_bo_manager_fini(struct amdgpu_device *adev,
struct amdgpu_sa_manager *sa_manager);
int amdgpu_sa_bo_manager_start(struct amdgpu_device *adev,
struct amdgpu_sa_manager *sa_manager);
-int amdgpu_sa_bo_manager_suspend(struct amdgpu_device *adev,
- struct amdgpu_sa_manager *sa_manager);
int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
- struct amdgpu_sa_bo **sa_bo,
- unsigned size, unsigned align);
-void amdgpu_sa_bo_free(struct amdgpu_device *adev,
- struct amdgpu_sa_bo **sa_bo,
- struct dma_fence *fence);
+ struct drm_suballoc **sa_bo,
+ unsigned int size);
+void amdgpu_sa_bo_free(struct drm_suballoc **sa_bo,
+ struct dma_fence *fence);
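
The sub-allocator now hands out generic struct drm_suballoc objects, and the alignment parameter is gone from amdgpu_sa_bo_new(); allocation and address lookup compose roughly as follows (a sketch, sa_manager assumed initialized via amdgpu_sa_bo_manager_init()):

	struct drm_suballoc *sa_bo;
	int r;

	r = amdgpu_sa_bo_new(sa_manager, &sa_bo, 256);
	if (r)
		return r;
	gpu_va  = amdgpu_sa_bo_gpu_addr(sa_bo);	/* u64 GPU address */
	cpu_ptr = amdgpu_sa_bo_cpu_addr(sa_bo);	/* CPU mapping */
	/* fill and submit; free once the submission fence is known: */
	amdgpu_sa_bo_free(&sa_bo, fence);
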
#if defined(CONFIG_DEBUG_FS)
void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
struct seq_file *m);
+u64 amdgpu_bo_print_info(int id, struct amdgpu_bo *bo, struct seq_file *m);
#endif
+void amdgpu_debugfs_sa_init(struct amdgpu_device *adev);
+
+bool amdgpu_bo_support_uswc(u64 bo_flags);
#endif