Diffstat (limited to 'drivers/accel/ivpu/ivpu_gem.h')
-rw-r--r--  drivers/accel/ivpu/ivpu_gem.h | 108 ++++++++++++++++++----------------
 1 file changed, 49 insertions(+), 59 deletions(-)
diff --git a/drivers/accel/ivpu/ivpu_gem.h b/drivers/accel/ivpu/ivpu_gem.h
index 6b0ceda5f253..0c3350f22b55 100644
--- a/drivers/accel/ivpu/ivpu_gem.h
+++ b/drivers/accel/ivpu/ivpu_gem.h
@@ -1,79 +1,62 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (C) 2020-2023 Intel Corporation
+ * Copyright (C) 2020-2025 Intel Corporation
*/
#ifndef __IVPU_GEM_H__
#define __IVPU_GEM_H__
#include <drm/drm_gem.h>
+#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_mm.h>
-struct dma_buf;
-struct ivpu_bo_ops;
struct ivpu_file_priv;
struct ivpu_bo {
- struct drm_gem_object base;
- const struct ivpu_bo_ops *ops;
-
+ struct drm_gem_shmem_object base;
struct ivpu_mmu_context *ctx;
- struct list_head ctx_node;
+ struct list_head bo_list_node;
struct drm_mm_node mm_node;
- struct mutex lock; /* Protects: pages, sgt, mmu_mapped */
- struct sg_table *sgt;
- struct page **pages;
- bool mmu_mapped;
-
- void *kvaddr;
u64 vpu_addr;
- u32 handle;
u32 flags;
- uintptr_t user_ptr;
- u32 job_status;
-};
-
-enum ivpu_bo_type {
- IVPU_BO_TYPE_SHMEM = 1,
- IVPU_BO_TYPE_INTERNAL,
- IVPU_BO_TYPE_PRIME,
-};
-
-struct ivpu_bo_ops {
- enum ivpu_bo_type type;
- const char *name;
- int (*alloc_pages)(struct ivpu_bo *bo);
- void (*free_pages)(struct ivpu_bo *bo);
- int (*map_pages)(struct ivpu_bo *bo);
- void (*unmap_pages)(struct ivpu_bo *bo);
+ u32 job_status; /* Valid only for command buffer */
+ u32 ctx_id;
+ bool mmu_mapped;
};
-int ivpu_bo_pin(struct ivpu_bo *bo);
-void ivpu_bo_remove_all_bos_from_context(struct ivpu_mmu_context *ctx);
-void ivpu_bo_list(struct drm_device *dev, struct drm_printer *p);
-void ivpu_bo_list_print(struct drm_device *dev);
+int ivpu_bo_bind(struct ivpu_bo *bo);
+void ivpu_bo_unbind_all_bos_from_context(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx);
-struct ivpu_bo *
-ivpu_bo_alloc_internal(struct ivpu_device *vdev, u64 vpu_addr, u64 size, u32 flags);
-void ivpu_bo_free_internal(struct ivpu_bo *bo);
+struct drm_gem_object *ivpu_gem_create_object(struct drm_device *dev, size_t size);
struct drm_gem_object *ivpu_gem_prime_import(struct drm_device *dev, struct dma_buf *dma_buf);
-void ivpu_bo_unmap_sgt_and_remove_from_context(struct ivpu_bo *bo);
+struct ivpu_bo *ivpu_bo_create(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
+ struct ivpu_addr_range *range, u64 size, u32 flags);
+struct ivpu_bo *ivpu_bo_create_runtime(struct ivpu_device *vdev, u64 addr, u64 size, u32 flags);
+struct ivpu_bo *ivpu_bo_create_global(struct ivpu_device *vdev, u64 size, u32 flags);
+void ivpu_bo_free(struct ivpu_bo *bo);
int ivpu_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file);
int ivpu_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file);
int ivpu_bo_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file);
+int ivpu_bo_create_from_userptr_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
+
+void ivpu_bo_list(struct drm_device *dev, struct drm_printer *p);
+void ivpu_bo_list_print(struct drm_device *dev);
static inline struct ivpu_bo *to_ivpu_bo(struct drm_gem_object *obj)
{
- return container_of(obj, struct ivpu_bo, base);
+ return container_of(obj, struct ivpu_bo, base.base);
}
-static inline struct page *ivpu_bo_get_page(struct ivpu_bo *bo, u64 offset)
+static inline void *ivpu_bo_vaddr(struct ivpu_bo *bo)
{
- if (offset > bo->base.size || !bo->pages)
- return NULL;
+ return bo->base.vaddr;
+}
- return bo->pages[offset / PAGE_SIZE];
+static inline size_t ivpu_bo_size(struct ivpu_bo *bo)
+{
+ return bo->base.base.size;
}
static inline u32 ivpu_bo_cache_mode(struct ivpu_bo *bo)
@@ -81,25 +64,27 @@ static inline u32 ivpu_bo_cache_mode(struct ivpu_bo *bo)
return bo->flags & DRM_IVPU_BO_CACHE_MASK;
}
-static inline bool ivpu_bo_is_snooped(struct ivpu_bo *bo)
+static inline struct ivpu_device *ivpu_bo_to_vdev(struct ivpu_bo *bo)
{
- return ivpu_bo_cache_mode(bo) == DRM_IVPU_BO_CACHED;
+ return to_ivpu_device(bo->base.base.dev);
}
-static inline pgprot_t ivpu_bo_pgprot(struct ivpu_bo *bo, pgprot_t prot)
+static inline bool ivpu_bo_is_snooped(struct ivpu_bo *bo)
{
- if (bo->flags & DRM_IVPU_BO_WC)
- return pgprot_writecombine(prot);
+ if (ivpu_is_force_snoop_enabled(ivpu_bo_to_vdev(bo)))
+ return true;
- if (bo->flags & DRM_IVPU_BO_UNCACHED)
- return pgprot_noncached(prot);
+ return ivpu_bo_cache_mode(bo) == DRM_IVPU_BO_CACHED;
+}
- return prot;
+static inline bool ivpu_bo_is_read_only(struct ivpu_bo *bo)
+{
+ return bo->flags & DRM_IVPU_BO_READ_ONLY;
}
-static inline struct ivpu_device *ivpu_bo_to_vdev(struct ivpu_bo *bo)
+static inline bool ivpu_bo_is_resident(struct ivpu_bo *bo)
{
- return to_ivpu_device(bo->base.dev);
+ return !!bo->base.pages;
}
static inline void *ivpu_to_cpu_addr(struct ivpu_bo *bo, u32 vpu_addr)
@@ -107,21 +92,26 @@ static inline void *ivpu_to_cpu_addr(struct ivpu_bo *bo, u32 vpu_addr)
if (vpu_addr < bo->vpu_addr)
return NULL;
- if (vpu_addr >= (bo->vpu_addr + bo->base.size))
+ if (vpu_addr >= (bo->vpu_addr + ivpu_bo_size(bo)))
return NULL;
- return bo->kvaddr + (vpu_addr - bo->vpu_addr);
+ return ivpu_bo_vaddr(bo) + (vpu_addr - bo->vpu_addr);
}
static inline u32 cpu_to_vpu_addr(struct ivpu_bo *bo, void *cpu_addr)
{
- if (cpu_addr < bo->kvaddr)
+ if (cpu_addr < ivpu_bo_vaddr(bo))
return 0;
- if (cpu_addr >= (bo->kvaddr + bo->base.size))
+ if (cpu_addr >= (ivpu_bo_vaddr(bo) + ivpu_bo_size(bo)))
return 0;
- return bo->vpu_addr + (cpu_addr - bo->kvaddr);
+ return bo->vpu_addr + (cpu_addr - ivpu_bo_vaddr(bo));
+}
+
+static inline bool ivpu_bo_is_mappable(struct ivpu_bo *bo)
+{
+ return bo->flags & DRM_IVPU_BO_MAPPABLE;
}
#endif /* __IVPU_GEM_H__ */
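
A minimal usage sketch, not part of the patch itself: it shows how a driver-internal caller might exercise the new API after the shmem conversion, using ivpu_bo_vaddr() and ivpu_bo_size() in place of the removed kvaddr and base.size fields. The flag combination, the SZ_4K size, and the assumptions that ivpu_bo_create_global() vmaps MAPPABLE buffers and returns NULL on failure are illustrative only.

/*
 * Usage sketch only (assumed, not taken from the patch):
 * allocate a small global buffer, touch it through the shmem-backed
 * accessors, then free it.
 */
static int ivpu_example_alloc_scratch(struct ivpu_device *vdev)
{
	struct ivpu_bo *bo;

	/* Assumes NULL is returned on allocation failure. */
	bo = ivpu_bo_create_global(vdev, SZ_4K,
				   DRM_IVPU_BO_CACHED | DRM_IVPU_BO_MAPPABLE);
	if (!bo)
		return -ENOMEM;

	/* Kernel mapping and size now come from drm_gem_shmem_object. */
	memset(ivpu_bo_vaddr(bo), 0, ivpu_bo_size(bo));

	ivpu_bo_free(bo);
	return 0;
}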