Diffstat (limited to 'drivers/gpu/drm/msm/msm_mmu.h')
 drivers/gpu/drm/msm/msm_mmu.h | 40 +++++++++++++++++++++++++++++++++++++---
 1 file changed, 37 insertions(+), 3 deletions(-)
diff --git a/drivers/gpu/drm/msm/msm_mmu.h b/drivers/gpu/drm/msm/msm_mmu.h
index daf91529e02b..8915662fbd4d 100644
--- a/drivers/gpu/drm/msm/msm_mmu.h
+++ b/drivers/gpu/drm/msm/msm_mmu.h
@@ -9,13 +9,21 @@
#include <linux/iommu.h>
+struct msm_mmu_prealloc;
+struct msm_mmu;
+struct msm_gpu;
+
struct msm_mmu_funcs {
void (*detach)(struct msm_mmu *mmu);
+ void (*prealloc_count)(struct msm_mmu *mmu, struct msm_mmu_prealloc *p,
+ uint64_t iova, size_t len);
+ int (*prealloc_allocate)(struct msm_mmu *mmu, struct msm_mmu_prealloc *p);
+ void (*prealloc_cleanup)(struct msm_mmu *mmu, struct msm_mmu_prealloc *p);
int (*map)(struct msm_mmu *mmu, uint64_t iova, struct sg_table *sgt,
- size_t len, int prot);
+ size_t off, size_t len, int prot);
int (*unmap)(struct msm_mmu *mmu, uint64_t iova, size_t len);
void (*destroy)(struct msm_mmu *mmu);
- void (*resume_translation)(struct msm_mmu *mmu);
+ void (*set_stall)(struct msm_mmu *mmu, bool enable);
};
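
The ops changes split pagetable memory management out of the map path: prealloc_count() accumulates a worst-case page count for a VA range, prealloc_allocate() grabs that many pages up front while sleeping is still allowed, and prealloc_cleanup() returns whatever went unused. map() additionally gains an offset into the sg_table, so a mapping can start partway into a buffer, and resume_translation() is generalized into set_stall(), which can both enable and disable stall-on-fault. A minimal caller-side sketch of the prealloc sequence (the helper name and surrounding code are illustrative, not part of this patch):

/*
 * Hypothetical helper, not from this patch: reserve worst-case
 * pagetable pages for a VA range before entering a context where
 * allocation is not allowed.
 */
static int prealloc_pt_pages(struct msm_mmu *mmu, struct msm_mmu_prealloc *p,
			     uint64_t iova, size_t len)
{
	/* Accumulate the worst-case page count for this range into p. */
	mmu->funcs->prealloc_count(mmu, p, iova, len);

	/* Allocate the reserved pages while we can still sleep. */
	return mmu->funcs->prealloc_allocate(mmu, p);
}

Once the VM operation completes, the caller would hand unused pages back via mmu->funcs->prealloc_cleanup().
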
enum msm_mmu_type {
@@ -24,12 +32,38 @@ enum msm_mmu_type {
MSM_MMU_IOMMU_PAGETABLE,
};
+/**
+ * struct msm_mmu_prealloc - Tracking for pre-allocated pages for MMU updates.
+ */
+struct msm_mmu_prealloc {
+ /** @count: Number of pages reserved. */
+ uint32_t count;
+ /** @ptr: Index of first unused page in @pages */
+ uint32_t ptr;
+ /**
+ * @pages: Array of pages preallocated for MMU table updates.
+ *
+ * After a VM operation, there might be free pages remaining in this
+ * array (since the amount allocated is a worst-case). These are
+ * returned to the pt_cache by mmu->funcs->prealloc_cleanup().
+ */
+ void **pages;
+};
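
The @count/@ptr pair implies a simple bump-pointer consumption model for the reserved pages. An illustrative consumer (this helper does not exist in the patch):

/*
 * Illustrative only: pop the next reserved page during a pagetable
 * update.  Running out of pages would mean the worst-case estimate
 * from prealloc_count() was too small.
 */
static void *prealloc_get_page(struct msm_mmu_prealloc *p)
{
	if (WARN_ON(p->ptr >= p->count))
		return NULL;

	return p->pages[p->ptr++];
}
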
+
struct msm_mmu {
const struct msm_mmu_funcs *funcs;
struct device *dev;
int (*handler)(void *arg, unsigned long iova, int flags, void *data);
void *arg;
enum msm_mmu_type type;
+
+ /**
+ * @prealloc: pre-allocated pages for pgtable updates
+ *
+ * Set while a VM_BIND job is running, serialized under
+ * msm_gem_vm::mmu_lock.
+ */
+ struct msm_mmu_prealloc *prealloc;
};
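
Because @prealloc is a single pointer on the shared struct msm_mmu, only one VM_BIND job can have its reservation installed at a time; msm_gem_vm::mmu_lock provides that serialization. A hedged sketch of the expected lifetime (the vm type and every name other than mmu_lock and mmu->prealloc are assumptions for illustration):

/*
 * Sketch, not from this patch: publish the job's reservation around
 * the pagetable updates, then return leftover pages.
 */
static void vm_bind_commit(struct msm_gem_vm *vm, struct msm_mmu *mmu,
			   struct msm_mmu_prealloc *p)
{
	mutex_lock(&vm->mmu_lock);
	mmu->prealloc = p;

	/* ... map()/unmap() ops draw pages from p->pages here ... */

	mmu->prealloc = NULL;
	mutex_unlock(&vm->mmu_lock);

	/* Hand any remaining worst-case pages back to the pt_cache. */
	mmu->funcs->prealloc_cleanup(mmu, p);
}
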
static inline void msm_mmu_init(struct msm_mmu *mmu, struct device *dev,
@@ -51,7 +85,7 @@ static inline void msm_mmu_set_fault_handler(struct msm_mmu *mmu, void *arg,
mmu->handler = handler;
}
-struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent);
+struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent, bool kernel_managed);
int msm_iommu_pagetable_params(struct msm_mmu *mmu, phys_addr_t *ttbr,
int *asid);
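
The new kernel_managed flag on msm_iommu_pagetable_create() distinguishes pagetables whose VA space the kernel keeps managing from ones set up for userspace-managed (VM_BIND-style) address spaces. An illustrative call site; the flag's polarity and the helper name are assumptions, not taken from this patch:

/*
 * Illustrative only: the flag's polarity is an assumption.  Existing
 * callers would keep the kernel-managed behavior, while a VM_BIND
 * address space would opt out so userspace can manage the VA space.
 */
static struct msm_mmu *create_ctx_pagetable(struct msm_mmu *gpu_mmu,
					    bool vm_bind)
{
	return msm_iommu_pagetable_create(gpu_mmu, !vm_bind);
}
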