Diffstat (limited to 'drivers/gpu/drm/msm/msm_mmu.h')
-rw-r--r--  drivers/gpu/drm/msm/msm_mmu.h | 82
 1 file changed, 60 insertions(+), 22 deletions(-)
diff --git a/drivers/gpu/drm/msm/msm_mmu.h b/drivers/gpu/drm/msm/msm_mmu.h
index aa2c5d4580c8..8915662fbd4d 100644
--- a/drivers/gpu/drm/msm/msm_mmu.h
+++ b/drivers/gpu/drm/msm/msm_mmu.h
@@ -1,18 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __MSM_MMU_H__
@@ -20,38 +9,87 @@
#include <linux/iommu.h>
+struct msm_mmu_prealloc;
+struct msm_mmu;
+struct msm_gpu;
+
struct msm_mmu_funcs {
- int (*attach)(struct msm_mmu *mmu, const char * const *names, int cnt);
- void (*detach)(struct msm_mmu *mmu, const char * const *names, int cnt);
+ void (*detach)(struct msm_mmu *mmu);
+ void (*prealloc_count)(struct msm_mmu *mmu, struct msm_mmu_prealloc *p,
+ uint64_t iova, size_t len);
+ int (*prealloc_allocate)(struct msm_mmu *mmu, struct msm_mmu_prealloc *p);
+ void (*prealloc_cleanup)(struct msm_mmu *mmu, struct msm_mmu_prealloc *p);
int (*map)(struct msm_mmu *mmu, uint64_t iova, struct sg_table *sgt,
- unsigned len, int prot);
- int (*unmap)(struct msm_mmu *mmu, uint64_t iova, struct sg_table *sgt,
- unsigned len);
+ size_t off, size_t len, int prot);
+ int (*unmap)(struct msm_mmu *mmu, uint64_t iova, size_t len);
void (*destroy)(struct msm_mmu *mmu);
+ void (*set_stall)(struct msm_mmu *mmu, bool enable);
+};
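/*
 * Editor's note: a minimal sketch of how a backend could wire up the
 * reworked msm_mmu_funcs callbacks: map() now takes a byte offset into
 * the sg_table plus an explicit length, and unmap() only needs iova/len.
 * All example_* symbols are hypothetical and not part of the driver.
 */
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include "msm_mmu.h"

static void example_detach(struct msm_mmu *mmu)
{
        /* tear down the translation context for this MMU */
}

static int example_map(struct msm_mmu *mmu, uint64_t iova,
                       struct sg_table *sgt, size_t off, size_t len, int prot)
{
        /* map 'len' bytes of 'sgt', starting 'off' bytes in, at 'iova' */
        return 0;
}

static int example_unmap(struct msm_mmu *mmu, uint64_t iova, size_t len)
{
        /* unmap 'len' bytes at 'iova'; no sg_table is needed anymore */
        return 0;
}

static void example_destroy(struct msm_mmu *mmu)
{
        kfree(mmu);     /* assumes 'mmu' is the first member of the backend struct */
}

static const struct msm_mmu_funcs example_funcs = {
        .detach  = example_detach,
        .map     = example_map,
        .unmap   = example_unmap,
        .destroy = example_destroy,
};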
+
+enum msm_mmu_type {
+ MSM_MMU_GPUMMU,
+ MSM_MMU_IOMMU,
+ MSM_MMU_IOMMU_PAGETABLE,
+};
+
+/**
+ * struct msm_mmu_prealloc - Tracking for pre-allocated pages for MMU updates.
+ */
+struct msm_mmu_prealloc {
+ /** @count: Number of pages reserved. */
+ uint32_t count;
+ /** @ptr: Index of first unused page in @pages */
+ uint32_t ptr;
+ /**
+ * @pages: Array of pages preallocated for MMU table updates.
+ *
+ * After a VM operation, there might be free pages remaining in this
+ * array (since the amount allocated is a worst-case). These are
+ * returned to the pt_cache at mmu->prealloc_cleanup().
+ */
+ void **pages;
};
struct msm_mmu {
const struct msm_mmu_funcs *funcs;
struct device *dev;
- int (*handler)(void *arg, unsigned long iova, int flags);
+ int (*handler)(void *arg, unsigned long iova, int flags, void *data);
void *arg;
+ enum msm_mmu_type type;
+
+ /**
+ * @prealloc: pre-allocated pages for pgtable
+ *
+ * Set while a VM_BIND job is running, serialized under
+ * msm_gem_vm::mmu_lock.
+ */
+ struct msm_mmu_prealloc *prealloc;
};
static inline void msm_mmu_init(struct msm_mmu *mmu, struct device *dev,
- const struct msm_mmu_funcs *funcs)
+ const struct msm_mmu_funcs *funcs, enum msm_mmu_type type)
{
mmu->dev = dev;
mmu->funcs = funcs;
+ mmu->type = type;
}
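/*
 * Editor's note: a hypothetical constructor showing msm_mmu_init() with the
 * new enum msm_mmu_type argument; example_mmu and example_funcs continue the
 * illustrative sketch above and are not part of the driver.
 */
#include <linux/err.h>

struct example_mmu {
        struct msm_mmu base;    /* embedded so callbacks receive &base */
        /* backend-private state would follow */
};

static struct msm_mmu *example_mmu_new(struct device *dev)
{
        struct example_mmu *emmu = kzalloc(sizeof(*emmu), GFP_KERNEL);

        if (!emmu)
                return ERR_PTR(-ENOMEM);

        msm_mmu_init(&emmu->base, dev, &example_funcs, MSM_MMU_IOMMU);

        return &emmu->base;
}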
-struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain);
-struct msm_mmu *msm_gpummu_new(struct device *dev, struct msm_gpu *gpu);
+struct msm_mmu *msm_iommu_new(struct device *dev, unsigned long quirks);
+struct msm_mmu *msm_iommu_gpu_new(struct device *dev, struct msm_gpu *gpu, unsigned long quirks);
+struct msm_mmu *msm_iommu_disp_new(struct device *dev, unsigned long quirks);
static inline void msm_mmu_set_fault_handler(struct msm_mmu *mmu, void *arg,
- int (*handler)(void *arg, unsigned long iova, int flags))
+ int (*handler)(void *arg, unsigned long iova, int flags, void *data))
{
mmu->arg = arg;
mmu->handler = handler;
}
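/*
 * Editor's note: a sketch of the extended fault-handler signature. The new
 * 'data' argument carries optional implementation-specific fault information
 * and may be NULL; example_fault_handler and the 'priv' cookie are
 * hypothetical.
 */
static int example_fault_handler(void *arg, unsigned long iova, int flags,
                                 void *data)
{
        pr_err("example: unhandled MMU fault at %08lx, flags=0x%x\n",
               iova, flags);

        /* Non-zero tells the caller the fault was not resolved here. */
        return -EFAULT;
}

static void example_install_handler(struct msm_mmu *mmu, void *priv)
{
        msm_mmu_set_fault_handler(mmu, priv, example_fault_handler);
}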
+struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent, bool kernel_managed);
+
+int msm_iommu_pagetable_params(struct msm_mmu *mmu, phys_addr_t *ttbr,
+ int *asid);
+int msm_iommu_pagetable_walk(struct msm_mmu *mmu, unsigned long iova, uint64_t ptes[4]);
+struct iommu_domain_geometry *msm_iommu_get_geometry(struct msm_mmu *mmu);
+
#endif /* __MSM_MMU_H__ */
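For the pagetable helpers added at the end of the header, a short usage sketch follows: it creates a child pagetable on top of a parent IOMMU-backed MMU and reads back its TTBR/ASID, e.g. to program per-context translation on the GPU. The 'true' argument and the assumption that failures are reported via ERR_PTR() are editorial guesses based only on the declarations above; example_setup_pagetable() is hypothetical.

#include <linux/err.h>
#include "msm_mmu.h"

static struct msm_mmu *example_setup_pagetable(struct msm_mmu *parent,
                                               phys_addr_t *ttbr, int *asid)
{
        struct msm_mmu *pt;
        int ret;

        /* 'true' is assumed to request kernel-managed page tables. */
        pt = msm_iommu_pagetable_create(parent, true);
        if (IS_ERR(pt))
                return pt;

        ret = msm_iommu_pagetable_params(pt, ttbr, asid);
        if (ret) {
                pt->funcs->destroy(pt);
                return ERR_PTR(ret);
        }

        /* Caller programs *ttbr/*asid into the hardware and keeps 'pt'. */
        return pt;
}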