Diffstat (limited to 'include/uapi/drm/msm_drm.h')
 include/uapi/drm/msm_drm.h | 149 ++++++++++++++++++++++++++++++++++++++-----
 1 file changed, 134 insertions(+), 15 deletions(-)
diff --git a/include/uapi/drm/msm_drm.h b/include/uapi/drm/msm_drm.h
index 2342cb90857e..5c67294edc95 100644
--- a/include/uapi/drm/msm_drm.h
+++ b/include/uapi/drm/msm_drm.h
@@ -91,6 +91,32 @@ struct drm_msm_timespec {
#define MSM_PARAM_UBWC_SWIZZLE 0x12 /* RO */
#define MSM_PARAM_MACROTILE_MODE 0x13 /* RO */
#define MSM_PARAM_UCHE_TRAP_BASE 0x14 /* RO */
+/* PRR (Partially Resident Region) is required for sparse residency: */
+#define MSM_PARAM_HAS_PRR 0x15 /* RO */
+/* MSM_PARAM_EN_VM_BIND is set to 1 to enable VM_BIND ops.
+ *
+ * With VM_BIND enabled, userspace is required to allocate iova and use the
+ * VM_BIND ops for map/unmap ioctls. MSM_INFO_SET_IOVA and MSM_INFO_GET_IOVA
+ * will be rejected. (The latter does not have a sensible meaning when a BO
+ * can have multiple and/or partial mappings.)
+ *
+ * With VM_BIND enabled, userspace does not include a submit_bo table in the
+ * SUBMIT ioctl (this will be rejected); instead the resident set is
+ * determined by the VM_BIND ops.
+ *
+ * Enabling VM_BIND will fail on devices which do not have per-process
+ * pgtables, and VM_BIND cannot be disabled once it has been enabled.
+ *
+ * Enabling VM_BIND should be done (attempted) prior to allocating any BOs
+ * or creating submitqueues of type MSM_SUBMITQUEUE_VM_BIND.
+ *
+ * Relatedly, when VM_BIND mode is enabled, the kernel will not try to recover
+ * from GPU faults or failed async VM_BIND ops, in particular because it is
+ * difficult to communicate to userspace which op failed so that userspace
+ * could rewind and try again. When the VM is marked unusable, the SUBMIT
+ * ioctl will return -EPIPE.
+ */
+#define MSM_PARAM_EN_VM_BIND 0x16 /* WO, once */
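A minimal userspace sketch of opting in, assuming an open DRM fd and libdrm's drmIoctl(); per the comment above, this has to run before any BO or submitqueue is created (the helper name is hypothetical):

#include <xf86drm.h>
#include <drm/msm_drm.h>

static int enable_vm_bind(int fd)
{
        struct drm_msm_param req = {
                .pipe  = MSM_PIPE_3D0,
                .param = MSM_PARAM_EN_VM_BIND,
                .value = 1,
                /* .len stays 0 for non-pointer params */
        };

        /* Fails on devices without per-process pgtables */
        return drmIoctl(fd, DRM_IOCTL_MSM_SET_PARAM, &req);
}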
/* For backwards compat. The original support for preemption was based on
* a single ring per priority level so # of priority levels equals the #
@@ -114,6 +140,19 @@ struct drm_msm_param {
#define MSM_BO_SCANOUT 0x00000001 /* scanout capable */
#define MSM_BO_GPU_READONLY 0x00000002
+/* Private buffers do not need to be explicitly listed in the SUBMIT
+ * ioctl, unless referenced by a drm_msm_gem_submit_cmd. Private
+ * buffers may NOT be imported/exported or used for scanout (or any
+ * other situation where buffers can be indefinitely pinned; the
+ * cases other than scanout are all kernel-owned BOs which are not
+ * visible to userspace).
+ *
+ * In exchange for those constraints, all private BOs associated with
+ * a single context (drm_file) share a single dma_resv, and if there
+ * has been no eviction since the last submit, there is no per-BO
+ * bookkeeping to do, significantly cutting the SUBMIT overhead.
+ */
+#define MSM_BO_NO_SHARE 0x00000004
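For example, a sketch of allocating a context-private BO, reusing the fd from the earlier sketch (size arbitrary, error handling elided):

struct drm_msm_gem_new req = {
        .size  = 0x1000,
        .flags = MSM_BO_NO_SHARE | MSM_BO_WC,
};

if (drmIoctl(fd, DRM_IOCTL_MSM_GEM_NEW, &req) == 0) {
        /* req.handle names a BO that can never be exported or scanned out */
}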
#define MSM_BO_CACHE_MASK 0x000f0000
/* cache modes */
#define MSM_BO_CACHED 0x00010000
@@ -123,6 +162,7 @@ struct drm_msm_param {
#define MSM_BO_FLAGS (MSM_BO_SCANOUT | \
MSM_BO_GPU_READONLY | \
+ MSM_BO_NO_SHARE | \
MSM_BO_CACHE_MASK)
struct drm_msm_gem_new {
@@ -180,6 +220,17 @@ struct drm_msm_gem_cpu_fini {
* Cmdstream Submission:
*/
+#define MSM_SYNCOBJ_RESET 0x00000001 /* Reset syncobj after wait. */
+#define MSM_SYNCOBJ_FLAGS ( \
+ MSM_SYNCOBJ_RESET | \
+ 0)
+
+struct drm_msm_syncobj {
+ __u32 handle; /* in, syncobj handle. */
+ __u32 flags; /* in, from MSM_SYNCOBJ_FLAGS */
+ __u64 point; /* in, timepoint for timeline syncobjs. */
+};
+
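As an illustration, an in-fence entry waiting on a (hypothetical) timeline point 42 and resetting the syncobj afterwards could be filled in as:

struct drm_msm_syncobj wait = {
        .handle = syncobj_handle,    /* a previously created syncobj (hypothetical) */
        .flags  = MSM_SYNCOBJ_RESET, /* reset after the wait completes */
        .point  = 42,                /* timeline point; 0 for binary syncobjs */
};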
/* The value written into the cmdstream is logically:
*
* ((relocbuf->gpuaddr + reloc_offset) << shift) | or
@@ -221,7 +272,10 @@ struct drm_msm_gem_submit_cmd {
__u32 size; /* in, cmdstream size */
__u32 pad;
__u32 nr_relocs; /* in, number of submit_reloc's */
- __u64 relocs; /* in, ptr to array of submit_reloc's */
+ union {
+ __u64 relocs; /* in, ptr to array of submit_reloc's */
+ __u64 iova; /* cmdstream address (for VM_BIND contexts) */
+ };
};
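In VM_BIND mode a sketch of a submit_cmd therefore carries the GPU address directly (cmdstream_size and cmdstream_iova are hypothetical values, the mapping having been created by an earlier VM_BIND MAP op):

struct drm_msm_gem_submit_cmd cmd = {
        .type = MSM_SUBMIT_CMD_BUF,
        .size = cmdstream_size,     /* in, cmdstream size in bytes */
        .iova = cmdstream_iova,     /* userspace-allocated GPU address */
        /* nr_relocs stays 0: relocs and iova alias in the union */
};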
/* Each buffer referenced elsewhere in the cmdstream submit (ie. the
@@ -269,17 +323,6 @@ struct drm_msm_gem_submit_bo {
MSM_SUBMIT_FENCE_SN_IN | \
0)
-#define MSM_SUBMIT_SYNCOBJ_RESET 0x00000001 /* Reset syncobj after wait. */
-#define MSM_SUBMIT_SYNCOBJ_FLAGS ( \
- MSM_SUBMIT_SYNCOBJ_RESET | \
- 0)
-
-struct drm_msm_gem_submit_syncobj {
- __u32 handle; /* in, syncobj handle. */
- __u32 flags; /* in, from MSM_SUBMIT_SYNCOBJ_FLAGS */
- __u64 point; /* in, timepoint for timeline syncobjs. */
-};
-
/* Each cmdstream submit consists of a table of buffers involved, and
* one or more cmdstream buffers. This allows for conditional execution
* (context-restore), and IB buffers needed for per tile/bin draw cmds.
@@ -293,13 +336,80 @@ struct drm_msm_gem_submit {
__u64 cmds; /* in, ptr to array of submit_cmd's */
__s32 fence_fd; /* in/out fence fd (see MSM_SUBMIT_FENCE_FD_IN/OUT) */
__u32 queueid; /* in, submitqueue id */
- __u64 in_syncobjs; /* in, ptr to array of drm_msm_gem_submit_syncobj */
- __u64 out_syncobjs; /* in, ptr to array of drm_msm_gem_submit_syncobj */
+ __u64 in_syncobjs; /* in, ptr to array of drm_msm_syncobj */
+ __u64 out_syncobjs; /* in, ptr to array of drm_msm_syncobj */
__u32 nr_in_syncobjs; /* in, number of entries in in_syncobj */
__u32 nr_out_syncobjs; /* in, number of entries in out_syncobj. */
__u32 syncobj_stride; /* in, stride of syncobj arrays. */
__u32 pad; /*in, reserved for future use, always 0. */
+};
+#define MSM_VM_BIND_OP_UNMAP 0
+#define MSM_VM_BIND_OP_MAP 1
+#define MSM_VM_BIND_OP_MAP_NULL 2
+
+/* Flags for drm_msm_vm_bind_op, not additional op types: */
+#define MSM_VM_BIND_OP_DUMP 1
+#define MSM_VM_BIND_OP_FLAGS ( \
+ MSM_VM_BIND_OP_DUMP | \
+ 0)
+
+/**
+ * struct drm_msm_vm_bind_op - bind/unbind op to run
+ */
+struct drm_msm_vm_bind_op {
+ /** @op: one of MSM_VM_BIND_OP_x */
+ __u32 op;
+ /** @handle: GEM object handle, MBZ for UNMAP or MAP_NULL */
+ __u32 handle;
+ /** @obj_offset: Offset into GEM object, MBZ for UNMAP or MAP_NULL */
+ __u64 obj_offset;
+ /** @iova: Address to operate on */
+ __u64 iova;
+ /** @range: Number of bytes to map/unmap */
+ __u64 range;
+ /** @flags: Bitmask of MSM_VM_BIND_OP_FLAGS */
+ __u32 flags;
+ /** @pad: MBZ */
+ __u32 pad;
+};
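Putting the struct to use, a sketch of a MAP op covering the first 64KiB of a BO (bo_handle and the iova are hypothetical; the DUMP flag, by analogy with MSM_SUBMIT_BO_DUMP, requests that the mapping be captured in GPU crash dumps):

struct drm_msm_vm_bind_op map = {
        .op         = MSM_VM_BIND_OP_MAP,
        .handle     = bo_handle,        /* from DRM_IOCTL_MSM_GEM_NEW */
        .obj_offset = 0,                /* start of the BO */
        .iova       = 0x100000000ull,   /* userspace-chosen GPU address */
        .range      = 64 * 1024,        /* bytes to map */
        .flags      = MSM_VM_BIND_OP_DUMP,
};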
+
+#define MSM_VM_BIND_FENCE_FD_IN 0x00000001
+#define MSM_VM_BIND_FENCE_FD_OUT 0x00000002
+#define MSM_VM_BIND_FLAGS ( \
+ MSM_VM_BIND_FENCE_FD_IN | \
+ MSM_VM_BIND_FENCE_FD_OUT | \
+ 0)
+
+/**
+ * struct drm_msm_vm_bind - Input of &DRM_IOCTL_MSM_VM_BIND
+ */
+struct drm_msm_vm_bind {
+ /** @flags: in, bitmask of MSM_VM_BIND_FLAGS */
+ __u32 flags;
+ /** @nr_ops: the number of bind ops in this ioctl */
+ __u32 nr_ops;
+ /** @fence_fd: in/out fence fd (see MSM_VM_BIND_FENCE_FD_IN/OUT) */
+ __s32 fence_fd;
+ /** @queue_id: in, submitqueue id */
+ __u32 queue_id;
+ /** @in_syncobjs: in, ptr to array of drm_msm_syncobj */
+ __u64 in_syncobjs;
+ /** @out_syncobjs: in, ptr to array of drm_msm_syncobj */
+ __u64 out_syncobjs;
+ /** @nr_in_syncobjs: in, number of entries in in_syncobj */
+ __u32 nr_in_syncobjs;
+ /** @nr_out_syncobjs: in, number of entries in out_syncobj */
+ __u32 nr_out_syncobjs;
+ /** @syncobj_stride: in, stride of syncobj arrays */
+ __u32 syncobj_stride;
+ /** @op_stride: sizeof each struct drm_msm_vm_bind_op in @ops */
+ __u32 op_stride;
+ union {
+ /** @op: used if nr_ops == 1 */
+ struct drm_msm_vm_bind_op op;
+ /** @ops: userptr to array of drm_msm_vm_bind_op if nr_ops > 1 */
+ __u64 ops;
+ };
};
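A sketch of submitting that single op inline (no userptr array is needed for nr_ops == 1), with an out-fence to learn when the mapping is ready; vm_bind_queue_id is hypothetical, its creation is sketched further below:

struct drm_msm_vm_bind bind = {
        .flags    = MSM_VM_BIND_FENCE_FD_OUT,
        .nr_ops   = 1,
        .queue_id = vm_bind_queue_id,  /* a MSM_SUBMITQUEUE_VM_BIND queue */
        .op       = map,               /* inline op from the sketch above */
};

if (drmIoctl(fd, DRM_IOCTL_MSM_VM_BIND, &bind) == 0) {
        /* bind.fence_fd signals once the MAP has been applied; for
         * nr_ops > 1, point .ops at an array and set .op_stride to
         * sizeof(struct drm_msm_vm_bind_op) instead. */
}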
#define MSM_WAIT_FENCE_BOOST 0x00000001
@@ -345,12 +455,19 @@ struct drm_msm_gem_madvise {
/*
* Draw queues allow the user to set specific submission parameters. Command
* submissions specify a specific submitqueue to use. ID 0 is reserved for
- * backwards compatibility as a "default" submitqueue
+ * backwards compatibility as a "default" submitqueue.
+ *
+ * Because VM_BIND async updates happen on the CPU, they must run on a
+ * virtual queue created with the flag MSM_SUBMITQUEUE_VM_BIND. If we had
+ * a way to do pgtable updates on the GPU, we could drop this restriction.
*/
#define MSM_SUBMITQUEUE_ALLOW_PREEMPT 0x00000001
+#define MSM_SUBMITQUEUE_VM_BIND 0x00000002 /* virtual queue for VM_BIND ops */
+
#define MSM_SUBMITQUEUE_FLAGS ( \
MSM_SUBMITQUEUE_ALLOW_PREEMPT | \
+ MSM_SUBMITQUEUE_VM_BIND | \
0)
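To round out the earlier sketches, creating the virtual queue used for VM_BIND ops might look like:

struct drm_msm_submitqueue q = {
        .flags = MSM_SUBMITQUEUE_VM_BIND,
};

if (drmIoctl(fd, DRM_IOCTL_MSM_SUBMITQUEUE_NEW, &q) == 0)
        vm_bind_queue_id = q.id;  /* out, used in drm_msm_vm_bind above */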
/*
@@ -388,6 +505,7 @@ struct drm_msm_submitqueue_query {
#define DRM_MSM_SUBMITQUEUE_NEW 0x0A
#define DRM_MSM_SUBMITQUEUE_CLOSE 0x0B
#define DRM_MSM_SUBMITQUEUE_QUERY 0x0C
+#define DRM_MSM_VM_BIND 0x0D
#define DRM_IOCTL_MSM_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GET_PARAM, struct drm_msm_param)
#define DRM_IOCTL_MSM_SET_PARAM DRM_IOW (DRM_COMMAND_BASE + DRM_MSM_SET_PARAM, struct drm_msm_param)
@@ -401,6 +519,7 @@ struct drm_msm_submitqueue_query {
#define DRM_IOCTL_MSM_SUBMITQUEUE_NEW DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_SUBMITQUEUE_NEW, struct drm_msm_submitqueue)
#define DRM_IOCTL_MSM_SUBMITQUEUE_CLOSE DRM_IOW (DRM_COMMAND_BASE + DRM_MSM_SUBMITQUEUE_CLOSE, __u32)
#define DRM_IOCTL_MSM_SUBMITQUEUE_QUERY DRM_IOW (DRM_COMMAND_BASE + DRM_MSM_SUBMITQUEUE_QUERY, struct drm_msm_submitqueue_query)
+#define DRM_IOCTL_MSM_VM_BIND DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_VM_BIND, struct drm_msm_vm_bind)
#if defined(__cplusplus)
}