Diffstat (limited to 'include/uapi/drm')
-rw-r--r--  include/uapi/drm/amdgpu_drm.h   |  319
-rw-r--r--  include/uapi/drm/asahi_drm.h    | 1194
-rw-r--r--  include/uapi/drm/drm.h          |    4
-rw-r--r--  include/uapi/drm/drm_fourcc.h   |  101
-rw-r--r--  include/uapi/drm/ivpu_accel.h   |   18
-rw-r--r--  include/uapi/drm/msm_drm.h      |  149
-rw-r--r--  include/uapi/drm/nova_drm.h     |  101
-rw-r--r--  include/uapi/drm/panfrost_drm.h |   21
-rw-r--r--  include/uapi/drm/panthor_drm.h  |   64
-rw-r--r--  include/uapi/drm/virtgpu_drm.h  |    6
-rw-r--r--  include/uapi/drm/xe_drm.h       |   23
11 files changed, 1977 insertions(+), 23 deletions(-)
diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
index 25d5c6e90a99..bdedbaccf776 100644
--- a/include/uapi/drm/amdgpu_drm.h
+++ b/include/uapi/drm/amdgpu_drm.h
@@ -54,6 +54,9 @@ extern "C" {
#define DRM_AMDGPU_VM 0x13
#define DRM_AMDGPU_FENCE_TO_HANDLE 0x14
#define DRM_AMDGPU_SCHED 0x15
+#define DRM_AMDGPU_USERQ 0x16
+#define DRM_AMDGPU_USERQ_SIGNAL 0x17
+#define DRM_AMDGPU_USERQ_WAIT 0x18
#define DRM_IOCTL_AMDGPU_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_CREATE, union drm_amdgpu_gem_create)
#define DRM_IOCTL_AMDGPU_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_MMAP, union drm_amdgpu_gem_mmap)
@@ -71,6 +74,9 @@ extern "C" {
#define DRM_IOCTL_AMDGPU_VM DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_VM, union drm_amdgpu_vm)
#define DRM_IOCTL_AMDGPU_FENCE_TO_HANDLE DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_FENCE_TO_HANDLE, union drm_amdgpu_fence_to_handle)
#define DRM_IOCTL_AMDGPU_SCHED DRM_IOW(DRM_COMMAND_BASE + DRM_AMDGPU_SCHED, union drm_amdgpu_sched)
+#define DRM_IOCTL_AMDGPU_USERQ DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_USERQ, union drm_amdgpu_userq)
+#define DRM_IOCTL_AMDGPU_USERQ_SIGNAL DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_USERQ_SIGNAL, struct drm_amdgpu_userq_signal)
+#define DRM_IOCTL_AMDGPU_USERQ_WAIT DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_USERQ_WAIT, struct drm_amdgpu_userq_wait)
/**
* DOC: memory domains
@@ -319,6 +325,260 @@ union drm_amdgpu_ctx {
union drm_amdgpu_ctx_out out;
};
+/* user queue IOCTL operations */
+#define AMDGPU_USERQ_OP_CREATE 1
+#define AMDGPU_USERQ_OP_FREE 2
+
+/* queue priority levels */
+/* low < normal low < normal high < high */
+#define AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_MASK 0x3
+#define AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_SHIFT 0
+#define AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_NORMAL_LOW 0
+#define AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_LOW 1
+#define AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_NORMAL_HIGH 2
+#define AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_HIGH 3 /* admin only */
+/* for queues that need access to protected content */
+#define AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE (1 << 2)
+
+/*
+ * This structure is a container to pass input configuration
+ * info for all supported userqueue related operations.
+ * For operation AMDGPU_USERQ_OP_CREATE: the user is expected
+ * to set all fields, except the parameter 'queue_id'.
+ * For operation AMDGPU_USERQ_OP_FREE: the only input parameter expected
+ * to be set is 'queue_id', everything else is ignored.
+ */
+struct drm_amdgpu_userq_in {
+ /** AMDGPU_USERQ_OP_* */
+ __u32 op;
+ /** Queue id passed for operation USERQ_OP_FREE */
+ __u32 queue_id;
+ /** the target GPU engine to execute workload (AMDGPU_HW_IP_*) */
+ __u32 ip_type;
+ /**
+ * @doorbell_handle: the handle of doorbell GEM object
+ * associated with this userqueue client.
+ */
+ __u32 doorbell_handle;
+ /**
+ * @doorbell_offset: 32-bit offset of the doorbell in the doorbell bo.
+ * Kernel will generate absolute doorbell offset using doorbell_handle
+ * and doorbell_offset in the doorbell bo.
+ */
+ __u32 doorbell_offset;
+ /**
+ * @flags: flags used for queue parameters
+ */
+ __u32 flags;
+ /**
+ * @queue_va: Virtual address of the GPU memory which holds the queue
+ * object. The queue holds the workload packets.
+ */
+ __u64 queue_va;
+ /**
+ * @queue_size: Size of the queue in bytes; this needs to be 256-byte
+ * aligned.
+ */
+ __u64 queue_size;
+ /**
+ * @rptr_va: Virtual address of the GPU memory which holds the ring RPTR.
+ * This object must be at least 8 bytes in size and 8-byte aligned.
+ */
+ __u64 rptr_va;
+ /**
+ * @wptr_va: Virtual address of the GPU memory which holds the ring WPTR.
+ * This object must be at least 8 bytes in size and 8-byte aligned.
+ *
+ * Queue, RPTR and WPTR can come from the same object, as long as the
+ * size and alignment requirements are met.
+ */
+ __u64 wptr_va;
+ /**
+ * @mqd: MQD (memory queue descriptor) is a set of parameters which allow
+ * the GPU to uniquely define and identify a usermode queue.
+ *
+ * MQD data can be of different size for different GPU IP/engine and
+ * their respective versions/revisions, so this points to a __u64 *
+ * which holds IP specific MQD of this usermode queue.
+ */
+ __u64 mqd;
+ /**
+ * @mqd_size: Size of the MQD data in bytes. It must match the MQD structure
+ * size of the respective engine/revision defined in the UAPI, e.g. for
+ * gfx11 workloads, mqd_size = sizeof(struct drm_amdgpu_userq_mqd_gfx11).
+ */
+ __u64 mqd_size;
+};
+
+/* The structure to carry output of userqueue ops */
+struct drm_amdgpu_userq_out {
+ /**
+ * @queue_id: For operation AMDGPU_USERQ_OP_CREATE, this field contains a
+ * unique queue ID representing the newly created userqueue in the system.
+ * For all other operations it should be ignored.
+ */
+ __u32 queue_id;
+ __u32 _pad;
+};
+
+union drm_amdgpu_userq {
+ struct drm_amdgpu_userq_in in;
+ struct drm_amdgpu_userq_out out;
+};
+
+/* GFX V11 IP specific MQD parameters */
+struct drm_amdgpu_userq_mqd_gfx11 {
+ /**
+ * @shadow_va: Virtual address of the GPU memory to hold the shadow buffer.
+ * Use the AMDGPU_INFO IOCTL to find the exact size of the object.
+ */
+ __u64 shadow_va;
+ /**
+ * @csa_va: Virtual address of the GPU memory to hold the CSA buffer.
+ * Use the AMDGPU_INFO IOCTL to find the exact size of the object.
+ */
+ __u64 csa_va;
+};
+
+/* GFX V11 SDMA IP specific MQD parameters */
+struct drm_amdgpu_userq_mqd_sdma_gfx11 {
+ /**
+ * @csa_va: Virtual address of the GPU memory to hold the CSA buffer.
+ * This must come from a separate GPU object; use the AMDGPU_INFO IOCTL
+ * to get the size.
+ */
+ __u64 csa_va;
+};
+
+/* GFX V11 Compute IP specific MQD parameters */
+struct drm_amdgpu_userq_mqd_compute_gfx11 {
+ /**
+ * @eop_va: Virtual address of the GPU memory to hold the EOP buffer.
+ * This must come from a separate GPU object; use the AMDGPU_INFO IOCTL
+ * to get the size.
+ */
+ __u64 eop_va;
+};
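
Usage sketch (editor's illustration, not part of the patch): a minimal C helper creating a GFX11 user queue, assuming an open render-node fd and a doorbell BO, ring, RPTR/WPTR, shadow and CSA VAs already set up elsewhere. All values are placeholders.

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/amdgpu_drm.h>

/* Hypothetical helper; all handles and VAs are assumed to be valid. */
static int create_gfx11_userq(int fd, __u32 doorbell_handle,
                              __u64 queue_va, __u64 rptr_va, __u64 wptr_va,
                              __u64 shadow_va, __u64 csa_va, __u32 *queue_id)
{
        struct drm_amdgpu_userq_mqd_gfx11 mqd = {
                .shadow_va = shadow_va, /* sized via AMDGPU_INFO_UQ_FW_AREAS */
                .csa_va = csa_va,
        };
        union drm_amdgpu_userq req;

        memset(&req, 0, sizeof(req));
        req.in.op = AMDGPU_USERQ_OP_CREATE;
        req.in.ip_type = AMDGPU_HW_IP_GFX;
        req.in.doorbell_handle = doorbell_handle;
        req.in.doorbell_offset = 0;
        req.in.flags = AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_NORMAL_LOW;
        req.in.queue_va = queue_va;      /* 256-byte aligned ring */
        req.in.queue_size = 64 * 1024;
        req.in.rptr_va = rptr_va;        /* >= 8 bytes, 8-byte aligned */
        req.in.wptr_va = wptr_va;
        req.in.mqd = (__u64)(uintptr_t)&mqd;
        req.in.mqd_size = sizeof(mqd);

        if (ioctl(fd, DRM_IOCTL_AMDGPU_USERQ, &req))
                return -1;
        *queue_id = req.out.queue_id;
        return 0;
}

Tearing the queue down is the mirror image: AMDGPU_USERQ_OP_FREE with only queue_id set, everything else ignored.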
+
+/* userq signal/wait ioctl */
+struct drm_amdgpu_userq_signal {
+ /**
+ * @queue_id: Queue handle used by the userq fence creation function
+ * to retrieve the WPTR.
+ */
+ __u32 queue_id;
+ __u32 pad;
+ /**
+ * @syncobj_handles: The list of syncobj handles submitted by the user queue
+ * job to be signaled.
+ */
+ __u64 syncobj_handles;
+ /**
+ * @num_syncobj_handles: A count that represents the number of syncobj handles in
+ * @syncobj_handles.
+ */
+ __u64 num_syncobj_handles;
+ /**
+ * @bo_read_handles: The list of BO handles that the submitted user queue job
+ * is using for read only. This will update BO fences in the kernel.
+ */
+ __u64 bo_read_handles;
+ /**
+ * @bo_write_handles: The list of BO handles that the submitted user queue job
+ * is using for write only. This will update BO fences in the kernel.
+ */
+ __u64 bo_write_handles;
+ /**
+ * @num_bo_read_handles: A count that represents the number of read BO handles in
+ * @bo_read_handles.
+ */
+ __u32 num_bo_read_handles;
+ /**
+ * @num_bo_write_handles: A count that represents the number of write BO handles in
+ * @bo_write_handles.
+ */
+ __u32 num_bo_write_handles;
+};
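
A hedged sketch of the signal path follows (editor's illustration); it assumes the queue's doorbell has already been rung for the work being fenced, and leaves the read-BO list empty:

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/amdgpu_drm.h>

/* Hypothetical helper: attach the queue's current WPTR fence to a set of
 * syncobjs and to the BOs the job writes. */
static int userq_signal(int fd, __u32 queue_id,
                        const __u32 *syncobjs, __u64 num_syncobjs,
                        const __u32 *write_bos, __u32 num_write_bos)
{
        struct drm_amdgpu_userq_signal args = {
                .queue_id = queue_id,
                .syncobj_handles = (__u64)(uintptr_t)syncobjs,
                .num_syncobj_handles = num_syncobjs,
                .bo_write_handles = (__u64)(uintptr_t)write_bos,
                .num_bo_write_handles = num_write_bos,
        };

        return ioctl(fd, DRM_IOCTL_AMDGPU_USERQ_SIGNAL, &args);
}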
+
+struct drm_amdgpu_userq_fence_info {
+ /**
+ * @va: A gpu address allocated for each queue which stores the
+ * read pointer (RPTR) value.
+ */
+ __u64 va;
+ /**
+ * @value: A 64-bit value representing the write pointer (WPTR) of the
+ * queue commands, which is compared with the RPTR value to signal the
+ * fences.
+ */
+ __u64 value;
+};
+
+struct drm_amdgpu_userq_wait {
+ /**
+ * @waitq_id: Queue handle used by the userq wait IOCTL to retrieve the
+ * wait queue and maintain the fence driver references in it.
+ */
+ __u32 waitq_id;
+ __u32 pad;
+ /**
+ * @syncobj_handles: The list of syncobj handles submitted by the user queue
+ * job to get the va/value pairs.
+ */
+ __u64 syncobj_handles;
+ /**
+ * @syncobj_timeline_handles: The list of timeline syncobj handles submitted by
+ * the user queue job to get the va/value pairs at given @syncobj_timeline_points.
+ */
+ __u64 syncobj_timeline_handles;
+ /**
+ * @syncobj_timeline_points: The list of timeline syncobj points submitted by the
+ * user queue job for the corresponding @syncobj_timeline_handles.
+ */
+ __u64 syncobj_timeline_points;
+ /**
+ * @bo_read_handles: The list of read BO handles submitted by the user queue
+ * job to get the va/value pairs.
+ */
+ __u64 bo_read_handles;
+ /**
+ * @bo_write_handles: The list of write BO handles submitted by the user queue
+ * job to get the va/value pairs.
+ */
+ __u64 bo_write_handles;
+ /**
+ * @num_syncobj_timeline_handles: A count that represents the number of timeline
+ * syncobj handles in @syncobj_timeline_handles.
+ */
+ __u16 num_syncobj_timeline_handles;
+ /**
+ * @num_fences: This field can be used both as input and output. As input it defines
+ * the maximum number of fences that can be returned and as output it will specify
+ * how many fences were actually returned from the ioctl.
+ */
+ __u16 num_fences;
+ /**
+ * @num_syncobj_handles: A count that represents the number of syncobj handles in
+ * @syncobj_handles.
+ */
+ __u32 num_syncobj_handles;
+ /**
+ * @num_bo_read_handles: A count that represents the number of read BO handles in
+ * @bo_read_handles.
+ */
+ __u32 num_bo_read_handles;
+ /**
+ * @num_bo_write_handles: A count that represents the number of write BO handles in
+ * @bo_write_handles.
+ */
+ __u32 num_bo_write_handles;
+ /**
+ * @out_fences: The field is a return value from the ioctl containing the list of
+ * address/value pairs to wait for.
+ */
+ __u64 out_fences;
+};
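
As an illustration (not from the patch), a wait might look like the sketch below. Since @num_fences is both input and output, the caller sizes the output array up front; the capacity here is arbitrary.

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/amdgpu_drm.h>

#define MAX_WAIT_FENCES 16 /* caller-chosen capacity */

/* Hypothetical helper: translate syncobjs into va/value pairs the user
 * queue can wait on from userspace. Returns the pair count, or -1. */
static int userq_wait(int fd, __u32 waitq_id,
                      const __u32 *syncobjs, __u32 num_syncobjs,
                      struct drm_amdgpu_userq_fence_info *fences)
{
        struct drm_amdgpu_userq_wait args = {
                .waitq_id = waitq_id,
                .syncobj_handles = (__u64)(uintptr_t)syncobjs,
                .num_syncobj_handles = num_syncobjs,
                .num_fences = MAX_WAIT_FENCES, /* in: capacity, out: count */
                .out_fences = (__u64)(uintptr_t)fences,
        };

        if (ioctl(fd, DRM_IOCTL_AMDGPU_USERQ_WAIT, &args))
                return -1;
        return args.num_fences;
}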
+
/* vm ioctl */
#define AMDGPU_VM_OP_RESERVE_VMID 1
#define AMDGPU_VM_OP_UNRESERVE_VMID 2
@@ -599,6 +859,19 @@ struct drm_amdgpu_gem_va {
__u64 offset_in_bo;
/** Specify mapping size. Must be correctly aligned. */
__u64 map_size;
+ /**
+ * @vm_timeline_point: Sequence number used to add a new timeline point.
+ */
+ __u64 vm_timeline_point;
+ /**
+ * @vm_timeline_syncobj_out: The VM page table update fence is installed
+ * in this syncobj at @vm_timeline_point.
+ */
+ __u32 vm_timeline_syncobj_out;
+ /** The number of syncobj handles in @input_fence_syncobj_handles */
+ __u32 num_syncobj_handles;
+ /** Array of syncobj handles to wait on as input fences */
+ __u64 input_fence_syncobj_handles;
};
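
The new fields turn GEM_VA into a fenced operation. A minimal sketch (editor's illustration) follows, assuming the pre-existing handle/operation/flags fields from the full header; the exact synchronization policy is driver-defined.

#include <sys/ioctl.h>
#include <drm/amdgpu_drm.h>

/* Hypothetical helper: map a BO and have the page-table update fence
 * installed at `point` on the caller's timeline syncobj. */
static int gem_va_map_fenced(int fd, __u32 bo_handle, __u64 va, __u64 size,
                             __u32 timeline_syncobj, __u64 point)
{
        struct drm_amdgpu_gem_va args = {
                .handle = bo_handle,
                .operation = AMDGPU_VA_OP_MAP,
                .flags = AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE,
                .va_address = va,
                .offset_in_bo = 0,
                .map_size = size,
                .vm_timeline_point = point,
                .vm_timeline_syncobj_out = timeline_syncobj,
        };

        return ioctl(fd, DRM_IOCTL_AMDGPU_GEM_VA, &args);
}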
#define AMDGPU_HW_IP_GFX 0
@@ -940,6 +1213,8 @@ struct drm_amdgpu_cs_chunk_cp_gfx_shadow {
#define AMDGPU_INFO_MAX_IBS 0x22
/* query last page fault info */
#define AMDGPU_INFO_GPUVM_FAULT 0x23
+/* query FW object size and alignment */
+#define AMDGPU_INFO_UQ_FW_AREAS 0x24
#define AMDGPU_INFO_MMR_SE_INDEX_SHIFT 0
#define AMDGPU_INFO_MMR_SE_INDEX_MASK 0xff
@@ -1093,6 +1368,7 @@ struct drm_amdgpu_info_vbios {
#define AMDGPU_VRAM_TYPE_DDR5 10
#define AMDGPU_VRAM_TYPE_LPDDR4 11
#define AMDGPU_VRAM_TYPE_LPDDR5 12
+#define AMDGPU_VRAM_TYPE_HBM3E 13
struct drm_amdgpu_info_device {
/** PCI Device ID */
@@ -1198,6 +1474,9 @@ struct drm_amdgpu_info_device {
__u32 csa_size;
/* context save area base virtual alignment for gfx11 */
__u32 csa_alignment;
+ /* Userq IP mask (1 << AMDGPU_HW_IP_*) */
+ __u32 userq_ip_mask;
+ __u32 pad;
};
struct drm_amdgpu_info_hw_ip {
@@ -1214,6 +1493,29 @@ struct drm_amdgpu_info_hw_ip {
__u32 available_rings;
/** version info: bits 23:16 major, 15:8 minor, 7:0 revision */
__u32 ip_discovery_version;
+ /* Userq available slots */
+ __u32 userq_num_slots;
+};
+
+/* GFX metadata BO sizes and alignment info (in bytes) */
+struct drm_amdgpu_info_uq_fw_areas_gfx {
+ /* shadow area size */
+ __u32 shadow_size;
+ /* shadow area base virtual mem alignment */
+ __u32 shadow_alignment;
+ /* context save area size */
+ __u32 csa_size;
+ /* context save area base virtual mem alignment */
+ __u32 csa_alignment;
+};
+
+/* IP specific fw related information used in the
+ * subquery AMDGPU_INFO_UQ_FW_AREAS
+ */
+struct drm_amdgpu_info_uq_fw_areas {
+ union {
+ struct drm_amdgpu_info_uq_fw_areas_gfx gfx;
+ };
};
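
A hedged sketch of the sub-query (editor's illustration), using the existing struct drm_amdgpu_info request convention (return_pointer/return_size/query):

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/amdgpu_drm.h>

/* Hypothetical helper: fetch the shadow/CSA sizes and alignments needed
 * to allocate the FW objects referenced by the GFX11 MQD above. */
static int query_uq_fw_areas(int fd, struct drm_amdgpu_info_uq_fw_areas *out)
{
        struct drm_amdgpu_info req = {
                .return_pointer = (__u64)(uintptr_t)out,
                .return_size = sizeof(*out),
                .query = AMDGPU_INFO_UQ_FW_AREAS,
        };

        return ioctl(fd, DRM_IOCTL_AMDGPU_INFO, &req);
}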
struct drm_amdgpu_info_num_handles {
@@ -1279,6 +1581,23 @@ struct drm_amdgpu_info_gpuvm_fault {
__u32 vmhub;
};
+struct drm_amdgpu_info_uq_metadata_gfx {
+ /* shadow area size for gfx11 */
+ __u32 shadow_size;
+ /* shadow area base virtual alignment for gfx11 */
+ __u32 shadow_alignment;
+ /* context save area size for gfx11 */
+ __u32 csa_size;
+ /* context save area base virtual alignment for gfx11 */
+ __u32 csa_alignment;
+};
+
+struct drm_amdgpu_info_uq_metadata {
+ union {
+ struct drm_amdgpu_info_uq_metadata_gfx gfx;
+ };
+};
+
/*
* Supported GPU families
*/
diff --git a/include/uapi/drm/asahi_drm.h b/include/uapi/drm/asahi_drm.h
new file mode 100644
index 000000000000..de67f1c603af
--- /dev/null
+++ b/include/uapi/drm/asahi_drm.h
@@ -0,0 +1,1194 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright (C) The Asahi Linux Contributors
+ * Copyright (C) 2018-2023 Collabora Ltd.
+ * Copyright (C) 2014-2018 Broadcom
+ */
+#ifndef _ASAHI_DRM_H_
+#define _ASAHI_DRM_H_
+
+#include "drm.h"
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/**
+ * DOC: Introduction to the Asahi UAPI
+ *
+ * This documentation describes the Asahi IOCTLs.
+ *
+ * Just a few generic rules about the data passed to the Asahi IOCTLs (cribbed
+ * from Panthor):
+ *
+ * - Structures must be aligned on 64-bit/8-byte. If the object is not
+ * naturally aligned, a padding field must be added.
+ * - Fields must be explicitly aligned to their natural type alignment with
+ * pad[0..N] fields.
+ * - All padding fields will be checked by the driver to make sure they are
+ * zeroed.
+ * - Flags can be added, but not removed/replaced.
+ * - New fields can be added to the main structures (the structures
+ * directly passed to the ioctl). Those fields can be added at the end of
+ * the structure, or replace existing padding fields. Any new field being
+ * added must preserve the behavior that existed before those fields were
+ * added when a value of zero is passed.
+ * - New fields can be added to indirect objects (objects pointed by the
+ * main structure), iff those objects are passed a size to reflect the
+ * size known by the userspace driver (see
+ * drm_asahi_cmd_header::size).
+ * - If the kernel driver is too old to know some fields, those will be
+ * ignored if zero, and otherwise rejected (and so will be zero on output).
+ * - If userspace is too old to know some fields, those will be zeroed
+ * (input) before the structure is parsed by the kernel driver.
+ * - Each new flag/field addition must come with a driver version update so
+ * the userspace driver doesn't have to guess which flags are supported.
+ * - Structures should not contain unions, as this would defeat the
+ * extensibility of such structures.
+ * - IOCTLs can't be removed or replaced. New IOCTL IDs should be placed
+ * at the end of the drm_asahi_ioctl_id enum.
+ */
+
+/**
+ * enum drm_asahi_ioctl_id - IOCTL IDs
+ *
+ * Place new ioctls at the end, don't re-order, don't replace or remove entries.
+ *
+ * These IDs are not meant to be used directly. Use the DRM_IOCTL_ASAHI_xxx
+ * definitions instead.
+ */
+enum drm_asahi_ioctl_id {
+ /** @DRM_ASAHI_GET_PARAMS: Query device properties. */
+ DRM_ASAHI_GET_PARAMS = 0,
+
+ /** @DRM_ASAHI_GET_TIME: Query device time. */
+ DRM_ASAHI_GET_TIME,
+
+ /** @DRM_ASAHI_VM_CREATE: Create a GPU VM address space. */
+ DRM_ASAHI_VM_CREATE,
+
+ /** @DRM_ASAHI_VM_DESTROY: Destroy a VM. */
+ DRM_ASAHI_VM_DESTROY,
+
+ /** @DRM_ASAHI_VM_BIND: Bind/unbind memory to a VM. */
+ DRM_ASAHI_VM_BIND,
+
+ /** @DRM_ASAHI_GEM_CREATE: Create a buffer object. */
+ DRM_ASAHI_GEM_CREATE,
+
+ /**
+ * @DRM_ASAHI_GEM_MMAP_OFFSET: Get offset to pass to mmap() to map a
+ * given GEM handle.
+ */
+ DRM_ASAHI_GEM_MMAP_OFFSET,
+
+ /** @DRM_ASAHI_GEM_BIND_OBJECT: Bind memory as a special object */
+ DRM_ASAHI_GEM_BIND_OBJECT,
+
+ /** @DRM_ASAHI_QUEUE_CREATE: Create a scheduling queue. */
+ DRM_ASAHI_QUEUE_CREATE,
+
+ /** @DRM_ASAHI_QUEUE_DESTROY: Destroy a scheduling queue. */
+ DRM_ASAHI_QUEUE_DESTROY,
+
+ /** @DRM_ASAHI_SUBMIT: Submit commands to a queue. */
+ DRM_ASAHI_SUBMIT,
+};
+
+#define DRM_ASAHI_MAX_CLUSTERS 64
+
+/**
+ * struct drm_asahi_params_global - Global parameters.
+ *
+ * This struct may be queried by drm_asahi_get_params.
+ */
+struct drm_asahi_params_global {
+ /** @features: Feature bits from drm_asahi_feature */
+ __u64 features;
+
+ /** @gpu_generation: GPU generation, e.g. 13 for G13G */
+ __u32 gpu_generation;
+
+ /** @gpu_variant: GPU variant as a character, e.g. 'C' for G13C */
+ __u32 gpu_variant;
+
+ /**
+ * @gpu_revision: GPU revision in BCD, e.g. 0x00 for 'A0' or
+ * 0x21 for 'C1'
+ */
+ __u32 gpu_revision;
+
+ /** @chip_id: Chip ID in BCD, e.g. 0x8103 for T8103 */
+ __u32 chip_id;
+
+ /** @num_dies: Number of dies in the SoC */
+ __u32 num_dies;
+
+ /** @num_clusters_total: Number of GPU clusters (across all dies) */
+ __u32 num_clusters_total;
+
+ /**
+ * @num_cores_per_cluster: Number of logical cores per cluster
+ * (including inactive/nonexistent)
+ */
+ __u32 num_cores_per_cluster;
+
+ /** @max_frequency_khz: Maximum GPU core clock frequency */
+ __u32 max_frequency_khz;
+
+ /** @core_masks: Bitmask of present/enabled cores per cluster */
+ __u64 core_masks[DRM_ASAHI_MAX_CLUSTERS];
+
+ /**
+ * @vm_start: VM range start VMA. Together with @vm_end, this defines
+ * the window of valid GPU VAs. Userspace is expected to subdivide VAs
+ * out of this window.
+ *
+ * This window contains all virtual addresses that userspace needs to
+ * know about. There may be kernel-internal GPU VAs outside this range,
+ * but that detail is not relevant here.
+ */
+ __u64 vm_start;
+
+ /** @vm_end: VM range end VMA */
+ __u64 vm_end;
+
+ /**
+ * @vm_kernel_min_size: Minimum kernel VMA window size.
+ *
+ * When creating a VM, userspace is required to carve out a section of
+ * virtual addresses (within the range given by @vm_start and
+ * @vm_end). The kernel will allocate various internal structures
+ * within the specified VA range.
+ *
+ * Allowing userspace to choose the VA range for the kernel, rather than
+ * the kernel reserving VAs and requiring userspace to cope, can assist
+ * in implementing SVM.
+ */
+ __u64 vm_kernel_min_size;
+
+ /**
+ * @max_commands_per_submission: Maximum number of supported commands
+ * per submission. This mirrors firmware limits. Userspace must split up
+ * larger command buffers, which may require inserting additional
+ * synchronization.
+ */
+ __u32 max_commands_per_submission;
+
+ /**
+ * @max_attachments: Maximum number of drm_asahi_attachment's per
+ * command
+ */
+ __u32 max_attachments;
+
+ /**
+ * @command_timestamp_frequency_hz: Timebase frequency for timestamps
+ * written during command execution, specified via drm_asahi_timestamp
+ * structures. As this rate is controlled by the firmware, it is a
+ * queryable parameter.
+ *
+ * Userspace must divide by this frequency to convert timestamps to
+ * seconds, rather than hardcoding a particular firmware's rate.
+ */
+ __u64 command_timestamp_frequency_hz;
+};
+
+/**
+ * enum drm_asahi_feature - Feature bits
+ *
+ * This covers only features that userspace cannot infer from the architecture
+ * version. Most features don't need to be here.
+ */
+enum drm_asahi_feature {
+ /**
+ * @DRM_ASAHI_FEATURE_SOFT_FAULTS: GPU has "soft fault" enabled. Shader
+ * loads of unmapped memory will return zero. Shader stores to unmapped
+ * memory will be silently discarded. Note that only shader load/store
+ * is affected. Other hardware units are not affected, notably including
+ * texture sampling.
+ *
+ * Soft fault is set when initializing the GPU and cannot be runtime
+ * toggled. Therefore, it is exposed as a feature bit and not a
+ * userspace-settable flag on the VM. When soft fault is enabled,
+ * userspace can speculate memory accesses more aggressively.
+ */
+ DRM_ASAHI_FEATURE_SOFT_FAULTS = (1UL) << 0,
+};
+
+/**
+ * struct drm_asahi_get_params - Arguments passed to DRM_IOCTL_ASAHI_GET_PARAMS
+ */
+struct drm_asahi_get_params {
+ /** @param_group: Parameter group to fetch (MBZ) */
+ __u32 param_group;
+
+ /** @pad: MBZ */
+ __u32 pad;
+
+ /** @pointer: User pointer to write parameter struct */
+ __u64 pointer;
+
+ /**
+ * @size: Size of the user buffer. In case of older userspace, this may
+ * be less than sizeof(struct drm_asahi_params_global). The kernel will
+ * not write past the length specified here, allowing extensibility.
+ */
+ __u64 size;
+};
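
The @size handshake is what makes the params struct growable. A minimal sketch (editor's illustration), assuming only an open DRM fd:

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/asahi_drm.h>

/* The kernel writes at most `size` bytes, so an old userspace compiled
 * against a smaller drm_asahi_params_global keeps working unchanged. */
static int asahi_get_params(int fd, struct drm_asahi_params_global *p)
{
        struct drm_asahi_get_params args = {
                .param_group = 0, /* MBZ today */
                .pointer = (__u64)(uintptr_t)p,
                .size = sizeof(*p),
        };

        return ioctl(fd, DRM_IOCTL_ASAHI_GET_PARAMS, &args);
}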
+
+/**
+ * struct drm_asahi_vm_create - Arguments passed to DRM_IOCTL_ASAHI_VM_CREATE
+ */
+struct drm_asahi_vm_create {
+ /**
+ * @kernel_start: Start of the kernel-reserved address range. See
+ * drm_asahi_params_global::vm_kernel_min_size.
+ *
+ * Both @kernel_start and @kernel_end must be within the range of
+ * valid VAs given by drm_asahi_params_global::vm_start and
+ * drm_asahi_params_global::vm_end. The size of the kernel range
+ * (@kernel_end - @kernel_start) must be at least
+ * drm_asahi_params_global::vm_kernel_min_size.
+ *
+ * Userspace must not bind any memory on this VM into this reserved
+ * range, it is for kernel use only.
+ */
+ __u64 kernel_start;
+
+ /**
+ * @kernel_end: End of the kernel-reserved address range. See
+ * @kernel_start.
+ */
+ __u64 kernel_end;
+
+ /** @vm_id: Returned VM ID */
+ __u32 vm_id;
+
+ /** @pad: MBZ */
+ __u32 pad;
+};
+
+/**
+ * struct drm_asahi_vm_destroy - Arguments passed to DRM_IOCTL_ASAHI_VM_DESTROY
+ */
+struct drm_asahi_vm_destroy {
+ /** @vm_id: VM ID to be destroyed */
+ __u32 vm_id;
+
+ /** @pad: MBZ */
+ __u32 pad;
+};
+
+/**
+ * enum drm_asahi_gem_flags - Flags for GEM creation
+ */
+enum drm_asahi_gem_flags {
+ /**
+ * @DRM_ASAHI_GEM_WRITEBACK: BO should be CPU-mapped as writeback.
+ *
+ * Map as writeback instead of write-combine. This optimizes for CPU
+ * reads.
+ */
+ DRM_ASAHI_GEM_WRITEBACK = (1L << 0),
+
+ /**
+ * @DRM_ASAHI_GEM_VM_PRIVATE: BO is private to this GPU VM (no exports).
+ */
+ DRM_ASAHI_GEM_VM_PRIVATE = (1L << 1),
+};
+
+/**
+ * struct drm_asahi_gem_create - Arguments passed to DRM_IOCTL_ASAHI_GEM_CREATE
+ */
+struct drm_asahi_gem_create {
+ /** @size: Size of the BO */
+ __u64 size;
+
+ /** @flags: Combination of drm_asahi_gem_flags flags. */
+ __u32 flags;
+
+ /**
+ * @vm_id: VM ID to assign to the BO, if DRM_ASAHI_GEM_VM_PRIVATE is set
+ */
+ __u32 vm_id;
+
+ /** @handle: Returned GEM handle for the BO */
+ __u32 handle;
+
+ /** @pad: MBZ */
+ __u32 pad;
+};
+
+/**
+ * struct drm_asahi_gem_mmap_offset - Arguments passed to
+ * DRM_IOCTL_ASAHI_GEM_MMAP_OFFSET
+ */
+struct drm_asahi_gem_mmap_offset {
+ /** @handle: Handle for the object being mapped. */
+ __u32 handle;
+
+ /** @flags: Must be zero */
+ __u32 flags;
+
+ /** @offset: The fake offset to use for subsequent mmap call */
+ __u64 offset;
+};
+
+/**
+ * enum drm_asahi_bind_flags - Flags for GEM binding
+ */
+enum drm_asahi_bind_flags {
+ /**
+ * @DRM_ASAHI_BIND_UNBIND: Instead of binding a GEM object to the range,
+ * simply unbind the GPU VMA range.
+ */
+ DRM_ASAHI_BIND_UNBIND = (1L << 0),
+
+ /** @DRM_ASAHI_BIND_READ: Map BO with GPU read permission */
+ DRM_ASAHI_BIND_READ = (1L << 1),
+
+ /** @DRM_ASAHI_BIND_WRITE: Map BO with GPU write permission */
+ DRM_ASAHI_BIND_WRITE = (1L << 2),
+
+ /**
+ * @DRM_ASAHI_BIND_SINGLE_PAGE: Map a single page of the BO repeatedly
+ * across the VA range.
+ *
+ * This is useful to fill a VA range with scratch pages or zero pages.
+ * It is intended as a mechanism to accelerate sparse.
+ */
+ DRM_ASAHI_BIND_SINGLE_PAGE = (1L << 3),
+};
+
+/**
+ * struct drm_asahi_gem_bind_op - Description of a single GEM bind operation.
+ */
+struct drm_asahi_gem_bind_op {
+ /** @flags: Combination of drm_asahi_bind_flags flags. */
+ __u32 flags;
+
+ /** @handle: GEM object to bind (except for UNBIND) */
+ __u32 handle;
+
+ /**
+ * @offset: Offset into the object (except for UNBIND).
+ *
+ * For a regular bind, this is the beginning of the region of the GEM
+ * object to bind.
+ *
+ * For a single-page bind, this is the offset to the single page that
+ * will be repeatedly bound.
+ *
+ * Must be page-size aligned.
+ */
+ __u64 offset;
+
+ /**
+ * @range: Number of bytes to bind/unbind to @addr.
+ *
+ * Must be page-size aligned.
+ */
+ __u64 range;
+
+ /**
+ * @addr: Address to bind to.
+ *
+ * Must be page-size aligned.
+ */
+ __u64 addr;
+};
+
+/**
+ * struct drm_asahi_vm_bind - Arguments passed to
+ * DRM_IOCTL_ASAHI_VM_BIND
+ */
+struct drm_asahi_vm_bind {
+ /** @vm_id: The ID of the VM to bind to */
+ __u32 vm_id;
+
+ /** @num_binds: number of binds in this IOCTL. */
+ __u32 num_binds;
+
+ /**
+ * @stride: Stride in bytes between consecutive binds. This allows
+ * extensibility of drm_asahi_gem_bind_op.
+ */
+ __u32 stride;
+
+ /** @pad: MBZ */
+ __u32 pad;
+
+ /**
+ * @userptr: User pointer to an array of @num_binds structures of type
+ * @drm_asahi_gem_bind_op and size @stride bytes.
+ */
+ __u64 userptr;
+};
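
A minimal sketch of a batched bind (editor's illustration); @stride is what lets drm_asahi_gem_bind_op grow later without a new ioctl:

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/asahi_drm.h>

/* Hypothetical helper: apply `count` bind/unbind ops in one ioctl. */
static int asahi_vm_bind(int fd, __u32 vm_id,
                         const struct drm_asahi_gem_bind_op *ops,
                         __u32 count)
{
        struct drm_asahi_vm_bind args = {
                .vm_id = vm_id,
                .num_binds = count,
                .stride = sizeof(*ops), /* per-op size known to userspace */
                .userptr = (__u64)(uintptr_t)ops,
        };

        return ioctl(fd, DRM_IOCTL_ASAHI_VM_BIND, &args);
}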
+
+/**
+ * enum drm_asahi_bind_object_op - Special object bind operation
+ */
+enum drm_asahi_bind_object_op {
+ /** @DRM_ASAHI_BIND_OBJECT_OP_BIND: Bind a BO as a special GPU object */
+ DRM_ASAHI_BIND_OBJECT_OP_BIND = 0,
+
+ /** @DRM_ASAHI_BIND_OBJECT_OP_UNBIND: Unbind a special GPU object */
+ DRM_ASAHI_BIND_OBJECT_OP_UNBIND = 1,
+};
+
+/**
+ * enum drm_asahi_bind_object_flags - Special object bind flags
+ */
+enum drm_asahi_bind_object_flags {
+ /**
+ * @DRM_ASAHI_BIND_OBJECT_USAGE_TIMESTAMPS: Map a BO as a timestamp
+ * buffer.
+ */
+ DRM_ASAHI_BIND_OBJECT_USAGE_TIMESTAMPS = (1L << 0),
+};
+
+/**
+ * struct drm_asahi_gem_bind_object - Arguments passed to
+ * DRM_IOCTL_ASAHI_GEM_BIND_OBJECT
+ */
+struct drm_asahi_gem_bind_object {
+ /** @op: Bind operation (enum drm_asahi_bind_object_op) */
+ __u32 op;
+
+ /** @flags: Combination of drm_asahi_bind_object_flags flags. */
+ __u32 flags;
+
+ /** @handle: GEM object to bind/unbind (BIND) */
+ __u32 handle;
+
+ /** @vm_id: The ID of the VM to operate on (MBZ currently) */
+ __u32 vm_id;
+
+ /** @offset: Offset into the object (BIND only) */
+ __u64 offset;
+
+ /** @range: Number of bytes to bind/unbind (BIND only) */
+ __u64 range;
+
+ /** @object_handle: Object handle (out for BIND, in for UNBIND) */
+ __u32 object_handle;
+
+ /** @pad: MBZ */
+ __u32 pad;
+};
+
+/**
+ * enum drm_asahi_cmd_type - Command type
+ */
+enum drm_asahi_cmd_type {
+ /**
+ * @DRM_ASAHI_CMD_RENDER: Render command, executing on the render
+ * subqueue. Combined vertex and fragment operation.
+ *
+ * Followed by a @drm_asahi_cmd_render payload.
+ */
+ DRM_ASAHI_CMD_RENDER = 0,
+
+ /**
+ * @DRM_ASAHI_CMD_COMPUTE: Compute command on the compute subqueue.
+ *
+ * Followed by a @drm_asahi_cmd_compute payload.
+ */
+ DRM_ASAHI_CMD_COMPUTE = 1,
+
+ /**
+ * @DRM_ASAHI_SET_VERTEX_ATTACHMENTS: Software command to set
+ * attachments for subsequent vertex shaders in the same submit.
+ *
+ * Followed by (possibly multiple) @drm_asahi_attachment payloads.
+ */
+ DRM_ASAHI_SET_VERTEX_ATTACHMENTS = 2,
+
+ /**
+ * @DRM_ASAHI_SET_FRAGMENT_ATTACHMENTS: Software command to set
+ * attachments for subsequent fragment shaders in the same submit.
+ *
+ * Followed by (possibly multiple) @drm_asahi_attachment payloads.
+ */
+ DRM_ASAHI_SET_FRAGMENT_ATTACHMENTS = 3,
+
+ /**
+ * @DRM_ASAHI_SET_COMPUTE_ATTACHMENTS: Software command to set
+ * attachments for subsequent compute shaders in the same submit.
+ *
+ * Followed by (possibly multiple) @drm_asahi_attachment payloads.
+ */
+ DRM_ASAHI_SET_COMPUTE_ATTACHMENTS = 4,
+};
+
+/**
+ * enum drm_asahi_priority - Scheduling queue priority.
+ *
+ * These priorities are forwarded to the firmware to influence firmware
+ * scheduling. The exact policy is ultimately decided by firmware, but
+ * these enums allow userspace to communicate the intentions.
+ */
+enum drm_asahi_priority {
+ /** @DRM_ASAHI_PRIORITY_LOW: Low priority queue. */
+ DRM_ASAHI_PRIORITY_LOW = 0,
+
+ /** @DRM_ASAHI_PRIORITY_MEDIUM: Medium priority queue. */
+ DRM_ASAHI_PRIORITY_MEDIUM = 1,
+
+ /**
+ * @DRM_ASAHI_PRIORITY_HIGH: High priority queue.
+ *
+ * Reserved for future extension.
+ */
+ DRM_ASAHI_PRIORITY_HIGH = 2,
+
+ /**
+ * @DRM_ASAHI_PRIORITY_REALTIME: Real-time priority queue.
+ *
+ * Reserved for future extension.
+ */
+ DRM_ASAHI_PRIORITY_REALTIME = 3,
+};
+
+/**
+ * struct drm_asahi_queue_create - Arguments passed to
+ * DRM_IOCTL_ASAHI_QUEUE_CREATE
+ */
+struct drm_asahi_queue_create {
+ /** @flags: MBZ */
+ __u32 flags;
+
+ /** @vm_id: The ID of the VM this queue is bound to */
+ __u32 vm_id;
+
+ /** @priority: One of drm_asahi_priority */
+ __u32 priority;
+
+ /** @queue_id: The returned queue ID */
+ __u32 queue_id;
+
+ /**
+ * @usc_exec_base: GPU base address for all USC binaries (shaders) on
+ * this queue. USC addresses are 32-bit relative to this 64-bit base.
+ *
+ * This sets the following registers on all queue commands:
+ *
+ * USC_EXEC_BASE_TA (vertex)
+ * USC_EXEC_BASE_ISP (fragment)
+ * USC_EXEC_BASE_CP (compute)
+ *
+ * While the hardware lets us configure these independently per command,
+ * we do not have a use case for this. Instead, we expect userspace to
+ * fix a 4GiB VA carveout for USC memory and pass its base address here.
+ */
+ __u64 usc_exec_base;
+};
+
+/**
+ * struct drm_asahi_queue_destroy - Arguments passed to
+ * DRM_IOCTL_ASAHI_QUEUE_DESTROY
+ */
+struct drm_asahi_queue_destroy {
+ /** @queue_id: The queue ID to be destroyed */
+ __u32 queue_id;
+
+ /** @pad: MBZ */
+ __u32 pad;
+};
+
+/**
+ * enum drm_asahi_sync_type - Sync item type
+ */
+enum drm_asahi_sync_type {
+ /** @DRM_ASAHI_SYNC_SYNCOBJ: Binary sync object */
+ DRM_ASAHI_SYNC_SYNCOBJ = 0,
+
+ /** @DRM_ASAHI_SYNC_TIMELINE_SYNCOBJ: Timeline sync object */
+ DRM_ASAHI_SYNC_TIMELINE_SYNCOBJ = 1,
+};
+
+/**
+ * struct drm_asahi_sync - Sync item
+ */
+struct drm_asahi_sync {
+ /** @sync_type: One of drm_asahi_sync_type */
+ __u32 sync_type;
+
+ /** @handle: The sync object handle */
+ __u32 handle;
+
+ /** @timeline_value: Timeline value for timeline sync objects */
+ __u64 timeline_value;
+};
+
+/**
+ * define DRM_ASAHI_BARRIER_NONE - Command index for no barrier
+ *
+ * This special value may be passed in to drm_asahi_command::vdm_barrier or
+ * drm_asahi_command::cdm_barrier to indicate that the respective subqueue
+ * should not wait on any previous work.
+ */
+#define DRM_ASAHI_BARRIER_NONE (0xFFFFu)
+
+/**
+ * struct drm_asahi_cmd_header - Top level command structure
+ *
+ * This struct is core to the command buffer definition and therefore is not
+ * extensible.
+ */
+struct drm_asahi_cmd_header {
+ /** @cmd_type: One of drm_asahi_cmd_type */
+ __u16 cmd_type;
+
+ /**
+ * @size: Size of this command, not including this header.
+ *
+ * For hardware commands, this enables extensibility of commands without
+ * requiring extra command types. Passing a command that is shorter
+ * than expected is explicitly allowed for backwards-compatibility.
+ * Truncated fields will be zeroed.
+ *
+ * For the synthetic attachment setting commands, this implicitly
+ * encodes the number of attachments. These commands take multiple
+ * fixed-size @drm_asahi_attachment structures as their payload, so size
+ * equals number of attachments * sizeof(struct drm_asahi_attachment).
+ */
+ __u16 size;
+
+ /**
+ * @vdm_barrier: VDM (render) command index to wait on.
+ *
+ * Barriers are indices relative to the beginning of a given submit. A
+ * barrier of 0 waits on commands submitted to the respective subqueue
+ * in previous submit ioctls. A barrier of N waits on N previous
+ * commands on the subqueue within the current submit ioctl. As a
+ * special case, passing @DRM_ASAHI_BARRIER_NONE avoids waiting on any
+ * commands in the subqueue.
+ *
+ * Examples:
+ *
+ * 0: This waits on all previous work.
+ *
+ * NONE: This does not wait for anything on this subqueue.
+ *
+ * 1: This waits on the first render command in the submit.
+ * This is valid only if there are multiple render commands in the
+ * same submit.
+ *
+ * Barriers are valid only for hardware commands. Synthetic software
+ * commands to set attachments must pass NONE here.
+ */
+ __u16 vdm_barrier;
+
+ /**
+ * @cdm_barrier: CDM (compute) command index to wait on.
+ *
+ * See @vdm_barrier, and replace VDM/render with CDM/compute.
+ */
+ __u16 cdm_barrier;
+};
+
+/**
+ * struct drm_asahi_submit - Arguments passed to DRM_IOCTL_ASAHI_SUBMIT
+ */
+struct drm_asahi_submit {
+ /**
+ * @syncs: An optional pointer to an array of drm_asahi_sync. The first
+ * @in_sync_count elements are in-syncs, then the remaining
+ * @out_sync_count elements are out-syncs. Using a single array with
+ * explicit partitioning simplifies handling.
+ */
+ __u64 syncs;
+
+ /**
+ * @cmdbuf: Pointer to the command buffer to submit.
+ *
+ * This is a flat command buffer. By design, it contains no CPU
+ * pointers, which makes it suitable for a virtgpu wire protocol without
+ * requiring any serializing/deserializing step.
+ *
+ * It consists of a series of commands. Each command begins with a
+ * fixed-size @drm_asahi_cmd_header header and is followed by a
+ * variable-length payload according to the type and size in the header.
+ *
+ * The combined count of "real" hardware commands must be nonzero and at
+ * most drm_asahi_params_global::max_commands_per_submission.
+ */
+ __u64 cmdbuf;
+
+ /** @flags: Flags for command submission (MBZ) */
+ __u32 flags;
+
+ /** @queue_id: The queue ID to be submitted to */
+ __u32 queue_id;
+
+ /**
+ * @in_sync_count: Number of sync objects to wait on before starting
+ * this job.
+ */
+ __u32 in_sync_count;
+
+ /**
+ * @out_sync_count: Number of sync objects to signal upon completion of
+ * this job.
+ */
+ __u32 out_sync_count;
+
+ /** @cmdbuf_size: Command buffer size in bytes */
+ __u32 cmdbuf_size;
+
+ /** @pad: MBZ */
+ __u32 pad;
+};
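
Putting the header and payload rules together, a hedged sketch of a single-compute-command submission (editor's illustration; syncs omitted, payload contents assumed to be filled in by the caller):

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/asahi_drm.h>

/* Hypothetical helper: wrap one compute command in the flat command
 * buffer format, a drm_asahi_cmd_header followed by its payload. */
static int asahi_submit_one_compute(int fd, __u32 queue_id,
                                    const struct drm_asahi_cmd_compute *cmd)
{
        struct {
                struct drm_asahi_cmd_header hdr;
                struct drm_asahi_cmd_compute payload;
        } buf = {
                .hdr = {
                        .cmd_type = DRM_ASAHI_CMD_COMPUTE,
                        .size = sizeof(buf.payload),
                        .vdm_barrier = DRM_ASAHI_BARRIER_NONE,
                        .cdm_barrier = 0, /* wait on prior compute work */
                },
                .payload = *cmd,
        };
        struct drm_asahi_submit args = {
                .cmdbuf = (__u64)(uintptr_t)&buf,
                .cmdbuf_size = sizeof(buf),
                .queue_id = queue_id,
        };

        return ioctl(fd, DRM_IOCTL_ASAHI_SUBMIT, &args);
}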
+
+/**
+ * struct drm_asahi_attachment - Describe an "attachment".
+ *
+ * Attachments are any memory written by shaders, notably including render
+ * target attachments written by the end-of-tile program. This is purely a hint
+ * about the accessed memory regions. It is optional to specify, which is
+ * fortunate as it cannot be specified precisely with bindless access anyway.
+ * But where possible, it's probably a good idea for userspace to include these
+ * hints, which are forwarded to the firmware.
+ *
+ * This struct is implicitly sized and therefore is not extensible.
+ */
+struct drm_asahi_attachment {
+ /** @pointer: Base address of the attachment */
+ __u64 pointer;
+
+ /** @size: Size of the attachment in bytes */
+ __u64 size;
+
+ /** @pad: MBZ */
+ __u32 pad;
+
+ /** @flags: MBZ */
+ __u32 flags;
+};
+
+enum drm_asahi_render_flags {
+ /**
+ * @DRM_ASAHI_RENDER_VERTEX_SCRATCH: A vertex stage shader uses scratch
+ * memory.
+ */
+ DRM_ASAHI_RENDER_VERTEX_SCRATCH = (1U << 0),
+
+ /**
+ * @DRM_ASAHI_RENDER_PROCESS_EMPTY_TILES: Process even empty tiles.
+ * This must be set when clearing render targets.
+ */
+ DRM_ASAHI_RENDER_PROCESS_EMPTY_TILES = (1U << 1),
+
+ /**
+ * @DRM_ASAHI_RENDER_NO_VERTEX_CLUSTERING: Run vertex stage on a single
+ * cluster (on multi-cluster GPUs)
+ *
+ * This harms performance but can workaround certain sync/coherency
+ * bugs, and therefore is useful for debugging.
+ */
+ DRM_ASAHI_RENDER_NO_VERTEX_CLUSTERING = (1U << 2),
+
+ /**
+ * @DRM_ASAHI_RENDER_DBIAS_IS_INT: Use integer depth bias formula.
+ *
+ * Graphics specifications contain two alternate formulas for depth
+ * bias, a float formula used with floating-point depth buffers and an
+ * integer formula used with unorm depth buffers. This flag specifies
+ * that the integer formula should be used. If omitted, the float
+ * formula is used instead.
+ *
+ * This corresponds to bit 18 of the relevant hardware control register,
+ * so we match that here for efficiency.
+ */
+ DRM_ASAHI_RENDER_DBIAS_IS_INT = (1U << 18),
+};
+
+/**
+ * struct drm_asahi_zls_buffer - Describe a depth or stencil buffer.
+ *
+ * These fields correspond to hardware registers in the ZLS (Z Load/Store) unit.
+ * There are three hardware registers for each field respectively for loads,
+ * stores, and partial renders. In practice, it makes sense to set all to the
+ * same values, except in exceptional cases not yet implemented in userspace, so
+ * we do not duplicate here for simplicity/efficiency.
+ *
+ * This struct is embedded in other structs and therefore is not extensible.
+ */
+struct drm_asahi_zls_buffer {
+ /** @base: Base address of the buffer */
+ __u64 base;
+
+ /**
+ * @comp_base: If the load buffer is compressed, address of the
+ * compression metadata section.
+ */
+ __u64 comp_base;
+
+ /**
+ * @stride: If layered rendering is enabled, the number of bytes
+ * between each layer of the buffer.
+ */
+ __u32 stride;
+
+ /**
+ * @comp_stride: If layered rendering is enabled, the number of bytes
+ * between each layer of the compression metadata.
+ */
+ __u32 comp_stride;
+};
+
+/**
+ * struct drm_asahi_timestamp - Describe a timestamp write.
+ *
+ * The firmware can optionally write the GPU timestamp at render pass
+ * granularities, but it needs to be mapped specially via
+ * DRM_IOCTL_ASAHI_GEM_BIND_OBJECT. This structure therefore describes where to
+ * write as a handle-offset pair, rather than a GPU address like normal.
+ *
+ * This struct is embedded in other structs and therefore is not extensible.
+ */
+struct drm_asahi_timestamp {
+ /**
+ * @handle: Handle of the timestamp buffer, or 0 to skip this
+ * timestamp. If nonzero, this must equal the value returned in
+ * drm_asahi_gem_bind_object::object_handle.
+ */
+ __u32 handle;
+
+ /** @offset: Offset to write into the timestamp buffer */
+ __u32 offset;
+};
+
+/**
+ * struct drm_asahi_timestamps - Describe timestamp writes.
+ *
+ * Each operation that can be timestamped, can be timestamped at the start and
+ * end. Therefore, drm_asahi_timestamp structs always come in pairs, bundled
+ * together into drm_asahi_timestamps.
+ *
+ * This struct is embedded in other structs and therefore is not extensible.
+ */
+struct drm_asahi_timestamps {
+ /** @start: Timestamp recorded at the start of the operation */
+ struct drm_asahi_timestamp start;
+
+ /** @end: Timestamp recorded at the end of the operation */
+ struct drm_asahi_timestamp end;
+};
+
+/**
+ * struct drm_asahi_helper_program - Describe helper program configuration.
+ *
+ * The helper program is a compute-like kernel required for various hardware
+ * functionality. Its most important role is dynamically allocating
+ * scratch/stack memory for individual subgroups, by partitioning a static
+ * allocation shared for the whole device. It is supplied by userspace via
+ * drm_asahi_helper_program and internally dispatched by the hardware as needed.
+ *
+ * This struct is embedded in other structs and therefore is not extensible.
+ */
+struct drm_asahi_helper_program {
+ /**
+ * @binary: USC address to the helper program binary. This is a tagged
+ * pointer with configuration in the bottom bits.
+ */
+ __u32 binary;
+
+ /** @cfg: Additional configuration bits for the helper program. */
+ __u32 cfg;
+
+ /**
+ * @data: Data passed to the helper program. This value is not
+ * interpreted by the kernel, firmware, or hardware in any way. It is
+ * simply a sideband for userspace, set with the submit ioctl and read
+ * via special registers inside the helper program.
+ *
+ * In practice, userspace will pass a 64-bit GPU VA here pointing to the
+ * actual arguments, which presumably don't fit in 64-bits.
+ */
+ __u64 data;
+};
+
+/**
+ * struct drm_asahi_bg_eot - Describe a background or end-of-tile program.
+ *
+ * The background and end-of-tile programs are dispatched by the hardware at the
+ * beginning and end of rendering. As the hardware "tilebuffer" is simply local
+ * memory, these programs are necessary to implement API-level render targets.
+ * The fragment-like background program is responsible for loading either the
+ * clear colour or the existing render target contents, while the compute-like
+ * end-of-tile program stores the tilebuffer contents to memory.
+ *
+ * This struct is embedded in other structs and therefore is not extensible.
+ */
+struct drm_asahi_bg_eot {
+ /**
+ * @usc: USC address of the hardware USC words binding resources
+ * (including images and uniforms) and the program itself. Note this is
+ * an additional layer of indirection compared to the helper program,
+ * avoiding the need for a sideband for data. This is a tagged pointer
+ * with additional configuration in the bottom bits.
+ */
+ __u32 usc;
+
+ /**
+ * @rsrc_spec: Resource specifier for the program. This is a packed
+ * hardware data structure describing the required number of registers,
+ * uniforms, bound textures, and bound samplers.
+ */
+ __u32 rsrc_spec;
+};
+
+/**
+ * struct drm_asahi_cmd_render - Command to submit 3D
+ *
+ * This command submits a single render pass. The hardware control stream may
+ * include many draws and subpasses, but within the command, the framebuffer
+ * dimensions and attachments are fixed.
+ *
+ * The hardware requires the firmware to set a large number of Control Registers
+ * setting up state at render pass granularity before each command rendering 3D.
+ * The firmware bundles this state into data structures. Unfortunately, we
+ * cannot expose either any of that directly to userspace, because the
+ * kernel-firmware ABI is not stable. Although we can guarantee the firmware
+ * updates in tandem with the kernel, we cannot break old userspace when
+ * upgrading the firmware and kernel. Therefore, we need to abstract the
+ * data structures well to avoid tying our hands with future firmwares.
+ *
+ * The bulk of drm_asahi_cmd_render therefore consists of values of hardware
+ * control registers, marshalled via the firmware interface.
+ *
+ * The framebuffer/tilebuffer dimensions are also specified here. In addition to
+ * being passed to the firmware/hardware, the kernel requires these dimensions
+ * to calculate various essential tiling-related data structures. It is
+ * unfortunate that our submits are heavier than on vendors with saner
+ * hardware-software interfaces. The upshot is that all of this information is
+ * readily available to userspace with all current APIs.
+ *
+ * It looks odd, but it's not overly burdensome, and it ensures we can remain
+ * compatible with old userspace.
+ */
+struct drm_asahi_cmd_render {
+ /** @flags: Combination of drm_asahi_render_flags flags. */
+ __u32 flags;
+
+ /**
+ * @isp_zls_pixels: ISP_ZLS_PIXELS register value. This contains the
+ * depth/stencil width/height, which may differ from the framebuffer
+ * width/height.
+ */
+ __u32 isp_zls_pixels;
+
+ /**
+ * @vdm_ctrl_stream_base: VDM_CTRL_STREAM_BASE register value. GPU
+ * address to the beginning of the VDM control stream.
+ */
+ __u64 vdm_ctrl_stream_base;
+
+ /** @vertex_helper: Helper program used for the vertex shader */
+ struct drm_asahi_helper_program vertex_helper;
+
+ /** @fragment_helper: Helper program used for the fragment shader */
+ struct drm_asahi_helper_program fragment_helper;
+
+ /**
+ * @isp_scissor_base: ISP_SCISSOR_BASE register value. GPU address of an
+ * array of scissor descriptors indexed in the render pass.
+ */
+ __u64 isp_scissor_base;
+
+ /**
+ * @isp_dbias_base: ISP_DBIAS_BASE register value. GPU address of an
+ * array of depth bias values indexed in the render pass.
+ */
+ __u64 isp_dbias_base;
+
+ /**
+ * @isp_oclqry_base: ISP_OCLQRY_BASE register value. GPU address of an
+ * array of occlusion query results written by the render pass.
+ */
+ __u64 isp_oclqry_base;
+
+ /** @depth: Depth buffer */
+ struct drm_asahi_zls_buffer depth;
+
+ /** @stencil: Stencil buffer */
+ struct drm_asahi_zls_buffer stencil;
+
+ /** @zls_ctrl: ZLS_CTRL register value */
+ __u64 zls_ctrl;
+
+ /** @ppp_multisamplectl: PPP_MULTISAMPLECTL register value */
+ __u64 ppp_multisamplectl;
+
+ /**
+ * @sampler_heap: Base address of the sampler heap. This heap is used
+ * for both vertex shaders and fragment shaders. The registers are
+ * per-stage, but there is no known use case for separate heaps.
+ */
+ __u64 sampler_heap;
+
+ /** @ppp_ctrl: PPP_CTRL register value */
+ __u32 ppp_ctrl;
+
+ /** @width_px: Framebuffer width in pixels */
+ __u16 width_px;
+
+ /** @height_px: Framebuffer height in pixels */
+ __u16 height_px;
+
+ /** @layers: Number of layers in the framebuffer */
+ __u16 layers;
+
+ /** @sampler_count: Number of samplers in the sampler heap. */
+ __u16 sampler_count;
+
+ /** @utile_width_px: Width of a logical tilebuffer tile in pixels */
+ __u8 utile_width_px;
+
+ /** @utile_height_px: Height of a logical tilebuffer tile in pixels */
+ __u8 utile_height_px;
+
+ /** @samples: # of samples in the framebuffer. Must be 1, 2, or 4. */
+ __u8 samples;
+
+ /** @sample_size_B: # of bytes in the tilebuffer required per sample. */
+ __u8 sample_size_B;
+
+ /**
+ * @isp_merge_upper_x: 32-bit float used in the hardware triangle
+ * merging. Calculate as: tan(60 deg) * width.
+ *
+ * Making these values UAPI avoids requiring floating-point calculations
+ * in the kernel in the hot path.
+ */
+ __u32 isp_merge_upper_x;
+
+ /**
+ * @isp_merge_upper_y: 32-bit float. Calculate as: tan(60 deg) * height.
+ * See @isp_merge_upper_x.
+ */
+ __u32 isp_merge_upper_y;
+
+ /** @bg: Background program run for each tile at the start */
+ struct drm_asahi_bg_eot bg;
+
+ /** @eot: End-of-tile program ran for each tile at the end */
+ struct drm_asahi_bg_eot eot;
+
+ /**
+ * @partial_bg: Background program ran at the start of each tile when
+ * resuming the render pass during a partial render.
+ */
+ struct drm_asahi_bg_eot partial_bg;
+
+ /**
+ * @partial_eot: End-of-tile program ran at the end of each tile when
+ * pausing the render pass during a partial render.
+ */
+ struct drm_asahi_bg_eot partial_eot;
+
+ /**
+ * @isp_bgobjdepth: ISP_BGOBJDEPTH register value. This is the depth
+ * buffer clear value, encoded in the depth buffer's format: either a
+ * 32-bit float or a 16-bit unorm (with upper bits zeroed).
+ */
+ __u32 isp_bgobjdepth;
+
+ /**
+ * @isp_bgobjvals: ISP_BGOBJVALS register value. The bottom 8-bits
+ * contain the stencil buffer clear value.
+ */
+ __u32 isp_bgobjvals;
+
+ /** @ts_vtx: Timestamps for the vertex portion of the render */
+ struct drm_asahi_timestamps ts_vtx;
+
+ /** @ts_frag: Timestamps for the fragment portion of the render */
+ struct drm_asahi_timestamps ts_frag;
+};
+
+/**
+ * struct drm_asahi_cmd_compute - Command to submit compute
+ *
+ * This command submits a control stream consisting of compute dispatches. There
+ * is essentially no limit on how many compute dispatches may be included in a
+ * single compute command, although timestamps are at command granularity.
+ */
+struct drm_asahi_cmd_compute {
+ /** @flags: MBZ */
+ __u32 flags;
+
+ /** @sampler_count: Number of samplers in the sampler heap. */
+ __u32 sampler_count;
+
+ /**
+ * @cdm_ctrl_stream_base: CDM_CTRL_STREAM_BASE register value. GPU
+ * address to the beginning of the CDM control stream.
+ */
+ __u64 cdm_ctrl_stream_base;
+
+ /**
+ * @cdm_ctrl_stream_end: GPU base address to the end of the hardware
+ * control stream. Note this only considers the first contiguous segment
+ * of the control stream, as the stream might jump elsewhere.
+ */
+ __u64 cdm_ctrl_stream_end;
+
+ /** @sampler_heap: Base address of the sampler heap. */
+ __u64 sampler_heap;
+
+ /** @helper: Helper program used for this compute command */
+ struct drm_asahi_helper_program helper;
+
+ /** @ts: Timestamps for the compute command */
+ struct drm_asahi_timestamps ts;
+};
+
+/**
+ * struct drm_asahi_get_time - Arguments passed to DRM_IOCTL_ASAHI_GET_TIME
+ */
+struct drm_asahi_get_time {
+ /** @flags: MBZ. */
+ __u64 flags;
+
+ /** @gpu_timestamp: On return, the GPU timestamp in nanoseconds. */
+ __u64 gpu_timestamp;
+};
+
+/**
+ * DRM_IOCTL_ASAHI() - Build an Asahi IOCTL number
+ * @__access: Access type. Must be R, W or RW.
+ * @__id: One of the DRM_ASAHI_xxx id.
+ * @__type: Suffix of the type being passed to the IOCTL.
+ *
+ * Don't use this macro directly, use the DRM_IOCTL_ASAHI_xxx
+ * values instead.
+ *
+ * Return: An IOCTL number to be passed to ioctl() from userspace.
+ */
+#define DRM_IOCTL_ASAHI(__access, __id, __type) \
+ DRM_IO ## __access(DRM_COMMAND_BASE + DRM_ASAHI_ ## __id, \
+ struct drm_asahi_ ## __type)
+
+/* Note: this is an enum so that it can be resolved by Rust bindgen. */
+enum {
+ DRM_IOCTL_ASAHI_GET_PARAMS = DRM_IOCTL_ASAHI(W, GET_PARAMS, get_params),
+ DRM_IOCTL_ASAHI_GET_TIME = DRM_IOCTL_ASAHI(WR, GET_TIME, get_time),
+ DRM_IOCTL_ASAHI_VM_CREATE = DRM_IOCTL_ASAHI(WR, VM_CREATE, vm_create),
+ DRM_IOCTL_ASAHI_VM_DESTROY = DRM_IOCTL_ASAHI(W, VM_DESTROY, vm_destroy),
+ DRM_IOCTL_ASAHI_VM_BIND = DRM_IOCTL_ASAHI(W, VM_BIND, vm_bind),
+ DRM_IOCTL_ASAHI_GEM_CREATE = DRM_IOCTL_ASAHI(WR, GEM_CREATE, gem_create),
+ DRM_IOCTL_ASAHI_GEM_MMAP_OFFSET = DRM_IOCTL_ASAHI(WR, GEM_MMAP_OFFSET, gem_mmap_offset),
+ DRM_IOCTL_ASAHI_GEM_BIND_OBJECT = DRM_IOCTL_ASAHI(WR, GEM_BIND_OBJECT, gem_bind_object),
+ DRM_IOCTL_ASAHI_QUEUE_CREATE = DRM_IOCTL_ASAHI(WR, QUEUE_CREATE, queue_create),
+ DRM_IOCTL_ASAHI_QUEUE_DESTROY = DRM_IOCTL_ASAHI(W, QUEUE_DESTROY, queue_destroy),
+ DRM_IOCTL_ASAHI_SUBMIT = DRM_IOCTL_ASAHI(W, SUBMIT, submit),
+};
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif /* _ASAHI_DRM_H_ */
diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h
index 7fba37b94401..e63a71d3c607 100644
--- a/include/uapi/drm/drm.h
+++ b/include/uapi/drm/drm.h
@@ -905,13 +905,17 @@ struct drm_syncobj_destroy {
};
#define DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE (1 << 0)
+#define DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_TIMELINE (1 << 1)
#define DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE (1 << 0)
+#define DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_TIMELINE (1 << 1)
struct drm_syncobj_handle {
__u32 handle;
__u32 flags;
__s32 fd;
__u32 pad;
+
+ __u64 point;
};
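
The new @point field plus the TIMELINE flags extend the existing sync-file import/export to timeline syncobjs. A hedged sketch of the export direction (editor's illustration), assuming the TIMELINE flag simply augments EXPORT_SYNC_FILE and @point selects the timeline point:

#include <sys/ioctl.h>
#include <drm/drm.h>

/* Hypothetical helper: export the fence at `point` of a timeline syncobj
 * as a sync-file fd. */
static int export_timeline_point(int fd, __u32 syncobj, __u64 point)
{
        struct drm_syncobj_handle args = {
                .handle = syncobj,
                .flags = DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE |
                         DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_TIMELINE,
                .point = point,
        };

        if (ioctl(fd, DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD, &args))
                return -1;
        return args.fd; /* sync-file fd */
}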
struct drm_syncobj_transfer {
diff --git a/include/uapi/drm/drm_fourcc.h b/include/uapi/drm/drm_fourcc.h
index e41a3cec6a9e..ea91aa8afde9 100644
--- a/include/uapi/drm/drm_fourcc.h
+++ b/include/uapi/drm/drm_fourcc.h
@@ -210,6 +210,10 @@ extern "C" {
#define DRM_FORMAT_RGBA1010102 fourcc_code('R', 'A', '3', '0') /* [31:0] R:G:B:A 10:10:10:2 little endian */
#define DRM_FORMAT_BGRA1010102 fourcc_code('B', 'A', '3', '0') /* [31:0] B:G:R:A 10:10:10:2 little endian */
+/* 48 bpp RGB */
+#define DRM_FORMAT_RGB161616 fourcc_code('R', 'G', '4', '8') /* [47:0] R:G:B 16:16:16 little endian */
+#define DRM_FORMAT_BGR161616 fourcc_code('B', 'G', '4', '8') /* [47:0] B:G:R 16:16:16 little endian */
+
/* 64 bpp RGB */
#define DRM_FORMAT_XRGB16161616 fourcc_code('X', 'R', '4', '8') /* [63:0] x:R:G:B 16:16:16:16 little endian */
#define DRM_FORMAT_XBGR16161616 fourcc_code('X', 'B', '4', '8') /* [63:0] x:B:G:R 16:16:16:16 little endian */
@@ -218,7 +222,7 @@ extern "C" {
#define DRM_FORMAT_ABGR16161616 fourcc_code('A', 'B', '4', '8') /* [63:0] A:B:G:R 16:16:16:16 little endian */
/*
- * Floating point 64bpp RGB
+ * Half-precision floating point - 16b/component
* IEEE 754-2008 binary16 half-precision float
* [15:0] sign:exponent:mantissa 1:5:10
*/
@@ -228,6 +232,20 @@ extern "C" {
#define DRM_FORMAT_ARGB16161616F fourcc_code('A', 'R', '4', 'H') /* [63:0] A:R:G:B 16:16:16:16 little endian */
#define DRM_FORMAT_ABGR16161616F fourcc_code('A', 'B', '4', 'H') /* [63:0] A:B:G:R 16:16:16:16 little endian */
+#define DRM_FORMAT_R16F fourcc_code('R', ' ', ' ', 'H') /* [15:0] R 16 little endian */
+#define DRM_FORMAT_GR1616F fourcc_code('G', 'R', ' ', 'H') /* [31:0] G:R 16:16 little endian */
+#define DRM_FORMAT_BGR161616F fourcc_code('B', 'G', 'R', 'H') /* [47:0] B:G:R 16:16:16 little endian */
+
+/*
+ * Floating point - 32b/component
+ * IEEE 754-2008 binary32 float
+ * [31:0] sign:exponent:mantissa 1:8:23
+ */
+#define DRM_FORMAT_R32F fourcc_code('R', ' ', ' ', 'F') /* [31:0] R 32 little endian */
+#define DRM_FORMAT_GR3232F fourcc_code('G', 'R', ' ', 'F') /* [63:0] G:R 32:32 little endian */
+#define DRM_FORMAT_BGR323232F fourcc_code('B', 'G', 'R', 'F') /* [95:0] B:G:R 32:32:32 little endian */
+#define DRM_FORMAT_ABGR32323232F fourcc_code('A', 'B', '8', 'F') /* [127:0] A:B:G:R 32:32:32:32 little endian */
+
/*
* RGBA format with 10-bit components packed in 64-bit per pixel, with 6 bits
* of unused padding per component:
@@ -378,6 +396,42 @@ extern "C" {
#define DRM_FORMAT_Q401 fourcc_code('Q', '4', '0', '1')
/*
+ * 3 plane YCbCr LSB aligned
+ * In order to use these formats in a similar fashion to MSB aligned ones,
+ * implementations can multiply the values by 2^6=64. For that reason the
+ * padding must only contain zeros.
+ * index 0 = Y plane, [15:0] z:Y [6:10] little endian
+ * index 1 = Cr plane, [15:0] z:Cr [6:10] little endian
+ * index 2 = Cb plane, [15:0] z:Cb [6:10] little endian
+ */
+#define DRM_FORMAT_S010 fourcc_code('S', '0', '1', '0') /* 2x2 subsampled Cb (1) and Cr (2) planes 10 bits per channel */
+#define DRM_FORMAT_S210 fourcc_code('S', '2', '1', '0') /* 2x1 subsampled Cb (1) and Cr (2) planes 10 bits per channel */
+#define DRM_FORMAT_S410 fourcc_code('S', '4', '1', '0') /* non-subsampled Cb (1) and Cr (2) planes 10 bits per channel */
+
+/*
+ * 3 plane YCbCr LSB aligned
+ * In order to use these formats in a similar fashion to MSB aligned ones
+ * implementation can multiply the values by 2^4=16. For that reason the padding
+ * must only contain zeros.
+ * index 0 = Y plane, [15:0] z:Y [4:12] little endian
+ * index 1 = Cr plane, [15:0] z:Cr [4:12] little endian
+ * index 2 = Cb plane, [15:0] z:Cb [4:12] little endian
+ */
+#define DRM_FORMAT_S012 fourcc_code('S', '0', '1', '2') /* 2x2 subsampled Cb (1) and Cr (2) planes 12 bits per channel */
+#define DRM_FORMAT_S212 fourcc_code('S', '2', '1', '2') /* 2x1 subsampled Cb (1) and Cr (2) planes 12 bits per channel */
+#define DRM_FORMAT_S412 fourcc_code('S', '4', '1', '2') /* non-subsampled Cb (1) and Cr (2) planes 12 bits per channel */
+
+/*
+ * 3 plane YCbCr
+ * index 0 = Y plane, [15:0] Y little endian
+ * index 1 = Cr plane, [15:0] Cr little endian
+ * index 2 = Cb plane, [15:0] Cb little endian
+ */
+#define DRM_FORMAT_S016 fourcc_code('S', '0', '1', '6') /* 2x2 subsampled Cb (1) and Cr (2) planes 16 bits per channel */
+#define DRM_FORMAT_S216 fourcc_code('S', '2', '1', '6') /* 2x1 subsampled Cb (1) and Cr (2) planes 16 bits per channel */
+#define DRM_FORMAT_S416 fourcc_code('S', '4', '1', '6') /* non-subsampled Cb (1) and Cr (2) planes 16 bits per channel */
+
+/*
* 3 plane YCbCr
* index 0: Y plane, [7:0] Y
* index 1: Cb plane, [7:0] Cb
@@ -422,6 +476,7 @@ extern "C" {
#define DRM_FORMAT_MOD_VENDOR_ALLWINNER 0x09
#define DRM_FORMAT_MOD_VENDOR_AMLOGIC 0x0a
#define DRM_FORMAT_MOD_VENDOR_MTK 0x0b
+#define DRM_FORMAT_MOD_VENDOR_APPLE 0x0c
/* add more to the end as needed */
@@ -1495,6 +1550,50 @@ drm_fourcc_canonicalize_nvidia_format_mod(__u64 modifier)
#define DRM_FORMAT_MOD_MTK_16L_32S_TILE DRM_FORMAT_MOD_MTK(MTK_FMT_MOD_TILE_16L32S)
/*
+ * Apple GPU-tiled layouts.
+ *
+ * Apple GPUs support nonlinear tilings with optional lossless compression.
+ *
+ * GPU-tiled images are divided into 16KiB tiles:
+ *
+ * Bytes per pixel Tile size
+ * --------------- ---------
+ * 1 128x128
+ * 2 128x64
+ * 4 64x64
+ * 8 64x32
+ * 16 32x32
+ *
+ * Tiles are raster-order. Pixels within a tile are interleaved (Morton order).
+ *
+ * Compressed images pad the body to 128-bytes and are immediately followed by a
+ * metadata section. The metadata section rounds the image dimensions to
+ * powers-of-two and contains 8 bytes for each 16x16 compression subtile.
+ * Subtiles are interleaved (Morton order).
+ *
+ * All images are 128-byte aligned.
+ *
+ * These layouts fundamentally do not have meaningful strides. No matter how we
+ * specify strides for these layouts, userspace unaware of Apple image layouts
+ * will be unable to use the specified stride correctly for any purpose.
+ * Userspace aware of the image layouts does not use strides. The most "correct"
+ * convention would be setting the image stride to 0. Unfortunately, some
+ * software assumes the stride is at least (width * bytes per pixel). We
+ * therefore require that stride equals (width * bytes per pixel). Since the
+ * stride is arbitrary here, we pick the simplest convention.
+ *
+ * Although containing two sections, compressed image layouts are treated in
+ * software as a single plane. This is modelled after AFBC, a similar
+ * scheme. Attempting to separate the sections to be "explicit" in DRM would
+ * only generate more confusion, as software does not treat the image this way.
+ *
+ * For detailed information on the hardware image layouts, see
+ * https://docs.mesa3d.org/drivers/asahi.html#image-layouts
+ */
+#define DRM_FORMAT_MOD_APPLE_GPU_TILED fourcc_mod_code(APPLE, 1)
+#define DRM_FORMAT_MOD_APPLE_GPU_TILED_COMPRESSED fourcc_mod_code(APPLE, 2)
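[Editor's note: a minimal userspace sketch (not part of the header) deriving the tile dimensions from the table above and computing the mandated stride convention.]

#include <stdint.h>

/* Sketch: tile dimensions for Apple GPU-tiled layouts, per the 16KiB tile
 * table documented above. Returns 0x0 for unsupported bytes-per-pixel. */
static void apple_gpu_tile_size(uint32_t cpp, uint32_t *tw, uint32_t *th)
{
	switch (cpp) {
	case 1:  *tw = 128; *th = 128; break;
	case 2:  *tw = 128; *th = 64;  break;
	case 4:  *tw = 64;  *th = 64;  break;
	case 8:  *tw = 64;  *th = 32;  break;
	case 16: *tw = 32;  *th = 32;  break;
	default: *tw = 0;   *th = 0;   break;
	}
}

/* Sketch: the stride carries no layout information, but per the convention
 * above it must be reported as exactly width * bytes-per-pixel. */
static uint32_t apple_gpu_stride(uint32_t width, uint32_t cpp)
{
	return width * cpp;
}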
+
+/*
* AMD modifiers
*
* Memory layout:
diff --git a/include/uapi/drm/ivpu_accel.h b/include/uapi/drm/ivpu_accel.h
index 746c43bd3eb6..160ee1411d4a 100644
--- a/include/uapi/drm/ivpu_accel.h
+++ b/include/uapi/drm/ivpu_accel.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
/*
- * Copyright (C) 2020-2024 Intel Corporation
+ * Copyright (C) 2020-2025 Intel Corporation
*/
#ifndef __UAPI_IVPU_DRM_H__
@@ -147,7 +147,7 @@ struct drm_ivpu_param {
* platform type when executing on a simulator or emulator (read-only)
*
* %DRM_IVPU_PARAM_CORE_CLOCK_RATE:
- * Current PLL frequency (read-only)
+ * Maximum frequency of the NPU data processing unit clock (read-only)
*
* %DRM_IVPU_PARAM_NUM_CONTEXTS:
* Maximum number of simultaneously existing contexts (read-only)
@@ -445,6 +445,9 @@ struct drm_ivpu_metric_streamer_get_data {
__u64 data_size;
};
+/* Command queue flags */
+#define DRM_IVPU_CMDQ_FLAG_TURBO 0x00000001
+
/**
* struct drm_ivpu_cmdq_create - Create command queue for job submission
*/
@@ -462,6 +465,17 @@ struct drm_ivpu_cmdq_create {
* %DRM_IVPU_JOB_PRIORITY_REALTIME
*/
__u32 priority;
+ /**
+ * @flags:
+ *
+ * Supported flags:
+ *
+ * %DRM_IVPU_CMDQ_FLAG_TURBO
+ *
+	 * Enable low-latency mode for the command queue. The NPU will maximize performance
+	 * when executing jobs from such a queue, at the cost of increased power usage.
+ */
+ __u32 flags;
};
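[Editor's note: a hedged usage sketch of the new flag. DRM_IOCTL_IVPU_CMDQ_CREATE, DRM_IVPU_JOB_PRIORITY_DEFAULT and the @cmdq_id output member are assumed from elsewhere in this header.]

#include <string.h>
#include <sys/ioctl.h>
#include <drm/ivpu_accel.h>

/* Sketch: create a low-latency command queue on an open ivpu DRM fd. */
static int ivpu_create_turbo_cmdq(int fd, __u32 *cmdq_id)
{
	struct drm_ivpu_cmdq_create args;

	memset(&args, 0, sizeof(args));
	args.priority = DRM_IVPU_JOB_PRIORITY_DEFAULT;
	args.flags = DRM_IVPU_CMDQ_FLAG_TURBO;

	if (ioctl(fd, DRM_IOCTL_IVPU_CMDQ_CREATE, &args))
		return -1;

	*cmdq_id = args.cmdq_id;	/* assumed output member */
	return 0;
}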
/**
diff --git a/include/uapi/drm/msm_drm.h b/include/uapi/drm/msm_drm.h
index 2342cb90857e..5c67294edc95 100644
--- a/include/uapi/drm/msm_drm.h
+++ b/include/uapi/drm/msm_drm.h
@@ -91,6 +91,32 @@ struct drm_msm_timespec {
#define MSM_PARAM_UBWC_SWIZZLE 0x12 /* RO */
#define MSM_PARAM_MACROTILE_MODE 0x13 /* RO */
#define MSM_PARAM_UCHE_TRAP_BASE 0x14 /* RO */
+/* PRR (Partially Resident Region) is required for sparse residency: */
+#define MSM_PARAM_HAS_PRR 0x15 /* RO */
+/* MSM_PARAM_EN_VM_BIND is set to 1 to enable VM_BIND ops.
+ *
+ * With VM_BIND enabled, userspace is required to allocate iova and use the
+ * VM_BIND ops for map/unmap ioctls. MSM_INFO_SET_IOVA and MSM_INFO_GET_IOVA
+ * will be rejected. (The latter does not have a sensible meaning when a BO
+ * can have multiple and/or partial mappings.)
+ *
+ * With VM_BIND enabled, userspace does not include a submit_bo table in the
+ * SUBMIT ioctl (this will be rejected); the resident set is determined by
+ * the VM_BIND ops.
+ *
+ * Enabling VM_BIND will fail on devices which do not have per-process pgtables.
+ * And it is not allowed to disable VM_BIND once it has been enabled.
+ *
+ * Enabling VM_BIND should be attempted prior to allocating any BOs or
+ * creating any submitqueues of type MSM_SUBMITQUEUE_VM_BIND.
+ *
+ * Relatedly, when VM_BIND mode is enabled, the kernel will not try to recover
+ * from GPU faults or failed async VM_BIND ops, in particular because it is
+ * difficult to communicate to userspace which op failed so that userspace
+ * could rewind and try again. When the VM is marked unusable, the SUBMIT
+ * ioctl will return -EPIPE.
+ */
+#define MSM_PARAM_EN_VM_BIND 0x16 /* WO, once */
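[Editor's note: a hedged sketch of opting in, assuming the drm_msm_param pipe/param/value layout and the MSM_PIPE_3D0 pipe id from elsewhere in this header; per the comment above, this must happen before any BO or submitqueue is created.]

#include <string.h>
#include <sys/ioctl.h>
#include <drm/msm_drm.h>

/* Sketch: enable VM_BIND mode once, right after opening the device. */
static int msm_enable_vm_bind(int fd)
{
	struct drm_msm_param req;

	memset(&req, 0, sizeof(req));
	req.pipe = MSM_PIPE_3D0;	/* assumed pipe id from this header */
	req.param = MSM_PARAM_EN_VM_BIND;
	req.value = 1;

	return ioctl(fd, DRM_IOCTL_MSM_SET_PARAM, &req);
}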
/* For backwards compat. The original support for preemption was based on
* a single ring per priority level so # of priority levels equals the #
@@ -114,6 +140,19 @@ struct drm_msm_param {
#define MSM_BO_SCANOUT 0x00000001 /* scanout capable */
#define MSM_BO_GPU_READONLY 0x00000002
+/* Private buffers do not need to be explicitly listed in the SUBMIT
+ * ioctl, unless referenced by a drm_msm_gem_submit_cmd. Private
+ * buffers may NOT be imported/exported or used for scanout (or any
+ * other situation where buffers can be indefinitely pinned; the
+ * cases other than scanout all involve kernel-owned BOs which are
+ * not visible to userspace).
+ *
+ * In exchange for those constraints, all private BOs associated with
+ * a single context (drm_file) share a single dma_resv, and if there
+ * has been no eviction since the last submit, there is no per-BO
+ * bookkeeping to do, significantly cutting the SUBMIT overhead.
+ */
+#define MSM_BO_NO_SHARE 0x00000004
#define MSM_BO_CACHE_MASK 0x000f0000
/* cache modes */
#define MSM_BO_CACHED 0x00010000
@@ -123,6 +162,7 @@ struct drm_msm_param {
#define MSM_BO_FLAGS (MSM_BO_SCANOUT | \
MSM_BO_GPU_READONLY | \
+ MSM_BO_NO_SHARE | \
MSM_BO_CACHE_MASK)
struct drm_msm_gem_new {
@@ -180,6 +220,17 @@ struct drm_msm_gem_cpu_fini {
* Cmdstream Submission:
*/
+#define MSM_SYNCOBJ_RESET 0x00000001 /* Reset syncobj after wait. */
+#define MSM_SYNCOBJ_FLAGS ( \
+ MSM_SYNCOBJ_RESET | \
+ 0)
+
+struct drm_msm_syncobj {
+ __u32 handle; /* in, syncobj handle. */
+	__u32 flags; /* in, from MSM_SYNCOBJ_FLAGS */
+ __u64 point; /* in, timepoint for timeline syncobjs. */
+};
+
/* The value written into the cmdstream is logically:
*
* ((relocbuf->gpuaddr + reloc_offset) << shift) | or
@@ -221,7 +272,10 @@ struct drm_msm_gem_submit_cmd {
__u32 size; /* in, cmdstream size */
__u32 pad;
__u32 nr_relocs; /* in, number of submit_reloc's */
- __u64 relocs; /* in, ptr to array of submit_reloc's */
+ union {
+ __u64 relocs; /* in, ptr to array of submit_reloc's */
+ __u64 iova; /* cmdstream address (for VM_BIND contexts) */
+ };
};
/* Each buffer referenced elsewhere in the cmdstream submit (ie. the
@@ -269,17 +323,6 @@ struct drm_msm_gem_submit_bo {
MSM_SUBMIT_FENCE_SN_IN | \
0)
-#define MSM_SUBMIT_SYNCOBJ_RESET 0x00000001 /* Reset syncobj after wait. */
-#define MSM_SUBMIT_SYNCOBJ_FLAGS ( \
- MSM_SUBMIT_SYNCOBJ_RESET | \
- 0)
-
-struct drm_msm_gem_submit_syncobj {
- __u32 handle; /* in, syncobj handle. */
- __u32 flags; /* in, from MSM_SUBMIT_SYNCOBJ_FLAGS */
- __u64 point; /* in, timepoint for timeline syncobjs. */
-};
-
/* Each cmdstream submit consists of a table of buffers involved, and
* one or more cmdstream buffers. This allows for conditional execution
* (context-restore), and IB buffers needed for per tile/bin draw cmds.
@@ -293,13 +336,80 @@ struct drm_msm_gem_submit {
__u64 cmds; /* in, ptr to array of submit_cmd's */
__s32 fence_fd; /* in/out fence fd (see MSM_SUBMIT_FENCE_FD_IN/OUT) */
__u32 queueid; /* in, submitqueue id */
- __u64 in_syncobjs; /* in, ptr to array of drm_msm_gem_submit_syncobj */
- __u64 out_syncobjs; /* in, ptr to array of drm_msm_gem_submit_syncobj */
+ __u64 in_syncobjs; /* in, ptr to array of drm_msm_syncobj */
+ __u64 out_syncobjs; /* in, ptr to array of drm_msm_syncobj */
__u32 nr_in_syncobjs; /* in, number of entries in in_syncobj */
__u32 nr_out_syncobjs; /* in, number of entries in out_syncobj. */
__u32 syncobj_stride; /* in, stride of syncobj arrays. */
__u32 pad; /*in, reserved for future use, always 0. */
+};
+#define MSM_VM_BIND_OP_UNMAP 0
+#define MSM_VM_BIND_OP_MAP 1
+#define MSM_VM_BIND_OP_MAP_NULL 2
+
+#define MSM_VM_BIND_OP_DUMP 1
+#define MSM_VM_BIND_OP_FLAGS ( \
+ MSM_VM_BIND_OP_DUMP | \
+ 0)
+
+/**
+ * struct drm_msm_vm_bind_op - bind/unbind op to run
+ */
+struct drm_msm_vm_bind_op {
+ /** @op: one of MSM_VM_BIND_OP_x */
+ __u32 op;
+ /** @handle: GEM object handle, MBZ for UNMAP or MAP_NULL */
+ __u32 handle;
+ /** @obj_offset: Offset into GEM object, MBZ for UNMAP or MAP_NULL */
+ __u64 obj_offset;
+ /** @iova: Address to operate on */
+ __u64 iova;
+	/** @range: Number of bytes to map/unmap */
+ __u64 range;
+	/** @flags: Bitmask of MSM_VM_BIND_OP_FLAGS */
+ __u32 flags;
+ /** @pad: MBZ */
+ __u32 pad;
+};
+
+#define MSM_VM_BIND_FENCE_FD_IN 0x00000001
+#define MSM_VM_BIND_FENCE_FD_OUT 0x00000002
+#define MSM_VM_BIND_FLAGS ( \
+ MSM_VM_BIND_FENCE_FD_IN | \
+ MSM_VM_BIND_FENCE_FD_OUT | \
+ 0)
+
+/**
+ * struct drm_msm_vm_bind - Input of &DRM_IOCTL_MSM_VM_BIND
+ */
+struct drm_msm_vm_bind {
+ /** @flags: in, bitmask of MSM_VM_BIND_x */
+ __u32 flags;
+ /** @nr_ops: the number of bind ops in this ioctl */
+ __u32 nr_ops;
+ /** @fence_fd: in/out fence fd (see MSM_VM_BIND_FENCE_FD_IN/OUT) */
+ __s32 fence_fd;
+ /** @queue_id: in, submitqueue id */
+ __u32 queue_id;
+	/** @in_syncobjs: in, ptr to array of drm_msm_syncobj */
+	__u64 in_syncobjs;
+	/** @out_syncobjs: in, ptr to array of drm_msm_syncobj */
+	__u64 out_syncobjs;
+	/** @nr_in_syncobjs: in, number of entries in @in_syncobjs */
+	__u32 nr_in_syncobjs;
+	/** @nr_out_syncobjs: in, number of entries in @out_syncobjs */
+	__u32 nr_out_syncobjs;
+ /** @syncobj_stride: in, stride of syncobj arrays */
+ __u32 syncobj_stride;
+ /** @op_stride: sizeof each struct drm_msm_vm_bind_op in @ops */
+ __u32 op_stride;
+ union {
+		/** @op: used if nr_ops == 1 */
+		struct drm_msm_vm_bind_op op;
+		/** @ops: userptr to array of drm_msm_vm_bind_op if nr_ops > 1 */
+		__u64 ops;
+ };
};
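[Editor's note: a minimal sketch of a synchronous single-op map using the inline @op member. DRM_IOCTL_MSM_VM_BIND is defined later in this file, and the queue is assumed to have been created with MSM_SUBMITQUEUE_VM_BIND (see the submitqueue flags below).]

#include <string.h>
#include <sys/ioctl.h>
#include <drm/msm_drm.h>

/* Sketch: map one BO at a fixed iova with a single inline bind op. */
static int msm_vm_bind_map(int fd, __u32 queue_id, __u32 handle,
			   __u64 iova, __u64 range)
{
	struct drm_msm_vm_bind req;

	memset(&req, 0, sizeof(req));
	req.flags = 0;			/* synchronous: no fence fds */
	req.nr_ops = 1;			/* inline @op, not the @ops array */
	req.queue_id = queue_id;
	req.op.op = MSM_VM_BIND_OP_MAP;
	req.op.handle = handle;
	req.op.iova = iova;
	req.op.range = range;

	return ioctl(fd, DRM_IOCTL_MSM_VM_BIND, &req);
}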
#define MSM_WAIT_FENCE_BOOST 0x00000001
@@ -345,12 +455,19 @@ struct drm_msm_gem_madvise {
/*
 * Draw queues allow the user to set specific submission parameters. Command
* submissions specify a specific submitqueue to use. ID 0 is reserved for
- * backwards compatibility as a "default" submitqueue
+ * backwards compatibility as a "default" submitqueue.
+ *
+ * Because VM_BIND async updates happen on the CPU, they must run on a
+ * virtual queue created with the flag MSM_SUBMITQUEUE_VM_BIND. If we had
+ * a way to do pgtable updates on the GPU, we could drop this restriction.
*/
#define MSM_SUBMITQUEUE_ALLOW_PREEMPT 0x00000001
+#define MSM_SUBMITQUEUE_VM_BIND 0x00000002 /* virtual queue for VM_BIND ops */
+
#define MSM_SUBMITQUEUE_FLAGS ( \
MSM_SUBMITQUEUE_ALLOW_PREEMPT | \
+ MSM_SUBMITQUEUE_VM_BIND | \
0)
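[Editor's note: a hedged sketch of creating such a virtual queue, assuming the drm_msm_submitqueue layout (flags, prio, id) consumed by DRM_IOCTL_MSM_SUBMITQUEUE_NEW.]

#include <string.h>
#include <sys/ioctl.h>
#include <drm/msm_drm.h>

/* Sketch: create a virtual submitqueue dedicated to async VM_BIND ops. */
static int msm_create_vm_bind_queue(int fd, __u32 *queue_id)
{
	struct drm_msm_submitqueue req;

	memset(&req, 0, sizeof(req));
	req.flags = MSM_SUBMITQUEUE_VM_BIND;

	if (ioctl(fd, DRM_IOCTL_MSM_SUBMITQUEUE_NEW, &req))
		return -1;

	*queue_id = req.id;	/* assumed output member */
	return 0;
}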
/*
@@ -388,6 +505,7 @@ struct drm_msm_submitqueue_query {
#define DRM_MSM_SUBMITQUEUE_NEW 0x0A
#define DRM_MSM_SUBMITQUEUE_CLOSE 0x0B
#define DRM_MSM_SUBMITQUEUE_QUERY 0x0C
+#define DRM_MSM_VM_BIND 0x0D
#define DRM_IOCTL_MSM_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GET_PARAM, struct drm_msm_param)
#define DRM_IOCTL_MSM_SET_PARAM DRM_IOW (DRM_COMMAND_BASE + DRM_MSM_SET_PARAM, struct drm_msm_param)
@@ -401,6 +519,7 @@ struct drm_msm_submitqueue_query {
#define DRM_IOCTL_MSM_SUBMITQUEUE_NEW DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_SUBMITQUEUE_NEW, struct drm_msm_submitqueue)
#define DRM_IOCTL_MSM_SUBMITQUEUE_CLOSE DRM_IOW (DRM_COMMAND_BASE + DRM_MSM_SUBMITQUEUE_CLOSE, __u32)
#define DRM_IOCTL_MSM_SUBMITQUEUE_QUERY DRM_IOW (DRM_COMMAND_BASE + DRM_MSM_SUBMITQUEUE_QUERY, struct drm_msm_submitqueue_query)
+#define DRM_IOCTL_MSM_VM_BIND DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_VM_BIND, struct drm_msm_vm_bind)
#if defined(__cplusplus)
}
diff --git a/include/uapi/drm/nova_drm.h b/include/uapi/drm/nova_drm.h
new file mode 100644
index 000000000000..3ca90ed9d2bb
--- /dev/null
+++ b/include/uapi/drm/nova_drm.h
@@ -0,0 +1,101 @@
+/* SPDX-License-Identifier: MIT */
+
+#ifndef __NOVA_DRM_H__
+#define __NOVA_DRM_H__
+
+#include "drm.h"
+
+/* DISCLAIMER: Do not use, this is not a stable uAPI.
+ *
+ * This uAPI serves only testing purposes as long as this driver is still in
+ * development. It is required to implement and test infrastructure which is
+ * upstreamed in the context of this driver. See also [1].
+ *
+ * [1] https://lore.kernel.org/dri-devel/Zfsj0_tb-0-tNrJy@cassiopeiae/T/#u
+ */
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/*
+ * NOVA_GETPARAM_VRAM_BAR_SIZE
+ *
+ * Query the VRAM BAR size in bytes.
+ */
+#define NOVA_GETPARAM_VRAM_BAR_SIZE 0x1
+
+/**
+ * struct drm_nova_getparam - query GPU and driver metadata
+ */
+struct drm_nova_getparam {
+ /**
+ * @param: The identifier of the parameter to query.
+ */
+ __u64 param;
+
+ /**
+ * @value: The value for the specified parameter.
+ */
+ __u64 value;
+};
+
+/**
+ * struct drm_nova_gem_create - create a new DRM GEM object
+ */
+struct drm_nova_gem_create {
+ /**
+ * @handle: The handle of the new DRM GEM object.
+ */
+ __u32 handle;
+
+ /**
+ * @pad: 32 bit padding, should be 0.
+ */
+ __u32 pad;
+
+ /**
+ * @size: The size of the new DRM GEM object.
+ */
+ __u64 size;
+};
+
+/**
+ * struct drm_nova_gem_info - query DRM GEM object metadata
+ */
+struct drm_nova_gem_info {
+ /**
+ * @handle: The handle of the DRM GEM object to query.
+ */
+ __u32 handle;
+
+ /**
+ * @pad: 32 bit padding, should be 0.
+ */
+ __u32 pad;
+
+ /**
+	 * @size: The size of the DRM GEM object.
+ */
+ __u64 size;
+};
+
+#define DRM_NOVA_GETPARAM 0x00
+#define DRM_NOVA_GEM_CREATE 0x01
+#define DRM_NOVA_GEM_INFO 0x02
+
+/* Note: this is an enum so that it can be resolved by Rust bindgen. */
+enum {
+ DRM_IOCTL_NOVA_GETPARAM = DRM_IOWR(DRM_COMMAND_BASE + DRM_NOVA_GETPARAM,
+ struct drm_nova_getparam),
+ DRM_IOCTL_NOVA_GEM_CREATE = DRM_IOWR(DRM_COMMAND_BASE + DRM_NOVA_GEM_CREATE,
+ struct drm_nova_gem_create),
+ DRM_IOCTL_NOVA_GEM_INFO = DRM_IOWR(DRM_COMMAND_BASE + DRM_NOVA_GEM_INFO,
+ struct drm_nova_gem_info),
+};
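[Editor's note: a usage sketch for this testing-only uAPI (see the disclaimer above): querying the VRAM BAR size.]

#include <sys/ioctl.h>
#include <drm/nova_drm.h>

/* Sketch: query the VRAM BAR size in bytes on an open nova DRM fd. */
static int nova_query_vram_bar_size(int fd, __u64 *size)
{
	struct drm_nova_getparam args = {
		.param = NOVA_GETPARAM_VRAM_BAR_SIZE,
	};

	if (ioctl(fd, DRM_IOCTL_NOVA_GETPARAM, &args))
		return -1;

	*size = args.value;
	return 0;
}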
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif /* __NOVA_DRM_H__ */
diff --git a/include/uapi/drm/panfrost_drm.h b/include/uapi/drm/panfrost_drm.h
index 568724be6628..ed67510395bd 100644
--- a/include/uapi/drm/panfrost_drm.h
+++ b/include/uapi/drm/panfrost_drm.h
@@ -21,6 +21,7 @@ extern "C" {
#define DRM_PANFROST_PERFCNT_ENABLE 0x06
#define DRM_PANFROST_PERFCNT_DUMP 0x07
#define DRM_PANFROST_MADVISE 0x08
+#define DRM_PANFROST_SET_LABEL_BO 0x09
#define DRM_IOCTL_PANFROST_SUBMIT DRM_IOW(DRM_COMMAND_BASE + DRM_PANFROST_SUBMIT, struct drm_panfrost_submit)
#define DRM_IOCTL_PANFROST_WAIT_BO DRM_IOW(DRM_COMMAND_BASE + DRM_PANFROST_WAIT_BO, struct drm_panfrost_wait_bo)
@@ -29,6 +30,7 @@ extern "C" {
#define DRM_IOCTL_PANFROST_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_PANFROST_GET_PARAM, struct drm_panfrost_get_param)
#define DRM_IOCTL_PANFROST_GET_BO_OFFSET DRM_IOWR(DRM_COMMAND_BASE + DRM_PANFROST_GET_BO_OFFSET, struct drm_panfrost_get_bo_offset)
#define DRM_IOCTL_PANFROST_MADVISE DRM_IOWR(DRM_COMMAND_BASE + DRM_PANFROST_MADVISE, struct drm_panfrost_madvise)
+#define DRM_IOCTL_PANFROST_SET_LABEL_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_PANFROST_SET_LABEL_BO, struct drm_panfrost_set_label_bo)
/*
* Unstable ioctl(s): only exposed when the unsafe unstable_ioctls module
@@ -227,6 +229,25 @@ struct drm_panfrost_madvise {
__u32 retained; /* out, whether backing store still exists */
};
+/**
+ * struct drm_panfrost_set_label_bo - ioctl argument for labelling Panfrost BOs.
+ */
+struct drm_panfrost_set_label_bo {
+ /** @handle: Handle of the buffer object to label. */
+ __u32 handle;
+
+ /** @pad: MBZ. */
+ __u32 pad;
+
+ /**
+ * @label: User pointer to a NUL-terminated string
+ *
+ * Length cannot be greater than 4096.
+ * NULL is permitted and means clear the label.
+ */
+ __u64 label;
+};
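[Editor's note: a hedged usage sketch of the labelling ioctl defined above; a NULL @label pointer clears any previous label.]

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/panfrost_drm.h>

/* Sketch: attach a debug label to a BO, or clear it by passing NULL. */
static int panfrost_set_bo_label(int fd, __u32 handle, const char *label)
{
	struct drm_panfrost_set_label_bo args = {
		.handle = handle,
		.label = (__u64)(uintptr_t)label,
	};

	return ioctl(fd, DRM_IOCTL_PANFROST_SET_LABEL_BO, &args);
}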
+
/* Definitions for coredump decoding in user space */
#define PANFROSTDUMP_MAJOR 1
#define PANFROSTDUMP_MINOR 0
diff --git a/include/uapi/drm/panthor_drm.h b/include/uapi/drm/panthor_drm.h
index 97e2c4510e69..e1f43deb7eca 100644
--- a/include/uapi/drm/panthor_drm.h
+++ b/include/uapi/drm/panthor_drm.h
@@ -127,6 +127,23 @@ enum drm_panthor_ioctl_id {
/** @DRM_PANTHOR_TILER_HEAP_DESTROY: Destroy a tiler heap. */
DRM_PANTHOR_TILER_HEAP_DESTROY,
+
+ /** @DRM_PANTHOR_BO_SET_LABEL: Label a BO. */
+ DRM_PANTHOR_BO_SET_LABEL,
+
+ /**
+ * @DRM_PANTHOR_SET_USER_MMIO_OFFSET: Set the offset to use as the user MMIO offset.
+ *
+ * The default behavior is to pick the MMIO offset based on the size of the pgoff_t
+ * type seen by the process that manipulates the FD, such that a 32-bit process can
+ * always map the user MMIO ranges. But this approach doesn't work well for emulators
+	 * like FEX, where the emulator is a 64-bit binary which might be executing 32-bit
+	 * code. In that case, the kernel thinks it's a 64-bit process and assumes
+ * DRM_PANTHOR_USER_MMIO_OFFSET_64BIT is in use, but the UMD library expects
+ * DRM_PANTHOR_USER_MMIO_OFFSET_32BIT, because it can't mmap() anything above the
+ * pgoff_t size.
+ */
+ DRM_PANTHOR_SET_USER_MMIO_OFFSET,
};
/**
@@ -293,6 +310,9 @@ struct drm_panthor_gpu_info {
/** @as_present: Bitmask encoding the number of address-space exposed by the MMU. */
__u32 as_present;
+ /** @pad0: MBZ. */
+ __u32 pad0;
+
/** @shader_present: Bitmask encoding the shader cores exposed by the GPU. */
__u64 shader_present;
@@ -978,6 +998,46 @@ struct drm_panthor_tiler_heap_destroy {
};
/**
+ * struct drm_panthor_bo_set_label - Arguments passed to DRM_IOCTL_PANTHOR_BO_SET_LABEL
+ */
+struct drm_panthor_bo_set_label {
+ /** @handle: Handle of the buffer object to label. */
+ __u32 handle;
+
+ /** @pad: MBZ. */
+ __u32 pad;
+
+ /**
+ * @label: User pointer to a NUL-terminated string
+ *
+ * Length cannot be greater than 4096
+ */
+ __u64 label;
+};
+
+/**
+ * struct drm_panthor_set_user_mmio_offset - Arguments passed to
+ * DRM_IOCTL_PANTHOR_SET_USER_MMIO_OFFSET
+ *
+ * This ioctl is only really useful if you want to support userspace
+ * CPU emulation environments where the size of an unsigned long differs
+ * between the host and the guest architectures.
+ */
+struct drm_panthor_set_user_mmio_offset {
+ /**
+ * @offset: User MMIO offset to use.
+ *
+ * Must be either DRM_PANTHOR_USER_MMIO_OFFSET_32BIT or
+ * DRM_PANTHOR_USER_MMIO_OFFSET_64BIT.
+ *
+ * Use DRM_PANTHOR_USER_MMIO_OFFSET (which selects OFFSET_32BIT or
+ * OFFSET_64BIT based on the size of an unsigned long) unless you
+ * have a very good reason to overrule this decision.
+ */
+ __u64 offset;
+};
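[Editor's note: a hedged sketch of the emulator case described above: a 64-bit FEX-style binary running 32-bit guest code forces the 32-bit offset so the guest can mmap() the user MMIO ranges. The ioctl number is defined below.]

#include <sys/ioctl.h>
#include <drm/panthor_drm.h>

/* Sketch: force the 32-bit user MMIO offset from a 64-bit emulator
 * that executes 32-bit guest code. */
static int panthor_force_32bit_mmio(int fd)
{
	struct drm_panthor_set_user_mmio_offset args = {
		.offset = DRM_PANTHOR_USER_MMIO_OFFSET_32BIT,
	};

	return ioctl(fd, DRM_IOCTL_PANTHOR_SET_USER_MMIO_OFFSET, &args);
}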
+
+/**
* DRM_IOCTL_PANTHOR() - Build a Panthor IOCTL number
* @__access: Access type. Must be R, W or RW.
* @__id: One of the DRM_PANTHOR_xxx id.
@@ -1019,6 +1079,10 @@ enum {
DRM_IOCTL_PANTHOR(WR, TILER_HEAP_CREATE, tiler_heap_create),
DRM_IOCTL_PANTHOR_TILER_HEAP_DESTROY =
DRM_IOCTL_PANTHOR(WR, TILER_HEAP_DESTROY, tiler_heap_destroy),
+ DRM_IOCTL_PANTHOR_BO_SET_LABEL =
+ DRM_IOCTL_PANTHOR(WR, BO_SET_LABEL, bo_set_label),
+ DRM_IOCTL_PANTHOR_SET_USER_MMIO_OFFSET =
+ DRM_IOCTL_PANTHOR(WR, SET_USER_MMIO_OFFSET, set_user_mmio_offset),
};
#if defined(__cplusplus)
diff --git a/include/uapi/drm/virtgpu_drm.h b/include/uapi/drm/virtgpu_drm.h
index c2ce71987e9b..9debb320c34b 100644
--- a/include/uapi/drm/virtgpu_drm.h
+++ b/include/uapi/drm/virtgpu_drm.h
@@ -163,6 +163,12 @@ struct drm_virtgpu_3d_wait {
__u32 flags;
};
+#define VIRTGPU_DRM_CAPSET_VIRGL 1
+#define VIRTGPU_DRM_CAPSET_VIRGL2 2
+#define VIRTGPU_DRM_CAPSET_GFXSTREAM_VULKAN 3
+#define VIRTGPU_DRM_CAPSET_VENUS 4
+#define VIRTGPU_DRM_CAPSET_CROSS_DOMAIN 5
+#define VIRTGPU_DRM_CAPSET_DRM 6
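[Editor's note: a hedged sketch of fetching one of these capsets, assuming the @addr and @size members of struct drm_virtgpu_get_caps (shown in part below) and the DRM_IOCTL_VIRTGPU_GET_CAPS ioctl from this header.]

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/virtgpu_drm.h>

/* Sketch: copy the DRM capset into a caller-provided buffer. */
static int virtgpu_get_drm_capset(int fd, void *buf, __u32 buf_size)
{
	struct drm_virtgpu_get_caps args = {
		.cap_set_id = VIRTGPU_DRM_CAPSET_DRM,
		.cap_set_ver = 0,
		.addr = (__u64)(uintptr_t)buf,	/* assumed member names */
		.size = buf_size,
	};

	return ioctl(fd, DRM_IOCTL_VIRTGPU_GET_CAPS, &args);
}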
struct drm_virtgpu_get_caps {
__u32 cap_set_id;
__u32 cap_set_ver;
diff --git a/include/uapi/drm/xe_drm.h b/include/uapi/drm/xe_drm.h
index 616916985e3f..e2426413488f 100644
--- a/include/uapi/drm/xe_drm.h
+++ b/include/uapi/drm/xe_drm.h
@@ -917,13 +917,17 @@ struct drm_xe_gem_mmap_offset {
* struct drm_xe_vm_create - Input of &DRM_IOCTL_XE_VM_CREATE
*
* The @flags can be:
- * - %DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE
+ * - %DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE - Map the whole virtual address
+ *   space of the VM to the scratch page. A vm_bind would overwrite the
+ *   scratch page mapping. This flag is mutually exclusive with the
+ *   %DRM_XE_VM_CREATE_FLAG_FAULT_MODE flag, with the exception of the xe2
+ *   and xe3 platforms.
* - %DRM_XE_VM_CREATE_FLAG_LR_MODE - An LR, or Long Running VM accepts
* exec submissions to its exec_queues that don't have an upper time
* limit on the job execution time. But exec submissions to these
- * don't allow any of the flags DRM_XE_SYNC_FLAG_SYNCOBJ,
- * DRM_XE_SYNC_FLAG_TIMELINE_SYNCOBJ, DRM_XE_SYNC_FLAG_DMA_BUF,
- * used as out-syncobjs, that is, together with DRM_XE_SYNC_FLAG_SIGNAL.
+ * don't allow any of the sync types DRM_XE_SYNC_TYPE_SYNCOBJ,
+ * DRM_XE_SYNC_TYPE_TIMELINE_SYNCOBJ, used as out-syncobjs, that is,
+ * together with sync flag DRM_XE_SYNC_FLAG_SIGNAL.
* LR VMs can be created in recoverable page-fault mode using
* DRM_XE_VM_CREATE_FLAG_FAULT_MODE, if the device supports it.
* If that flag is omitted, the UMD can not rely on the slightly
@@ -1206,6 +1210,11 @@ struct drm_xe_vm_bind {
* there is no need to explicitly set that. When a queue of type
* %DRM_XE_PXP_TYPE_HWDRM is created, the PXP default HWDRM session
 * (%XE_PXP_HWDRM_DEFAULT_SESSION) will be started, if it isn't already running.
+ * The user is expected to query the PXP status via the query ioctl (see
+ * %DRM_XE_DEVICE_QUERY_PXP_STATUS) and to wait for PXP to be ready before
+ * attempting to create a queue with this property. When a queue is created
+ * before PXP is ready, the ioctl will return -EBUSY if init is still in
+ * progress or -EIO if init failed.
* Given that going into a power-saving state kills PXP HWDRM sessions,
* runtime PM will be blocked while queues of this type are alive.
* All PXP queues will be killed if a PXP invalidation event occurs.
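[Editor's note: a hedged sketch of the retry flow described above, assuming DRM_IOCTL_XE_EXEC_QUEUE_CREATE and struct drm_xe_exec_queue_create from elsewhere in this header, with the DRM_XE_PXP_TYPE_HWDRM property already attached by the caller.]

#include <errno.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <drm/xe_drm.h>

/* Sketch: create a PXP HWDRM queue, retrying while PXP init is ongoing.
 * -EBUSY means init is still in progress; -EIO means init failed. */
static int xe_create_pxp_queue(int fd, struct drm_xe_exec_queue_create *args)
{
	for (;;) {
		if (!ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_CREATE, args))
			return 0;
		if (errno != EBUSY)
			return -errno;	/* -EIO: PXP init failed, or other error */
		usleep(1000);		/* PXP init in progress; poll again */
	}
}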
@@ -1385,7 +1394,7 @@ struct drm_xe_sync {
/**
* @timeline_value: Input for the timeline sync object. Needs to be
- * different than 0 when used with %DRM_XE_SYNC_FLAG_TIMELINE_SYNCOBJ.
+ * different than 0 when used with %DRM_XE_SYNC_TYPE_TIMELINE_SYNCOBJ.
*/
__u64 timeline_value;
@@ -1608,6 +1617,9 @@ enum drm_xe_oa_unit_type {
/** @DRM_XE_OA_UNIT_TYPE_OAM: OAM OA unit */
DRM_XE_OA_UNIT_TYPE_OAM,
+
+ /** @DRM_XE_OA_UNIT_TYPE_OAM_SAG: OAM_SAG OA unit */
+ DRM_XE_OA_UNIT_TYPE_OAM_SAG,
};
/**
@@ -1629,6 +1641,7 @@ struct drm_xe_oa_unit {
#define DRM_XE_OA_CAPS_SYNCS (1 << 1)
#define DRM_XE_OA_CAPS_OA_BUFFER_SIZE (1 << 2)
#define DRM_XE_OA_CAPS_WAIT_NUM_REPORTS (1 << 3)
+#define DRM_XE_OA_CAPS_OAM (1 << 4)
/** @oa_timestamp_freq: OA timestamp freq */
__u64 oa_timestamp_freq;