Diffstat (limited to 'include/uapi/drm')
-rw-r--r--  include/uapi/drm/amdgpu_drm.h    |  72
-rw-r--r--  include/uapi/drm/amdxdna_accel.h | 136
-rw-r--r--  include/uapi/drm/drm.h           |  63
-rw-r--r--  include/uapi/drm/drm_mode.h      |   8
-rw-r--r--  include/uapi/drm/panthor_drm.h   |   3
-rw-r--r--  include/uapi/drm/rocket_accel.h  | 142
-rw-r--r--  include/uapi/drm/v3d_drm.h       |   2
-rw-r--r--  include/uapi/drm/xe_drm.h        | 282
8 files changed, 688 insertions(+), 20 deletions(-)
diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
index bdedbaccf776..cd7402e36b6d 100644
--- a/include/uapi/drm/amdgpu_drm.h
+++ b/include/uapi/drm/amdgpu_drm.h
@@ -57,6 +57,7 @@ extern "C" {
#define DRM_AMDGPU_USERQ 0x16
#define DRM_AMDGPU_USERQ_SIGNAL 0x17
#define DRM_AMDGPU_USERQ_WAIT 0x18
+#define DRM_AMDGPU_GEM_LIST_HANDLES 0x19
#define DRM_IOCTL_AMDGPU_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_CREATE, union drm_amdgpu_gem_create)
#define DRM_IOCTL_AMDGPU_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_MMAP, union drm_amdgpu_gem_mmap)
@@ -77,6 +78,7 @@ extern "C" {
#define DRM_IOCTL_AMDGPU_USERQ DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_USERQ, union drm_amdgpu_userq)
#define DRM_IOCTL_AMDGPU_USERQ_SIGNAL DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_USERQ_SIGNAL, struct drm_amdgpu_userq_signal)
#define DRM_IOCTL_AMDGPU_USERQ_WAIT DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_USERQ_WAIT, struct drm_amdgpu_userq_wait)
+#define DRM_IOCTL_AMDGPU_GEM_LIST_HANDLES DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_LIST_HANDLES, struct drm_amdgpu_gem_list_handles)
/**
* DOC: memory domains
@@ -103,6 +105,8 @@ extern "C" {
*
* %AMDGPU_GEM_DOMAIN_DOORBELL Doorbell. It is an MMIO region for
* signalling user mode queues.
+ *
+ * %AMDGPU_GEM_DOMAIN_MMIO_REMAP MMIO remap page (special mapping for HDP flushing).
*/
#define AMDGPU_GEM_DOMAIN_CPU 0x1
#define AMDGPU_GEM_DOMAIN_GTT 0x2
@@ -111,13 +115,15 @@ extern "C" {
#define AMDGPU_GEM_DOMAIN_GWS 0x10
#define AMDGPU_GEM_DOMAIN_OA 0x20
#define AMDGPU_GEM_DOMAIN_DOORBELL 0x40
+#define AMDGPU_GEM_DOMAIN_MMIO_REMAP 0x80
#define AMDGPU_GEM_DOMAIN_MASK (AMDGPU_GEM_DOMAIN_CPU | \
AMDGPU_GEM_DOMAIN_GTT | \
AMDGPU_GEM_DOMAIN_VRAM | \
AMDGPU_GEM_DOMAIN_GDS | \
AMDGPU_GEM_DOMAIN_GWS | \
- AMDGPU_GEM_DOMAIN_OA | \
- AMDGPU_GEM_DOMAIN_DOORBELL)
+ AMDGPU_GEM_DOMAIN_OA | \
+ AMDGPU_GEM_DOMAIN_DOORBELL | \
+ AMDGPU_GEM_DOMAIN_MMIO_REMAP)
/* Flag that CPU access will be required for the case of VRAM domain */
#define AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED (1 << 0)
@@ -800,6 +806,21 @@ union drm_amdgpu_wait_fences {
#define AMDGPU_GEM_OP_GET_GEM_CREATE_INFO 0
#define AMDGPU_GEM_OP_SET_PLACEMENT 1
+#define AMDGPU_GEM_OP_GET_MAPPING_INFO 2
+
+struct drm_amdgpu_gem_vm_entry {
+ /* Start of mapping (in bytes) */
+ __u64 addr;
+
+ /* Size of mapping (in bytes) */
+ __u64 size;
+
+ /* Offset into the BO at which the mapping starts (in bytes) */
+ __u64 offset;
+
+ /* flags needed to recreate mapping */
+ __u64 flags;
+};
/* Sets or returns a value associated with a buffer. */
struct drm_amdgpu_gem_op {
@@ -807,8 +828,44 @@ struct drm_amdgpu_gem_op {
__u32 handle;
/** AMDGPU_GEM_OP_* */
__u32 op;
- /** Input or return value */
+ /** Input or return value. For MAPPING_INFO op: pointer to array of struct drm_amdgpu_gem_vm_entry */
__u64 value;
+ /** For MAPPING_INFO op: number of mappings (in/out) */
+ __u32 num_entries;
+
+ __u32 padding;
+};
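
A minimal user-space sketch (not part of the patch) of driving the new MAPPING_INFO op. It assumes, as the in/out @num_entries suggests, that a first call with @num_entries = 0 only reports the mapping count; fd and bo_handle are assumed inputs and error handling is omitted:

#include <stdint.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include "amdgpu_drm.h"

struct drm_amdgpu_gem_op op = {
	.handle = bo_handle,
	.op = AMDGPU_GEM_OP_GET_MAPPING_INFO,
};
ioctl(fd, DRM_IOCTL_AMDGPU_GEM_OP, &op);    /* op.num_entries = mapping count */

struct drm_amdgpu_gem_vm_entry *ents = calloc(op.num_entries, sizeof(*ents));
op.value = (__u64)(uintptr_t)ents;          /* point value at the array */
ioctl(fd, DRM_IOCTL_AMDGPU_GEM_OP, &op);    /* fills addr/size/offset/flags */
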
+
+#define AMDGPU_GEM_LIST_HANDLES_FLAG_IS_IMPORT (1 << 0)
+
+struct drm_amdgpu_gem_list_handles {
+ /* User pointer to array of struct drm_amdgpu_gem_list_handles_entry */
+ __u64 entries;
+
+ /* In: capacity of the entries buffer, in entries. Out: number of
+  * handles in the process; if the count exceeds the capacity, the
+  * call must be retried with a larger buffer. */
+ __u32 num_entries;
+
+ __u32 padding;
+};
+
+struct drm_amdgpu_gem_list_handles_entry {
+ /* gem handle of buffer object */
+ __u32 gem_handle;
+
+ /* Currently just one flag: IS_IMPORT */
+ __u32 flags;
+
+ /* Size of bo */
+ __u64 size;
+
+ /* Preferred domains for GEM_CREATE */
+ __u64 preferred_domains;
+
+ /* GEM_CREATE flags for re-creation of buffer */
+ __u64 alloc_flags;
+
+ /* Physical start address alignment in bytes, required by some HW */
+ __u64 alignment;
};
#define AMDGPU_VA_OP_MAP 1
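
A sketch (not part of the patch) of the retry loop implied by the @num_entries comment on drm_amdgpu_gem_list_handles (includes as in the sketch above):

struct drm_amdgpu_gem_list_handles args = {0};
struct drm_amdgpu_gem_list_handles_entry *buf = NULL;
__u32 room;

ioctl(fd, DRM_IOCTL_AMDGPU_GEM_LIST_HANDLES, &args);   /* query handle count */
do {
	room = args.num_entries;
	buf = realloc(buf, room * sizeof(*buf));
	args.entries = (__u64)(uintptr_t)buf;
	args.num_entries = room;                       /* in: buffer capacity */
	ioctl(fd, DRM_IOCTL_AMDGPU_GEM_LIST_HANDLES, &args);
} while (args.num_entries > room);     /* more handles appeared: grow, retry */
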
@@ -1031,10 +1088,11 @@ struct drm_amdgpu_cs_chunk_cp_gfx_shadow {
* Query h/w info: Flag that this is an integrated (a.k.a. fusion) GPU
*
*/
-#define AMDGPU_IDS_FLAGS_FUSION 0x1
-#define AMDGPU_IDS_FLAGS_PREEMPTION 0x2
-#define AMDGPU_IDS_FLAGS_TMZ 0x4
-#define AMDGPU_IDS_FLAGS_CONFORMANT_TRUNC_COORD 0x8
+#define AMDGPU_IDS_FLAGS_FUSION 0x01
+#define AMDGPU_IDS_FLAGS_PREEMPTION 0x02
+#define AMDGPU_IDS_FLAGS_TMZ 0x04
+#define AMDGPU_IDS_FLAGS_CONFORMANT_TRUNC_COORD 0x08
+#define AMDGPU_IDS_FLAGS_GANG_SUBMIT 0x10
/*
* Query h/w info: Flag identifying VF/PF/PT mode
diff --git a/include/uapi/drm/amdxdna_accel.h b/include/uapi/drm/amdxdna_accel.h
index a706ead39082..a1fb9785db77 100644
--- a/include/uapi/drm/amdxdna_accel.h
+++ b/include/uapi/drm/amdxdna_accel.h
@@ -34,6 +34,7 @@ enum amdxdna_drm_ioctl_id {
DRM_AMDXDNA_EXEC_CMD,
DRM_AMDXDNA_GET_INFO,
DRM_AMDXDNA_SET_STATE,
+ DRM_AMDXDNA_GET_ARRAY = 10,
};
/**
@@ -154,6 +155,31 @@ enum amdxdna_bo_type {
};
/**
+ * struct amdxdna_drm_va_entry
+ * @vaddr: Virtual address.
+ * @len: Size of entry.
+ */
+struct amdxdna_drm_va_entry {
+ __u64 vaddr;
+ __u64 len;
+};
+
+/**
+ * struct amdxdna_drm_va_tbl
+ * @dmabuf_fd: The fd of dmabuf.
+ * @num_entries: Number of va entries.
+ * @va_entries: Array of va entries.
+ *
+ * The input can be either a dmabuf fd or a virtual address entry table.
+ * When dmabuf_fd is used, num_entries must be zero.
+ */
+struct amdxdna_drm_va_tbl {
+ __s32 dmabuf_fd;
+ __u32 num_entries;
+ struct amdxdna_drm_va_entry va_entries[];
+};
+
+/**
* struct amdxdna_drm_create_bo - Create a buffer object.
* @flags: Buffer flags. MBZ.
* @vaddr: User VA of buffer if applicable. MBZ.
@@ -430,6 +456,112 @@ struct amdxdna_drm_get_info {
__u64 buffer; /* in/out */
};
+#define AMDXDNA_HWCTX_STATE_IDLE 0
+#define AMDXDNA_HWCTX_STATE_ACTIVE 1
+
+/**
+ * struct amdxdna_drm_hwctx_entry - The hardware context array entry
+ */
+struct amdxdna_drm_hwctx_entry {
+ /** @context_id: Context ID. */
+ __u32 context_id;
+ /** @start_col: Start AIE array column assigned to context. */
+ __u32 start_col;
+ /** @num_col: Number of AIE array columns assigned to context. */
+ __u32 num_col;
+ /** @hwctx_id: The real hardware context id. */
+ __u32 hwctx_id;
+ /** @pid: ID of process which created this context. */
+ __s64 pid;
+ /** @command_submissions: Number of commands submitted. */
+ __u64 command_submissions;
+ /** @command_completions: Number of commands completed. */
+ __u64 command_completions;
+ /** @migrations: Number of times the context has been migrated. */
+ __u64 migrations;
+ /** @preemptions: Number of times the context has been preempted. */
+ __u64 preemptions;
+ /** @errors: Number of errors encountered. */
+ __u64 errors;
+ /** @priority: Context priority. */
+ __u64 priority;
+ /** @heap_usage: Usage of device heap buffer. */
+ __u64 heap_usage;
+ /** @suspensions: Number of times the context has been suspended. */
+ __u64 suspensions;
+ /**
+ * @state: Context state.
+ * %AMDXDNA_HWCTX_STATE_IDLE
+ * %AMDXDNA_HWCTX_STATE_ACTIVE
+ */
+ __u32 state;
+ /** @pasid: PASID bound to this context. */
+ __u32 pasid;
+ /** @gops: Giga operations per second. */
+ __u32 gops;
+ /** @fps: Frames per second. */
+ __u32 fps;
+ /** @dma_bandwidth: DMA bandwidth. */
+ __u32 dma_bandwidth;
+ /** @latency: Frame response latency. */
+ __u32 latency;
+ /** @frame_exec_time: Frame execution time. */
+ __u32 frame_exec_time;
+ /** @txn_op_idx: Index of last control code executed. */
+ __u32 txn_op_idx;
+ /** @ctx_pc: Program counter. */
+ __u32 ctx_pc;
+ /** @fatal_error_type: Fatal error type if context crashes. */
+ __u32 fatal_error_type;
+ /** @fatal_error_exception_type: Firmware exception type. */
+ __u32 fatal_error_exception_type;
+ /** @fatal_error_exception_pc: Firmware exception program counter. */
+ __u32 fatal_error_exception_pc;
+ /** @fatal_error_app_module: Exception module name. */
+ __u32 fatal_error_app_module;
+ /** @pad: Structure pad. */
+ __u32 pad;
+};
+
+#define DRM_AMDXDNA_HW_CONTEXT_ALL 0
+
+/**
+ * struct amdxdna_drm_get_array - Get information array.
+ */
+struct amdxdna_drm_get_array {
+ /**
+ * @param:
+ *
+ * Supported params:
+ *
+ * %DRM_AMDXDNA_HW_CONTEXT_ALL:
+ * Returns all created hardware contexts.
+ */
+ __u32 param;
+ /**
+ * @element_size:
+ *
+ * Specifies maximum element size and returns the actual element size.
+ */
+ __u32 element_size;
+ /**
+ * @num_element:
+ *
+ * Specifies maximum number of elements and returns the actual number
+ * of elements.
+ */
+ __u32 num_element; /* in/out */
+ /** @pad: MBZ */
+ __u32 pad;
+ /**
+ * @buffer:
+ *
+ * Specifies the match conditions and returns the matched information
+ * array.
+ */
+ __u64 buffer;
+};
+
enum amdxdna_drm_set_param {
DRM_AMDXDNA_SET_POWER_MODE,
DRM_AMDXDNA_WRITE_AIE_MEM,
@@ -494,6 +626,10 @@ struct amdxdna_drm_set_power_mode {
DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDXDNA_SET_STATE, \
struct amdxdna_drm_set_state)
+#define DRM_IOCTL_AMDXDNA_GET_ARRAY \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDXDNA_GET_ARRAY, \
+ struct amdxdna_drm_get_array)
+
#if defined(__cplusplus)
} /* extern c end */
#endif
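
A sketch (not part of the patch) of listing every hardware context through the new array interface; the 64-entry capacity is arbitrary, fd is an assumed open accel device fd, and includes are omitted:

struct amdxdna_drm_hwctx_entry ents[64];
struct amdxdna_drm_get_array args = {
	.param = DRM_AMDXDNA_HW_CONTEXT_ALL,
	.element_size = sizeof(ents[0]),    /* in: largest entry understood */
	.num_element = 64,                  /* in: buffer capacity */
	.buffer = (__u64)(uintptr_t)ents,
};

ioctl(fd, DRM_IOCTL_AMDXDNA_GET_ARRAY, &args);
/* out: args.element_size is the size actually written per entry and
 * args.num_element the number of entries filled */
for (__u32 i = 0; i < args.num_element; i++)
	printf("ctx %u: state %u\n", ents[i].context_id, ents[i].state);
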
diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h
index e63a71d3c607..3cd5cf15e3c9 100644
--- a/include/uapi/drm/drm.h
+++ b/include/uapi/drm/drm.h
@@ -597,35 +597,66 @@ struct drm_set_version {
int drm_dd_minor;
};
-/* DRM_IOCTL_GEM_CLOSE ioctl argument type */
+/**
+ * struct drm_gem_close - Argument for &DRM_IOCTL_GEM_CLOSE ioctl.
+ * @handle: Handle of the object to be closed.
+ * @pad: Padding.
+ *
+ * Releases the handle to an mm object.
+ */
struct drm_gem_close {
- /** Handle of the object to be closed. */
__u32 handle;
__u32 pad;
};
-/* DRM_IOCTL_GEM_FLINK ioctl argument type */
+/**
+ * struct drm_gem_flink - Argument for &DRM_IOCTL_GEM_FLINK ioctl.
+ * @handle: Handle for the object being named.
+ * @name: Returned global name.
+ *
+ * Create a global name for an object, returning the name.
+ *
+ * Note that the name does not hold a reference; when the object
+ * is freed, the name goes away.
+ */
struct drm_gem_flink {
- /** Handle for the object being named */
__u32 handle;
-
- /** Returned global name */
__u32 name;
};
-/* DRM_IOCTL_GEM_OPEN ioctl argument type */
+/**
+ * struct drm_gem_open - Argument for &DRM_IOCTL_GEM_OPEN ioctl.
+ * @name: Name of object being opened.
+ * @handle: Returned handle for the object.
+ * @size: Returned size of the object.
+ *
+ * Open an object using the global name, returning a handle and the size.
+ *
+ * This handle (of course) holds a reference to the object, so the object
+ * will not go away until the handle is deleted.
+ */
struct drm_gem_open {
- /** Name of object being opened */
__u32 name;
-
- /** Returned handle for the object */
__u32 handle;
-
- /** Returned size of the object */
__u64 size;
};
/**
+ * struct drm_gem_change_handle - Argument for &DRM_IOCTL_GEM_CHANGE_HANDLE ioctl.
+ * @handle: The handle of a gem object.
+ * @new_handle: An available gem handle.
+ *
+ * This ioctl changes the handle of a GEM object to the specified one.
+ * The new handle must be unused. On success the old handle is closed
+ * and all further ioctls should refer to the new handle only.
+ * Calls to DRM_IOCTL_PRIME_FD_TO_HANDLE will return the new handle.
+ */
+struct drm_gem_change_handle {
+ __u32 handle;
+ __u32 new_handle;
+};
+
+/**
* DRM_CAP_DUMB_BUFFER
*
* If set to 1, the driver supports creating dumb buffers via the
@@ -1309,6 +1340,14 @@ extern "C" {
*/
#define DRM_IOCTL_SET_CLIENT_NAME DRM_IOWR(0xD1, struct drm_set_client_name)
+/**
+ * DRM_IOCTL_GEM_CHANGE_HANDLE - Move an object to a different handle
+ *
+ * Some applications (notably CRIU) need objects to have specific gem handles.
+ * This ioctl moves an object from its current gem handle to a new, caller-chosen one.
+ */
+#define DRM_IOCTL_GEM_CHANGE_HANDLE DRM_IOWR(0xD2, struct drm_gem_change_handle)
+
/*
* Device specific ioctls should only be in their respective headers
* The device specific ioctl range is from 0x40 to 0x9f.
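
A sketch of the checkpoint/restore use case named above: put an object back at the handle it had when the process image was saved. old_handle and wanted_handle are assumed inputs; error handling is omitted:

struct drm_gem_change_handle args = {
	.handle = old_handle,          /* current handle of the object */
	.new_handle = wanted_handle,   /* must be unused in this DRM file */
};
if (ioctl(fd, DRM_IOCTL_GEM_CHANGE_HANDLE, &args) == 0) {
	/* old_handle is now closed; refer to the object via wanted_handle */
}
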
diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h
index c082810c08a8..a122bea25593 100644
--- a/include/uapi/drm/drm_mode.h
+++ b/include/uapi/drm/drm_mode.h
@@ -962,6 +962,14 @@ struct hdr_output_metadata {
* Request that the kernel sends back a vblank event (see
* struct drm_event_vblank) with the &DRM_EVENT_FLIP_COMPLETE type when the
* page-flip is done.
+ *
+ * When used with atomic uAPI, one event will be delivered per CRTC included in
+ * the atomic commit. A CRTC is included in an atomic commit if one of its
+ * properties is set, or if a property is set on a connector or plane linked
+ * via the CRTC_ID property to the CRTC. At least one CRTC must be included,
+ * and all pulled-in CRTCs must be either previously or newly powered on (in
+ * other words, a powered-off CRTC which stays off cannot be included in the
+ * atomic commit).
*/
#define DRM_MODE_PAGE_FLIP_EVENT 0x01
/**
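
A sketch of how the per-CRTC events are requested from user space with libdrm's atomic helpers; plane_id, fb_prop_id, fb_id and user_data are assumed to have been obtained beforehand:

#include <xf86drm.h>
#include <xf86drmMode.h>

drmModeAtomicReq *req = drmModeAtomicAlloc();

/* Touching the plane's FB_ID property pulls its CRTC into the commit. */
drmModeAtomicAddProperty(req, plane_id, fb_prop_id, fb_id);
drmModeAtomicCommit(fd, req,
                    DRM_MODE_PAGE_FLIP_EVENT | DRM_MODE_ATOMIC_NONBLOCK,
                    user_data);
drmModeAtomicFree(req);
/* One struct drm_event_vblank with type DRM_EVENT_FLIP_COMPLETE later
 * becomes readable from fd for each CRTC included in the commit. */
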
diff --git a/include/uapi/drm/panthor_drm.h b/include/uapi/drm/panthor_drm.h
index e1f43deb7eca..467d365ed7ba 100644
--- a/include/uapi/drm/panthor_drm.h
+++ b/include/uapi/drm/panthor_drm.h
@@ -327,6 +327,9 @@ struct drm_panthor_gpu_info {
/** @pad: MBZ. */
__u32 pad;
+
+ /** @gpu_features: Bitmask describing supported GPU-wide features */
+ __u64 gpu_features;
};
/**
diff --git a/include/uapi/drm/rocket_accel.h b/include/uapi/drm/rocket_accel.h
new file mode 100644
index 000000000000..14b2e12b7c49
--- /dev/null
+++ b/include/uapi/drm/rocket_accel.h
@@ -0,0 +1,142 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2024 Tomeu Vizoso
+ */
+#ifndef __DRM_UAPI_ROCKET_ACCEL_H__
+#define __DRM_UAPI_ROCKET_ACCEL_H__
+
+#include "drm.h"
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+#define DRM_ROCKET_CREATE_BO 0x00
+#define DRM_ROCKET_SUBMIT 0x01
+#define DRM_ROCKET_PREP_BO 0x02
+#define DRM_ROCKET_FINI_BO 0x03
+
+#define DRM_IOCTL_ROCKET_CREATE_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_ROCKET_CREATE_BO, struct drm_rocket_create_bo)
+#define DRM_IOCTL_ROCKET_SUBMIT DRM_IOW(DRM_COMMAND_BASE + DRM_ROCKET_SUBMIT, struct drm_rocket_submit)
+#define DRM_IOCTL_ROCKET_PREP_BO DRM_IOW(DRM_COMMAND_BASE + DRM_ROCKET_PREP_BO, struct drm_rocket_prep_bo)
+#define DRM_IOCTL_ROCKET_FINI_BO DRM_IOW(DRM_COMMAND_BASE + DRM_ROCKET_FINI_BO, struct drm_rocket_fini_bo)
+
+/**
+ * struct drm_rocket_create_bo - ioctl argument for creating Rocket BOs.
+ */
+struct drm_rocket_create_bo {
+ /** Input: Size of the requested BO. */
+ __u32 size;
+
+ /** Output: GEM handle for the BO. */
+ __u32 handle;
+
+ /**
+ * Output: DMA address for the BO in the NPU address space. This address
+ * is private to the DRM fd and is valid for the lifetime of the GEM
+ * handle.
+ */
+ __u64 dma_address;
+
+ /** Output: Offset into the drm node to use for subsequent mmap call. */
+ __u64 offset;
+};
+
+/**
+ * struct drm_rocket_prep_bo - ioctl argument for starting CPU ownership of the BO.
+ *
+ * Takes care of waiting for any NPU jobs that might still be accessing the BO
+ * and performs cache synchronization.
+ */
+struct drm_rocket_prep_bo {
+ /** Input: GEM handle of the buffer object. */
+ __u32 handle;
+
+ /** Reserved, must be zero. */
+ __u32 reserved;
+
+ /** Input: Amount of time to wait for NPU jobs. */
+ __s64 timeout_ns;
+};
+
+/**
+ * struct drm_rocket_fini_bo - ioctl argument for finishing CPU ownership of the BO.
+ *
+ * Synchronize caches for NPU access.
+ */
+struct drm_rocket_fini_bo {
+ /** Input: GEM handle of the buffer object. */
+ __u32 handle;
+
+ /** Reserved, must be zero. */
+ __u32 reserved;
+};
+
+/**
+ * struct drm_rocket_task - A task to be run on the NPU
+ *
+ * A task is the smallest unit of work that can be run on the NPU.
+ */
+struct drm_rocket_task {
+ /** Input: DMA address of the register command buffer, in the NPU address space */
+ __u32 regcmd;
+
+ /** Input: Number of commands in the register command buffer */
+ __u32 regcmd_count;
+};
+
+/**
+ * struct drm_rocket_job - A job to be run on the NPU
+ *
+ * The kernel will schedule the execution of this job taking into account its
+ * dependencies with other jobs. All tasks in the same job will be executed
+ * sequentially on the same core, to benefit from memory residency in SRAM.
+ */
+struct drm_rocket_job {
+ /** Input: Pointer to an array of struct drm_rocket_task. */
+ __u64 tasks;
+
+ /** Input: Pointer to a u32 array of the BOs that are read by the job. */
+ __u64 in_bo_handles;
+
+ /** Input: Pointer to a u32 array of the BOs that are written to by the job. */
+ __u64 out_bo_handles;
+
+ /** Input: Number of tasks passed in. */
+ __u32 task_count;
+
+ /** Input: Size in bytes of the structs in the @tasks field. */
+ __u32 task_struct_size;
+
+ /** Input: Number of input BO handles passed in (the array is 4 bytes per handle). */
+ __u32 in_bo_handle_count;
+
+ /** Input: Number of output BO handles passed in (the array is 4 bytes per handle). */
+ __u32 out_bo_handle_count;
+};
+
+/**
+ * struct drm_rocket_submit - ioctl argument for submitting commands to the NPU.
+ *
+ * The kernel will schedule the execution of these jobs in dependency order.
+ */
+struct drm_rocket_submit {
+ /** Input: Pointer to an array of struct drm_rocket_job. */
+ __u64 jobs;
+
+ /** Input: Number of jobs passed in. */
+ __u32 job_count;
+
+ /** Input: Size in bytes of the structs in the @jobs field. */
+ __u32 job_struct_size;
+
+ /** Reserved, must be zero. */
+ __u64 reserved;
+};
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif /* __DRM_UAPI_ROCKET_ACCEL_H__ */
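
A sketch (not part of the header) of the flow the comments above describe: create a BO, take CPU ownership to fill it with register commands, hand it back, then submit one job with one task. n_cmds is an assumed count, the commands are assumed to sit at the start of the BO, and includes and error handling are omitted:

struct drm_rocket_create_bo bo = { .size = 4096 };
ioctl(fd, DRM_IOCTL_ROCKET_CREATE_BO, &bo);

struct drm_rocket_prep_bo prep = { .handle = bo.handle, .timeout_ns = INT64_MAX };
ioctl(fd, DRM_IOCTL_ROCKET_PREP_BO, &prep);    /* begin CPU access */
/* ... mmap() the BO via bo.offset and write the register commands ... */
struct drm_rocket_fini_bo fini = { .handle = bo.handle };
ioctl(fd, DRM_IOCTL_ROCKET_FINI_BO, &fini);    /* end CPU access */

struct drm_rocket_task task = {
	.regcmd = (__u32)bo.dma_address,    /* NPU-space address of the commands */
	.regcmd_count = n_cmds,
};
__u32 handles[] = { bo.handle };
struct drm_rocket_job job = {
	.tasks = (__u64)(uintptr_t)&task,
	.task_count = 1,
	.task_struct_size = sizeof(task),
	.in_bo_handles = (__u64)(uintptr_t)handles,
	.in_bo_handle_count = 1,
};
struct drm_rocket_submit submit = {
	.jobs = (__u64)(uintptr_t)&job,
	.job_count = 1,
	.job_struct_size = sizeof(job),
};
ioctl(fd, DRM_IOCTL_ROCKET_SUBMIT, &submit);
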
diff --git a/include/uapi/drm/v3d_drm.h b/include/uapi/drm/v3d_drm.h
index dbbc404d2b3d..d9b01f4c3a04 100644
--- a/include/uapi/drm/v3d_drm.h
+++ b/include/uapi/drm/v3d_drm.h
@@ -294,6 +294,8 @@ enum drm_v3d_param {
DRM_V3D_PARAM_SUPPORTS_CPU_QUEUE,
DRM_V3D_PARAM_MAX_PERF_COUNTERS,
DRM_V3D_PARAM_SUPPORTS_SUPER_PAGES,
+ DRM_V3D_PARAM_GLOBAL_RESET_COUNTER,
+ DRM_V3D_PARAM_CONTEXT_RESET_COUNTER,
};
struct drm_v3d_get_param {
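
A sketch of polling the new counters through the existing DRM_IOCTL_V3D_GET_PARAM flow (assuming the usual param/value layout of struct drm_v3d_get_param; last_global_resets is caller-kept state):

struct drm_v3d_get_param gp = {
	.param = DRM_V3D_PARAM_GLOBAL_RESET_COUNTER,
};
ioctl(fd, DRM_IOCTL_V3D_GET_PARAM, &gp);
if (gp.value != last_global_resets) {
	last_global_resets = gp.value;
	/* a GPU reset occurred somewhere since the previous query */
}
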
diff --git a/include/uapi/drm/xe_drm.h b/include/uapi/drm/xe_drm.h
index e2426413488f..40ff19f52a8d 100644
--- a/include/uapi/drm/xe_drm.h
+++ b/include/uapi/drm/xe_drm.h
@@ -81,6 +81,8 @@ extern "C" {
* - &DRM_IOCTL_XE_EXEC
* - &DRM_IOCTL_XE_WAIT_USER_FENCE
* - &DRM_IOCTL_XE_OBSERVATION
+ * - &DRM_IOCTL_XE_MADVISE
+ * - &DRM_IOCTL_XE_VM_QUERY_MEM_RANGE_ATTRS
*/
/*
@@ -102,6 +104,8 @@ extern "C" {
#define DRM_XE_EXEC 0x09
#define DRM_XE_WAIT_USER_FENCE 0x0a
#define DRM_XE_OBSERVATION 0x0b
+#define DRM_XE_MADVISE 0x0c
+#define DRM_XE_VM_QUERY_MEM_RANGE_ATTRS 0x0d
/* Must be kept compact -- no holes */
@@ -117,6 +121,8 @@ extern "C" {
#define DRM_IOCTL_XE_EXEC DRM_IOW(DRM_COMMAND_BASE + DRM_XE_EXEC, struct drm_xe_exec)
#define DRM_IOCTL_XE_WAIT_USER_FENCE DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_WAIT_USER_FENCE, struct drm_xe_wait_user_fence)
#define DRM_IOCTL_XE_OBSERVATION DRM_IOW(DRM_COMMAND_BASE + DRM_XE_OBSERVATION, struct drm_xe_observation_param)
+#define DRM_IOCTL_XE_MADVISE DRM_IOW(DRM_COMMAND_BASE + DRM_XE_MADVISE, struct drm_xe_madvise)
+#define DRM_IOCTL_XE_VM_QUERY_MEM_RANGE_ATTRS DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_VM_QUERY_MEM_RANGE_ATTRS, struct drm_xe_vm_query_mem_range_attr)
/**
* DOC: Xe IOCTL Extensions
@@ -760,7 +766,11 @@ struct drm_xe_device_query {
* gem creation
*
* The @flags can be:
- * - %DRM_XE_GEM_CREATE_FLAG_DEFER_BACKING
+ * - %DRM_XE_GEM_CREATE_FLAG_DEFER_BACKING - Modify the GEM object
+ * allocation strategy by deferring physical memory allocation
+ * until the object is either bound to a virtual memory region via
+ * VM_BIND or accessed by the CPU. As a result, no backing memory is
+ * reserved at the time of GEM object creation.
* - %DRM_XE_GEM_CREATE_FLAG_SCANOUT
* - %DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM - When using VRAM as a
* possible placement, ensure that the corresponding VRAM allocation
@@ -1003,6 +1013,10 @@ struct drm_xe_vm_destroy {
* valid on VMs with DRM_XE_VM_CREATE_FLAG_FAULT_MODE set. The CPU address
* mirror flag is only valid for DRM_XE_VM_BIND_OP_MAP operations, the BO
* handle MBZ, and the BO offset MBZ.
+ *
+ * The @prefetch_mem_region_instance for %DRM_XE_VM_BIND_OP_PREFETCH can also be:
+ * - %DRM_XE_CONSULT_MEM_ADVISE_PREF_LOC, which ensures prefetching occurs in
+ * the memory region advised by madvise.
*/
struct drm_xe_vm_bind_op {
/** @extensions: Pointer to the first extension struct, if any */
@@ -1108,6 +1122,7 @@ struct drm_xe_vm_bind_op {
/** @flags: Bind flags */
__u32 flags;
+#define DRM_XE_CONSULT_MEM_ADVISE_PREF_LOC -1
/**
* @prefetch_mem_region_instance: Memory region to prefetch VMA to.
* It is a region instance, not a mask.
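
A sketch (not part of the patch) of a prefetch that honours an earlier madvise, assuming the pre-existing struct drm_xe_vm_bind container and DRM_IOCTL_XE_VM_BIND from this header; vm_id is an assumed input:

struct drm_xe_vm_bind bind = {
	.vm_id = vm_id,
	.num_binds = 1,
	.bind = {
		.op = DRM_XE_VM_BIND_OP_PREFETCH,
		.addr = 0x100000,
		.range = 0x2000,
		/* place pages wherever a prior madvise preferred */
		.prefetch_mem_region_instance = DRM_XE_CONSULT_MEM_ADVISE_PREF_LOC,
	},
};
ioctl(fd, DRM_IOCTL_XE_VM_BIND, &bind);
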
@@ -1974,6 +1989,271 @@ struct drm_xe_query_eu_stall {
__u64 sampling_rates[];
};
+/**
+ * struct drm_xe_madvise - Input of &DRM_IOCTL_XE_MADVISE
+ *
+ * This structure is used to set memory attributes for a virtual address range
+ * in a VM. The type of attribute is specified by @type, and the corresponding
+ * union member is used to provide additional parameters for @type.
+ *
+ * Supported attribute types:
+ * - DRM_XE_MEM_RANGE_ATTR_PREFERRED_LOC: Set preferred memory location.
+ * - DRM_XE_MEM_RANGE_ATTR_ATOMIC: Set atomic access policy.
+ * - DRM_XE_MEM_RANGE_ATTR_PAT: Set page attribute table index.
+ *
+ * Example:
+ *
+ * .. code-block:: C
+ *
+ * struct drm_xe_madvise madvise = {
+ * .vm_id = vm_id,
+ * .start = 0x100000,
+ * .range = 0x2000,
+ * .type = DRM_XE_MEM_RANGE_ATTR_ATOMIC,
+ * .atomic.val = DRM_XE_ATOMIC_DEVICE,
+ * };
+ *
+ * ioctl(fd, DRM_IOCTL_XE_MADVISE, &madvise);
+ *
+ */
+struct drm_xe_madvise {
+ /** @extensions: Pointer to the first extension struct, if any */
+ __u64 extensions;
+
+ /** @start: start of the virtual address range */
+ __u64 start;
+
+ /** @range: size of the virtual address range */
+ __u64 range;
+
+ /** @vm_id: vm_id of the virtual range */
+ __u32 vm_id;
+
+#define DRM_XE_MEM_RANGE_ATTR_PREFERRED_LOC 0
+#define DRM_XE_MEM_RANGE_ATTR_ATOMIC 1
+#define DRM_XE_MEM_RANGE_ATTR_PAT 2
+ /** @type: type of attribute */
+ __u32 type;
+
+ union {
+ /**
+ * @preferred_mem_loc: preferred memory location
+ *
+ * Used when @type == DRM_XE_MEM_RANGE_ATTR_PREFERRED_LOC
+ *
+ * Supported values for @preferred_mem_loc.devmem_fd:
+ * - DRM_XE_PREFERRED_LOC_DEFAULT_DEVICE: use the VRAM of the faulting tile as the preferred location
+ * - DRM_XE_PREFERRED_LOC_DEFAULT_SYSTEM: use system memory (SMEM) as the preferred location
+ *
+ * Supported values for @preferred_mem_loc.migration_policy:
+ * - DRM_XE_MIGRATE_ALL_PAGES
+ * - DRM_XE_MIGRATE_ONLY_SYSTEM_PAGES
+ */
+ struct {
+#define DRM_XE_PREFERRED_LOC_DEFAULT_DEVICE 0
+#define DRM_XE_PREFERRED_LOC_DEFAULT_SYSTEM -1
+ /** @preferred_mem_loc.devmem_fd: fd for preferred loc */
+ __u32 devmem_fd;
+
+#define DRM_XE_MIGRATE_ALL_PAGES 0
+#define DRM_XE_MIGRATE_ONLY_SYSTEM_PAGES 1
+ /** @preferred_mem_loc.migration_policy: Page migration policy */
+ __u16 migration_policy;
+
+ /** @preferred_mem_loc.pad: MBZ */
+ __u16 pad;
+
+ /** @preferred_mem_loc.reserved: Reserved */
+ __u64 reserved;
+ } preferred_mem_loc;
+
+ /**
+ * @atomic: Atomic access policy
+ *
+ * Used when @type == DRM_XE_MEM_RANGE_ATTR_ATOMIC.
+ *
+ * Supported values for @atomic.val:
+ * - DRM_XE_ATOMIC_UNDEFINED: Undefined or default behaviour.
+ *   Supports both GPU and CPU atomic operations for the system allocator,
+ *   and GPU atomic operations for the normal (BO) allocator.
+ * - DRM_XE_ATOMIC_DEVICE: Supports GPU atomic operations.
+ * - DRM_XE_ATOMIC_GLOBAL: Supports both GPU and CPU atomic operations.
+ * - DRM_XE_ATOMIC_CPU: Supports CPU atomic operations only; no GPU atomics.
+ */
+ struct {
+#define DRM_XE_ATOMIC_UNDEFINED 0
+#define DRM_XE_ATOMIC_DEVICE 1
+#define DRM_XE_ATOMIC_GLOBAL 2
+#define DRM_XE_ATOMIC_CPU 3
+ /** @atomic.val: value of atomic operation */
+ __u32 val;
+
+ /** @atomic.pad: MBZ */
+ __u32 pad;
+
+ /** @atomic.reserved: Reserved */
+ __u64 reserved;
+ } atomic;
+
+ /**
+ * @pat_index: Page attribute table index
+ *
+ * Used when @type == DRM_XE_MEM_RANGE_ATTR_PAT.
+ */
+ struct {
+ /** @pat_index.val: PAT index value */
+ __u32 val;
+
+ /** @pat_index.pad: MBZ */
+ __u32 pad;
+
+ /** @pat_index.reserved: Reserved */
+ __u64 reserved;
+ } pat_index;
+ };
+
+ /** @reserved: Reserved */
+ __u64 reserved[2];
+};
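
The preferred-location attribute follows the same pattern as the atomic example in the kerneldoc above; a sketch, with vm_id an assumed input:

struct drm_xe_madvise madvise = {
	.vm_id = vm_id,
	.start = 0x100000,
	.range = 0x2000,
	.type = DRM_XE_MEM_RANGE_ATTR_PREFERRED_LOC,
	.preferred_mem_loc = {
		.devmem_fd = DRM_XE_PREFERRED_LOC_DEFAULT_DEVICE,
		.migration_policy = DRM_XE_MIGRATE_ALL_PAGES,
	},
};
ioctl(fd, DRM_IOCTL_XE_MADVISE, &madvise);
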
+
+/**
+ * struct drm_xe_mem_range_attr - Output of &DRM_IOCTL_XE_VM_QUERY_MEM_RANGE_ATTRS
+ *
+ * This structure is provided by userspace and filled by the KMD in response to
+ * the DRM_IOCTL_XE_VM_QUERY_MEM_RANGE_ATTRS ioctl. It describes the memory
+ * attributes of the memory ranges within a user-specified address range in a VM.
+ *
+ * The structure includes information such as atomic access policy,
+ * page attribute table (PAT) index, and preferred memory location.
+ * Userspace allocates an array of these structures and passes a pointer to the
+ * ioctl to retrieve attributes for each memory range.
+ *
+ */
+struct drm_xe_mem_range_attr {
+ /** @extensions: Pointer to the first extension struct, if any */
+ __u64 extensions;
+
+ /** @start: start of the memory range */
+ __u64 start;
+
+ /** @end: end of the memory range */
+ __u64 end;
+
+ /** @preferred_mem_loc: preferred memory location */
+ struct {
+ /** @preferred_mem_loc.devmem_fd: fd for preferred loc */
+ __u32 devmem_fd;
+
+ /** @preferred_mem_loc.migration_policy: Page migration policy */
+ __u32 migration_policy;
+ } preferred_mem_loc;
+
+ /** @atomic: Atomic access policy */
+ struct {
+ /** @atomic.val: atomic attribute */
+ __u32 val;
+
+ /** @atomic.reserved: Reserved */
+ __u32 reserved;
+ } atomic;
+
+ /** @pat_index: Page attribute table index */
+ struct {
+ /** @pat_index.val: PAT index */
+ __u32 val;
+
+ /** @pat_index.reserved: Reserved */
+ __u32 reserved;
+ } pat_index;
+
+ /** @reserved: Reserved */
+ __u64 reserved[2];
+};
+
+/**
+ * struct drm_xe_vm_query_mem_range_attr - Input of &DRM_IOCTL_XE_VM_QUERY_MEM_RANGE_ATTRS
+ *
+ * This structure is used to query memory attributes of memory regions
+ * within a user specified address range in a VM. It provides detailed
+ * information about each memory range, including atomic access policy,
+ * page attribute table (PAT) index, and preferred memory location.
+ *
+ * Userspace first calls the ioctl with @num_mem_ranges = 0,
+ * @sizeof_mem_range_attr = 0 and @vector_of_mem_attr = NULL to retrieve
+ * the number of memory ranges and the size of each memory range attribute.
+ * Then, it allocates a buffer of that size and calls the ioctl again to fill
+ * the buffer with memory range attributes.
+ *
+ * If the second call fails with -ENOSPC, the memory ranges changed between the
+ * two calls; restart the sequence by repeating the size query
+ * (@num_mem_ranges = 0, @sizeof_mem_range_attr = 0, @vector_of_mem_attr = NULL)
+ * followed by the filling call.
+ *
+ * Example:
+ *
+ * .. code-block:: C
+ *
+ * struct drm_xe_vm_query_mem_range_attr query = {
+ * .vm_id = vm_id,
+ * .start = 0x100000,
+ * .range = 0x2000,
+ * };
+ *
+ * // First ioctl call to get num of mem regions and sizeof each attribute
+ * ioctl(fd, DRM_IOCTL_XE_VM_QUERY_MEM_RANGE_ATTRS, &query);
+ *
+ * // Allocate buffer for the memory region attributes
+ * char *ptr = malloc(query.num_mem_ranges * query.sizeof_mem_range_attr);
+ * char *ptr_start = ptr;
+ *
+ * query.vector_of_mem_attr = (uintptr_t)ptr;
+ *
+ * // Second ioctl call to actually fill the memory attributes
+ * ioctl(fd, DRM_IOCTL_XE_VM_QUERY_MEM_RANGE_ATTRS, &query);
+ *
+ * // Iterate over the returned memory region attributes
+ * for (unsigned int i = 0; i < query.num_mem_ranges; ++i) {
+ * struct drm_xe_mem_range_attr *attr = (struct drm_xe_mem_range_attr *)ptr;
+ *
+ * // Do something with attr
+ *
+ * // Move pointer by one entry
+ * ptr += query.sizeof_mem_range_attr;
+ * }
+ *
+ * free(ptr_start);
+ */
+struct drm_xe_vm_query_mem_range_attr {
+ /** @extensions: Pointer to the first extension struct, if any */
+ __u64 extensions;
+
+ /** @vm_id: vm_id of the virtual range */
+ __u32 vm_id;
+
+ /** @num_mem_ranges: number of memory ranges in the address range (in/out) */
+ __u32 num_mem_ranges;
+
+ /** @start: start of the virtual address range */
+ __u64 start;
+
+ /** @range: size of the virtual address range */
+ __u64 range;
+
+ /** @sizeof_mem_range_attr: size of struct drm_xe_mem_range_attr */
+ __u64 sizeof_mem_range_attr;
+
+ /** @vector_of_mem_attr: userptr to array of struct drm_xe_mem_range_attr */
+ __u64 vector_of_mem_attr;
+
+ /** @reserved: Reserved */
+ __u64 reserved[2];
+};
+
#if defined(__cplusplus)
}
#endif