Diffstat (limited to 'include/uapi/linux/io_uring.h')
-rw-r--r--  include/uapi/linux/io_uring.h  204
1 file changed, 189 insertions(+), 15 deletions(-)
diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h
index 7bd10201a02b..e11c82638527 100644
--- a/include/uapi/linux/io_uring.h
+++ b/include/uapi/linux/io_uring.h
@@ -72,6 +72,7 @@ struct io_uring_sqe {
__u32 waitid_flags;
__u32 futex_flags;
__u32 install_fd_flags;
+ __u32 nop_flags;
};
__u64 user_data; /* data to be passed back at completion time */
/* pack this to avoid bogus arm OABI complaints */
@@ -97,6 +98,10 @@ struct io_uring_sqe {
__u64 addr3;
__u64 __pad2[1];
};
+ struct {
+ __u64 attr_ptr; /* pointer to attribute information */
+ __u64 attr_type_mask; /* bit mask of attributes */
+ };
__u64 optval;
/*
* If the ring is initialized with IORING_SETUP_SQE128, then
@@ -106,6 +111,18 @@ struct io_uring_sqe {
};
};
+/* sqe->attr_type_mask flags */
+#define IORING_RW_ATTR_FLAG_PI (1U << 0)
+/* PI attribute information */
+struct io_uring_attr_pi {
+ __u16 flags;
+ __u16 app_tag;
+ __u32 len;
+ __u64 addr;
+ __u64 seed;
+ __u64 rsvd;
+};
+
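Usage sketch (not part of the header): how an application might attach PI metadata to a read via the new attr_ptr/attr_type_mask fields. The helper name, the choice of IORING_OP_READ, and the direct SQE field assignments are illustrative assumptions; kernel-side validation of the descriptor is not shown.

#include <string.h>
#include <linux/io_uring.h>

/* hypothetical helper: point a read SQE at a PI descriptor */
static void prep_read_with_pi(struct io_uring_sqe *sqe, int fd,
                              void *buf, unsigned int len, __u64 off,
                              struct io_uring_attr_pi *pi)
{
        memset(sqe, 0, sizeof(*sqe));
        sqe->opcode = IORING_OP_READ;
        sqe->fd = fd;
        sqe->addr = (__u64)(unsigned long)buf;
        sqe->len = len;
        sqe->off = off;
        /* hand the PI descriptor to the kernel and flag it in the mask */
        sqe->attr_ptr = (__u64)(unsigned long)pi;
        sqe->attr_type_mask = IORING_RW_ATTR_FLAG_PI;
}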
/*
* If sqe->file_index is set to this for opcodes that instantiate a new
* direct descriptor (like openat/openat2/accept), then io_uring will allocate
@@ -115,7 +132,7 @@ struct io_uring_sqe {
*/
#define IORING_FILE_INDEX_ALLOC (~0U)
-enum {
+enum io_uring_sqe_flags_bit {
IOSQE_FIXED_FILE_BIT,
IOSQE_IO_DRAIN_BIT,
IOSQE_IO_LINK_BIT,
@@ -199,6 +216,9 @@ enum {
*/
#define IORING_SETUP_NO_SQARRAY (1U << 16)
+/* Use hybrid poll in iopoll process */
+#define IORING_SETUP_HYBRID_IOPOLL (1U << 17)
+
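Setup sketch for the new flag: hybrid poll refines classic iopoll, so this example pairs it with IORING_SETUP_IOPOLL (assumed requirement) and uses the raw io_uring_setup(2) syscall.

#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/io_uring.h>

static int setup_hybrid_iopoll_ring(unsigned int entries)
{
        struct io_uring_params p;

        memset(&p, 0, sizeof(p));
        /* assumed: HYBRID_IOPOLL only makes sense together with IOPOLL */
        p.flags = IORING_SETUP_IOPOLL | IORING_SETUP_HYBRID_IOPOLL;
        return syscall(__NR_io_uring_setup, entries, &p);
}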
enum io_uring_op {
IORING_OP_NOP,
IORING_OP_READV,
@@ -256,6 +276,8 @@ enum io_uring_op {
IORING_OP_FUTEX_WAITV,
IORING_OP_FIXED_FD_INSTALL,
IORING_OP_FTRUNCATE,
+ IORING_OP_BIND,
+ IORING_OP_LISTEN,
/* this goes last, obviously */
IORING_OP_LAST,
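Sketch of preparing the new bind and listen requests by filling SQE fields directly. The field placement (sockaddr pointer in addr, address length in off, backlog in len) mirrors current liburing usage but should be treated as an assumption here.

#include <string.h>
#include <sys/socket.h>
#include <linux/io_uring.h>

static void prep_bind(struct io_uring_sqe *sqe, int sockfd,
                      const struct sockaddr *addr, socklen_t addrlen)
{
        memset(sqe, 0, sizeof(*sqe));
        sqe->opcode = IORING_OP_BIND;
        sqe->fd = sockfd;
        sqe->addr = (__u64)(unsigned long)addr;
        sqe->off = addrlen;             /* assumed: address length */
}

static void prep_listen(struct io_uring_sqe *sqe, int sockfd, int backlog)
{
        memset(sqe, 0, sizeof(*sqe));
        sqe->opcode = IORING_OP_LISTEN;
        sqe->fd = sockfd;
        sqe->len = (__u32)backlog;      /* assumed: backlog */
}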
@@ -351,11 +373,20 @@ enum io_uring_op {
* 0 is reported if zerocopy was actually possible.
* IORING_NOTIF_USAGE_ZC_COPIED if data was copied
* (at least partially).
+ *
+ * IORING_RECVSEND_BUNDLE Used with IOSQE_BUFFER_SELECT. If set, send or
+ * recv will grab as many buffers from the buffer
+ * group ID given and send them all. The completion
result will be the number of buffers sent, with
+ * the starting buffer ID in cqe->flags as per
+ * usual for provided buffer usage. The buffers
will be contiguous from the starting buffer ID.
*/
#define IORING_RECVSEND_POLL_FIRST (1U << 0)
#define IORING_RECV_MULTISHOT (1U << 1)
#define IORING_RECVSEND_FIXED_BUF (1U << 2)
#define IORING_SEND_ZC_REPORT_USAGE (1U << 3)
+#define IORING_RECVSEND_BUNDLE (1U << 4)
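Sketch of a bundled send with provided buffers: IOSQE_BUFFER_SELECT picks buffers from group bgid, and IORING_RECVSEND_BUNDLE goes in sqe->ioprio, where send/recv flags are stored. The direct field assignments are illustrative assumptions.

#include <string.h>
#include <linux/io_uring.h>

static void prep_send_bundle(struct io_uring_sqe *sqe, int sockfd,
                             unsigned short bgid)
{
        memset(sqe, 0, sizeof(*sqe));
        sqe->opcode = IORING_OP_SEND;
        sqe->fd = sockfd;
        sqe->flags = IOSQE_BUFFER_SELECT;       /* select from a buffer group */
        sqe->buf_group = bgid;                  /* provided buffer group ID */
        sqe->ioprio = IORING_RECVSEND_BUNDLE;   /* send the whole bundle */
        /* on completion, the starting buffer ID is reported in cqe->flags
         * as usual for provided buffers (see comment above) */
}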
/*
* cqe.res for IORING_CQE_F_NOTIF if
@@ -370,11 +401,13 @@ enum io_uring_op {
* accept flags stored in sqe->ioprio
*/
#define IORING_ACCEPT_MULTISHOT (1U << 0)
+#define IORING_ACCEPT_DONTWAIT (1U << 1)
+#define IORING_ACCEPT_POLL_FIRST (1U << 2)
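Sketch of a single accept using the new IORING_ACCEPT_DONTWAIT flag in sqe->ioprio; the placement of the sockaddr/addrlen pointers is an assumption for illustration.

#include <string.h>
#include <sys/socket.h>
#include <linux/io_uring.h>

static void prep_accept_dontwait(struct io_uring_sqe *sqe, int listenfd,
                                 struct sockaddr *addr, socklen_t *addrlen)
{
        memset(sqe, 0, sizeof(*sqe));
        sqe->opcode = IORING_OP_ACCEPT;
        sqe->fd = listenfd;
        sqe->addr = (__u64)(unsigned long)addr;
        sqe->addr2 = (__u64)(unsigned long)addrlen;     /* assumed placement */
        /* assumed: complete immediately instead of waiting for a connection */
        sqe->ioprio = IORING_ACCEPT_DONTWAIT;
}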
/*
* IORING_OP_MSG_RING command types, stored in sqe->addr
*/
-enum {
+enum io_uring_msg_ring_flags {
IORING_MSG_DATA, /* pass sqe->len as 'res' and off as user_data */
IORING_MSG_SEND_FD, /* send a registered fd to another ring */
};
@@ -397,10 +430,20 @@ enum {
#define IORING_FIXED_FD_NO_CLOEXEC (1U << 0)
/*
+ * IORING_OP_NOP flags (sqe->nop_flags)
+ *
+ * IORING_NOP_INJECT_RESULT Inject result from sqe->result
+ */
+#define IORING_NOP_INJECT_RESULT (1U << 0)
+#define IORING_NOP_FILE (1U << 1)
+#define IORING_NOP_FIXED_FILE (1U << 2)
+#define IORING_NOP_FIXED_BUFFER (1U << 3)
+
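Sketch of a NOP that injects a synthetic completion result. Current kernels take the injected value from sqe->len; that field choice is stated here as an assumption.

#include <string.h>
#include <linux/io_uring.h>

static void prep_nop_inject(struct io_uring_sqe *sqe, int res)
{
        memset(sqe, 0, sizeof(*sqe));
        sqe->opcode = IORING_OP_NOP;
        sqe->nop_flags = IORING_NOP_INJECT_RESULT;
        sqe->len = (__u32)res;  /* assumed: copied into cqe->res on completion */
}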
+/*
* IO completion data structure (Completion Queue Entry)
*/
struct io_uring_cqe {
- __u64 user_data; /* sqe->data submission passed back */
+ __u64 user_data; /* sqe->user_data value passed back */
__s32 res; /* result code for this event */
__u32 flags;
@@ -419,15 +462,23 @@ struct io_uring_cqe {
* IORING_CQE_F_SOCK_NONEMPTY If set, more data to read after socket recv
* IORING_CQE_F_NOTIF Set for notification CQEs. Can be used to distinguish
* them from sends.
+ * IORING_CQE_F_BUF_MORE If set, the buffer ID set in the completion will get
+ * more completions. In other words, the buffer is being
+ * partially consumed, and will be used by the kernel for
+ * more completions. This is only set for buffers used via
incremental buffer consumption, as provided by
a ring buffer set up with IOU_PBUF_RING_INC. For any
other provided buffer type, a buffer passed back in a
completion is automatically returned to the
application.
*/
#define IORING_CQE_F_BUFFER (1U << 0)
#define IORING_CQE_F_MORE (1U << 1)
#define IORING_CQE_F_SOCK_NONEMPTY (1U << 2)
#define IORING_CQE_F_NOTIF (1U << 3)
+#define IORING_CQE_F_BUF_MORE (1U << 4)
-enum {
- IORING_CQE_BUFFER_SHIFT = 16,
-};
+#define IORING_CQE_BUFFER_SHIFT 16
/*
* Magic offsets for the application to mmap the data it needs
@@ -488,6 +539,8 @@ struct io_cqring_offsets {
#define IORING_ENTER_SQ_WAIT (1U << 2)
#define IORING_ENTER_EXT_ARG (1U << 3)
#define IORING_ENTER_REGISTERED_RING (1U << 4)
+#define IORING_ENTER_ABS_TIMER (1U << 5)
+#define IORING_ENTER_EXT_ARG_REG (1U << 6)
/*
* Passed in for io_uring_setup(2). Copied back with updated info on success
@@ -522,11 +575,14 @@ struct io_uring_params {
#define IORING_FEAT_CQE_SKIP (1U << 11)
#define IORING_FEAT_LINKED_FILE (1U << 12)
#define IORING_FEAT_REG_REG_RING (1U << 13)
+#define IORING_FEAT_RECVSEND_BUNDLE (1U << 14)
+#define IORING_FEAT_MIN_TIMEOUT (1U << 15)
+#define IORING_FEAT_RW_ATTR (1U << 16)
/*
* io_uring_register(2) opcodes and arguments
*/
-enum {
+enum io_uring_register_op {
IORING_REGISTER_BUFFERS = 0,
IORING_UNREGISTER_BUFFERS = 1,
IORING_REGISTER_FILES = 2,
@@ -575,6 +631,21 @@ enum {
IORING_REGISTER_NAPI = 27,
IORING_UNREGISTER_NAPI = 28,
+ IORING_REGISTER_CLOCK = 29,
+
+ /* clone registered buffers from source ring to current ring */
+ IORING_REGISTER_CLONE_BUFFERS = 30,
+
+ /* send MSG_RING without having a ring */
+ IORING_REGISTER_SEND_MSG_RING = 31,
+
+ /* 32 reserved for zc rx */
+
+ /* resize CQ ring */
+ IORING_REGISTER_RESIZE_RINGS = 33,
+
+ IORING_REGISTER_MEM_REGION = 34,
+
/* this goes last */
IORING_REGISTER_LAST,
@@ -583,7 +654,7 @@ enum {
};
/* io-wq worker categories */
-enum {
+enum io_wq_type {
IO_WQ_BOUND,
IO_WQ_UNBOUND,
};
@@ -595,6 +666,31 @@ struct io_uring_files_update {
__aligned_u64 /* __s32 * */ fds;
};
+enum {
+ /* initialise with user-provided memory pointed to by user_addr */
+ IORING_MEM_REGION_TYPE_USER = 1,
+};
+
+struct io_uring_region_desc {
+ __u64 user_addr;
+ __u64 size;
+ __u32 flags;
+ __u32 id;
+ __u64 mmap_offset;
+ __u64 __resv[4];
+};
+
+enum {
+ /* expose the region as registered wait arguments */
+ IORING_MEM_REGION_REG_WAIT_ARG = 1,
+};
+
+struct io_uring_mem_region_reg {
+ __u64 region_uptr; /* struct io_uring_region_desc * */
+ __u64 flags;
+ __u64 __resv[2];
+};
+
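Sketch of registering a user-memory region that is exposed as registered wait arguments. The raw io_uring_register(2) call and the nr_args value of 1 are assumptions; structure usage follows the definitions above.

#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/io_uring.h>

static int register_wait_region(int ring_fd, void *mem, size_t size)
{
        struct io_uring_region_desc rd;
        struct io_uring_mem_region_reg mr;

        memset(&rd, 0, sizeof(rd));
        rd.user_addr = (__u64)(unsigned long)mem;   /* user-provided backing memory */
        rd.size = size;
        rd.flags = IORING_MEM_REGION_TYPE_USER;

        memset(&mr, 0, sizeof(mr));
        mr.region_uptr = (__u64)(unsigned long)&rd;
        mr.flags = IORING_MEM_REGION_REG_WAIT_ARG;  /* expose as wait-arg region */

        return syscall(__NR_io_uring_register, ring_fd,
                       IORING_REGISTER_MEM_REGION, &mr, 1);
}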
/*
* Register a fully sparse file space, rather than pass in an array of all
* -1 file descriptors.
@@ -655,6 +751,25 @@ struct io_uring_restriction {
__u32 resv2[3];
};
+struct io_uring_clock_register {
+ __u32 clockid;
+ __u32 __resv[3];
+};
+
+enum {
+ IORING_REGISTER_SRC_REGISTERED = (1U << 0),
+ IORING_REGISTER_DST_REPLACE = (1U << 1),
+};
+
+struct io_uring_clone_buffers {
+ __u32 src_fd;
+ __u32 flags;
+ __u32 src_off;
+ __u32 dst_off;
+ __u32 nr;
+ __u32 pad[3];
+};
+
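Sketch of cloning the registered buffer table from one ring into another with IORING_REGISTER_CLONE_BUFFERS. The raw syscall, nr_args of 1, and nr == 0 meaning "clone the full range" are assumptions.

#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/io_uring.h>

static int clone_all_buffers(int dst_ring_fd, int src_ring_fd)
{
        struct io_uring_clone_buffers cb;

        memset(&cb, 0, sizeof(cb));
        cb.src_fd = (__u32)src_ring_fd; /* ring whose buffers are cloned */
        cb.nr = 0;                      /* assumed: 0 clones the full range */

        return syscall(__NR_io_uring_register, dst_ring_fd,
                       IORING_REGISTER_CLONE_BUFFERS, &cb, 1);
}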
struct io_uring_buf {
__u64 addr;
__u32 len;
@@ -687,9 +802,17 @@ struct io_uring_buf_ring {
* mmap(2) with the offset set as:
* IORING_OFF_PBUF_RING | (bgid << IORING_OFF_PBUF_SHIFT)
* to get a virtual mapping for the ring.
- */
-enum {
+ * IOU_PBUF_RING_INC: If set, buffers consumed from this buffer ring can be
+ * consumed incrementally. Normally one (or more) buffers
are fully consumed. With incremental consumption, it's
feasible to register large ranges of buffers, and each
use will consume only as much as it needs. This
+ * requires that both the kernel and application keep
+ * track of where the current read/recv index is at.
+ */
+enum io_uring_register_pbuf_ring_flags {
IOU_PBUF_RING_MMAP = 1,
+ IOU_PBUF_RING_INC = 2,
};
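Sketch of how completion handling differs for an incrementally consumed ring (IOU_PBUF_RING_INC): the application only reclaims the buffer once the kernel stops setting IORING_CQE_F_BUF_MORE. The helper name and bookkeeping are hypothetical.

#include <linux/io_uring.h>

static void handle_buffer_cqe(const struct io_uring_cqe *cqe)
{
        unsigned int bid;

        if (!(cqe->flags & IORING_CQE_F_BUFFER))
                return;         /* no provided buffer attached */

        bid = cqe->flags >> IORING_CQE_BUFFER_SHIFT;
        if (cqe->flags & IORING_CQE_F_BUF_MORE) {
                /* buffer bid is only partially consumed; the kernel keeps
                 * using it for further completions */
                return;
        }
        /* buffer is back in the application's hands and may be reused */
        (void)bid;
}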
/* argument for IORING_(UN)REGISTER_PBUF_RING */
@@ -708,18 +831,46 @@ struct io_uring_buf_status {
__u32 resv[8];
};
+enum io_uring_napi_op {
+ /* register/unregister backward-compatible opcode */
+ IO_URING_NAPI_REGISTER_OP = 0,
+
+ /* opcodes to update napi_list when static tracking is used */
+ IO_URING_NAPI_STATIC_ADD_ID = 1,
+ IO_URING_NAPI_STATIC_DEL_ID = 2
+};
+
+enum io_uring_napi_tracking_strategy {
+ /* value must be 0 for backward compatibility */
+ IO_URING_NAPI_TRACKING_DYNAMIC = 0,
+ IO_URING_NAPI_TRACKING_STATIC = 1,
+ IO_URING_NAPI_TRACKING_INACTIVE = 255
+};
+
/* argument for IORING_(UN)REGISTER_NAPI */
struct io_uring_napi {
__u32 busy_poll_to;
__u8 prefer_busy_poll;
- __u8 pad[3];
- __u64 resv;
+
+ /* an io_uring_napi_op value */
+ __u8 opcode;
+ __u8 pad[2];
+
+ /*
+ * for IO_URING_NAPI_REGISTER_OP, it is an
+ * io_uring_napi_tracking_strategy value.
+ *
+ * for IO_URING_NAPI_STATIC_ADD_ID/IO_URING_NAPI_STATIC_DEL_ID
+ * it is the napi id to add/del from napi_list.
+ */
+ __u32 op_param;
+ __u32 resv;
};
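Sketch of enabling static NAPI tracking and then pinning one napi id, using the extended io_uring_napi layout above. The raw syscall usage and nr_args of 1 are assumptions.

#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/io_uring.h>

static int napi_static_add(int ring_fd, __u32 napi_id, __u32 busy_poll_usec)
{
        struct io_uring_napi n;
        int ret;

        /* enable busy polling with static tracking of napi ids */
        memset(&n, 0, sizeof(n));
        n.busy_poll_to = busy_poll_usec;
        n.opcode = IO_URING_NAPI_REGISTER_OP;
        n.op_param = IO_URING_NAPI_TRACKING_STATIC;
        ret = syscall(__NR_io_uring_register, ring_fd,
                      IORING_REGISTER_NAPI, &n, 1);
        if (ret < 0)
                return ret;

        /* add one napi id to the static napi_list */
        memset(&n, 0, sizeof(n));
        n.opcode = IO_URING_NAPI_STATIC_ADD_ID;
        n.op_param = napi_id;
        return syscall(__NR_io_uring_register, ring_fd,
                       IORING_REGISTER_NAPI, &n, 1);
}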
/*
* io_uring_restriction->opcode values
*/
-enum {
+enum io_uring_register_restriction_op {
/* Allow an io_uring_register(2) opcode */
IORING_RESTRICTION_REGISTER_OP = 0,
@@ -735,10 +886,33 @@ enum {
IORING_RESTRICTION_LAST
};
+enum {
+ IORING_REG_WAIT_TS = (1U << 0),
+};
+
+/*
+ * Argument for io_uring_enter(2) with
+ * IORING_GETEVENTS | IORING_ENTER_EXT_ARG_REG set, where the actual argument
+ * is an index into a previously registered fixed wait region described by
+ * the below structure.
+ */
+struct io_uring_reg_wait {
+ struct __kernel_timespec ts;
+ __u32 min_wait_usec;
+ __u32 flags;
+ __u64 sigmask;
+ __u32 sigmask_sz;
+ __u32 pad[3];
+ __u64 pad2[2];
+};
+
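Sketch of filling one slot of a registered wait region (see IORING_REGISTER_MEM_REGION above) with a relative 10ms timeout. How the slot index is then passed to io_uring_enter(2) with IORING_ENTER_EXT_ARG_REG is only summarized by the comment above, so the enter call is omitted here.

#include <string.h>
#include <linux/time_types.h>
#include <linux/io_uring.h>

static void fill_reg_wait(struct io_uring_reg_wait *region, unsigned int idx)
{
        struct io_uring_reg_wait *rw = &region[idx];

        memset(rw, 0, sizeof(*rw));
        rw->ts.tv_sec = 0;
        rw->ts.tv_nsec = 10 * 1000 * 1000;
        rw->flags = IORING_REG_WAIT_TS; /* the timespec above is valid */
        /* sigmask/sigmask_sz left zero: do not change the signal mask */
}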
+/*
+ * Argument for io_uring_enter(2) with IORING_GETEVENTS | IORING_ENTER_EXT_ARG
+ */
struct io_uring_getevents_arg {
__u64 sigmask;
__u32 sigmask_sz;
- __u32 pad;
+ __u32 min_wait_usec;
__u64 ts;
};
@@ -775,7 +949,7 @@ struct io_uring_recvmsg_out {
/*
* Argument for IORING_OP_URING_CMD when file is a socket
*/
-enum {
+enum io_uring_socket_op {
SOCKET_URING_OP_SIOCINQ = 0,
SOCKET_URING_OP_SIOCOUTQ,
SOCKET_URING_OP_GETSOCKOPT,