// SPDX-License-Identifier: GPL-2.0
#ifndef IOU_KBUF_H
#define IOU_KBUF_H

#include <uapi/linux/io_uring.h>

struct io_buffer_list {
	/*
	 * If ->buf_nr_pages is set, then buf_pages/buf_ring are used. If not,
	 * then these are classic provided buffers and ->buf_list is used.
	 * ->rcu is only used to defer freeing of the entry itself.
	 */
	union {
		struct list_head buf_list;
		struct {
			struct page **buf_pages;
			struct io_uring_buf_ring *buf_ring;
		};
		struct rcu_head rcu;
	};
	__u16 bgid;

	/* below is for ring provided buffers */
	__u16 buf_nr_pages;
	__u16 nr_entries;
	__u16 head;
	__u16 mask;

	/* ring mapped provided buffers */
	__u8 is_mapped;
	/* ring mapped provided buffers, but mmap'ed by application */
	__u8 is_mmap;
	/* bl is visible from an RCU point of view for lookup */
	__u8 is_ready;
};
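
/*
 * Illustrative sketch (assumed usage, not part of this header): a lookup
 * branches on the discriminator described above to pick the active union
 * member, roughly:
 *
 *	if (bl->buf_nr_pages)
 *		buf = &bl->buf_ring->bufs[bl->head & bl->mask];
 *	else
 *		kbuf = list_first_entry(&bl->buf_list,
 *					struct io_buffer, list);
 */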

struct io_buffer {
	struct list_head list;
	__u64 addr;
	__u32 len;
	__u16 bid;
	__u16 bgid;
};
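
/*
 * For reference (illustrative, via liburing): classic provided buffers are
 * handed to the kernel with IORING_OP_PROVIDE_BUFFERS, e.g.:
 *
 *	io_uring_prep_provide_buffers(sqe, base, buf_len, nr_bufs, bgid, 0);
 *
 * Each buffer then shows up here as one struct io_buffer on ->buf_list.
 */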

void __user *io_buffer_select(struct io_kiocb *req, size_t *len,
			      unsigned int issue_flags);
void io_destroy_buffers(struct io_ring_ctx *ctx);

int io_remove_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags);

int io_provide_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags);

int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg);
int io_unregister_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg);
int io_register_pbuf_status(struct io_ring_ctx *ctx, void __user *arg);

void io_kbuf_mmap_list_free(struct io_ring_ctx *ctx);

unsigned int __io_put_kbuf(struct io_kiocb *req, unsigned issue_flags);

bool io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags);

void *io_pbuf_get_address(struct io_ring_ctx *ctx, unsigned long bgid);
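
/*
 * io_pbuf_get_address() serves mmap() of a kernel-allocated ring (->is_mmap).
 * The userspace side, as a sketch (the offset constants are from the uapi
 * header):
 *
 *	off = IORING_OFF_PBUF_RING |
 *	      (__u64)bgid << IORING_OFF_PBUF_SHIFT;
 *	br = mmap(NULL, ring_size, PROT_READ | PROT_WRITE,
 *		  MAP_SHARED | MAP_POPULATE, ring_fd, off);
 */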

static inline bool io_kbuf_recycle_ring(struct io_kiocb *req)
{
	/*
	 * We don't need to recycle for REQ_F_BUFFER_RING, we can just clear
	 * the flag and hence ensure that bl->head doesn't get incremented.
	 * If the tail has already been incremented, hang on to it.
	 * The exception is partial IO, in which case we should increment
	 * bl->head to monopolize the buffer.
	 */
	if (req->buf_list) {
		if (req->flags & REQ_F_PARTIAL_IO) {
			/*
			 * If we end up here, then the io_uring_lock has
			 * been kept held since we retrieved the buffer.
			 * For the io-wq case, we already cleared
			 * req->buf_list when the buffer was retrieved,
			 * hence it cannot be set here for that case.
			 */
			req->buf_list->head++;
			req->buf_list = NULL;
		} else {
			req->buf_index = req->buf_list->bgid;
			req->flags &= ~REQ_F_BUFFER_RING;
			return true;
		}
	}
	return false;
}

static inline bool io_do_buffer_select(struct io_kiocb *req)
{
	if (!(req->flags & REQ_F_BUFFER_SELECT))
		return false;
	return !(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING));
}

static inline bool io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags)
{
	if (req->flags & REQ_F_BUFFER_SELECTED)
		return io_kbuf_recycle_legacy(req, issue_flags);
	if (req->flags & REQ_F_BUFFER_RING)
		return io_kbuf_recycle_ring(req);
	return false;
}
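
/*
 * Typical call site (sketch, assumed caller): an opcode handler that has to
 * retry hands the selected buffer back before returning, e.g.:
 *
 *	if (ret == -EAGAIN) {
 *		io_kbuf_recycle(req, issue_flags);
 *		return -EAGAIN;
 *	}
 */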

static inline unsigned int __io_put_kbuf_list(struct io_kiocb *req,
					      struct list_head *list)
{
	unsigned int ret = IORING_CQE_F_BUFFER | (req->buf_index << IORING_CQE_BUFFER_SHIFT);

	if (req->flags & REQ_F_BUFFER_RING) {
		if (req->buf_list) {
			req->buf_index = req->buf_list->bgid;
			req->buf_list->head++;
		}
		req->flags &= ~REQ_F_BUFFER_RING;
	} else {
		req->buf_index = req->kbuf->bgid;
		list_add(&req->kbuf->list, list);
		req->flags &= ~REQ_F_BUFFER_SELECTED;
	}

	return ret;
}
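
/*
 * The value returned above ends up in cqe->flags; userspace recovers the
 * buffer ID like this (illustrative):
 *
 *	if (cqe->flags & IORING_CQE_F_BUFFER)
 *		bid = cqe->flags >> IORING_CQE_BUFFER_SHIFT;
 */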

static inline unsigned int io_put_kbuf_comp(struct io_kiocb *req)
{
	lockdep_assert_held(&req->ctx->completion_lock);

	if (!(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)))
		return 0;
	return __io_put_kbuf_list(req, &req->ctx->io_buffers_comp);
}

static inline unsigned int io_put_kbuf(struct io_kiocb *req,
				       unsigned issue_flags)
{
	if (!(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)))
		return 0;
	return __io_put_kbuf(req, issue_flags);
}
#endif