author    | Linus Torvalds <torvalds@linux-foundation.org> | 2022-08-02 13:20:44 -0700
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2022-08-02 13:20:44 -0700
commit    | b349b1181d24af1c151134a3c39725e94a5619dd (patch)
tree      | 7347cc4035de947c22e575ac7c649c0fa8658dd1 /io_uring/filetable.c
parent    | efb2883060afc79638bb1eb19e2c30e7f6c5a178 (diff)
parent    | f6b543fd03d347e8bf245cee4f2d54eb6ffd8fcb (diff)
Merge tag 'for-5.20/io_uring-2022-07-29' of git://git.kernel.dk/linux-block
Pull io_uring updates from Jens Axboe:
- As per (valid) complaint in the last merge window, fs/io_uring.c has
  grown quite large these days. io_uring isn't really tied to fs
  either, as it supports a wide variety of functionality outside of
  that.

  Move the code to io_uring/ and split it into files that each
  implement a specific request type, splitting some code out into
  helpers as well. The code is organized much better this way, and
  io_uring.c is now < 4K LOC (me).
- Deprecate the epoll_ctl opcode. It'll still work, just trigger a
warning once if used. If we don't get any complaints on this, and I
don't expect any, then we can fully remove it in a future release
(me).
- Improve the cancel hash locking (Hao)
- kbuf cleanups (Hao)
- Efficiency improvements to the task_work handling (Dylan, Pavel)
- Provided buffer improvements (Dylan)
- Add multishot support for recv/recvmsg. This is similar to the
  multishot support we have for accept (or poll), where a single SQE
  can trigger a completion every time data is received. For
  applications that expect to do more than a few receives on an
  instantiated socket, this greatly improves efficiency (Dylan). A
  rough liburing sketch follows this list.
- Efficiency improvements for poll handling (Pavel)
- Poll cancelation improvements (Pavel)
- Allow specifying a range for direct descriptor allocations (Pavel)
- Cleanup the cqe32 handling (Pavel)
- Move io_uring types to greatly cleanup the tracing (Pavel)
- Tons of great code cleanups and improvements (Pavel)
- Add a way to do sync cancelations rather than through the sqe -> cqe
interface, as that's a lot easier to use for some use cases (me).
- Add support to IORING_OP_MSG_RING for sending direct descriptors to a
different ring. This avoids the usually problematic SCM case, as we
disallow those. (me)
- Make the per-command alloc cache we use for apoll generic, place
limits on it, and use it for netmsg as well (me).
- Various cleanups (me, Michal, Gustavo, Uros)
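
As referenced in the multishot recv/recvmsg item above, here is a minimal
userspace sketch of how that flow might be driven through liburing. It is
an illustration only: it assumes a liburing recent enough to expose
io_uring_prep_recv_multishot(), and the buffer-group id, buffer size and
buffer count are arbitrary placeholder values, not anything mandated by
this pull.

/*
 * Hedged sketch, not part of the kernel changes: arm one multishot recv
 * SQE that keeps posting CQEs as data arrives on sockfd. BGID, BUF_SZ
 * and NBUFS are illustrative values.
 */
#include <errno.h>
#include <stdlib.h>
#include <liburing.h>

#define BGID	1	/* provided-buffer group id (arbitrary) */
#define BUF_SZ	4096
#define NBUFS	8

static int arm_multishot_recv(struct io_uring *ring, int sockfd)
{
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	void *bufs;
	int ret;

	if (posix_memalign(&bufs, 4096, (size_t)NBUFS * BUF_SZ))
		return -ENOMEM;

	/* Hand the kernel a pool of buffers to pick from on each receive. */
	sqe = io_uring_get_sqe(ring);
	io_uring_prep_provide_buffers(sqe, bufs, BUF_SZ, NBUFS, BGID, 0);
	io_uring_submit(ring);
	ret = io_uring_wait_cqe(ring, &cqe);
	if (ret)
		return ret;
	ret = cqe->res < 0 ? cqe->res : 0;
	io_uring_cqe_seen(ring, cqe);
	if (ret)
		return ret;

	/* One SQE, many CQEs: each completion carries a buffer id via
	 * IORING_CQE_F_BUFFER and stays armed while IORING_CQE_F_MORE is set. */
	sqe = io_uring_get_sqe(ring);
	io_uring_prep_recv_multishot(sqe, sockfd, NULL, 0, 0);
	sqe->flags |= IOSQE_BUFFER_SELECT;
	sqe->buf_group = BGID;
	return io_uring_submit(ring);
}

Each completion's res is the number of bytes received; once
IORING_CQE_F_MORE is no longer set (on error, or when the buffer group
runs out of buffers), the multishot request has terminated and must be
re-armed.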
* tag 'for-5.20/io_uring-2022-07-29' of git://git.kernel.dk/linux-block: (172 commits)
io_uring: ensure REQ_F_ISREG is set async offload
net: fix compat pointer in get_compat_msghdr()
io_uring: Don't require reinitable percpu_ref
io_uring: fix types in io_recvmsg_multishot_overflow
io_uring: Use atomic_long_try_cmpxchg in __io_account_mem
io_uring: support multishot in recvmsg
net: copy from user before calling __get_compat_msghdr
net: copy from user before calling __copy_msghdr
io_uring: support 0 length iov in buffer select in compat
io_uring: fix multishot ending when not polled
io_uring: add netmsg cache
io_uring: impose max limit on apoll cache
io_uring: add abstraction around apoll cache
io_uring: move apoll cache to poll.c
io_uring: consolidate hash_locked io-wq handling
io_uring: clear REQ_F_HASH_LOCKED on hash removal
io_uring: don't race double poll setting REQ_F_ASYNC_DATA
io_uring: don't miss setting REQ_F_DOUBLE_POLL
io_uring: disable multishot recvmsg
io_uring: only trace one of complete or overflow
...
Diffstat (limited to 'io_uring/filetable.c')
-rw-r--r-- | io_uring/filetable.c | 193 |
1 file changed, 193 insertions, 0 deletions
diff --git a/io_uring/filetable.c b/io_uring/filetable.c
new file mode 100644
index 000000000000..7b473259f3f4
--- /dev/null
+++ b/io_uring/filetable.c
@@ -0,0 +1,193 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/file.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/nospec.h>
+#include <linux/io_uring.h>
+
+#include <uapi/linux/io_uring.h>
+
+#include "io_uring.h"
+#include "rsrc.h"
+#include "filetable.h"
+
+static int io_file_bitmap_get(struct io_ring_ctx *ctx)
+{
+	struct io_file_table *table = &ctx->file_table;
+	unsigned long nr = ctx->file_alloc_end;
+	int ret;
+
+	do {
+		ret = find_next_zero_bit(table->bitmap, nr, table->alloc_hint);
+		if (ret != nr)
+			return ret;
+
+		if (table->alloc_hint == ctx->file_alloc_start)
+			break;
+		nr = table->alloc_hint;
+		table->alloc_hint = ctx->file_alloc_start;
+	} while (1);
+
+	return -ENFILE;
+}
+
+bool io_alloc_file_tables(struct io_file_table *table, unsigned nr_files)
+{
+	table->files = kvcalloc(nr_files, sizeof(table->files[0]),
+				GFP_KERNEL_ACCOUNT);
+	if (unlikely(!table->files))
+		return false;
+
+	table->bitmap = bitmap_zalloc(nr_files, GFP_KERNEL_ACCOUNT);
+	if (unlikely(!table->bitmap)) {
+		kvfree(table->files);
+		return false;
+	}
+
+	return true;
+}
+
+void io_free_file_tables(struct io_file_table *table)
+{
+	kvfree(table->files);
+	bitmap_free(table->bitmap);
+	table->files = NULL;
+	table->bitmap = NULL;
+}
+
+static int io_install_fixed_file(struct io_ring_ctx *ctx, struct file *file,
+				 u32 slot_index)
+	__must_hold(&req->ctx->uring_lock)
+{
+	bool needs_switch = false;
+	struct io_fixed_file *file_slot;
+	int ret;
+
+	if (io_is_uring_fops(file))
+		return -EBADF;
+	if (!ctx->file_data)
+		return -ENXIO;
+	if (slot_index >= ctx->nr_user_files)
+		return -EINVAL;
+
+	slot_index = array_index_nospec(slot_index, ctx->nr_user_files);
+	file_slot = io_fixed_file_slot(&ctx->file_table, slot_index);
+
+	if (file_slot->file_ptr) {
+		struct file *old_file;
+
+		ret = io_rsrc_node_switch_start(ctx);
+		if (ret)
+			goto err;
+
+		old_file = (struct file *)(file_slot->file_ptr & FFS_MASK);
+		ret = io_queue_rsrc_removal(ctx->file_data, slot_index,
+					    ctx->rsrc_node, old_file);
+		if (ret)
+			goto err;
+		file_slot->file_ptr = 0;
+		io_file_bitmap_clear(&ctx->file_table, slot_index);
+		needs_switch = true;
+	}
+
+	ret = io_scm_file_account(ctx, file);
+	if (!ret) {
+		*io_get_tag_slot(ctx->file_data, slot_index) = 0;
+		io_fixed_file_set(file_slot, file);
+		io_file_bitmap_set(&ctx->file_table, slot_index);
+	}
+err:
+	if (needs_switch)
+		io_rsrc_node_switch(ctx, ctx->file_data);
+	if (ret)
+		fput(file);
+	return ret;
+}
+
+int __io_fixed_fd_install(struct io_ring_ctx *ctx, struct file *file,
+			  unsigned int file_slot)
+{
+	bool alloc_slot = file_slot == IORING_FILE_INDEX_ALLOC;
+	int ret;
+
+	if (alloc_slot) {
+		ret = io_file_bitmap_get(ctx);
+		if (unlikely(ret < 0))
+			return ret;
+		file_slot = ret;
+	} else {
+		file_slot--;
+	}
+
+	ret = io_install_fixed_file(ctx, file, file_slot);
+	if (!ret && alloc_slot)
+		ret = file_slot;
+	return ret;
+}
+/*
+ * Note when io_fixed_fd_install() returns error value, it will ensure
+ * fput() is called correspondingly.
+ */
+int io_fixed_fd_install(struct io_kiocb *req, unsigned int issue_flags,
+			struct file *file, unsigned int file_slot)
+{
+	struct io_ring_ctx *ctx = req->ctx;
+	int ret;
+
+	io_ring_submit_lock(ctx, issue_flags);
+	ret = __io_fixed_fd_install(ctx, file, file_slot);
+	io_ring_submit_unlock(ctx, issue_flags);
+
+	if (unlikely(ret < 0))
+		fput(file);
+	return ret;
+}
+
+int io_fixed_fd_remove(struct io_ring_ctx *ctx, unsigned int offset)
+{
+	struct io_fixed_file *file_slot;
+	struct file *file;
+	int ret;
+
+	if (unlikely(!ctx->file_data))
+		return -ENXIO;
+	if (offset >= ctx->nr_user_files)
+		return -EINVAL;
+	ret = io_rsrc_node_switch_start(ctx);
+	if (ret)
+		return ret;
+
+	offset = array_index_nospec(offset, ctx->nr_user_files);
+	file_slot = io_fixed_file_slot(&ctx->file_table, offset);
+	if (!file_slot->file_ptr)
+		return -EBADF;
+
+	file = (struct file *)(file_slot->file_ptr & FFS_MASK);
+	ret = io_queue_rsrc_removal(ctx->file_data, offset, ctx->rsrc_node, file);
+	if (ret)
+		return ret;
+
+	file_slot->file_ptr = 0;
+	io_file_bitmap_clear(&ctx->file_table, offset);
+	io_rsrc_node_switch(ctx, ctx->file_data);
+	return 0;
+}
+
+int io_register_file_alloc_range(struct io_ring_ctx *ctx,
+				 struct io_uring_file_index_range __user *arg)
+{
+	struct io_uring_file_index_range range;
+	u32 end;
+
+	if (copy_from_user(&range, arg, sizeof(range)))
+		return -EFAULT;
+	if (check_add_overflow(range.off, range.len, &end))
+		return -EOVERFLOW;
+	if (range.resv || end > ctx->nr_user_files)
+		return -EINVAL;
+
+	io_file_table_set_alloc_range(ctx, range.off, range.len);
+	return 0;
+}
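
The io_register_file_alloc_range() and io_file_bitmap_get()/__io_fixed_fd_install()
code above backs the "range for direct descriptor allocations" item from the
pull message. The following userspace sketch is a hedged illustration of how
that could be exercised through liburing; it assumes a liburing new enough to
provide io_uring_register_file_alloc_range(), and the table size, range offset
and range length are arbitrary example values.

/*
 * Hedged sketch, not part of the merged diff: confine kernel-picked
 * fixed-file slots to a sub-range, then let an accept request allocate
 * a direct descriptor from it.
 */
#include <liburing.h>

static int accept_into_range(struct io_uring *ring, int listen_fd)
{
	struct io_uring_sqe *sqe;
	int ret;

	/* Sparse fixed-file table of 128 slots, nothing installed yet. */
	ret = io_uring_register_files_sparse(ring, 128);
	if (ret)
		return ret;

	/* Restrict allocations to slots [16, 16 + 64); this ends up in
	 * io_register_file_alloc_range() above. */
	ret = io_uring_register_file_alloc_range(ring, 16, 64);
	if (ret)
		return ret;

	/* IORING_FILE_INDEX_ALLOC asks io_file_bitmap_get() to find a free
	 * slot; the chosen slot index comes back in the CQE res field. */
	sqe = io_uring_get_sqe(ring);
	io_uring_prep_accept_direct(sqe, listen_fd, NULL, NULL, 0,
				    IORING_FILE_INDEX_ALLOC);
	return io_uring_submit(ring);
}

A non-negative CQE res is then the slot the kernel picked from the registered
range; the descriptor can later be released with io_uring_prep_close_direct()
or by updating the slot through the files-update interface.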