Diffstat (limited to 'tools/testing/selftests/ublk')
-rw-r--r--  tools/testing/selftests/ublk/Makefile                    |   1
-rw-r--r--  tools/testing/selftests/ublk/fault_inject.c              |  17
-rw-r--r--  tools/testing/selftests/ublk/file_backed.c               |  46
-rw-r--r--  tools/testing/selftests/ublk/kublk.c                     | 472
-rw-r--r--  tools/testing/selftests/ublk/kublk.h                     | 180
-rw-r--r--  tools/testing/selftests/ublk/null.c                      |  50
-rw-r--r--  tools/testing/selftests/ublk/stripe.c                    |  46
-rwxr-xr-x  tools/testing/selftests/ublk/test_common.sh              |   5
-rwxr-xr-x  tools/testing/selftests/ublk/test_generic_12.sh          |  55
-rwxr-xr-x  tools/testing/selftests/ublk/test_stress_03.sh           |  11
-rwxr-xr-x  tools/testing/selftests/ublk/test_stress_04.sh           |   7
-rwxr-xr-x  tools/testing/selftests/ublk/test_stress_05.sh           |   7
-rw-r--r--  tools/testing/selftests/ublk/trace/count_ios_per_tid.bt  |  11
-rw-r--r--  tools/testing/selftests/ublk/utils.h                     |  70
14 files changed, 631 insertions, 347 deletions
diff --git a/tools/testing/selftests/ublk/Makefile b/tools/testing/selftests/ublk/Makefile
index 4dde8838261d..5d7f4ecfb816 100644
--- a/tools/testing/selftests/ublk/Makefile
+++ b/tools/testing/selftests/ublk/Makefile
@@ -19,6 +19,7 @@ TEST_PROGS += test_generic_08.sh
 TEST_PROGS += test_generic_09.sh
 TEST_PROGS += test_generic_10.sh
 TEST_PROGS += test_generic_11.sh
+TEST_PROGS += test_generic_12.sh
 
 TEST_PROGS += test_null_01.sh
 TEST_PROGS += test_null_02.sh
diff --git a/tools/testing/selftests/ublk/fault_inject.c b/tools/testing/selftests/ublk/fault_inject.c
index 5421774d7867..b227bd78b252 100644
--- a/tools/testing/selftests/ublk/fault_inject.c
+++ b/tools/testing/selftests/ublk/fault_inject.c
@@ -38,7 +38,8 @@ static int ublk_fault_inject_tgt_init(const struct dev_ctx *ctx,
 	return 0;
 }
 
-static int ublk_fault_inject_queue_io(struct ublk_queue *q, int tag)
+static int ublk_fault_inject_queue_io(struct ublk_thread *t,
+				      struct ublk_queue *q, int tag)
 {
 	const struct ublksrv_io_desc *iod = ublk_get_iod(q, tag);
 	struct io_uring_sqe *sqe;
@@ -46,25 +47,27 @@ static int ublk_fault_inject_queue_io(struct ublk_queue *q, int tag)
 		.tv_nsec = (long long)q->dev->private_data,
 	};
 
-	ublk_queue_alloc_sqes(q, &sqe, 1);
+	ublk_io_alloc_sqes(t, &sqe, 1);
 	io_uring_prep_timeout(sqe, &ts, 1, 0);
-	sqe->user_data = build_user_data(tag, ublksrv_get_op(iod), 0, 1);
+	sqe->user_data = build_user_data(tag, ublksrv_get_op(iod), 0, q->q_id, 1);
 
-	ublk_queued_tgt_io(q, tag, 1);
+	ublk_queued_tgt_io(t, q, tag, 1);
 
 	return 0;
 }
 
-static void ublk_fault_inject_tgt_io_done(struct ublk_queue *q, int tag,
+static void ublk_fault_inject_tgt_io_done(struct ublk_thread *t,
+					  struct ublk_queue *q,
 					  const struct io_uring_cqe *cqe)
 {
+	unsigned tag = user_data_to_tag(cqe->user_data);
 	const struct ublksrv_io_desc *iod = ublk_get_iod(q, tag);
 
 	if (cqe->res != -ETIME)
 		ublk_err("%s: unexpected cqe res %d\n", __func__, cqe->res);
 
-	if (ublk_completed_tgt_io(q, tag))
-		ublk_complete_io(q, tag, iod->nr_sectors << 9);
+	if (ublk_completed_tgt_io(t, q, tag))
+		ublk_complete_io(t, q, tag, iod->nr_sectors << 9);
 	else
 		ublk_err("%s: io not complete after 1 cqe\n", __func__);
 }
diff --git a/tools/testing/selftests/ublk/file_backed.c b/tools/testing/selftests/ublk/file_backed.c
index 509842df9bee..2d93ac860bd5 100644
--- a/tools/testing/selftests/ublk/file_backed.c
+++ b/tools/testing/selftests/ublk/file_backed.c
@@ -13,20 +13,22 @@ static enum io_uring_op ublk_to_uring_op(const struct ublksrv_io_desc *iod, int
 	assert(0);
 }
 
-static int loop_queue_flush_io(struct ublk_queue *q, const struct ublksrv_io_desc *iod, int tag)
+static int loop_queue_flush_io(struct ublk_thread *t, struct ublk_queue *q,
+			       const struct ublksrv_io_desc *iod, int tag)
 {
 	unsigned ublk_op = ublksrv_get_op(iod);
 	struct io_uring_sqe *sqe[1];
 
-	ublk_queue_alloc_sqes(q, sqe, 1);
+	ublk_io_alloc_sqes(t, sqe, 1);
 	io_uring_prep_fsync(sqe[0], 1 /*fds[1]*/, IORING_FSYNC_DATASYNC);
 	io_uring_sqe_set_flags(sqe[0], IOSQE_FIXED_FILE);
 	/* bit63 marks us as tgt io */
-	sqe[0]->user_data = build_user_data(tag, ublk_op, 0, 1);
+	sqe[0]->user_data = build_user_data(tag, ublk_op, 0, q->q_id, 1);
 	return 1;
 }
 
-static int loop_queue_tgt_rw_io(struct ublk_queue *q, const struct ublksrv_io_desc *iod, int tag)
+static int loop_queue_tgt_rw_io(struct ublk_thread *t, struct ublk_queue *q,
+				const struct ublksrv_io_desc *iod, int tag)
 {
 	unsigned ublk_op = ublksrv_get_op(iod);
 	unsigned zc = ublk_queue_use_zc(q);
@@ -36,7 +38,7 @@ static int loop_queue_tgt_rw_io(struct ublk_queue *q, const struct ublksrv_io_de
 	void *addr = (zc | auto_zc) ? NULL : (void *)iod->addr;
 
 	if (!zc || auto_zc) {
-		ublk_queue_alloc_sqes(q, sqe, 1);
+		ublk_io_alloc_sqes(t, sqe, 1);
 		if (!sqe[0])
 			return -ENOMEM;
 
@@ -48,31 +50,31 @@ static int loop_queue_tgt_rw_io(struct ublk_queue *q, const struct ublksrv_io_de
 		sqe[0]->buf_index = tag;
 		io_uring_sqe_set_flags(sqe[0], IOSQE_FIXED_FILE);
 		/* bit63 marks us as tgt io */
-		sqe[0]->user_data = build_user_data(tag, ublk_op, 0, 1);
+		sqe[0]->user_data = build_user_data(tag, ublk_op, 0, q->q_id, 1);
 		return 1;
 	}
 
-	ublk_queue_alloc_sqes(q, sqe, 3);
+	ublk_io_alloc_sqes(t, sqe, 3);
 
-	io_uring_prep_buf_register(sqe[0], 0, tag, q->q_id, tag);
+	io_uring_prep_buf_register(sqe[0], 0, tag, q->q_id, ublk_get_io(q, tag)->buf_index);
 	sqe[0]->flags |= IOSQE_CQE_SKIP_SUCCESS | IOSQE_IO_HARDLINK;
 	sqe[0]->user_data = build_user_data(tag,
-			ublk_cmd_op_nr(sqe[0]->cmd_op), 0, 1);
+			ublk_cmd_op_nr(sqe[0]->cmd_op), 0, q->q_id, 1);
 
 	io_uring_prep_rw(op, sqe[1], 1 /*fds[1]*/, 0,
 		iod->nr_sectors << 9,
 		iod->start_sector << 9);
 	sqe[1]->buf_index = tag;
 	sqe[1]->flags |= IOSQE_FIXED_FILE | IOSQE_IO_HARDLINK;
-	sqe[1]->user_data = build_user_data(tag, ublk_op, 0, 1);
+	sqe[1]->user_data = build_user_data(tag, ublk_op, 0, q->q_id, 1);
 
-	io_uring_prep_buf_unregister(sqe[2], 0, tag, q->q_id, tag);
-	sqe[2]->user_data = build_user_data(tag, ublk_cmd_op_nr(sqe[2]->cmd_op), 0, 1);
+	io_uring_prep_buf_unregister(sqe[2], 0, tag, q->q_id, ublk_get_io(q, tag)->buf_index);
+	sqe[2]->user_data = build_user_data(tag, ublk_cmd_op_nr(sqe[2]->cmd_op), 0, q->q_id, 1);
 
 	return 2;
 }
 
-static int loop_queue_tgt_io(struct ublk_queue *q, int tag)
+static int loop_queue_tgt_io(struct ublk_thread *t, struct ublk_queue *q, int tag)
 {
 	const struct ublksrv_io_desc *iod = ublk_get_iod(q, tag);
 	unsigned ublk_op = ublksrv_get_op(iod);
@@ -80,7 +82,7 @@ static int loop_queue_tgt_io(struct ublk_queue *q, int tag)
 
 	switch (ublk_op) {
 	case UBLK_IO_OP_FLUSH:
-		ret = loop_queue_flush_io(q, iod, tag);
+		ret = loop_queue_flush_io(t, q, iod, tag);
 		break;
 	case UBLK_IO_OP_WRITE_ZEROES:
 	case UBLK_IO_OP_DISCARD:
@@ -88,7 +90,7 @@ static int loop_queue_tgt_io(struct ublk_queue *q, int tag)
 		break;
 	case UBLK_IO_OP_READ:
 	case UBLK_IO_OP_WRITE:
-		ret = loop_queue_tgt_rw_io(q, iod, tag);
+		ret = loop_queue_tgt_rw_io(t, q, iod, tag);
 		break;
 	default:
 		ret = -EINVAL;
@@ -100,17 +102,19 @@ static int loop_queue_tgt_io(struct ublk_queue *q, int tag)
 	return ret;
 }
 
-static int ublk_loop_queue_io(struct ublk_queue *q, int tag)
+static int ublk_loop_queue_io(struct ublk_thread *t, struct ublk_queue *q,
+			      int tag)
 {
-	int queued = loop_queue_tgt_io(q, tag);
+	int queued = loop_queue_tgt_io(t, q, tag);
 
-	ublk_queued_tgt_io(q, tag, queued);
+	ublk_queued_tgt_io(t, q, tag, queued);
 	return 0;
 }
 
-static void ublk_loop_io_done(struct ublk_queue *q, int tag,
+static void ublk_loop_io_done(struct ublk_thread *t, struct ublk_queue *q,
 			      const struct io_uring_cqe *cqe)
 {
+	unsigned tag = user_data_to_tag(cqe->user_data);
 	unsigned op = user_data_to_op(cqe->user_data);
 	struct ublk_io *io = ublk_get_io(q, tag);
 
@@ -126,8 +130,8 @@ static void ublk_loop_io_done(struct ublk_queue *q, int tag,
 	if (op == ublk_cmd_op_nr(UBLK_U_IO_REGISTER_IO_BUF))
 		io->tgt_ios += 1;
 
-	if (ublk_completed_tgt_io(q, tag))
-		ublk_complete_io(q, tag, io->result);
+	if (ublk_completed_tgt_io(t, q, tag))
+		ublk_complete_io(t, q, tag, io->result);
 }
 
 static int ublk_loop_tgt_init(const struct dev_ctx *ctx, struct ublk_dev *dev)
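The target changes above all route SQE allocation through ublk_io_alloc_sqes() on the per-thread ring instead of the old per-queue ring. A minimal standalone sketch of that allocate-or-flush pattern using plain liburing (the ring depth of 8 and the batch of 3 are illustrative values, not taken from the patch):

/* Sketch of the ublk_io_alloc_sqes() pattern: flush the SQ ring when it
 * is short on space, then grab a contiguous batch of SQEs. Build with
 * -luring. */
#include <liburing.h>
#include <stdio.h>

static int alloc_sqes(struct io_uring *ring, struct io_uring_sqe *sqes[], int nr)
{
	int i;

	/* not enough room for the whole batch: submit what is queued */
	if (io_uring_sq_space_left(ring) < (unsigned)nr)
		io_uring_submit(ring);

	for (i = 0; i < nr; i++) {
		sqes[i] = io_uring_get_sqe(ring);
		if (!sqes[i])
			return i;	/* partial allocation, as in kublk */
	}
	return nr;
}

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqes[3];

	if (io_uring_queue_init(8, &ring, 0))
		return 1;
	/* a zero-copy target would prep register/IO/unregister here */
	printf("got %d sqes\n", alloc_sqes(&ring, sqes, 3));
	io_uring_queue_exit(&ring);
	return 0;
}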
diff --git a/tools/testing/selftests/ublk/kublk.c b/tools/testing/selftests/ublk/kublk.c
index b5131a000795..95188065b2e9 100644
--- a/tools/testing/selftests/ublk/kublk.c
+++ b/tools/testing/selftests/ublk/kublk.c
@@ -348,8 +348,8 @@ static void ublk_ctrl_dump(struct ublk_dev *dev)
 
 	for (i = 0; i < info->nr_hw_queues; i++) {
 		ublk_print_cpu_set(&affinity[i], buf, sizeof(buf));
-		printf("\tqueue %u: tid %d affinity(%s)\n",
-				i, dev->q[i].tid, buf);
+		printf("\tqueue %u: affinity(%s)\n",
+				i, buf);
 	}
 	free(affinity);
 }
@@ -412,16 +412,6 @@ static void ublk_queue_deinit(struct ublk_queue *q)
 	int i;
 	int nr_ios = q->q_depth;
 
-	io_uring_unregister_buffers(&q->ring);
-
-	io_uring_unregister_ring_fd(&q->ring);
-
-	if (q->ring.ring_fd > 0) {
-		io_uring_unregister_files(&q->ring);
-		close(q->ring.ring_fd);
-		q->ring.ring_fd = -1;
-	}
-
 	if (q->io_cmd_buf)
 		munmap(q->io_cmd_buf, ublk_queue_cmd_buf_sz(q));
 
@@ -429,29 +419,32 @@ static void ublk_queue_deinit(struct ublk_queue *q)
 		free(q->ios[i].buf_addr);
 }
 
+static void ublk_thread_deinit(struct ublk_thread *t)
+{
+	io_uring_unregister_buffers(&t->ring);
+
+	io_uring_unregister_ring_fd(&t->ring);
+
+	if (t->ring.ring_fd > 0) {
+		io_uring_unregister_files(&t->ring);
+		close(t->ring.ring_fd);
+		t->ring.ring_fd = -1;
+	}
+}
+
 static int ublk_queue_init(struct ublk_queue *q, unsigned extra_flags)
 {
 	struct ublk_dev *dev = q->dev;
 	int depth = dev->dev_info.queue_depth;
-	int i, ret = -1;
+	int i;
 	int cmd_buf_size, io_buf_size;
 	unsigned long off;
-	int ring_depth = dev->tgt.sq_depth, cq_depth = dev->tgt.cq_depth;
 
 	q->tgt_ops = dev->tgt.ops;
-	q->state = 0;
+	q->flags = 0;
 	q->q_depth = depth;
-	q->cmd_inflight = 0;
-	q->tid = gettid();
-
-	if (dev->dev_info.flags & (UBLK_F_SUPPORT_ZERO_COPY | UBLK_F_AUTO_BUF_REG)) {
-		q->state |= UBLKSRV_NO_BUF;
-		if (dev->dev_info.flags & UBLK_F_SUPPORT_ZERO_COPY)
-			q->state |= UBLKSRV_ZC;
-		if (dev->dev_info.flags & UBLK_F_AUTO_BUF_REG)
-			q->state |= UBLKSRV_AUTO_BUF_REG;
-	}
-	q->state |= extra_flags;
+	q->flags = dev->dev_info.flags;
+	q->flags |= extra_flags;
 
 	cmd_buf_size = ublk_queue_cmd_buf_sz(q);
 	off = UBLKSRV_CMD_BUF_OFFSET + q->q_id * ublk_queue_max_cmd_buf_sz();
@@ -466,9 +459,10 @@ static int ublk_queue_init(struct ublk_queue *q, unsigned extra_flags)
 	io_buf_size = dev->dev_info.max_io_buf_bytes;
 	for (i = 0; i < q->q_depth; i++) {
 		q->ios[i].buf_addr = NULL;
-		q->ios[i].flags = UBLKSRV_NEED_FETCH_RQ | UBLKSRV_IO_FREE;
+		q->ios[i].flags = UBLKS_IO_NEED_FETCH_RQ | UBLKS_IO_FREE;
+		q->ios[i].tag = i;
 
-		if (q->state & UBLKSRV_NO_BUF)
+		if (ublk_queue_no_buf(q))
 			continue;
 
 		if (posix_memalign((void **)&q->ios[i].buf_addr,
@@ -479,39 +473,57 @@ static int ublk_queue_init(struct ublk_queue *q, unsigned extra_flags)
 		}
 	}
 
-	ret = ublk_setup_ring(&q->ring, ring_depth, cq_depth,
+	return 0;
+ fail:
+	ublk_queue_deinit(q);
+	ublk_err("ublk dev %d queue %d failed\n",
+			dev->dev_info.dev_id, q->q_id);
+	return -ENOMEM;
+}
+
+static int ublk_thread_init(struct ublk_thread *t)
+{
+	struct ublk_dev *dev = t->dev;
+	int ring_depth = dev->tgt.sq_depth, cq_depth = dev->tgt.cq_depth;
+	int ret;
+
+	ret = ublk_setup_ring(&t->ring, ring_depth, cq_depth,
 			IORING_SETUP_COOP_TASKRUN |
 			IORING_SETUP_SINGLE_ISSUER |
 			IORING_SETUP_DEFER_TASKRUN);
 	if (ret < 0) {
-		ublk_err("ublk dev %d queue %d setup io_uring failed %d\n",
-				q->dev->dev_info.dev_id, q->q_id, ret);
+		ublk_err("ublk dev %d thread %d setup io_uring failed %d\n",
+				dev->dev_info.dev_id, t->idx, ret);
 		goto fail;
 	}
 
 	if (dev->dev_info.flags & (UBLK_F_SUPPORT_ZERO_COPY | UBLK_F_AUTO_BUF_REG)) {
-		ret = io_uring_register_buffers_sparse(&q->ring, q->q_depth);
+		unsigned nr_ios = dev->dev_info.queue_depth * dev->dev_info.nr_hw_queues;
+		unsigned max_nr_ios_per_thread = nr_ios / dev->nthreads;
+		max_nr_ios_per_thread += !!(nr_ios % dev->nthreads);
+		ret = io_uring_register_buffers_sparse(
+			&t->ring, max_nr_ios_per_thread);
 		if (ret) {
-			ublk_err("ublk dev %d queue %d register spare buffers failed %d",
-					dev->dev_info.dev_id, q->q_id, ret);
+			ublk_err("ublk dev %d thread %d register spare buffers failed %d",
					dev->dev_info.dev_id, t->idx, ret);
 			goto fail;
 		}
 	}
 
-	io_uring_register_ring_fd(&q->ring);
+	io_uring_register_ring_fd(&t->ring);
 
-	ret = io_uring_register_files(&q->ring, dev->fds, dev->nr_fds);
+	ret = io_uring_register_files(&t->ring, dev->fds, dev->nr_fds);
 	if (ret) {
-		ublk_err("ublk dev %d queue %d register files failed %d\n",
-				q->dev->dev_info.dev_id, q->q_id, ret);
+		ublk_err("ublk dev %d thread %d register files failed %d\n",
+				t->dev->dev_info.dev_id, t->idx, ret);
 		goto fail;
 	}
 
 	return 0;
- fail:
-	ublk_queue_deinit(q);
-	ublk_err("ublk dev %d queue %d failed\n",
-			dev->dev_info.dev_id, q->q_id);
+fail:
+	ublk_thread_deinit(t);
+	ublk_err("ublk dev %d thread %d init failed\n",
+			dev->dev_info.dev_id, t->idx);
 	return -ENOMEM;
 }
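ublk_thread_init() above sizes the sparse buffer table as ceil(nr_ios / nthreads), since with --per_io_tasks a thread may end up owning I/Os from every queue. A quick standalone check of that ceiling division (the 4-queue/depth-16/6-thread geometry is only an example):

#include <assert.h>
#include <stdio.h>

int main(void)
{
	/* example geometry: 4 hw queues x depth 16, served by 6 threads */
	unsigned queue_depth = 16, nr_hw_queues = 4, nthreads = 6;
	unsigned nr_ios = queue_depth * nr_hw_queues;

	/* same ceiling division as ublk_thread_init() */
	unsigned max_nr_ios_per_thread = nr_ios / nthreads;
	max_nr_ios_per_thread += !!(nr_ios % nthreads);

	/* 64 ios over 6 threads -> at most 11 per thread */
	assert(max_nr_ios_per_thread == 11);
	printf("%u ios, %u threads -> %u buffer slots each\n",
	       nr_ios, nthreads, max_nr_ios_per_thread);
	return 0;
}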
@@ -562,23 +574,24 @@ static void ublk_set_auto_buf_reg(const struct ublk_queue *q,
 	if (q->tgt_ops->buf_index)
 		buf.index = q->tgt_ops->buf_index(q, tag);
 	else
-		buf.index = tag;
+		buf.index = q->ios[tag].buf_index;
 
-	if (q->state & UBLKSRV_AUTO_BUF_REG_FALLBACK)
+	if (ublk_queue_auto_zc_fallback(q))
 		buf.flags = UBLK_AUTO_BUF_REG_FALLBACK;
 
 	sqe->addr = ublk_auto_buf_reg_to_sqe_addr(&buf);
 }
 
-int ublk_queue_io_cmd(struct ublk_queue *q, struct ublk_io *io, unsigned tag)
+int ublk_queue_io_cmd(struct ublk_thread *t, struct ublk_io *io)
 {
+	struct ublk_queue *q = ublk_io_to_queue(io);
 	struct ublksrv_io_cmd *cmd;
 	struct io_uring_sqe *sqe[1];
 	unsigned int cmd_op = 0;
 	__u64 user_data;
 
 	/* only freed io can be issued */
-	if (!(io->flags & UBLKSRV_IO_FREE))
+	if (!(io->flags & UBLKS_IO_FREE))
 		return 0;
 
 	/*
@@ -586,23 +599,23 @@ int ublk_queue_io_cmd(struct ublk_queue *q, struct ublk_io *io, unsigned tag)
 	 * getting data
 	 */
 	if (!(io->flags &
-		(UBLKSRV_NEED_FETCH_RQ | UBLKSRV_NEED_COMMIT_RQ_COMP | UBLKSRV_NEED_GET_DATA)))
+		(UBLKS_IO_NEED_FETCH_RQ | UBLKS_IO_NEED_COMMIT_RQ_COMP | UBLKS_IO_NEED_GET_DATA)))
 		return 0;
 
-	if (io->flags & UBLKSRV_NEED_GET_DATA)
+	if (io->flags & UBLKS_IO_NEED_GET_DATA)
 		cmd_op = UBLK_U_IO_NEED_GET_DATA;
-	else if (io->flags & UBLKSRV_NEED_COMMIT_RQ_COMP)
+	else if (io->flags & UBLKS_IO_NEED_COMMIT_RQ_COMP)
 		cmd_op = UBLK_U_IO_COMMIT_AND_FETCH_REQ;
-	else if (io->flags & UBLKSRV_NEED_FETCH_RQ)
+	else if (io->flags & UBLKS_IO_NEED_FETCH_RQ)
 		cmd_op = UBLK_U_IO_FETCH_REQ;
 
-	if (io_uring_sq_space_left(&q->ring) < 1)
-		io_uring_submit(&q->ring);
+	if (io_uring_sq_space_left(&t->ring) < 1)
+		io_uring_submit(&t->ring);
 
-	ublk_queue_alloc_sqes(q, sqe, 1);
+	ublk_io_alloc_sqes(t, sqe, 1);
 	if (!sqe[0]) {
-		ublk_err("%s: run out of sqe %d, tag %d\n",
-				__func__, q->q_id, tag);
+		ublk_err("%s: run out of sqe. thread %u, tag %d\n",
+				__func__, t->idx, io->tag);
 		return -1;
 	}
 
@@ -617,52 +630,84 @@ int ublk_queue_io_cmd(struct ublk_queue *q, struct ublk_io *io, unsigned tag)
 	sqe[0]->opcode = IORING_OP_URING_CMD;
 	sqe[0]->flags = IOSQE_FIXED_FILE;
 	sqe[0]->rw_flags = 0;
-	cmd->tag = tag;
+	cmd->tag = io->tag;
 	cmd->q_id = q->q_id;
-	if (!(q->state & UBLKSRV_NO_BUF))
+	if (!ublk_queue_no_buf(q))
 		cmd->addr = (__u64) (uintptr_t) io->buf_addr;
 	else
 		cmd->addr = 0;
 
-	if (q->state & UBLKSRV_AUTO_BUF_REG)
-		ublk_set_auto_buf_reg(q, sqe[0], tag);
+	if (ublk_queue_use_auto_zc(q))
+		ublk_set_auto_buf_reg(q, sqe[0], io->tag);
 
-	user_data = build_user_data(tag, _IOC_NR(cmd_op), 0, 0);
+	user_data = build_user_data(io->tag, _IOC_NR(cmd_op), 0, q->q_id, 0);
 	io_uring_sqe_set_data64(sqe[0], user_data);
 
 	io->flags = 0;
 
-	q->cmd_inflight += 1;
+	t->cmd_inflight += 1;
 
-	ublk_dbg(UBLK_DBG_IO_CMD, "%s: (qid %d tag %u cmd_op %u) iof %x stopping %d\n",
-			__func__, q->q_id, tag, cmd_op,
-			io->flags, !!(q->state & UBLKSRV_QUEUE_STOPPING));
+	ublk_dbg(UBLK_DBG_IO_CMD, "%s: (thread %u qid %d tag %u cmd_op %u) iof %x stopping %d\n",
+			__func__, t->idx, q->q_id, io->tag, cmd_op,
+			io->flags, !!(t->state & UBLKS_T_STOPPING));
 	return 1;
 }
 
-static void ublk_submit_fetch_commands(struct ublk_queue *q)
+static void ublk_submit_fetch_commands(struct ublk_thread *t)
 {
-	int i = 0;
+	struct ublk_queue *q;
+	struct ublk_io *io;
+	int i = 0, j = 0;
 
-	for (i = 0; i < q->q_depth; i++)
-		ublk_queue_io_cmd(q, &q->ios[i], i);
+	if (t->dev->per_io_tasks) {
+		/*
+		 * Lexicographically order all the (qid,tag) pairs, with
+		 * qid taking priority (so (1,0) > (0,1)). Then make
+		 * this thread the daemon for every Nth entry in this
+		 * list (N is the number of threads), starting at this
+		 * thread's index. This ensures that each queue is
+		 * handled by as many ublk server threads as possible,
+		 * so that load that is concentrated on one or a few
+		 * queues can make use of all ublk server threads.
+		 */
+		const struct ublksrv_ctrl_dev_info *dinfo = &t->dev->dev_info;
+		int nr_ios = dinfo->nr_hw_queues * dinfo->queue_depth;
+		for (i = t->idx; i < nr_ios; i += t->dev->nthreads) {
+			int q_id = i / dinfo->queue_depth;
+			int tag = i % dinfo->queue_depth;
+			q = &t->dev->q[q_id];
+			io = &q->ios[tag];
+			io->buf_index = j++;
+			ublk_queue_io_cmd(t, io);
+		}
+	} else {
+		/*
+		 * Service exclusively the queue whose q_id matches our
+		 * thread index.
+		 */
+		struct ublk_queue *q = &t->dev->q[t->idx];
+		for (i = 0; i < q->q_depth; i++) {
+			io = &q->ios[i];
+			io->buf_index = i;
+			ublk_queue_io_cmd(t, io);
+		}
+	}
 }
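The comment in ublk_submit_fetch_commands() above describes the round-robin assignment of (qid,tag) pairs to threads. A standalone sketch that prints the resulting ownership for a toy 2-queue, depth-4, 3-thread device (geometry chosen only for illustration):

#include <stdio.h>

int main(void)
{
	/* toy geometry: 2 queues x depth 4, 3 server threads */
	const int nr_hw_queues = 2, queue_depth = 4, nthreads = 3;
	const int nr_ios = nr_hw_queues * queue_depth;

	for (int t = 0; t < nthreads; t++) {
		printf("thread %d:", t);
		/* same stride walk as ublk_submit_fetch_commands() */
		for (int i = t; i < nr_ios; i += nthreads)
			printf(" (q%d,tag%d)", i / queue_depth, i % queue_depth);
		printf("\n");
	}
	return 0;
}

With this stride walk each of the two queues ends up served by all three threads, which is exactly the property test_generic_12.sh later asserts via bpftrace.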
 
-static int ublk_queue_is_idle(struct ublk_queue *q)
+static int ublk_thread_is_idle(struct ublk_thread *t)
 {
-	return !io_uring_sq_ready(&q->ring) && !q->io_inflight;
+	return !io_uring_sq_ready(&t->ring) && !t->io_inflight;
 }
 
-static int ublk_queue_is_done(struct ublk_queue *q)
+static int ublk_thread_is_done(struct ublk_thread *t)
 {
-	return (q->state & UBLKSRV_QUEUE_STOPPING) && ublk_queue_is_idle(q);
+	return (t->state & UBLKS_T_STOPPING) && ublk_thread_is_idle(t);
 }
 
-static inline void ublksrv_handle_tgt_cqe(struct ublk_queue *q,
-		struct io_uring_cqe *cqe)
+static inline void ublksrv_handle_tgt_cqe(struct ublk_thread *t,
+					  struct ublk_queue *q,
+					  struct io_uring_cqe *cqe)
 {
-	unsigned tag = user_data_to_tag(cqe->user_data);
-
 	if (cqe->res < 0 && cqe->res != -EAGAIN)
 		ublk_err("%s: failed tgt io: res %d qid %u tag %u, cmd_op %u\n",
 			__func__, cqe->res, q->q_id,
@@ -670,149 +715,159 @@ static inline void ublksrv_handle_tgt_cqe(struct ublk_queue *q,
 			user_data_to_op(cqe->user_data));
 
 	if (q->tgt_ops->tgt_io_done)
-		q->tgt_ops->tgt_io_done(q, tag, cqe);
+		q->tgt_ops->tgt_io_done(t, q, cqe);
 }
 
-static void ublk_handle_cqe(struct io_uring *r,
-		struct io_uring_cqe *cqe, void *data)
+static void ublk_handle_uring_cmd(struct ublk_thread *t,
+				  struct ublk_queue *q,
+				  const struct io_uring_cqe *cqe)
 {
-	struct ublk_queue *q = container_of(r, struct ublk_queue, ring);
-	unsigned tag = user_data_to_tag(cqe->user_data);
-	unsigned cmd_op = user_data_to_op(cqe->user_data);
 	int fetch = (cqe->res != UBLK_IO_RES_ABORT) &&
-		!(q->state & UBLKSRV_QUEUE_STOPPING);
-	struct ublk_io *io;
-
-	if (cqe->res < 0 && cqe->res != -ENODEV)
-		ublk_err("%s: res %d userdata %llx queue state %x\n", __func__,
-				cqe->res, cqe->user_data, q->state);
-
-	ublk_dbg(UBLK_DBG_IO_CMD, "%s: res %d (qid %d tag %u cmd_op %u target %d/%d) stopping %d\n",
-			__func__, cqe->res, q->q_id, tag, cmd_op,
-			is_target_io(cqe->user_data),
-			user_data_to_tgt_data(cqe->user_data),
-			(q->state & UBLKSRV_QUEUE_STOPPING));
-
-	/* Don't retrieve io in case of target io */
-	if (is_target_io(cqe->user_data)) {
-		ublksrv_handle_tgt_cqe(q, cqe);
-		return;
-	}
-
-	io = &q->ios[tag];
-	q->cmd_inflight--;
+		!(t->state & UBLKS_T_STOPPING);
+	unsigned tag = user_data_to_tag(cqe->user_data);
+	struct ublk_io *io = &q->ios[tag];
 
 	if (!fetch) {
-		q->state |= UBLKSRV_QUEUE_STOPPING;
-		io->flags &= ~UBLKSRV_NEED_FETCH_RQ;
+		t->state |= UBLKS_T_STOPPING;
+		io->flags &= ~UBLKS_IO_NEED_FETCH_RQ;
 	}
 
 	if (cqe->res == UBLK_IO_RES_OK) {
 		assert(tag < q->q_depth);
 		if (q->tgt_ops->queue_io)
-			q->tgt_ops->queue_io(q, tag);
+			q->tgt_ops->queue_io(t, q, tag);
 	} else if (cqe->res == UBLK_IO_RES_NEED_GET_DATA) {
-		io->flags |= UBLKSRV_NEED_GET_DATA | UBLKSRV_IO_FREE;
-		ublk_queue_io_cmd(q, io, tag);
+		io->flags |= UBLKS_IO_NEED_GET_DATA | UBLKS_IO_FREE;
+		ublk_queue_io_cmd(t, io);
 	} else {
 		/*
 		 * COMMIT_REQ will be completed immediately since no fetching
 		 * piggyback is required.
 		 *
 		 * Marking IO_FREE only, then this io won't be issued since
-		 * we only issue io with (UBLKSRV_IO_FREE | UBLKSRV_NEED_*)
+		 * we only issue io with (UBLKS_IO_FREE | UBLKSRV_NEED_*)
 		 *
 		 * */
-		io->flags = UBLKSRV_IO_FREE;
+		io->flags = UBLKS_IO_FREE;
 	}
 }
 
+static void ublk_handle_cqe(struct ublk_thread *t,
+		struct io_uring_cqe *cqe, void *data)
+{
+	struct ublk_dev *dev = t->dev;
+	unsigned q_id = user_data_to_q_id(cqe->user_data);
+	struct ublk_queue *q = &dev->q[q_id];
+	unsigned cmd_op = user_data_to_op(cqe->user_data);
+
+	if (cqe->res < 0 && cqe->res != -ENODEV)
+		ublk_err("%s: res %d userdata %llx queue state %x\n", __func__,
+				cqe->res, cqe->user_data, q->flags);
+
+	ublk_dbg(UBLK_DBG_IO_CMD, "%s: res %d (qid %d tag %u cmd_op %u target %d/%d) stopping %d\n",
+			__func__, cqe->res, q->q_id, user_data_to_tag(cqe->user_data),
+			cmd_op, is_target_io(cqe->user_data),
+			user_data_to_tgt_data(cqe->user_data),
+			(t->state & UBLKS_T_STOPPING));
+
+	/* Don't retrieve io in case of target io */
+	if (is_target_io(cqe->user_data)) {
+		ublksrv_handle_tgt_cqe(t, q, cqe);
+		return;
+	}
+
+	t->cmd_inflight--;
+
+	ublk_handle_uring_cmd(t, q, cqe);
+}
+
-static int ublk_reap_events_uring(struct io_uring *r)
+static int ublk_reap_events_uring(struct ublk_thread *t)
 {
 	struct io_uring_cqe *cqe;
 	unsigned head;
 	int count = 0;
 
-	io_uring_for_each_cqe(r, head, cqe) {
-		ublk_handle_cqe(r, cqe, NULL);
+	io_uring_for_each_cqe(&t->ring, head, cqe) {
+		ublk_handle_cqe(t, cqe, NULL);
 		count += 1;
 	}
-	io_uring_cq_advance(r, count);
+	io_uring_cq_advance(&t->ring, count);
 
 	return count;
 }
 
-static int ublk_process_io(struct ublk_queue *q)
+static int ublk_process_io(struct ublk_thread *t)
 {
 	int ret, reapped;
 
-	ublk_dbg(UBLK_DBG_QUEUE, "dev%d-q%d: to_submit %d inflight cmd %u stopping %d\n",
-			q->dev->dev_info.dev_id,
-			q->q_id, io_uring_sq_ready(&q->ring),
-			q->cmd_inflight,
-			(q->state & UBLKSRV_QUEUE_STOPPING));
+	ublk_dbg(UBLK_DBG_THREAD, "dev%d-t%u: to_submit %d inflight cmd %u stopping %d\n",
+			t->dev->dev_info.dev_id,
+			t->idx, io_uring_sq_ready(&t->ring),
+			t->cmd_inflight,
+			(t->state & UBLKS_T_STOPPING));
 
-	if (ublk_queue_is_done(q))
+	if (ublk_thread_is_done(t))
 		return -ENODEV;
 
-	ret = io_uring_submit_and_wait(&q->ring, 1);
-	reapped = ublk_reap_events_uring(&q->ring);
+	ret = io_uring_submit_and_wait(&t->ring, 1);
+	reapped = ublk_reap_events_uring(t);
 
-	ublk_dbg(UBLK_DBG_QUEUE, "submit result %d, reapped %d stop %d idle %d\n",
-			ret, reapped, (q->state & UBLKSRV_QUEUE_STOPPING),
-			(q->state & UBLKSRV_QUEUE_IDLE));
+	ublk_dbg(UBLK_DBG_THREAD, "submit result %d, reapped %d stop %d idle %d\n",
			ret, reapped, (t->state & UBLKS_T_STOPPING),
+			(t->state & UBLKS_T_IDLE));
 
 	return reapped;
 }
 
-static void ublk_queue_set_sched_affinity(const struct ublk_queue *q,
+static void ublk_thread_set_sched_affinity(const struct ublk_thread *t,
 		cpu_set_t *cpuset)
 {
 	if (sched_setaffinity(0, sizeof(*cpuset), cpuset) < 0)
-		ublk_err("ublk dev %u queue %u set affinity failed",
-				q->dev->dev_info.dev_id, q->q_id);
+		ublk_err("ublk dev %u thread %u set affinity failed",
+				t->dev->dev_info.dev_id, t->idx);
 }
 
-struct ublk_queue_info {
-	struct ublk_queue	*q;
-	sem_t			*queue_sem;
+struct ublk_thread_info {
+	struct ublk_dev		*dev;
+	unsigned		idx;
+	sem_t			*ready;
 	cpu_set_t		*affinity;
-	unsigned char		auto_zc_fallback;
 };
 
 static void *ublk_io_handler_fn(void *data)
 {
-	struct ublk_queue_info *info = data;
-	struct ublk_queue *q = info->q;
-	int dev_id = q->dev->dev_info.dev_id;
-	unsigned extra_flags = 0;
+	struct ublk_thread_info *info = data;
+	struct ublk_thread *t = &info->dev->threads[info->idx];
+	int dev_id = info->dev->dev_info.dev_id;
 	int ret;
 
-	if (info->auto_zc_fallback)
-		extra_flags = UBLKSRV_AUTO_BUF_REG_FALLBACK;
+	t->dev = info->dev;
+	t->idx = info->idx;
 
-	ret = ublk_queue_init(q, extra_flags);
+	ret = ublk_thread_init(t);
 	if (ret) {
-		ublk_err("ublk dev %d queue %d init queue failed\n",
-				dev_id, q->q_id);
+		ublk_err("ublk dev %d thread %u init failed\n",
+				dev_id, t->idx);
 		return NULL;
 	}
 	/* IO perf is sensitive with queue pthread affinity on NUMA machine*/
-	ublk_queue_set_sched_affinity(q, info->affinity);
-	sem_post(info->queue_sem);
+	if (info->affinity)
+		ublk_thread_set_sched_affinity(t, info->affinity);
+	sem_post(info->ready);
 
-	ublk_dbg(UBLK_DBG_QUEUE, "tid %d: ublk dev %d queue %d started\n",
-			q->tid, dev_id, q->q_id);
+	ublk_dbg(UBLK_DBG_THREAD, "tid %d: ublk dev %d thread %u started\n",
+			gettid(), dev_id, t->idx);
 
 	/* submit all io commands to ublk driver */
-	ublk_submit_fetch_commands(q);
+	ublk_submit_fetch_commands(t);
 	do {
-		if (ublk_process_io(q) < 0)
+		if (ublk_process_io(t) < 0)
 			break;
 	} while (1);
 
-	ublk_dbg(UBLK_DBG_QUEUE, "ublk dev %d queue %d exited\n", dev_id, q->q_id);
-	ublk_queue_deinit(q);
+	ublk_dbg(UBLK_DBG_THREAD, "tid %d: ublk dev %d thread %d exiting\n",
+			gettid(), dev_id, t->idx);
+	ublk_thread_deinit(t);
 	return NULL;
 }
@@ -855,20 +910,20 @@ static int ublk_send_dev_event(const struct dev_ctx *ctx, struct ublk_dev *dev,
 static int ublk_start_daemon(const struct dev_ctx *ctx, struct ublk_dev *dev)
 {
 	const struct ublksrv_ctrl_dev_info *dinfo = &dev->dev_info;
-	struct ublk_queue_info *qinfo;
+	struct ublk_thread_info *tinfo;
+	unsigned long long extra_flags = 0;
 	cpu_set_t *affinity_buf;
 	void *thread_ret;
-	sem_t queue_sem;
+	sem_t ready;
 	int ret, i;
 
 	ublk_dbg(UBLK_DBG_DEV, "%s enter\n", __func__);
 
-	qinfo = (struct ublk_queue_info *)calloc(sizeof(struct ublk_queue_info),
-			dinfo->nr_hw_queues);
-	if (!qinfo)
+	tinfo = calloc(sizeof(struct ublk_thread_info), dev->nthreads);
+	if (!tinfo)
 		return -ENOMEM;
 
-	sem_init(&queue_sem, 0, 0);
+	sem_init(&ready, 0, 0);
 
 	ret = ublk_dev_prep(ctx, dev);
 	if (ret)
 		return ret;
@@ -877,22 +932,44 @@ static int ublk_start_daemon(const struct dev_ctx *ctx, struct ublk_dev *dev)
 	if (ret)
 		return ret;
 
+	if (ctx->auto_zc_fallback)
+		extra_flags = UBLKS_Q_AUTO_BUF_REG_FALLBACK;
+
 	for (i = 0; i < dinfo->nr_hw_queues; i++) {
 		dev->q[i].dev = dev;
 		dev->q[i].q_id = i;
 
-		qinfo[i].q = &dev->q[i];
-		qinfo[i].queue_sem = &queue_sem;
-		qinfo[i].affinity = &affinity_buf[i];
-		qinfo[i].auto_zc_fallback = ctx->auto_zc_fallback;
-		pthread_create(&dev->q[i].thread, NULL,
+		ret = ublk_queue_init(&dev->q[i], extra_flags);
+		if (ret) {
+			ublk_err("ublk dev %d queue %d init queue failed\n",
+					dinfo->dev_id, i);
+			goto fail;
+		}
+	}
+
+	for (i = 0; i < dev->nthreads; i++) {
+		tinfo[i].dev = dev;
+		tinfo[i].idx = i;
+		tinfo[i].ready = &ready;
+
+		/*
+		 * If threads are not tied 1:1 to queues, setting thread
+		 * affinity based on queue affinity makes little sense.
+		 * However, thread CPU affinity has significant impact
+		 * on performance, so to compare fairly, we'll still set
+		 * thread CPU affinity based on queue affinity where
+		 * possible.
+		 */
+		if (dev->nthreads == dinfo->nr_hw_queues)
+			tinfo[i].affinity = &affinity_buf[i];
+		pthread_create(&dev->threads[i].thread, NULL,
 				ublk_io_handler_fn,
-				&qinfo[i]);
+				&tinfo[i]);
 	}
 
-	for (i = 0; i < dinfo->nr_hw_queues; i++)
-		sem_wait(&queue_sem);
-	free(qinfo);
+	for (i = 0; i < dev->nthreads; i++)
+		sem_wait(&ready);
+	free(tinfo);
 	free(affinity_buf);
 
 	/* everything is fine now, start us */
@@ -914,9 +991,11 @@ static int ublk_start_daemon(const struct dev_ctx *ctx, struct ublk_dev *dev)
 	ublk_send_dev_event(ctx, dev, dev->dev_info.dev_id);
 
 	/* wait until we are terminated */
-	for (i = 0; i < dinfo->nr_hw_queues; i++)
-		pthread_join(dev->q[i].thread, &thread_ret);
+	for (i = 0; i < dev->nthreads; i++)
+		pthread_join(dev->threads[i].thread, &thread_ret);
 fail:
+	for (i = 0; i < dinfo->nr_hw_queues; i++)
+		ublk_queue_deinit(&dev->q[i]);
 	ublk_dev_unprep(dev);
 	ublk_dbg(UBLK_DBG_DEV, "%s exit\n", __func__);
@@ -1022,13 +1101,14 @@ wait:
 
 static int __cmd_dev_add(const struct dev_ctx *ctx)
 {
+	unsigned nthreads = ctx->nthreads;
 	unsigned nr_queues = ctx->nr_hw_queues;
 	const char *tgt_type = ctx->tgt_type;
 	unsigned depth = ctx->queue_depth;
 	__u64 features;
 	const struct ublk_tgt_ops *ops;
 	struct ublksrv_ctrl_dev_info *info;
-	struct ublk_dev *dev;
+	struct ublk_dev *dev = NULL;
 	int dev_id = ctx->dev_id;
 	int ret, i;
 
@@ -1036,29 +1116,55 @@ static int __cmd_dev_add(const struct dev_ctx *ctx)
 	if (!ops) {
 		ublk_err("%s: no such tgt type, type %s\n",
 				__func__, tgt_type);
-		return -ENODEV;
+		ret = -ENODEV;
+		goto fail;
 	}
 
 	if (nr_queues > UBLK_MAX_QUEUES || depth > UBLK_QUEUE_DEPTH) {
 		ublk_err("%s: invalid nr_queues or depth queues %u depth %u\n",
 				__func__, nr_queues, depth);
-		return -EINVAL;
+		ret = -EINVAL;
+		goto fail;
+	}
+
+	/* default to 1:1 threads:queues if nthreads is unspecified */
+	if (!nthreads)
+		nthreads = nr_queues;
+
+	if (nthreads > UBLK_MAX_THREADS) {
+		ublk_err("%s: %u is too many threads (max %u)\n",
+				__func__, nthreads, UBLK_MAX_THREADS);
+		ret = -EINVAL;
+		goto fail;
+	}
+
+	if (nthreads != nr_queues && !ctx->per_io_tasks) {
+		ublk_err("%s: threads %u must be same as queues %u if "
+			"not using per_io_tasks\n",
+			__func__, nthreads, nr_queues);
+		ret = -EINVAL;
+		goto fail;
 	}
 
 	dev = ublk_ctrl_init();
 	if (!dev) {
 		ublk_err("%s: can't alloc dev id %d, type %s\n",
 				__func__, dev_id, tgt_type);
-		return -ENOMEM;
+		ret = -ENOMEM;
+		goto fail;
 	}
 
 	/* kernel doesn't support get_features */
 	ret = ublk_ctrl_get_features(dev, &features);
-	if (ret < 0)
-		return -EINVAL;
+	if (ret < 0) {
+		ret = -EINVAL;
+		goto fail;
+	}
 
-	if (!(features & UBLK_F_CMD_IOCTL_ENCODE))
-		return -ENOTSUP;
+	if (!(features & UBLK_F_CMD_IOCTL_ENCODE)) {
+		ret = -ENOTSUP;
+		goto fail;
+	}
 
 	info = &dev->dev_info;
 	info->dev_id = ctx->dev_id;
@@ -1068,6 +1174,8 @@ static int __cmd_dev_add(const struct dev_ctx *ctx)
 	if ((features & UBLK_F_QUIESCE) &&
 			(info->flags & UBLK_F_USER_RECOVERY))
 		info->flags |= UBLK_F_QUIESCE;
+	dev->nthreads = nthreads;
+	dev->per_io_tasks = ctx->per_io_tasks;
 	dev->tgt.ops = ops;
 	dev->tgt.sq_depth = depth;
 	dev->tgt.cq_depth = depth;
@@ -1097,7 +1205,8 @@ static int __cmd_dev_add(const struct dev_ctx *ctx)
 fail:
 	if (ret < 0)
 		ublk_send_dev_event(ctx, dev, -1);
-	ublk_ctrl_deinit(dev);
+	if (dev)
+		ublk_ctrl_deinit(dev);
 	return ret;
 }
@@ -1159,6 +1268,8 @@ run:
 		shmctl(ctx->_shmid, IPC_RMID, NULL);
 		/* wait for child and detach from it */
 		wait(NULL);
+		if (exit_code == EXIT_FAILURE)
+			ublk_err("%s: command failed\n", __func__);
 		exit(exit_code);
 	} else {
 		exit(EXIT_FAILURE);
@@ -1266,6 +1377,7 @@ static int cmd_dev_get_features(void)
 		[const_ilog2(UBLK_F_UPDATE_SIZE)] = "UPDATE_SIZE",
 		[const_ilog2(UBLK_F_AUTO_BUF_REG)] = "AUTO_BUF_REG",
 		[const_ilog2(UBLK_F_QUIESCE)] = "QUIESCE",
+		[const_ilog2(UBLK_F_PER_IO_DAEMON)] = "PER_IO_DAEMON",
 	};
 	struct ublk_dev *dev;
 	__u64 features = 0;
@@ -1360,8 +1472,10 @@ static void __cmd_create_help(char *exe, bool recovery)
 			exe, recovery ? "recover" : "add");
 	printf("\t[--foreground] [--quiet] [-z] [--auto_zc] [--auto_zc_fallback] [--debug_mask mask] [-r 0|1 ] [-g]\n");
 	printf("\t[-e 0|1 ] [-i 0|1]\n");
+	printf("\t[--nthreads threads] [--per_io_tasks]\n");
 	printf("\t[target options] [backfile1] [backfile2] ...\n");
 	printf("\tdefault: nr_queues=2(max 32), depth=128(max 1024), dev_id=-1(auto allocation)\n");
+	printf("\tdefault: nthreads=nr_queues");
 
 	for (i = 0; i < sizeof(tgt_ops_list) / sizeof(tgt_ops_list[0]); i++) {
 		const struct ublk_tgt_ops *ops = tgt_ops_list[i];
@@ -1418,6 +1532,8 @@ int main(int argc, char *argv[])
 		{ "auto_zc",		0,	NULL,  0  },
 		{ "auto_zc_fallback",	0,	NULL,  0  },
 		{ "size",		1,	NULL, 's'},
+		{ "nthreads",		1,	NULL,  0  },
+		{ "per_io_tasks",	0,	NULL,  0  },
 		{ 0, 0, 0, 0 }
 	};
 	const struct ublk_tgt_ops *ops = NULL;
@@ -1493,6 +1609,10 @@ int main(int argc, char *argv[])
 				ctx.flags |= UBLK_F_AUTO_BUF_REG;
 			if (!strcmp(longopts[option_idx].name, "auto_zc_fallback"))
 				ctx.auto_zc_fallback = 1;
+			if (!strcmp(longopts[option_idx].name, "nthreads"))
+				ctx.nthreads = strtol(optarg, NULL, 10);
+			if (!strcmp(longopts[option_idx].name, "per_io_tasks"))
+				ctx.per_io_tasks = 1;
 			break;
 		case '?':
 			/*
diff --git a/tools/testing/selftests/ublk/kublk.h b/tools/testing/selftests/ublk/kublk.h
index e34508bf5798..219233f8a053 100644
--- a/tools/testing/selftests/ublk/kublk.h
+++ b/tools/testing/selftests/ublk/kublk.h
@@ -29,13 +29,9 @@
 #include "ublk_dep.h"
 #include <linux/ublk_cmd.h>
 
-#define __maybe_unused __attribute__((unused))
-#define MAX_BACK_FILES 4
-#ifndef min
-#define min(a, b) ((a) < (b) ? (a) : (b))
-#endif
+#include "utils.h"
 
-#define ARRAY_SIZE(x) (sizeof(x) / sizeof(x[0]))
+#define MAX_BACK_FILES 4
 
 /****************** part 1: libublk ********************/
 
@@ -45,22 +41,16 @@
 #define UBLK_CTRL_RING_DEPTH            32
 #define ERROR_EVTFD_DEVID	-2
 
-/* queue idle timeout */
-#define UBLKSRV_IO_IDLE_SECS		20
-
 #define UBLK_IO_MAX_BYTES               (1 << 20)
-#define UBLK_MAX_QUEUES                 32
+#define UBLK_MAX_QUEUES_SHIFT		5
+#define UBLK_MAX_QUEUES                 (1 << UBLK_MAX_QUEUES_SHIFT)
+#define UBLK_MAX_THREADS_SHIFT		5
+#define UBLK_MAX_THREADS		(1 << UBLK_MAX_THREADS_SHIFT)
 #define UBLK_QUEUE_DEPTH                1024
 
-#define UBLK_DBG_DEV            (1U << 0)
-#define UBLK_DBG_QUEUE          (1U << 1)
-#define UBLK_DBG_IO_CMD         (1U << 2)
-#define UBLK_DBG_IO             (1U << 3)
-#define UBLK_DBG_CTRL_CMD       (1U << 4)
-#define UBLK_LOG                (1U << 5)
-
 struct ublk_dev;
 struct ublk_queue;
+struct ublk_thread;
 
 struct stripe_ctx {
 	/* stripe */
@@ -76,6 +66,7 @@ struct dev_ctx {
 	char tgt_type[16];
 	unsigned long flags;
 	unsigned nr_hw_queues;
+	unsigned short nthreads;
 	unsigned queue_depth;
 	int dev_id;
 	int nr_files;
@@ -85,6 +76,7 @@ struct dev_ctx {
 	unsigned int	fg:1;
 	unsigned int	recovery:1;
 	unsigned int	auto_zc_fallback:1;
+	unsigned int	per_io_tasks:1;
 
 	int _evtfd;
 	int _shmid;
@@ -115,16 +107,19 @@ struct ublk_ctrl_cmd_data {
 
 struct ublk_io {
 	char *buf_addr;
 
-#define UBLKSRV_NEED_FETCH_RQ		(1UL << 0)
-#define UBLKSRV_NEED_COMMIT_RQ_COMP	(1UL << 1)
-#define UBLKSRV_IO_FREE			(1UL << 2)
-#define UBLKSRV_NEED_GET_DATA           (1UL << 3)
-#define UBLKSRV_NEED_REG_BUF		(1UL << 4)
+#define UBLKS_IO_NEED_FETCH_RQ		(1UL << 0)
+#define UBLKS_IO_NEED_COMMIT_RQ_COMP	(1UL << 1)
+#define UBLKS_IO_FREE			(1UL << 2)
+#define UBLKS_IO_NEED_GET_DATA          (1UL << 3)
+#define UBLKS_IO_NEED_REG_BUF		(1UL << 4)
 	unsigned short flags;
 	unsigned short refs;
 
 	/* used by target code only */
+	int tag;
+
 	int result;
+	unsigned short buf_index;
 	unsigned short tgt_ios;
 	void *private_data;
 };
@@ -134,9 +129,9 @@ struct ublk_tgt_ops {
 	int (*init_tgt)(const struct dev_ctx *ctx, struct ublk_dev *);
 	void (*deinit_tgt)(struct ublk_dev *);
 
-	int (*queue_io)(struct ublk_queue *, int tag);
-	void (*tgt_io_done)(struct ublk_queue *,
-			int tag, const struct io_uring_cqe *);
+	int (*queue_io)(struct ublk_thread *, struct ublk_queue *, int tag);
+	void (*tgt_io_done)(struct ublk_thread *, struct ublk_queue *,
+			const struct io_uring_cqe *);
 
 	/*
 	 * Target specific command line handling
@@ -165,28 +160,37 @@ struct ublk_tgt {
 struct ublk_queue {
 	int q_id;
 	int q_depth;
-	unsigned int cmd_inflight;
-	unsigned int io_inflight;
 	struct ublk_dev *dev;
 	const struct ublk_tgt_ops *tgt_ops;
 	struct ublksrv_io_desc *io_cmd_buf;
-	struct io_uring ring;
+
+/* borrow one bit of ublk uapi flags, which may never be used */
+#define UBLKS_Q_AUTO_BUF_REG_FALLBACK	(1ULL << 63)
+	__u64 flags;
 	struct ublk_io ios[UBLK_QUEUE_DEPTH];
-#define UBLKSRV_QUEUE_STOPPING	(1U << 0)
-#define UBLKSRV_QUEUE_IDLE	(1U << 1)
-#define UBLKSRV_NO_BUF		(1U << 2)
-#define UBLKSRV_ZC		(1U << 3)
-#define UBLKSRV_AUTO_BUF_REG	(1U << 4)
-#define UBLKSRV_AUTO_BUF_REG_FALLBACK	(1U << 5)
-	unsigned state;
-	pid_t tid;
+};
+
+struct ublk_thread {
+	struct ublk_dev *dev;
+	struct io_uring ring;
+	unsigned int cmd_inflight;
+	unsigned int io_inflight;
+
 	pthread_t thread;
+	unsigned idx;
+
+#define UBLKS_T_STOPPING	(1U << 0)
+#define UBLKS_T_IDLE	(1U << 1)
+	unsigned state;
 };
 
 struct ublk_dev {
 	struct ublk_tgt tgt;
 	struct ublksrv_ctrl_dev_info  dev_info;
 	struct ublk_queue q[UBLK_MAX_QUEUES];
+	struct ublk_thread threads[UBLK_MAX_THREADS];
+	unsigned nthreads;
+	unsigned per_io_tasks;
 
 	int fds[MAX_BACK_FILES + 1];	/* fds[0] points to /dev/ublkcN */
 	int nr_fds;
@@ -196,22 +200,7 @@ struct ublk_dev {
 	void *private_data;
 };
 
-#ifndef offsetof
-#define offsetof(TYPE, MEMBER)  ((size_t)&((TYPE *)0)->MEMBER)
-#endif
-
-#ifndef container_of
-#define container_of(ptr, type, member) ({				\
-	unsigned long __mptr = (unsigned long)(ptr);			\
-	((type *)(__mptr - offsetof(type, member))); })
-#endif
-
-#define round_up(val, rnd) \
-	(((val) + ((rnd) - 1)) & ~((rnd) - 1))
-
-
-extern unsigned int ublk_dbg_mask;
-extern int ublk_queue_io_cmd(struct ublk_queue *q, struct ublk_io *io, unsigned tag);
+extern int ublk_queue_io_cmd(struct ublk_thread *t, struct ublk_io *io);
 
 
 static inline int ublk_io_auto_zc_fallback(const struct ublksrv_io_desc *iod)
@@ -225,11 +214,14 @@ static inline int is_target_io(__u64 user_data)
 }
 
 static inline __u64 build_user_data(unsigned tag, unsigned op,
-		unsigned tgt_data, unsigned is_target_io)
+		unsigned tgt_data, unsigned q_id, unsigned is_target_io)
 {
-	assert(!(tag >> 16) && !(op >> 8) && !(tgt_data >> 16));
+	/* we only have 7 bits to encode q_id */
+	_Static_assert(UBLK_MAX_QUEUES_SHIFT <= 7);
+	assert(!(tag >> 16) && !(op >> 8) && !(tgt_data >> 16) && !(q_id >> 7));
 
-	return tag | (op << 16) | (tgt_data << 24) | (__u64)is_target_io << 63;
+	return tag | (op << 16) | (tgt_data << 24) |
+		(__u64)q_id << 56 | (__u64)is_target_io << 63;
 }
 
 static inline unsigned int user_data_to_tag(__u64 user_data)
@@ -247,50 +239,33 @@ static inline unsigned int user_data_to_tgt_data(__u64 user_data)
 	return (user_data >> 24) & 0xffff;
 }
 
-static inline unsigned short ublk_cmd_op_nr(unsigned int op)
-{
-	return _IOC_NR(op);
-}
-
-static inline void ublk_err(const char *fmt, ...)
+static inline unsigned int user_data_to_q_id(__u64 user_data)
 {
-	va_list ap;
-
-	va_start(ap, fmt);
-	vfprintf(stderr, fmt, ap);
+	return (user_data >> 56) & 0x7f;
 }
 
-static inline void ublk_log(const char *fmt, ...)
+static inline unsigned short ublk_cmd_op_nr(unsigned int op)
 {
-	if (ublk_dbg_mask & UBLK_LOG) {
-		va_list ap;
-
-		va_start(ap, fmt);
-		vfprintf(stdout, fmt, ap);
-	}
+	return _IOC_NR(op);
 }
 
-static inline void ublk_dbg(int level, const char *fmt, ...)
+static inline struct ublk_queue *ublk_io_to_queue(const struct ublk_io *io)
 {
-	if (level & ublk_dbg_mask) {
-		va_list ap;
-
-		va_start(ap, fmt);
-		vfprintf(stdout, fmt, ap);
-	}
+	return container_of(io, struct ublk_queue, ios[io->tag]);
 }
 
-static inline int ublk_queue_alloc_sqes(struct ublk_queue *q,
+static inline int ublk_io_alloc_sqes(struct ublk_thread *t,
 		struct io_uring_sqe *sqes[], int nr_sqes)
 {
-	unsigned left = io_uring_sq_space_left(&q->ring);
+	struct io_uring *ring = &t->ring;
+	unsigned left = io_uring_sq_space_left(ring);
 	int i;
 
 	if (left < nr_sqes)
-		io_uring_submit(&q->ring);
+		io_uring_submit(ring);
 
 	for (i = 0; i < nr_sqes; i++) {
-		sqes[i] = io_uring_get_sqe(&q->ring);
+		sqes[i] = io_uring_get_sqe(ring);
 		if (!sqes[i])
 			return i;
 	}
@@ -345,7 +320,7 @@ static inline int ublk_get_io_res(const struct ublk_queue *q, unsigned tag)
 
 static inline void ublk_mark_io_done(struct ublk_io *io, int res)
 {
-	io->flags |= (UBLKSRV_NEED_COMMIT_RQ_COMP | UBLKSRV_IO_FREE);
+	io->flags |= (UBLKS_IO_NEED_COMMIT_RQ_COMP | UBLKS_IO_FREE);
 	io->result = res;
 }
 
@@ -367,45 +342,58 @@ static inline struct ublk_io *ublk_get_io(struct ublk_queue *q, unsigned tag)
 	return &q->ios[tag];
 }
 
-static inline int ublk_complete_io(struct ublk_queue *q, unsigned tag, int res)
+static inline int ublk_complete_io(struct ublk_thread *t, struct ublk_queue *q,
+				   unsigned tag, int res)
 {
 	struct ublk_io *io = &q->ios[tag];
 
 	ublk_mark_io_done(io, res);
 
-	return ublk_queue_io_cmd(q, io, tag);
+	return ublk_queue_io_cmd(t, io);
 }
 
-static inline void ublk_queued_tgt_io(struct ublk_queue *q, unsigned tag, int queued)
+static inline void ublk_queued_tgt_io(struct ublk_thread *t, struct ublk_queue *q,
+				      unsigned tag, int queued)
 {
 	if (queued < 0)
-		ublk_complete_io(q, tag, queued);
+		ublk_complete_io(t, q, tag, queued);
 	else {
 		struct ublk_io *io = ublk_get_io(q, tag);
 
-		q->io_inflight += queued;
+		t->io_inflight += queued;
 		io->tgt_ios = queued;
 		io->result = 0;
 	}
 }
 
-static inline int ublk_completed_tgt_io(struct ublk_queue *q, unsigned tag)
+static inline int ublk_completed_tgt_io(struct ublk_thread *t,
+					struct ublk_queue *q, unsigned tag)
 {
 	struct ublk_io *io = ublk_get_io(q, tag);
 
-	q->io_inflight--;
+	t->io_inflight--;
 
 	return --io->tgt_ios == 0;
 }
 
 static inline int ublk_queue_use_zc(const struct ublk_queue *q)
 {
-	return q->state & UBLKSRV_ZC;
+	return q->flags & UBLK_F_SUPPORT_ZERO_COPY;
 }
 
 static inline int ublk_queue_use_auto_zc(const struct ublk_queue *q)
 {
-	return q->state & UBLKSRV_AUTO_BUF_REG;
+	return q->flags & UBLK_F_AUTO_BUF_REG;
+}
+
+static inline int ublk_queue_auto_zc_fallback(const struct ublk_queue *q)
+{
+	return q->flags & UBLKS_Q_AUTO_BUF_REG_FALLBACK;
+}
+
+static inline int ublk_queue_no_buf(const struct ublk_queue *q)
+{
+	return ublk_queue_use_zc(q) || ublk_queue_use_auto_zc(q);
 }
 
 extern const struct ublk_tgt_ops null_tgt_ops;
@@ -416,10 +404,4 @@ extern const struct ublk_tgt_ops fault_inject_tgt_ops;
 void backing_file_tgt_deinit(struct ublk_dev *dev);
 int backing_file_tgt_init(struct ublk_dev *dev);
 
-static inline unsigned int ilog2(unsigned int x)
-{
-	if (x == 0)
-		return 0;
-	return (sizeof(x) * 8 - 1) - __builtin_clz(x);
-}
 #endif
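Because one per-thread ring now carries CQEs from several queues, build_user_data() packs the queue id into bits 56-62 (bit 63 still marks target I/O) and ublk_handle_cqe() recovers it with user_data_to_q_id(). A self-contained round-trip check of that bit layout, mirroring the helpers above:

#include <assert.h>
#include <stdio.h>

typedef unsigned long long u64;

/* same layout as build_user_data(): tag[0:15] op[16:23]
 * tgt_data[24:39] q_id[56:62] is_target_io[63] */
static u64 build(unsigned tag, unsigned op, unsigned tgt_data,
		 unsigned q_id, unsigned is_tgt)
{
	return tag | (op << 16) | (tgt_data << 24) |
		(u64)q_id << 56 | (u64)is_tgt << 63;
}

int main(void)
{
	u64 d = build(42, 7, 3, 5, 1);

	assert((d & 0xffff) == 42);		/* user_data_to_tag() */
	assert(((d >> 16) & 0xff) == 7);	/* user_data_to_op() */
	assert(((d >> 24) & 0xffff) == 3);	/* user_data_to_tgt_data() */
	assert(((d >> 56) & 0x7f) == 5);	/* user_data_to_q_id() */
	assert(d >> 63);			/* is_target_io() */
	printf("user_data layout round-trips\n");
	return 0;
}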
diff --git a/tools/testing/selftests/ublk/null.c b/tools/testing/selftests/ublk/null.c
index 44aca31cf2b0..f0e0003a4860 100644
--- a/tools/testing/selftests/ublk/null.c
+++ b/tools/testing/selftests/ublk/null.c
@@ -43,7 +43,7 @@ static int ublk_null_tgt_init(const struct dev_ctx *ctx, struct ublk_dev *dev)
 }
 
 static void __setup_nop_io(int tag, const struct ublksrv_io_desc *iod,
-		struct io_uring_sqe *sqe)
+		struct io_uring_sqe *sqe, int q_id)
 {
 	unsigned ublk_op = ublksrv_get_op(iod);
 
@@ -52,44 +52,47 @@ static void __setup_nop_io(int tag, const struct ublksrv_io_desc *iod,
 	sqe->flags |= IOSQE_FIXED_FILE;
 	sqe->rw_flags = IORING_NOP_FIXED_BUFFER | IORING_NOP_INJECT_RESULT;
 	sqe->len = iod->nr_sectors << 9; 	/* injected result */
-	sqe->user_data = build_user_data(tag, ublk_op, 0, 1);
+	sqe->user_data = build_user_data(tag, ublk_op, 0, q_id, 1);
 }
 
-static int null_queue_zc_io(struct ublk_queue *q, int tag)
+static int null_queue_zc_io(struct ublk_thread *t, struct ublk_queue *q,
+			    int tag)
 {
 	const struct ublksrv_io_desc *iod = ublk_get_iod(q, tag);
 	struct io_uring_sqe *sqe[3];
 
-	ublk_queue_alloc_sqes(q, sqe, 3);
+	ublk_io_alloc_sqes(t, sqe, 3);
 
-	io_uring_prep_buf_register(sqe[0], 0, tag, q->q_id, tag);
+	io_uring_prep_buf_register(sqe[0], 0, tag, q->q_id, ublk_get_io(q, tag)->buf_index);
 	sqe[0]->user_data = build_user_data(tag,
-			ublk_cmd_op_nr(sqe[0]->cmd_op), 0, 1);
+			ublk_cmd_op_nr(sqe[0]->cmd_op), 0, q->q_id, 1);
 	sqe[0]->flags |= IOSQE_CQE_SKIP_SUCCESS | IOSQE_IO_HARDLINK;
 
-	__setup_nop_io(tag, iod, sqe[1]);
+	__setup_nop_io(tag, iod, sqe[1], q->q_id);
 	sqe[1]->flags |= IOSQE_IO_HARDLINK;
 
-	io_uring_prep_buf_unregister(sqe[2], 0, tag, q->q_id, tag);
-	sqe[2]->user_data = build_user_data(tag, ublk_cmd_op_nr(sqe[2]->cmd_op), 0, 1);
+	io_uring_prep_buf_unregister(sqe[2], 0, tag, q->q_id, ublk_get_io(q, tag)->buf_index);
+	sqe[2]->user_data = build_user_data(tag, ublk_cmd_op_nr(sqe[2]->cmd_op), 0, q->q_id, 1);
 
 	// buf register is marked as IOSQE_CQE_SKIP_SUCCESS
 	return 2;
 }
 
-static int null_queue_auto_zc_io(struct ublk_queue *q, int tag)
+static int null_queue_auto_zc_io(struct ublk_thread *t, struct ublk_queue *q,
+				 int tag)
 {
 	const struct ublksrv_io_desc *iod = ublk_get_iod(q, tag);
 	struct io_uring_sqe *sqe[1];
 
-	ublk_queue_alloc_sqes(q, sqe, 1);
-	__setup_nop_io(tag, iod, sqe[0]);
+	ublk_io_alloc_sqes(t, sqe, 1);
+	__setup_nop_io(tag, iod, sqe[0], q->q_id);
 	return 1;
 }
 
-static void ublk_null_io_done(struct ublk_queue *q, int tag,
-		const struct io_uring_cqe *cqe)
+static void ublk_null_io_done(struct ublk_thread *t, struct ublk_queue *q,
+			      const struct io_uring_cqe *cqe)
 {
+	unsigned tag = user_data_to_tag(cqe->user_data);
 	unsigned op = user_data_to_op(cqe->user_data);
 	struct ublk_io *io = ublk_get_io(q, tag);
 
@@ -105,11 +108,12 @@ static void ublk_null_io_done(struct ublk_queue *q, int tag,
 	if (op == ublk_cmd_op_nr(UBLK_U_IO_REGISTER_IO_BUF))
 		io->tgt_ios += 1;
 
-	if (ublk_completed_tgt_io(q, tag))
-		ublk_complete_io(q, tag, io->result);
+	if (ublk_completed_tgt_io(t, q, tag))
+		ublk_complete_io(t, q, tag, io->result);
 }
 
-static int ublk_null_queue_io(struct ublk_queue *q, int tag)
+static int ublk_null_queue_io(struct ublk_thread *t, struct ublk_queue *q,
+			      int tag)
 {
 	const struct ublksrv_io_desc *iod = ublk_get_iod(q, tag);
 	unsigned auto_zc = ublk_queue_use_auto_zc(q);
@@ -117,14 +121,14 @@ static int ublk_null_queue_io(struct ublk_queue *q, int tag)
 	int queued;
 
 	if (auto_zc && !ublk_io_auto_zc_fallback(iod))
-		queued = null_queue_auto_zc_io(q, tag);
+		queued = null_queue_auto_zc_io(t, q, tag);
 	else if (zc)
-		queued = null_queue_zc_io(q, tag);
+		queued = null_queue_zc_io(t, q, tag);
 	else {
-		ublk_complete_io(q, tag, iod->nr_sectors << 9);
+		ublk_complete_io(t, q, tag, iod->nr_sectors << 9);
 		return 0;
 	}
-	ublk_queued_tgt_io(q, tag, queued);
+	ublk_queued_tgt_io(t, q, tag, queued);
 	return 0;
 }
 
@@ -134,9 +138,9 @@ static int ublk_null_queue_io(struct ublk_queue *q, int tag)
  */
 static unsigned short ublk_null_buf_index(const struct ublk_queue *q, int tag)
 {
-	if (q->state & UBLKSRV_AUTO_BUF_REG_FALLBACK)
+	if (ublk_queue_auto_zc_fallback(q))
 		return (unsigned short)-1;
-	return tag;
+	return q->ios[tag].buf_index;
 }
 
 const struct ublk_tgt_ops null_tgt_ops = {
diff --git a/tools/testing/selftests/ublk/stripe.c b/tools/testing/selftests/ublk/stripe.c
index 404a143bf3d6..1fb9b7cc281b 100644
--- a/tools/testing/selftests/ublk/stripe.c
+++ b/tools/testing/selftests/ublk/stripe.c
@@ -123,7 +123,8 @@ static inline enum io_uring_op stripe_to_uring_op(
 	assert(0);
 }
 
-static int stripe_queue_tgt_rw_io(struct ublk_queue *q, const struct ublksrv_io_desc *iod, int tag)
+static int stripe_queue_tgt_rw_io(struct ublk_thread *t, struct ublk_queue *q,
+				  const struct ublksrv_io_desc *iod, int tag)
 {
 	const struct stripe_conf *conf = get_chunk_shift(q);
 	unsigned auto_zc = (ublk_queue_use_auto_zc(q) != 0);
@@ -138,13 +139,13 @@ static int stripe_queue_tgt_rw_io(struct ublk_queue *q, const struct ublksrv_io_
 	io->private_data = s;
 	calculate_stripe_array(conf, iod, s, base);
 
-	ublk_queue_alloc_sqes(q, sqe, s->nr + extra);
+	ublk_io_alloc_sqes(t, sqe, s->nr + extra);
 
 	if (zc) {
-		io_uring_prep_buf_register(sqe[0], 0, tag, q->q_id, tag);
+		io_uring_prep_buf_register(sqe[0], 0, tag, q->q_id, io->buf_index);
 		sqe[0]->flags |= IOSQE_CQE_SKIP_SUCCESS | IOSQE_IO_HARDLINK;
 		sqe[0]->user_data = build_user_data(tag,
-				ublk_cmd_op_nr(sqe[0]->cmd_op), 0, 1);
+				ublk_cmd_op_nr(sqe[0]->cmd_op), 0, q->q_id, 1);
 	}
 
 	for (i = zc; i < s->nr + extra - zc; i++) {
@@ -162,35 +163,38 @@ static int stripe_queue_tgt_rw_io(struct ublk_queue *q, const struct ublksrv_io_
 			sqe[i]->flags |= IOSQE_IO_HARDLINK;
 		}
 		/* bit63 marks us as tgt io */
-		sqe[i]->user_data = build_user_data(tag, ublksrv_get_op(iod), i - zc, 1);
+		sqe[i]->user_data = build_user_data(tag, ublksrv_get_op(iod), i - zc, q->q_id, 1);
 	}
 	if (zc) {
 		struct io_uring_sqe *unreg = sqe[s->nr + 1];
 
-		io_uring_prep_buf_unregister(unreg, 0, tag, q->q_id, tag);
-		unreg->user_data = build_user_data(tag, ublk_cmd_op_nr(unreg->cmd_op), 0, 1);
+		io_uring_prep_buf_unregister(unreg, 0, tag, q->q_id, io->buf_index);
+		unreg->user_data = build_user_data(
+			tag, ublk_cmd_op_nr(unreg->cmd_op), 0, q->q_id, 1);
 	}
 
 	/* register buffer is skip_success */
 	return s->nr + zc;
 }
 
-static int handle_flush(struct ublk_queue *q, const struct ublksrv_io_desc *iod, int tag)
+static int handle_flush(struct ublk_thread *t, struct ublk_queue *q,
+			const struct ublksrv_io_desc *iod, int tag)
 {
 	const struct stripe_conf *conf = get_chunk_shift(q);
 	struct io_uring_sqe *sqe[NR_STRIPE];
 	int i;
 
-	ublk_queue_alloc_sqes(q, sqe, conf->nr_files);
+	ublk_io_alloc_sqes(t, sqe, conf->nr_files);
 	for (i = 0; i < conf->nr_files; i++) {
 		io_uring_prep_fsync(sqe[i], i + 1, IORING_FSYNC_DATASYNC);
 		io_uring_sqe_set_flags(sqe[i], IOSQE_FIXED_FILE);
-		sqe[i]->user_data = build_user_data(tag, UBLK_IO_OP_FLUSH, 0, 1);
+		sqe[i]->user_data = build_user_data(tag, UBLK_IO_OP_FLUSH, 0, q->q_id, 1);
 	}
 	return conf->nr_files;
 }
 
-static int stripe_queue_tgt_io(struct ublk_queue *q, int tag)
+static int stripe_queue_tgt_io(struct ublk_thread *t, struct ublk_queue *q,
+			       int tag)
 {
 	const struct ublksrv_io_desc *iod = ublk_get_iod(q, tag);
 	unsigned ublk_op = ublksrv_get_op(iod);
@@ -198,7 +202,7 @@ static int stripe_queue_tgt_io(struct ublk_queue *q, int tag)
 
 	switch (ublk_op) {
 	case UBLK_IO_OP_FLUSH:
-		ret = handle_flush(q, iod, tag);
+		ret = handle_flush(t, q, iod, tag);
 		break;
 	case UBLK_IO_OP_WRITE_ZEROES:
 	case UBLK_IO_OP_DISCARD:
@@ -206,7 +210,7 @@ static int stripe_queue_tgt_io(struct ublk_queue *q, int tag)
 		break;
 	case UBLK_IO_OP_READ:
 	case UBLK_IO_OP_WRITE:
-		ret = stripe_queue_tgt_rw_io(q, iod, tag);
+		ret = stripe_queue_tgt_rw_io(t, q, iod, tag);
 		break;
 	default:
 		ret = -EINVAL;
@@ -217,17 +221,19 @@ static int stripe_queue_tgt_io(struct ublk_queue *q, int tag)
 	return ret;
 }
 
-static int ublk_stripe_queue_io(struct ublk_queue *q, int tag)
+static int ublk_stripe_queue_io(struct ublk_thread *t, struct ublk_queue *q,
+				int tag)
 {
-	int queued = stripe_queue_tgt_io(q, tag);
+	int queued = stripe_queue_tgt_io(t, q, tag);
 
-	ublk_queued_tgt_io(q, tag, queued);
+	ublk_queued_tgt_io(t, q, tag, queued);
 	return 0;
 }
 
-static void ublk_stripe_io_done(struct ublk_queue *q, int tag,
-		const struct io_uring_cqe *cqe)
+static void ublk_stripe_io_done(struct ublk_thread *t, struct ublk_queue *q,
+				const struct io_uring_cqe *cqe)
 {
+	unsigned tag = user_data_to_tag(cqe->user_data);
 	const struct ublksrv_io_desc *iod = ublk_get_iod(q, tag);
 	unsigned op = user_data_to_op(cqe->user_data);
 	struct ublk_io *io = ublk_get_io(q, tag);
@@ -256,13 +262,13 @@ static void ublk_stripe_io_done(struct ublk_queue *q, int tag,
 		}
 	}
 
-	if (ublk_completed_tgt_io(q, tag)) {
+	if (ublk_completed_tgt_io(t, q, tag)) {
 		int res = io->result;
 
 		if (!res)
 			res = iod->nr_sectors << 9;
 
-		ublk_complete_io(q, tag, res);
+		ublk_complete_io(t, q, tag, res);
 
 		free_stripe_array(io->private_data);
 		io->private_data = NULL;
diff --git a/tools/testing/selftests/ublk/test_common.sh b/tools/testing/selftests/ublk/test_common.sh
index 0145569ee7e9..8a4dbd09feb0 100755
--- a/tools/testing/selftests/ublk/test_common.sh
+++ b/tools/testing/selftests/ublk/test_common.sh
@@ -278,6 +278,11 @@ __run_io_and_remove()
 	fio --name=job1 --filename=/dev/ublkb"${dev_id}" --ioengine=libaio \
 		--rw=randrw --norandommap --iodepth=256 --size="${size}" --numjobs="$(nproc)" \
 		--runtime=20 --time_based > /dev/null 2>&1 &
+	fio --name=batchjob --filename=/dev/ublkb"${dev_id}" --ioengine=io_uring \
+		--rw=randrw --norandommap --iodepth=256 --size="${size}" \
+		--numjobs="$(nproc)" --runtime=20 --time_based \
+		--iodepth_batch_submit=32 --iodepth_batch_complete_min=32 \
+		--force_async=7 > /dev/null 2>&1 &
 	sleep 2
 	if [ "${kill_server}" = "yes" ]; then
 		local state
diff --git a/tools/testing/selftests/ublk/test_generic_12.sh b/tools/testing/selftests/ublk/test_generic_12.sh
new file mode 100755
index 000000000000..7abbb00d251d
--- /dev/null
+++ b/tools/testing/selftests/ublk/test_generic_12.sh
@@ -0,0 +1,55 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+. "$(cd "$(dirname "$0")" && pwd)"/test_common.sh
+
+TID="generic_12"
+ERR_CODE=0
+
+if ! _have_program bpftrace; then
+	exit "$UBLK_SKIP_CODE"
+fi
+
+_prep_test "null" "do imbalanced load, it should be balanced over I/O threads"
+
+NTHREADS=6
+dev_id=$(_add_ublk_dev -t null -q 4 -d 16 --nthreads $NTHREADS --per_io_tasks)
+_check_add_dev $TID $?
+
+dev_t=$(_get_disk_dev_t "$dev_id")
+bpftrace trace/count_ios_per_tid.bt "$dev_t" > "$UBLK_TMP" 2>&1 &
+btrace_pid=$!
+sleep 2
+
+if ! kill -0 "$btrace_pid" > /dev/null 2>&1; then
+	_cleanup_test "null"
+	exit "$UBLK_SKIP_CODE"
+fi
+
+# do imbalanced I/O on the ublk device
+# pin to cpu 0 to prevent migration/only target one queue
+fio --name=write_seq \
+	--filename=/dev/ublkb"${dev_id}" \
+	--ioengine=libaio --iodepth=16 \
+	--rw=write \
+	--size=512M \
+	--direct=1 \
+	--bs=4k \
+	--cpus_allowed=0 > /dev/null 2>&1
+ERR_CODE=$?
+kill "$btrace_pid"
+wait
+
+# check that every task handles some I/O, even though all I/O was issued
+# from a single CPU. when ublk gets support for round-robin tag
+# allocation, this check can be strengthened to assert that every thread
+# handles the same number of I/Os
+NR_THREADS_THAT_HANDLED_IO=$(grep -c '@' ${UBLK_TMP})
+if [[ $NR_THREADS_THAT_HANDLED_IO -ne $NTHREADS ]]; then
+	echo "only $NR_THREADS_THAT_HANDLED_IO handled I/O! expected $NTHREADS"
+	cat "$UBLK_TMP"
+	ERR_CODE=255
+fi
+
+_cleanup_test "null"
+_show_result $TID $ERR_CODE
diff --git a/tools/testing/selftests/ublk/test_stress_03.sh b/tools/testing/selftests/ublk/test_stress_03.sh
index 7d728ce50774..3ed4c9b2d8c0 100755
--- a/tools/testing/selftests/ublk/test_stress_03.sh
+++ b/tools/testing/selftests/ublk/test_stress_03.sh
@@ -32,14 +32,23 @@ _create_backfile 2 128M
 ublk_io_and_remove 8G -t null -q 4 -z &
 ublk_io_and_remove 256M -t loop -q 4 -z "${UBLK_BACKFILES[0]}" &
 ublk_io_and_remove 256M -t stripe -q 4 -z "${UBLK_BACKFILES[1]}" "${UBLK_BACKFILES[2]}" &
+wait
 
 if _have_feature "AUTO_BUF_REG"; then
 	ublk_io_and_remove 8G -t null -q 4 --auto_zc &
 	ublk_io_and_remove 256M -t loop -q 4 --auto_zc "${UBLK_BACKFILES[0]}" &
 	ublk_io_and_remove 256M -t stripe -q 4 --auto_zc "${UBLK_BACKFILES[1]}" "${UBLK_BACKFILES[2]}" &
 	ublk_io_and_remove 8G -t null -q 4 -z --auto_zc --auto_zc_fallback &
+	wait
+fi
+
+if _have_feature "PER_IO_DAEMON"; then
+	ublk_io_and_remove 8G -t null -q 4 --auto_zc --nthreads 8 --per_io_tasks &
+	ublk_io_and_remove 256M -t loop -q 4 --auto_zc --nthreads 8 --per_io_tasks "${UBLK_BACKFILES[0]}" &
+	ublk_io_and_remove 256M -t stripe -q 4 --auto_zc --nthreads 8 --per_io_tasks "${UBLK_BACKFILES[1]}" "${UBLK_BACKFILES[2]}" &
+	ublk_io_and_remove 8G -t null -q 4 -z --auto_zc --auto_zc_fallback --nthreads 8 --per_io_tasks &
+	wait
 fi
 
-wait
 _cleanup_test "stress"
 _show_result $TID $ERR_CODE
diff --git a/tools/testing/selftests/ublk/test_stress_04.sh b/tools/testing/selftests/ublk/test_stress_04.sh
index 9bcfa64ea1f0..40d1437ca298 100755
--- a/tools/testing/selftests/ublk/test_stress_04.sh
+++ b/tools/testing/selftests/ublk/test_stress_04.sh
@@ -38,6 +38,13 @@ if _have_feature "AUTO_BUF_REG"; then
 	ublk_io_and_kill_daemon 256M -t stripe -q 4 --auto_zc "${UBLK_BACKFILES[1]}" "${UBLK_BACKFILES[2]}" &
 	ublk_io_and_kill_daemon 8G -t null -q 4 -z --auto_zc --auto_zc_fallback &
 fi
+
+if _have_feature "PER_IO_DAEMON"; then
+	ublk_io_and_kill_daemon 8G -t null -q 4 --nthreads 8 --per_io_tasks &
+	ublk_io_and_kill_daemon 256M -t loop -q 4 --nthreads 8 --per_io_tasks "${UBLK_BACKFILES[0]}" &
+	ublk_io_and_kill_daemon 256M -t stripe -q 4 --nthreads 8 --per_io_tasks "${UBLK_BACKFILES[1]}" "${UBLK_BACKFILES[2]}" &
+	ublk_io_and_kill_daemon 8G -t null -q 4 --nthreads 8 --per_io_tasks &
+fi
 wait
 
 _cleanup_test "stress"
diff --git a/tools/testing/selftests/ublk/test_stress_05.sh b/tools/testing/selftests/ublk/test_stress_05.sh
index bcfc904cefc6..566cfd90d192 100755
--- a/tools/testing/selftests/ublk/test_stress_05.sh
+++ b/tools/testing/selftests/ublk/test_stress_05.sh
@@ -69,5 +69,12 @@ if _have_feature "AUTO_BUF_REG"; then
 	done
 fi
 
+if _have_feature "PER_IO_DAEMON"; then
+	ublk_io_and_remove 8G -t null -q 4 --nthreads 8 --per_io_tasks -r 1 -i "$reissue" &
+	ublk_io_and_remove 256M -t loop -q 4 --nthreads 8 --per_io_tasks -r 1 -i "$reissue" "${UBLK_BACKFILES[0]}" &
+	ublk_io_and_remove 8G -t null -q 4 --nthreads 8 --per_io_tasks -r 1 -i "$reissue" &
+fi
+wait
+
 _cleanup_test "stress"
 _show_result $TID $ERR_CODE
diff --git a/tools/testing/selftests/ublk/trace/count_ios_per_tid.bt b/tools/testing/selftests/ublk/trace/count_ios_per_tid.bt
new file mode 100644
index 000000000000..f4aa63ff2938
--- /dev/null
+++ b/tools/testing/selftests/ublk/trace/count_ios_per_tid.bt
@@ -0,0 +1,11 @@
+/*
+ * Tabulates and prints I/O completions per thread for the given device
+ *
+ * $1: dev_t
+*/
+tracepoint:block:block_rq_complete
+{
+	if (args.dev == $1) {
+		@[tid] = count();
+	}
+}
diff --git a/tools/testing/selftests/ublk/utils.h b/tools/testing/selftests/ublk/utils.h
new file mode 100644
index 000000000000..36545d1567f1
--- /dev/null
+++ b/tools/testing/selftests/ublk/utils.h
@@ -0,0 +1,70 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef KUBLK_UTILS_H
+#define KUBLK_UTILS_H
+
+#define __maybe_unused __attribute__((unused))
+
+#ifndef min
+#define min(a, b) ((a) < (b) ? (a) : (b))
+#endif
+
+#define ARRAY_SIZE(x) (sizeof(x) / sizeof(x[0]))
+
+#ifndef offsetof
+#define offsetof(TYPE, MEMBER)  ((size_t)&((TYPE *)0)->MEMBER)
+#endif
+
+#ifndef container_of
+#define container_of(ptr, type, member) ({				\
+	unsigned long __mptr = (unsigned long)(ptr);			\
+	((type *)(__mptr - offsetof(type, member))); })
+#endif
+
+#define round_up(val, rnd) \
+	(((val) + ((rnd) - 1)) & ~((rnd) - 1))
+
+static inline unsigned int ilog2(unsigned int x)
+{
+	if (x == 0)
+		return 0;
+	return (sizeof(x) * 8 - 1) - __builtin_clz(x);
+}
+
+#define UBLK_DBG_DEV            (1U << 0)
+#define UBLK_DBG_THREAD         (1U << 1)
+#define UBLK_DBG_IO_CMD         (1U << 2)
+#define UBLK_DBG_IO             (1U << 3)
+#define UBLK_DBG_CTRL_CMD       (1U << 4)
+#define UBLK_LOG                (1U << 5)
+
+extern unsigned int ublk_dbg_mask;
+
+static inline void ublk_err(const char *fmt, ...)
+{
+	va_list ap;
+
+	va_start(ap, fmt);
+	vfprintf(stderr, fmt, ap);
+}
+
+static inline void ublk_log(const char *fmt, ...)
+{
+	if (ublk_dbg_mask & UBLK_LOG) {
+		va_list ap;
+
+		va_start(ap, fmt);
+		vfprintf(stdout, fmt, ap);
+	}
+}
+
+static inline void ublk_dbg(int level, const char *fmt, ...)
+{
+	if (level & ublk_dbg_mask) {
+		va_list ap;
+
+		va_start(ap, fmt);
+		vfprintf(stdout, fmt, ap);
+	}
+}
+
+#endif
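utils.h collects the kernel-style helper macros that kublk.h previously carried inline. A small standalone check of ilog2(), round_up(), and container_of() as defined above (the struct and values are examples only); note that ublk_io_to_queue() relies on exactly this container_of() pattern with the ios[io->tag] member:

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

/* local copies of the utils.h helpers under test */
#define container_of(ptr, type, member) ({				\
	unsigned long __mptr = (unsigned long)(ptr);			\
	((type *)(__mptr - offsetof(type, member))); })

#define round_up(val, rnd) \
	(((val) + ((rnd) - 1)) & ~((rnd) - 1))

static inline unsigned int ilog2(unsigned int x)
{
	if (x == 0)
		return 0;
	return (sizeof(x) * 8 - 1) - __builtin_clz(x);
}

struct item {
	int key;
	int value;
};

int main(void)
{
	struct item it = { .key = 1, .value = 2 };
	int *vp = &it.value;

	assert(ilog2(32) == 5);			/* 1 << UBLK_MAX_QUEUES_SHIFT */
	assert(round_up(130, 8) == 136);	/* power-of-two rounding */
	assert(container_of(vp, struct item, value) == &it);
	printf("utils.h helpers behave as expected\n");
	return 0;
}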