author    Jens Axboe <axboe@kernel.dk>    2024-01-28 20:05:47 -0700
committer Jens Axboe <axboe@kernel.dk>    2024-02-08 13:27:03 -0700
commit    4bcb982cce74e18155fba0d97394ca9634e0d8f0 (patch)
tree      cfbaa74fe21d99a8175d9f23b1b8d3a579bd2562 /include/trace/events
parent    5492a490e64ed47483a0b28f10ba07eef4a47edb (diff)
io_uring: expand main struct io_kiocb flags to 64-bits
We're out of space here, and none of the flags are easily reclaimable. Bump it to 64-bits and re-arrange the struct a bit to avoid gaps.

Add a specific bitwise type for the request flags, io_request_flags_t. This will help catch violations of casting this value to a smaller type on 32-bit archs, like unsigned int.

This creates a hole in the io_kiocb, so move nr_tw up and rsrc_node down to retain needing only cacheline 0 and 1 for non-polled opcodes.

No functional changes intended in this patch.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
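For readers unfamiliar with the pattern the message refers to, below is a minimal, self-contained sketch of how a sparse __bitwise type plus __force casts catch accidental narrowing of the 64-bit flags. The EX_* flag names, the ex_io_kiocb struct, and ex_trace_flags() are illustrative placeholders, not the kernel's actual definitions; only the general __bitwise/__force mechanism is what the patch relies on.

/* Outside the kernel, sparse attributes compile away so gcc still builds this. */
#ifdef __CHECKER__
#define __bitwise	__attribute__((bitwise))
#define __force		__attribute__((force))
#else
#define __bitwise
#define __force
#endif

typedef unsigned long long u64;

/* 64-bit request flag type that sparse treats as distinct from a plain u64 */
typedef u64 __bitwise io_request_flags_t;

/* hypothetical flag values, for illustration only */
#define EX_REQ_F_FIXED_FILE	((__force io_request_flags_t)(1ULL << 0))
#define EX_REQ_F_LINK		((__force io_request_flags_t)(1ULL << 1))

struct ex_io_kiocb {
	io_request_flags_t flags;
};

static void ex_trace_flags(const struct ex_io_kiocb *req)
{
	/*
	 * Storing the flags in an 'unsigned int' would silently drop the
	 * upper 32 bits and make sparse complain about mixing bitwise and
	 * plain integer types. The trace events therefore widen their
	 * 'flags' field to unsigned long long and use an explicit __force
	 * cast when copying the value out, as in the hunks below.
	 */
	unsigned long long traced = (__force unsigned long long) req->flags;
	(void)traced;
}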
Diffstat (limited to 'include/trace/events')
-rw-r--r--   include/trace/events/io_uring.h   20
1 file changed, 10 insertions(+), 10 deletions(-)
diff --git a/include/trace/events/io_uring.h b/include/trace/events/io_uring.h
index 69454f1f98b0..b241f2b12486 100644
--- a/include/trace/events/io_uring.h
+++ b/include/trace/events/io_uring.h
@@ -148,7 +148,7 @@ TRACE_EVENT(io_uring_queue_async_work,
__field( void *, req )
__field( u64, user_data )
__field( u8, opcode )
- __field( unsigned int, flags )
+ __field( unsigned long long, flags )
__field( struct io_wq_work *, work )
__field( int, rw )
@@ -159,7 +159,7 @@ TRACE_EVENT(io_uring_queue_async_work,
__entry->ctx = req->ctx;
__entry->req = req;
__entry->user_data = req->cqe.user_data;
- __entry->flags = req->flags;
+ __entry->flags = (__force unsigned long long) req->flags;
__entry->opcode = req->opcode;
__entry->work = &req->work;
__entry->rw = rw;
@@ -167,10 +167,10 @@ TRACE_EVENT(io_uring_queue_async_work,
__assign_str(op_str, io_uring_get_opcode(req->opcode));
),
- TP_printk("ring %p, request %p, user_data 0x%llx, opcode %s, flags 0x%x, %s queue, work %p",
+ TP_printk("ring %p, request %p, user_data 0x%llx, opcode %s, flags 0x%llx, %s queue, work %p",
__entry->ctx, __entry->req, __entry->user_data,
- __get_str(op_str),
- __entry->flags, __entry->rw ? "hashed" : "normal", __entry->work)
+ __get_str(op_str), __entry->flags,
+ __entry->rw ? "hashed" : "normal", __entry->work)
);
/**
@@ -378,7 +378,7 @@ TRACE_EVENT(io_uring_submit_req,
__field( void *, req )
__field( unsigned long long, user_data )
__field( u8, opcode )
- __field( u32, flags )
+ __field( unsigned long long, flags )
__field( bool, sq_thread )
__string( op_str, io_uring_get_opcode(req->opcode) )
@@ -389,16 +389,16 @@ TRACE_EVENT(io_uring_submit_req,
__entry->req = req;
__entry->user_data = req->cqe.user_data;
__entry->opcode = req->opcode;
- __entry->flags = req->flags;
+ __entry->flags = (__force unsigned long long) req->flags;
__entry->sq_thread = req->ctx->flags & IORING_SETUP_SQPOLL;
__assign_str(op_str, io_uring_get_opcode(req->opcode));
),
- TP_printk("ring %p, req %p, user_data 0x%llx, opcode %s, flags 0x%x, "
+ TP_printk("ring %p, req %p, user_data 0x%llx, opcode %s, flags 0x%llx, "
"sq_thread %d", __entry->ctx, __entry->req,
- __entry->user_data, __get_str(op_str),
- __entry->flags, __entry->sq_thread)
+ __entry->user_data, __get_str(op_str), __entry->flags,
+ __entry->sq_thread)
);
/*